nwo: stringlengths 5 to 86
sha: stringlengths 40 to 40
path: stringlengths 4 to 189
language: stringclasses 1 value
identifier: stringlengths 1 to 94
parameters: stringlengths 2 to 4.03k
argument_list: stringclasses 1 value
return_statement: stringlengths 0 to 11.5k
docstring: stringlengths 1 to 33.2k
docstring_summary: stringlengths 0 to 5.15k
docstring_tokens: sequence
function: stringlengths 34 to 151k
function_tokens: sequence
url: stringlengths 90 to 278
PyMesh/PyMesh
384ba882b7558ba6e8653ed263c419226c22bddf
python/pymesh/wires/WireNetwork.py
python
WireNetwork.load_from_file
(self, wire_file)
Load vertices and edges from a file.

Args:
    wire_file (:py:class:`str`): Input wire file name.
        The file should have the following format::

            # This is a comment
            v x y z
            v x y z
            ...
            l i j # where i and j are vertex indices (starting from 1)
            l i j
            ...
Load vertices and edges from a file.
[ "Load", "vertices", "and", "edges", "from", "a", "file", "." ]
def load_from_file(self, wire_file):
    """ Load vertices and edges from a file.

    Args:
        wire_file (:py:class:`str`): Input wire file name.
            The file should have the following format::

                # This is a comment
                v x y z
                v x y z
                ...
                l i j # where i and j are vertex indices (starting from 1)
                l i j
                ...
    """
    self.raw_wires = PyMesh.WireNetwork.create(wire_file)
    self.__initialize_wires()
[ "def", "load_from_file", "(", "self", ",", "wire_file", ")", ":", "self", ".", "raw_wires", "=", "PyMesh", ".", "WireNetwork", ".", "create", "(", "wire_file", ")", "self", ".", "__initialize_wires", "(", ")" ]
https://github.com/PyMesh/PyMesh/blob/384ba882b7558ba6e8653ed263c419226c22bddf/python/pymesh/wires/WireNetwork.py#L102-L120
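A wire file matching the format in the docstring above is plain text; a one-edge example (file name and contents invented for illustration):

# example.wire: two vertices joined by one edge
# vertex indices in "l" lines start from 1
v 0.0 0.0 0.0
v 1.0 0.0 0.0
l 1 2

A hedged loading sketch, assuming a PyMesh build with the wires module and a default constructor that creates an empty network:

import pymesh

wn = pymesh.wires.WireNetwork()    # assumption: creates an empty wire network
wn.load_from_file("example.wire")  # populates vertices and edges from the file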
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/protobuf/python/google/protobuf/message.py
python
Message.SerializeToString
(self)
Serializes the protocol message to a binary string.

Returns:
    A binary string representation of the message if all of the required
    fields in the message are set (i.e. the message is initialized).

Raises:
    message.EncodeError if the message isn't initialized.
Serializes the protocol message to a binary string.
[ "Serializes", "the", "protocol", "message", "to", "a", "binary", "string", "." ]
def SerializeToString(self):
    """Serializes the protocol message to a binary string.

    Returns:
      A binary string representation of the message if all of the required
      fields in the message are set (i.e. the message is initialized).

    Raises:
      message.EncodeError if the message isn't initialized.
    """
    raise NotImplementedError
[ "def", "SerializeToString", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/protobuf/python/google/protobuf/message.py#L187-L197
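SerializeToString is abstract in this base class; real use goes through a protoc-generated message class. A minimal sketch, where addressbook_pb2 and its Person message (with a name field) are hypothetical stand-ins for generated code:

from google.protobuf import message
import addressbook_pb2  # hypothetical: module generated by protoc

person = addressbook_pb2.Person()
person.name = "Ada"  # hypothetical field
try:
    data = person.SerializeToString()  # bytes, usable for storage or transport
except message.EncodeError:
    # raised when a proto2 message with unset required fields is serialized
    raise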
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py2/setuptools/msvc.py
python
EnvironmentInfo.UCRTIncludes
(self)
return [join(include, '%sucrt' % self._ucrt_subdir)]
Microsoft Universal C Runtime SDK Include.

Return
------
list of str
    paths
Microsoft Universal C Runtime SDK Include.
[ "Microsoft", "Universal", "C", "Runtime", "SDK", "Include", "." ]
def UCRTIncludes(self):
    """
    Microsoft Universal C Runtime SDK Include.

    Return
    ------
    list of str
        paths
    """
    if self.vs_ver < 14.0:
        return []
    include = join(self.si.UniversalCRTSdkDir, 'include')
    return [join(include, '%sucrt' % self._ucrt_subdir)]
[ "def", "UCRTIncludes", "(", "self", ")", ":", "if", "self", ".", "vs_ver", "<", "14.0", ":", "return", "[", "]", "include", "=", "join", "(", "self", ".", "si", ".", "UniversalCRTSdkDir", ",", "'include'", ")", "return", "[", "join", "(", "include", ",", "'%sucrt'", "%", "self", ".", "_ucrt_subdir", ")", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/msvc.py#L1496-L1509
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/nn/metrics/perplexity.py
python
Perplexity.update
(self, *inputs)
Updates the internal evaluation result: :math:`preds` and :math:`labels`.

Args:
    inputs: Input `preds` and `labels`. `preds` and `labels` are a `Tensor`, list or numpy.ndarray.
        `preds` is the predicted values, `labels` is the labels of the data.
        The shape of `preds` and `labels` are both :math:`(N, C)`.

Raises:
    ValueError: If the number of the inputs is not 2.
    RuntimeError: If preds and labels have different lengths.
    RuntimeError: If label shape is not equal to pred shape.
Updates the internal evaluation result: :math:`preds` and :math:`labels`.
[ "Updates", "the", "internal", "evaluation", "result", ":", "math", ":", "preds", "and", ":", "math", ":", "labels", "." ]
def update(self, *inputs):
    """
    Updates the internal evaluation result: :math:`preds` and :math:`labels`.

    Args:
        inputs: Input `preds` and `labels`. `preds` and `labels` are a `Tensor`, list or
            numpy.ndarray. `preds` is the predicted values, `labels` is the labels of the data.
            The shape of `preds` and `labels` are both :math:`(N, C)`.

    Raises:
        ValueError: If the number of the inputs is not 2.
        RuntimeError: If preds and labels have different lengths.
        RuntimeError: If label shape is not equal to pred shape.
    """
    if len(inputs) != 2:
        raise ValueError("For 'Perplexity.update', it needs 2 inputs (predicted value, label), but got {}."
                         .format(len(inputs)))
    preds = [self._convert_data(inputs[0])]
    labels = [self._convert_data(inputs[1])]
    if len(preds) != len(labels):
        raise RuntimeError("For 'Perplexity.update', predicted value (input[0]) and label (input[1]) should have "
                           "the same length, but got predicted value length {}, label length {}."
                           .format(len(preds), len(labels)))

    loss = 0.
    num = 0
    for label, pred in zip(labels, preds):
        if label.size != pred.size / pred.shape[-1]:
            raise RuntimeError("For 'Perplexity.update', predicted value (input[0]) and label (input[1]) should "
                               "have the same shape, but got predicted value shape {}, label shape {}."
                               .format(pred.shape, label.shape))
        label = label.reshape((label.size,))
        label_expand = label.astype(int)
        label_expand = np.expand_dims(label_expand, axis=1)
        first_indices = np.arange(label_expand.shape[0])[:, None]
        pred = np.squeeze(pred[first_indices, label_expand])
        if self.ignore_label is not None:
            ignore = (label == self.ignore_label).astype(pred.dtype)
            num -= np.sum(ignore)
            pred = pred * (1 - ignore) + ignore
        loss -= np.sum(np.log(np.maximum(1e-10, pred)))
        num += pred.size
    self._sum_metric += loss
    self._num_inst += num
[ "def", "update", "(", "self", ",", "*", "inputs", ")", ":", "if", "len", "(", "inputs", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"For 'Perplexity.update', it needs 2 inputs (predicted value, label), but got {}.\"", ".", "format", "(", "len", "(", "inputs", ")", ")", ")", "preds", "=", "[", "self", ".", "_convert_data", "(", "inputs", "[", "0", "]", ")", "]", "labels", "=", "[", "self", ".", "_convert_data", "(", "inputs", "[", "1", "]", ")", "]", "if", "len", "(", "preds", ")", "!=", "len", "(", "labels", ")", ":", "raise", "RuntimeError", "(", "\"For 'Perplexity.update', predicted value (input[0]) and label (input[1]) should have \"", "\"the same length, but got predicted value length {}, label length {}.\"", ".", "format", "(", "len", "(", "preds", ")", ",", "len", "(", "labels", ")", ")", ")", "loss", "=", "0.", "num", "=", "0", "for", "label", ",", "pred", "in", "zip", "(", "labels", ",", "preds", ")", ":", "if", "label", ".", "size", "!=", "pred", ".", "size", "/", "pred", ".", "shape", "[", "-", "1", "]", ":", "raise", "RuntimeError", "(", "\"For 'Perplexity.update', predicted value (input[0]) and label (input[1]) should \"", "\"have the same shape, but got predicted value shape {}, label shape {}.\"", ".", "format", "(", "pred", ".", "shape", ",", "label", ".", "shape", ")", ")", "label", "=", "label", ".", "reshape", "(", "(", "label", ".", "size", ",", ")", ")", "label_expand", "=", "label", ".", "astype", "(", "int", ")", "label_expand", "=", "np", ".", "expand_dims", "(", "label_expand", ",", "axis", "=", "1", ")", "first_indices", "=", "np", ".", "arange", "(", "label_expand", ".", "shape", "[", "0", "]", ")", "[", ":", ",", "None", "]", "pred", "=", "np", ".", "squeeze", "(", "pred", "[", "first_indices", ",", "label_expand", "]", ")", "if", "self", ".", "ignore_label", "is", "not", "None", ":", "ignore", "=", "(", "label", "==", "self", ".", "ignore_label", ")", ".", "astype", "(", "pred", ".", "dtype", ")", "num", "-=", "np", ".", "sum", "(", "ignore", ")", "pred", "=", "pred", "*", "(", "1", "-", "ignore", ")", "+", "ignore", "loss", "-=", "np", ".", "sum", "(", "np", ".", "log", "(", "np", ".", "maximum", "(", "1e-10", ",", "pred", ")", ")", ")", "num", "+=", "pred", ".", "size", "self", ".", "_sum_metric", "+=", "loss", "self", ".", "_num_inst", "+=", "num" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/metrics/perplexity.py#L69-L114
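The heart of update() above is a clipped negative log-likelihood over the probabilities at the label indices, with an optional ignore mask. A NumPy-only sketch of the same arithmetic on dummy data (MindSpore is not needed to follow it):

import numpy as np

preds = np.array([[0.2, 0.7, 0.1],
                  [0.6, 0.3, 0.1]])  # (N, C) predicted class probabilities
labels = np.array([1, 0])            # (N,) true class indices

p = preds[np.arange(labels.size), labels]     # probability assigned to the true class
loss = -np.sum(np.log(np.maximum(1e-10, p)))  # clipped NLL, mirroring the code above
print(np.exp(loss / labels.size))             # perplexity over the batch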
SoarGroup/Soar
a1c5e249499137a27da60533c72969eef3b8ab6b
scons/scons-local-4.1.0/SCons/Environment.py
python
Base.PrependUnique
(self, delete_existing=0, **kw)
Prepend values to existing construction variables in an Environment, if they're not already there. If delete_existing is 1, removes existing values first, so values move to front.
Prepend values to existing construction variables in an Environment, if they're not already there. If delete_existing is 1, removes existing values first, so values move to front.
[ "Prepend", "values", "to", "existing", "construction", "variables", "in", "an", "Environment", "if", "they", "re", "not", "already", "there", ".", "If", "delete_existing", "is", "1", "removes", "existing", "values", "first", "so", "values", "move", "to", "front", "." ]
def PrependUnique(self, delete_existing=0, **kw):
    """Prepend values to existing construction variables
    in an Environment, if they're not already there.
    If delete_existing is 1, removes existing values first, so
    values move to front.
    """
    kw = copy_non_reserved_keywords(kw)
    for key, val in kw.items():
        if is_List(val):
            val = _delete_duplicates(val, not delete_existing)
        if key not in self._dict or self._dict[key] in ('', None):
            self._dict[key] = val
        elif is_Dict(self._dict[key]) and is_Dict(val):
            self._dict[key].update(val)
        elif is_List(val):
            dk = self._dict[key]
            if not is_List(dk):
                dk = [dk]
            if delete_existing:
                dk = [x for x in dk if x not in val]
            else:
                val = [x for x in val if x not in dk]
            self._dict[key] = val + dk
        else:
            dk = self._dict[key]
            if is_List(dk):
                # By elimination, val is not a list.  Since dk is a
                # list, wrap val in a list first.
                if delete_existing:
                    dk = [x for x in dk if x not in val]
                    self._dict[key] = [val] + dk
                else:
                    if val not in dk:
                        self._dict[key] = [val] + dk
            else:
                if delete_existing:
                    dk = [x for x in dk if x not in val]
                self._dict[key] = val + dk
    self.scanner_map_delete(kw)
[ "def", "PrependUnique", "(", "self", ",", "delete_existing", "=", "0", ",", "*", "*", "kw", ")", ":", "kw", "=", "copy_non_reserved_keywords", "(", "kw", ")", "for", "key", ",", "val", "in", "kw", ".", "items", "(", ")", ":", "if", "is_List", "(", "val", ")", ":", "val", "=", "_delete_duplicates", "(", "val", ",", "not", "delete_existing", ")", "if", "key", "not", "in", "self", ".", "_dict", "or", "self", ".", "_dict", "[", "key", "]", "in", "(", "''", ",", "None", ")", ":", "self", ".", "_dict", "[", "key", "]", "=", "val", "elif", "is_Dict", "(", "self", ".", "_dict", "[", "key", "]", ")", "and", "is_Dict", "(", "val", ")", ":", "self", ".", "_dict", "[", "key", "]", ".", "update", "(", "val", ")", "elif", "is_List", "(", "val", ")", ":", "dk", "=", "self", ".", "_dict", "[", "key", "]", "if", "not", "is_List", "(", "dk", ")", ":", "dk", "=", "[", "dk", "]", "if", "delete_existing", ":", "dk", "=", "[", "x", "for", "x", "in", "dk", "if", "x", "not", "in", "val", "]", "else", ":", "val", "=", "[", "x", "for", "x", "in", "val", "if", "x", "not", "in", "dk", "]", "self", ".", "_dict", "[", "key", "]", "=", "val", "+", "dk", "else", ":", "dk", "=", "self", ".", "_dict", "[", "key", "]", "if", "is_List", "(", "dk", ")", ":", "# By elimination, val is not a list. Since dk is a", "# list, wrap val in a list first.", "if", "delete_existing", ":", "dk", "=", "[", "x", "for", "x", "in", "dk", "if", "x", "not", "in", "val", "]", "self", ".", "_dict", "[", "key", "]", "=", "[", "val", "]", "+", "dk", "else", ":", "if", "val", "not", "in", "dk", ":", "self", ".", "_dict", "[", "key", "]", "=", "[", "val", "]", "+", "dk", "else", ":", "if", "delete_existing", ":", "dk", "=", "[", "x", "for", "x", "in", "dk", "if", "x", "not", "in", "val", "]", "self", ".", "_dict", "[", "key", "]", "=", "val", "+", "dk", "self", ".", "scanner_map_delete", "(", "kw", ")" ]
https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Environment.py#L1775-L1813
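A sketch of typical use from an SConstruct file (paths invented): values already present are not duplicated, and genuinely new values move to the front of the variable.

# SConstruct (run with `scons`; Environment is provided by SCons)
env = Environment(CPPPATH=['include'])
env.PrependUnique(CPPPATH=['include', 'third_party/include'])
print(env['CPPPATH'])  # ['third_party/include', 'include']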
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/numpy/array_ops.py
python
apply_along_axis
(func1d, axis, arr, *args, **kwargs)
return res
Applies a function to 1-D slices along the given axis.
Executes ``func1d(a, *args, **kwargs)`` where `func1d` operates on 1-D arrays
and `a` is a 1-D slice of arr along axis.

Args:
    func1d (function): Maps `(M,) -> (Nj…)`. This function should accept 1-D arrays.
        It is applied to 1-D slices of arr along the specified axis.
    axis (int): Axis along which arr is sliced.
    arr (Tensor): Input array with shape `(Ni…, M, Nk…)`.
    args (any): Additional arguments to `func1d`.
    kwargs (any): Additional named arguments to `func1d`.

Returns:
    Tensor with shape `(Ni…, Nj…, Nk…)`, the output array. Its shape is identical to the
    shape of `arr`, except along the `axis` dimension. This axis is removed, and replaced
    with new dimensions equal to the shape of the return value of `func1d`. So if `func1d`
    returns a scalar, the output will have one fewer dimensions than `arr`.

Supported Platforms:
    ``Ascend`` ``GPU`` ``CPU``

Raises:
    ValueError: If axis is out of the range.

Examples:
    >>> import mindspore.numpy as np
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> print(np.apply_along_axis(np.diag, -1, b))
    [[[1 0 0]
      [0 2 0]
      [0 0 3]]
     [[4 0 0]
      [0 5 0]
      [0 0 6]]
     [[7 0 0]
      [0 8 0]
      [0 0 9]]]
Applies a function to 1-D slices along the given axis. Executes ``func1d(a, *args, **kwargs)`` where `func1d` operates on 1-D arrays and `a` is a 1-D slice of arr along axis.
[ "Applies", "a", "function", "to", "1", "-", "D", "slices", "along", "the", "given", "axis", ".", "Executes", "func1d", "(", "a", "*", "args", "**", "kwargs", ")", "where", "func1d", "operates", "on", "1", "-", "D", "arrays", "and", "a", "is", "a", "1", "-", "D", "slice", "of", "arr", "along", "axis", "." ]
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Applies a function to 1-D slices along the given axis.
    Executes ``func1d(a, *args, **kwargs)`` where `func1d` operates on 1-D arrays
    and `a` is a 1-D slice of arr along axis.

    Args:
        func1d (function): Maps `(M,) -> (Nj…)`. This function should accept 1-D
            arrays. It is applied to 1-D slices of arr along the specified axis.
        axis (int): Axis along which arr is sliced.
        arr (Tensor): Input array with shape `(Ni…, M, Nk…)`.
        args (any): Additional arguments to `func1d`.
        kwargs (any): Additional named arguments to `func1d`.

    Returns:
        Tensor with shape `(Ni…, Nj…, Nk…)`, the output array. Its shape is
        identical to the shape of `arr`, except along the `axis` dimension. This
        axis is removed, and replaced with new dimensions equal to the shape of
        the return value of `func1d`. So if `func1d` returns a scalar, the output
        will have one fewer dimensions than `arr`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        ValueError: If axis is out of the range.

    Examples:
        >>> import mindspore.numpy as np
        >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
        >>> print(np.apply_along_axis(np.diag, -1, b))
        [[[1 0 0]
          [0 2 0]
          [0 0 3]]
         [[4 0 0]
          [0 5 0]
          [0 0 6]]
         [[7 0 0]
          [0 8 0]
          [0 0 9]]]
    """
    ndim = F.rank(arr)
    shape = F.shape(arr)
    axis = _check_axis_in_range(axis, ndim)
    arr = moveaxis(arr, axis, -1)
    arr = F.reshape(arr, (-1, F.shape(arr)[-1]))
    slices = []
    for i in range(F.shape(arr)[0]):
        slices.append(func1d(arr[i], *args, **kwargs))
    stacked_slices = stack(slices)
    shape_stacked = (_tuple_slice(shape, None, axis) +
                     _tuple_slice(shape, axis + 1, None) +
                     _tuple_slice(F.shape(stacked_slices), 1, None))
    res = F.reshape(stacked_slices, shape_stacked)

    # moves the dimensions returned by `func1d` back to `axis`
    ndim_func = F.rank(res) - ndim + 1
    if ndim_func >= 1:
        res = moveaxis(res, F.make_range(ndim - 1, F.rank(res)),
                       F.make_range(axis, axis + ndim_func))
    return res
[ "def", "apply_along_axis", "(", "func1d", ",", "axis", ",", "arr", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ndim", "=", "F", ".", "rank", "(", "arr", ")", "shape", "=", "F", ".", "shape", "(", "arr", ")", "axis", "=", "_check_axis_in_range", "(", "axis", ",", "ndim", ")", "arr", "=", "moveaxis", "(", "arr", ",", "axis", ",", "-", "1", ")", "arr", "=", "F", ".", "reshape", "(", "arr", ",", "(", "-", "1", ",", "F", ".", "shape", "(", "arr", ")", "[", "-", "1", "]", ")", ")", "slices", "=", "[", "]", "for", "i", "in", "range", "(", "F", ".", "shape", "(", "arr", ")", "[", "0", "]", ")", ":", "slices", ".", "append", "(", "func1d", "(", "arr", "[", "i", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "stacked_slices", "=", "stack", "(", "slices", ")", "shape_stacked", "=", "(", "_tuple_slice", "(", "shape", ",", "None", ",", "axis", ")", "+", "_tuple_slice", "(", "shape", ",", "axis", "+", "1", ",", "None", ")", "+", "_tuple_slice", "(", "F", ".", "shape", "(", "stacked_slices", ")", ",", "1", ",", "None", ")", ")", "res", "=", "F", ".", "reshape", "(", "stacked_slices", ",", "shape_stacked", ")", "# moves the dimensions returned by `func1d` back to `axis`", "ndim_func", "=", "F", ".", "rank", "(", "res", ")", "-", "ndim", "+", "1", "if", "ndim_func", ">=", "1", ":", "res", "=", "moveaxis", "(", "res", ",", "F", ".", "make_range", "(", "ndim", "-", "1", ",", "F", ".", "rank", "(", "res", ")", ")", ",", "F", ".", "make_range", "(", "axis", ",", "axis", "+", "ndim_func", ")", ")", "return", "res" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/numpy/array_ops.py#L2259-L2317
google/llvm-propeller
45c226984fe8377ebfb2ad7713c680d652ba678d
clang/bindings/python/clang/cindex.py
python
Cursor.underlying_typedef_type
(self)
return self._underlying_type
Return the underlying type of a typedef declaration. Returns a Type for the typedef this cursor is a declaration for. If the current cursor is not a typedef, this raises.
Return the underlying type of a typedef declaration.
[ "Return", "the", "underlying", "type", "of", "a", "typedef", "declaration", "." ]
def underlying_typedef_type(self):
    """Return the underlying type of a typedef declaration.

    Returns a Type for the typedef this cursor is a declaration for. If
    the current cursor is not a typedef, this raises.
    """
    if not hasattr(self, '_underlying_type'):
        assert self.kind.is_declaration()
        self._underlying_type = \
            conf.lib.clang_getTypedefDeclUnderlyingType(self)

    return self._underlying_type
[ "def", "underlying_typedef_type", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_underlying_type'", ")", ":", "assert", "self", ".", "kind", ".", "is_declaration", "(", ")", "self", ".", "_underlying_type", "=", "conf", ".", "lib", ".", "clang_getTypedefDeclUnderlyingType", "(", "self", ")", "return", "self", ".", "_underlying_type" ]
https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/clang/bindings/python/clang/cindex.py#L1688-L1699
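A sketch of reading the underlying type through these bindings; it assumes libclang is installed and discoverable, and sample.c is an invented input containing a typedef:

import clang.cindex
from clang.cindex import CursorKind

index = clang.cindex.Index.create()
tu = index.parse('sample.c')  # e.g. contains: typedef unsigned long size_type;
for cursor in tu.cursor.walk_preorder():
    if cursor.kind == CursorKind.TYPEDEF_DECL:
        # underlying_typedef_type is the property shown above
        print(cursor.spelling, '->', cursor.underlying_typedef_type.spelling)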
JumpingYang001/webrtc
c03d6e965e1f54aeadd670e491eabe5fdb8db968
tools_webrtc/presubmit_checks_lib/check_package_boundaries.py
python
_CheckBuildFile
(build_file_path, packages)
Iterates over all the targets of the given BUILD.gn file, and verifies that the source files referenced by it don't belong to any of its subpackages. Returns an iterator over PackageBoundaryViolations for this package.
Iterates over all the targets of the given BUILD.gn file, and verifies that the source files referenced by it don't belong to any of its subpackages. Returns an iterator over PackageBoundaryViolations for this package.
[ "Iterates", "over", "all", "the", "targets", "of", "the", "given", "BUILD", ".", "gn", "file", "and", "verifies", "that", "the", "source", "files", "referenced", "by", "it", "don", "t", "belong", "to", "any", "of", "it", "s", "subpackages", ".", "Returns", "an", "iterator", "over", "PackageBoundaryViolations", "for", "this", "package", "." ]
def _CheckBuildFile(build_file_path, packages):
    """Iterates over all the targets of the given BUILD.gn file, and verifies
    that the source files referenced by it don't belong to any of its
    subpackages.
    Returns an iterator over PackageBoundaryViolations for this package.
    """
    package = os.path.dirname(build_file_path)
    subpackages_re = _BuildSubpackagesPattern(packages, package)
    build_file_contents = _ReadFileAndPrependLines(build_file_path)
    for target_match in TARGET_RE.finditer(build_file_contents):
        target_name = target_match.group('target_name')
        target_contents = target_match.group('target_contents')
        for sources_match in SOURCES_RE.finditer(target_contents):
            sources = sources_match.group('sources')
            for subpackages_match in subpackages_re.finditer(sources):
                subpackage = subpackages_match.group('subpackage')
                source_file = subpackages_match.group('source_file')
                if subpackage:
                    yield PackageBoundaryViolation(build_file_path, target_name,
                                                   source_file, subpackage)
[ "def", "_CheckBuildFile", "(", "build_file_path", ",", "packages", ")", ":", "package", "=", "os", ".", "path", ".", "dirname", "(", "build_file_path", ")", "subpackages_re", "=", "_BuildSubpackagesPattern", "(", "packages", ",", "package", ")", "build_file_contents", "=", "_ReadFileAndPrependLines", "(", "build_file_path", ")", "for", "target_match", "in", "TARGET_RE", ".", "finditer", "(", "build_file_contents", ")", ":", "target_name", "=", "target_match", ".", "group", "(", "'target_name'", ")", "target_contents", "=", "target_match", ".", "group", "(", "'target_contents'", ")", "for", "sources_match", "in", "SOURCES_RE", ".", "finditer", "(", "target_contents", ")", ":", "sources", "=", "sources_match", ".", "group", "(", "'sources'", ")", "for", "subpackages_match", "in", "subpackages_re", ".", "finditer", "(", "sources", ")", ":", "subpackage", "=", "subpackages_match", ".", "group", "(", "'subpackage'", ")", "source_file", "=", "subpackages_match", ".", "group", "(", "'source_file'", ")", "if", "subpackage", ":", "yield", "PackageBoundaryViolation", "(", "build_file_path", ",", "target_name", ",", "source_file", ",", "subpackage", ")" ]
https://github.com/JumpingYang001/webrtc/blob/c03d6e965e1f54aeadd670e491eabe5fdb8db968/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py#L59-L79
bingwin/MicroChat
81d9a71a212c1cbca5bba497ec42659a7d25dccf
mars/lint/cpplint.py
python
CheckForCopyright
(filename, lines, error)
Logs an error if no Copyright message appears at the top of the file.
Logs an error if no Copyright message appears at the top of the file.
[ "Logs", "an", "error", "if", "no", "Copyright", "message", "appears", "at", "the", "top", "of", "the", "file", "." ]
def CheckForCopyright(filename, lines, error):
    """Logs an error if no Copyright message appears at the top of the file."""
    # We'll say it should occur by line 10. Don't forget there's a
    # dummy line at the front.
    for line in xrange(1, min(len(lines), 11)):
        if re.search(r'Copyright', lines[line], re.I):
            break
    else:  # means no copyright line was found
        error(filename, 0, 'legal/copyright', 5,
              'No copyright message found. '
              'You should have a line: "Copyright [year] <Copyright Owner>"')
[ "def", "CheckForCopyright", "(", "filename", ",", "lines", ",", "error", ")", ":", "# We'll say it should occur by line 10. Don't forget there's a", "# dummy line at the front.", "for", "line", "in", "xrange", "(", "1", ",", "min", "(", "len", "(", "lines", ")", ",", "11", ")", ")", ":", "if", "re", ".", "search", "(", "r'Copyright'", ",", "lines", "[", "line", "]", ",", "re", ".", "I", ")", ":", "break", "else", ":", "# means no copyright line was found", "error", "(", "filename", ",", "0", ",", "'legal/copyright'", ",", "5", ",", "'No copyright message found. '", "'You should have a line: \"Copyright [year] <Copyright Owner>\"'", ")" ]
https://github.com/bingwin/MicroChat/blob/81d9a71a212c1cbca5bba497ec42659a7d25dccf/mars/lint/cpplint.py#L1627-L1637
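The function above leans on Python's for/else: the else suite runs only when the loop finishes without hitting break. A minimal standalone illustration:

lines = ['line 0', 'line 1']  # no line contains 'Copyright'
for line in lines:
    if 'Copyright' in line:
        break
else:
    # reached because the loop completed without `break`
    print('No copyright message found.')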
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/grid.py
python
Grid.CanDragColMove
(*args, **kwargs)
return _grid.Grid_CanDragColMove(*args, **kwargs)
CanDragColMove(self) -> bool
CanDragColMove(self) -> bool
[ "CanDragColMove", "(", "self", ")", "-", ">", "bool" ]
def CanDragColMove(*args, **kwargs):
    """CanDragColMove(self) -> bool"""
    return _grid.Grid_CanDragColMove(*args, **kwargs)
[ "def", "CanDragColMove", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_grid", ".", "Grid_CanDragColMove", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/grid.py#L1638-L1640
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/docutils/writers/latex2e/__init__.py
python
LaTeXTranslator.__init__
(self, document, babel_class=Babel)
Heterogeneous stack. Used by visit_* and depart_* functions in conjunction with the tree traversal. Make sure that the pops correspond to the pushes.
Heterogeneous stack.
[ "Heterogeneous", "stack", "." ]
def __init__(self, document, babel_class=Babel):
    nodes.NodeVisitor.__init__(self, document)
    # Reporter
    # ~~~~~~~~
    self.warn = self.document.reporter.warning
    self.error = self.document.reporter.error
    # Settings
    # ~~~~~~~~
    self.settings = settings = document.settings
    self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
    self.use_latex_toc = settings.use_latex_toc
    self.use_latex_docinfo = settings.use_latex_docinfo
    self._use_latex_citations = settings.use_latex_citations
    self._reference_label = settings.reference_label
    self.hyperlink_color = settings.hyperlink_color
    self.compound_enumerators = settings.compound_enumerators
    self.font_encoding = getattr(settings, 'font_encoding', '')
    self.section_prefix_for_enumerators = (
        settings.section_prefix_for_enumerators)
    self.section_enumerator_separator = (
        settings.section_enumerator_separator.replace('_', r'\_'))
    # literal blocks:
    self.literal_block_env = 'alltt'
    self.literal_block_options = ''
    if settings.literal_block_env != '':
        (none,
         self.literal_block_env,
         self.literal_block_options,
         none) = re.split(r'(\w+)(.*)', settings.literal_block_env)
    elif settings.use_verbatim_when_possible:
        self.literal_block_env = 'verbatim'
    #
    if self.settings.use_bibtex:
        self.bibtex = self.settings.use_bibtex.split(',', 1)
        # TODO avoid errors on not declared citations.
    else:
        self.bibtex = None
    # language module for Docutils-generated text
    # (labels, bibliographic_fields, and author_separators)
    self.language_module = languages.get_language(settings.language_code,
                                                  document.reporter)
    self.babel = babel_class(settings.language_code, document.reporter)
    self.author_separator = self.language_module.author_separators[0]
    d_options = [self.settings.documentoptions]
    if self.babel.language not in ('english', ''):
        d_options.append(self.babel.language)
    self.documentoptions = ','.join(filter(None, d_options))
    self.d_class = DocumentClass(settings.documentclass,
                                 settings.use_part_section)
    # graphic package options:
    if self.settings.graphicx_option == '':
        self.graphicx_package = r'\usepackage{graphicx}'
    elif self.settings.graphicx_option.lower() == 'auto':
        self.graphicx_package = PreambleCmds.graphicx_auto
    else:
        self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
                                 self.settings.graphicx_option)
    # footnotes:
    self.docutils_footnotes = settings.docutils_footnotes
    # @@ table_style: list of values from fixed set: warn?
    # for s in self.settings.table_style:
    #     if s not in Writer.table_style_values:
    #         self.warn('Ignoring value "%s" in "table-style" setting.' % s)

    # Output collection stacks
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    # Document parts
    self.head_prefix = [r'\documentclass[%s]{%s}' %
                        (self.documentoptions, self.settings.documentclass)]
    self.requirements = SortableDict()  # made a list in depart_document()
    self.requirements['__static'] = r'\usepackage{ifthen}'
    self.latex_preamble = [settings.latex_preamble]
    self.fallbacks = SortableDict()  # made a list in depart_document()
    self.pdfsetup = []  # PDF properties (hyperref package)
    self.title = []
    self.subtitle = []
    self.titledata = []  # \title, \author, \date
    ## self.body_prefix = ['\\begin{document}\n']
    self.body_pre_docinfo = []  # \maketitle
    self.docinfo = []
    self.dedication = []
    self.abstract = []
    self.body = []
    ## self.body_suffix = ['\\end{document}\n']

    self.context = []
    """Heterogeneous stack.

    Used by visit_* and depart_* functions in conjunction with the tree
    traversal. Make sure that the pops correspond to the pushes."""

    # Title metadata:
    self.title_labels = []
    self.subtitle_labels = []
    # (if use_latex_docinfo: collects lists of
    # author/organization/contact/address lines)
    self.author_stack = []
    self.date = []

    # PDF properties: pdftitle, pdfauthor
    # TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords
    self.pdfinfo = []
    self.pdfauthor = []

    # Stack of section counters so that we don't have to use_latex_toc.
    # This will grow and shrink as processing occurs.
    # Initialized for potential first-level sections.
    self._section_number = [0]

    # The current stack of enumerations so that we can expand
    # them into a compound enumeration.
    self._enumeration_counters = []
    # The maximum number of enumeration counters we've used.
    # If we go beyond this number, we need to create a new
    # counter; otherwise, just reuse an old one.
    self._max_enumeration_counters = 0

    self._bibitems = []

    # object for a table while proccessing.
    self.table_stack = []
    self.active_table = Table(self, 'longtable')

    # Where to collect the output of visitor methods (default: body)
    self.out = self.body
    self.out_stack = []  # stack of output collectors

    # Process settings
    # ~~~~~~~~~~~~~~~~
    # Encodings:
    # Docutils' output-encoding => TeX input encoding
    if self.latex_encoding != 'ascii':
        self.requirements['_inputenc'] = (r'\usepackage[%s]{inputenc}' %
                                          self.latex_encoding)
    # TeX font encoding
    if not self.is_xetex:
        if self.font_encoding:
            self.requirements['_fontenc'] = (r'\usepackage[%s]{fontenc}' %
                                             self.font_encoding)
        # ensure \textquotedbl is defined:
        for enc in self.font_encoding.split(','):
            enc = enc.strip()
            if enc == 'OT1':
                self.requirements['_textquotedblOT1'] = (
                    r'\DeclareTextSymbol{\textquotedbl}{OT1}{`\"}')
            elif enc not in ('T1', 'T2A', 'T2B', 'T2C', 'T4', 'T5'):
                self.requirements['_textquotedbl'] = (
                    r'\DeclareTextSymbolDefault{\textquotedbl}{T1}')
    # page layout with typearea (if there are relevant document options)
    if (settings.documentclass.find('scr') == -1 and
        (self.documentoptions.find('DIV') != -1 or
         self.documentoptions.find('BCOR') != -1)):
        self.requirements['typearea'] = r'\usepackage{typearea}'

    # Stylesheets
    # (the name `self.stylesheet` is singular because only one
    # stylesheet was supported before Docutils 0.6).
    self.stylesheet = [self.stylesheet_call(path)
                       for path in utils.get_stylesheet_list(settings)]

    # PDF setup
    if self.hyperlink_color in ('0', 'false', 'False', ''):
        self.hyperref_options = ''
    else:
        self.hyperref_options = 'colorlinks=true,linkcolor=%s,urlcolor=%s' % (
            self.hyperlink_color, self.hyperlink_color)
    if settings.hyperref_options:
        self.hyperref_options += ',' + settings.hyperref_options

    # LaTeX Toc
    # include all supported sections in toc and PDF bookmarks
    # (or use documentclass-default (as currently))?
    ## if self.use_latex_toc:
    ##     self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
    ##                                      len(self.d_class.sections))

    # Section numbering
    if settings.sectnum_xform:  # section numbering by Docutils
        PreambleCmds.secnumdepth = r'\setcounter{secnumdepth}{0}'
    else:  # section numbering by LaTeX:
        secnumdepth = settings.sectnum_depth
        # Possible values of settings.sectnum_depth:
        # None  "sectnum" directive without depth arg -> LaTeX default
        #  0    no "sectnum" directive -> no section numbers
        # >0    value of "depth" argument -> translate to LaTeX levels:
        #       -1  part (0 with "article" document class)
        #        0  chapter (missing in "article" document class)
        #        1  section
        #        2  subsection
        #        3  subsubsection
        #        4  paragraph
        #        5  subparagraph
        if secnumdepth is not None:
            # limit to supported levels
            secnumdepth = min(secnumdepth, len(self.d_class.sections))
            # adjust to document class and use_part_section settings
            if 'chapter' in self.d_class.sections:
                secnumdepth -= 1
            if self.d_class.sections[0] == 'part':
                secnumdepth -= 1
            PreambleCmds.secnumdepth = \
                r'\setcounter{secnumdepth}{%d}' % secnumdepth
        # start with specified number:
        if (hasattr(settings, 'sectnum_start') and
            settings.sectnum_start != 1):
            self.requirements['sectnum_start'] = (
                r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
                                          settings.sectnum_start - 1))
[ "def", "__init__", "(", "self", ",", "document", ",", "babel_class", "=", "Babel", ")", ":", "nodes", ".", "NodeVisitor", ".", "__init__", "(", "self", ",", "document", ")", "# Reporter", "# ~~~~~~~~", "self", ".", "warn", "=", "self", ".", "document", ".", "reporter", ".", "warning", "self", ".", "error", "=", "self", ".", "document", ".", "reporter", ".", "error", "# Settings", "# ~~~~~~~~", "self", ".", "settings", "=", "settings", "=", "document", ".", "settings", "self", ".", "latex_encoding", "=", "self", ".", "to_latex_encoding", "(", "settings", ".", "output_encoding", ")", "self", ".", "use_latex_toc", "=", "settings", ".", "use_latex_toc", "self", ".", "use_latex_docinfo", "=", "settings", ".", "use_latex_docinfo", "self", ".", "_use_latex_citations", "=", "settings", ".", "use_latex_citations", "self", ".", "_reference_label", "=", "settings", ".", "reference_label", "self", ".", "hyperlink_color", "=", "settings", ".", "hyperlink_color", "self", ".", "compound_enumerators", "=", "settings", ".", "compound_enumerators", "self", ".", "font_encoding", "=", "getattr", "(", "settings", ",", "'font_encoding'", ",", "''", ")", "self", ".", "section_prefix_for_enumerators", "=", "(", "settings", ".", "section_prefix_for_enumerators", ")", "self", ".", "section_enumerator_separator", "=", "(", "settings", ".", "section_enumerator_separator", ".", "replace", "(", "'_'", ",", "r'\\_'", ")", ")", "# literal blocks:", "self", ".", "literal_block_env", "=", "'alltt'", "self", ".", "literal_block_options", "=", "''", "if", "settings", ".", "literal_block_env", "!=", "''", ":", "(", "none", ",", "self", ".", "literal_block_env", ",", "self", ".", "literal_block_options", ",", "none", ")", "=", "re", ".", "split", "(", "r'(\\w+)(.*)'", ",", "settings", ".", "literal_block_env", ")", "elif", "settings", ".", "use_verbatim_when_possible", ":", "self", ".", "literal_block_env", "=", "'verbatim'", "#", "if", "self", ".", "settings", ".", "use_bibtex", ":", "self", ".", "bibtex", "=", "self", ".", "settings", ".", "use_bibtex", ".", "split", "(", "','", ",", "1", ")", "# TODO avoid errors on not declared citations.", "else", ":", "self", ".", "bibtex", "=", "None", "# language module for Docutils-generated text", "# (labels, bibliographic_fields, and author_separators)", "self", ".", "language_module", "=", "languages", ".", "get_language", "(", "settings", ".", "language_code", ",", "document", ".", "reporter", ")", "self", ".", "babel", "=", "babel_class", "(", "settings", ".", "language_code", ",", "document", ".", "reporter", ")", "self", ".", "author_separator", "=", "self", ".", "language_module", ".", "author_separators", "[", "0", "]", "d_options", "=", "[", "self", ".", "settings", ".", "documentoptions", "]", "if", "self", ".", "babel", ".", "language", "not", "in", "(", "'english'", ",", "''", ")", ":", "d_options", ".", "append", "(", "self", ".", "babel", ".", "language", ")", "self", ".", "documentoptions", "=", "','", ".", "join", "(", "filter", "(", "None", ",", "d_options", ")", ")", "self", ".", "d_class", "=", "DocumentClass", "(", "settings", ".", "documentclass", ",", "settings", ".", "use_part_section", ")", "# graphic package options:", "if", "self", ".", "settings", ".", "graphicx_option", "==", "''", ":", "self", ".", "graphicx_package", "=", "r'\\usepackage{graphicx}'", "elif", "self", ".", "settings", ".", "graphicx_option", ".", "lower", "(", ")", "==", "'auto'", ":", "self", ".", "graphicx_package", "=", "PreambleCmds", ".", "graphicx_auto", "else", ":", "self", ".", 
"graphicx_package", "=", "(", "r'\\usepackage[%s]{graphicx}'", "%", "self", ".", "settings", ".", "graphicx_option", ")", "# footnotes:", "self", ".", "docutils_footnotes", "=", "settings", ".", "docutils_footnotes", "# @@ table_style: list of values from fixed set: warn?", "# for s in self.settings.table_style:", "# if s not in Writer.table_style_values:", "# self.warn('Ignoring value \"%s\" in \"table-style\" setting.' %s)", "# Output collection stacks", "# ~~~~~~~~~~~~~~~~~~~~~~~~", "# Document parts", "self", ".", "head_prefix", "=", "[", "r'\\documentclass[%s]{%s}'", "%", "(", "self", ".", "documentoptions", ",", "self", ".", "settings", ".", "documentclass", ")", "]", "self", ".", "requirements", "=", "SortableDict", "(", ")", "# made a list in depart_document()", "self", ".", "requirements", "[", "'__static'", "]", "=", "r'\\usepackage{ifthen}'", "self", ".", "latex_preamble", "=", "[", "settings", ".", "latex_preamble", "]", "self", ".", "fallbacks", "=", "SortableDict", "(", ")", "# made a list in depart_document()", "self", ".", "pdfsetup", "=", "[", "]", "# PDF properties (hyperref package)", "self", ".", "title", "=", "[", "]", "self", ".", "subtitle", "=", "[", "]", "self", ".", "titledata", "=", "[", "]", "# \\title, \\author, \\date", "## self.body_prefix = ['\\\\begin{document}\\n']", "self", ".", "body_pre_docinfo", "=", "[", "]", "# \\maketitle", "self", ".", "docinfo", "=", "[", "]", "self", ".", "dedication", "=", "[", "]", "self", ".", "abstract", "=", "[", "]", "self", ".", "body", "=", "[", "]", "## self.body_suffix = ['\\\\end{document}\\n']", "self", ".", "context", "=", "[", "]", "# Title metadata:", "self", ".", "title_labels", "=", "[", "]", "self", ".", "subtitle_labels", "=", "[", "]", "# (if use_latex_docinfo: collects lists of", "# author/organization/contact/address lines)", "self", ".", "author_stack", "=", "[", "]", "self", ".", "date", "=", "[", "]", "# PDF properties: pdftitle, pdfauthor", "# TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords", "self", ".", "pdfinfo", "=", "[", "]", "self", ".", "pdfauthor", "=", "[", "]", "# Stack of section counters so that we don't have to use_latex_toc.", "# This will grow and shrink as processing occurs.", "# Initialized for potential first-level sections.", "self", ".", "_section_number", "=", "[", "0", "]", "# The current stack of enumerations so that we can expand", "# them into a compound enumeration.", "self", ".", "_enumeration_counters", "=", "[", "]", "# The maximum number of enumeration counters we've used.", "# If we go beyond this number, we need to create a new", "# counter; otherwise, just reuse an old one.", "self", ".", "_max_enumeration_counters", "=", "0", "self", ".", "_bibitems", "=", "[", "]", "# object for a table while proccessing.", "self", ".", "table_stack", "=", "[", "]", "self", ".", "active_table", "=", "Table", "(", "self", ",", "'longtable'", ")", "# Where to collect the output of visitor methods (default: body)", "self", ".", "out", "=", "self", ".", "body", "self", ".", "out_stack", "=", "[", "]", "# stack of output collectors", "# Process settings", "# ~~~~~~~~~~~~~~~~", "# Encodings:", "# Docutils' output-encoding => TeX input encoding", "if", "self", ".", "latex_encoding", "!=", "'ascii'", ":", "self", ".", "requirements", "[", "'_inputenc'", "]", "=", "(", "r'\\usepackage[%s]{inputenc}'", "%", "self", ".", "latex_encoding", ")", "# TeX font encoding", "if", "not", "self", ".", "is_xetex", ":", "if", "self", ".", "font_encoding", ":", "self", ".", "requirements", "[", 
"'_fontenc'", "]", "=", "(", "r'\\usepackage[%s]{fontenc}'", "%", "self", ".", "font_encoding", ")", "# ensure \\textquotedbl is defined:", "for", "enc", "in", "self", ".", "font_encoding", ".", "split", "(", "','", ")", ":", "enc", "=", "enc", ".", "strip", "(", ")", "if", "enc", "==", "'OT1'", ":", "self", ".", "requirements", "[", "'_textquotedblOT1'", "]", "=", "(", "r'\\DeclareTextSymbol{\\textquotedbl}{OT1}{`\\\"}'", ")", "elif", "enc", "not", "in", "(", "'T1'", ",", "'T2A'", ",", "'T2B'", ",", "'T2C'", ",", "'T4'", ",", "'T5'", ")", ":", "self", ".", "requirements", "[", "'_textquotedbl'", "]", "=", "(", "r'\\DeclareTextSymbolDefault{\\textquotedbl}{T1}'", ")", "# page layout with typearea (if there are relevant document options)", "if", "(", "settings", ".", "documentclass", ".", "find", "(", "'scr'", ")", "==", "-", "1", "and", "(", "self", ".", "documentoptions", ".", "find", "(", "'DIV'", ")", "!=", "-", "1", "or", "self", ".", "documentoptions", ".", "find", "(", "'BCOR'", ")", "!=", "-", "1", ")", ")", ":", "self", ".", "requirements", "[", "'typearea'", "]", "=", "r'\\usepackage{typearea}'", "# Stylesheets", "# (the name `self.stylesheet` is singular because only one", "# stylesheet was supported before Docutils 0.6).", "self", ".", "stylesheet", "=", "[", "self", ".", "stylesheet_call", "(", "path", ")", "for", "path", "in", "utils", ".", "get_stylesheet_list", "(", "settings", ")", "]", "# PDF setup", "if", "self", ".", "hyperlink_color", "in", "(", "'0'", ",", "'false'", ",", "'False'", ",", "''", ")", ":", "self", ".", "hyperref_options", "=", "''", "else", ":", "self", ".", "hyperref_options", "=", "'colorlinks=true,linkcolor=%s,urlcolor=%s'", "%", "(", "self", ".", "hyperlink_color", ",", "self", ".", "hyperlink_color", ")", "if", "settings", ".", "hyperref_options", ":", "self", ".", "hyperref_options", "+=", "','", "+", "settings", ".", "hyperref_options", "# LaTeX Toc", "# include all supported sections in toc and PDF bookmarks", "# (or use documentclass-default (as currently))?", "## if self.use_latex_toc:", "## self.requirements['tocdepth'] = (r'\\setcounter{tocdepth}{%d}' %", "## len(self.d_class.sections))", "# Section numbering", "if", "settings", ".", "sectnum_xform", ":", "# section numbering by Docutils", "PreambleCmds", ".", "secnumdepth", "=", "r'\\setcounter{secnumdepth}{0}'", "else", ":", "# section numbering by LaTeX:", "secnumdepth", "=", "settings", ".", "sectnum_depth", "# Possible values of settings.sectnum_depth:", "# None \"sectnum\" directive without depth arg -> LaTeX default", "# 0 no \"sectnum\" directive -> no section numbers", "# >0 value of \"depth\" argument -> translate to LaTeX levels:", "# -1 part (0 with \"article\" document class)", "# 0 chapter (missing in \"article\" document class)", "# 1 section", "# 2 subsection", "# 3 subsubsection", "# 4 paragraph", "# 5 subparagraph", "if", "secnumdepth", "is", "not", "None", ":", "# limit to supported levels", "secnumdepth", "=", "min", "(", "secnumdepth", ",", "len", "(", "self", ".", "d_class", ".", "sections", ")", ")", "# adjust to document class and use_part_section settings", "if", "'chapter'", "in", "self", ".", "d_class", ".", "sections", ":", "secnumdepth", "-=", "1", "if", "self", ".", "d_class", ".", "sections", "[", "0", "]", "==", "'part'", ":", "secnumdepth", "-=", "1", "PreambleCmds", ".", "secnumdepth", "=", "r'\\setcounter{secnumdepth}{%d}'", "%", "secnumdepth", "# start with specified number:", "if", "(", "hasattr", "(", "settings", ",", "'sectnum_start'", ")", "and", 
"settings", ".", "sectnum_start", "!=", "1", ")", ":", "self", ".", "requirements", "[", "'sectnum_start'", "]", "=", "(", "r'\\setcounter{%s}{%d}'", "%", "(", "self", ".", "d_class", ".", "sections", "[", "0", "]", ",", "settings", ".", "sectnum_start", "-", "1", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/docutils/writers/latex2e/__init__.py#L1185-L1395
google/or-tools
2cb85b4eead4c38e1c54b48044f92087cf165bce
ortools/sat/python/cp_model.py
python
Constraint.OnlyEnforceIf
(self, boolvar)
return self
Adds an enforcement literal to the constraint.

This method adds one or more literals (that is, a boolean variable or its negation)
as enforcement literals. The conjunction of all these literals determines whether
the constraint is active or not. It acts as an implication, so if the conjunction
is true, it implies that the constraint must be enforced. If it is false, then the
constraint is ignored.

BoolOr, BoolAnd, and linear constraints all support enforcement literals.

Args:
    boolvar: A boolean literal or a list of boolean literals.

Returns:
    self.
Adds an enforcement literal to the constraint.
[ "Adds", "an", "enforcement", "literal", "to", "the", "constraint", "." ]
def OnlyEnforceIf(self, boolvar):
    """Adds an enforcement literal to the constraint.

    This method adds one or more literals (that is, a boolean variable or its
    negation) as enforcement literals. The conjunction of all these literals
    determines whether the constraint is active or not. It acts as an
    implication, so if the conjunction is true, it implies that the constraint
    must be enforced. If it is false, then the constraint is ignored.

    BoolOr, BoolAnd, and linear constraints all support enforcement literals.

    Args:
      boolvar: A boolean literal or a list of boolean literals.

    Returns:
      self.
    """
    if cmh.is_integral(boolvar) and int(boolvar) == 1:
        # Always true. Do nothing.
        pass
    elif isinstance(boolvar, list):
        for b in boolvar:
            if cmh.is_integral(b) and int(b) == 1:
                pass
            else:
                self.__constraint.enforcement_literal.append(b.Index())
    else:
        self.__constraint.enforcement_literal.append(boolvar.Index())
    return self
[ "def", "OnlyEnforceIf", "(", "self", ",", "boolvar", ")", ":", "if", "cmh", ".", "is_integral", "(", "boolvar", ")", "and", "int", "(", "boolvar", ")", "==", "1", ":", "# Always true. Do nothing.", "pass", "elif", "isinstance", "(", "boolvar", ",", "list", ")", ":", "for", "b", "in", "boolvar", ":", "if", "cmh", ".", "is_integral", "(", "b", ")", "and", "int", "(", "b", ")", "==", "1", ":", "pass", "else", ":", "self", ".", "__constraint", ".", "enforcement_literal", ".", "append", "(", "b", ".", "Index", "(", ")", ")", "else", ":", "self", ".", "__constraint", ".", "enforcement_literal", ".", "append", "(", "boolvar", ".", "Index", "(", ")", ")", "return", "self" ]
https://github.com/google/or-tools/blob/2cb85b4eead4c38e1c54b48044f92087cf165bce/ortools/sat/python/cp_model.py#L799-L828
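A sketch of the enforcement-literal pattern with CP-SAT (variable names invented): the constraint binds only while the literal is true.

from ortools.sat.python import cp_model

model = cp_model.CpModel()
b = model.NewBoolVar('b')
x = model.NewIntVar(0, 10, 'x')
model.Add(x == 5).OnlyEnforceIf(b)        # x == 5 must hold only when b is true
model.Add(x == 0).OnlyEnforceIf(b.Not())  # negated literals are allowed too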
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/third_party/lib_x86_64/python2.7/dist-packages/yaml/__init__.py
python
add_path_resolver
(tag, path, kind=None, Loader=Loader, Dumper=Dumper)
Add a path based resolver for the given tag. A path is a list of keys that forms a path to a node in the representation tree. Keys can be string values, integers, or None.
Add a path based resolver for the given tag. A path is a list of keys that forms a path to a node in the representation tree. Keys can be string values, integers, or None.
[ "Add", "a", "path", "based", "resolver", "for", "the", "given", "tag", ".", "A", "path", "is", "a", "list", "of", "keys", "that", "forms", "a", "path", "to", "a", "node", "in", "the", "representation", "tree", ".", "Keys", "can", "be", "string", "values", "integers", "or", "None", "." ]
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    Loader.add_path_resolver(tag, path, kind)
    Dumper.add_path_resolver(tag, path, kind)
[ "def", "add_path_resolver", "(", "tag", ",", "path", ",", "kind", "=", "None", ",", "Loader", "=", "Loader", ",", "Dumper", "=", "Dumper", ")", ":", "Loader", ".", "add_path_resolver", "(", "tag", ",", "path", ",", "kind", ")", "Dumper", ".", "add_path_resolver", "(", "tag", ",", "path", ",", "kind", ")" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/yaml/__init__.py#L231-L239
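A hedged sketch of a path-based resolver on the loading side; the tag name, key path, and constructor are invented for illustration, and the path ['server'] is meant to match mapping nodes stored under the top-level key server:

import yaml

class Server:
    def __init__(self, mapping):
        self.__dict__.update(mapping)

# assumption: nodes reached via the top-level 'server' key get the tag '!Server'
yaml.add_path_resolver('!Server', ['server'], dict)
yaml.add_constructor(
    '!Server',
    lambda loader, node: Server(loader.construct_mapping(node)))

doc = yaml.load('server: {host: localhost, port: 8080}', Loader=yaml.Loader)
print(type(doc['server']))  # expected: <class '__main__.Server'>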
panda3d/panda3d
833ad89ebad58395d0af0b7ec08538e5e4308265
direct/src/showbase/Messenger.py
python
Messenger.getAllAccepting
(self, object)
Returns the list of all events accepted by the indicated object.
Returns the list of all events accepted by the indicated object.
[ "Returns", "the", "list", "of", "all", "events", "accepted", "by", "the", "indicated", "object", "." ]
def getAllAccepting(self, object):
    """
    Returns the list of all events accepted by the indicated object.
    """
    self.lock.acquire()
    try:
        id = self._getMessengerId(object)
        # Get the list of events this object is listening to
        eventDict = self.__objectEvents.get(id)
        if eventDict:
            return list(eventDict.keys())
        return []
    finally:
        self.lock.release()
[ "def", "getAllAccepting", "(", "self", ",", "object", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "id", "=", "self", ".", "_getMessengerId", "(", "object", ")", "# Get the list of events this object is listening to", "eventDict", "=", "self", ".", "__objectEvents", ".", "get", "(", "id", ")", "if", "eventDict", ":", "return", "list", "(", "eventDict", ".", "keys", "(", ")", ")", "return", "[", "]", "finally", ":", "self", ".", "lock", ".", "release", "(", ")" ]
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/showbase/Messenger.py#L241-L255
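A sketch of the accept/query round trip with Panda3D's global messenger; the event names are invented, and this assumes the messenger can be used outside a full ShowBase application:

from direct.showbase.DirectObject import DirectObject
from direct.showbase.MessengerGlobal import messenger

obj = DirectObject()
obj.accept('space', lambda: print('space pressed'))
obj.accept('reset', lambda: print('reset requested'))
print(messenger.getAllAccepting(obj))  # e.g. ['space', 'reset'] (order not guaranteed)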
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/keras/saving/saving_utils.py
python
_deserialize_nested_config
(deserialize_fn, config)
Deserializes arbitrary Keras `config` using `deserialize_fn`.
Deserializes arbitrary Keras `config` using `deserialize_fn`.
[ "Deserializes", "arbitrary", "Keras", "config", "using", "deserialize_fn", "." ]
def _deserialize_nested_config(deserialize_fn, config):
    """Deserializes arbitrary Keras `config` using `deserialize_fn`."""

    def _is_single_object(obj):
        if isinstance(obj, dict) and 'class_name' in obj:
            return True  # Serialized Keras object.
        if isinstance(obj, str):
            return True  # Serialized function or string.
        return False

    if config is None:
        return None
    if _is_single_object(config):
        return deserialize_fn(config)
    elif isinstance(config, dict):
        return {
            k: _deserialize_nested_config(deserialize_fn, v)
            for k, v in config.items()
        }
    elif isinstance(config, (tuple, list)):
        return [_deserialize_nested_config(deserialize_fn, obj) for obj in config]

    raise ValueError('Saved configuration not understood.')
[ "def", "_deserialize_nested_config", "(", "deserialize_fn", ",", "config", ")", ":", "def", "_is_single_object", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", "and", "'class_name'", "in", "obj", ":", "return", "True", "# Serialized Keras object.", "if", "isinstance", "(", "obj", ",", "str", ")", ":", "return", "True", "# Serialized function or string.", "return", "False", "if", "config", "is", "None", ":", "return", "None", "if", "_is_single_object", "(", "config", ")", ":", "return", "deserialize_fn", "(", "config", ")", "elif", "isinstance", "(", "config", ",", "dict", ")", ":", "return", "{", "k", ":", "_deserialize_nested_config", "(", "deserialize_fn", ",", "v", ")", "for", "k", ",", "v", "in", "config", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "config", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "[", "_deserialize_nested_config", "(", "deserialize_fn", ",", "obj", ")", "for", "obj", "in", "config", "]", "raise", "ValueError", "(", "'Saved configuration not understood.'", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/saving/saving_utils.py#L241-L263
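The recursion above bottoms out on dicts that carry class_name and on bare strings; lists, tuples, and other dicts are walked structurally, and None passes through. A toy run with a dummy deserialize_fn (not the Keras one):

def tag(obj):
    return ('deserialized', obj)

config = {
    'loss': 'mse',  # bare string: treated as a single serialized object
    'metrics': ['mae', {'class_name': 'AUC', 'config': {}}],
    'extra': None,  # passes through unchanged
}
print(_deserialize_nested_config(tag, config))
# {'loss': ('deserialized', 'mse'),
#  'metrics': [('deserialized', 'mae'),
#              ('deserialized', {'class_name': 'AUC', 'config': {}})],
#  'extra': None}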
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_misc.py
python
Display.IsOk
(*args, **kwargs)
return _misc_.Display_IsOk(*args, **kwargs)
IsOk(self) -> bool

Return true if the object was initialized successfully
IsOk(self) -> bool
[ "IsOk", "(", "self", ")", "-", ">", "bool" ]
def IsOk(*args, **kwargs):
    """
    IsOk(self) -> bool

    Return true if the object was initialized successfully
    """
    return _misc_.Display_IsOk(*args, **kwargs)
[ "def", "IsOk", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "Display_IsOk", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_misc.py#L6113-L6119
BlzFans/wke
b0fa21158312e40c5fbd84682d643022b6c34a93
cygwin/lib/python2.6/distutils/ccompiler.py
python
CCompiler._compile
(self, obj, src, ext, cc_args, extra_postargs, pp_opts)
Compile 'src' to product 'obj'.
Compile 'src' to product 'obj'.
[ "Compile", "src", "to", "product", "obj", "." ]
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
    """Compile 'src' to product 'obj'."""
    # A concrete compiler class that does not override compile()
    # should implement _compile().
    pass
[ "def", "_compile", "(", "self", ",", "obj", ",", "src", ",", "ext", ",", "cc_args", ",", "extra_postargs", ",", "pp_opts", ")", ":", "# A concrete compiler class that does not override compile()", "# should implement _compile().", "pass" ]
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/distutils/ccompiler.py#L626-L631
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Part/BOPTools/JoinFeatures.py
python
makeCutout
(name)
return obj
makeCutout(name): makes a Cutout object.
makeCutout(name): makes a Cutout object.
[ "makeCutout", "(", "name", ")", ":", "makes", "an", "Cutout", "object", "." ]
def makeCutout(name):
    '''makeCutout(name): makes a Cutout object.'''
    obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython", name)
    FeatureCutout(obj)
    if FreeCAD.GuiUp:
        ViewProviderCutout(obj.ViewObject)
    return obj
[ "def", "makeCutout", "(", "name", ")", ":", "obj", "=", "FreeCAD", ".", "ActiveDocument", ".", "addObject", "(", "\"Part::FeaturePython\"", ",", "name", ")", "FeatureCutout", "(", "obj", ")", "if", "FreeCAD", ".", "GuiUp", ":", "ViewProviderCutout", "(", "obj", ".", "ViewObject", ")", "return", "obj" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Part/BOPTools/JoinFeatures.py#L321-L327
turi-code/SFrame
796b9bdfb2fa1b881d82080754643c7e68629cd2
oss_src/unity/python/sframe/connect/main.py
python
is_connected
()
Returns true if connected to the server.
Returns true if connected to the server.
[ "Returns", "true", "if", "connected", "to", "the", "server", "." ]
def is_connected():
    """
    Returns true if connected to the server.
    """
    if (__CLIENT__ is not None and __SERVER__ is not None):
        # both client and server are live
        return True
    elif (__CLIENT__ is None and __SERVER__ is None):
        # both client and server are dead
        return False
    else:
        # unlikely state: one of them are live and the other dead
        raise RuntimeError('GraphLab connection error.')
[ "def", "is_connected", "(", ")", ":", "if", "(", "__CLIENT__", "is", "not", "None", "and", "__SERVER__", "is", "not", "None", ")", ":", "# both client and server are live", "return", "True", "elif", "(", "__CLIENT__", "is", "None", "and", "__SERVER__", "is", "None", ")", ":", "# both client and server are dead", "return", "False", "else", ":", "# unlikely state: one of them are live and the other dead", "raise", "RuntimeError", "(", "'GraphLab connection error.'", ")" ]
https://github.com/turi-code/SFrame/blob/796b9bdfb2fa1b881d82080754643c7e68629cd2/oss_src/unity/python/sframe/connect/main.py#L117-L129
bareos/bareos
56a10bb368b0a81e977bb51304033fe49d59efb0
core/src/plugins/filed/python/vmware/BareosFdPluginVMware.py
python
BareosFdPluginVMware.parse_plugin_definition
(self, plugindef)
return bareosfd.bRC_OK
Parses the plugin arguments
Parses the plugin arguments
[ "Parses", "the", "plugin", "arguments" ]
def parse_plugin_definition(self, plugindef):
    """
    Parses the plugin arguments
    """
    bareosfd.DebugMessage(
        100,
        "parse_plugin_definition() was called in module %s\n" % (__name__),
    )
    super(BareosFdPluginVMware, self).parse_plugin_definition(plugindef)

    # if the option config_file is present, parse the given file
    config_file = self.options.get("config_file")
    if config_file:
        if not self.parse_config_file():
            return bareosfd.bRC_Error

    self.vadp.options = self.options

    return bareosfd.bRC_OK
[ "def", "parse_plugin_definition", "(", "self", ",", "plugindef", ")", ":", "bareosfd", ".", "DebugMessage", "(", "100", ",", "\"parse_plugin_definition() was called in module %s\\n\"", "%", "(", "__name__", ")", ",", ")", "super", "(", "BareosFdPluginVMware", ",", "self", ")", ".", "parse_plugin_definition", "(", "plugindef", ")", "# if the option config_file is present, parse the given file", "config_file", "=", "self", ".", "options", ".", "get", "(", "\"config_file\"", ")", "if", "config_file", ":", "if", "not", "self", ".", "parse_config_file", "(", ")", ":", "return", "bareosfd", ".", "bRC_Error", "self", ".", "vadp", ".", "options", "=", "self", ".", "options", "return", "bareosfd", ".", "bRC_OK" ]
https://github.com/bareos/bareos/blob/56a10bb368b0a81e977bb51304033fe49d59efb0/core/src/plugins/filed/python/vmware/BareosFdPluginVMware.py#L111-L129
espressomd/espresso
7e29f9052e710fe1ebf0f5d2a8076b32921fbc6a
src/python/espressomd/visualization_opengl.py
python
Shape.draw
(self)
Draw shape via rasterization. Used as a default draw method. Can and should be overwritten in child classes to implement a better draw method.
Draw shape via rasterization. Used as a default draw method. Can and should be overwritten in child classes to implement a better draw method.
[ "Draw", "shape", "via", "rasterization", ".", "Used", "as", "a", "default", "draw", "method", ".", "Can", "and", "should", "be", "overwritten", "in", "child", "classes", "to", "implement", "a", "better", "draw", "method", "." ]
def draw(self): """ Draw shape via rasterization. Used as a default draw method. Can and should be overwritten in child classes to implement a better draw method. """ # get and store points of rasterized surface if not already present if self.rasterized_surface_points is None: self.rasterized_surface_points = self._rasterize_shape() set_solid_material(self.color, self.material) OpenGL.GL.glPointSize(self.pointsize) OpenGL.GL.glBegin(OpenGL.GL.GL_POINTS) for point in self.rasterized_surface_points: OpenGL.GL.glVertex3f(point[0], point[1], point[2]) OpenGL.GL.glEnd()
[ "def", "draw", "(", "self", ")", ":", "# get and store points of rasterized surface if not already present", "if", "self", ".", "rasterized_surface_points", "is", "None", ":", "self", ".", "rasterized_surface_points", "=", "self", ".", "_rasterize_shape", "(", ")", "set_solid_material", "(", "self", ".", "color", ",", "self", ".", "material", ")", "OpenGL", ".", "GL", ".", "glPointSize", "(", "self", ".", "pointsize", ")", "OpenGL", ".", "GL", ".", "glBegin", "(", "OpenGL", ".", "GL", ".", "GL_POINTS", ")", "for", "point", "in", "self", ".", "rasterized_surface_points", ":", "OpenGL", ".", "GL", ".", "glVertex3f", "(", "point", "[", "0", "]", ",", "point", "[", "1", "]", ",", "point", "[", "2", "]", ")", "OpenGL", ".", "GL", ".", "glEnd", "(", ")" ]
https://github.com/espressomd/espresso/blob/7e29f9052e710fe1ebf0f5d2a8076b32921fbc6a/src/python/espressomd/visualization_opengl.py#L1899-L1915
fifengine/fifengine
4b62c42e85bec19893cef8e63e6855927cff2c47
engine/python/fife/extensions/pychan/widgets/widget.py
python
Widget.requestModalMouseInputFocus
(self)
Requests modal mouse input focus. When a widget has modal input focus that widget will be the only widget receiving input even if the input occurs outside of the widget and no matter what the input is. The widget must be modal mouse input focusable in order for this to work. Therefore, no other widget should have modal input focus.
Requests modal mouse input focus. When a widget has modal input focus that widget will be the only widget receiving input even if the input occurs outside of the widget and no matter what the input is. The widget must be modal mouse input focusable in order for this to work. Therefore, no other widget should have modal input focus.
[ "Requests", "modal", "mouse", "input", "focus", ".", "When", "a", "widget", "has", "modal", "input", "focus", "that", "widget", "will", "be", "the", "only", "widget", "receiving", "input", "even", "if", "the", "input", "occurs", "outside", "of", "the", "widget", "and", "no", "matter", "what", "the", "input", "is", ".", "The", "widget", "must", "be", "modal", "mouse", "input", "focusable", "in", "order", "for", "this", "to", "work", ".", "Therefore", "no", "other", "widget", "should", "has", "modal", "input", "focus", "." ]
def requestModalMouseInputFocus(self): """ Requests modal mouse input focus. When a widget has modal input focus that widget will be the only widget receiving input even if the input occurs outside of the widget and no matter what the input is. The widget must be modal mouse input focusable in order for this to work. Therefore, no other widget should has modal input focus. """ if self.isVisible(): if self.isModalMouseInputFocusable(): self.real_widget.requestModalMouseInputFocus()
[ "def", "requestModalMouseInputFocus", "(", "self", ")", ":", "if", "self", ".", "isVisible", "(", ")", ":", "if", "self", ".", "isModalMouseInputFocusable", "(", ")", ":", "self", ".", "real_widget", ".", "requestModalMouseInputFocus", "(", ")" ]
https://github.com/fifengine/fifengine/blob/4b62c42e85bec19893cef8e63e6855927cff2c47/engine/python/fife/extensions/pychan/widgets/widget.py#L371-L384
daijifeng001/caffe-rfcn
543f8f6a4b7c88256ea1445ae951a12d1ad9cffd
scripts/cpp_lint.py
python
IsCppString
(line)
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
Does line terminate such that the next symbol is in a string constant. This function does not consider single-line or multi-line comments. Args: line: a partial line of code starting from position 0..n. Returns: True, if the next character appended to 'line' is inside a string constant.
Does line terminate such that the next symbol is in a string constant.
[ "Does", "line", "terminate", "so", "that", "the", "next", "symbol", "is", "in", "string", "constant", "." ]
def IsCppString(line): """Does line terminate so, that the next symbol is in string constant. This function does not consider single-line nor multi-line comments. Args: line: is a partial line of code starting from the 0..n. Returns: True, if next character appended to 'line' is inside a string constant. """ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
[ "def", "IsCppString", "(", "line", ")", ":", "line", "=", "line", ".", "replace", "(", "r'\\\\'", ",", "'XX'", ")", "# after this, \\\\\" does not match to \\\"", "return", "(", "(", "line", ".", "count", "(", "'\"'", ")", "-", "line", ".", "count", "(", "r'\\\"'", ")", "-", "line", ".", "count", "(", "\"'\\\"'\"", ")", ")", "&", "1", ")", "==", "1" ]
https://github.com/daijifeng001/caffe-rfcn/blob/543f8f6a4b7c88256ea1445ae951a12d1ad9cffd/scripts/cpp_lint.py#L1045-L1059
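Because IsCppString is a pure function, its parity logic can be checked standalone; a self-contained copy with a few illustrative cases:

    def IsCppString(line):
        line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
        return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1

    assert IsCppString('printf("unterminated') is True   # an open string constant
    assert IsCppString('printf("closed")') is False      # string closed on this line
    assert IsCppString('char q = \'"\';') is False       # a '"' char literal is ignored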
cathywu/Sentiment-Analysis
eb501fd1375c0c3f3ab430f963255f1bb858e659
PyML-0.7.9/PyML/classifiers/svm.py
python
SVM.__init__
(self, arg = None, **args)
:Parameters: - `arg` - another SVM object or a kernel object; if no argument is given the kernel function of the training dataset is used :Keywords: - `C` - the svm C parameter - `Cmode` - the way the C parameter is used; values: 'equal', 'classProb', 'fromData'. In 'equal' mode C is set to be the same for both classes. In 'classProb' mode each class is assigned a C value that is proportional to the size of the other class. This results in margin error costs being proportional to the ratio of the sizes of the two classes. This is useful for datasets with an unbalanced class distribution. In 'fromData' the value of C for each pattern is taken from the 'C' attribute of the training data. - `optimizer` - which optimizer to use. The options are: 'libsvm' -- run libsvm 'liblinear' -- use liblinear (linear svm only); in this case you have the option to set the loss function - either 'l1' or 'l2' 'mysmo' - use the PyML native optimizer (based on libsvm) 'gist' - use a gist-like optimizer. - `loss` - when using liblinear set this to 'l1' or 'l2' (default: 'l1') - `cacheSize` - size of the kernel cache (in MB).
:Parameters: - `arg` - another SVM object or a kernel object; if no argument is given the kernel function of the training dataset is used :Keywords: - `C` - the svm C parameter - `Cmode` - the way the C parameter is used; values: 'equal', 'classProb', 'fromData'. In 'equal' mode C is set to be the same for both classes. In 'classProb' mode each class is assigned a C value that is proportional to the size of the other class. This results in margin error costs being proportional to the ratio of the sizes of the two classes. This is useful for datasets with an unbalanced class distribution. In 'fromData' the value of C for each pattern is taken from the 'C' attribute of the training data. - `optimizer` - which optimizer to use. The options are: 'libsvm' -- run libsvm 'liblinear' -- use liblinear (linear svm only); in this case you have the option to set the loss function - either 'l1' or 'l2' 'mysmo' - use the PyML native optimizer (based on libsvm) 'gist' - use a gist-like optimizer. - `loss` - when using liblinear set this to 'l1' or 'l2' (default: 'l1') - `cacheSize` - size of the kernel cache (in MB).
[ ":", "Parameters", ":", "-", "arg", "-", "another", "SVM", "object", "or", "a", "kernel", "object", ";", "if", "no", "argument", "is", "given", "the", "kernel", "function", "of", "the", "training", "dataset", "is", "used", ":", "Keywords", ":", "-", "C", "-", "the", "svm", "C", "parameter", "-", "Cmode", "-", "the", "way", "the", "C", "parameter", "is", "used", ";", "values", ":", "equal", "classProb", "fromData", ".", "In", "equal", "mode", "C", "is", "set", "to", "be", "the", "same", "for", "both", "classes", "In", "classProb", "mode", "each", "class", "is", "assigned", "a", "C", "value", "that", "is", "proportional", "to", "the", "size", "of", "the", "other", "class", ".", "This", "results", "in", "margin", "error", "costs", "being", "proportional", "to", "the", "ratio", "of", "the", "sizes", "of", "the", "two", "classes", ".", "This", "is", "useful", "for", "datasets", "with", "an", "unbalanced", "class", "distribution", ".", "In", "fromData", "the", "value", "of", "C", "for", "each", "pattern", "is", "taken", "from", "the", "C", "attribute", "of", "the", "training", "data", ".", "-", "optimizer", "-", "which", "optimizer", "to", "use", ".", "the", "options", "are", ":", "libsvm", "--", "run", "libsvm", "liblinear", "--", "use", "liblinear", "(", "linear", "svm", "only", ")", "in", "this", "case", "you", "have", "the", "option", "to", "set", "the", "loss", "function", "-", "either", "l1", "or", "l2", "mysmo", "-", "use", "the", "PyML", "native", "optmizer", "(", "based", "on", "libsvm", ")", "gist", "-", "use", "a", "gist", "-", "like", "optimizer", ".", "-", "loss", "-", "when", "using", "liblinear", "set", "this", "to", "l1", "or", "l2", "(", "default", ":", "l1", ")", "-", "cacheSize", "-", "size", "of", "the", "kernel", "cache", "(", "in", "MB", ")", "." ]
def __init__(self, arg = None, **args): """ :Parameters: - `arg` - another SVM object or a kernel object; if no argument is given the kernel function of the training dataset is used :Keywords: - `C` - the svm C parameter - `Cmode` - the way the C parameter is used; values: 'equal', 'classProb', 'fromData'. In 'equal' mode C is set to be the same for both classes In 'classProb' mode each class is assigned a C value that is proportional to the size of the other class. This results in margin error costs being proportional to the ratio of the sizes of the two classes. This is useful for datasets with an unbalanced class distribution. In 'fromData' the value of C for each pattern is taken from the 'C' attribute of the training data. - `optimizer` - which optimizer to use. the options are: 'libsvm' -- run libsvm 'liblinear' -- use liblinear (linear svm only) in this case you have the option to set the loss function - either 'l1' or 'l2' 'mysmo' - use the PyML native optmizer (based on libsvm) 'gist' - use a gist-like optimizer. - `loss` - when using liblinear set this to 'l1' or 'l2' (default: 'l1') - `cacheSize` - size of the kernel cache (in MB). """ Classifier.__init__(self, arg, **args) self.kernel = None if arg.__class__ == self.__class__ : if arg.kernel is not None : self.kernel = arg.kernel.__class__(arg.kernel) elif hasattr(arg, 'type') and arg.type == 'kernel' : self.kernel = arg.__class__(arg) elif arg is not None : raise ValueError, 'unknown type of argument'
[ "def", "__init__", "(", "self", ",", "arg", "=", "None", ",", "*", "*", "args", ")", ":", "Classifier", ".", "__init__", "(", "self", ",", "arg", ",", "*", "*", "args", ")", "self", ".", "kernel", "=", "None", "if", "arg", ".", "__class__", "==", "self", ".", "__class__", ":", "if", "arg", ".", "kernel", "is", "not", "None", ":", "self", ".", "kernel", "=", "arg", ".", "kernel", ".", "__class__", "(", "arg", ".", "kernel", ")", "elif", "hasattr", "(", "arg", ",", "'type'", ")", "and", "arg", ".", "type", "==", "'kernel'", ":", "self", ".", "kernel", "=", "arg", ".", "__class__", "(", "arg", ")", "elif", "arg", "is", "not", "None", ":", "raise", "ValueError", ",", "'unknown type of argument'" ]
https://github.com/cathywu/Sentiment-Analysis/blob/eb501fd1375c0c3f3ab430f963255f1bb858e659/PyML-0.7.9/PyML/classifiers/svm.py#L46-L84
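A hedged construction sketch for the SVM class documented above, using the keywords from its docstring; dataset loading is PyML-specific and left out:

    from PyML.classifiers.svm import SVM  # import path per the record

    # C and Cmode per the keyword list above; optimizer selects the backend
    clf = SVM(C=10, Cmode='classProb', optimizer='libsvm')
    # training would then be: clf.train(data)  # `data` is a PyML dataset object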
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
tools/grit/grit/scons.py
python
_SetDependencies
(env, base_dir, res_file, rc_alls, translated_files, static_files)
Sets dependencies in the environment. Args: env: The SCons environment. base_dir: The base directory for filenames. res_file: The res_file specified in the RC flags. rc_alls: All non-rc_header outputs. translated_files: Files that are structures or skeletons, and get translated by GRIT. static_files: Files that are includes, and are used directly by res files.
Sets dependencies in the environment.
[ "Sets", "dependencies", "in", "the", "environment", "." ]
def _SetDependencies(env, base_dir, res_file, rc_alls, translated_files, static_files): """Sets dependencies in the environment. Args: env: The SCons environment. base_dir: The base directory for filenames. res_file: The res_file specified in the RC flags. rc_alls: All non-rc_header outputs. translated_files: Files that are structures or skeletons, and get translated by GRIT. static_files: Files that are includes, and are used directly by res files. """ if res_file: env.Depends(os.path.join(base_dir, res_file), static_files) else: # Make a best effort dependency setup when no res file is specified. translated_files.extend(static_files) for rc_all in rc_alls: env.Depends(rc_all, translated_files)
[ "def", "_SetDependencies", "(", "env", ",", "base_dir", ",", "res_file", ",", "rc_alls", ",", "translated_files", ",", "static_files", ")", ":", "if", "res_file", ":", "env", ".", "Depends", "(", "os", ".", "path", ".", "join", "(", "base_dir", ",", "res_file", ")", ",", "static_files", ")", "else", ":", "# Make a best effort dependency setup when no res file is specified.", "translated_files", ".", "extend", "(", "static_files", ")", "for", "rc_all", "in", "rc_alls", ":", "env", ".", "Depends", "(", "rc_all", ",", "translated_files", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/grit/grit/scons.py#L169-L189
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/xrc.py
python
XmlResource.LoadObject
(*args, **kwargs)
return _xrc.XmlResource_LoadObject(*args, **kwargs)
LoadObject(self, Window parent, String name, String classname) -> Object
LoadObject(self, Window parent, String name, String classname) -> Object
[ "LoadObject", "(", "self", "Window", "parent", "String", "name", "String", "classname", ")", "-", ">", "Object" ]
def LoadObject(*args, **kwargs): """LoadObject(self, Window parent, String name, String classname) -> Object""" return _xrc.XmlResource_LoadObject(*args, **kwargs)
[ "def", "LoadObject", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_xrc", ".", "XmlResource_LoadObject", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/xrc.py#L159-L161
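A small sketch of loading an object from an XRC resource in wxPython Classic; 'gui.xrc' and the object names are hypothetical placeholders:

    import wx
    import wx.xrc as xrc

    app = wx.App(False)
    res = xrc.XmlResource('gui.xrc')
    frame = res.LoadObject(None, 'MainFrame', 'wxFrame')  # parent, name, classname
    frame.Show()
    app.MainLoop()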
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/algorithms.py
python
_get_hashtable_algo
(values)
return htable, values
Parameters ---------- values : arraylike Returns ------- htable : HashTable subclass values : ndarray
Parameters ---------- values : arraylike
[ "Parameters", "----------", "values", ":", "arraylike" ]
def _get_hashtable_algo(values): """ Parameters ---------- values : arraylike Returns ------- htable : HashTable subclass values : ndarray """ values, _ = _ensure_data(values) ndtype = _check_object_for_strings(values) htable = _hashtables[ndtype] return htable, values
[ "def", "_get_hashtable_algo", "(", "values", ")", ":", "values", ",", "_", "=", "_ensure_data", "(", "values", ")", "ndtype", "=", "_check_object_for_strings", "(", "values", ")", "htable", "=", "_hashtables", "[", "ndtype", "]", "return", "htable", ",", "values" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/algorithms.py#L229-L244
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/mindrecord/shardwriter.py
python
ShardWriter._merge_blob
(self, blob_data)
return merged
Merge multiple blob data whose type is bytes or ndarray Args: blob_data (dict): Dict of blob data Returns: bytes, merged blob data
Merge multiple blob data whose type is bytes or ndarray
[ "Merge", "multiple", "blob", "data", "whose", "type", "is", "bytes", "or", "ndarray" ]
def _merge_blob(self, blob_data): """ Merge multiple blob data whose type is bytes or ndarray Args: blob_data (dict): Dict of blob data Returns: bytes, merged blob data """ if len(blob_data) == 1: values = [v for v in blob_data.values()] return bytes(values[0]) # convert int to bytes def int_to_bytes(x: int) -> bytes: return x.to_bytes(8, 'big') merged = bytes() for field, v in blob_data.items(): # convert ndarray to bytes if isinstance(v, np.ndarray): v = v.astype(self._header.schema[field]["type"]).tobytes() merged += int_to_bytes(len(v)) merged += v return merged
[ "def", "_merge_blob", "(", "self", ",", "blob_data", ")", ":", "if", "len", "(", "blob_data", ")", "==", "1", ":", "values", "=", "[", "v", "for", "v", "in", "blob_data", ".", "values", "(", ")", "]", "return", "bytes", "(", "values", "[", "0", "]", ")", "# convert int to bytes", "def", "int_to_bytes", "(", "x", ":", "int", ")", "->", "bytes", ":", "return", "x", ".", "to_bytes", "(", "8", ",", "'big'", ")", "merged", "=", "bytes", "(", ")", "for", "field", ",", "v", "in", "blob_data", ".", "items", "(", ")", ":", "# convert ndarray to bytes", "if", "isinstance", "(", "v", ",", "np", ".", "ndarray", ")", ":", "v", "=", "v", ".", "astype", "(", "self", ".", "_header", ".", "schema", "[", "field", "]", "[", "\"type\"", "]", ")", ".", "tobytes", "(", ")", "merged", "+=", "int_to_bytes", "(", "len", "(", "v", ")", ")", "merged", "+=", "v", "return", "merged" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/mindrecord/shardwriter.py#L184-L209
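The multi-field layout produced above is a sequence of (8-byte big-endian length, payload) pairs; a self-contained decoder sketch for that layout (the single-field fast path stores the raw bytes with no length prefix):

    def split_blob(merged):
        parts, pos = [], 0
        while pos < len(merged):
            length = int.from_bytes(merged[pos:pos + 8], 'big')  # 8-byte prefix
            pos += 8
            parts.append(merged[pos:pos + length])
            pos += length
        return parts

    merged = (3).to_bytes(8, 'big') + b'abc' + (2).to_bytes(8, 'big') + b'xy'
    assert split_blob(merged) == [b'abc', b'xy']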
eclipse/sumo
7132a9b8b6eea734bdec38479026b4d8c4336d03
tools/contributed/sumopy/agilepy/lib_wx/objpanel.py
python
BooleanWidgetContainer.get_valuewidget_write
(self)
return widget
Return widget to edit the boolean value of an attribute. This is effectively the parametrisation of the wx.CheckBox widget.
Return widget to edit the boolean value of an attribute. This is effectively the parametrisation of the wx.CheckBox widget.
[ "Return", "widget", "to", "edit", "numeric", "value", "of", "attribute", "This", "is", "effectively", "the", "parametrisation", "of", "the", "masked", ".", "NumCtrl", "widget", "." ]
def get_valuewidget_write(self): """ Return widget to edit numeric value of attribute This is effectively the parametrisation of the masked.NumCtrl widget. """ # print 'get_numeditwidget',value widget = wx.CheckBox(self.parent, -1) if self.immediate_apply: self.parent.Bind(wx.EVT_CHECKBOX, self.on_apply_immediate, widget) value = self.get_value_obj() widget.SetValue(value) return widget
[ "def", "get_valuewidget_write", "(", "self", ")", ":", "# print 'get_numeditwidget',value", "widget", "=", "wx", ".", "CheckBox", "(", "self", ".", "parent", ",", "-", "1", ")", "if", "self", ".", "immediate_apply", ":", "self", ".", "parent", ".", "Bind", "(", "wx", ".", "EVT_CHECKBOX", ",", "self", ".", "on_apply_immediate", ",", "widget", ")", "value", "=", "self", ".", "get_value_obj", "(", ")", "widget", ".", "SetValue", "(", "value", ")", "return", "widget" ]
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/agilepy/lib_wx/objpanel.py#L787-L799
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/requests/api.py
python
head
(url, **kwargs)
return request('head', url, **kwargs)
r"""Sends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. If `allow_redirects` is not provided, it will be set to `False` (as opposed to the default :meth:`request` behavior). :return: :class:`Response <Response>` object :rtype: requests.Response
r"""Sends a HEAD request.
[ "r", "Sends", "a", "HEAD", "request", "." ]
def head(url, **kwargs): r"""Sends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. If `allow_redirects` is not provided, it will be set to `False` (as opposed to the default :meth:`request` behavior). :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return request('head', url, **kwargs)
[ "def", "head", "(", "url", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'allow_redirects'", ",", "False", ")", "return", "request", "(", "'head'", ",", "url", ",", "*", "*", "kwargs", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/requests/api.py#L92-L104
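Typical usage; note that, unlike requests.get(), redirects are not followed by default here, so a Location header shows up on 3xx responses:

    import requests

    resp = requests.head('https://example.com', timeout=5)
    print(resp.status_code)
    print(resp.headers.get('Content-Type'))
    print(resp.headers.get('Location'))  # set on 3xx since redirects are off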
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/timeseries/python/timeseries/head.py
python
_TimeSeriesRegressionHead._train_ops
(self, features)
return estimator_lib.EstimatorSpec( loss=model_outputs.loss, mode=estimator_lib.ModeKeys.TRAIN, train_op=train_op)
Add training ops to the graph.
Add training ops to the graph.
[ "Add", "training", "ops", "to", "the", "graph", "." ]
def _train_ops(self, features): """Add training ops to the graph.""" with variable_scope.variable_scope("model"): model_outputs = self.state_manager.define_loss( self.model, features, estimator_lib.ModeKeys.TRAIN) train_op = optimizers.optimize_loss( model_outputs.loss, global_step=variables.get_global_step(), optimizer=self.optimizer, # Learning rate is set in the Optimizer object learning_rate=None) return estimator_lib.EstimatorSpec( loss=model_outputs.loss, mode=estimator_lib.ModeKeys.TRAIN, train_op=train_op)
[ "def", "_train_ops", "(", "self", ",", "features", ")", ":", "with", "variable_scope", ".", "variable_scope", "(", "\"model\"", ")", ":", "model_outputs", "=", "self", ".", "state_manager", ".", "define_loss", "(", "self", ".", "model", ",", "features", ",", "estimator_lib", ".", "ModeKeys", ".", "TRAIN", ")", "train_op", "=", "optimizers", ".", "optimize_loss", "(", "model_outputs", ".", "loss", ",", "global_step", "=", "variables", ".", "get_global_step", "(", ")", ",", "optimizer", "=", "self", ".", "optimizer", ",", "# Learning rate is set in the Optimizer object", "learning_rate", "=", "None", ")", "return", "estimator_lib", ".", "EstimatorSpec", "(", "loss", "=", "model_outputs", ".", "loss", ",", "mode", "=", "estimator_lib", ".", "ModeKeys", ".", "TRAIN", ",", "train_op", "=", "train_op", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/timeseries/python/timeseries/head.py#L74-L89
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py
python
VectorExponentialLinearOperator._mode_mean_shape
(self)
return shape
Shape for the mode/mean Tensors.
Shape for the mode/mean Tensors.
[ "Shape", "for", "the", "mode", "/", "mean", "Tensors", "." ]
def _mode_mean_shape(self): """Shape for the mode/mean Tensors.""" shape = self.batch_shape.concatenate(self.event_shape) has_static_shape = shape.is_fully_defined() if not has_static_shape: shape = array_ops.concat([ self.batch_shape_tensor(), self.event_shape_tensor(), ], 0) return shape
[ "def", "_mode_mean_shape", "(", "self", ")", ":", "shape", "=", "self", ".", "batch_shape", ".", "concatenate", "(", "self", ".", "event_shape", ")", "has_static_shape", "=", "shape", ".", "is_fully_defined", "(", ")", "if", "not", "has_static_shape", ":", "shape", "=", "array_ops", ".", "concat", "(", "[", "self", ".", "batch_shape_tensor", "(", ")", ",", "self", ".", "event_shape_tensor", "(", ")", ",", "]", ",", "0", ")", "return", "shape" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py#L279-L288
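The same static-first/dynamic-fallback idiom, sketched with present-day TensorFlow APIs rather than the distribution-internal ones above:

    import tensorflow as tf

    def shape_of(x):
        shape = x.shape                  # static TensorShape, may be partial
        if not shape.is_fully_defined():
            shape = tf.shape(x)          # fall back to the dynamic shape tensor
        return shape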
sdhash/sdhash
b9eff63e4e5867e910f41fd69032bbb1c94a2a5e
sdhash-server/gen-py/sdhashsrv/sdhashsrv.py
python
Iface.saveSet
(self, num1, filename)
Parameters: - num1 - filename
Parameters: - num1 - filename
[ "Parameters", ":", "-", "num1", "-", "filename" ]
def saveSet(self, num1, filename): """ Parameters: - num1 - filename """ pass
[ "def", "saveSet", "(", "self", ",", "num1", ",", "filename", ")", ":", "pass" ]
https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/sdhash-server/gen-py/sdhashsrv/sdhashsrv.py#L76-L82
bumptop/BumpTop
466d23597a07ae738f4265262fa01087fc6e257c
trunk/win/Source/bin/jinja2/meta.py
python
TrackingCodeGenerator.write
(self, x)
Don't write.
Don't write.
[ "Don", "t", "write", "." ]
def write(self, x): """Don't write."""
[ "def", "write", "(", "self", ",", "x", ")", ":" ]
https://github.com/bumptop/BumpTop/blob/466d23597a07ae738f4265262fa01087fc6e257c/trunk/win/Source/bin/jinja2/meta.py#L24-L25
sdhash/sdhash
b9eff63e4e5867e910f41fd69032bbb1c94a2a5e
sdhash-ui/jinja2/environment.py
python
Environment.compile
(self, source, name=None, filename=None, raw=False, defer_init=False)
Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. The `filename` parameter is the estimated filename of the template on the file system. If the template came from a database or memory this can be omitted. The return value of this method is a python code object. If the `raw` parameter is `True` the return value will be a string with python code equivalent to the bytecode returned otherwise. This method is mainly used internally. `defer_init` is used internally to aid the module code generator. This causes the generated code to be able to import without the global environment variable being set. .. versionadded:: 2.4 `defer_init` parameter added.
Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. The `filename` parameter is the estimated filename of the template on the file system. If the template came from a database or memory this can be omitted.
[ "Compile", "a", "node", "or", "template", "source", "code", ".", "The", "name", "parameter", "is", "the", "load", "name", "of", "the", "template", "after", "it", "was", "joined", "using", ":", "meth", ":", "join_path", "if", "necessary", "not", "the", "filename", "on", "the", "file", "system", ".", "the", "filename", "parameter", "is", "the", "estimated", "filename", "of", "the", "template", "on", "the", "file", "system", ".", "If", "the", "template", "came", "from", "a", "database", "or", "memory", "this", "can", "be", "omitted", "." ]
def compile(self, source, name=None, filename=None, raw=False, defer_init=False): """Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. the `filename` parameter is the estimated filename of the template on the file system. If the template came from a database or memory this can be omitted. The return value of this method is a python code object. If the `raw` parameter is `True` the return value will be a string with python code equivalent to the bytecode returned otherwise. This method is mainly used internally. `defer_init` is use internally to aid the module code generator. This causes the generated code to be able to import without the global environment variable to be set. .. versionadded:: 2.4 `defer_init` parameter added. """ source_hint = None try: if isinstance(source, basestring): source_hint = source source = self._parse(source, name, filename) if self.optimized: source = optimize(source, self) source = self._generate(source, name, filename, defer_init=defer_init) if raw: return source if filename is None: filename = '<template>' else: filename = _encode_filename(filename) return self._compile(source, filename) except TemplateSyntaxError: exc_info = sys.exc_info() self.handle_exception(exc_info, source_hint=source)
[ "def", "compile", "(", "self", ",", "source", ",", "name", "=", "None", ",", "filename", "=", "None", ",", "raw", "=", "False", ",", "defer_init", "=", "False", ")", ":", "source_hint", "=", "None", "try", ":", "if", "isinstance", "(", "source", ",", "basestring", ")", ":", "source_hint", "=", "source", "source", "=", "self", ".", "_parse", "(", "source", ",", "name", ",", "filename", ")", "if", "self", ".", "optimized", ":", "source", "=", "optimize", "(", "source", ",", "self", ")", "source", "=", "self", ".", "_generate", "(", "source", ",", "name", ",", "filename", ",", "defer_init", "=", "defer_init", ")", "if", "raw", ":", "return", "source", "if", "filename", "is", "None", ":", "filename", "=", "'<template>'", "else", ":", "filename", "=", "_encode_filename", "(", "filename", ")", "return", "self", ".", "_compile", "(", "source", ",", "filename", ")", "except", "TemplateSyntaxError", ":", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "self", ".", "handle_exception", "(", "exc_info", ",", "source_hint", "=", "source", ")" ]
https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/sdhash-ui/jinja2/environment.py#L454-L493
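With raw=True the method returns the generated Python module source instead of a code object, which is handy for inspecting what Jinja2 emits:

    from jinja2 import Environment

    env = Environment()
    src = env.compile('Hello {{ name }}!', name='greeting', raw=True)
    print(src)                                # generated Python source string
    code = env.compile('Hello {{ name }}!')   # default: compiled code object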
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/rexec.py
python
RExec.s_reload
(self, *args)
return self.s_apply(self.r_reload, args)
Reload the module object, re-parsing and re-initializing it. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment. Similar to the r_reload() method, but has access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout.
Reload the module object, re-parsing and re-initializing it.
[ "Reload", "the", "module", "object", "re", "-", "parsing", "and", "re", "-", "initializing", "it", "." ]
def s_reload(self, *args): """Reload the module object, re-parsing and re-initializing it. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment. Similar to the r_reload() method, but has access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout. """ return self.s_apply(self.r_reload, args)
[ "def", "s_reload", "(", "self", ",", "*", "args", ")", ":", "return", "self", ".", "s_apply", "(", "self", ".", "r_reload", ",", "args", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/rexec.py#L477-L489
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/command/check.py
python
check.check_metadata
(self)
Ensures that all required elements of meta-data are supplied. name, version, URL, (author and author_email) or (maintainer and maintainer_email). Warns if any are missing.
Ensures that all required elements of meta-data are supplied.
[ "Ensures", "that", "all", "required", "elements", "of", "meta", "-", "data", "are", "supplied", "." ]
def check_metadata(self): """Ensures that all required elements of meta-data are supplied. name, version, URL, (author and author_email) or (maintainer and maintainer_email)). Warns if any are missing. """ metadata = self.distribution.metadata missing = [] for attr in ('name', 'version', 'url'): if not (hasattr(metadata, attr) and getattr(metadata, attr)): missing.append(attr) if missing: self.warn("missing required meta-data: %s" % ', '.join(missing)) if metadata.author: if not metadata.author_email: self.warn("missing meta-data: if 'author' supplied, " + "'author_email' must be supplied too") elif metadata.maintainer: if not metadata.maintainer_email: self.warn("missing meta-data: if 'maintainer' supplied, " + "'maintainer_email' must be supplied too") else: self.warn("missing meta-data: either (author and author_email) " + "or (maintainer and maintainer_email) " + "must be supplied")
[ "def", "check_metadata", "(", "self", ")", ":", "metadata", "=", "self", ".", "distribution", ".", "metadata", "missing", "=", "[", "]", "for", "attr", "in", "(", "'name'", ",", "'version'", ",", "'url'", ")", ":", "if", "not", "(", "hasattr", "(", "metadata", ",", "attr", ")", "and", "getattr", "(", "metadata", ",", "attr", ")", ")", ":", "missing", ".", "append", "(", "attr", ")", "if", "missing", ":", "self", ".", "warn", "(", "\"missing required meta-data: %s\"", "%", "', '", ".", "join", "(", "missing", ")", ")", "if", "metadata", ".", "author", ":", "if", "not", "metadata", ".", "author_email", ":", "self", ".", "warn", "(", "\"missing meta-data: if 'author' supplied, \"", "+", "\"'author_email' must be supplied too\"", ")", "elif", "metadata", ".", "maintainer", ":", "if", "not", "metadata", ".", "maintainer_email", ":", "self", ".", "warn", "(", "\"missing meta-data: if 'maintainer' supplied, \"", "+", "\"'maintainer_email' must be supplied too\"", ")", "else", ":", "self", ".", "warn", "(", "\"missing meta-data: either (author and author_email) \"", "+", "\"or (maintainer and maintainer_email) \"", "+", "\"must be supplied\"", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/command/check.py#L80-L108
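These checks back the `python setup.py check` command; a minimal setup.py that satisfies them (all values hypothetical):

    from distutils.core import setup

    setup(
        name='examplepkg',
        version='0.1',
        url='https://example.com/examplepkg',
        author='Jane Doe',
        author_email='jane@example.com',
    )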
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/shelve.py
python
open
(filename, flag='c', protocol=None, writeback=False)
return DbfilenameShelf(filename, flag, protocol, writeback)
Open a persistent dictionary for reading and writing. The filename parameter is the base filename for the underlying database. As a side-effect, an extension may be added to the filename and more than one file may be created. The optional flag parameter has the same interpretation as the flag parameter of anydbm.open(). The optional protocol parameter specifies the version of the pickle protocol (0, 1, or 2). See the module's __doc__ string for an overview of the interface.
Open a persistent dictionary for reading and writing.
[ "Open", "a", "persistent", "dictionary", "for", "reading", "and", "writing", "." ]
def open(filename, flag='c', protocol=None, writeback=False): """Open a persistent dictionary for reading and writing. The filename parameter is the base filename for the underlying database. As a side-effect, an extension may be added to the filename and more than one file may be created. The optional flag parameter has the same interpretation as the flag parameter of anydbm.open(). The optional protocol parameter specifies the version of the pickle protocol (0, 1, or 2). See the module's __doc__ string for an overview of the interface. """ return DbfilenameShelf(filename, flag, protocol, writeback)
[ "def", "open", "(", "filename", ",", "flag", "=", "'c'", ",", "protocol", "=", "None", ",", "writeback", "=", "False", ")", ":", "return", "DbfilenameShelf", "(", "filename", ",", "flag", ",", "protocol", ",", "writeback", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/shelve.py#L226-L239
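Standard-library usage of the function above; the default 'c' flag creates the database if needed and opens it read/write:

    import shelve

    db = shelve.open('cache_db')    # may create cache_db.* files on disk
    db['answer'] = {'value': 42}    # values are pickled transparently
    print(db['answer'])
    db.close()                      # flushes and closes the underlying dbm file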
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/generator.py
python
Html.TransformText
(self, text)
return text
Does character substitution on a string and returns the html equivalent of the given string. @param text: text to transform @return: text with all special characters transformed
Does character substitution on a string and returns the html equivalent of the given string. @param text: text to transform @return: text with all special characters transformed
[ "Does", "character", "substitution", "on", "a", "string", "and", "returns", "the", "html", "equivalent", "of", "the", "given", "string", ".", "@param", "text", ":", "text", "to", "transform", "@return", ":", "text", "with", "all", "special", "characters", "transformed" ]
def TransformText(self, text): """Does character substitution on a string and returns the html equivalent of the given string. @param text: text to transform @return: text with all special characters transformed """ text = text.replace('&', "&amp;") # Ampersands text = text.replace('<', "&lt;") # Less Than Symbols text = text.replace('>', "&gt;") # Greater Than Symbols text = text.replace("\"", "&quot;") return text
[ "def", "TransformText", "(", "self", ",", "text", ")", ":", "text", "=", "text", ".", "replace", "(", "'&'", ",", "\"&amp;\"", ")", "# Ampersands", "text", "=", "text", ".", "replace", "(", "'<'", ",", "\"&lt;\"", ")", "# Less Than Symbols", "text", "=", "text", ".", "replace", "(", "'>'", ",", "\"&gt;\"", ")", "# Greater Than Symbols", "text", "=", "text", ".", "replace", "(", "\"\\\"\"", ",", "\"&quot;\"", ")", "return", "text" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/generator.py#L318-L329
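A standalone equivalent of the substitution above; ampersands must be replaced first, otherwise the entities produced by the later replacements would be double-escaped:

    def escape_html(text):
        text = text.replace('&', '&amp;')   # must come first
        text = text.replace('<', '&lt;')
        text = text.replace('>', '&gt;')
        text = text.replace('"', '&quot;')
        return text

    assert escape_html('a < b & "c"') == 'a &lt; b &amp; &quot;c&quot;'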
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_core.py
python
ImageHandler.GetExtension
(*args, **kwargs)
return _core_.ImageHandler_GetExtension(*args, **kwargs)
GetExtension(self) -> String
GetExtension(self) -> String
[ "GetExtension", "(", "self", ")", "-", ">", "String" ]
def GetExtension(*args, **kwargs): """GetExtension(self) -> String""" return _core_.ImageHandler_GetExtension(*args, **kwargs)
[ "def", "GetExtension", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "ImageHandler_GetExtension", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L2624-L2626
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/lite/python/tflite_convert.py
python
_get_tflite_converter
(flags)
return converter_fn(**converter_kwargs)
Makes a TFLiteConverter object based on the flags provided. Args: flags: argparse.Namespace object containing TFLite flags. Returns: TFLiteConverter object. Raises: ValueError: Invalid flags.
Makes a TFLiteConverter object based on the flags provided.
[ "Makes", "a", "TFLiteConverter", "object", "based", "on", "the", "flags", "provided", "." ]
def _get_tflite_converter(flags): """Makes a TFLiteConverter object based on the flags provided. Args: flags: argparse.Namespace object containing TFLite flags. Returns: TFLiteConverter object. Raises: ValueError: Invalid flags. """ # Parse input and output arrays. input_arrays = _parse_array(flags.input_arrays) input_shapes = None if flags.input_shapes: input_shapes_list = [ _parse_array(shape, type_fn=int) for shape in six.ensure_str(flags.input_shapes).split(":") ] input_shapes = dict(list(zip(input_arrays, input_shapes_list))) output_arrays = _parse_array(flags.output_arrays) converter_kwargs = { "input_arrays": input_arrays, "input_shapes": input_shapes, "output_arrays": output_arrays } # Create TFLiteConverter. if flags.graph_def_file: converter_fn = lite.TFLiteConverter.from_frozen_graph converter_kwargs["graph_def_file"] = flags.graph_def_file elif flags.saved_model_dir: converter_fn = lite.TFLiteConverter.from_saved_model converter_kwargs["saved_model_dir"] = flags.saved_model_dir converter_kwargs["tag_set"] = _parse_set(flags.saved_model_tag_set) converter_kwargs["signature_key"] = flags.saved_model_signature_key elif flags.keras_model_file: converter_fn = lite.TFLiteConverter.from_keras_model_file converter_kwargs["model_file"] = flags.keras_model_file else: raise ValueError("--graph_def_file, --saved_model_dir, or " "--keras_model_file must be specified.") return converter_fn(**converter_kwargs)
[ "def", "_get_tflite_converter", "(", "flags", ")", ":", "# Parse input and output arrays.", "input_arrays", "=", "_parse_array", "(", "flags", ".", "input_arrays", ")", "input_shapes", "=", "None", "if", "flags", ".", "input_shapes", ":", "input_shapes_list", "=", "[", "_parse_array", "(", "shape", ",", "type_fn", "=", "int", ")", "for", "shape", "in", "six", ".", "ensure_str", "(", "flags", ".", "input_shapes", ")", ".", "split", "(", "\":\"", ")", "]", "input_shapes", "=", "dict", "(", "list", "(", "zip", "(", "input_arrays", ",", "input_shapes_list", ")", ")", ")", "output_arrays", "=", "_parse_array", "(", "flags", ".", "output_arrays", ")", "converter_kwargs", "=", "{", "\"input_arrays\"", ":", "input_arrays", ",", "\"input_shapes\"", ":", "input_shapes", ",", "\"output_arrays\"", ":", "output_arrays", "}", "# Create TFLiteConverter.", "if", "flags", ".", "graph_def_file", ":", "converter_fn", "=", "lite", ".", "TFLiteConverter", ".", "from_frozen_graph", "converter_kwargs", "[", "\"graph_def_file\"", "]", "=", "flags", ".", "graph_def_file", "elif", "flags", ".", "saved_model_dir", ":", "converter_fn", "=", "lite", ".", "TFLiteConverter", ".", "from_saved_model", "converter_kwargs", "[", "\"saved_model_dir\"", "]", "=", "flags", ".", "saved_model_dir", "converter_kwargs", "[", "\"tag_set\"", "]", "=", "_parse_set", "(", "flags", ".", "saved_model_tag_set", ")", "converter_kwargs", "[", "\"signature_key\"", "]", "=", "flags", ".", "saved_model_signature_key", "elif", "flags", ".", "keras_model_file", ":", "converter_fn", "=", "lite", ".", "TFLiteConverter", ".", "from_keras_model_file", "converter_kwargs", "[", "\"model_file\"", "]", "=", "flags", ".", "keras_model_file", "else", ":", "raise", "ValueError", "(", "\"--graph_def_file, --saved_model_dir, or \"", "\"--keras_model_file must be specified.\"", ")", "return", "converter_fn", "(", "*", "*", "converter_kwargs", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/lite/python/tflite_convert.py#L106-L151
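The flag plumbing above ultimately dispatches to one of three constructors; a sketch of calling the saved-model branch directly (TF 1.x-era lite module, paths hypothetical):

    from tensorflow import lite

    converter = lite.TFLiteConverter.from_saved_model('/tmp/saved_model')
    tflite_model = converter.convert()
    with open('/tmp/model.tflite', 'wb') as f:
        f.write(tflite_model)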
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
third_party/tlslite/tlslite/X509.py
python
X509.getCommonName
(self)
Get the Subject's Common Name from the certificate. The cryptlib_py module must be installed in order to use this function. @rtype: str or None @return: The CN component of the certificate's subject DN, if present.
Get the Subject's Common Name from the certificate.
[ "Get", "the", "Subject", "s", "Common", "Name", "from", "the", "certificate", "." ]
def getCommonName(self): """Get the Subject's Common Name from the certificate. The cryptlib_py module must be installed in order to use this function. @rtype: str or None @return: The CN component of the certificate's subject DN, if present. """ import cryptlib_py import array c = cryptlib_py.cryptImportCert(self.bytes, cryptlib_py.CRYPT_UNUSED) name = cryptlib_py.CRYPT_CERTINFO_COMMONNAME try: try: length = cryptlib_py.cryptGetAttributeString(c, name, None) returnVal = array.array('B', [0] * length) cryptlib_py.cryptGetAttributeString(c, name, returnVal) returnVal = returnVal.tostring() except cryptlib_py.CryptException, e: if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: returnVal = None return returnVal finally: cryptlib_py.cryptDestroyCert(c)
[ "def", "getCommonName", "(", "self", ")", ":", "import", "cryptlib_py", "import", "array", "c", "=", "cryptlib_py", ".", "cryptImportCert", "(", "self", ".", "bytes", ",", "cryptlib_py", ".", "CRYPT_UNUSED", ")", "name", "=", "cryptlib_py", ".", "CRYPT_CERTINFO_COMMONNAME", "try", ":", "try", ":", "length", "=", "cryptlib_py", ".", "cryptGetAttributeString", "(", "c", ",", "name", ",", "None", ")", "returnVal", "=", "array", ".", "array", "(", "'B'", ",", "[", "0", "]", "*", "length", ")", "cryptlib_py", ".", "cryptGetAttributeString", "(", "c", ",", "name", ",", "returnVal", ")", "returnVal", "=", "returnVal", ".", "tostring", "(", ")", "except", "cryptlib_py", ".", "CryptException", ",", "e", ":", "if", "e", "[", "0", "]", "==", "cryptlib_py", ".", "CRYPT_ERROR_NOTFOUND", ":", "returnVal", "=", "None", "return", "returnVal", "finally", ":", "cryptlib_py", ".", "cryptDestroyCert", "(", "c", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/tlslite/tlslite/X509.py#L112-L137
assimp/assimp
97c7e084c2f7f8c9355ea42f73605890481bddc5
port/PyAssimp/scripts/transformations.py
python
Arcball.drag
(self, point)
Update current cursor window coordinates.
Update current cursor window coordinates.
[ "Update", "current", "cursor", "window", "coordinates", "." ]
def drag(self, point): """Update current cursor window coordinates.""" vnow = arcball_map_to_sphere(point, self._center, self._radius) if self._axis is not None: vnow = arcball_constrain_to_axis(vnow, self._axis) self._qpre = self._qnow t = numpy.cross(self._vdown, vnow) if numpy.dot(t, t) < _EPS: self._qnow = self._qdown else: q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)] self._qnow = quaternion_multiply(q, self._qdown)
[ "def", "drag", "(", "self", ",", "point", ")", ":", "vnow", "=", "arcball_map_to_sphere", "(", "point", ",", "self", ".", "_center", ",", "self", ".", "_radius", ")", "if", "self", ".", "_axis", "is", "not", "None", ":", "vnow", "=", "arcball_constrain_to_axis", "(", "vnow", ",", "self", ".", "_axis", ")", "self", ".", "_qpre", "=", "self", ".", "_qnow", "t", "=", "numpy", ".", "cross", "(", "self", ".", "_vdown", ",", "vnow", ")", "if", "numpy", ".", "dot", "(", "t", ",", "t", ")", "<", "_EPS", ":", "self", ".", "_qnow", "=", "self", ".", "_qdown", "else", ":", "q", "=", "[", "t", "[", "0", "]", ",", "t", "[", "1", "]", ",", "t", "[", "2", "]", ",", "numpy", ".", "dot", "(", "self", ".", "_vdown", ",", "vnow", ")", "]", "self", ".", "_qnow", "=", "quaternion_multiply", "(", "q", ",", "self", ".", "_qdown", ")" ]
https://github.com/assimp/assimp/blob/97c7e084c2f7f8c9355ea42f73605890481bddc5/port/PyAssimp/scripts/transformations.py#L1446-L1460
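Usage in the style of the transformations.py doctests, assuming the module is importable: place the ball, register the press position, drag, then read out the rotation:

    from transformations import Arcball

    ball = Arcball()
    ball.place([320, 320], 320)  # window center and radius
    ball.down([500, 250])        # mouse-press position
    ball.drag([475, 275])        # current cursor position
    R = ball.matrix()            # 4x4 rotation matrix (numpy array)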
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/node-10.15.3/deps/v8/third_party/jinja2/bccache.py
python
BytecodeCache.dump_bytecode
(self, bucket)
Subclasses have to override this method to write the bytecode from a bucket back to the cache. If it is unable to do so, it must not fail silently but raise an exception.
Subclasses have to override this method to write the bytecode from a bucket back to the cache. If it is unable to do so, it must not fail silently but raise an exception.
[ "Subclasses", "have", "to", "override", "this", "method", "to", "write", "the", "bytecode", "from", "a", "bucket", "back", "to", "the", "cache", ".", "If", "it", "unable", "to", "do", "so", "it", "must", "not", "fail", "silently", "but", "raise", "an", "exception", "." ]
def dump_bytecode(self, bucket): """Subclasses have to override this method to write the bytecode from a bucket back to the cache. If it unable to do so it must not fail silently but raise an exception. """ raise NotImplementedError()
[ "def", "dump_bytecode", "(", "self", ",", "bucket", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/v8/third_party/jinja2/bccache.py#L153-L158
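A minimal in-memory subclass sketch using the Bucket helpers from jinja2.bccache (bucket.key, bytecode_from_string, bytecode_to_string):

    from jinja2 import Environment
    from jinja2.bccache import BytecodeCache

    class DictBytecodeCache(BytecodeCache):
        def __init__(self):
            self.store = {}

        def load_bytecode(self, bucket):
            data = self.store.get(bucket.key)
            if data is not None:
                bucket.bytecode_from_string(data)

        def dump_bytecode(self, bucket):
            self.store[bucket.key] = bucket.bytecode_to_string()

    env = Environment(bytecode_cache=DictBytecodeCache())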
ValveSoftware/source-sdk-2013
0d8dceea4310fde5706b3ce1c70609d72a38efdf
sp/src/thirdparty/protobuf-2.3.0/python/google/protobuf/service_reflection.py
python
_ServiceStubBuilder.BuildServiceStub
(self, cls)
Constructs the stub class. Args: cls: The class that will be constructed.
Constructs the stub class.
[ "Constructs", "the", "stub", "class", "." ]
def BuildServiceStub(self, cls): """Constructs the stub class. Args: cls: The class that will be constructed. """ def _ServiceStubInit(stub, rpc_channel): stub.rpc_channel = rpc_channel self.cls = cls cls.__init__ = _ServiceStubInit for method in self.descriptor.methods: setattr(cls, method.name, self._GenerateStubMethod(method))
[ "def", "BuildServiceStub", "(", "self", ",", "cls", ")", ":", "def", "_ServiceStubInit", "(", "stub", ",", "rpc_channel", ")", ":", "stub", ".", "rpc_channel", "=", "rpc_channel", "self", ".", "cls", "=", "cls", "cls", ".", "__init__", "=", "_ServiceStubInit", "for", "method", "in", "self", ".", "descriptor", ".", "methods", ":", "setattr", "(", "cls", ",", "method", ".", "name", ",", "self", ".", "_GenerateStubMethod", "(", "method", ")", ")" ]
https://github.com/ValveSoftware/source-sdk-2013/blob/0d8dceea4310fde5706b3ce1c70609d72a38efdf/sp/src/thirdparty/protobuf-2.3.0/python/google/protobuf/service_reflection.py#L251-L263
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/turtle.py
python
read_docstrings
(lang)
Read in docstrings from lang-specific docstring dictionary. Transfer docstrings, translated to lang, from a dictionary-file to the methods of classes Screen and Turtle and - in revised form - to the corresponding functions.
Read in docstrings from lang-specific docstring dictionary.
[ "Read", "in", "docstrings", "from", "lang", "-", "specific", "docstring", "dictionary", "." ]
def read_docstrings(lang): """Read in docstrings from lang-specific docstring dictionary. Transfer docstrings, translated to lang, from a dictionary-file to the methods of classes Screen and Turtle and - in revised form - to the corresponding functions. """ modname = "turtle_docstringdict_%(language)s" % {'language':lang.lower()} module = __import__(modname) docsdict = module.docsdict for key in docsdict: try: # eval(key).im_func.__doc__ = docsdict[key] eval(key).__doc__ = docsdict[key] except Exception: print("Bad docstring-entry: %s" % key)
[ "def", "read_docstrings", "(", "lang", ")", ":", "modname", "=", "\"turtle_docstringdict_%(language)s\"", "%", "{", "'language'", ":", "lang", ".", "lower", "(", ")", "}", "module", "=", "__import__", "(", "modname", ")", "docsdict", "=", "module", ".", "docsdict", "for", "key", "in", "docsdict", ":", "try", ":", "# eval(key).im_func.__doc__ = docsdict[key]", "eval", "(", "key", ")", ".", "__doc__", "=", "docsdict", "[", "key", "]", "except", "Exception", ":", "print", "(", "\"Bad docstring-entry: %s\"", "%", "key", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/turtle.py#L3854-L3869
macchina-io/macchina.io
ef24ba0e18379c3dd48fb84e6dbf991101cb8db0
platform/JS/V8/v8/tools/stats-viewer.py
python
Main
(data_file, name_filter)
Run the stats counter. Args: data_file: The counters file to monitor. name_filter: The regexp filter to apply to counter names.
Run the stats counter.
[ "Run", "the", "stats", "counter", "." ]
def Main(data_file, name_filter): """Run the stats counter. Args: data_file: The counters file to monitor. name_filter: The regexp filter to apply to counter names. """ StatsViewer(data_file, name_filter).Run()
[ "def", "Main", "(", "data_file", ",", "name_filter", ")", ":", "StatsViewer", "(", "data_file", ",", "name_filter", ")", ".", "Run", "(", ")" ]
https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/v8/tools/stats-viewer.py#L451-L458
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/python/training/learning_rate_decay.py
python
natural_exp_decay
(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None)
Applies natural exponential decay to the initial learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies an exponential decay function to a provided initial learning rate. It requires a `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate. It is computed as: ```python decayed_learning_rate = learning_rate * exp(-decay_rate * global_step / decay_steps) ``` Example: decay exponentially with a rate of 0.5: ```python ... global_step = tf.Variable(0, trainable=False) learning_rate = 0.1 decay_steps = 5 k = 0.5 learning_rate = tf.train.natural_exp_decay(learning_rate, global_step, decay_steps, k) # Passing global_step to minimize() will increment it at each step. learning_step = ( tf.train.GradientDescentOptimizer(learning_rate) .minimize(...my loss..., global_step=global_step) ) ``` Args: learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. global_step: A Python number. Global step to use for the decay computation. Must not be negative. decay_steps: A Python number. The decay is applied to `global_step / decay_steps`. decay_rate: A Python number. The decay rate. staircase: Boolean. If `True`, decay the learning rate at discrete intervals. name: String. Optional name of the operation. Defaults to 'NaturalExpDecay'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate.
Applies natural exponential decay to the initial learning rate.
[ "Applies", "natural", "exponential", "decay", "to", "the", "initial", "learning", "rate", "." ]
def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None): """Applies natural exponential decay to the initial learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies an exponential decay function to a provided initial learning rate. It requires an `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate. It is computed as: ```python decayed_learning_rate = learning_rate * exp(-decay_rate * global_step) ``` Example: decay exponetially with a base of 0.96: ```python ... global_step = tf.Variable(0, trainable=False) learning_rate = 0.1 k = 0.5 learning_rate = tf.train.exponential_time_decay(learning_rate, global_step, k) # Passing global_step to minimize() will increment it at each step. learning_step = ( tf.train.GradientDescentOptimizer(learning_rate) .minimize(...my loss..., global_step=global_step) ) ``` Args: learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. global_step: A Python number. Global step to use for the decay computation. Must not be negative. decay_rate: A Python number. The decay rate. name: String. Optional name of the operation. Defaults to 'ExponentialTimeDecay' Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate. """ with ops.op_scope([learning_rate, global_step, decay_rate], name, "NaturalExpDecay") as name: learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate") dtype = learning_rate.dtype global_step = math_ops.cast(global_step, dtype) decay_steps = math_ops.cast(decay_steps, dtype) decay_rate = math_ops.cast(decay_rate, dtype) p = global_step / decay_steps if staircase: p = math_ops.floor(p) exponent = math_ops.exp(math_ops.mul(math_ops.neg(decay_rate), p)) return math_ops.mul(learning_rate, exponent, name=name)
[ "def", "natural_exp_decay", "(", "learning_rate", ",", "global_step", ",", "decay_steps", ",", "decay_rate", ",", "staircase", "=", "False", ",", "name", "=", "None", ")", ":", "with", "ops", ".", "op_scope", "(", "[", "learning_rate", ",", "global_step", ",", "decay_rate", "]", ",", "name", ",", "\"NaturalExpDecay\"", ")", "as", "name", ":", "learning_rate", "=", "ops", ".", "convert_to_tensor", "(", "learning_rate", ",", "name", "=", "\"learning_rate\"", ")", "dtype", "=", "learning_rate", ".", "dtype", "global_step", "=", "math_ops", ".", "cast", "(", "global_step", ",", "dtype", ")", "decay_steps", "=", "math_ops", ".", "cast", "(", "decay_steps", ",", "dtype", ")", "decay_rate", "=", "math_ops", ".", "cast", "(", "decay_rate", ",", "dtype", ")", "p", "=", "global_step", "/", "decay_steps", "if", "staircase", ":", "p", "=", "math_ops", ".", "floor", "(", "p", ")", "exponent", "=", "math_ops", ".", "exp", "(", "math_ops", ".", "mul", "(", "math_ops", ".", "neg", "(", "decay_rate", ")", ",", "p", ")", ")", "return", "math_ops", ".", "mul", "(", "learning_rate", ",", "exponent", ",", "name", "=", "name", ")" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/training/learning_rate_decay.py#L244-L300
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py
python
SBExpressionOptions.GetPlaygroundTransformEnabled
(self)
return _lldb.SBExpressionOptions_GetPlaygroundTransformEnabled(self)
GetPlaygroundTransformEnabled(self) -> bool
GetPlaygroundTransformEnabled(self) -> bool
[ "GetPlaygroundTransformEnabled", "(", "self", ")", "-", ">", "bool" ]
def GetPlaygroundTransformEnabled(self): """GetPlaygroundTransformEnabled(self) -> bool""" return _lldb.SBExpressionOptions_GetPlaygroundTransformEnabled(self)
[ "def", "GetPlaygroundTransformEnabled", "(", "self", ")", ":", "return", "_lldb", ".", "SBExpressionOptions_GetPlaygroundTransformEnabled", "(", "self", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L4174-L4176
hyperledger-archives/iroha
ed579f85126d0e86532a1f4f1f6ce5681bbcd3a9
example/python/irohalib.py
python
IrohaGrpc.tx_status
(self, transaction)
return status_name, status_code, error_message
Request the status of a transaction :param transaction: the transaction whose status is to be queried :return: a tuple with the symbolic status description, integral status code, and error message string (will be empty if no error occurred)
Request the status of a transaction :param transaction: the transaction whose status is to be queried :return: a tuple with the symbolic status description, integral status code, and error message string (will be empty if no error occurred)
[ "Request", "the", "status", "of", "a", "transaction", ":", "param", "transaction", ":", "the", "transaction", "whose", "status", "is", "to", "be", "queried", ":", "return", ":", "a", "tuple", "with", "the", "symbolic", "status", "description", "integral", "status", "code", "and", "error", "message", "string", "(", "will", "be", "empty", "if", "no", "error", "occurred", ")" ]
def tx_status(self, transaction): """ Request the status of a transaction :param transaction: the transaction whose status is to be queried :return: a tuple with the symbolic status description, integral status code, and error message string (will be empty if no error occurred) """ request = endpoint_pb2.TxStatusRequest() request.tx_hash = binascii.hexlify(IrohaCrypto.hash(transaction)) response = self._command_service_stub.Status(request) status_code = response.tx_status status_name = endpoint_pb2.TxStatus.Name(response.tx_status) error_message = response.error_message return status_name, status_code, error_message
[ "def", "tx_status", "(", "self", ",", "transaction", ")", ":", "request", "=", "endpoint_pb2", ".", "TxStatusRequest", "(", ")", "request", ".", "tx_hash", "=", "binascii", ".", "hexlify", "(", "IrohaCrypto", ".", "hash", "(", "transaction", ")", ")", "response", "=", "self", ".", "_command_service_stub", ".", "Status", "(", "request", ")", "status_code", "=", "response", ".", "tx_status", "status_name", "=", "endpoint_pb2", ".", "TxStatus", ".", "Name", "(", "response", ".", "tx_status", ")", "error_message", "=", "response", ".", "error_message", "return", "status_name", ",", "status_code", ",", "error_message" ]
https://github.com/hyperledger-archives/iroha/blob/ed579f85126d0e86532a1f4f1f6ce5681bbcd3a9/example/python/irohalib.py#L350-L363
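Editor's note: a hypothetical usage sketch of the method above. The endpoint address and the `tx` object are placeholders for illustration and do not come from the record.

```python
# Hypothetical usage of IrohaGrpc.tx_status; address and transaction
# are placeholders, not values from the record above.
net = IrohaGrpc('127.0.0.1:50051')
status_name, status_code, error_message = net.tx_status(tx)
if error_message:
    print('transaction failed: %s (%s)' % (status_name, error_message))
else:
    print('transaction status: %s (code %d)' % (status_name, status_code))
```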
hfinkel/llvm-project-cxxjit
91084ef018240bbb8e24235ff5cd8c355a9c1a1e
libcxx/utils/libcxx/util.py
python
capture
(args, env=None)
return out
capture(command) - Run the given command (or argv list) in a shell and return the standard output. Raises a CalledProcessError if the command exits with a non-zero status.
capture(command) - Run the given command (or argv list) in a shell and return the standard output. Raises a CalledProcessError if the command exits with a non-zero status.
[ "capture", "(", "command", ")", "-", "Run", "the", "given", "command", "(", "or", "argv", "list", ")", "in", "a", "shell", "and", "return", "the", "standard", "output", ".", "Raises", "a", "CalledProcessError", "if", "the", "command", "exits", "with", "a", "non", "-", "zero", "status", "." ]
def capture(args, env=None): """capture(command) - Run the given command (or argv list) in a shell and return the standard output. Raises a CalledProcessError if the command exits with a non-zero status.""" p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out, err = p.communicate() out = convert_string(out) err = convert_string(err) if p.returncode != 0: raise subprocess.CalledProcessError(cmd=args, returncode=p.returncode, output="{}\n{}".format(out, err)) return out
[ "def", "capture", "(", "args", ",", "env", "=", "None", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "env", "=", "env", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "out", "=", "convert_string", "(", "out", ")", "err", "=", "convert_string", "(", "err", ")", "if", "p", ".", "returncode", "!=", "0", ":", "raise", "subprocess", ".", "CalledProcessError", "(", "cmd", "=", "args", ",", "returncode", "=", "p", ".", "returncode", ",", "output", "=", "\"{}\\n{}\"", ".", "format", "(", "out", ",", "err", ")", ")", "return", "out" ]
https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/libcxx/utils/libcxx/util.py#L84-L97
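Editor's note: a short usage sketch for the `capture` helper above, assuming it is importable; the command shown is arbitrary. Note how the helper folds both stdout and stderr into the exception's `output` field on failure.

```python
import subprocess

try:
    out = capture(['git', 'rev-parse', 'HEAD'])  # any argv list works
    print('commit:', out.strip())
except subprocess.CalledProcessError as exc:
    # capture() formats stdout and stderr into exc.output on failure
    print('command failed with status', exc.returncode)
    print(exc.output)
```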
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/distutils/command/check.py
python
check.warn
(self, msg)
return Command.warn(self, msg)
Counts the number of warnings that occur.
Counts the number of warnings that occur.
[ "Counts", "the", "number", "of", "warnings", "that", "occur", "." ]
def warn(self, msg): """Counts the number of warnings that occur.""" self._warnings += 1 return Command.warn(self, msg)
[ "def", "warn", "(", "self", ",", "msg", ")", ":", "self", ".", "_warnings", "+=", "1", "return", "Command", ".", "warn", "(", "self", ",", "msg", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/distutils/command/check.py#L61-L64
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/feature_column.py
python
_record_variable_scope_and_name
(embedding_var_name, embedding_var_name_in_fc, is_shared_embedding=False)
Add embedding variable name and scope to collection.
Add embedding variable name and scope to collection.
[ "Add", "embedding", "variable", "name", "and", "scope", "to", "collection", "." ]
def _record_variable_scope_and_name(embedding_var_name, embedding_var_name_in_fc, is_shared_embedding=False): """Add embedding variable name and scope to collection.""" g = ops.get_default_graph() collection = g.get_collection_ref(_TPU_FC_TO_SCOPE) if not collection: collection.append({}) var_def_dict = collection[0] captured_scope = variable_scope.get_variable_scope() captured_scope_name = captured_scope.name if embedding_var_name in var_def_dict: if (var_def_dict[embedding_var_name][0] != captured_scope_name and not is_shared_embedding): raise ValueError( 'For embedding var name {}, the variable scope name is different, ' 'got {}; expected {}'.format(embedding_var_name, captured_scope_name, var_def_dict[embedding_var_name][0])) if var_def_dict[embedding_var_name][1] != embedding_var_name_in_fc: raise ValueError( 'For embedding var name {}, the embedding name is different, ' 'got {}; expected {}'.format(embedding_var_name, embedding_var_name_in_fc, var_def_dict[embedding_var_name][1])) else: var_def_dict[embedding_var_name] = (captured_scope_name, embedding_var_name_in_fc)
[ "def", "_record_variable_scope_and_name", "(", "embedding_var_name", ",", "embedding_var_name_in_fc", ",", "is_shared_embedding", "=", "False", ")", ":", "g", "=", "ops", ".", "get_default_graph", "(", ")", "collection", "=", "g", ".", "get_collection_ref", "(", "_TPU_FC_TO_SCOPE", ")", "if", "not", "collection", ":", "collection", ".", "append", "(", "{", "}", ")", "var_def_dict", "=", "collection", "[", "0", "]", "captured_scope", "=", "variable_scope", ".", "get_variable_scope", "(", ")", "captured_scope_name", "=", "captured_scope", ".", "name", "if", "embedding_var_name", "in", "var_def_dict", ":", "if", "(", "var_def_dict", "[", "embedding_var_name", "]", "[", "0", "]", "!=", "captured_scope_name", "and", "not", "is_shared_embedding", ")", ":", "raise", "ValueError", "(", "'For embedding var name {}, the variable scope name is different, '", "'got {}; expected {}'", ".", "format", "(", "embedding_var_name", ",", "captured_scope_name", ",", "var_def_dict", "[", "embedding_var_name", "]", "[", "0", "]", ")", ")", "if", "var_def_dict", "[", "embedding_var_name", "]", "[", "1", "]", "!=", "embedding_var_name_in_fc", ":", "raise", "ValueError", "(", "'For embedding var name {}, the embedding name is different, '", "'got {}; expected {}'", ".", "format", "(", "embedding_var_name", ",", "embedding_var_name_in_fc", ",", "var_def_dict", "[", "embedding_var_name", "]", "[", "1", "]", ")", ")", "else", ":", "var_def_dict", "[", "embedding_var_name", "]", "=", "(", "captured_scope_name", ",", "embedding_var_name_in_fc", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/feature_column.py#L585-L615
ablab/quast
5f6709528129a6ad266a6b24ef3f40b88f0fe04b
quast_libs/busco/GeneSetAnalysis.py
python
GeneSetAnalysis.run_analysis
(self)
This function calls all needed steps for running the analysis.
This function calls all needed steps for running the analysis.
[ "This", "function", "calls", "all", "needed", "steps", "for", "running", "the", "analysis", "." ]
def run_analysis(self): """ This function calls all needed steps for running the analysis. """ super(GeneSetAnalysis, self).run_analysis() # validate sequence file if super(GeneSetAnalysis, self).check_protein_file() is False: GeneSetAnalysis._logger.error('Please provide a protein file as input') raise SystemExit self._load_score() self._load_length() self._run_hmmer() self._produce_short_summary() self.cleanup() if self._tarzip: self._run_tarzip_hmmer_output()
[ "def", "run_analysis", "(", "self", ")", ":", "super", "(", "GeneSetAnalysis", ",", "self", ")", ".", "run_analysis", "(", ")", "# validate sequence file", "if", "super", "(", "GeneSetAnalysis", ",", "self", ")", ".", "check_protein_file", "(", ")", "is", "False", ":", "GeneSetAnalysis", ".", "_logger", ".", "error", "(", "'Please provide a protein file as input'", ")", "raise", "SystemExit", "self", ".", "_load_score", "(", ")", "self", ".", "_load_length", "(", ")", "self", ".", "_run_hmmer", "(", ")", "self", ".", "_produce_short_summary", "(", ")", "self", ".", "cleanup", "(", ")", "if", "self", ".", "_tarzip", ":", "self", ".", "_run_tarzip_hmmer_output", "(", ")" ]
https://github.com/ablab/quast/blob/5f6709528129a6ad266a6b24ef3f40b88f0fe04b/quast_libs/busco/GeneSetAnalysis.py#L50-L65
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/setuptools/command/egg_info.py
python
FileList.prune
(self, dir)
return self._remove_files(match.match)
Filter out files from 'dir/'.
Filter out files from 'dir/'.
[ "Filter", "out", "files", "from", "dir", "/", "." ]
def prune(self, dir): """Filter out files from 'dir/'.""" match = translate_pattern(os.path.join(dir, '**')) return self._remove_files(match.match)
[ "def", "prune", "(", "self", ",", "dir", ")", ":", "match", "=", "translate_pattern", "(", "os", ".", "path", ".", "join", "(", "dir", ",", "'**'", ")", ")", "return", "self", ".", "_remove_files", "(", "match", ".", "match", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/setuptools/command/egg_info.py#L450-L453
microsoft/CNTK
e9396480025b9ca457d26b6f33dd07c474c6aa04
bindings/python/cntk/debugging/__init__.py
python
dump_signature
(root, tag=None)
Debug helper that prints the signature of a Function.
Debug helper that prints the signature of a Function.
[ "Debug", "helper", "that", "prints", "the", "signature", "of", "a", "Function", "." ]
def dump_signature(root, tag=None): ''' Debug helper that prints the signature of a Function. ''' print(str(root))
[ "def", "dump_signature", "(", "root", ",", "tag", "=", "None", ")", ":", "print", "(", "str", "(", "root", ")", ")" ]
https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/bindings/python/cntk/debugging/__init__.py#L29-L33
kushview/Element
1cc16380caa2ab79461246ba758b9de1f46db2a5
waflib/Tools/c_config.py
python
cxx_add_flags
(conf)
Adds CXXFLAGS / CPPFLAGS from os.environ to conf.env
Adds CXXFLAGS / CPPFLAGS from os.environ to conf.env
[ "Adds", "CXXFLAGS", "/", "CPPFLAGS", "from", "os", ".", "environ", "to", "conf", ".", "env" ]
def cxx_add_flags(conf): """ Adds CXXFLAGS / CPPFLAGS from os.environ to conf.env """ conf.add_os_flags('CPPFLAGS', dup=False) conf.add_os_flags('CXXFLAGS', dup=False)
[ "def", "cxx_add_flags", "(", "conf", ")", ":", "conf", ".", "add_os_flags", "(", "'CPPFLAGS'", ",", "dup", "=", "False", ")", "conf", ".", "add_os_flags", "(", "'CXXFLAGS'", ",", "dup", "=", "False", ")" ]
https://github.com/kushview/Element/blob/1cc16380caa2ab79461246ba758b9de1f46db2a5/waflib/Tools/c_config.py#L985-L990
GeometryCollective/boundary-first-flattening
8250e5a0e85980ec50b5e8aa8f49dd6519f915cd
deps/nanogui/ext/pybind11/tools/clang/cindex.py
python
CompileCommand.directory
(self)
return conf.lib.clang_CompileCommand_getDirectory(self.cmd)
Get the working directory for this CompileCommand
Get the working directory for this CompileCommand
[ "Get", "the", "working", "directory", "for", "this", "CompileCommand" ]
def directory(self): """Get the working directory for this CompileCommand""" return conf.lib.clang_CompileCommand_getDirectory(self.cmd)
[ "def", "directory", "(", "self", ")", ":", "return", "conf", ".", "lib", ".", "clang_CompileCommand_getDirectory", "(", "self", ".", "cmd", ")" ]
https://github.com/GeometryCollective/boundary-first-flattening/blob/8250e5a0e85980ec50b5e8aa8f49dd6519f915cd/deps/nanogui/ext/pybind11/tools/clang/cindex.py#L2759-L2761
SFTtech/openage
d6a08c53c48dc1e157807471df92197f6ca9e04d
openage/util/profiler.py
python
Tracemalloc.report
(self, sortby='lineno', cumulative=True, limit=100)
Print the snapshot statistics to the console.
Print the snapshot statistics to the console.
[ "Print", "the", "snapshot", "statistics", "to", "the", "console", "." ]
def report(self, sortby='lineno', cumulative=True, limit=100): """ Print the snapshot statistics to the console. """ for stat in self.snapshot.statistics(sortby, cumulative)[:limit]: print(stat)
[ "def", "report", "(", "self", ",", "sortby", "=", "'lineno'", ",", "cumulative", "=", "True", ",", "limit", "=", "100", ")", ":", "for", "stat", "in", "self", ".", "snapshot", ".", "statistics", "(", "sortby", ",", "cumulative", ")", "[", ":", "limit", "]", ":", "print", "(", "stat", ")" ]
https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/util/profiler.py#L108-L113
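Editor's note: the class above wraps the standard-library `tracemalloc` module; the sketch below produces the same kind of report using only the stdlib. The list comprehension exists only to create some allocations to observe.

```python
import tracemalloc

tracemalloc.start()
data = [bytes(1000) for _ in range(100)]  # illustrative allocations
snapshot = tracemalloc.take_snapshot()
# Same call shape as the record: statistics(sortby, cumulative)[:limit]
for stat in snapshot.statistics('lineno', cumulative=True)[:5]:
    print(stat)
```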
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/contrib/slim/python/slim/model_analyzer.py
python
analyze_ops
(graph, print_info=False)
return total_size
Compute the estimated size of the ops.outputs in the graph. Args: graph: the graph containing the operations. print_info: Optional, if true print ops and their outputs. Returns: total size of the ops.outputs
Compute the estimated size of the ops.outputs in the graph.
[ "Compute", "the", "estimated", "size", "of", "the", "ops", ".", "outputs", "in", "the", "graph", "." ]
def analyze_ops(graph, print_info=False): """Compute the estimated size of the ops.outputs in the graph. Args: graph: the graph containing the operations. print_info: Optional, if true print ops and their outputs. Returns: total size of the ops.outputs """ if print_info: print('---------') print('Operations: name -> (type shapes) [size]') print('---------') total_size = 0 for op in graph.get_operations(): op_size = 0 shapes = [] for output in op.outputs: # if output.num_elements() is None or [] assume size 0. output_size = output.get_shape().num_elements() or 0 if output.get_shape(): shapes.append(tensor_description(output)) op_size += output_size if print_info: print(op.name, '\t->', ', '.join(shapes), '[' + str(op_size) + ']') total_size += op_size return total_size
[ "def", "analyze_ops", "(", "graph", ",", "print_info", "=", "False", ")", ":", "if", "print_info", ":", "print", "(", "'---------'", ")", "print", "(", "'Operations: name -> (type shapes) [size]'", ")", "print", "(", "'---------'", ")", "total_size", "=", "0", "for", "op", "in", "graph", ".", "get_operations", "(", ")", ":", "op_size", "=", "0", "shapes", "=", "[", "]", "for", "output", "in", "op", ".", "outputs", ":", "# if output.num_elements() is None or [] assume size 0.", "output_size", "=", "output", ".", "get_shape", "(", ")", ".", "num_elements", "(", ")", "or", "0", "if", "output", ".", "get_shape", "(", ")", ":", "shapes", ".", "append", "(", "tensor_description", "(", "output", ")", ")", "op_size", "+=", "output_size", "if", "print_info", ":", "print", "(", "op", ".", "name", ",", "'\\t->'", ",", "', '", ".", "join", "(", "shapes", ")", ",", "'['", "+", "str", "(", "op_size", ")", "+", "']'", ")", "total_size", "+=", "op_size", "return", "total_size" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/slim/python/slim/model_analyzer.py#L53-L80
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
samples/ide/activegrid/tool/AbstractEditor.py
python
EditorCanvasShapeEvtHandler.OnMovePre
(self, dc, x, y, oldX, oldY, display)
return ogl.ShapeEvtHandler.OnMovePre(self, dc, x, y, oldX, oldY, display)
Prevent objects from being dragged outside of viewable area
Prevent objects from being dragged outside of viewable area
[ "Prevent", "objects", "from", "being", "dragged", "outside", "of", "viewable", "area" ]
def OnMovePre(self, dc, x, y, oldX, oldY, display): """ Prevent objects from being dragged outside of viewable area """ if (x < 0) or (y < 0) or (x > self._view._maxWidth) or (y > self._view._maxHeight): return False return ogl.ShapeEvtHandler.OnMovePre(self, dc, x, y, oldX, oldY, display)
[ "def", "OnMovePre", "(", "self", ",", "dc", ",", "x", ",", "y", ",", "oldX", ",", "oldY", ",", "display", ")", ":", "if", "(", "x", "<", "0", ")", "or", "(", "y", "<", "0", ")", "or", "(", "x", ">", "self", ".", "_view", ".", "_maxWidth", ")", "or", "(", "y", ">", "self", ".", "_view", ".", "_maxHeight", ")", ":", "return", "False", "return", "ogl", ".", "ShapeEvtHandler", ".", "OnMovePre", "(", "self", ",", "dc", ",", "x", ",", "y", ",", "oldX", ",", "oldY", ",", "display", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/samples/ide/activegrid/tool/AbstractEditor.py#L814-L819
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/array_analysis.py
python
EquivSet.insert_equiv
(self, *objs)
return self._insert(objs)
Insert a set of equivalent objects by modifying self. This method can be overloaded to transform object type before insertion.
Insert a set of equivalent objects by modifying self. This method can be overloaded to transform object type before insertion.
[ "Insert", "a", "set", "of", "equivalent", "objects", "by", "modifying", "self", ".", "This", "method", "can", "be", "overloaded", "to", "transform", "object", "type", "before", "insertion", "." ]
def insert_equiv(self, *objs): """Insert a set of equivalent objects by modifying self. This method can be overloaded to transform object type before insertion. """ return self._insert(objs)
[ "def", "insert_equiv", "(", "self", ",", "*", "objs", ")", ":", "return", "self", ".", "_insert", "(", "objs", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/array_analysis.py#L279-L283
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/fft/_pocketfft.py
python
irfft2
(a, s=None, axes=(-2, -1), norm=None)
return irfftn(a, s, axes, norm)
Compute the 2-dimensional inverse FFT of a real array. Parameters ---------- a : array_like The input array s : sequence of ints, optional Shape of the real output to the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The result of the inverse real 2-D FFT. See Also -------- irfftn : Compute the inverse of the N-dimensional FFT of real input. Notes ----- This is really `irfftn` with different defaults. For more details see `irfftn`.
Compute the 2-dimensional inverse FFT of a real array.
[ "Compute", "the", "2", "-", "dimensional", "inverse", "FFT", "of", "a", "real", "array", "." ]
def irfft2(a, s=None, axes=(-2, -1), norm=None): """ Compute the 2-dimensional inverse FFT of a real array. Parameters ---------- a : array_like The input array s : sequence of ints, optional Shape of the real output to the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The result of the inverse real 2-D FFT. See Also -------- irfftn : Compute the inverse of the N-dimensional FFT of real input. Notes ----- This is really `irfftn` with different defaults. For more details see `irfftn`. """ return irfftn(a, s, axes, norm)
[ "def", "irfft2", "(", "a", ",", "s", "=", "None", ",", "axes", "=", "(", "-", "2", ",", "-", "1", ")", ",", "norm", "=", "None", ")", ":", "return", "irfftn", "(", "a", ",", "s", ",", "axes", ",", "norm", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/fft/_pocketfft.py#L1273-L1307
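Editor's note: a quick round-trip check of the function documented above, using the public `numpy.fft` entry points.

```python
import numpy as np

a = np.random.rand(6, 8)
spectrum = np.fft.rfft2(a)                      # real-input 2-D FFT
recovered = np.fft.irfft2(spectrum, s=a.shape)  # shape hint avoids ambiguity
print(np.allclose(a, recovered))                # True, up to rounding error
```

Passing `s=a.shape` matters because the real transform discards the redundant half of the spectrum, so the original length along the last axis cannot always be inferred.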
SFTtech/openage
d6a08c53c48dc1e157807471df92197f6ca9e04d
openage/convert/service/read/string_resource.py
python
read_age2_hd_3x_stringresources
(stringres, srcdir)
return count
HD Edition 3.x and below store language .txt files in the Bin/ folder. Specific language strings are in Bin/$LANG/*.txt. The data is stored in the `stringres` storage.
HD Edition 3.x and below store language .txt files in the Bin/ folder. Specific language strings are in Bin/$LANG/*.txt.
[ "HD", "Edition", "3", ".", "x", "and", "below", "store", "language", ".", "txt", "files", "in", "the", "Bin", "/", "folder", ".", "Specific", "language", "strings", "are", "in", "Bin", "/", "$LANG", "/", "*", ".", "txt", "." ]
def read_age2_hd_3x_stringresources(stringres, srcdir): """ HD Edition 3.x and below store language .txt files in the Bin/ folder. Specific language strings are in Bin/$LANG/*.txt. The data is stored in the `stringres` storage. """ count = 0 for lang in srcdir["bin"].list(): lang_path = srcdir["bin", lang.decode()] # There are some .txt files immediately in bin/, but they don't # seem to contain anything useful. (Everything is overridden by # files in Bin/$LANG/.) if not lang_path.is_dir(): continue # Sometimes we can have language DLLs in Bin/$LANG/ # e.g. HD Edition 2.0 # We do _not_ want to treat these as text files # so first check explicitly if lang_path["language.dll"].is_file(): for name in ["language.dll", "language_x1.dll", "language_x1_p1.dll"]: pefile = PEFile(lang_path[name].open('rb')) stringres.fill_from(pefile.resources().strings) count += 1 else: for basename in lang_path.list(): with lang_path[basename].open('rb') as langfile: # No utf-8 :( stringres.fill_from( read_hd_language_file_old( langfile, lang, enc='iso-8859-1')) count += 1 return count
[ "def", "read_age2_hd_3x_stringresources", "(", "stringres", ",", "srcdir", ")", ":", "count", "=", "0", "for", "lang", "in", "srcdir", "[", "\"bin\"", "]", ".", "list", "(", ")", ":", "lang_path", "=", "srcdir", "[", "\"bin\"", ",", "lang", ".", "decode", "(", ")", "]", "# There are some .txt files immediately in bin/, but they don't", "# seem to contain anything useful. (Everything is overridden by", "# files in Bin/$LANG/.)", "if", "not", "lang_path", ".", "is_dir", "(", ")", ":", "continue", "# Sometimes we can have language DLLs in Bin/$LANG/", "# e.g. HD Edition 2.0", "# We do _not_ want to treat these as text files", "# so first check explicitly", "if", "lang_path", "[", "\"language.dll\"", "]", ".", "is_file", "(", ")", ":", "for", "name", "in", "[", "\"language.dll\"", ",", "\"language_x1.dll\"", ",", "\"language_x1_p1.dll\"", "]", ":", "pefile", "=", "PEFile", "(", "lang_path", "[", "name", "]", ".", "open", "(", "'rb'", ")", ")", "stringres", ".", "fill_from", "(", "pefile", ".", "resources", "(", ")", ".", "strings", ")", "count", "+=", "1", "else", ":", "for", "basename", "in", "lang_path", ".", "list", "(", ")", ":", "with", "lang_path", "[", "basename", "]", ".", "open", "(", "'rb'", ")", "as", "langfile", ":", "# No utf-8 :(", "stringres", ".", "fill_from", "(", "read_hd_language_file_old", "(", "langfile", ",", "lang", ",", "enc", "=", "'iso-8859-1'", ")", ")", "count", "+=", "1", "return", "count" ]
https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/service/read/string_resource.py#L94-L137
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/fluid/contrib/decoder/beam_search_decoder.py
python
BeamSearchDecoder.read_array
(self, init, is_ids=False, is_scores=False)
return read_value
Read an array to get the decoded ids and scores generated by previous RNN step. At the first step of RNN, the init variable must be used to initialize the array. Args: init (Variable): The initial variable for first step usage. init must be provided. is_ids (bool): Specify whether the variable is an id. is_scores (bool): Specify whether the variable is a score. Returns: The associated variable generated during previous RNN steps. Examples: .. code-block:: python prev_ids = decoder.read_array(init=init_ids, is_ids=True) prev_scores = decoder.read_array(init=init_scores, is_scores=True)
Read an array to get the decoded ids and scores generated by previous RNN step. At the first step of RNN, the init variable must be used to initialize the array.
[ "Read", "an", "array", "to", "get", "the", "decoded", "ids", "and", "scores", "generated", "by", "previous", "RNN", "step", ".", "At", "the", "first", "step", "of", "RNN", "the", "init", "variable", "must", "be", "used", "to", "initialize", "the", "array", "." ]
def read_array(self, init, is_ids=False, is_scores=False): """ Read an array to get the decoded ids and scores generated by previous RNN step. At the first step of RNN, the init variable must be used to initialize the array. Args: init (Variable): The initial variable for first step usage. init must be provided. is_ids (bool): Specify whether the variable is an id. is_scores (bool): Specify whether the variable is a score. Returns: The associated variable generated during previous RNN steps. Examples: .. code-block:: python prev_ids = decoder.read_array(init=init_ids, is_ids=True) prev_scores = decoder.read_array(init=init_scores, is_scores=True) """ self._assert_in_decoder_block('read_array') if is_ids and is_scores: raise ValueError('Shouldn\'t mark current array be ids array and' 'scores array at the same time.') if not isinstance(init, Variable): raise TypeError('The input argument `init` must be a Variable.') parent_block = self._parent_block() array = parent_block.create_var( name=unique_name.generate('beam_search_decoder_array'), type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, dtype=init.dtype) parent_block.append_op( type='write_to_array', inputs={'X': init, 'I': self._zero_idx}, outputs={'Out': array}) if is_ids: self._ids_array = array elif is_scores: self._scores_array = array read_value = layers.array_read(array=array, i=self._counter) self._array_dict[read_value.name] = array return read_value
[ "def", "read_array", "(", "self", ",", "init", ",", "is_ids", "=", "False", ",", "is_scores", "=", "False", ")", ":", "self", ".", "_assert_in_decoder_block", "(", "'read_array'", ")", "if", "is_ids", "and", "is_scores", ":", "raise", "ValueError", "(", "'Shouldn\\'t mark current array be ids array and'", "'scores array at the same time.'", ")", "if", "not", "isinstance", "(", "init", ",", "Variable", ")", ":", "raise", "TypeError", "(", "'The input argument `init` must be a Variable.'", ")", "parent_block", "=", "self", ".", "_parent_block", "(", ")", "array", "=", "parent_block", ".", "create_var", "(", "name", "=", "unique_name", ".", "generate", "(", "'beam_search_decoder_array'", ")", ",", "type", "=", "core", ".", "VarDesc", ".", "VarType", ".", "LOD_TENSOR_ARRAY", ",", "dtype", "=", "init", ".", "dtype", ")", "parent_block", ".", "append_op", "(", "type", "=", "'write_to_array'", ",", "inputs", "=", "{", "'X'", ":", "init", ",", "'I'", ":", "self", ".", "_zero_idx", "}", ",", "outputs", "=", "{", "'Out'", ":", "array", "}", ")", "if", "is_ids", ":", "self", ".", "_ids_array", "=", "array", "elif", "is_scores", ":", "self", ".", "_scores_array", "=", "array", "read_value", "=", "layers", ".", "array_read", "(", "array", "=", "array", ",", "i", "=", "self", ".", "_counter", ")", "self", ".", "_array_dict", "[", "read_value", ".", "name", "]", "=", "array", "return", "read_value" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/contrib/decoder/beam_search_decoder.py#L733-L780
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_misc.py
python
DateTime.Set
(*args, **kwargs)
return _misc_.DateTime_Set(*args, **kwargs)
Set(self, int day, int month=Inv_Month, int year=Inv_Year, int hour=0, int minute=0, int second=0, int millisec=0) -> DateTime
Set(self, int day, int month=Inv_Month, int year=Inv_Year, int hour=0, int minute=0, int second=0, int millisec=0) -> DateTime
[ "Set", "(", "self", "int", "day", "int", "month", "=", "Inv_Month", "int", "year", "=", "Inv_Year", "int", "hour", "=", "0", "int", "minute", "=", "0", "int", "second", "=", "0", "int", "millisec", "=", "0", ")", "-", ">", "DateTime" ]
def Set(*args, **kwargs): """ Set(self, int day, int month=Inv_Month, int year=Inv_Year, int hour=0, int minute=0, int second=0, int millisec=0) -> DateTime """ return _misc_.DateTime_Set(*args, **kwargs)
[ "def", "Set", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "DateTime_Set", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_misc.py#L3802-L3807
apache/singa
93fd9da72694e68bfe3fb29d0183a65263d238a1
python/singa/image_tool.py
python
ImageTool.color_cast
(self, offset=20, inplace=True)
Add a random value from [-offset, offset] to each channel Args: offset: cast offset, >0 and <255 inplace: whether to modify the images in place; if False, return the new images
Add a random value from [-offset, offset] to each channel
[ "Add", "a", "random", "value", "from", "[", "-", "offset", "offset", "]", "to", "each", "channel" ]
def color_cast(self, offset=20, inplace=True): '''Add a random value from [-offset, offset] to each channel Args: offset: cast offset, >0 and <255 inplace: whether to modify the images in place; if False, return the new images ''' new_imgs = [] if offset < 0 or offset > 255: raise Exception('offset must be >0 and <255') for img in self.imgs: new_img = color_cast(img, offset) new_imgs.append(new_img) if inplace: self.imgs = new_imgs return self else: return new_imgs
[ "def", "color_cast", "(", "self", ",", "offset", "=", "20", ",", "inplace", "=", "True", ")", ":", "new_imgs", "=", "[", "]", "if", "offset", "<", "0", "or", "offset", ">", "255", ":", "raise", "Exception", "(", "'offset must be >0 and <255'", ")", "for", "img", "in", "self", ".", "imgs", ":", "new_img", "=", "color_cast", "(", "img", ",", "offset", ")", "new_imgs", ".", "append", "(", "new_img", ")", "if", "inplace", ":", "self", ".", "imgs", "=", "new_imgs", "return", "self", "else", ":", "return", "new_imgs" ]
https://github.com/apache/singa/blob/93fd9da72694e68bfe3fb29d0183a65263d238a1/python/singa/image_tool.py#L594-L612
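Editor's note: a minimal numpy sketch of the per-channel cast the docstring describes. It stands in for the module-level `color_cast` helper the method delegates to, which is not shown in this record; the input image is a made-up constant array.

```python
import numpy as np

def color_cast_sketch(img, offset=20):
    # img: H x W x C uint8 array; draw one random shift per channel.
    rng = np.random.default_rng()
    shifts = rng.integers(-offset, offset + 1, size=img.shape[-1])
    out = img.astype(np.int16) + shifts           # widen to avoid wrap-around
    return np.clip(out, 0, 255).astype(np.uint8)  # keep valid pixel range

img = np.full((4, 4, 3), 128, dtype=np.uint8)
print(color_cast_sketch(img)[0, 0])  # e.g. [117 139 122]
```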
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/compare-version-numbers.py
python
Solution2.compareVersion2
(self, version1, version2)
return cmp(v1, v2)
:type version1: str :type version2: str :rtype: int
:type version1: str :type version2: str :rtype: int
[ ":", "type", "version1", ":", "str", ":", "type", "version2", ":", "str", ":", "rtype", ":", "int" ]
def compareVersion2(self, version1, version2): """ :type version1: str :type version2: str :rtype: int """ v1 = [int(x) for x in version1.split('.')] v2 = [int(x) for x in version2.split('.')] while len(v1) != len(v2): if len(v1) > len(v2): v2.append(0) else: v1.append(0) return cmp(v1, v2)
[ "def", "compareVersion2", "(", "self", ",", "version1", ",", "version2", ")", ":", "v1", "=", "[", "int", "(", "x", ")", "for", "x", "in", "version1", ".", "split", "(", "'.'", ")", "]", "v2", "=", "[", "int", "(", "x", ")", "for", "x", "in", "version2", ".", "split", "(", "'.'", ")", "]", "while", "len", "(", "v1", ")", "!=", "len", "(", "v2", ")", ":", "if", "len", "(", "v1", ")", ">", "len", "(", "v2", ")", ":", "v2", ".", "append", "(", "0", ")", "else", ":", "v1", ".", "append", "(", "0", ")", "return", "cmp", "(", "v1", ",", "v2", ")" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/compare-version-numbers.py#L60-L73
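Editor's note: the body above relies on Python 2's `cmp` builtin; below is an equivalent Python 3 sketch of the same zero-padding idea, using the usual `(a > b) - (a < b)` replacement for `cmp`.

```python
def compare_version(version1, version2):
    v1 = [int(x) for x in version1.split('.')]
    v2 = [int(x) for x in version2.split('.')]
    # Pad the shorter list with zeros so '1.0' compares equal to '1.0.0'.
    width = max(len(v1), len(v2))
    v1 += [0] * (width - len(v1))
    v2 += [0] * (width - len(v2))
    return (v1 > v2) - (v1 < v2)

print(compare_version('1.2', '1.10'))   # -1: numeric, not lexicographic
print(compare_version('1.0', '1.0.0'))  # 0
```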
livecode/livecode
4606a10ea10b16d5071d0f9f263ccdd7ede8b31d
gyp/pylib/gyp/MSVSSettings.py
python
ConvertVCMacrosToMSBuild
(s)
return s
Convert the MSVS macros found in the string to the MSBuild equivalent. This list is probably not exhaustive. Add as needed.
Convert the MSVS macros found in the string to the MSBuild equivalent.
[ "Convert", "the", "MSVS", "macros", "found", "in", "the", "string", "to", "the", "MSBuild", "equivalent", "." ]
def ConvertVCMacrosToMSBuild(s): """Convert the MSVS macros found in the string to the MSBuild equivalent. This list is probably not exhaustive. Add as needed. """ if '$' in s: replace_map = { '$(ConfigurationName)': '$(Configuration)', '$(InputDir)': '%(RelativeDir)', '$(InputExt)': '%(Extension)', '$(InputFileName)': '%(Filename)%(Extension)', '$(InputName)': '%(Filename)', '$(InputPath)': '%(Identity)', '$(ParentName)': '$(ProjectFileName)', '$(PlatformName)': '$(Platform)', '$(SafeInputName)': '%(Filename)', } for old, new in replace_map.iteritems(): s = s.replace(old, new) s = FixVCMacroSlashes(s) return s
[ "def", "ConvertVCMacrosToMSBuild", "(", "s", ")", ":", "if", "'$'", "in", "s", ":", "replace_map", "=", "{", "'$(ConfigurationName)'", ":", "'$(Configuration)'", ",", "'$(InputDir)'", ":", "'%(RelativeDir)'", ",", "'$(InputExt)'", ":", "'%(Extension)'", ",", "'$(InputFileName)'", ":", "'%(Filename)%(Extension)'", ",", "'$(InputName)'", ":", "'%(Filename)'", ",", "'$(InputPath)'", ":", "'%(Identity)'", ",", "'$(ParentName)'", ":", "'$(ProjectFileName)'", ",", "'$(PlatformName)'", ":", "'$(Platform)'", ",", "'$(SafeInputName)'", ":", "'%(Filename)'", ",", "}", "for", "old", ",", "new", "in", "replace_map", ".", "iteritems", "(", ")", ":", "s", "=", "s", ".", "replace", "(", "old", ",", "new", ")", "s", "=", "FixVCMacroSlashes", "(", "s", ")", "return", "s" ]
https://github.com/livecode/livecode/blob/4606a10ea10b16d5071d0f9f263ccdd7ede8b31d/gyp/pylib/gyp/MSVSSettings.py#L419-L439
ideawu/ssdb
f229ba277c7f7d0ca5a441c0c6fb3d1209af68e4
deps/cpy/antlr3/tree.py
python
CommonTreeNodeStream.push
(self, index)
Make stream jump to a new location, saving old location. Switch back with pop().
Make stream jump to a new location, saving old location. Switch back with pop().
[ "Make", "stream", "jump", "to", "a", "new", "location", "saving", "old", "location", ".", "Switch", "back", "with", "pop", "()", "." ]
def push(self, index): """ Make stream jump to a new location, saving old location. Switch back with pop(). """ self.calls.append(self.p) # save current index self.seek(index)
[ "def", "push", "(", "self", ",", "index", ")", ":", "self", ".", "calls", ".", "append", "(", "self", ".", "p", ")", "# save current index", "self", ".", "seek", "(", "index", ")" ]
https://github.com/ideawu/ssdb/blob/f229ba277c7f7d0ca5a441c0c6fb3d1209af68e4/deps/cpy/antlr3/tree.py#L1909-L1916
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/profiler/internal/flops_registry.py
python
_l2_loss_flops
(graph, node)
return ops.OpStats("flops", in_shape.num_elements() * 3 - 1)
Compute flops for L2Loss operation.
Compute flops for L2Loss operation.
[ "Compute", "flops", "for", "L2Loss", "operation", "." ]
def _l2_loss_flops(graph, node): """Compute flops for L2Loss operation.""" in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0]) in_shape.assert_is_fully_defined() # Tensorflow uses inefficient implementation, with (3*N-1) flops: # Optimal implementation is 2*N flops return ops.OpStats("flops", in_shape.num_elements() * 3 - 1)
[ "def", "_l2_loss_flops", "(", "graph", ",", "node", ")", ":", "in_shape", "=", "graph_util", ".", "tensor_shape_from_node_def_name", "(", "graph", ",", "node", ".", "input", "[", "0", "]", ")", "in_shape", ".", "assert_is_fully_defined", "(", ")", "# Tensorflow uses inefficient implementation, with (3*N-1) flops:", "# Optimal implementation is 2*N flops", "return", "ops", ".", "OpStats", "(", "\"flops\"", ",", "in_shape", ".", "num_elements", "(", ")", "*", "3", "-", "1", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/profiler/internal/flops_registry.py#L115-L121
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/propgrid.py
python
FileProperty_GetClassValidator
(*args)
return _propgrid.FileProperty_GetClassValidator(*args)
FileProperty_GetClassValidator() -> Validator
FileProperty_GetClassValidator() -> Validator
[ "FileProperty_GetClassValidator", "()", "-", ">", "Validator" ]
def FileProperty_GetClassValidator(*args): """FileProperty_GetClassValidator() -> Validator""" return _propgrid.FileProperty_GetClassValidator(*args)
[ "def", "FileProperty_GetClassValidator", "(", "*", "args", ")", ":", "return", "_propgrid", ".", "FileProperty_GetClassValidator", "(", "*", "args", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/propgrid.py#L3086-L3088
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
gpu/command_buffer/build_gles2_cmd_buffer.py
python
StateSetNamedParameter.WriteHandlerImplementation
(self, func, file)
Overridden from TypeHandler.
Overridden from TypeHandler.
[ "Overridden", "from", "TypeHandler", "." ]
def WriteHandlerImplementation(self, func, file): """Overridden from TypeHandler.""" state_name = func.GetInfo('state') state = _STATES[state_name] states = state['states'] args = func.GetOriginalArgs() num_args = len(args) assert num_args == 2 file.Write(" switch (%s) {\n" % args[0].name) for state in states: file.Write(" case %s:\n" % state['enum']) file.Write(" if (state_.%s != %s) {\n" % (state['name'], args[1].name)) file.Write(" state_.%s = %s;\n" % (state['name'], args[1].name)) if not func.GetInfo("no_gl"): file.Write(" %s(%s);\n" % (func.GetGLFunctionName(), func.MakeOriginalArgString(""))) file.Write(" }\n") file.Write(" break;\n") file.Write(" default:\n") file.Write(" NOTREACHED();\n") file.Write(" }\n")
[ "def", "WriteHandlerImplementation", "(", "self", ",", "func", ",", "file", ")", ":", "state_name", "=", "func", ".", "GetInfo", "(", "'state'", ")", "state", "=", "_STATES", "[", "state_name", "]", "states", "=", "state", "[", "'states'", "]", "args", "=", "func", ".", "GetOriginalArgs", "(", ")", "num_args", "=", "len", "(", "args", ")", "assert", "num_args", "==", "2", "file", ".", "Write", "(", "\" switch (%s) {\\n\"", "%", "args", "[", "0", "]", ".", "name", ")", "for", "state", "in", "states", ":", "file", ".", "Write", "(", "\" case %s:\\n\"", "%", "state", "[", "'enum'", "]", ")", "file", ".", "Write", "(", "\" if (state_.%s != %s) {\\n\"", "%", "(", "state", "[", "'name'", "]", ",", "args", "[", "1", "]", ".", "name", ")", ")", "file", ".", "Write", "(", "\" state_.%s = %s;\\n\"", "%", "(", "state", "[", "'name'", "]", ",", "args", "[", "1", "]", ".", "name", ")", ")", "if", "not", "func", ".", "GetInfo", "(", "\"no_gl\"", ")", ":", "file", ".", "Write", "(", "\" %s(%s);\\n\"", "%", "(", "func", ".", "GetGLFunctionName", "(", ")", ",", "func", ".", "MakeOriginalArgString", "(", "\"\"", ")", ")", ")", "file", ".", "Write", "(", "\" }\\n\"", ")", "file", ".", "Write", "(", "\" break;\\n\"", ")", "file", ".", "Write", "(", "\" default:\\n\"", ")", "file", ".", "Write", "(", "\" NOTREACHED();\\n\"", ")", "file", ".", "Write", "(", "\" }\\n\"", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/gpu/command_buffer/build_gles2_cmd_buffer.py#L3400-L3421
y123456yz/reading-and-annotate-mongodb-3.6
93280293672ca7586dc24af18132aa61e4ed7fcf
mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/SConsign.py
python
Base.get_entry
(self, filename)
return self.entries[filename]
Fetch the specified entry attribute.
Fetch the specified entry attribute.
[ "Fetch", "the", "specified", "entry", "attribute", "." ]
def get_entry(self, filename): """ Fetch the specified entry attribute. """ return self.entries[filename]
[ "def", "get_entry", "(", "self", ",", "filename", ")", ":", "return", "self", ".", "entries", "[", "filename", "]" ]
https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/SConsign.py#L173-L177
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/boto/boto/mws/connection.py
python
MWSConnection.list_carts
(self, request, response, **kw)
return self._post_request(request, kw, response)
Returns a list of shopping carts in your Webstore that were last updated during the time range that you specify.
Returns a list of shopping carts in your Webstore that were last updated during the time range that you specify.
[ "Returns", "a", "list", "of", "shopping", "carts", "in", "your", "Webstore", "that", "were", "last", "updated", "during", "the", "time", "range", "that", "you", "specify", "." ]
def list_carts(self, request, response, **kw): """Returns a list of shopping carts in your Webstore that were last updated during the time range that you specify. """ return self._post_request(request, kw, response)
[ "def", "list_carts", "(", "self", ",", "request", ",", "response", ",", "*", "*", "kw", ")", ":", "return", "self", ".", "_post_request", "(", "request", ",", "kw", ",", "response", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/mws/connection.py#L956-L960
raymondlu/super-animation-samples
04234269112ff0dc32447f27a761dbbb00b8ba17
samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py
python
CursorKind.is_preprocessing
(self)
return conf.lib.clang_isPreprocessing(self)
Test if this is a preprocessing kind.
Test if this is a preprocessing kind.
[ "Test", "if", "this", "is", "a", "preprocessing", "kind", "." ]
def is_preprocessing(self): """Test if this is a preprocessing kind.""" return conf.lib.clang_isPreprocessing(self)
[ "def", "is_preprocessing", "(", "self", ")", ":", "return", "conf", ".", "lib", ".", "clang_isPreprocessing", "(", "self", ")" ]
https://github.com/raymondlu/super-animation-samples/blob/04234269112ff0dc32447f27a761dbbb00b8ba17/samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py#L660-L662
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/stc.py
python
StyledTextCtrl.SetHScrollBar
(*args, **kwargs)
return _stc.StyledTextCtrl_SetHScrollBar(*args, **kwargs)
SetHScrollBar(self, ScrollBar bar) Set the horizontal scrollbar to use instead of the one that's built-in.
SetHScrollBar(self, ScrollBar bar)
[ "SetHScrollBar", "(", "self", "ScrollBar", "bar", ")" ]
def SetHScrollBar(*args, **kwargs): """ SetHScrollBar(self, ScrollBar bar) Set the horizontal scrollbar to use instead of the one that's built-in. """ return _stc.StyledTextCtrl_SetHScrollBar(*args, **kwargs)
[ "def", "SetHScrollBar", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_SetHScrollBar", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/stc.py#L6637-L6643
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/prompt-toolkit/py2/prompt_toolkit/contrib/telnet/server.py
python
TelnetServer._handle_incoming_data
(self, conn)
Handle incoming data on socket.
Handle incoming data on socket.
[ "Handle", "incoming", "data", "on", "socket", "." ]
def _handle_incoming_data(self, conn): """ Handle incoming data on socket. """ connection = [c for c in self.connections if c.conn == conn][0] data = conn.recv(1024) if data: connection.feed(data) else: self.connections.remove(connection)
[ "def", "_handle_incoming_data", "(", "self", ",", "conn", ")", ":", "connection", "=", "[", "c", "for", "c", "in", "self", ".", "connections", "if", "c", ".", "conn", "==", "conn", "]", "[", "0", "]", "data", "=", "conn", ".", "recv", "(", "1024", ")", "if", "data", ":", "connection", ".", "feed", "(", "data", ")", "else", ":", "self", ".", "connections", ".", "remove", "(", "connection", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py2/prompt_toolkit/contrib/telnet/server.py#L398-L407
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/ftplib.py
python
parse229
(resp, peer)
return host, port
Parse the '229' response for a EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.
Parse the '229' response for a EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.
[ "Parse", "the", "229", "response", "for", "a", "EPSV", "request", ".", "Raises", "error_proto", "if", "it", "does", "not", "contain", "(", "|||port|", ")", "Return", "(", "host", ".", "addr", ".", "as", ".", "numbers", "port#", ")", "tuple", "." ]
def parse229(resp, peer): '''Parse the '229' response for a EPSV request. Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' if resp[:3] != '229': raise error_reply, resp left = resp.find('(') if left < 0: raise error_proto, resp right = resp.find(')', left + 1) if right < 0: raise error_proto, resp # should contain '(|||port|)' if resp[left + 1] != resp[right - 1]: raise error_proto, resp parts = resp[left + 1:right].split(resp[left+1]) if len(parts) != 5: raise error_proto, resp host = peer[0] port = int(parts[3]) return host, port
[ "def", "parse229", "(", "resp", ",", "peer", ")", ":", "if", "resp", "[", ":", "3", "]", "!=", "'229'", ":", "raise", "error_reply", ",", "resp", "left", "=", "resp", ".", "find", "(", "'('", ")", "if", "left", "<", "0", ":", "raise", "error_proto", ",", "resp", "right", "=", "resp", ".", "find", "(", "')'", ",", "left", "+", "1", ")", "if", "right", "<", "0", ":", "raise", "error_proto", ",", "resp", "# should contain '(|||port|)'", "if", "resp", "[", "left", "+", "1", "]", "!=", "resp", "[", "right", "-", "1", "]", ":", "raise", "error_proto", ",", "resp", "parts", "=", "resp", "[", "left", "+", "1", ":", "right", "]", ".", "split", "(", "resp", "[", "left", "+", "1", "]", ")", "if", "len", "(", "parts", ")", "!=", "5", ":", "raise", "error_proto", ",", "resp", "host", "=", "peer", "[", "0", "]", "port", "=", "int", "(", "parts", "[", "3", "]", ")", "return", "host", ",", "port" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/ftplib.py#L814-L833
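Editor's note: to make the '(|||port|)' format concrete, here is a hypothetical EPSV reply run through the same parsing steps as the function above; the reply text and peer address are made up.

```python
resp = '229 Entering Extended Passive Mode (|||6446|)'
left = resp.find('(')
right = resp.find(')', left + 1)
parts = resp[left + 1:right].split(resp[left + 1])  # split on the delimiter '|'
assert len(parts) == 5                              # ['', '', '', '6446', '']
host, port = '10.0.0.1', int(parts[3])              # host comes from the peer
print(host, port)                                   # 10.0.0.1 6446
```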
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/pycparser/c_lexer.py
python
CLexer.t_ppline_NEWLINE
(self, t)
r'\n
r'\n
[ "r", "\\", "n" ]
def t_ppline_NEWLINE(self, t): r'\n' if self.pp_line is None: self._error('line number missing in #line', t) else: self.lexer.lineno = int(self.pp_line) if self.pp_filename is not None: self.filename = self.pp_filename t.lexer.begin('INITIAL')
[ "def", "t_ppline_NEWLINE", "(", "self", ",", "t", ")", ":", "if", "self", ".", "pp_line", "is", "None", ":", "self", ".", "_error", "(", "'line number missing in #line'", ",", "t", ")", "else", ":", "self", ".", "lexer", ".", "lineno", "=", "int", "(", "self", ".", "pp_line", ")", "if", "self", ".", "pp_filename", "is", "not", "None", ":", "self", ".", "filename", "=", "self", ".", "pp_filename", "t", ".", "lexer", ".", "begin", "(", "'INITIAL'", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/pycparser/c_lexer.py#L303-L313
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/protobuf/py3/google/protobuf/internal/wire_format.py
python
IsTypePackable
(field_type)
return field_type not in NON_PACKABLE_TYPES
Return true iff packable = true is valid for fields of this type. Args: field_type: a FieldDescriptor::Type value. Returns: True iff fields of this type are packable.
Return true iff packable = true is valid for fields of this type.
[ "Return", "true", "iff", "packable", "=", "true", "is", "valid", "for", "fields", "of", "this", "type", "." ]
def IsTypePackable(field_type): """Return true iff packable = true is valid for fields of this type. Args: field_type: a FieldDescriptor::Type value. Returns: True iff fields of this type are packable. """ return field_type not in NON_PACKABLE_TYPES
[ "def", "IsTypePackable", "(", "field_type", ")", ":", "return", "field_type", "not", "in", "NON_PACKABLE_TYPES" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/protobuf/py3/google/protobuf/internal/wire_format.py#L259-L268
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/queue.py
python
Queue.join
(self)
Blocks until all items in the Queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks.
Blocks until all items in the Queue have been gotten and processed.
[ "Blocks", "until", "all", "items", "in", "the", "Queue", "have", "been", "gotten", "and", "processed", "." ]
def join(self): '''Blocks until all items in the Queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. ''' with self.all_tasks_done: while self.unfinished_tasks: self.all_tasks_done.wait()
[ "def", "join", "(", "self", ")", ":", "with", "self", ".", "all_tasks_done", ":", "while", "self", ".", "unfinished_tasks", ":", "self", ".", "all_tasks_done", ".", "wait", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/queue.py#L78-L89
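Editor's note: a compact producer/consumer sketch showing the `task_done`/`join` handshake the docstring describes, using only the standard library.

```python
import queue
import threading

q = queue.Queue()

def worker():
    while True:
        item = q.get()
        print('processed', item)
        q.task_done()  # one call per completed get()

threading.Thread(target=worker, daemon=True).start()
for i in range(3):
    q.put(i)
q.join()  # blocks until every queued item has been marked done
print('all work finished')
```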
NVIDIAGameWorks/kaolin
e5148d05e9c1e2ce92a07881ce3593b1c5c3f166
kaolin/io/usd.py
python
export_meshes
(file_path, scene_paths=None, vertices=None, faces=None, uvs=None, face_uvs_idx=None, face_normals=None, materials_order=None, materials=None, up_axis='Y', times=None)
return stage
r"""Export multiple meshes to a new USD stage. Export multiple meshes defined by lists vertices and faces and save the stage to disk. Args: file_path (str): Path to usd file (\*.usd, \*.usda). scene_paths (list of str, optional): Absolute paths of meshes within the USD file scene. Must have the same number ofpaths as the number of meshes ``N``. Must be a valid Sdf.Path. If no path is provided, a default path is used. vertices (list of torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``. faces (list of torch.LongTensor, optional): Vertex indices for each face with shape ``(num_faces, face_size)``. Mesh must be homogenous (consistent number of vertices per face). uvs (list of torch.FloatTensor, optional): of shape ``(num_uvs, 2)``. face_uvs_idx (list of torch.LongTensor, optional): of shape ``(num_faces, face_size)``. If provided, `uvs` must also be specified. face_normals (list of torch.Tensor, optional): of shape ``(num_vertices, num_faces, 3)``. materials_order (torch.LongTensor): of shape (N, 2) showing the order in which materials are used over **face_uvs_idx** and the first indices in which they start to be used. A material can be used multiple times. materials (list of Material): a list of materials up_axis (str, optional): Specifies the scene's up axis. Choose from ``['Y', 'Z']``. times (list of int, optional): Positive integers defining the time at which the supplied parameters correspond to. Returns: (Usd.Stage) Example: >>> vertices_list = [torch.rand(3, 3) for _ in range(3)] >>> faces_list = [torch.tensor([[0, 1, 2]]) for _ in range(3)] >>> stage = export_meshes('./new_stage.usd', vertices=vertices_list, faces=faces_list)
r"""Export multiple meshes to a new USD stage.
[ "r", "Export", "multiple", "meshes", "to", "a", "new", "USD", "stage", "." ]
def export_meshes(file_path, scene_paths=None, vertices=None, faces=None, uvs=None, face_uvs_idx=None, face_normals=None, materials_order=None, materials=None, up_axis='Y', times=None): r"""Export multiple meshes to a new USD stage. Export multiple meshes defined by lists vertices and faces and save the stage to disk. Args: file_path (str): Path to usd file (\*.usd, \*.usda). scene_paths (list of str, optional): Absolute paths of meshes within the USD file scene. Must have the same number of paths as the number of meshes ``N``. Must be a valid Sdf.Path. If no path is provided, a default path is used. vertices (list of torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``. faces (list of torch.LongTensor, optional): Vertex indices for each face with shape ``(num_faces, face_size)``. Mesh must be homogeneous (consistent number of vertices per face). uvs (list of torch.FloatTensor, optional): of shape ``(num_uvs, 2)``. face_uvs_idx (list of torch.LongTensor, optional): of shape ``(num_faces, face_size)``. If provided, `uvs` must also be specified. face_normals (list of torch.Tensor, optional): of shape ``(num_vertices, num_faces, 3)``. materials_order (torch.LongTensor): of shape (N, 2) showing the order in which materials are used over **face_uvs_idx** and the first indices in which they start to be used. A material can be used multiple times. materials (list of Material): a list of materials up_axis (str, optional): Specifies the scene's up axis. Choose from ``['Y', 'Z']``. times (list of int, optional): Positive integers defining the time at which the supplied parameters correspond to. Returns: (Usd.Stage) Example: >>> vertices_list = [torch.rand(3, 3) for _ in range(3)] >>> faces_list = [torch.tensor([[0, 1, 2]]) for _ in range(3)] >>> stage = export_meshes('./new_stage.usd', vertices=vertices_list, faces=faces_list) """ stage = create_stage(file_path, up_axis) mesh_parameters = {'vertices': vertices, 'faces': faces, 'uvs': uvs, 'face_uvs_idx': face_uvs_idx, 'face_normals': face_normals, 'materials_order': materials_order, 'materials': materials} supplied_parameters = {k: p for k, p in mesh_parameters.items() if p is not None} length = len(list(supplied_parameters.values())[0]) assert all([len(p) == length for p in supplied_parameters.values()]) if scene_paths is None: if not stage.GetPrimAtPath('/World/Meshes'): stage.DefinePrim('/World/Meshes', 'Xform') scene_paths = [f'/World/Meshes/mesh_{i}' for i in range(len(vertices))] assert len(scene_paths) == length if times is None: times = [Usd.TimeCode.Default()] * len(scene_paths) for i, scene_path in enumerate(scene_paths): mesh_params = {k: p[i] for k, p in supplied_parameters.items()} add_mesh(stage, scene_path, **mesh_params) stage.Save() return stage
[ "def", "export_meshes", "(", "file_path", ",", "scene_paths", "=", "None", ",", "vertices", "=", "None", ",", "faces", "=", "None", ",", "uvs", "=", "None", ",", "face_uvs_idx", "=", "None", ",", "face_normals", "=", "None", ",", "materials_order", "=", "None", ",", "materials", "=", "None", ",", "up_axis", "=", "'Y'", ",", "times", "=", "None", ")", ":", "stage", "=", "create_stage", "(", "file_path", ",", "up_axis", ")", "mesh_parameters", "=", "{", "'vertices'", ":", "vertices", ",", "'faces'", ":", "faces", ",", "'uvs'", ":", "uvs", ",", "'face_uvs_idx'", ":", "face_uvs_idx", ",", "'face_normals'", ":", "face_normals", ",", "'materials_order'", ":", "materials_order", ",", "'materials'", ":", "materials", "}", "supplied_parameters", "=", "{", "k", ":", "p", "for", "k", ",", "p", "in", "mesh_parameters", ".", "items", "(", ")", "if", "p", "is", "not", "None", "}", "length", "=", "len", "(", "list", "(", "supplied_parameters", ".", "values", "(", ")", ")", "[", "0", "]", ")", "assert", "all", "(", "[", "len", "(", "p", ")", "==", "length", "for", "p", "in", "supplied_parameters", ".", "values", "(", ")", "]", ")", "if", "scene_paths", "is", "None", ":", "if", "not", "stage", ".", "GetPrimAtPath", "(", "'/World/Meshes'", ")", ":", "stage", ".", "DefinePrim", "(", "'/World/Meshes'", ",", "'Xform'", ")", "scene_paths", "=", "[", "f'/World/Meshes/mesh_{i}'", "for", "i", "in", "range", "(", "len", "(", "vertices", ")", ")", "]", "assert", "len", "(", "scene_paths", ")", "==", "length", "if", "times", "is", "None", ":", "times", "=", "[", "Usd", ".", "TimeCode", ".", "Default", "(", ")", "]", "*", "len", "(", "scene_paths", ")", "for", "i", ",", "scene_path", "in", "enumerate", "(", "scene_paths", ")", ":", "mesh_params", "=", "{", "k", ":", "p", "[", "i", "]", "for", "k", ",", "p", "in", "supplied_parameters", ".", "items", "(", ")", "}", "add_mesh", "(", "stage", ",", "scene_path", ",", "*", "*", "mesh_params", ")", "stage", ".", "Save", "(", ")", "return", "stage" ]
https://github.com/NVIDIAGameWorks/kaolin/blob/e5148d05e9c1e2ce92a07881ce3593b1c5c3f166/kaolin/io/usd.py#L736-L790
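Usage sketch for the export_meshes record above (file name and tensor shapes are hypothetical; assumes a kaolin build with USD support, since create_stage/add_mesh come from kaolin.io.usd):

import torch
from kaolin.io import usd

# Three homogeneous triangle meshes with explicit scene paths.
vertices_list = [torch.rand(4, 3) for _ in range(3)]
faces_list = [torch.tensor([[0, 1, 2], [0, 2, 3]]) for _ in range(3)]
scene_paths = [f'/World/Meshes/mesh_{i}' for i in range(3)]  # must be valid Sdf.Path strings
stage = usd.export_meshes('./scene.usda', scene_paths=scene_paths,
                          vertices=vertices_list, faces=faces_list, up_axis='Z')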
nnrg/opennero
43e12a1bcba6e228639db3886fec1dc47ddc24cb
mods/Maze/environment.py
python
MazeEnvironment.mark_maze
(self, r, c, marker)
mark a maze cell with the specified color
mark a maze cell with the specified color
[ "mark", "a", "maze", "cell", "with", "the", "specified", "color" ]
def mark_maze(self, r, c, marker): """ mark a maze cell with the specified color """ # remove the previous object, if necessary if (r,c) in self.marker_map: removeObject(self.marker_map[(r,c)]) # remember the ID of the marker self.marker_map[(r,c)] = addObject(marker, Vector3f( (r+1) * GRID_DX, (c+1) * GRID_DY, -1))
[ "def", "mark_maze", "(", "self", ",", "r", ",", "c", ",", "marker", ")", ":", "# remove the previous object, if necessary", "if", "(", "r", ",", "c", ")", "in", "self", ".", "marker_map", ":", "removeObject", "(", "self", ".", "marker_map", "[", "(", "r", ",", "c", ")", "]", ")", "# remember the ID of the marker", "self", ".", "marker_map", "[", "(", "r", ",", "c", ")", "]", "=", "addObject", "(", "marker", ",", "Vector3f", "(", "(", "r", "+", "1", ")", "*", "GRID_DX", ",", "(", "c", "+", "1", ")", "*", "GRID_DY", ",", "-", "1", ")", ")" ]
https://github.com/nnrg/opennero/blob/43e12a1bcba6e228639db3886fec1dc47ddc24cb/mods/Maze/environment.py#L300-L306
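The mark_maze record above is a replace-before-add pattern keyed by grid cell; a self-contained sketch of the same idea (MarkerMap and the callables are illustrative stand-ins, not OpenNero APIs):

class MarkerMap:
    """Keep at most one marker object per grid cell."""
    def __init__(self, add_object, remove_object):
        self._add = add_object        # callable: marker spec -> object id
        self._remove = remove_object  # callable: object id -> None
        self._markers = {}

    def mark(self, r, c, spec):
        # remove the previous marker for this cell, if necessary
        if (r, c) in self._markers:
            self._remove(self._markers[(r, c)])
        # remember the id of the new marker
        self._markers[(r, c)] = self._add(spec)

ids = iter(range(1000))
mm = MarkerMap(lambda spec: next(ids), lambda oid: None)
mm.mark(0, 0, 'blue')
mm.mark(0, 0, 'red')  # replaces the blue marker in cell (0, 0)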
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
Framework/PythonInterface/mantid/plots/plotfunctions.py
python
_validate_plot_inputs
(workspaces, spectrum_nums, wksp_indices, tiled=False, overplot=False)
Raises a ValueError if any arguments have the incorrect types
Raises a ValueError if any arguments have the incorrect types
[ "Raises", "a", "ValueError", "if", "any", "arguments", "have", "the", "incorrect", "types" ]
def _validate_plot_inputs(workspaces, spectrum_nums, wksp_indices, tiled=False, overplot=False): """Raises a ValueError if any arguments have the incorrect types""" if spectrum_nums is not None and wksp_indices is not None: raise ValueError("Both spectrum_nums and wksp_indices supplied. " "Please supply only 1.") if tiled and overplot: raise ValueError("Both tiled and overplot flags set to true. " "Please set only one to true.") raise_if_not_sequence(workspaces, 'workspaces', MatrixWorkspace) if spectrum_nums is not None: raise_if_not_sequence(spectrum_nums, 'spectrum_nums') if wksp_indices is not None: raise_if_not_sequence(wksp_indices, 'wksp_indices')
[ "def", "_validate_plot_inputs", "(", "workspaces", ",", "spectrum_nums", ",", "wksp_indices", ",", "tiled", "=", "False", ",", "overplot", "=", "False", ")", ":", "if", "spectrum_nums", "is", "not", "None", "and", "wksp_indices", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Both spectrum_nums and wksp_indices supplied. \"", "\"Please supply only 1.\"", ")", "if", "tiled", "and", "overplot", ":", "raise", "ValueError", "(", "\"Both tiled and overplot flags set to true. \"", "\"Please set only one to true.\"", ")", "raise_if_not_sequence", "(", "workspaces", ",", "'workspaces'", ",", "MatrixWorkspace", ")", "if", "spectrum_nums", "is", "not", "None", ":", "raise_if_not_sequence", "(", "spectrum_nums", ",", "'spectrum_nums'", ")", "if", "wksp_indices", "is", "not", "None", ":", "raise_if_not_sequence", "(", "wksp_indices", ",", "'wksp_indices'", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/mantid/plots/plotfunctions.py#L438-L454
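The mutual-exclusion checks in _validate_plot_inputs can be exercised without mantid; a minimal stand-in (the raise_if_not_sequence calls are omitted, keeping only the plain checks):

def validate_inputs(spectrum_nums=None, wksp_indices=None, tiled=False, overplot=False):
    if spectrum_nums is not None and wksp_indices is not None:
        raise ValueError("Both spectrum_nums and wksp_indices supplied. Please supply only 1.")
    if tiled and overplot:
        raise ValueError("Both tiled and overplot flags set to true. Please set only one to true.")

validate_inputs(spectrum_nums=[1, 2])  # accepted
try:
    validate_inputs(spectrum_nums=[1], wksp_indices=[0])
except ValueError as exc:
    print(exc)  # both supplied -> rejected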
anestisb/oatdump_plus
ba858c1596598f0d9ae79c14d08c708cecc50af3
tools/bisection_search/bisection_search.py
python
EqualsOutputCheck.__init__
(self, expected_output)
Constructor. Args: expected_output: string, expected output.
Constructor.
[ "Constructor", "." ]
def __init__(self, expected_output): """Constructor. Args: expected_output: string, expected output. """ self._expected_output = expected_output
[ "def", "__init__", "(", "self", ",", "expected_output", ")", ":", "self", ".", "_expected_output", "=", "expected_output" ]
https://github.com/anestisb/oatdump_plus/blob/ba858c1596598f0d9ae79c14d08c708cecc50af3/tools/bisection_search/bisection_search.py#L205-L211
wyrover/book-code
7f4883d9030d553bc6bcfa3da685e34789839900
3rdparty/protobuf/python/google/protobuf/text_format.py
python
Tokenizer.ParseErrorPreviousToken
(self, message)
return ParseError(message, self._previous_line + 1, self._previous_column + 1)
Creates and *returns* a ParseError for the previously read token. Args: message: A message to set for the exception. Returns: A ParseError instance.
Creates and *returns* a ParseError for the previously read token.
[ "Creates", "and", "*", "returns", "*", "a", "ParseError", "for", "the", "previously", "read", "token", "." ]
def ParseErrorPreviousToken(self, message): """Creates and *returns* a ParseError for the previously read token. Args: message: A message to set for the exception. Returns: A ParseError instance. """ return ParseError(message, self._previous_line + 1, self._previous_column + 1)
[ "def", "ParseErrorPreviousToken", "(", "self", ",", "message", ")", ":", "return", "ParseError", "(", "message", ",", "self", ".", "_previous_line", "+", "1", ",", "self", ".", "_previous_column", "+", "1", ")" ]
https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/text_format.py#L1207-L1217
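The ParseErrorPreviousToken record above reports the position of the token just consumed; the same bookkeeping in a runnable miniature (simplified, not the protobuf tokenizer API):

class TinyTokenizer:
    def __init__(self, tokens):
        self._tokens = tokens
        self._pos = 0
        self._previous_pos = 0

    def next_token(self):
        self._previous_pos = self._pos  # remember where the current token started
        tok = self._tokens[self._pos]
        self._pos += 1
        return tok

    def parse_error_previous_token(self, message):
        # 1-based position, matching the +1 convention in the record above
        return ValueError('%s (at token %d)' % (message, self._previous_pos + 1))

t = TinyTokenizer(['a', '=', 'oops'])
t.next_token(); t.next_token(); t.next_token()
print(t.parse_error_previous_token('unexpected value'))  # points at token 3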
devsisters/libquic
8954789a056d8e7d5fcb6452fd1572ca57eb5c4e
src/third_party/protobuf/python/mox.py
python
MockMethod._PopNextMethod
(self)
Pop the next method from our call queue.
Pop the next method from our call queue.
[ "Pop", "the", "next", "method", "from", "our", "call", "queue", "." ]
def _PopNextMethod(self): """Pop the next method from our call queue.""" try: return self._call_queue.popleft() except IndexError: raise UnexpectedMethodCallError(self, None)
[ "def", "_PopNextMethod", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_call_queue", ".", "popleft", "(", ")", "except", "IndexError", ":", "raise", "UnexpectedMethodCallError", "(", "self", ",", "None", ")" ]
https://github.com/devsisters/libquic/blob/8954789a056d8e7d5fcb6452fd1572ca57eb5c4e/src/third_party/protobuf/python/mox.py#L581-L586
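_PopNextMethod converts an empty collections.deque into a domain-specific error; the pattern in a runnable miniature:

from collections import deque

class CallQueue:
    def __init__(self, expected_calls):
        self._call_queue = deque(expected_calls)

    def pop_next(self):
        try:
            return self._call_queue.popleft()
        except IndexError:
            raise AssertionError('unexpected call: no expectations left')

q = CallQueue(['open', 'read'])
assert q.pop_next() == 'open'
assert q.pop_next() == 'read'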
gnina/gnina
b9ae032f52fc7a8153987bde09c0efa3620d8bb6
caffe/tools/extra/extract_seconds.py
python
get_log_created_year
(input_file)
return log_created_year
Get year from log file system timestamp
Get year from log file system timestamp
[ "Get", "year", "from", "log", "file", "system", "timestamp" ]
def get_log_created_year(input_file): """Get year from log file system timestamp """ log_created_time = os.path.getctime(input_file) log_created_year = datetime.datetime.fromtimestamp(log_created_time).year return log_created_year
[ "def", "get_log_created_year", "(", "input_file", ")", ":", "log_created_time", "=", "os", ".", "path", ".", "getctime", "(", "input_file", ")", "log_created_year", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "log_created_time", ")", ".", "year", "return", "log_created_year" ]
https://github.com/gnina/gnina/blob/b9ae032f52fc7a8153987bde09c0efa3620d8bb6/caffe/tools/extra/extract_seconds.py#L22-L28
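get_log_created_year is runnable as-is given the os and datetime imports; a quick check (note os.path.getctime is creation time on Windows but metadata-change time on Linux):

import datetime
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    path = f.name
print(datetime.datetime.fromtimestamp(os.path.getctime(path)).year)  # e.g. 2024
os.unlink(path)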
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/docs/method.py
python
document_custom_method
(section, method_name, method)
Documents a non-data driven method :param section: The section to write the documentation to. :param method_name: The name of the method :param method: The handle to the method being documented
Documents a non-data driven method
[ "Documents", "a", "non", "-", "data", "driven", "method" ]
def document_custom_method(section, method_name, method): """Documents a non-data driven method :param section: The section to write the documentation to. :param method_name: The name of the method :param method: The handle to the method being documented """ document_custom_signature( section, method_name, method) method_intro_section = section.add_new_section('method-intro') method_intro_section.writeln('') doc_string = inspect.getdoc(method) if doc_string is not None: method_intro_section.style.write_py_doc_string(doc_string)
[ "def", "document_custom_method", "(", "section", ",", "method_name", ",", "method", ")", ":", "document_custom_signature", "(", "section", ",", "method_name", ",", "method", ")", "method_intro_section", "=", "section", ".", "add_new_section", "(", "'method-intro'", ")", "method_intro_section", ".", "writeln", "(", "''", ")", "doc_string", "=", "inspect", ".", "getdoc", "(", "method", ")", "if", "doc_string", "is", "not", "None", ":", "method_intro_section", ".", "style", ".", "write_py_doc_string", "(", "doc_string", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/docs/method.py#L108-L123
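The key call in document_custom_method is inspect.getdoc, which returns the docstring cleaned of common leading indentation; a standalone illustration:

import inspect

def example(a, b):
    """Add two numbers.

    Continuation lines lose their common indentation under inspect.getdoc.
    """
    return a + b

print(inspect.getdoc(example))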
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/ma/core.py
python
_DomainTan.__init__
(self, eps)
domain_tan(eps) = true where abs(cos(x)) < eps
domain_tan(eps) = true where abs(cos(x)) < eps
[ "domain_tan", "(", "eps", ")", "=", "true", "where", "abs", "(", "cos", "(", "x", "))", "<", "eps" ]
def __init__(self, eps):
    "domain_tan(eps) = true where abs(cos(x)) < eps"
    self.eps = eps
[ "def", "__init__", "(", "self", ",", "eps", ")", ":", "self", ".", "eps", "=", "eps" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/ma/core.py#L757-L759
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/training/sync_replicas_optimizer.py
python
SyncReplicasOptimizer.get_chief_queue_runner
(self)
return self._chief_queue_runner
Returns the QueueRunner for the chief to execute.

This includes the operations to synchronize replicas: aggregate gradients, apply to variables, increment global step, insert tokens to token queue.

Note that this can only be called after calling apply_gradients() which actually generates this queue runner.

Returns:
  A `QueueRunner` for chief to execute.

Raises:
  ValueError: If this is called before apply_gradients().
Returns the QueueRunner for the chief to execute.
[ "Returns", "the", "QueueRunner", "for", "the", "chief", "to", "execute", "." ]
def get_chief_queue_runner(self):
    """Returns the QueueRunner for the chief to execute.

    This includes the operations to synchronize replicas: aggregate gradients,
    apply to variables, increment global step, insert tokens to token queue.

    Note that this can only be called after calling apply_gradients() which
    actually generates this queue runner.

    Returns:
      A `QueueRunner` for chief to execute.

    Raises:
      ValueError: If this is called before apply_gradients().
    """
    if self._gradients_applied is False:
      raise ValueError("Should be called after apply_gradients().")

    return self._chief_queue_runner
[ "def", "get_chief_queue_runner", "(", "self", ")", ":", "if", "self", ".", "_gradients_applied", "is", "False", ":", "raise", "ValueError", "(", "\"Should be called after apply_gradients().\"", ")", "return", "self", ".", "_chief_queue_runner" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/training/sync_replicas_optimizer.py#L343-L361
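The guard in get_chief_queue_runner is a generic initialize-before-use check; in miniature (Deferred is a stand-in class, not the TensorFlow API):

class Deferred:
    def __init__(self):
        self._gradients_applied = False
        self._chief_queue_runner = None

    def apply_gradients(self):
        self._chief_queue_runner = object()  # stands in for the real QueueRunner
        self._gradients_applied = True

    def get_chief_queue_runner(self):
        if self._gradients_applied is False:
            raise ValueError("Should be called after apply_gradients().")
        return self._chief_queue_runner

d = Deferred()
try:
    d.get_chief_queue_runner()
except ValueError as exc:
    print(exc)  # called too early
d.apply_gradients()
d.get_chief_queue_runner()  # now succeeds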
apache/arrow
af33dd1157eb8d7d9bfac25ebf61445b793b7943
cpp/build-support/cpplint.py
python
NestingState.SeenOpenBrace
(self)
return (not self.stack) or self.stack[-1].seen_open_brace
Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace.
Check if we have seen the opening brace for the innermost block.
[ "Check", "if", "we", "have", "seen", "the", "opening", "brace", "for", "the", "innermost", "block", "." ]
def SeenOpenBrace(self): """Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace. """ return (not self.stack) or self.stack[-1].seen_open_brace
[ "def", "SeenOpenBrace", "(", "self", ")", ":", "return", "(", "not", "self", ".", "stack", ")", "or", "self", ".", "stack", "[", "-", "1", "]", ".", "seen_open_brace" ]
https://github.com/apache/arrow/blob/af33dd1157eb8d7d9bfac25ebf61445b793b7943/cpp/build-support/cpplint.py#L2540-L2547
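SeenOpenBrace reads the top of a nesting stack with an empty-stack default; a minimal stand-in (_BlockInfo here is illustrative, not cpplint's full class):

class _BlockInfo:
    def __init__(self):
        self.seen_open_brace = False

class NestingStack:
    def __init__(self):
        self.stack = []

    def seen_open_brace(self):
        # An empty stack counts as "seen": nothing is waiting for a '{'.
        return (not self.stack) or self.stack[-1].seen_open_brace

s = NestingStack()
assert s.seen_open_brace()       # no blocks open
s.stack.append(_BlockInfo())
assert not s.seen_open_brace()   # innermost block still expects its '{'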
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/tools/compatibility/tf_upgrade_v2.py
python
_add_uniform_scaling_initializer_transformer
( parent, node, full_name, name, logs)
return node
Updates references to uniform_unit_scaling_initializer. Transforms: tf.uniform_unit_scaling_initializer(factor, seed, dtype) to tf.compat.v1.keras.initializers.VarianceScaling( scale=factor, distribution="uniform", seed=seed) Note: to apply this transformation, symbol must be added to reordered_function_names above.
Updates references to uniform_unit_scaling_initializer.
[ "Updates", "references", "to", "uniform_unit_scaling_initializer", "." ]
def _add_uniform_scaling_initializer_transformer( parent, node, full_name, name, logs): """Updates references to uniform_unit_scaling_initializer. Transforms: tf.uniform_unit_scaling_initializer(factor, seed, dtype) to tf.compat.v1.keras.initializers.VarianceScaling( scale=factor, distribution="uniform", seed=seed) Note: to apply this transformation, symbol must be added to reordered_function_names above. """ for keyword_arg in node.keywords: if keyword_arg.arg == "factor": keyword_arg.arg = "scale" distribution_value = "\"uniform\"" # Parse with pasta instead of ast to avoid emitting a spurious trailing \n. ast_value = pasta.parse(distribution_value) node.keywords.append(ast.keyword(arg="distribution", value=ast_value)) lineno = node.func.value.lineno col_offset = node.func.value.col_offset node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers") node.func.value.lineno = lineno node.func.value.col_offset = col_offset node.func.attr = "VarianceScaling" return node
[ "def", "_add_uniform_scaling_initializer_transformer", "(", "parent", ",", "node", ",", "full_name", ",", "name", ",", "logs", ")", ":", "for", "keyword_arg", "in", "node", ".", "keywords", ":", "if", "keyword_arg", ".", "arg", "==", "\"factor\"", ":", "keyword_arg", ".", "arg", "=", "\"scale\"", "distribution_value", "=", "\"\\\"uniform\\\"\"", "# Parse with pasta instead of ast to avoid emitting a spurious trailing \\n.", "ast_value", "=", "pasta", ".", "parse", "(", "distribution_value", ")", "node", ".", "keywords", ".", "append", "(", "ast", ".", "keyword", "(", "arg", "=", "\"distribution\"", ",", "value", "=", "ast_value", ")", ")", "lineno", "=", "node", ".", "func", ".", "value", ".", "lineno", "col_offset", "=", "node", ".", "func", ".", "value", ".", "col_offset", "node", ".", "func", ".", "value", "=", "ast_edits", ".", "full_name_node", "(", "\"tf.compat.v1.keras.initializers\"", ")", "node", ".", "func", ".", "value", ".", "lineno", "=", "lineno", "node", ".", "func", ".", "value", ".", "col_offset", "=", "col_offset", "node", ".", "func", ".", "attr", "=", "\"VarianceScaling\"", "return", "node" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/tools/compatibility/tf_upgrade_v2.py#L2207-L2234
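Stripped of the pasta/TensorFlow plumbing, the record above renames one keyword argument and appends another on a parsed call; the same edits with the stdlib ast module:

import ast

tree = ast.parse("init(factor=2.0)", mode="eval")
call = tree.body
for kw in call.keywords:
    if kw.arg == "factor":
        kw.arg = "scale"  # rename the keyword in place
call.keywords.append(ast.keyword(arg="distribution", value=ast.Constant("uniform")))
print(ast.dump(call))  # on Python 3.9+, ast.unparse(tree) gives init(scale=2.0, distribution='uniform')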
microsoft/TSS.MSR
0f2516fca2cd9929c31d5450e39301c9bde43688
TSS.Py/src/TpmTypes.py
python
TPM2_PolicyCounterTimer_REQUEST.fromTpm
(buf)
return buf.createObj(TPM2_PolicyCounterTimer_REQUEST)
Returns new TPM2_PolicyCounterTimer_REQUEST object constructed from its marshaled representation in the given TpmBuffer buffer
Returns new TPM2_PolicyCounterTimer_REQUEST object constructed from its marshaled representation in the given TpmBuffer buffer
[ "Returns", "new", "TPM2_PolicyCounterTimer_REQUEST", "object", "constructed", "from", "its", "marshaled", "representation", "in", "the", "given", "TpmBuffer", "buffer" ]
def fromTpm(buf): """ Returns new TPM2_PolicyCounterTimer_REQUEST object constructed from its marshaled representation in the given TpmBuffer buffer """ return buf.createObj(TPM2_PolicyCounterTimer_REQUEST)
[ "def", "fromTpm", "(", "buf", ")", ":", "return", "buf", ".", "createObj", "(", "TPM2_PolicyCounterTimer_REQUEST", ")" ]
https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L14666-L14670
WeitaoVan/L-GM-loss
598582f0631bac876b3eeb8d6c4cd1d780269e03
scripts/cpp_lint.py
python
RemoveMultiLineCommentsFromRange
(lines, begin, end)
Clears a range of lines for multi-line comments.
Clears a range of lines for multi-line comments.
[ "Clears", "a", "range", "of", "lines", "for", "multi", "-", "line", "comments", "." ]
def RemoveMultiLineCommentsFromRange(lines, begin, end): """Clears a range of lines for multi-line comments.""" # Having // dummy comments makes the lines non-empty, so we will not get # unnecessary blank line warnings later in the code. for i in range(begin, end): lines[i] = '// dummy'
[ "def", "RemoveMultiLineCommentsFromRange", "(", "lines", ",", "begin", ",", "end", ")", ":", "# Having // dummy comments makes the lines non-empty, so we will not get", "# unnecessary blank line warnings later in the code.", "for", "i", "in", "range", "(", "begin", ",", "end", ")", ":", "lines", "[", "i", "]", "=", "'// dummy'" ]
https://github.com/WeitaoVan/L-GM-loss/blob/598582f0631bac876b3eeb8d6c4cd1d780269e03/scripts/cpp_lint.py#L1143-L1148
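RemoveMultiLineCommentsFromRange in a runnable miniature:

def remove_multiline_comments_from_range(lines, begin, end):
    # Replace the comment lines with non-empty dummies so later blank-line
    # checks are not triggered.
    for i in range(begin, end):
        lines[i] = '// dummy'

lines = ['int x;', '/*', 'old comment', '*/', 'int y;']
remove_multiline_comments_from_range(lines, 1, 4)
print(lines)  # ['int x;', '// dummy', '// dummy', '// dummy', 'int y;']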
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/boto3/resources/collection.py
python
ResourceCollection.page_size
(self, count)
return self._clone(page_size=count)
Fetch at most this many resources per service request. >>> for obj in s3.Bucket('boto3').objects.page_size(100): ... print(obj.key) :type count: int :param count: Fetch this many items per request :rtype: :py:class:`ResourceCollection`
Fetch at most this many resources per service request.
[ "Fetch", "at", "most", "this", "many", "resources", "per", "service", "request", "." ]
def page_size(self, count): """ Fetch at most this many resources per service request. >>> for obj in s3.Bucket('boto3').objects.page_size(100): ... print(obj.key) :type count: int :param count: Fetch this many items per request :rtype: :py:class:`ResourceCollection` """ return self._clone(page_size=count)
[ "def", "page_size", "(", "self", ",", "count", ")", ":", "return", "self", ".", "_clone", "(", "page_size", "=", "count", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/boto3/resources/collection.py#L246-L257
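page_size chains with the other boto3 collection modifiers; a sketch (bucket name hypothetical, requires AWS credentials):

import boto3

bucket = boto3.resource('s3').Bucket('my-example-bucket')
# At most 50 keys per ListObjects request, stop after 200 results in total.
for obj in bucket.objects.filter(Prefix='logs/').page_size(50).limit(200):
    print(obj.key)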
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py3/pandas/core/series.py
python
Series.unstack
(self, level=-1, fill_value=None)
return unstack(self, level, fill_value)
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4
Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.
[ "Unstack", "also", "known", "as", "pivot", "Series", "with", "MultiIndex", "to", "produce", "DataFrame", "." ]
def unstack(self, level=-1, fill_value=None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value)
[ "def", "unstack", "(", "self", ",", "level", "=", "-", "1", ",", "fill_value", "=", "None", ")", "->", "DataFrame", ":", "from", "pandas", ".", "core", ".", "reshape", ".", "reshape", "import", "unstack", "return", "unstack", "(", "self", ",", "level", ",", "fill_value", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/series.py#L4041-L4081
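One behaviour the doctest above does not show is fill_value, which replaces the NaNs produced by missing index combinations:

import pandas as pd

idx = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ('two', 'a')])
s = pd.Series([1, 2, 3], index=idx)
print(s.unstack(fill_value=0))
#      a  b
# one  1  2
# two  3  0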
google/syzygy
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
third_party/numpy/files/numpy/lib/stride_tricks.py
python
broadcast_arrays
(*args)
return broadcasted
Broadcast any number of arrays against each other. Parameters ---------- `*args` : array_likes The arrays to broadcast. Returns ------- broadcasted : list of arrays These arrays are views on the original arrays. They are typically not contiguous. Furthermore, more than one element of a broadcasted array may refer to a single memory location. If you need to write to the arrays, make copies first. Examples -------- >>> x = np.array([[1,2,3]]) >>> y = np.array([[1],[2],[3]]) >>> np.broadcast_arrays(x, y) [array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])] Here is a useful idiom for getting contiguous copies instead of non-contiguous views. >>> map(np.array, np.broadcast_arrays(x, y)) [array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])]
Broadcast any number of arrays against each other.
[ "Broadcast", "any", "number", "of", "arrays", "against", "each", "other", "." ]
def broadcast_arrays(*args):
    """
    Broadcast any number of arrays against each other.

    Parameters
    ----------
    `*args` : array_likes
        The arrays to broadcast.

    Returns
    -------
    broadcasted : list of arrays
        These arrays are views on the original arrays. They are typically not
        contiguous. Furthermore, more than one element of a broadcasted array
        may refer to a single memory location. If you need to write to the
        arrays, make copies first.

    Examples
    --------
    >>> x = np.array([[1,2,3]])
    >>> y = np.array([[1],[2],[3]])
    >>> np.broadcast_arrays(x, y)
    [array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]]), array([[1, 1, 1],
           [2, 2, 2],
           [3, 3, 3]])]

    Here is a useful idiom for getting contiguous copies instead of
    non-contiguous views.

    >>> map(np.array, np.broadcast_arrays(x, y))
    [array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]]), array([[1, 1, 1],
           [2, 2, 2],
           [3, 3, 3]])]

    """
    args = map(np.asarray, args)
    shapes = [x.shape for x in args]
    if len(set(shapes)) == 1:
        # Common case where nothing needs to be broadcasted.
        return args
    shapes = [list(s) for s in shapes]
    strides = [list(x.strides) for x in args]
    nds = [len(s) for s in shapes]
    biggest = max(nds)
    # Go through each array and prepend dimensions of length 1 to each of the
    # shapes in order to make the number of dimensions equal.
    for i in range(len(args)):
        diff = biggest - nds[i]
        if diff > 0:
            shapes[i] = [1] * diff + shapes[i]
            strides[i] = [0] * diff + strides[i]
    # Check each dimension for compatibility. A dimension length of 1 is
    # accepted as compatible with any other length.
    common_shape = []
    for axis in range(biggest):
        lengths = [s[axis] for s in shapes]
        unique = set(lengths + [1])
        if len(unique) > 2:
            # There must be at least two non-1 lengths for this axis.
            raise ValueError("shape mismatch: two or more arrays have "
                             "incompatible dimensions on axis %r." % (axis,))
        elif len(unique) == 2:
            # There is exactly one non-1 length. The common shape will take this
            # value.
            unique.remove(1)
            new_length = unique.pop()
            common_shape.append(new_length)
            # For each array, if this axis is being broadcasted from a length of
            # 1, then set its stride to 0 so that it repeats its data.
            for i in range(len(args)):
                if shapes[i][axis] == 1:
                    shapes[i][axis] = new_length
                    strides[i][axis] = 0
        else:
            # Every array has a length of 1 on this axis. Strides can be left
            # alone as nothing is broadcasted.
            common_shape.append(1)

    # Construct the new arrays.
    broadcasted = [as_strided(x, shape=sh, strides=st) for (x,sh,st) in
                   zip(args, shapes, strides)]
    return broadcasted
[ "def", "broadcast_arrays", "(", "*", "args", ")", ":", "args", "=", "map", "(", "np", ".", "asarray", ",", "args", ")", "shapes", "=", "[", "x", ".", "shape", "for", "x", "in", "args", "]", "if", "len", "(", "set", "(", "shapes", ")", ")", "==", "1", ":", "# Common case where nothing needs to be broadcasted.", "return", "args", "shapes", "=", "[", "list", "(", "s", ")", "for", "s", "in", "shapes", "]", "strides", "=", "[", "list", "(", "x", ".", "strides", ")", "for", "x", "in", "args", "]", "nds", "=", "[", "len", "(", "s", ")", "for", "s", "in", "shapes", "]", "biggest", "=", "max", "(", "nds", ")", "# Go through each array and prepend dimensions of length 1 to each of the", "# shapes in order to make the number of dimensions equal.", "for", "i", "in", "range", "(", "len", "(", "args", ")", ")", ":", "diff", "=", "biggest", "-", "nds", "[", "i", "]", "if", "diff", ">", "0", ":", "shapes", "[", "i", "]", "=", "[", "1", "]", "*", "diff", "+", "shapes", "[", "i", "]", "strides", "[", "i", "]", "=", "[", "0", "]", "*", "diff", "+", "strides", "[", "i", "]", "# Chech each dimension for compatibility. A dimension length of 1 is", "# accepted as compatible with any other length.", "common_shape", "=", "[", "]", "for", "axis", "in", "range", "(", "biggest", ")", ":", "lengths", "=", "[", "s", "[", "axis", "]", "for", "s", "in", "shapes", "]", "unique", "=", "set", "(", "lengths", "+", "[", "1", "]", ")", "if", "len", "(", "unique", ")", ">", "2", ":", "# There must be at least two non-1 lengths for this axis.", "raise", "ValueError", "(", "\"shape mismatch: two or more arrays have \"", "\"incompatible dimensions on axis %r.\"", "%", "(", "axis", ",", ")", ")", "elif", "len", "(", "unique", ")", "==", "2", ":", "# There is exactly one non-1 length. The common shape will take this", "# value.", "unique", ".", "remove", "(", "1", ")", "new_length", "=", "unique", ".", "pop", "(", ")", "common_shape", ".", "append", "(", "new_length", ")", "# For each array, if this axis is being broadcasted from a length of", "# 1, then set its stride to 0 so that it repeats its data.", "for", "i", "in", "range", "(", "len", "(", "args", ")", ")", ":", "if", "shapes", "[", "i", "]", "[", "axis", "]", "==", "1", ":", "shapes", "[", "i", "]", "[", "axis", "]", "=", "new_length", "strides", "[", "i", "]", "[", "axis", "]", "=", "0", "else", ":", "# Every array has a length of 1 on this axis. Strides can be left", "# alone as nothing is broadcasted.", "common_shape", ".", "append", "(", "1", ")", "# Construct the new arrays.", "broadcasted", "=", "[", "as_strided", "(", "x", ",", "shape", "=", "sh", ",", "strides", "=", "st", ")", "for", "(", "x", ",", "sh", ",", "st", ")", "in", "zip", "(", "args", ",", "shapes", ",", "strides", ")", "]", "return", "broadcasted" ]
https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/numpy/files/numpy/lib/stride_tricks.py#L30-L115
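The zero-stride trick described in the comments above is observable on the results; checking against the modern numpy API:

import numpy as np

x = np.array([[1, 2, 3]])      # shape (1, 3)
y = np.array([[1], [2], [3]])  # shape (3, 1)
bx, by = np.broadcast_arrays(x, y)
print(bx.shape, by.shape)  # (3, 3) (3, 3)
print(bx.strides)          # (0, 8) on 64-bit ints: every row is a view of the same row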
bairdzhang/smallhardface
76fa1d87a9602d9b13d7a7fe693fc7aec91cab80
external/marcopede-face-eval-f2870fd85d48/database.py
python
getRecord
(data, total=-1, pos=True, pose=False, facial=False)
return arrPos
return all the gt data in a record
return all the gt data in a record
[ "return", "all", "the", "gt", "data", "in", "a", "record" ]
def getRecord(data, total=-1, pos=True, pose=False, facial=False): """return all the gt data in a record""" if total == -1: total = data.getTotal() else: total = min(data.getTotal(), total) arrPos = numpy.zeros( total, dtype=[("id", numpy.int32), ("name", object), ("bbox", list)]) if facial: arrPos = numpy.zeros(total, dtype=[ ("id", numpy.int32), ("name", object), ("bbox", list), ("facial", object)]) if pose: arrPos = numpy.zeros(total, dtype=[ ("id", numpy.int32), ("name", object), ("bbox", list), ("facial", object), ("pose", object)]) for i in range(total): arrPos[i]["id"] = i arrPos[i]["name"] = data.getImageName(i) arrPos[i]["bbox"] = data.getBBox(i) if pose: arrPos[i]["pose"] = data.getPose(i) if facial: arrPos[i]["facial"] = data.getFacial(i) return arrPos
[ "def", "getRecord", "(", "data", ",", "total", "=", "-", "1", ",", "pos", "=", "True", ",", "pose", "=", "False", ",", "facial", "=", "False", ")", ":", "if", "total", "==", "-", "1", ":", "total", "=", "data", ".", "getTotal", "(", ")", "else", ":", "total", "=", "min", "(", "data", ".", "getTotal", "(", ")", ",", "total", ")", "arrPos", "=", "numpy", ".", "zeros", "(", "total", ",", "dtype", "=", "[", "(", "\"id\"", ",", "numpy", ".", "int32", ")", ",", "(", "\"name\"", ",", "object", ")", ",", "(", "\"bbox\"", ",", "list", ")", "]", ")", "if", "facial", ":", "arrPos", "=", "numpy", ".", "zeros", "(", "total", ",", "dtype", "=", "[", "(", "\"id\"", ",", "numpy", ".", "int32", ")", ",", "(", "\"name\"", ",", "object", ")", ",", "(", "\"bbox\"", ",", "list", ")", ",", "(", "\"facial\"", ",", "object", ")", "]", ")", "if", "pose", ":", "arrPos", "=", "numpy", ".", "zeros", "(", "total", ",", "dtype", "=", "[", "(", "\"id\"", ",", "numpy", ".", "int32", ")", ",", "(", "\"name\"", ",", "object", ")", ",", "(", "\"bbox\"", ",", "list", ")", ",", "(", "\"facial\"", ",", "object", ")", ",", "(", "\"pose\"", ",", "object", ")", "]", ")", "for", "i", "in", "range", "(", "total", ")", ":", "arrPos", "[", "i", "]", "[", "\"id\"", "]", "=", "i", "arrPos", "[", "i", "]", "[", "\"name\"", "]", "=", "data", ".", "getImageName", "(", "i", ")", "arrPos", "[", "i", "]", "[", "\"bbox\"", "]", "=", "data", ".", "getBBox", "(", "i", ")", "if", "pose", ":", "arrPos", "[", "i", "]", "[", "\"pose\"", "]", "=", "data", ".", "getPose", "(", "i", ")", "if", "facial", ":", "arrPos", "[", "i", "]", "[", "\"facial\"", "]", "=", "data", ".", "getFacial", "(", "i", ")", "return", "arrPos" ]
https://github.com/bairdzhang/smallhardface/blob/76fa1d87a9602d9b13d7a7fe693fc7aec91cab80/external/marcopede-face-eval-f2870fd85d48/database.py#L121-L143
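getRecord builds a numpy structured array with object fields; the core pattern in isolation (object is used for the bbox column here; older numpy also accepted the bare list type seen in the record):

import numpy as np

arr = np.zeros(2, dtype=[('id', np.int32), ('name', object), ('bbox', object)])
arr[0] = (0, 'img_000.jpg', [[10, 10, 50, 50]])
arr[1] = (1, 'img_001.jpg', [[5, 5, 20, 20], [30, 30, 60, 60]])
print(arr['name'])  # ['img_000.jpg' 'img_001.jpg']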