Column schema (stringlengths give min..max values):

    nwo                 stringlengths   5..86
    sha                 stringlengths  40..40
    path                stringlengths   4..189
    language            stringclasses  1 value
    identifier          stringlengths   1..94
    parameters          stringlengths   2..4.03k
    argument_list       stringclasses  1 value
    return_statement    stringlengths   0..11.5k
    docstring           stringlengths   1..33.2k
    docstring_summary   stringlengths   0..5.15k
    docstring_tokens    sequence
    function            stringlengths  34..151k
    function_tokens     sequence
    url                 stringlengths  90..278
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_misc.py
python
PlatformInformation.GetOperatingSystemFamilyName
(*args, **kwargs)
return _misc_.PlatformInformation_GetOperatingSystemFamilyName(*args, **kwargs)
GetOperatingSystemFamilyName(self) -> String
GetOperatingSystemFamilyName(self) -> String
[ "GetOperatingSystemFamilyName", "(", "self", ")", "-", ">", "String" ]
def GetOperatingSystemFamilyName(*args, **kwargs):
    """GetOperatingSystemFamilyName(self) -> String"""
    return _misc_.PlatformInformation_GetOperatingSystemFamilyName(*args, **kwargs)
[ "def", "GetOperatingSystemFamilyName", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "PlatformInformation_GetOperatingSystemFamilyName", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L1109-L1111
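A minimal usage sketch for the record above, assuming a wxPython Classic install where this SWIG wrapper is exposed as wx.PlatformInformation (the wx.App object is created only because wx requires one before most calls):

    import wx

    app = wx.App(False)  # wx needs an App instance before most calls
    info = wx.PlatformInformation()
    # Delegates to _misc_.PlatformInformation_GetOperatingSystemFamilyName.
    print(info.GetOperatingSystemFamilyName())  # e.g. "Windows" on MSW builds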
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/build/waf-1.7.13/waflib/Scripting.py
python
autoconfigure
(execute_method)
return execute
Decorator used to set the commands that can be configured automatically
Decorator used to set the commands that can be configured automatically
[ "Decorator", "used", "to", "set", "the", "commands", "that", "can", "be", "configured", "automatically" ]
def autoconfigure(execute_method):
    """
    Decorator used to set the commands that can be configured automatically
    """
    def execute(self):
        if not Configure.autoconfig:
            return execute_method(self)
        env = ConfigSet.ConfigSet()
        do_config = False
        if self.root.find_node(self.cache_dir) == None:
            do_config = True
        else:
            try:
                env.load(os.path.join(Context.lock_dir, Options.lockfile))
            except Exception:
                Logs.warn('Configuring the project')
                do_config = True
            else:
                if env.run_dir != Context.run_dir:
                    do_config = True
                else:
                    h = 0
                    for f in env['files']:
                        try:
                            h = Utils.h_list((h, Utils.readf(f, 'rb')))
                        except (IOError, EOFError):
                            pass  # ignore missing files (will cause a rerun cause of the changed hash)
                    do_config = h != env.hash
        if do_config and 'configure' not in Options.commands:
            Options.commands.insert(0, self.cmd)
            Options.commands.insert(0, 'configure')
            self.skip_finish_message = True
            return
        return execute_method(self)
    return execute
[ "def", "autoconfigure", "(", "execute_method", ")", ":", "def", "execute", "(", "self", ")", ":", "if", "not", "Configure", ".", "autoconfig", ":", "return", "execute_method", "(", "self", ")", "env", "=", "ConfigSet", ".", "ConfigSet", "(", ")", "do_config", "=", "False", "if", "self", ".", "root", ".", "find_node", "(", "self", ".", "cache_dir", ")", "==", "None", ":", "do_config", "=", "True", "else", ":", "try", ":", "env", ".", "load", "(", "os", ".", "path", ".", "join", "(", "Context", ".", "lock_dir", ",", "Options", ".", "lockfile", ")", ")", "except", "Exception", ":", "Logs", ".", "warn", "(", "'Configuring the project'", ")", "do_config", "=", "True", "else", ":", "if", "env", ".", "run_dir", "!=", "Context", ".", "run_dir", ":", "do_config", "=", "True", "else", ":", "h", "=", "0", "for", "f", "in", "env", "[", "'files'", "]", ":", "try", ":", "h", "=", "Utils", ".", "h_list", "(", "(", "h", ",", "Utils", ".", "readf", "(", "f", ",", "'rb'", ")", ")", ")", "except", "(", "IOError", ",", "EOFError", ")", ":", "pass", "# ignore missing files (will cause a rerun cause of the changed hash)", "do_config", "=", "h", "!=", "env", ".", "hash", "if", "do_config", "and", "'configure'", "not", "in", "Options", ".", "commands", ":", "Options", ".", "commands", ".", "insert", "(", "0", ",", "self", ".", "cmd", ")", "Options", ".", "commands", ".", "insert", "(", "0", ",", "'configure'", ")", "self", ".", "skip_finish_message", "=", "True", "return", "return", "execute_method", "(", "self", ")", "return", "execute" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/waflib/Scripting.py#L660-L697
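The decorator's core trick is easy to miss in the flattened source: instead of configuring inline, it re-queues the current command behind a 'configure' command and returns. A stripped-down, self-contained sketch of that control flow, with waf's Context/Options machinery replaced by a plain list (all names below are illustrative stand-ins, not waf's API):

    commands = []       # hypothetical stand-in for waf's Options.commands queue
    configured = False  # stand-in for "is the cached configuration still valid?"

    def autoconfigure(execute_method):
        """Re-queue 'configure' ahead of the wrapped command when needed."""
        def execute(self):
            if not configured:
                commands.insert(0, self.cmd)     # run this command again afterwards
                commands.insert(0, "configure")  # ...but configure first
                return                           # bail out without building
            return execute_method(self)
        return execute

    class BuildContext:
        cmd = "build"

        @autoconfigure
        def execute(self):
            print("building")

    BuildContext().execute()
    print(commands)  # ['configure', 'build'] -- a scheduler would drain this queue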
lawy623/SVS
b7c7ae367c82a4797ff4a896a2ff304f02e7f724
caffe/scripts/cpp_lint.py
python
_Filters
()
return _cpplint_state.filters
Returns the module's list of output filters, as a list.
Returns the module's list of output filters, as a list.
[ "Returns", "the", "module", "s", "list", "of", "output", "filters", "as", "a", "list", "." ]
def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters
[ "def", "_Filters", "(", ")", ":", "return", "_cpplint_state", ".", "filters" ]
https://github.com/lawy623/SVS/blob/b7c7ae367c82a4797ff4a896a2ff304f02e7f724/caffe/scripts/cpp_lint.py#L792-L794
pmq20/node-packer
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
current/tools/inspector_protocol/jinja2/visitor.py
python
NodeTransformer.visit_list
(self, node, *args, **kwargs)
return rv
As transformers may return lists in some places this method can be used to enforce a list as return value.
As transformers may return lists in some places this method can be used to enforce a list as return value.
[ "As", "transformers", "may", "return", "lists", "in", "some", "places", "this", "method", "can", "be", "used", "to", "enforce", "a", "list", "as", "return", "value", "." ]
def visit_list(self, node, *args, **kwargs):
    """As transformers may return lists in some places this method
    can be used to enforce a list as return value.
    """
    rv = self.visit(node, *args, **kwargs)
    if not isinstance(rv, list):
        rv = [rv]
    return rv
[ "def", "visit_list", "(", "self", ",", "node", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "rv", "=", "self", ".", "visit", "(", "node", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "not", "isinstance", "(", "rv", ",", "list", ")", ":", "rv", "=", "[", "rv", "]", "return", "rv" ]
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/current/tools/inspector_protocol/jinja2/visitor.py#L80-L87
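The pattern here, normalizing a maybe-list return value into a definite list, is independent of Jinja2 and can be checked in isolation. A small sketch (the Visitor stand-in below is invented for illustration, not Jinja2's class):

    class Visitor:
        """Toy visitor whose visit() sometimes returns a node, sometimes a list."""
        def visit(self, node, *args, **kwargs):
            return node

        def visit_list(self, node, *args, **kwargs):
            # Same normalization as the Jinja2 method above.
            rv = self.visit(node, *args, **kwargs)
            if not isinstance(rv, list):
                rv = [rv]
            return rv

    v = Visitor()
    print(v.visit_list("node"))      # ['node']
    print(v.visit_list(["a", "b"]))  # ['a', 'b']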
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/lib/mixins.py
python
_inplace_binary_method
(ufunc, name)
return func
Implement an in-place binary method with a ufunc, e.g., __iadd__.
Implement an in-place binary method with a ufunc, e.g., __iadd__.
[ "Implement", "an", "in", "-", "place", "binary", "method", "with", "a", "ufunc", "e", ".", "g", ".", "__iadd__", "." ]
def _inplace_binary_method(ufunc, name):
    """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
    def func(self, other):
        return ufunc(self, other, out=(self,))
    func.__name__ = '__i{}__'.format(name)
    return func
[ "def", "_inplace_binary_method", "(", "ufunc", ",", "name", ")", ":", "def", "func", "(", "self", ",", "other", ")", ":", "return", "ufunc", "(", "self", ",", "other", ",", "out", "=", "(", "self", ",", ")", ")", "func", ".", "__name__", "=", "'__i{}__'", ".", "format", "(", "name", ")", "return", "func" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/lib/mixins.py#L36-L41
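To see what the generated method does, here is a hedged sketch that builds an __iadd__-style function from np.add and calls it directly on an array, rather than installing it on a mixin class as NumPy does; the ufunc(self, other, out=(self,)) call pattern is the same as in the record:

    import numpy as np

    def _inplace_binary_method(ufunc, name):
        """Same factory as in the record: build an in-place method from a ufunc."""
        def func(self, other):
            return ufunc(self, other, out=(self,))
        func.__name__ = '__i{}__'.format(name)
        return func

    iadd = _inplace_binary_method(np.add, 'add')
    a = np.array([1.0, 2.0, 3.0])
    result = iadd(a, 10.0)  # writes into `a` via out=(self,)
    print(result is a, a)   # True [11. 12. 13.]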
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/python2_version/klampt/math/optimize.py
python
OptimizationProblemBuilder.inequalitySatisfiedSymbolic
(self,soft=False)
return res <= 0
Returns a symbolic.Expression, over variables in self.context, that evaluates to True if the inequality constraint is met
Returns a symbolic.Expression, over variables in self.context, that evaluates to True if the inequality constraint is met
[ "Returns", "a", "symbolic", ".", "Expression", "over", "variables", "in", "self", ".", "context", "that", "evaluates", "to", "True", "if", "the", "inequality", "constraint", "is", "met" ]
def inequalitySatisfiedSymbolic(self,soft=False):
    """Returns a symbolic.Expression, over variables in self.context, that
    evaluates to True if the inequality constraint is met"""
    res = self.inequalityResidualSymbolic(soft)
    if res is None:
        return None
    return res <= 0
[ "def", "inequalitySatisfiedSymbolic", "(", "self", ",", "soft", "=", "False", ")", ":", "res", "=", "self", ".", "inequalityResidualSymbolic", "(", "soft", ")", "if", "res", "is", "None", ":", "return", "None", "return", "res", "<=", "0" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/math/optimize.py#L1074-L1079
llvm/llvm-project
ffa6262cb4e2a335d26416fad39a581b4f98c5f4
bolt/utils/llvm-bolt-wrapper.py
python
preprocess_args
(args: argparse.Namespace)
return {key: value for key, value in vars(args).items() if value}
Drop options that weren't parsed (e.g. -w), convert to a dict
Drop options that weren't parsed (e.g. -w), convert to a dict
[ "Drop", "options", "that", "weren", "t", "parsed", "(", "e", ".", "g", ".", "-", "w", ")", "convert", "to", "a", "dict" ]
def preprocess_args(args: argparse.Namespace) -> Mapping[AnyStr, AnyStr]:
    '''
    Drop options that weren't parsed (e.g. -w), convert to a dict
    '''
    return {key: value for key, value in vars(args).items() if value}
[ "def", "preprocess_args", "(", "args", ":", "argparse", ".", "Namespace", ")", "->", "Mapping", "[", "AnyStr", ",", "AnyStr", "]", ":", "return", "{", "key", ":", "value", "for", "key", ",", "value", "in", "vars", "(", "args", ")", ".", "items", "(", ")", "if", "value", "}" ]
https://github.com/llvm/llvm-project/blob/ffa6262cb4e2a335d26416fad39a581b4f98c5f4/bolt/utils/llvm-bolt-wrapper.py#L144-L148
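Because the body is a single dict comprehension, its effect is easy to demonstrate: options that parsed to a falsy value (None, False, empty string) are dropped. A minimal sketch with an invented parser, not llvm-bolt-wrapper's actual argument set:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output')           # stays None if not passed
    parser.add_argument('-w', action='store_true')  # False unless passed
    parser.add_argument('input')

    args = parser.parse_args(['in.bolt'])
    # Same comprehension as in the record: falsy values are filtered out.
    print({key: value for key, value in vars(args).items() if value})
    # {'input': 'in.bolt'}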
BVLC/caffe
9b891540183ddc834a02b2bd81b31afae71b2153
scripts/cpp_lint.py
python
CheckForCopyright
(filename, lines, error)
Logs an error if a Copyright message appears at the top of the file.
Logs an error if a Copyright message appears at the top of the file.
[ "Logs", "an", "error", "if", "a", "Copyright", "message", "appears", "at", "the", "top", "of", "the", "file", "." ]
def CheckForCopyright(filename, lines, error):
  """Logs an error if a Copyright message appears at the top of the file."""

  # We'll check up to line 10. Don't forget there's a
  # dummy line at the front.
  for line in xrange(1, min(len(lines), 11)):
    if _RE_COPYRIGHT.search(lines[line], re.I):
      error(filename, 0, 'legal/copyright', 5,
            'Copyright message found. '
            'You should not include a copyright line.')
[ "def", "CheckForCopyright", "(", "filename", ",", "lines", ",", "error", ")", ":", "# We'll check up to line 10. Don't forget there's a", "# dummy line at the front.", "for", "line", "in", "xrange", "(", "1", ",", "min", "(", "len", "(", "lines", ")", ",", "11", ")", ")", ":", "if", "_RE_COPYRIGHT", ".", "search", "(", "lines", "[", "line", "]", ",", "re", ".", "I", ")", ":", "error", "(", "filename", ",", "0", ",", "'legal/copyright'", ",", "5", ",", "'Copyright message found. '", "'You should not include a copyright line.'", ")" ]
https://github.com/BVLC/caffe/blob/9b891540183ddc834a02b2bd81b31afae71b2153/scripts/cpp_lint.py#L1376-L1385
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/tkinter/__init__.py
python
Variable.trace_variable
(self, mode, callback)
return cbname
Define a trace callback for the variable. MODE is one of "r", "w", "u" for read, write, undefine. CALLBACK must be a function which is called when the variable is read, written or undefined. Return the name of the callback. This deprecated method wraps a deprecated Tcl method that will likely be removed in the future. Use trace_add() instead.
Define a trace callback for the variable.
[ "Define", "a", "trace", "callback", "for", "the", "variable", "." ]
def trace_variable(self, mode, callback):
    """Define a trace callback for the variable.

    MODE is one of "r", "w", "u" for read, write, undefine.
    CALLBACK must be a function which is called when
    the variable is read, written or undefined.

    Return the name of the callback.

    This deprecated method wraps a deprecated Tcl method that will
    likely be removed in the future.  Use trace_add() instead.
    """
    # TODO: Add deprecation warning
    cbname = self._register(callback)
    self._tk.call("trace", "variable", self._name, mode, cbname)
    return cbname
[ "def", "trace_variable", "(", "self", ",", "mode", ",", "callback", ")", ":", "# TODO: Add deprecation warning", "cbname", "=", "self", ".", "_register", "(", "callback", ")", "self", ".", "_tk", ".", "call", "(", "\"trace\"", ",", "\"variable\"", ",", "self", ".", "_name", ",", "mode", ",", "cbname", ")", "return", "cbname" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/tkinter/__init__.py#L407-L422
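The docstring itself points at the replacement API. A short sketch of the modern trace_add equivalent of trace_variable(self, "w", callback); note it needs a display, since tkinter variables require a Tk root:

    import tkinter as tk

    root = tk.Tk()  # requires a display
    var = tk.StringVar(root)

    def on_write(varname, index, mode):
        print('variable written:', var.get())

    # Modern replacement for trace_variable("w", callback):
    var.trace_add('write', on_write)
    var.set('hello')  # triggers on_write
    root.destroy()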
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/keras/_impl/keras/preprocessing/text.py
python
Tokenizer.sequences_to_matrix
(self, sequences, mode='binary')
return x
Converts a list of sequences into a Numpy matrix. Arguments: sequences: list of sequences (a sequence is a list of integer word indices). mode: one of "binary", "count", "tfidf", "freq" Returns: A Numpy matrix. Raises: ValueError: In case of invalid `mode` argument, or if the Tokenizer requires to be fit to sample data.
Converts a list of sequences into a Numpy matrix.
[ "Converts", "a", "list", "of", "sequences", "into", "a", "Numpy", "matrix", "." ]
def sequences_to_matrix(self, sequences, mode='binary'):
  """Converts a list of sequences into a Numpy matrix.

  Arguments:
      sequences: list of sequences (a sequence is a list of integer word
        indices).
      mode: one of "binary", "count", "tfidf", "freq"

  Returns:
      A Numpy matrix.

  Raises:
      ValueError: In case of invalid `mode` argument,
          or if the Tokenizer requires to be fit to sample data.
  """
  if not self.num_words:
    if self.word_index:
      num_words = len(self.word_index) + 1
    else:
      raise ValueError('Specify a dimension (num_words argument), '
                       'or fit on some text data first.')
  else:
    num_words = self.num_words

  if mode == 'tfidf' and not self.document_count:
    raise ValueError('Fit the Tokenizer on some data '
                     'before using tfidf mode.')

  x = np.zeros((len(sequences), num_words))
  for i, seq in enumerate(sequences):
    if not seq:
      continue
    counts = {}
    for j in seq:
      if j >= num_words:
        continue
      if j not in counts:
        counts[j] = 1.
      else:
        counts[j] += 1
    for j, c in list(counts.items()):
      if mode == 'count':
        x[i][j] = c
      elif mode == 'freq':
        x[i][j] = c / len(seq)
      elif mode == 'binary':
        x[i][j] = 1
      elif mode == 'tfidf':
        # Use weighting scheme 2 in
        # https://en.wikipedia.org/wiki/Tf%E2%80%93idf
        tf = 1 + np.log(c)
        idf = np.log(1 + self.document_count /
                     (1 + self.index_docs.get(j, 0)))
        x[i][j] = tf * idf
      else:
        raise ValueError('Unknown vectorization mode:', mode)
  return x
[ "def", "sequences_to_matrix", "(", "self", ",", "sequences", ",", "mode", "=", "'binary'", ")", ":", "if", "not", "self", ".", "num_words", ":", "if", "self", ".", "word_index", ":", "num_words", "=", "len", "(", "self", ".", "word_index", ")", "+", "1", "else", ":", "raise", "ValueError", "(", "'Specify a dimension (num_words argument), '", "'or fit on some text data first.'", ")", "else", ":", "num_words", "=", "self", ".", "num_words", "if", "mode", "==", "'tfidf'", "and", "not", "self", ".", "document_count", ":", "raise", "ValueError", "(", "'Fit the Tokenizer on some data '", "'before using tfidf mode.'", ")", "x", "=", "np", ".", "zeros", "(", "(", "len", "(", "sequences", ")", ",", "num_words", ")", ")", "for", "i", ",", "seq", "in", "enumerate", "(", "sequences", ")", ":", "if", "not", "seq", ":", "continue", "counts", "=", "{", "}", "for", "j", "in", "seq", ":", "if", "j", ">=", "num_words", ":", "continue", "if", "j", "not", "in", "counts", ":", "counts", "[", "j", "]", "=", "1.", "else", ":", "counts", "[", "j", "]", "+=", "1", "for", "j", ",", "c", "in", "list", "(", "counts", ".", "items", "(", ")", ")", ":", "if", "mode", "==", "'count'", ":", "x", "[", "i", "]", "[", "j", "]", "=", "c", "elif", "mode", "==", "'freq'", ":", "x", "[", "i", "]", "[", "j", "]", "=", "c", "/", "len", "(", "seq", ")", "elif", "mode", "==", "'binary'", ":", "x", "[", "i", "]", "[", "j", "]", "=", "1", "elif", "mode", "==", "'tfidf'", ":", "# Use weighting scheme 2 in", "# https://en.wikipedia.org/wiki/Tf%E2%80%93idf", "tf", "=", "1", "+", "np", ".", "log", "(", "c", ")", "idf", "=", "np", ".", "log", "(", "1", "+", "self", ".", "document_count", "/", "(", "1", "+", "self", ".", "index_docs", ".", "get", "(", "j", ",", "0", ")", ")", ")", "x", "[", "i", "]", "[", "j", "]", "=", "tf", "*", "idf", "else", ":", "raise", "ValueError", "(", "'Unknown vectorization mode:'", ",", "mode", ")", "return", "x" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/keras/_impl/keras/preprocessing/text.py#L266-L322
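The nested loops above reduce to a simple bag-of-words accumulation. A pure-NumPy sketch of just the 'count' mode, outside of Keras, with invented toy sequences:

    import numpy as np

    sequences = [[1, 2, 2], [3, 1]]  # toy word-index sequences
    num_words = 4                    # assume indices 0..3 are in-vocabulary

    x = np.zeros((len(sequences), num_words))
    for i, seq in enumerate(sequences):
        for j in seq:
            if j < num_words:        # out-of-range indices are skipped, as above
                x[i][j] += 1
    print(x)
    # [[0. 1. 2. 0.]
    #  [0. 1. 0. 1.]]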
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py3/pkg_resources/_vendor/pyparsing.py
python
ParseResults.asList
( self )
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
Returns the parse results as a nested list of matching tokens, all converted to strings. Example:: patt = OneOrMore(Word(alphas)) result = patt.parseString("sldkj lsdkj sldkj") # even though the result prints in string-like form, it is actually a pyparsing ParseResults print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] # Use asList() to create an actual list result_list = result.asList() print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
Returns the parse results as a nested list of matching tokens, all converted to strings.
[ "Returns", "the", "parse", "results", "as", "a", "nested", "list", "of", "matching", "tokens", "all", "converted", "to", "strings", "." ]
def asList( self ):
    """
    Returns the parse results as a nested list of matching tokens, all converted to strings.

    Example::
        patt = OneOrMore(Word(alphas))
        result = patt.parseString("sldkj lsdkj sldkj")

        # even though the result prints in string-like form, it is actually a pyparsing ParseResults
        print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']

        # Use asList() to create an actual list
        result_list = result.asList()
        print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
    """
    return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
[ "def", "asList", "(", "self", ")", ":", "return", "[", "res", ".", "asList", "(", ")", "if", "isinstance", "(", "res", ",", "ParseResults", ")", "else", "res", "for", "res", "in", "self", ".", "__toklist", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/pkg_resources/_vendor/pyparsing.py#L704-L718
POV-Ray/povray
76a804d18a30a1dbb0afbc0070b62526715571eb
tools/meta-make/bluenoise/BlueNoise.py
python
StoreNDTextureHDR
(Array,OutputFilePath)
This function stores the given unsigned integer array in a minimalist binary file format. The last dimension is interpreted as corresponding to the channels of the image. The file format consists of a sequence of unsigned, least significant bit first 32-bit integers. The contained data is described below: - Version: File format version, should be 1. - nChannel: The number of color channels in the image. This should be a value between 1 (greyscale) and 4 (RGBA). - nDimension: The number of dimensions of the stored array, i.e. the number of indices required to uniquely identify one pixel, voxel, etc.. - Shape[nDimension]: nDimension integers providing the size of the array along each dimension. By convention the first dimension is height, second width and third depth. - Data[Shape[0]*...*Shape[nDimension-1]*nChannel]: The uncompressed data of the array. The channels are unrolled first, followed by all dimensions in reverse order. Thus, an RG image of size 3*2 would be stored in the following order: 00R, 00G, 01R, 01G, 10R, 10G, 11R, 11G, 20R, 20G, 21R, 21G
This function stores the given unsigned integer array in a minimalist binary file format. The last dimension is interpreted as corresponding to the channels of the image. The file format consists of a sequence of unsigned, least significant bit first 32-bit integers. The contained data is described below: - Version: File format version, should be 1. - nChannel: The number of color channels in the image. This should be a value between 1 (greyscale) and 4 (RGBA). - nDimension: The number of dimensions of the stored array, i.e. the number of indices required to uniquely identify one pixel, voxel, etc.. - Shape[nDimension]: nDimension integers providing the size of the array along each dimension. By convention the first dimension is height, second width and third depth. - Data[Shape[0]*...*Shape[nDimension-1]*nChannel]: The uncompressed data of the array. The channels are unrolled first, followed by all dimensions in reverse order. Thus, an RG image of size 3*2 would be stored in the following order: 00R, 00G, 01R, 01G, 10R, 10G, 11R, 11G, 20R, 20G, 21R, 21G
[ "This", "function", "stores", "the", "given", "unsigned", "integer", "array", "in", "a", "minimalist", "binary", "file", "format", ".", "The", "last", "dimension", "is", "interpreted", "as", "corresponding", "to", "the", "channels", "of", "the", "image", ".", "The", "file", "format", "consists", "of", "a", "sequence", "of", "unsigned", "least", "significant", "bit", "first", "32", "-", "bit", "integers", ".", "The", "contained", "data", "is", "described", "below", ":", "-", "Version", ":", "File", "format", "version", "should", "be", "1", ".", "-", "nChannel", ":", "The", "number", "of", "color", "channels", "in", "the", "image", ".", "This", "should", "be", "a", "value", "between", "1", "(", "greyscale", ")", "and", "4", "(", "RGBA", ")", ".", "-", "nDimension", ":", "The", "number", "of", "dimensions", "of", "the", "stored", "array", "i", ".", "e", ".", "the", "number", "of", "indices", "required", "to", "uniquely", "identify", "one", "pixel", "voxel", "etc", "..", "-", "Shape", "[", "nDimension", "]", ":", "nDimension", "integers", "providing", "the", "size", "of", "the", "array", "along", "each", "dimension", ".", "By", "convention", "the", "first", "dimension", "is", "height", "second", "width", "and", "third", "depth", ".", "-", "Data", "[", "Shape", "[", "0", "]", "*", "...", "*", "Shape", "[", "nDimension", "-", "1", "]", "*", "nChannel", "]", ":", "The", "uncompressed", "data", "of", "the", "array", ".", "The", "channels", "are", "unrolled", "first", "followed", "by", "all", "dimensions", "in", "reverse", "order", ".", "Thus", "an", "RG", "image", "of", "size", "3", "*", "2", "would", "be", "stored", "in", "the", "following", "order", ":", "00R", "00G", "01R", "01G", "10R", "10G", "11R", "11G", "20R", "20G", "21R", "21G" ]
def StoreNDTextureHDR(Array,OutputFilePath):
    """This function stores the given unsigned integer array in a minimalist binary
       file format. The last dimension is interpreted as corresponding to the
       channels of the image. The file format consists of a sequence of unsigned,
       least significant bit first 32-bit integers. The contained data is
       described below:
       - Version: File format version, should be 1.
       - nChannel: The number of color channels in the image. This should be a
         value between 1 (greyscale) and 4 (RGBA).
       - nDimension: The number of dimensions of the stored array, i.e. the
         number of indices required to uniquely identify one pixel, voxel, etc..
       - Shape[nDimension]: nDimension integers providing the size of the array
         along each dimension. By convention the first dimension is height,
         second width and third depth.
       - Data[Shape[0]*...*Shape[nDimension-1]*nChannel]: The uncompressed data
         of the array. The channels are unrolled first, followed by all
         dimensions in reverse order. Thus, an RG image of size 3*2 would be
         stored in the following order: 00R, 00G, 01R, 01G, 10R, 10G, 11R, 11G,
         20R, 20G, 21R, 21G"""
    # Prepare all the meta data and the data itself
    Array=np.asarray(Array,dtype=np.uint32);
    Version=1;
    nDimension=len(Array.shape)-1;
    nChannel=Array.shape[nDimension];
    Shape=Array.shape[0:nDimension];
    Data=Array.flatten("C");
    # Write it to the file
    OutputFile=open(OutputFilePath,"wb");
    OutputFile.write(struct.pack("LLL",Version,nChannel,nDimension));
    OutputFile.write(struct.pack("L"*nDimension,*Shape));
    OutputFile.write(struct.pack("L"*np.size(Data),*Data));
    OutputFile.close();
[ "def", "StoreNDTextureHDR", "(", "Array", ",", "OutputFilePath", ")", ":", "# Prepare all the meta data and the data itself", "Array", "=", "np", ".", "asarray", "(", "Array", ",", "dtype", "=", "np", ".", "uint32", ")", "Version", "=", "1", "nDimension", "=", "len", "(", "Array", ".", "shape", ")", "-", "1", "nChannel", "=", "Array", ".", "shape", "[", "nDimension", "]", "Shape", "=", "Array", ".", "shape", "[", "0", ":", "nDimension", "]", "Data", "=", "Array", ".", "flatten", "(", "\"C\"", ")", "# Write it to the file", "OutputFile", "=", "open", "(", "OutputFilePath", ",", "\"wb\"", ")", "OutputFile", ".", "write", "(", "struct", ".", "pack", "(", "\"LLL\"", ",", "Version", ",", "nChannel", ",", "nDimension", ")", ")", "OutputFile", ".", "write", "(", "struct", ".", "pack", "(", "\"L\"", "*", "nDimension", ",", "*", "Shape", ")", ")", "OutputFile", ".", "write", "(", "struct", ".", "pack", "(", "\"L\"", "*", "np", ".", "size", "(", "Data", ")", ",", "*", "Data", ")", ")", "OutputFile", ".", "close", "(", ")" ]
https://github.com/POV-Ray/povray/blob/76a804d18a30a1dbb0afbc0070b62526715571eb/tools/meta-make/bluenoise/BlueNoise.py#L262-L293
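A matching reader is a useful check on the format description. The hypothetical sketch below assumes fixed-width 32-bit little-endian fields ('<I'); note the writer's native 'L' struct format is only 4 bytes on some platforms (it is 8 bytes on most 64-bit Linux builds), so a fixed '<I' on both ends would be the portable choice:

    import struct
    import numpy as np

    def LoadNDTextureHDR(InputFilePath):
        """Hypothetical reader for the format described above (fixed 32-bit fields)."""
        with open(InputFilePath, "rb") as InputFile:
            Version, nChannel, nDimension = struct.unpack("<III", InputFile.read(12))
            assert Version == 1
            Shape = struct.unpack("<" + "I" * nDimension, InputFile.read(4 * nDimension))
            Count = int(np.prod(Shape)) * nChannel
            Data = struct.unpack("<" + "I" * Count, InputFile.read(4 * Count))
        # Channels unroll fastest, so they form the trailing axis.
        return np.asarray(Data, dtype=np.uint32).reshape(Shape + (nChannel,))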
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
scripts/reduction_gui/reduction/sans/eqsans_data_script.py
python
DataSets.reset
(self)
Reset state
Reset state
[ "Reset", "state" ]
def reset(self):
    """
        Reset state
    """
    super(DataSets, self).reset()
    self.background.reset()
[ "def", "reset", "(", "self", ")", ":", "super", "(", "DataSets", ",", "self", ")", ".", "reset", "(", ")", "self", ".", "background", ".", "reset", "(", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/reduction_gui/reduction/sans/eqsans_data_script.py#L22-L27
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib-tk/tkSimpleDialog.py
python
Dialog.__init__
(self, parent, title = None)
Initialize a dialog. Arguments: parent -- a parent window (the application window) title -- the dialog title
Initialize a dialog.
[ "Initialize", "a", "dialog", "." ]
def __init__(self, parent, title = None):
    '''Initialize a dialog.

    Arguments:

        parent -- a parent window (the application window)

        title -- the dialog title
    '''
    Toplevel.__init__(self, parent)

    self.withdraw() # remain invisible for now
    # If the master is not viewable, don't
    # make the child transient, or else it
    # would be opened withdrawn
    if parent.winfo_viewable():
        self.transient(parent)

    if title:
        self.title(title)

    self.parent = parent

    self.result = None

    body = Frame(self)
    self.initial_focus = self.body(body)
    body.pack(padx=5, pady=5)

    self.buttonbox()

    if not self.initial_focus:
        self.initial_focus = self

    self.protocol("WM_DELETE_WINDOW", self.cancel)

    if self.parent is not None:
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
                                  parent.winfo_rooty()+50))

    self.deiconify() # become visibile now

    self.initial_focus.focus_set()

    # wait for window to appear on screen before calling grab_set
    self.wait_visibility()
    self.grab_set()
    self.wait_window(self)
[ "def", "__init__", "(", "self", ",", "parent", ",", "title", "=", "None", ")", ":", "Toplevel", ".", "__init__", "(", "self", ",", "parent", ")", "self", ".", "withdraw", "(", ")", "# remain invisible for now", "# If the master is not viewable, don't", "# make the child transient, or else it", "# would be opened withdrawn", "if", "parent", ".", "winfo_viewable", "(", ")", ":", "self", ".", "transient", "(", "parent", ")", "if", "title", ":", "self", ".", "title", "(", "title", ")", "self", ".", "parent", "=", "parent", "self", ".", "result", "=", "None", "body", "=", "Frame", "(", "self", ")", "self", ".", "initial_focus", "=", "self", ".", "body", "(", "body", ")", "body", ".", "pack", "(", "padx", "=", "5", ",", "pady", "=", "5", ")", "self", ".", "buttonbox", "(", ")", "if", "not", "self", ".", "initial_focus", ":", "self", ".", "initial_focus", "=", "self", "self", ".", "protocol", "(", "\"WM_DELETE_WINDOW\"", ",", "self", ".", "cancel", ")", "if", "self", ".", "parent", "is", "not", "None", ":", "self", ".", "geometry", "(", "\"+%d+%d\"", "%", "(", "parent", ".", "winfo_rootx", "(", ")", "+", "50", ",", "parent", ".", "winfo_rooty", "(", ")", "+", "50", ")", ")", "self", ".", "deiconify", "(", ")", "# become visibile now", "self", ".", "initial_focus", ".", "focus_set", "(", ")", "# wait for window to appear on screen before calling grab_set", "self", ".", "wait_visibility", "(", ")", "self", ".", "grab_set", "(", ")", "self", ".", "wait_window", "(", "self", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib-tk/tkSimpleDialog.py#L37-L86
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/util/tf_inspect.py
python
getsourcelines
(object)
return _inspect.getsourcelines(tf_decorator.unwrap(object)[1])
TFDecorator-aware replacement for inspect.getsourcelines.
TFDecorator-aware replacement for inspect.getsourcelines.
[ "TFDecorator", "-", "aware", "replacement", "for", "inspect", ".", "getsourcelines", "." ]
def getsourcelines(object):  # pylint: disable=redefined-builtin
  """TFDecorator-aware replacement for inspect.getsourcelines."""
  return _inspect.getsourcelines(tf_decorator.unwrap(object)[1])
[ "def", "getsourcelines", "(", "object", ")", ":", "# pylint: disable=redefined-builtin", "return", "_inspect", ".", "getsourcelines", "(", "tf_decorator", ".", "unwrap", "(", "object", ")", "[", "1", "]", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/util/tf_inspect.py#L355-L357
neoml-lib/neoml
a0d370fba05269a1b2258cef126f77bbd2054a3e
NeoML/Python/neoml/Dnn/Solver.py
python
Solver.train
(self)
Modifies the trainable parameters of the network layers, using the accumulated gradients and previous steps' history (moment, etc.).
Modifies the trainable parameters of the network layers, using the accumulated gradients and previous steps' history (moment, etc.).
[ "Modifies", "the", "trainable", "parameters", "of", "the", "network", "layers", "using", "the", "accumulated", "gradients", "and", "previous", "steps", "history", "(", "moment", "etc", ".", ")", "." ]
def train(self):
    """Modifies the trainable parameters of the network layers,
    using the accumulated gradients and previous steps' history (moment, etc.).
    """
    self._internal.train()
[ "def", "train", "(", "self", ")", ":", "self", ".", "_internal", ".", "train", "(", ")" ]
https://github.com/neoml-lib/neoml/blob/a0d370fba05269a1b2258cef126f77bbd2054a3e/NeoML/Python/neoml/Dnn/Solver.py#L30-L34
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2class.py
python
xmlTextReader.MoveToAttribute
(self, name)
return ret
Moves the position of the current instance to the attribute with the specified qualified name.
Moves the position of the current instance to the attribute with the specified qualified name.
[ "Moves", "the", "position", "of", "the", "current", "instance", "to", "the", "attribute", "with", "the", "specified", "qualified", "name", "." ]
def MoveToAttribute(self, name):
    """Moves the position of the current instance to the attribute
       with the specified qualified name. """
    ret = libxml2mod.xmlTextReaderMoveToAttribute(self._o, name)
    return ret
[ "def", "MoveToAttribute", "(", "self", ",", "name", ")", ":", "ret", "=", "libxml2mod", ".", "xmlTextReaderMoveToAttribute", "(", "self", ".", "_o", ",", "name", ")", "return", "ret" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L5898-L5902
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
gpu/command_buffer/build_gles2_cmd_buffer.py
python
ImmediateFunction.WriteValidationCode
(self, file)
Overridden from Function
Overridden from Function
[ "Overridden", "from", "Function" ]
def WriteValidationCode(self, file):
  """Overridden from Function"""
  self.type_handler.WriteImmediateValidationCode(self, file)
[ "def", "WriteValidationCode", "(", "self", ",", "file", ")", ":", "self", ".", "type_handler", ".", "WriteImmediateValidationCode", "(", "self", ",", "file", ")" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/gpu/command_buffer/build_gles2_cmd_buffer.py#L5438-L5440
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/muon_load_data.py
python
MuonLoadData.remove_last_added_data
(self)
Remove the data item before the current one
Remove the data item before the current one
[ "Remove", "the", "data", "item", "before", "the", "current", "one" ]
def remove_last_added_data(self):
    """Remove the data item before the current one"""
    self.remove_nth_last_entry(2)
[ "def", "remove_last_added_data", "(", "self", ")", ":", "self", ".", "remove_nth_last_entry", "(", "2", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/muon_load_data.py#L89-L91
cornell-zhang/heterocl
6d9e4b4acc2ee2707b2d25b27298c0335bccedfd
python/heterocl/tvm/api.py
python
save_json
(node)
return _api_internal._save_json(node)
Load tvm object as json string. Parameters ---------- node : Node A TVM Node object to be saved. Returns ------- json_str : str Saved json string.
Load tvm object as json string.
[ "Load", "tvm", "object", "as", "json", "string", "." ]
def save_json(node):
    """Load tvm object as json string.

    Parameters
    ----------
    node : Node
        A TVM Node object to be saved.

    Returns
    -------
    json_str : str
        Saved json string.
    """
    return _api_internal._save_json(node)
[ "def", "save_json", "(", "node", ")", ":", "return", "_api_internal", ".", "_save_json", "(", "node", ")" ]
https://github.com/cornell-zhang/heterocl/blob/6d9e4b4acc2ee2707b2d25b27298c0335bccedfd/python/heterocl/tvm/api.py#L85-L98
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/s3transfer/processpool.py
python
ProcessPoolDownloader.__init__
(self, client_kwargs=None, config=None)
Downloads S3 objects using process pools :type client_kwargs: dict :param client_kwargs: The keyword arguments to provide when instantiating S3 clients. The arguments must match the keyword arguments provided to the `botocore.session.Session.create_client()` method. :type config: ProcessTransferConfig :param config: Configuration for the downloader
Downloads S3 objects using process pools
[ "Downloads", "S3", "objects", "using", "process", "pools" ]
def __init__(self, client_kwargs=None, config=None):
    """Downloads S3 objects using process pools

    :type client_kwargs: dict
    :param client_kwargs: The keyword arguments to provide when
        instantiating S3 clients. The arguments must match the keyword
        arguments provided to the
        `botocore.session.Session.create_client()` method.

    :type config: ProcessTransferConfig
    :param config: Configuration for the downloader
    """
    if client_kwargs is None:
        client_kwargs = {}
    self._client_factory = ClientFactory(client_kwargs)

    self._transfer_config = config
    if config is None:
        self._transfer_config = ProcessTransferConfig()

    self._download_request_queue = multiprocessing.Queue(1000)
    self._worker_queue = multiprocessing.Queue(1000)
    self._osutil = OSUtils()

    self._started = False
    self._start_lock = threading.Lock()

    # These below are initialized in the start() method
    self._manager = None
    self._transfer_monitor = None
    self._submitter = None
    self._workers = []
[ "def", "__init__", "(", "self", ",", "client_kwargs", "=", "None", ",", "config", "=", "None", ")", ":", "if", "client_kwargs", "is", "None", ":", "client_kwargs", "=", "{", "}", "self", ".", "_client_factory", "=", "ClientFactory", "(", "client_kwargs", ")", "self", ".", "_transfer_config", "=", "config", "if", "config", "is", "None", ":", "self", ".", "_transfer_config", "=", "ProcessTransferConfig", "(", ")", "self", ".", "_download_request_queue", "=", "multiprocessing", ".", "Queue", "(", "1000", ")", "self", ".", "_worker_queue", "=", "multiprocessing", ".", "Queue", "(", "1000", ")", "self", ".", "_osutil", "=", "OSUtils", "(", ")", "self", ".", "_started", "=", "False", "self", ".", "_start_lock", "=", "threading", ".", "Lock", "(", ")", "# These below are initialized in the start() method", "self", ".", "_manager", "=", "None", "self", ".", "_transfer_monitor", "=", "None", "self", ".", "_submitter", "=", "None", "self", ".", "_workers", "=", "[", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/s3transfer/processpool.py#L291-L322
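A hedged usage sketch based on s3transfer's documented pattern; the bucket, key, and filename below are placeholders, and download_file is this class's public entry point:

    from s3transfer.processpool import ProcessPoolDownloader, ProcessTransferConfig

    config = ProcessTransferConfig(max_request_processes=4)
    # The context manager calls start() on entry and shuts the pools down on exit.
    with ProcessPoolDownloader(config=config) as downloader:
        downloader.download_file('my-bucket', 'my-key', 'local-file')  # placeholders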
facebook/bistro
db9eff7e92f5cedcc917a440d5c88064c7980e40
build/fbcode_builder/shell_quoting.py
python
shell_quote
(s)
return ( s if isinstance(s, ShellQuoted) else ShellQuoted("'" + str(s).replace("'", "'\\''") + "'") )
Quotes a string if it is not already quoted
Quotes a string if it is not already quoted
[ "Quotes", "a", "string", "if", "it", "is", "not", "already", "quoted" ]
def shell_quote(s):
    "Quotes a string if it is not already quoted"
    return (
        s
        if isinstance(s, ShellQuoted)
        else ShellQuoted("'" + str(s).replace("'", "'\\''") + "'")
    )
[ "def", "shell_quote", "(", "s", ")", ":", "return", "(", "s", "if", "isinstance", "(", "s", ",", "ShellQuoted", ")", "else", "ShellQuoted", "(", "\"'\"", "+", "str", "(", "s", ")", ".", "replace", "(", "\"'\"", ",", "\"'\\\\''\"", ")", "+", "\"'\"", ")", ")" ]
https://github.com/facebook/bistro/blob/db9eff7e92f5cedcc917a440d5c88064c7980e40/build/fbcode_builder/shell_quoting.py#L68-L74
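The replace("'", "'\\''") step is the standard POSIX idiom: close the single-quoted string, emit an escaped quote, reopen it. A self-contained sketch, with ShellQuoted reduced to a plain str subclass purely for illustration:

    class ShellQuoted(str):
        """Illustrative stand-in for fbcode_builder's ShellQuoted wrapper."""

    def shell_quote(s):
        "Quotes a string if it is not already quoted"
        return (
            s
            if isinstance(s, ShellQuoted)
            else ShellQuoted("'" + str(s).replace("'", "'\\''") + "'")
        )

    print(shell_quote("it's a test"))     # 'it'\''s a test'
    print(shell_quote(shell_quote("x")))  # already ShellQuoted: quoting is not re-applied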
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py2/numpy/f2py/crackfortran.py
python
crackline
(line, reset=0)
reset=-1 --- initialize reset=0 --- crack the line reset=1 --- final check if mismatch of blocks occurred Cracked data is saved in grouplist[0].
reset=-1 --- initialize reset=0 --- crack the line reset=1 --- final check if mismatch of blocks occurred
[ "reset", "=", "-", "1", "---", "initialize", "reset", "=", "0", "---", "crack", "the", "line", "reset", "=", "1", "---", "final", "check", "if", "mismatch", "of", "blocks", "occurred" ]
def crackline(line, reset=0):
    """
    reset=-1  --- initialize
    reset=0   --- crack the line
    reset=1   --- final check if mismatch of blocks occurred

    Cracked data is saved in grouplist[0].
    """
    global beginpattern, groupcounter, groupname, groupcache, grouplist
    global filepositiontext, currentfilename, neededmodule, expectbegin
    global skipblocksuntil, skipemptyends, previous_context, gotnextfile
    _, has_semicolon = split_by_unquoted(line, ";")
    if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
                              multilinepattern[0].match(line)):
        # XXX: non-zero reset values need testing
        assert reset == 0, repr(reset)
        # split line on unquoted semicolons
        line, semicolon_line = split_by_unquoted(line, ";")
        while semicolon_line:
            crackline(line, reset)
            line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
        crackline(line, reset)
        return
    if reset < 0:
        groupcounter = 0
        groupname = {groupcounter: ''}
        groupcache = {groupcounter: {}}
        grouplist = {groupcounter: []}
        groupcache[groupcounter]['body'] = []
        groupcache[groupcounter]['vars'] = {}
        groupcache[groupcounter]['block'] = ''
        groupcache[groupcounter]['name'] = ''
        neededmodule = -1
        skipblocksuntil = -1
        return
    if reset > 0:
        fl = 0
        if f77modulename and neededmodule == groupcounter:
            fl = 2
        while groupcounter > fl:
            outmess('crackline: groupcounter=%s groupname=%s\n' %
                    (repr(groupcounter), repr(groupname)))
            outmess(
                'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1
        if f77modulename and neededmodule == groupcounter:
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1  # end interface
            grouplist[groupcounter - 1].append(groupcache[groupcounter])
            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter = groupcounter - 1  # end module
            neededmodule = -1
        return
    if line == '':
        return
    flag = 0
    for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
                requiredpattern, parameterpattern, datapattern, publicpattern,
                privatepattern, intrisicpattern, endifpattern, endpattern,
                formatpattern, beginpattern, functionpattern, subroutinepattern,
                implicitpattern, typespattern, commonpattern, callpattern,
                usepattern, containspattern, entrypattern,
                f2pyenhancementspattern, multilinepattern]:
        m = pat[0].match(line)
        if m:
            break
        flag = flag + 1
    if not m:
        re_1 = crackline_re_1
        if 0 <= skipblocksuntil <= groupcounter:
            return
        if 'externals' in groupcache[groupcounter]:
            for name in groupcache[groupcounter]['externals']:
                if name in invbadnames:
                    name = invbadnames[name]
                if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
                    continue
                m1 = re.match(
                    r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name,
                    markouterparen(line), re.I)
                if m1:
                    m2 = re_1.match(m1.group('before'))
                    a = _simplifyargs(m1.group('args'))
                    if m2:
                        line = 'callfun %s(%s) result (%s)' % (
                            name, a, m2.group('result'))
                    else:
                        line = 'callfun %s(%s)' % (name, a)
                    m = callfunpattern[0].match(line)
                    if not m:
                        outmess(
                            'crackline: could not resolve function call for line=%s.\n' % repr(line))
                        return
                    analyzeline(m, 'callfun', line)
                    return
        if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
            previous_context = None
            outmess('crackline:%d: No pattern for line\n' % (groupcounter))
        return
    elif pat[1] == 'end':
        if 0 <= skipblocksuntil < groupcounter:
            groupcounter = groupcounter - 1
            if skipblocksuntil <= groupcounter:
                return
        if groupcounter <= 0:
            raise Exception('crackline: groupcounter(=%s) is nonpositive. '
                            'Check the blocks.'
                            % (groupcounter))
        m1 = beginpattern[0].match((line))
        if (m1) and (not m1.group('this') == groupname[groupcounter]):
            raise Exception('crackline: End group %s does not match with '
                            'previous Begin group %s\n\t%s' %
                            (repr(m1.group('this')), repr(groupname[groupcounter]),
                             filepositiontext))
        if skipblocksuntil == groupcounter:
            skipblocksuntil = -1
        grouplist[groupcounter - 1].append(groupcache[groupcounter])
        grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
        del grouplist[groupcounter]
        groupcounter = groupcounter - 1
        if not skipemptyends:
            expectbegin = 1
    elif pat[1] == 'begin':
        if 0 <= skipblocksuntil <= groupcounter:
            groupcounter = groupcounter + 1
            return
        gotnextfile = 0
        analyzeline(m, pat[1], line)
        expectbegin = 0
    elif pat[1] == 'endif':
        pass
    elif pat[1] == 'contains':
        if ignorecontains:
            return
        if 0 <= skipblocksuntil <= groupcounter:
            return
        skipblocksuntil = groupcounter
    else:
        if 0 <= skipblocksuntil <= groupcounter:
            return
        analyzeline(m, pat[1], line)
[ "def", "crackline", "(", "line", ",", "reset", "=", "0", ")", ":", "global", "beginpattern", ",", "groupcounter", ",", "groupname", ",", "groupcache", ",", "grouplist", "global", "filepositiontext", ",", "currentfilename", ",", "neededmodule", ",", "expectbegin", "global", "skipblocksuntil", ",", "skipemptyends", ",", "previous_context", ",", "gotnextfile", "_", ",", "has_semicolon", "=", "split_by_unquoted", "(", "line", ",", "\";\"", ")", "if", "has_semicolon", "and", "not", "(", "f2pyenhancementspattern", "[", "0", "]", ".", "match", "(", "line", ")", "or", "multilinepattern", "[", "0", "]", ".", "match", "(", "line", ")", ")", ":", "# XXX: non-zero reset values need testing", "assert", "reset", "==", "0", ",", "repr", "(", "reset", ")", "# split line on unquoted semicolons", "line", ",", "semicolon_line", "=", "split_by_unquoted", "(", "line", ",", "\";\"", ")", "while", "semicolon_line", ":", "crackline", "(", "line", ",", "reset", ")", "line", ",", "semicolon_line", "=", "split_by_unquoted", "(", "semicolon_line", "[", "1", ":", "]", ",", "\";\"", ")", "crackline", "(", "line", ",", "reset", ")", "return", "if", "reset", "<", "0", ":", "groupcounter", "=", "0", "groupname", "=", "{", "groupcounter", ":", "''", "}", "groupcache", "=", "{", "groupcounter", ":", "{", "}", "}", "grouplist", "=", "{", "groupcounter", ":", "[", "]", "}", "groupcache", "[", "groupcounter", "]", "[", "'body'", "]", "=", "[", "]", "groupcache", "[", "groupcounter", "]", "[", "'vars'", "]", "=", "{", "}", "groupcache", "[", "groupcounter", "]", "[", "'block'", "]", "=", "''", "groupcache", "[", "groupcounter", "]", "[", "'name'", "]", "=", "''", "neededmodule", "=", "-", "1", "skipblocksuntil", "=", "-", "1", "return", "if", "reset", ">", "0", ":", "fl", "=", "0", "if", "f77modulename", "and", "neededmodule", "==", "groupcounter", ":", "fl", "=", "2", "while", "groupcounter", ">", "fl", ":", "outmess", "(", "'crackline: groupcounter=%s groupname=%s\\n'", "%", "(", "repr", "(", "groupcounter", ")", ",", "repr", "(", "groupname", ")", ")", ")", "outmess", "(", "'crackline: Mismatch of blocks encountered. Trying to fix it by assuming \"end\" statement.\\n'", ")", "grouplist", "[", "groupcounter", "-", "1", "]", ".", "append", "(", "groupcache", "[", "groupcounter", "]", ")", "grouplist", "[", "groupcounter", "-", "1", "]", "[", "-", "1", "]", "[", "'body'", "]", "=", "grouplist", "[", "groupcounter", "]", "del", "grouplist", "[", "groupcounter", "]", "groupcounter", "=", "groupcounter", "-", "1", "if", "f77modulename", "and", "neededmodule", "==", "groupcounter", ":", "grouplist", "[", "groupcounter", "-", "1", "]", ".", "append", "(", "groupcache", "[", "groupcounter", "]", ")", "grouplist", "[", "groupcounter", "-", "1", "]", "[", "-", "1", "]", "[", "'body'", "]", "=", "grouplist", "[", "groupcounter", "]", "del", "grouplist", "[", "groupcounter", "]", "groupcounter", "=", "groupcounter", "-", "1", "# end interface", "grouplist", "[", "groupcounter", "-", "1", "]", ".", "append", "(", "groupcache", "[", "groupcounter", "]", ")", "grouplist", "[", "groupcounter", "-", "1", "]", "[", "-", "1", "]", "[", "'body'", "]", "=", "grouplist", "[", "groupcounter", "]", "del", "grouplist", "[", "groupcounter", "]", "groupcounter", "=", "groupcounter", "-", "1", "# end module", "neededmodule", "=", "-", "1", "return", "if", "line", "==", "''", ":", "return", "flag", "=", "0", "for", "pat", "in", "[", "dimensionpattern", ",", "externalpattern", ",", "intentpattern", ",", "optionalpattern", ",", "requiredpattern", ",", "parameterpattern", ",", "datapattern", ",", "publicpattern", ",", "privatepattern", ",", "intrisicpattern", ",", "endifpattern", ",", "endpattern", ",", "formatpattern", ",", "beginpattern", ",", "functionpattern", ",", "subroutinepattern", ",", "implicitpattern", ",", "typespattern", ",", "commonpattern", ",", "callpattern", ",", "usepattern", ",", "containspattern", ",", "entrypattern", ",", "f2pyenhancementspattern", ",", "multilinepattern", "]", ":", "m", "=", "pat", "[", "0", "]", ".", "match", "(", "line", ")", "if", "m", ":", "break", "flag", "=", "flag", "+", "1", "if", "not", "m", ":", "re_1", "=", "crackline_re_1", "if", "0", "<=", "skipblocksuntil", "<=", "groupcounter", ":", "return", "if", "'externals'", "in", "groupcache", "[", "groupcounter", "]", ":", "for", "name", "in", "groupcache", "[", "groupcounter", "]", "[", "'externals'", "]", ":", "if", "name", "in", "invbadnames", ":", "name", "=", "invbadnames", "[", "name", "]", "if", "'interfaced'", "in", "groupcache", "[", "groupcounter", "]", "and", "name", "in", "groupcache", "[", "groupcounter", "]", "[", "'interfaced'", "]", ":", "continue", "m1", "=", "re", ".", "match", "(", "r'(?P<before>[^\"]*)\\b%s\\b\\s*@\\(@(?P<args>[^@]*)@\\)@.*\\Z'", "%", "name", ",", "markouterparen", "(", "line", ")", ",", "re", ".", "I", ")", "if", "m1", ":", "m2", "=", "re_1", ".", "match", "(", "m1", ".", "group", "(", "'before'", ")", ")", "a", "=", "_simplifyargs", "(", "m1", ".", "group", "(", "'args'", ")", ")", "if", "m2", ":", "line", "=", "'callfun %s(%s) result (%s)'", "%", "(", "name", ",", "a", ",", "m2", ".", "group", "(", "'result'", ")", ")", "else", ":", "line", "=", "'callfun %s(%s)'", "%", "(", "name", ",", "a", ")", "m", "=", "callfunpattern", "[", "0", "]", ".", "match", "(", "line", ")", "if", "not", "m", ":", "outmess", "(", "'crackline: could not resolve function call for line=%s.\\n'", "%", "repr", "(", "line", ")", ")", "return", "analyzeline", "(", "m", ",", "'callfun'", ",", "line", ")", "return", "if", "verbose", ">", "1", "or", "(", "verbose", "==", "1", "and", "currentfilename", ".", "lower", "(", ")", ".", "endswith", "(", "'.pyf'", ")", ")", ":", "previous_context", "=", "None", "outmess", "(", "'crackline:%d: No pattern for line\\n'", "%", "(", "groupcounter", ")", ")", "return", "elif", "pat", "[", "1", "]", "==", "'end'", ":", "if", "0", "<=", "skipblocksuntil", "<", "groupcounter", ":", "groupcounter", "=", "groupcounter", "-", "1", "if", "skipblocksuntil", "<=", "groupcounter", ":", "return", "if", "groupcounter", "<=", "0", ":", "raise", "Exception", "(", "'crackline: groupcounter(=%s) is nonpositive. '", "'Check the blocks.'", "%", "(", "groupcounter", ")", ")", "m1", "=", "beginpattern", "[", "0", "]", ".", "match", "(", "(", "line", ")", ")", "if", "(", "m1", ")", "and", "(", "not", "m1", ".", "group", "(", "'this'", ")", "==", "groupname", "[", "groupcounter", "]", ")", ":", "raise", "Exception", "(", "'crackline: End group %s does not match with '", "'previous Begin group %s\\n\\t%s'", "%", "(", "repr", "(", "m1", ".", "group", "(", "'this'", ")", ")", ",", "repr", "(", "groupname", "[", "groupcounter", "]", ")", ",", "filepositiontext", ")", ")", "if", "skipblocksuntil", "==", "groupcounter", ":", "skipblocksuntil", "=", "-", "1", "grouplist", "[", "groupcounter", "-", "1", "]", ".", "append", "(", "groupcache", "[", "groupcounter", "]", ")", "grouplist", "[", "groupcounter", "-", "1", "]", "[", "-", "1", "]", "[", "'body'", "]", "=", "grouplist", "[", "groupcounter", "]", "del", "grouplist", "[", "groupcounter", "]", "groupcounter", "=", "groupcounter", "-", "1", "if", "not", "skipemptyends", ":", "expectbegin", "=", "1", "elif", "pat", "[", "1", "]", "==", "'begin'", ":", "if", "0", "<=", "skipblocksuntil", "<=", "groupcounter", ":", "groupcounter", "=", "groupcounter", "+", "1", "return", "gotnextfile", "=", "0", "analyzeline", "(", "m", ",", "pat", "[", "1", "]", ",", "line", ")", "expectbegin", "=", "0", "elif", "pat", "[", "1", "]", "==", "'endif'", ":", "pass", "elif", "pat", "[", "1", "]", "==", "'contains'", ":", "if", "ignorecontains", ":", "return", "if", "0", "<=", "skipblocksuntil", "<=", "groupcounter", ":", "return", "skipblocksuntil", "=", "groupcounter", "else", ":", "if", "0", "<=", "skipblocksuntil", "<=", "groupcounter", ":", "return", "analyzeline", "(", "m", ",", "pat", "[", "1", "]", ",", "line", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/f2py/crackfortran.py#L643-L796
lmb-freiburg/flownet2
b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc
scripts/cpp_lint.py
python
ParseNolintSuppressions
(filename, raw_line, linenum, error)
Updates the global list of error-suppressions. Parses any NOLINT comments on the current line, updating the global error_suppressions store. Reports an error if the NOLINT comment was malformed. Args: filename: str, the name of the input file. raw_line: str, the line of input text, with comments. linenum: int, the number of the current line. error: function, an error handler.
Updates the global list of error-suppressions.
[ "Updates", "the", "global", "list", "of", "error", "-", "suppressions", "." ]
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store.  Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
  matched = _RE_SUPPRESSION.search(raw_line)
  if matched:
    if matched.group(1) == '_NEXT_LINE':
      linenum += 1
    category = matched.group(2)
    if category in (None, '(*)'):  # => "suppress all"
      _error_suppressions.setdefault(None, set()).add(linenum)
    else:
      if category.startswith('(') and category.endswith(')'):
        category = category[1:-1]
        if category in _ERROR_CATEGORIES:
          _error_suppressions.setdefault(category, set()).add(linenum)
        else:
          error(filename, linenum, 'readability/nolint', 5,
                'Unknown NOLINT error category: %s' % category)
[ "def", "ParseNolintSuppressions", "(", "filename", ",", "raw_line", ",", "linenum", ",", "error", ")", ":", "# FIXME(adonovan): \"NOLINT(\" is misparsed as NOLINT(*).", "matched", "=", "_RE_SUPPRESSION", ".", "search", "(", "raw_line", ")", "if", "matched", ":", "if", "matched", ".", "group", "(", "1", ")", "==", "'_NEXT_LINE'", ":", "linenum", "+=", "1", "category", "=", "matched", ".", "group", "(", "2", ")", "if", "category", "in", "(", "None", ",", "'(*)'", ")", ":", "# => \"suppress all\"", "_error_suppressions", ".", "setdefault", "(", "None", ",", "set", "(", ")", ")", ".", "add", "(", "linenum", ")", "else", ":", "if", "category", ".", "startswith", "(", "'('", ")", "and", "category", ".", "endswith", "(", "')'", ")", ":", "category", "=", "category", "[", "1", ":", "-", "1", "]", "if", "category", "in", "_ERROR_CATEGORIES", ":", "_error_suppressions", ".", "setdefault", "(", "category", ",", "set", "(", ")", ")", ".", "add", "(", "linenum", ")", "else", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/nolint'", ",", "5", ",", "'Unknown NOLINT error category: %s'", "%", "category", ")" ]
https://github.com/lmb-freiburg/flownet2/blob/b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc/scripts/cpp_lint.py#L464-L492
openvinotoolkit/openvino
dedcbeafa8b84cccdc55ca64b8da516682b381c7
src/bindings/python/src/openvino/runtime/opset3/ops.py
python
scatter_elements_update
( data: NodeInput, indices: NodeInput, updates: NodeInput, axis: NodeInput, name: Optional[str] = None, )
return _get_node_factory_opset3().create( "ScatterElementsUpdate", as_nodes(data, indices, updates, axis) )
Return a node which produces a ScatterElementsUpdate operation. @param data: The input tensor to be updated. @param indices: The tensor with indexes which will be updated. @param updates: The tensor with update values. @param axis: The axis for scatter. @return ScatterElementsUpdate node ScatterElementsUpdate creates a copy of the first input tensor with updated elements specified with second and third input tensors. For each entry in `updates`, the target index in `data` is obtained by combining the corresponding entry in `indices` with the index of the entry itself: the index-value for dimension equal to `axis` is obtained from the value of the corresponding entry in `indices` and the index-value for dimension not equal to `axis` is obtained from the index of the entry itself.
Return a node which produces a ScatterElementsUpdate operation.
[ "Return", "a", "node", "which", "produces", "a", "ScatterElementsUpdate", "operation", "." ]
def scatter_elements_update(
    data: NodeInput,
    indices: NodeInput,
    updates: NodeInput,
    axis: NodeInput,
    name: Optional[str] = None,
) -> Node:
    """Return a node which produces a ScatterElementsUpdate operation.

    @param data: The input tensor to be updated.
    @param indices: The tensor with indexes which will be updated.
    @param updates: The tensor with update values.
    @param axis: The axis for scatter.
    @return ScatterElementsUpdate node

    ScatterElementsUpdate creates a copy of the first input tensor with updated
    elements specified with second and third input tensors.

    For each entry in `updates`, the target index in `data` is obtained by
    combining the corresponding entry in `indices` with the index of the entry
    itself: the index-value for dimension equal to `axis` is obtained from the
    value of the corresponding entry in `indices` and the index-value for
    dimension not equal to `axis` is obtained from the index of the entry itself.
    """
    return _get_node_factory_opset3().create(
        "ScatterElementsUpdate", as_nodes(data, indices, updates, axis)
    )
[ "def", "scatter_elements_update", "(", "data", ":", "NodeInput", ",", "indices", ":", "NodeInput", ",", "updates", ":", "NodeInput", ",", "axis", ":", "NodeInput", ",", "name", ":", "Optional", "[", "str", "]", "=", "None", ",", ")", "->", "Node", ":", "return", "_get_node_factory_opset3", "(", ")", ".", "create", "(", "\"ScatterElementsUpdate\"", ",", "as_nodes", "(", "data", ",", "indices", ",", "updates", ",", "axis", ")", ")" ]
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/src/bindings/python/src/openvino/runtime/opset3/ops.py#L484-L511
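The scatter semantics described in the docstring above are easy to check against a small NumPy model. A minimal sketch, assuming made-up shapes; the helper name scatter_elements_update_ref is illustrative and is not part of the OpenVINO API:

    import numpy as np

    def scatter_elements_update_ref(data, indices, updates, axis=0):
        # Copy `data`; each entry of `updates` lands at the entry's own index,
        # except along `axis`, where the index is taken from `indices`.
        out = data.copy()
        for idx in np.ndindex(indices.shape):
            target = list(idx)
            target[axis] = indices[idx]
            out[tuple(target)] = updates[idx]
        return out

    data = np.zeros((3, 3))
    indices = np.array([[1, 0, 2]])
    updates = np.array([[1.0, 1.1, 1.2]])
    # With axis=0, updates[0, j] is written to row indices[0, j], column j.
    print(scatter_elements_update_ref(data, indices, updates, axis=0))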
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/config/expandlibs_exec.py
python
ExpandArgsMore.makelist
(self)
Replaces object file names with a temporary list file, using a list format depending on the EXPAND_LIBS_LIST_STYLE variable
Replaces object file names with a temporary list file, using a list format depending on the EXPAND_LIBS_LIST_STYLE variable
[ "Replaces", "object", "file", "names", "with", "a", "temporary", "list", "file", "using", "a", "list", "format", "depending", "on", "the", "EXPAND_LIBS_LIST_STYLE", "variable" ]
def makelist(self): '''Replaces object file names with a temporary list file, using a list format depending on the EXPAND_LIBS_LIST_STYLE variable ''' objs = [o for o in self if isObject(o)] if not len(objs): return fd, tmp = tempfile.mkstemp(suffix=".list",dir=os.curdir) if conf.EXPAND_LIBS_LIST_STYLE == "linkerscript": content = ['INPUT("%s")\n' % obj for obj in objs] ref = tmp elif conf.EXPAND_LIBS_LIST_STYLE == "filelist": content = ["%s\n" % obj for obj in objs] ref = "-Wl,-filelist," + tmp elif conf.EXPAND_LIBS_LIST_STYLE == "list": content = ["%s\n" % obj for obj in objs] ref = "@" + tmp else: os.close(fd) os.remove(tmp) return self.tmp.append(tmp) f = os.fdopen(fd, "w") f.writelines(content) f.close() idx = self.index(objs[0]) newlist = self[0:idx] + [ref] + [item for item in self[idx:] if item not in objs] self[0:] = newlist
[ "def", "makelist", "(", "self", ")", ":", "objs", "=", "[", "o", "for", "o", "in", "self", "if", "isObject", "(", "o", ")", "]", "if", "not", "len", "(", "objs", ")", ":", "return", "fd", ",", "tmp", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "\".list\"", ",", "dir", "=", "os", ".", "curdir", ")", "if", "conf", ".", "EXPAND_LIBS_LIST_STYLE", "==", "\"linkerscript\"", ":", "content", "=", "[", "'INPUT(\"%s\")\\n'", "%", "obj", "for", "obj", "in", "objs", "]", "ref", "=", "tmp", "elif", "conf", ".", "EXPAND_LIBS_LIST_STYLE", "==", "\"filelist\"", ":", "content", "=", "[", "\"%s\\n\"", "%", "obj", "for", "obj", "in", "objs", "]", "ref", "=", "\"-Wl,-filelist,\"", "+", "tmp", "elif", "conf", ".", "EXPAND_LIBS_LIST_STYLE", "==", "\"list\"", ":", "content", "=", "[", "\"%s\\n\"", "%", "obj", "for", "obj", "in", "objs", "]", "ref", "=", "\"@\"", "+", "tmp", "else", ":", "os", ".", "close", "(", "fd", ")", "os", ".", "remove", "(", "tmp", ")", "return", "self", ".", "tmp", ".", "append", "(", "tmp", ")", "f", "=", "os", ".", "fdopen", "(", "fd", ",", "\"w\"", ")", "f", ".", "writelines", "(", "content", ")", "f", ".", "close", "(", ")", "idx", "=", "self", ".", "index", "(", "objs", "[", "0", "]", ")", "newlist", "=", "self", "[", "0", ":", "idx", "]", "+", "[", "ref", "]", "+", "[", "item", "for", "item", "in", "self", "[", "idx", ":", "]", "if", "item", "not", "in", "objs", "]", "self", "[", "0", ":", "]", "=", "newlist" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/config/expandlibs_exec.py#L120-L146
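The three EXPAND_LIBS_LIST_STYLE branches in makelist() differ only in the list-file syntax and in how the file is referenced on the link command line. A standalone sketch of the three formats, with illustrative object names and list path (this is not part of the build system itself):

    # Mirror of the three branches above, outside the ExpandArgsMore class.
    objs = ["a.o", "b.o", "c.o"]

    def render(style, tmp="objs.list"):
        if style == "linkerscript":   # GNU ld script, referenced by plain path
            return ['INPUT("%s")\n' % o for o in objs], tmp
        elif style == "filelist":     # Darwin ld, referenced via -Wl,-filelist,
            return ["%s\n" % o for o in objs], "-Wl,-filelist," + tmp
        elif style == "list":         # response file, referenced as @file
            return ["%s\n" % o for o in objs], "@" + tmp
        raise ValueError(style)

    for style in ("linkerscript", "filelist", "list"):
        content, ref = render(style)
        print(style, "->", ref, "|", " ".join(line.strip() for line in content))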
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/customtreectrl.py
python
CustomTreeCtrl.SetIndent
(self, indent)
Sets the indentation for :class:`CustomTreeCtrl`. :param integer `indent`: an integer representing the indentation for the items in the tree.
Sets the indentation for :class:`CustomTreeCtrl`.
[ "Sets", "the", "indentation", "for", ":", "class", ":", "CustomTreeCtrl", "." ]
def SetIndent(self, indent): """ Sets the indentation for :class:`CustomTreeCtrl`. :param integer `indent`: an integer representing the indentation for the items in the tree. """ self._indent = indent self._dirty = True
[ "def", "SetIndent", "(", "self", ",", "indent", ")", ":", "self", ".", "_indent", "=", "indent", "self", ".", "_dirty", "=", "True" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/customtreectrl.py#L3415-L3423
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/rnn/python/ops/rnn_cell.py
python
IndyGRUCell.call
(self, inputs, state)
return new_h, new_h
Recurrently independent Gated Recurrent Unit (GRU) with nunits cells.
Recurrently independent Gated Recurrent Unit (GRU) with nunits cells.
[ "Recurrently", "independent", "Gated", "Recurrent", "Unit", "(", "GRU", ")", "with", "nunits", "cells", "." ]
def call(self, inputs, state): """Recurrently independent Gated Recurrent Unit (GRU) with nunits cells.""" gate_inputs = math_ops.matmul(inputs, self._gate_kernel_w) + ( gen_array_ops.tile(state, [1, 2]) * self._gate_kernel_u) gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias) value = math_ops.sigmoid(gate_inputs) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state candidate = math_ops.matmul(inputs, self._candidate_kernel_w) + ( r_state * self._candidate_kernel_u) candidate = nn_ops.bias_add(candidate, self._candidate_bias) c = self._activation(candidate) new_h = u * state + (1 - u) * c return new_h, new_h
[ "def", "call", "(", "self", ",", "inputs", ",", "state", ")", ":", "gate_inputs", "=", "math_ops", ".", "matmul", "(", "inputs", ",", "self", ".", "_gate_kernel_w", ")", "+", "(", "gen_array_ops", ".", "tile", "(", "state", ",", "[", "1", ",", "2", "]", ")", "*", "self", ".", "_gate_kernel_u", ")", "gate_inputs", "=", "nn_ops", ".", "bias_add", "(", "gate_inputs", ",", "self", ".", "_gate_bias", ")", "value", "=", "math_ops", ".", "sigmoid", "(", "gate_inputs", ")", "r", ",", "u", "=", "array_ops", ".", "split", "(", "value", "=", "value", ",", "num_or_size_splits", "=", "2", ",", "axis", "=", "1", ")", "r_state", "=", "r", "*", "state", "candidate", "=", "math_ops", ".", "matmul", "(", "inputs", ",", "self", ".", "_candidate_kernel_w", ")", "+", "(", "r_state", "*", "self", ".", "_candidate_kernel_u", ")", "candidate", "=", "nn_ops", ".", "bias_add", "(", "candidate", ",", "self", ".", "_candidate_bias", ")", "c", "=", "self", ".", "_activation", "(", "candidate", ")", "new_h", "=", "u", "*", "state", "+", "(", "1", "-", "u", ")", "*", "c", "return", "new_h", ",", "new_h" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/rnn/python/ops/rnn_cell.py#L3257-L3275
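The cell body above is a small set of elementwise equations, so it can be mirrored in NumPy. A minimal sketch with made-up sizes, random weights, and tanh standing in for self._activation (not the TensorFlow implementation); the point to notice is that the recurrent kernels ug and uc multiply the state elementwise, one weight per unit, which is what makes the cell independently recurrent:

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def indy_gru_step(x, h, Wg, ug, bg, Wc, uc, bc):
        gates = sigmoid(x @ Wg + np.tile(h, 2) * ug + bg)  # r and u stacked
        r, u = np.split(gates, 2, axis=-1)
        c = np.tanh(x @ Wc + (r * h) * uc + bc)            # candidate state
        return u * h + (1 - u) * c                         # new hidden state

    n_in, n_units = 4, 3
    rng = np.random.default_rng(0)
    x, h = rng.normal(size=(1, n_in)), np.zeros((1, n_units))
    h = indy_gru_step(x, h,
                      rng.normal(size=(n_in, 2 * n_units)),  # gate kernel W
                      rng.normal(size=2 * n_units),          # gate kernel u (per unit)
                      np.zeros(2 * n_units),                 # gate bias
                      rng.normal(size=(n_in, n_units)),      # candidate kernel W
                      rng.normal(size=n_units),              # candidate kernel u
                      np.zeros(n_units))                     # candidate bias
    print(h.shape)  # (1, 3)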
panda3d/panda3d
833ad89ebad58395d0af0b7ec08538e5e4308265
direct/src/directnotify/Notifier.py
python
Notifier.warning
(self, warningString)
return 1
Issue the warning message if warn flag is on
Issue the warning message if warn flag is on
[ "Issue", "the", "warning", "message", "if", "warn", "flag", "is", "on" ]
def warning(self, warningString): """ Issue the warning message if warn flag is on """ if self.__warning: message = str(warningString) if Notifier.showTime.getValue(): string = (self.getTime() + self.__name + '(warning): ' + message) else: string = (":" + self.__name + '(warning): ' + message) self.__log(string) self.__print(string) return 1
[ "def", "warning", "(", "self", ",", "warningString", ")", ":", "if", "self", ".", "__warning", ":", "message", "=", "str", "(", "warningString", ")", "if", "Notifier", ".", "showTime", ".", "getValue", "(", ")", ":", "string", "=", "(", "self", ".", "getTime", "(", ")", "+", "self", ".", "__name", "+", "'(warning): '", "+", "message", ")", "else", ":", "string", "=", "(", "\":\"", "+", "self", ".", "__name", "+", "'(warning): '", "+", "message", ")", "self", ".", "__log", "(", "string", ")", "self", ".", "__print", "(", "string", ")", "return", "1" ]
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/directnotify/Notifier.py#L133-L145
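Typical use of this method goes through a named Notifier category. A short example, assuming a Panda3D installation; the category name "demo" is made up:

    from direct.directnotify.DirectNotifyGlobal import directNotify

    notify = directNotify.newCategory("demo")
    notify.warning("disk cache is cold")
    # With showTime off this prints: :demo(warning): disk cache is cold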
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/generator/android.py
python
AndroidMkWriter.ComputeOutput
(self, spec)
return os.path.join(path, self.ComputeOutputBasename(spec))
Return the 'output' (full output path) of a gyp spec. E.g., the loadable module 'foobar' in directory 'baz' will produce '$(obj)/baz/libfoobar.so'
Return the 'output' (full output path) of a gyp spec.
[ "Return", "the", "output", "(", "full", "output", "path", ")", "of", "a", "gyp", "spec", "." ]
def ComputeOutput(self, spec): """Return the 'output' (full output path) of a gyp spec. E.g., the loadable module 'foobar' in directory 'baz' will produce '$(obj)/baz/libfoobar.so' """ if self.type == 'executable' and self.toolset == 'host': # We install host executables into shared_intermediate_dir so they can be # run by gyp rules that refer to PRODUCT_DIR. path = '$(gyp_shared_intermediate_dir)' elif self.type == 'shared_library': if self.toolset == 'host': path = '$(HOST_OUT_INTERMEDIATE_LIBRARIES)' else: path = '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)' else: # Other targets just get built into their intermediate dir. if self.toolset == 'host': path = '$(call intermediates-dir-for,%s,%s,true)' % (self.android_class, self.android_module) else: path = '$(call intermediates-dir-for,%s,%s)' % (self.android_class, self.android_module) assert spec.get('product_dir') is None # TODO: not supported? return os.path.join(path, self.ComputeOutputBasename(spec))
[ "def", "ComputeOutput", "(", "self", ",", "spec", ")", ":", "if", "self", ".", "type", "==", "'executable'", "and", "self", ".", "toolset", "==", "'host'", ":", "# We install host executables into shared_intermediate_dir so they can be", "# run by gyp rules that refer to PRODUCT_DIR.", "path", "=", "'$(gyp_shared_intermediate_dir)'", "elif", "self", ".", "type", "==", "'shared_library'", ":", "if", "self", ".", "toolset", "==", "'host'", ":", "path", "=", "'$(HOST_OUT_INTERMEDIATE_LIBRARIES)'", "else", ":", "path", "=", "'$(TARGET_OUT_INTERMEDIATE_LIBRARIES)'", "else", ":", "# Other targets just get built into their intermediate dir.", "if", "self", ".", "toolset", "==", "'host'", ":", "path", "=", "'$(call intermediates-dir-for,%s,%s,true)'", "%", "(", "self", ".", "android_class", ",", "self", ".", "android_module", ")", "else", ":", "path", "=", "'$(call intermediates-dir-for,%s,%s)'", "%", "(", "self", ".", "android_class", ",", "self", ".", "android_module", ")", "assert", "spec", ".", "get", "(", "'product_dir'", ")", "is", "None", "# TODO: not supported?", "return", "os", ".", "path", ".", "join", "(", "path", ",", "self", ".", "ComputeOutputBasename", "(", "spec", ")", ")" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/generator/android.py#L658-L683
hpi-xnor/BMXNet
ed0b201da6667887222b8e4b5f997c4f6b61943d
python/mxnet/module/python_module.py
python
PythonModule.update_metric
(self, eval_metric, labels)
Evaluates and accumulates evaluation metric on outputs of the last forward computation. Subclass should override this method if needed. Parameters ---------- eval_metric : EvalMetric labels : list of NDArray Typically ``data_batch.label``.
Evaluates and accumulates evaluation metric on outputs of the last forward computation. Subclass should override this method if needed.
[ "Evaluates", "and", "accumulates", "evaluation", "metric", "on", "outputs", "of", "the", "last", "forward", "computation", ".", "Subclass", "should", "override", "this", "method", "if", "needed", "." ]
def update_metric(self, eval_metric, labels): """Evaluates and accumulates evaluation metric on outputs of the last forward computation. Subclass should override this method if needed. Parameters ---------- eval_metric : EvalMetric labels : list of NDArray Typically ``data_batch.label``. """ if self._label_shapes is None: # since we do not need labels, we are probably not a module with a loss # function or predictions, so just ignore this call return # by default we expect our outputs are some scores that could be evaluated eval_metric.update(labels, self.get_outputs())
[ "def", "update_metric", "(", "self", ",", "eval_metric", ",", "labels", ")", ":", "if", "self", ".", "_label_shapes", "is", "None", ":", "# since we do not need labels, we are probably not a module with a loss", "# function or predictions, so just ignore this call", "return", "# by default we expect our outputs are some scores that could be evaluated", "eval_metric", ".", "update", "(", "labels", ",", "self", ".", "get_outputs", "(", ")", ")" ]
https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/python/mxnet/module/python_module.py#L141-L157
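The contract this method relies on is EvalMetric.update(labels, outputs). A small example with a concrete metric, assuming MXNet is installed:

    import mxnet as mx

    metric = mx.metric.Accuracy()
    labels = [mx.nd.array([1, 0])]                      # ground truth
    outputs = [mx.nd.array([[0.2, 0.8], [0.9, 0.1]])]   # per-class scores
    metric.update(labels, outputs)                      # argmax of rows: [1, 0]
    print(metric.get())                                 # ('accuracy', 1.0)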
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
Sizer.RemovePos
(self, *args, **kw)
return self.Remove(*args, **kw)
Compatibility alias for `Remove`.
Compatibility alias for `Remove`.
[ "Compatibility", "alias", "for", "Remove", "." ]
def RemovePos(self, *args, **kw): """Compatibility alias for `Remove`.""" return self.Remove(*args, **kw)
[ "def", "RemovePos", "(", "self", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "self", ".", "Remove", "(", "*", "args", ",", "*", "*", "kw", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L14744-L14746
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/logging/__init__.py
python
captureWarnings
(capture)
If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations.
If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations.
[ "If", "capture", "is", "true", "redirect", "all", "warnings", "to", "the", "logging", "package", ".", "If", "capture", "is", "False", "ensure", "that", "warnings", "are", "not", "redirected", "to", "logging", "but", "to", "their", "original", "destinations", "." ]
def captureWarnings(capture): """ If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations. """ global _warnings_showwarning if capture: if _warnings_showwarning is None: _warnings_showwarning = warnings.showwarning warnings.showwarning = _showwarning else: if _warnings_showwarning is not None: warnings.showwarning = _warnings_showwarning _warnings_showwarning = None
[ "def", "captureWarnings", "(", "capture", ")", ":", "global", "_warnings_showwarning", "if", "capture", ":", "if", "_warnings_showwarning", "is", "None", ":", "_warnings_showwarning", "=", "warnings", ".", "showwarning", "warnings", ".", "showwarning", "=", "_showwarning", "else", ":", "if", "_warnings_showwarning", "is", "not", "None", ":", "warnings", ".", "showwarning", "=", "_warnings_showwarning", "_warnings_showwarning", "=", "None" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/logging/__init__.py#L1737-L1751
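A self-contained demonstration of the toggle, using only the standard library. Once capture is on, warnings are emitted through the 'py.warnings' logger instead of going straight to stderr:

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    logging.captureWarnings(True)        # warnings now flow through logging
    warnings.warn("deprecated flag")     # handled by the 'py.warnings' logger

    logging.captureWarnings(False)       # original warnings.showwarning restored
    warnings.warn("back to stderr")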
eclipse/sumo
7132a9b8b6eea734bdec38479026b4d8c4336d03
tools/contributed/sumopy/coremodules/simulation/sumo.py
python
SumoTraci._init_special
(self, **kwargs)
Special initializations. To be overridden.
Special initializations. To be overridden.
[ "Special", "initializations", ".", "To", "be", "overridden", "." ]
def _init_special(self, **kwargs): """ Special initializations. To be overridden. """ pass
[ "def", "_init_special", "(", "self", ",", "*", "*", "kwargs", ")", ":", "pass" ]
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/coremodules/simulation/sumo.py#L1230-L1234
Tencent/CMONGO
c40380caa14e05509f46993aa8b8da966b09b0b5
src/third_party/wiredtiger/src/docs/tools/doxypy.py
python
Doxypy.parse
(self, input)
return "\n".join(self.output)
Parses a python file given as input string and returns the doxygen-compatible representation. @param input the python code to parse @returns the modified python code
Parses a python file given as input string and returns the doxygen-compatible representation.
[ "Parses", "a", "python", "file", "given", "as", "input", "string", "and", "returns", "the", "doxygen", "-", "compatible", "representation", "." ]
def parse(self, input): """Parses a python file given as input string and returns the doxygen-compatible representation. @param input the python code to parse @returns the modified python code """ lines = input.split("\n") for line in lines: self.fsm.makeTransition(line) if self.fsm.current_state == "DEFCLASS": self.__closeComment() return "\n".join(self.output)
[ "def", "parse", "(", "self", ",", "input", ")", ":", "lines", "=", "input", ".", "split", "(", "\"\\n\"", ")", "for", "line", "in", "lines", ":", "self", ".", "fsm", ".", "makeTransition", "(", "line", ")", "if", "self", ".", "fsm", ".", "current_state", "==", "\"DEFCLASS\"", ":", "self", ".", "__closeComment", "(", ")", "return", "\"\\n\"", ".", "join", "(", "self", ".", "output", ")" ]
https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/wiredtiger/src/docs/tools/doxypy.py#L339-L354
turi-code/SFrame
796b9bdfb2fa1b881d82080754643c7e68629cd2
oss_src/unity/python/sframe/data_structures/sframe.py
python
get_spark_integration_jar_path
()
return BINARY_PATHS['RDD_JAR_PATH']
The absolute path of the jar file required to enable GraphLab Create's integration with Apache Spark.
The absolute path of the jar file required to enable GraphLab Create's integration with Apache Spark.
[ "The", "absolute", "path", "of", "the", "jar", "file", "required", "to", "enable", "GraphLab", "Create", "s", "integration", "with", "Apache", "Spark", "." ]
def get_spark_integration_jar_path(): """ The absolute path of the jar file required to enable GraphLab Create's integration with Apache Spark. """ if 'RDD_JAR_PATH' not in BINARY_PATHS: raise RuntimeError("Could not find a spark integration jar. "\ "Does your version of GraphLab Create support Spark Integration (is it >= 1.0)?") return BINARY_PATHS['RDD_JAR_PATH']
[ "def", "get_spark_integration_jar_path", "(", ")", ":", "if", "'RDD_JAR_PATH'", "not", "in", "BINARY_PATHS", ":", "raise", "RuntimeError", "(", "\"Could not find a spark integration jar. \"", "\"Does your version of GraphLab Create support Spark Integration (is it >= 1.0)?\"", ")", "return", "BINARY_PATHS", "[", "'RDD_JAR_PATH'", "]" ]
https://github.com/turi-code/SFrame/blob/796b9bdfb2fa1b881d82080754643c7e68629cd2/oss_src/unity/python/sframe/data_structures/sframe.py#L111-L119
weolar/miniblink49
1c4678db0594a4abde23d3ebbcc7cd13c3170777
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/misc.py
python
Hasher.update
(self, v)
Add `v` to the hash, recursively if needed.
Add `v` to the hash, recursively if needed.
[ "Add", "v", "to", "the", "hash", "recursively", "if", "needed", "." ]
def update(self, v): """Add `v` to the hash, recursively if needed.""" self.md5.update(to_bytes(str(type(v)))) if isinstance(v, string_class): self.md5.update(to_bytes(v)) elif isinstance(v, (int, float)): self.update(str(v)) elif isinstance(v, (tuple, list)): for e in v: self.update(e) elif isinstance(v, dict): keys = v.keys() for k in sorted(keys): self.update(k) self.update(v[k]) else: for k in dir(v): if k.startswith('__'): continue a = getattr(v, k) if inspect.isroutine(a): continue self.update(k) self.update(a)
[ "def", "update", "(", "self", ",", "v", ")", ":", "self", ".", "md5", ".", "update", "(", "to_bytes", "(", "str", "(", "type", "(", "v", ")", ")", ")", ")", "if", "isinstance", "(", "v", ",", "string_class", ")", ":", "self", ".", "md5", ".", "update", "(", "to_bytes", "(", "v", ")", ")", "elif", "isinstance", "(", "v", ",", "(", "int", ",", "float", ")", ")", ":", "self", ".", "update", "(", "str", "(", "v", ")", ")", "elif", "isinstance", "(", "v", ",", "(", "tuple", ",", "list", ")", ")", ":", "for", "e", "in", "v", ":", "self", ".", "update", "(", "e", ")", "elif", "isinstance", "(", "v", ",", "dict", ")", ":", "keys", "=", "v", ".", "keys", "(", ")", "for", "k", "in", "sorted", "(", "keys", ")", ":", "self", ".", "update", "(", "k", ")", "self", ".", "update", "(", "v", "[", "k", "]", ")", "else", ":", "for", "k", "in", "dir", "(", "v", ")", ":", "if", "k", ".", "startswith", "(", "'__'", ")", ":", "continue", "a", "=", "getattr", "(", "v", ",", "k", ")", "if", "inspect", ".", "isroutine", "(", "a", ")", ":", "continue", "self", ".", "update", "(", "k", ")", "self", ".", "update", "(", "a", ")" ]
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/misc.py#L91-L114
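The same recursive-hashing idea in a compact, self-contained form. A simplified sketch that drops the Python 2 to_bytes/string_class shims from the original and hashes strings, numbers, sequences, dicts, and arbitrary objects' public data attributes:

    import hashlib
    import inspect

    class MiniHasher:
        def __init__(self):
            self.md5 = hashlib.md5()

        def update(self, v):
            self.md5.update(str(type(v)).encode())
            if isinstance(v, str):
                self.md5.update(v.encode())
            elif isinstance(v, (int, float)):
                self.update(str(v))
            elif isinstance(v, (tuple, list)):
                for e in v:
                    self.update(e)
            elif isinstance(v, dict):
                for k in sorted(v):   # sorted so dict order cannot change the hash
                    self.update(k)
                    self.update(v[k])
            else:                     # object: public, non-callable attributes
                for k in dir(v):
                    if k.startswith('__'):
                        continue
                    a = getattr(v, k)
                    if not inspect.isroutine(a):
                        self.update(k)
                        self.update(a)

    h = MiniHasher()
    h.update({"b": [1, 2.5], "a": ("x", "y")})
    print(h.md5.hexdigest())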
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
third_party/jinja2/environment.py
python
copy_cache
(cache)
return LRUCache(cache.capacity)
Create an empty copy of the given cache.
Create an empty copy of the given cache.
[ "Create", "an", "empty", "copy", "of", "the", "given", "cache", "." ]
def copy_cache(cache): """Create an empty copy of the given cache.""" if cache is None: return None elif type(cache) is dict: return {} return LRUCache(cache.capacity)
[ "def", "copy_cache", "(", "cache", ")", ":", "if", "cache", "is", "None", ":", "return", "None", "elif", "type", "(", "cache", ")", "is", "dict", ":", "return", "{", "}", "return", "LRUCache", "(", "cache", ".", "capacity", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/jinja2/environment.py#L69-L75
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/richtext.py
python
RichTextCtrl.GetSelectionAnchor
(*args, **kwargs)
return _richtext.RichTextCtrl_GetSelectionAnchor(*args, **kwargs)
GetSelectionAnchor(self) -> long
GetSelectionAnchor(self) -> long
[ "GetSelectionAnchor", "(", "self", ")", "-", ">", "long" ]
def GetSelectionAnchor(*args, **kwargs): """GetSelectionAnchor(self) -> long""" return _richtext.RichTextCtrl_GetSelectionAnchor(*args, **kwargs)
[ "def", "GetSelectionAnchor", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextCtrl_GetSelectionAnchor", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/richtext.py#L3069-L3071
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/gdb_hook.py
python
hook_gdb_breakpoint
()
return impl
Adds the Numba break point into the source
Adds the Numba break point into the source
[ "Adds", "the", "Numba", "break", "point", "into", "the", "source" ]
def hook_gdb_breakpoint(): """ Adds the Numba break point into the source """ if not sys.platform.startswith('linux'): raise RuntimeError('gdb is only available on linux') bp_impl = gen_bp_impl() def impl(): bp_impl() return impl
[ "def", "hook_gdb_breakpoint", "(", ")", ":", "if", "not", "sys", ".", "platform", ".", "startswith", "(", "'linux'", ")", ":", "raise", "RuntimeError", "(", "'gdb is only available on linux'", ")", "bp_impl", "=", "gen_bp_impl", "(", ")", "def", "impl", "(", ")", ":", "bp_impl", "(", ")", "return", "impl" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/gdb_hook.py#L193-L203
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/distributed/fleet/dataset/dataset.py
python
BoxPSDataset.end_pass
(self, need_save_delta)
End Pass Notify BoxPS that current pass ended Examples: .. code-block:: python import paddle dataset = paddle.distributed.fleet.BoxPSDataset() dataset.end_pass(True)
End Pass Notify BoxPS that current pass ended Examples: .. code-block:: python
[ "End", "Pass", "Notify", "BoxPS", "that", "current", "pass", "ended", "Examples", ":", "..", "code", "-", "block", "::", "python" ]
def end_pass(self, need_save_delta): """ End Pass Notify BoxPS that current pass ended Examples: .. code-block:: python import paddle dataset = paddle.distributed.fleet.BoxPSDataset() dataset.end_pass(True) """ self.boxps.end_pass(need_save_delta)
[ "def", "end_pass", "(", "self", ",", "need_save_delta", ")", ":", "self", ".", "boxps", ".", "end_pass", "(", "need_save_delta", ")" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/distributed/fleet/dataset/dataset.py#L1452-L1463
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/jedi/jedi/evaluate/stdlib.py
python
argument_clinic
(string, want_obj=False, want_context=False, want_arguments=False)
return f
Works like Argument Clinic (PEP 436), to validate function params.
Works like Argument Clinic (PEP 436), to validate function params.
[ "Works", "like", "Argument", "Clinic", "(", "PEP", "436", ")", "to", "validate", "function", "params", "." ]
def argument_clinic(string, want_obj=False, want_context=False, want_arguments=False): """ Works like Argument Clinic (PEP 436), to validate function params. """ def f(func): @repack_with_argument_clinic(string, keep_arguments_param=True) def wrapper(evaluator, obj, *args, **kwargs): arguments = kwargs.pop('arguments') assert not kwargs # Python 2... debug.dbg('builtin start %s' % obj, color='MAGENTA') result = NO_CONTEXTS if want_context: kwargs['context'] = arguments.context if want_obj: kwargs['obj'] = obj if want_arguments: kwargs['arguments'] = arguments result = func(evaluator, *args, **kwargs) debug.dbg('builtin end: %s', result, color='MAGENTA') return result return wrapper return f
[ "def", "argument_clinic", "(", "string", ",", "want_obj", "=", "False", ",", "want_context", "=", "False", ",", "want_arguments", "=", "False", ")", ":", "def", "f", "(", "func", ")", ":", "@", "repack_with_argument_clinic", "(", "string", ",", "keep_arguments_param", "=", "True", ")", "def", "wrapper", "(", "evaluator", ",", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "arguments", "=", "kwargs", ".", "pop", "(", "'arguments'", ")", "assert", "not", "kwargs", "# Python 2...", "debug", ".", "dbg", "(", "'builtin start %s'", "%", "obj", ",", "color", "=", "'MAGENTA'", ")", "result", "=", "NO_CONTEXTS", "if", "want_context", ":", "kwargs", "[", "'context'", "]", "=", "arguments", ".", "context", "if", "want_obj", ":", "kwargs", "[", "'obj'", "]", "=", "obj", "if", "want_arguments", ":", "kwargs", "[", "'arguments'", "]", "=", "arguments", "result", "=", "func", "(", "evaluator", ",", "*", "args", ",", "*", "*", "kwargs", ")", "debug", ".", "dbg", "(", "'builtin end: %s'", ",", "result", ",", "color", "=", "'MAGENTA'", ")", "return", "result", "return", "wrapper", "return", "f" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/jedi/jedi/evaluate/stdlib.py#L85-L108
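The decorator factory above ultimately injects optional keyword arguments (obj, context, arguments) into the wrapped handler. A simplified sketch of that injection pattern; it deliberately skips jedi's repack_with_argument_clinic parsing, so the clinic string is accepted but unused here:

    import functools

    def argument_clinic(clinic_string, want_obj=False, want_context=False):
        def deco(func):
            @functools.wraps(func)
            def wrapper(evaluator, obj, *args, context=None, **kwargs):
                if want_obj:
                    kwargs['obj'] = obj
                if want_context:
                    kwargs['context'] = context
                return func(evaluator, *args, **kwargs)
            return wrapper
        return deco

    @argument_clinic('iterator[, default]', want_obj=True)
    def builtin_next(evaluator, iterator, default=None, obj=None):
        return ('next', iterator, default, obj)

    print(builtin_next('EVAL', 'OBJ', 'it'))  # ('next', 'it', None, 'OBJ')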
eldar/deepcut-cnn
928bf2f224fce132f6e4404b4c95fb017297a5e0
scripts/cpp_lint.py
python
ProcessLine
(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[])
Processes a single line in the file. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. clean_lines: An array of strings, each representing a line of the file, with comments stripped. line: Number of line being processed. include_state: An _IncludeState instance in which the headers are inserted. function_state: A _FunctionState instance which counts function lines, etc. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error
Processes a single line in the file.
[ "Processes", "a", "single", "line", "in", "the", "file", "." ]
def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[]): """Processes a single line in the file. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. clean_lines: An array of strings, each representing a line of the file, with comments stripped. line: Number of line being processed. include_state: An _IncludeState instance in which the headers are inserted. function_state: A _FunctionState instance which counts function lines, etc. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[line], line, error) nesting_state.Update(filename, clean_lines, line, error) if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM: return CheckForFunctionLengths(filename, clean_lines, line, function_state, error) CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error) CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error) CheckVlogArguments(filename, clean_lines, line, error) CheckCaffeAlternatives(filename, clean_lines, line, error) CheckCaffeDataLayerSetUp(filename, clean_lines, line, error) CheckCaffeRandom(filename, clean_lines, line, error) CheckPosixThreading(filename, clean_lines, line, error) CheckInvalidIncrement(filename, clean_lines, line, error) CheckMakePairUsesDeduction(filename, clean_lines, line, error) for check_fn in extra_check_functions: check_fn(filename, clean_lines, line, error)
[ "def", "ProcessLine", "(", "filename", ",", "file_extension", ",", "clean_lines", ",", "line", ",", "include_state", ",", "function_state", ",", "nesting_state", ",", "error", ",", "extra_check_functions", "=", "[", "]", ")", ":", "raw_lines", "=", "clean_lines", ".", "raw_lines", "ParseNolintSuppressions", "(", "filename", ",", "raw_lines", "[", "line", "]", ",", "line", ",", "error", ")", "nesting_state", ".", "Update", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "if", "nesting_state", ".", "stack", "and", "nesting_state", ".", "stack", "[", "-", "1", "]", ".", "inline_asm", "!=", "_NO_ASM", ":", "return", "CheckForFunctionLengths", "(", "filename", ",", "clean_lines", ",", "line", ",", "function_state", ",", "error", ")", "CheckForMultilineCommentsAndStrings", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckStyle", "(", "filename", ",", "clean_lines", ",", "line", ",", "file_extension", ",", "nesting_state", ",", "error", ")", "CheckLanguage", "(", "filename", ",", "clean_lines", ",", "line", ",", "file_extension", ",", "include_state", ",", "nesting_state", ",", "error", ")", "CheckForNonConstReference", "(", "filename", ",", "clean_lines", ",", "line", ",", "nesting_state", ",", "error", ")", "CheckForNonStandardConstructs", "(", "filename", ",", "clean_lines", ",", "line", ",", "nesting_state", ",", "error", ")", "CheckVlogArguments", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckCaffeAlternatives", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckCaffeDataLayerSetUp", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckCaffeRandom", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckPosixThreading", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckInvalidIncrement", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "CheckMakePairUsesDeduction", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")", "for", "check_fn", "in", "extra_check_functions", ":", "check_fn", "(", "filename", ",", "clean_lines", ",", "line", ",", "error", ")" ]
https://github.com/eldar/deepcut-cnn/blob/928bf2f224fce132f6e4404b4c95fb017297a5e0/scripts/cpp_lint.py#L4600-L4642
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/turtle.py
python
TPen.pen
(self, pen=None, **pendict)
Return or set the pen's attributes. Arguments: pen -- a dictionary with some or all of the below listed keys. **pendict -- one or more keyword-arguments with the below listed keys as keywords. Return or set the pen's attributes in a 'pen-dictionary' with the following key/value pairs: "shown" : True/False "pendown" : True/False "pencolor" : color-string or color-tuple "fillcolor" : color-string or color-tuple "pensize" : positive number "speed" : number in range 0..10 "resizemode" : "auto" or "user" or "noresize" "stretchfactor": (positive number, positive number) "outline" : positive number "tilt" : number This dictionary can be used as argument for a subsequent pen()-call to restore the former pen-state. Moreover one or more of these attributes can be provided as keyword-arguments. This can be used to set several pen attributes in one statement. Examples (for a Turtle instance named turtle): >>> turtle.pen(fillcolor="black", pencolor="red", pensize=10) >>> turtle.pen() {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, 'pencolor': 'red', 'pendown': True, 'fillcolor': 'black', 'stretchfactor': (1,1), 'speed': 3} >>> penstate=turtle.pen() >>> turtle.color("yellow","") >>> turtle.penup() >>> turtle.pen() {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, 'pencolor': 'yellow', 'pendown': False, 'fillcolor': '', 'stretchfactor': (1,1), 'speed': 3} >>> p.pen(penstate, fillcolor="green") >>> p.pen() {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, 'pencolor': 'red', 'pendown': True, 'fillcolor': 'green', 'stretchfactor': (1,1), 'speed': 3}
Return or set the pen's attributes.
[ "Return", "or", "set", "the", "pen", "s", "attributes", "." ]
def pen(self, pen=None, **pendict): """Return or set the pen's attributes. Arguments: pen -- a dictionary with some or all of the below listed keys. **pendict -- one or more keyword-arguments with the below listed keys as keywords. Return or set the pen's attributes in a 'pen-dictionary' with the following key/value pairs: "shown" : True/False "pendown" : True/False "pencolor" : color-string or color-tuple "fillcolor" : color-string or color-tuple "pensize" : positive number "speed" : number in range 0..10 "resizemode" : "auto" or "user" or "noresize" "stretchfactor": (positive number, positive number) "outline" : positive number "tilt" : number This dictionary can be used as argument for a subsequent pen()-call to restore the former pen-state. Moreover one or more of these attributes can be provided as keyword-arguments. This can be used to set several pen attributes in one statement. Examples (for a Turtle instance named turtle): >>> turtle.pen(fillcolor="black", pencolor="red", pensize=10) >>> turtle.pen() {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, 'pencolor': 'red', 'pendown': True, 'fillcolor': 'black', 'stretchfactor': (1,1), 'speed': 3} >>> penstate=turtle.pen() >>> turtle.color("yellow","") >>> turtle.penup() >>> turtle.pen() {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, 'pencolor': 'yellow', 'pendown': False, 'fillcolor': '', 'stretchfactor': (1,1), 'speed': 3} >>> p.pen(penstate, fillcolor="green") >>> p.pen() {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, 'pencolor': 'red', 'pendown': True, 'fillcolor': 'green', 'stretchfactor': (1,1), 'speed': 3} """ _pd = {"shown" : self._shown, "pendown" : self._drawing, "pencolor" : self._pencolor, "fillcolor" : self._fillcolor, "pensize" : self._pensize, "speed" : self._speed, "resizemode" : self._resizemode, "stretchfactor" : self._stretchfactor, "outline" : self._outlinewidth, "tilt" : self._tilt } if not (pen or pendict): return _pd if isinstance(pen, dict): p = pen else: p = {} p.update(pendict) _p_buf = {} for key in p: _p_buf[key] = _pd[key] if self.undobuffer: self.undobuffer.push(("pen", _p_buf)) newLine = False if "pendown" in p: if self._drawing != p["pendown"]: newLine = True if "pencolor" in p: if isinstance(p["pencolor"], tuple): p["pencolor"] = self._colorstr((p["pencolor"],)) if self._pencolor != p["pencolor"]: newLine = True if "pensize" in p: if self._pensize != p["pensize"]: newLine = True if newLine: self._newLine() if "pendown" in p: self._drawing = p["pendown"] if "pencolor" in p: self._pencolor = p["pencolor"] if "pensize" in p: self._pensize = p["pensize"] if "fillcolor" in p: if isinstance(p["fillcolor"], tuple): p["fillcolor"] = self._colorstr((p["fillcolor"],)) self._fillcolor = p["fillcolor"] if "speed" in p: self._speed = p["speed"] if "resizemode" in p: self._resizemode = p["resizemode"] if "stretchfactor" in p: sf = p["stretchfactor"] if isinstance(sf, (int, float)): sf = (sf, sf) self._stretchfactor = sf if "outline" in p: self._outlinewidth = p["outline"] if "shown" in p: self._shown = p["shown"] if "tilt" in p: self._tilt = p["tilt"] self._update()
[ "def", "pen", "(", "self", ",", "pen", "=", "None", ",", "*", "*", "pendict", ")", ":", "_pd", "=", "{", "\"shown\"", ":", "self", ".", "_shown", ",", "\"pendown\"", ":", "self", ".", "_drawing", ",", "\"pencolor\"", ":", "self", ".", "_pencolor", ",", "\"fillcolor\"", ":", "self", ".", "_fillcolor", ",", "\"pensize\"", ":", "self", ".", "_pensize", ",", "\"speed\"", ":", "self", ".", "_speed", ",", "\"resizemode\"", ":", "self", ".", "_resizemode", ",", "\"stretchfactor\"", ":", "self", ".", "_stretchfactor", ",", "\"outline\"", ":", "self", ".", "_outlinewidth", ",", "\"tilt\"", ":", "self", ".", "_tilt", "}", "if", "not", "(", "pen", "or", "pendict", ")", ":", "return", "_pd", "if", "isinstance", "(", "pen", ",", "dict", ")", ":", "p", "=", "pen", "else", ":", "p", "=", "{", "}", "p", ".", "update", "(", "pendict", ")", "_p_buf", "=", "{", "}", "for", "key", "in", "p", ":", "_p_buf", "[", "key", "]", "=", "_pd", "[", "key", "]", "if", "self", ".", "undobuffer", ":", "self", ".", "undobuffer", ".", "push", "(", "(", "\"pen\"", ",", "_p_buf", ")", ")", "newLine", "=", "False", "if", "\"pendown\"", "in", "p", ":", "if", "self", ".", "_drawing", "!=", "p", "[", "\"pendown\"", "]", ":", "newLine", "=", "True", "if", "\"pencolor\"", "in", "p", ":", "if", "isinstance", "(", "p", "[", "\"pencolor\"", "]", ",", "tuple", ")", ":", "p", "[", "\"pencolor\"", "]", "=", "self", ".", "_colorstr", "(", "(", "p", "[", "\"pencolor\"", "]", ",", ")", ")", "if", "self", ".", "_pencolor", "!=", "p", "[", "\"pencolor\"", "]", ":", "newLine", "=", "True", "if", "\"pensize\"", "in", "p", ":", "if", "self", ".", "_pensize", "!=", "p", "[", "\"pensize\"", "]", ":", "newLine", "=", "True", "if", "newLine", ":", "self", ".", "_newLine", "(", ")", "if", "\"pendown\"", "in", "p", ":", "self", ".", "_drawing", "=", "p", "[", "\"pendown\"", "]", "if", "\"pencolor\"", "in", "p", ":", "self", ".", "_pencolor", "=", "p", "[", "\"pencolor\"", "]", "if", "\"pensize\"", "in", "p", ":", "self", ".", "_pensize", "=", "p", "[", "\"pensize\"", "]", "if", "\"fillcolor\"", "in", "p", ":", "if", "isinstance", "(", "p", "[", "\"fillcolor\"", "]", ",", "tuple", ")", ":", "p", "[", "\"fillcolor\"", "]", "=", "self", ".", "_colorstr", "(", "(", "p", "[", "\"fillcolor\"", "]", ",", ")", ")", "self", ".", "_fillcolor", "=", "p", "[", "\"fillcolor\"", "]", "if", "\"speed\"", "in", "p", ":", "self", ".", "_speed", "=", "p", "[", "\"speed\"", "]", "if", "\"resizemode\"", "in", "p", ":", "self", ".", "_resizemode", "=", "p", "[", "\"resizemode\"", "]", "if", "\"stretchfactor\"", "in", "p", ":", "sf", "=", "p", "[", "\"stretchfactor\"", "]", "if", "isinstance", "(", "sf", ",", "(", "int", ",", "float", ")", ")", ":", "sf", "=", "(", "sf", ",", "sf", ")", "self", ".", "_stretchfactor", "=", "sf", "if", "\"outline\"", "in", "p", ":", "self", ".", "_outlinewidth", "=", "p", "[", "\"outline\"", "]", "if", "\"shown\"", "in", "p", ":", "self", ".", "_shown", "=", "p", "[", "\"shown\"", "]", "if", "\"tilt\"", "in", "p", ":", "self", ".", "_tilt", "=", "p", "[", "\"tilt\"", "]", "self", ".", "_update", "(", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/turtle.py#L2250-L2363
priyankchheda/algorithms
c361aa9071573fa9966d5b02d05e524815abcf2b
red_black_tree/red_black_tree.py
python
RedBlackTree.get_node_color
(node)
return node.color
returns node color
returns node color
[ "returns", "node", "color" ]
def get_node_color(node): """ returns node color """ if node is None: return BLACK return node.color
[ "def", "get_node_color", "(", "node", ")", ":", "if", "node", "is", "None", ":", "return", "BLACK", "return", "node", ".", "color" ]
https://github.com/priyankchheda/algorithms/blob/c361aa9071573fa9966d5b02d05e524815abcf2b/red_black_tree/red_black_tree.py#L181-L185
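The None check above encodes the red-black convention that missing children are the tree's black leaves. A tiny self-contained illustration, with illustrative RED/BLACK constants and a stripped-down Node:

    RED, BLACK = "red", "black"

    class Node:
        def __init__(self, key, color=RED, left=None, right=None):
            self.key, self.color, self.left, self.right = key, color, left, right

    def get_node_color(node):
        return BLACK if node is None else node.color

    root = Node(10, color=BLACK, left=Node(5))
    print(get_node_color(root), get_node_color(root.left), get_node_color(root.right))
    # black red black  (the absent right child counts as a black leaf)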
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/difflib.py
python
_mdiff
(fromlines, tolines, context=None, linejunk=None, charjunk=IS_CHARACTER_JUNK)
r"""Returns generator yielding marked up from/to side by side differences. Arguments: fromlines -- list of text lines to compared to tolines tolines -- list of text lines to be compared to fromlines context -- number of context lines to display on each side of difference, if None, all from/to text lines will be generated. linejunk -- passed on to ndiff (see ndiff documentation) charjunk -- passed on to ndiff (see ndiff documentation) This function returns an interator which returns a tuple: (from line tuple, to line tuple, boolean flag) from/to line tuple -- (line num, line text) line num -- integer or None (to indicate a context separation) line text -- original line text with following markers inserted: '\0+' -- marks start of added text '\0-' -- marks start of deleted text '\0^' -- marks start of changed text '\1' -- marks end of added/deleted/changed text boolean flag -- None indicates context separation, True indicates either "from" or "to" line contains a change, otherwise False. This function/iterator was originally developed to generate side by side file difference for making HTML pages (see HtmlDiff class for example usage). Note, this function utilizes the ndiff function to generate the side by side difference markup. Optional ndiff arguments may be passed to this function and they in turn will be passed to ndiff.
r"""Returns generator yielding marked up from/to side by side differences.
[ "r", "Returns", "generator", "yielding", "marked", "up", "from", "/", "to", "side", "by", "side", "differences", "." ]
def _mdiff(fromlines, tolines, context=None, linejunk=None, charjunk=IS_CHARACTER_JUNK): r"""Returns generator yielding marked up from/to side by side differences. Arguments: fromlines -- list of text lines to be compared to tolines tolines -- list of text lines to be compared to fromlines context -- number of context lines to display on each side of difference, if None, all from/to text lines will be generated. linejunk -- passed on to ndiff (see ndiff documentation) charjunk -- passed on to ndiff (see ndiff documentation) This function returns an iterator which returns a tuple: (from line tuple, to line tuple, boolean flag) from/to line tuple -- (line num, line text) line num -- integer or None (to indicate a context separation) line text -- original line text with following markers inserted: '\0+' -- marks start of added text '\0-' -- marks start of deleted text '\0^' -- marks start of changed text '\1' -- marks end of added/deleted/changed text boolean flag -- None indicates context separation, True indicates either "from" or "to" line contains a change, otherwise False. This function/iterator was originally developed to generate side by side file difference for making HTML pages (see HtmlDiff class for example usage). Note, this function utilizes the ndiff function to generate the side by side difference markup. Optional ndiff arguments may be passed to this function and they in turn will be passed to ndiff. """ import re # regular expression for finding intraline change indices change_re = re.compile('(\++|\-+|\^+)') # create the difference iterator to generate the differences diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk) def _make_line(lines, format_key, side, num_lines=[0,0]): """Returns line of text with user's change markup and line formatting. lines -- list of lines from the ndiff generator to produce a line of text from. When producing the line of text to return, the lines used are removed from this list. format_key -- '+' return first line in list with "add" markup around the entire line. '-' return first line in list with "delete" markup around the entire line. '?' return first line in list with add/delete/change intraline markup (indices obtained from second line) None return first line in list with no markup side -- indice into the num_lines list (0=from,1=to) num_lines -- from/to current line number. This is NOT intended to be a passed parameter. It is present as a keyword argument to maintain memory of the current line numbers between calls of this function. Note, this function is purposefully not defined at the module scope so that data it needs from its parent function (within whose context it is defined) does not need to be of module scope. """ num_lines[side] += 1 # Handle case where no user markup is to be added, just return line of # text with user's line format to allow for usage of the line number. if format_key is None: return (num_lines[side],lines.pop(0)[2:]) # Handle case of intraline changes if format_key == '?': text, markers = lines.pop(0), lines.pop(0) # find intraline changes (store change type and indices in tuples) sub_info = [] def record_sub_info(match_object,sub_info=sub_info): sub_info.append([match_object.group(1)[0],match_object.span()]) return match_object.group(1) change_re.sub(record_sub_info,markers) # process each tuple inserting our special marks that won't be # noticed by an xml/html escaper. for key,(begin,end) in sub_info[::-1]: text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:] text = text[2:] # Handle case of add/delete entire line else: text = lines.pop(0)[2:] # if line of text is just a newline, insert a space so there is # something for the user to highlight and see. if not text: text = ' ' # insert marks that won't be noticed by an xml/html escaper. text = '\0' + format_key + text + '\1' # Return line of text, first allow user's line formatter to do its # thing (such as adding the line number) then replace the special # marks with what the user's change markup. return (num_lines[side],text) def _line_iterator(): """Yields from/to lines of text with a change indication. This function is an iterator. It itself pulls lines from a differencing iterator, processes them and yields them. When it can it yields both a "from" and a "to" line, otherwise it will yield one or the other. In addition to yielding the lines of from/to text, a boolean flag is yielded to indicate if the text line(s) have differences in them. Note, this function is purposefully not defined at the module scope so that data it needs from its parent function (within whose context it is defined) does not need to be of module scope. """ lines = [] num_blanks_pending, num_blanks_to_yield = 0, 0 while True: # Load up next 4 lines so we can look ahead, create strings which # are a concatenation of the first character of each of the 4 lines # so we can do some very readable comparisons. while len(lines) < 4: try: lines.append(diff_lines_iterator.next()) except StopIteration: lines.append('X') s = ''.join([line[0] for line in lines]) if s.startswith('X'): # When no more lines, pump out any remaining blank lines so the # corresponding add/delete lines get a matching blank line so # all line pairs get yielded at the next level. num_blanks_to_yield = num_blanks_pending elif s.startswith('-?+?'): # simple intraline change yield _make_line(lines,'?',0), _make_line(lines,'?',1), True continue elif s.startswith('--++'): # in delete block, add block coming: we do NOT want to get # caught up on blank lines yet, just process the delete line num_blanks_pending -= 1 yield _make_line(lines,'-',0), None, True continue elif s.startswith(('--?+', '--+', '- ')): # in delete block and see a intraline change or unchanged line # coming: yield the delete line and then blanks from_line,to_line = _make_line(lines,'-',0), None num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0 elif s.startswith('-+?'): # intraline change yield _make_line(lines,None,0), _make_line(lines,'?',1), True continue elif s.startswith('-?+'): # intraline change yield _make_line(lines,'?',0), _make_line(lines,None,1), True continue elif s.startswith('-'): # delete FROM line num_blanks_pending -= 1 yield _make_line(lines,'-',0), None, True continue elif s.startswith('+--'): # in add block, delete block coming: we do NOT want to get # caught up on blank lines yet, just process the add line num_blanks_pending += 1 yield None, _make_line(lines,'+',1), True continue elif s.startswith(('+ ', '+-')): # will be leaving an add block: yield blanks then add line from_line, to_line = None, _make_line(lines,'+',1) num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0 elif s.startswith('+'): # inside an add block, yield the add line num_blanks_pending += 1 yield None, _make_line(lines,'+',1), True continue elif s.startswith(' '): # unchanged text, yield it to both sides yield _make_line(lines[:],None,0),_make_line(lines,None,1),False continue # Catch up on the blank lines so when we yield the next from/to # pair, they are lined up. while(num_blanks_to_yield < 0): num_blanks_to_yield += 1 yield None,('','\n'),True while(num_blanks_to_yield > 0): num_blanks_to_yield -= 1 yield ('','\n'),None,True if s.startswith('X'): raise StopIteration else: yield from_line,to_line,True def _line_pair_iterator(): """Yields from/to lines of text with a change indication. This function is an iterator. It itself pulls lines from the line iterator. Its difference from that iterator is that this function always yields a pair of from/to text lines (with the change indication). If necessary it will collect single from/to lines until it has a matching pair from/to pair to yield. Note, this function is purposefully not defined at the module scope so that data it needs from its parent function (within whose context it is defined) does not need to be of module scope. """ line_iterator = _line_iterator() fromlines,tolines=[],[] while True: # Collecting lines of text until we have a from/to pair while (len(fromlines)==0 or len(tolines)==0): from_line, to_line, found_diff =line_iterator.next() if from_line is not None: fromlines.append((from_line,found_diff)) if to_line is not None: tolines.append((to_line,found_diff)) # Once we have a pair, remove them from the collection and yield it from_line, fromDiff = fromlines.pop(0) to_line, to_diff = tolines.pop(0) yield (from_line,to_line,fromDiff or to_diff) # Handle case where user does not want context differencing, just yield # them up without doing anything else with them. line_pair_iterator = _line_pair_iterator() if context is None: while True: yield line_pair_iterator.next() # Handle case where user wants context differencing. We must do some # storage of lines until we know for sure that they are to be yielded. else: context += 1 lines_to_write = 0 while True: # Store lines up until we find a difference, note use of a # circular queue because we only need to keep around what # we need for context. index, contextLines = 0, [None]*(context) found_diff = False while(found_diff is False): from_line, to_line, found_diff = line_pair_iterator.next() i = index % context contextLines[i] = (from_line, to_line, found_diff) index += 1 # Yield lines that we have collected so far, but first yield # the user's separator. if index > context: yield None, None, None lines_to_write = context else: lines_to_write = index index = 0 while(lines_to_write): i = index % context index += 1 yield contextLines[i] lines_to_write -= 1 # Now yield the context lines after the change lines_to_write = context-1 while(lines_to_write): from_line, to_line, found_diff = line_pair_iterator.next() # If another change within the context, extend the context if found_diff: lines_to_write = context-1 else: lines_to_write -= 1 yield from_line, to_line, found_diff
[ "def", "_mdiff", "(", "fromlines", ",", "tolines", ",", "context", "=", "None", ",", "linejunk", "=", "None", ",", "charjunk", "=", "IS_CHARACTER_JUNK", ")", ":", "import", "re", "# regular expression for finding intraline change indices", "change_re", "=", "re", ".", "compile", "(", "'(\\++|\\-+|\\^+)'", ")", "# create the difference iterator to generate the differences", "diff_lines_iterator", "=", "ndiff", "(", "fromlines", ",", "tolines", ",", "linejunk", ",", "charjunk", ")", "def", "_make_line", "(", "lines", ",", "format_key", ",", "side", ",", "num_lines", "=", "[", "0", ",", "0", "]", ")", ":", "\"\"\"Returns line of text with user's change markup and line formatting.\n\n lines -- list of lines from the ndiff generator to produce a line of\n text from. When producing the line of text to return, the\n lines used are removed from this list.\n format_key -- '+' return first line in list with \"add\" markup around\n the entire line.\n '-' return first line in list with \"delete\" markup around\n the entire line.\n '?' return first line in list with add/delete/change\n intraline markup (indices obtained from second line)\n None return first line in list with no markup\n side -- indice into the num_lines list (0=from,1=to)\n num_lines -- from/to current line number. This is NOT intended to be a\n passed parameter. It is present as a keyword argument to\n maintain memory of the current line numbers between calls\n of this function.\n\n Note, this function is purposefully not defined at the module scope so\n that data it needs from its parent function (within whose context it\n is defined) does not need to be of module scope.\n \"\"\"", "num_lines", "[", "side", "]", "+=", "1", "# Handle case where no user markup is to be added, just return line of", "# text with user's line format to allow for usage of the line number.", "if", "format_key", "is", "None", ":", "return", "(", "num_lines", "[", "side", "]", ",", "lines", ".", "pop", "(", "0", ")", "[", "2", ":", "]", ")", "# Handle case of intraline changes", "if", "format_key", "==", "'?'", ":", "text", ",", "markers", "=", "lines", ".", "pop", "(", "0", ")", ",", "lines", ".", "pop", "(", "0", ")", "# find intraline changes (store change type and indices in tuples)", "sub_info", "=", "[", "]", "def", "record_sub_info", "(", "match_object", ",", "sub_info", "=", "sub_info", ")", ":", "sub_info", ".", "append", "(", "[", "match_object", ".", "group", "(", "1", ")", "[", "0", "]", ",", "match_object", ".", "span", "(", ")", "]", ")", "return", "match_object", ".", "group", "(", "1", ")", "change_re", ".", "sub", "(", "record_sub_info", ",", "markers", ")", "# process each tuple inserting our special marks that won't be", "# noticed by an xml/html escaper.", "for", "key", ",", "(", "begin", ",", "end", ")", "in", "sub_info", "[", ":", ":", "-", "1", "]", ":", "text", "=", "text", "[", "0", ":", "begin", "]", "+", "'\\0'", "+", "key", "+", "text", "[", "begin", ":", "end", "]", "+", "'\\1'", "+", "text", "[", "end", ":", "]", "text", "=", "text", "[", "2", ":", "]", "# Handle case of add/delete entire line", "else", ":", "text", "=", "lines", ".", "pop", "(", "0", ")", "[", "2", ":", "]", "# if line of text is just a newline, insert a space so there is", "# something for the user to highlight and see.", "if", "not", "text", ":", "text", "=", "' '", "# insert marks that won't be noticed by an xml/html escaper.", "text", "=", "'\\0'", "+", "format_key", "+", "text", "+", "'\\1'", "# Return line of text, first allow user's line formatter to do 
its", "# thing (such as adding the line number) then replace the special", "# marks with what the user's change markup.", "return", "(", "num_lines", "[", "side", "]", ",", "text", ")", "def", "_line_iterator", "(", ")", ":", "\"\"\"Yields from/to lines of text with a change indication.\n\n This function is an iterator. It itself pulls lines from a\n differencing iterator, processes them and yields them. When it can\n it yields both a \"from\" and a \"to\" line, otherwise it will yield one\n or the other. In addition to yielding the lines of from/to text, a\n boolean flag is yielded to indicate if the text line(s) have\n differences in them.\n\n Note, this function is purposefully not defined at the module scope so\n that data it needs from its parent function (within whose context it\n is defined) does not need to be of module scope.\n \"\"\"", "lines", "=", "[", "]", "num_blanks_pending", ",", "num_blanks_to_yield", "=", "0", ",", "0", "while", "True", ":", "# Load up next 4 lines so we can look ahead, create strings which", "# are a concatenation of the first character of each of the 4 lines", "# so we can do some very readable comparisons.", "while", "len", "(", "lines", ")", "<", "4", ":", "try", ":", "lines", ".", "append", "(", "diff_lines_iterator", ".", "next", "(", ")", ")", "except", "StopIteration", ":", "lines", ".", "append", "(", "'X'", ")", "s", "=", "''", ".", "join", "(", "[", "line", "[", "0", "]", "for", "line", "in", "lines", "]", ")", "if", "s", ".", "startswith", "(", "'X'", ")", ":", "# When no more lines, pump out any remaining blank lines so the", "# corresponding add/delete lines get a matching blank line so", "# all line pairs get yielded at the next level.", "num_blanks_to_yield", "=", "num_blanks_pending", "elif", "s", ".", "startswith", "(", "'-?+?'", ")", ":", "# simple intraline change", "yield", "_make_line", "(", "lines", ",", "'?'", ",", "0", ")", ",", "_make_line", "(", "lines", ",", "'?'", ",", "1", ")", ",", "True", "continue", "elif", "s", ".", "startswith", "(", "'--++'", ")", ":", "# in delete block, add block coming: we do NOT want to get", "# caught up on blank lines yet, just process the delete line", "num_blanks_pending", "-=", "1", "yield", "_make_line", "(", "lines", ",", "'-'", ",", "0", ")", ",", "None", ",", "True", "continue", "elif", "s", ".", "startswith", "(", "(", "'--?+'", ",", "'--+'", ",", "'- '", ")", ")", ":", "# in delete block and see a intraline change or unchanged line", "# coming: yield the delete line and then blanks", "from_line", ",", "to_line", "=", "_make_line", "(", "lines", ",", "'-'", ",", "0", ")", ",", "None", "num_blanks_to_yield", ",", "num_blanks_pending", "=", "num_blanks_pending", "-", "1", ",", "0", "elif", "s", ".", "startswith", "(", "'-+?'", ")", ":", "# intraline change", "yield", "_make_line", "(", "lines", ",", "None", ",", "0", ")", ",", "_make_line", "(", "lines", ",", "'?'", ",", "1", ")", ",", "True", "continue", "elif", "s", ".", "startswith", "(", "'-?+'", ")", ":", "# intraline change", "yield", "_make_line", "(", "lines", ",", "'?'", ",", "0", ")", ",", "_make_line", "(", "lines", ",", "None", ",", "1", ")", ",", "True", "continue", "elif", "s", ".", "startswith", "(", "'-'", ")", ":", "# delete FROM line", "num_blanks_pending", "-=", "1", "yield", "_make_line", "(", "lines", ",", "'-'", ",", "0", ")", ",", "None", ",", "True", "continue", "elif", "s", ".", "startswith", "(", "'+--'", ")", ":", "# in add block, delete block coming: we do NOT want to get", "# caught up on blank lines yet, just 
process the add line", "num_blanks_pending", "+=", "1", "yield", "None", ",", "_make_line", "(", "lines", ",", "'+'", ",", "1", ")", ",", "True", "continue", "elif", "s", ".", "startswith", "(", "(", "'+ '", ",", "'+-'", ")", ")", ":", "# will be leaving an add block: yield blanks then add line", "from_line", ",", "to_line", "=", "None", ",", "_make_line", "(", "lines", ",", "'+'", ",", "1", ")", "num_blanks_to_yield", ",", "num_blanks_pending", "=", "num_blanks_pending", "+", "1", ",", "0", "elif", "s", ".", "startswith", "(", "'+'", ")", ":", "# inside an add block, yield the add line", "num_blanks_pending", "+=", "1", "yield", "None", ",", "_make_line", "(", "lines", ",", "'+'", ",", "1", ")", ",", "True", "continue", "elif", "s", ".", "startswith", "(", "' '", ")", ":", "# unchanged text, yield it to both sides", "yield", "_make_line", "(", "lines", "[", ":", "]", ",", "None", ",", "0", ")", ",", "_make_line", "(", "lines", ",", "None", ",", "1", ")", ",", "False", "continue", "# Catch up on the blank lines so when we yield the next from/to", "# pair, they are lined up.", "while", "(", "num_blanks_to_yield", "<", "0", ")", ":", "num_blanks_to_yield", "+=", "1", "yield", "None", ",", "(", "''", ",", "'\\n'", ")", ",", "True", "while", "(", "num_blanks_to_yield", ">", "0", ")", ":", "num_blanks_to_yield", "-=", "1", "yield", "(", "''", ",", "'\\n'", ")", ",", "None", ",", "True", "if", "s", ".", "startswith", "(", "'X'", ")", ":", "raise", "StopIteration", "else", ":", "yield", "from_line", ",", "to_line", ",", "True", "def", "_line_pair_iterator", "(", ")", ":", "\"\"\"Yields from/to lines of text with a change indication.\n\n This function is an iterator. It itself pulls lines from the line\n iterator. Its difference from that iterator is that this function\n always yields a pair of from/to text lines (with the change\n indication). If necessary it will collect single from/to lines\n until it has a matching pair from/to pair to yield.\n\n Note, this function is purposefully not defined at the module scope so\n that data it needs from its parent function (within whose context it\n is defined) does not need to be of module scope.\n \"\"\"", "line_iterator", "=", "_line_iterator", "(", ")", "fromlines", ",", "tolines", "=", "[", "]", ",", "[", "]", "while", "True", ":", "# Collecting lines of text until we have a from/to pair", "while", "(", "len", "(", "fromlines", ")", "==", "0", "or", "len", "(", "tolines", ")", "==", "0", ")", ":", "from_line", ",", "to_line", ",", "found_diff", "=", "line_iterator", ".", "next", "(", ")", "if", "from_line", "is", "not", "None", ":", "fromlines", ".", "append", "(", "(", "from_line", ",", "found_diff", ")", ")", "if", "to_line", "is", "not", "None", ":", "tolines", ".", "append", "(", "(", "to_line", ",", "found_diff", ")", ")", "# Once we have a pair, remove them from the collection and yield it", "from_line", ",", "fromDiff", "=", "fromlines", ".", "pop", "(", "0", ")", "to_line", ",", "to_diff", "=", "tolines", ".", "pop", "(", "0", ")", "yield", "(", "from_line", ",", "to_line", ",", "fromDiff", "or", "to_diff", ")", "# Handle case where user does not want context differencing, just yield", "# them up without doing anything else with them.", "line_pair_iterator", "=", "_line_pair_iterator", "(", ")", "if", "context", "is", "None", ":", "while", "True", ":", "yield", "line_pair_iterator", ".", "next", "(", ")", "# Handle case where user wants context differencing. 
We must do some", "# storage of lines until we know for sure that they are to be yielded.", "else", ":", "context", "+=", "1", "lines_to_write", "=", "0", "while", "True", ":", "# Store lines up until we find a difference, note use of a", "# circular queue because we only need to keep around what", "# we need for context.", "index", ",", "contextLines", "=", "0", ",", "[", "None", "]", "*", "(", "context", ")", "found_diff", "=", "False", "while", "(", "found_diff", "is", "False", ")", ":", "from_line", ",", "to_line", ",", "found_diff", "=", "line_pair_iterator", ".", "next", "(", ")", "i", "=", "index", "%", "context", "contextLines", "[", "i", "]", "=", "(", "from_line", ",", "to_line", ",", "found_diff", ")", "index", "+=", "1", "# Yield lines that we have collected so far, but first yield", "# the user's separator.", "if", "index", ">", "context", ":", "yield", "None", ",", "None", ",", "None", "lines_to_write", "=", "context", "else", ":", "lines_to_write", "=", "index", "index", "=", "0", "while", "(", "lines_to_write", ")", ":", "i", "=", "index", "%", "context", "index", "+=", "1", "yield", "contextLines", "[", "i", "]", "lines_to_write", "-=", "1", "# Now yield the context lines after the change", "lines_to_write", "=", "context", "-", "1", "while", "(", "lines_to_write", ")", ":", "from_line", ",", "to_line", ",", "found_diff", "=", "line_pair_iterator", ".", "next", "(", ")", "# If another change within the context, extend the context", "if", "found_diff", ":", "lines_to_write", "=", "context", "-", "1", "else", ":", "lines_to_write", "-=", "1", "yield", "from_line", ",", "to_line", ",", "found_diff" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/difflib.py#L1352-L1613
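A quick way to see the (from, to, has_diff) triples described in the docstrings above is to call the generator directly. A minimal sketch using only the standard library; note that _mdiff is a private difflib helper, so this leans on an implementation detail (one that has nonetheless been stable across CPython releases):

```
# _mdiff yields (from, to, has_diff) triples; each side is a
# (line_number, text) pair whose text carries \0+ ... \1 change markers.
import difflib

old = ['one\n', 'two\n', 'three\n']
new = ['one\n', 'tree\n', 'emu\n']

for from_line, to_line, has_diff in difflib._mdiff(old, new):
    print(has_diff, from_line, to_line)
```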
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/datetime.py
python
time.tzname
(self)
return name
Return the timezone name. Note that the name is 100% informational -- there's no requirement that it mean anything in particular. For example, "GMT", "UTC", "-500", "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
Return the timezone name.
[ "Return", "the", "timezone", "name", "." ]
def tzname(self): """Return the timezone name. Note that the name is 100% informational -- there's no requirement that it mean anything in particular. For example, "GMT", "UTC", "-500", "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. """ if self._tzinfo is None: return None name = self._tzinfo.tzname(None) _check_tzname(name) return name
[ "def", "tzname", "(", "self", ")", ":", "if", "self", ".", "_tzinfo", "is", "None", ":", "return", "None", "name", "=", "self", ".", "_tzinfo", ".", "tzname", "(", "None", ")", "_check_tzname", "(", "name", ")", "return", "name" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/datetime.py#L1424-L1435
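A minimal check of the behavior documented above, using only the standard library: an aware time object delegates tzname() to its tzinfo, while a naive one returns None.

```
from datetime import time, timezone

print(time(12, 30, tzinfo=timezone.utc).tzname())  # 'UTC'
print(time(12, 30).tzname())                       # None (naive time)
```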
y123456yz/reading-and-annotate-mongodb-3.6
93280293672ca7586dc24af18132aa61e4ed7fcf
mongo/buildscripts/cpplint.py
python
CheckForNonConstReference
(filename, clean_lines, linenum, nesting_state, error)
Check for non-const references. Separate from CheckLanguage since it scans backwards from current line, instead of scanning forward. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
Check for non-const references.
[ "Check", "for", "non", "-", "const", "references", "." ]
def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state, error): """Check for non-const references. Separate from CheckLanguage since it scans backwards from current line, instead of scanning forward. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Do nothing if there is no '&' on current line. line = clean_lines.elided[linenum] if '&' not in line: return # If a function is inherited, current function doesn't have much of # a choice, so any non-const references should not be blamed on # derived function. if IsDerivedFunction(clean_lines, linenum): return # Long type names may be broken across multiple lines, usually in one # of these forms: # LongType # ::LongTypeContinued &identifier # LongType:: # LongTypeContinued &identifier # LongType< # ...>::LongTypeContinued &identifier # # If we detected a type split across two lines, join the previous # line to current line so that we can match const references # accordingly. # # Note that this only scans back one line, since scanning back # arbitrary number of lines would be expensive. If you have a type # that spans more than 2 lines, please use a typedef. if linenum > 1: previous = None if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): # previous_line\n + ::current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', clean_lines.elided[linenum - 1]) elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): # previous_line::\n + current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', clean_lines.elided[linenum - 1]) if previous: line = previous.group(1) + line.lstrip() else: # Check for templated parameter that is split across multiple lines endpos = line.rfind('>') if endpos > -1: (_, startline, startpos) = ReverseCloseExpression( clean_lines, linenum, endpos) if startpos > -1 and startline < linenum: # Found the matching < on an earlier line, collect all # pieces up to current line. line = '' for i in xrange(startline, linenum + 1): line += clean_lines.elided[i].strip() # Check for non-const references in function parameters. A single '&' may # found in the following places: # inside expression: binary & for bitwise AND # inside expression: unary & for taking the address of something # inside declarators: reference parameter # We will exclude the first two cases by checking that we are not inside a # function body, including one that was just introduced by a trailing '{'. # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. if (nesting_state.previous_stack_top and not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or isinstance(nesting_state.previous_stack_top, _NamespaceInfo))): # Not at toplevel, not within a class, and not within a namespace return # Avoid initializer lists. We only need to scan back from the # current line for something that starts with ':'. # # We don't need to check the current line, since the '&' would # appear inside the second set of parentheses on the current line as # opposed to the first set. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 10), -1): previous_line = clean_lines.elided[i] if not Search(r'[),]\s*$', previous_line): break if Match(r'^\s*:\s+\S', previous_line): return # Avoid preprocessors if Search(r'\\\s*$', line): return # Avoid constructor initializer lists if IsInitializerList(clean_lines, linenum): return # We allow non-const references in a few standard places, like functions # called "swap()" or iostream operators like "<<" or ">>". Do not check # those function parameters. # # We also accept & in static_assert, which looks like a function but # it's actually a declaration expression. whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' r'operator\s*[<>][<>]|' r'static_assert|COMPILE_ASSERT' r')\s*\(') if Search(whitelisted_functions, line): return elif not Search(r'\S+\([^)]*$', line): # Don't see a whitelisted function on this line. Actually we # didn't see any function name on this line, so this is likely a # multi-line parameter list. Try a bit harder to catch this case. for i in xrange(2): if (linenum > i and Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): return decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter): error(filename, linenum, 'runtime/references', 2, 'Is this a non-const reference? ' 'If so, make const or use a pointer: ' + ReplaceAll(' *<', '<', parameter))
[ "def", "CheckForNonConstReference", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", ":", "# Do nothing if there is no '&' on current line.", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "if", "'&'", "not", "in", "line", ":", "return", "# If a function is inherited, current function doesn't have much of", "# a choice, so any non-const references should not be blamed on", "# derived function.", "if", "IsDerivedFunction", "(", "clean_lines", ",", "linenum", ")", ":", "return", "# Long type names may be broken across multiple lines, usually in one", "# of these forms:", "# LongType", "# ::LongTypeContinued &identifier", "# LongType::", "# LongTypeContinued &identifier", "# LongType<", "# ...>::LongTypeContinued &identifier", "#", "# If we detected a type split across two lines, join the previous", "# line to current line so that we can match const references", "# accordingly.", "#", "# Note that this only scans back one line, since scanning back", "# arbitrary number of lines would be expensive. If you have a type", "# that spans more than 2 lines, please use a typedef.", "if", "linenum", ">", "1", ":", "previous", "=", "None", "if", "Match", "(", "r'\\s*::(?:[\\w<>]|::)+\\s*&\\s*\\S'", ",", "line", ")", ":", "# previous_line\\n + ::current_line", "previous", "=", "Search", "(", "r'\\b((?:const\\s*)?(?:[\\w<>]|::)+[\\w<>])\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "1", "]", ")", "elif", "Match", "(", "r'\\s*[a-zA-Z_]([\\w<>]|::)+\\s*&\\s*\\S'", ",", "line", ")", ":", "# previous_line::\\n + current_line", "previous", "=", "Search", "(", "r'\\b((?:const\\s*)?(?:[\\w<>]|::)+::)\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "1", "]", ")", "if", "previous", ":", "line", "=", "previous", ".", "group", "(", "1", ")", "+", "line", ".", "lstrip", "(", ")", "else", ":", "# Check for templated parameter that is split across multiple lines", "endpos", "=", "line", ".", "rfind", "(", "'>'", ")", "if", "endpos", ">", "-", "1", ":", "(", "_", ",", "startline", ",", "startpos", ")", "=", "ReverseCloseExpression", "(", "clean_lines", ",", "linenum", ",", "endpos", ")", "if", "startpos", ">", "-", "1", "and", "startline", "<", "linenum", ":", "# Found the matching < on an earlier line, collect all", "# pieces up to current line.", "line", "=", "''", "for", "i", "in", "xrange", "(", "startline", ",", "linenum", "+", "1", ")", ":", "line", "+=", "clean_lines", ".", "elided", "[", "i", "]", ".", "strip", "(", ")", "# Check for non-const references in function parameters. A single '&' may", "# found in the following places:", "# inside expression: binary & for bitwise AND", "# inside expression: unary & for taking the address of something", "# inside declarators: reference parameter", "# We will exclude the first two cases by checking that we are not inside a", "# function body, including one that was just introduced by a trailing '{'.", "# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].", "if", "(", "nesting_state", ".", "previous_stack_top", "and", "not", "(", "isinstance", "(", "nesting_state", ".", "previous_stack_top", ",", "_ClassInfo", ")", "or", "isinstance", "(", "nesting_state", ".", "previous_stack_top", ",", "_NamespaceInfo", ")", ")", ")", ":", "# Not at toplevel, not within a class, and not within a namespace", "return", "# Avoid initializer lists. 
We only need to scan back from the", "# current line for something that starts with ':'.", "#", "# We don't need to check the current line, since the '&' would", "# appear inside the second set of parentheses on the current line as", "# opposed to the first set.", "if", "linenum", ">", "0", ":", "for", "i", "in", "xrange", "(", "linenum", "-", "1", ",", "max", "(", "0", ",", "linenum", "-", "10", ")", ",", "-", "1", ")", ":", "previous_line", "=", "clean_lines", ".", "elided", "[", "i", "]", "if", "not", "Search", "(", "r'[),]\\s*$'", ",", "previous_line", ")", ":", "break", "if", "Match", "(", "r'^\\s*:\\s+\\S'", ",", "previous_line", ")", ":", "return", "# Avoid preprocessors", "if", "Search", "(", "r'\\\\\\s*$'", ",", "line", ")", ":", "return", "# Avoid constructor initializer lists", "if", "IsInitializerList", "(", "clean_lines", ",", "linenum", ")", ":", "return", "# We allow non-const references in a few standard places, like functions", "# called \"swap()\" or iostream operators like \"<<\" or \">>\". Do not check", "# those function parameters.", "#", "# We also accept & in static_assert, which looks like a function but", "# it's actually a declaration expression.", "whitelisted_functions", "=", "(", "r'(?:[sS]wap(?:<\\w:+>)?|'", "r'operator\\s*[<>][<>]|'", "r'static_assert|COMPILE_ASSERT'", "r')\\s*\\('", ")", "if", "Search", "(", "whitelisted_functions", ",", "line", ")", ":", "return", "elif", "not", "Search", "(", "r'\\S+\\([^)]*$'", ",", "line", ")", ":", "# Don't see a whitelisted function on this line. Actually we", "# didn't see any function name on this line, so this is likely a", "# multi-line parameter list. Try a bit harder to catch this case.", "for", "i", "in", "xrange", "(", "2", ")", ":", "if", "(", "linenum", ">", "i", "and", "Search", "(", "whitelisted_functions", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "i", "-", "1", "]", ")", ")", ":", "return", "decls", "=", "ReplaceAll", "(", "r'{[^}]*}'", ",", "' '", ",", "line", ")", "# exclude function body", "for", "parameter", "in", "re", ".", "findall", "(", "_RE_PATTERN_REF_PARAM", ",", "decls", ")", ":", "if", "not", "Match", "(", "_RE_PATTERN_CONST_REF_PARAM", ",", "parameter", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/references'", ",", "2", ",", "'Is this a non-const reference? '", "'If so, make const or use a pointer: '", "+", "ReplaceAll", "(", "' *<'", ",", "'<'", ",", "parameter", ")", ")" ]
https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/cpplint.py#L5005-L5135
intel/llvm
e6d0547e9d99b5a56430c4749f6c7e328bf221ab
third-party/benchmark/tools/gbench/report.py
python
color_format
(use_color, fmt_str, *args, **kwargs)
return fmt_str.format(*args, **kwargs)
Return the result of 'fmt_str.format(*args, **kwargs)' after transforming 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' is False then all color codes in 'args' and 'kwargs' are replaced with the empty string.
Return the result of 'fmt_str.format(*args, **kwargs)' after transforming 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' is False then all color codes in 'args' and 'kwargs' are replaced with the empty string.
[ "Return", "the", "result", "of", "fmt_str", ".", "format", "(", "*", "args", "**", "kwargs", ")", "after", "transforming", "args", "and", "kwargs", "according", "to", "the", "value", "of", "use_color", ".", "If", "use_color", "is", "False", "then", "all", "color", "codes", "in", "args", "and", "kwargs", "are", "replaced", "with", "the", "empty", "string", "." ]
def color_format(use_color, fmt_str, *args, **kwargs): """ Return the result of 'fmt_str.format(*args, **kwargs)' after transforming 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' is False then all color codes in 'args' and 'kwargs' are replaced with the empty string. """ assert use_color is True or use_color is False if not use_color: args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE for arg in args] kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE for key, arg in kwargs.items()} return fmt_str.format(*args, **kwargs)
[ "def", "color_format", "(", "use_color", ",", "fmt_str", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "use_color", "is", "True", "or", "use_color", "is", "False", "if", "not", "use_color", ":", "args", "=", "[", "arg", "if", "not", "isinstance", "(", "arg", ",", "BenchmarkColor", ")", "else", "BC_NONE", "for", "arg", "in", "args", "]", "kwargs", "=", "{", "key", ":", "arg", "if", "not", "isinstance", "(", "arg", ",", "BenchmarkColor", ")", "else", "BC_NONE", "for", "key", ",", "arg", "in", "kwargs", ".", "items", "(", ")", "}", "return", "fmt_str", ".", "format", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/third-party/benchmark/tools/gbench/report.py#L47-L60
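The record above depends on module-level names (BenchmarkColor, BC_NONE) that sit outside the snippet. Below is a self-contained sketch of the same idea; the two classes are hypothetical stand-ins modeled on the gbench report module, not imports from it:

```
class BenchmarkColor(object):
    def __init__(self, name, code):
        self.name, self.code = name, code

    def __format__(self, spec):
        return self.code  # render as the raw ANSI escape code

BC_NONE = BenchmarkColor('NONE', '')
BC_RED = BenchmarkColor('RED', '\033[31m')

def color_format(use_color, fmt_str, *args, **kwargs):
    # With use_color=False, every color argument degrades to the empty string.
    if not use_color:
        args = [BC_NONE if isinstance(a, BenchmarkColor) else a for a in args]
        kwargs = {k: BC_NONE if isinstance(v, BenchmarkColor) else v
                  for k, v in kwargs.items()}
    return fmt_str.format(*args, **kwargs)

print(repr(color_format(False, '{}delta: {:+.2f}%', BC_RED, 12.5)))
# 'delta: +12.50%' -- the color code was replaced with the empty string
```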
okex/V3-Open-API-SDK
c5abb0db7e2287718e0055e17e57672ce0ec7fd9
okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/pkg_resources/__init__.py
python
Distribution.clone
(self, **kw)
return self.__class__(**kw)
Copy this distribution, substituting in any changed keyword args
Copy this distribution, substituting in any changed keyword args
[ "Copy", "this", "distribution", "substituting", "in", "any", "changed", "keyword", "args" ]
def clone(self, **kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw)
[ "def", "clone", "(", "self", ",", "*", "*", "kw", ")", ":", "names", "=", "'project_name version py_version platform location precedence'", "for", "attr", "in", "names", ".", "split", "(", ")", ":", "kw", ".", "setdefault", "(", "attr", ",", "getattr", "(", "self", ",", "attr", ",", "None", ")", ")", "kw", ".", "setdefault", "(", "'metadata'", ",", "self", ".", "_provider", ")", "return", "self", ".", "__class__", "(", "*", "*", "kw", ")" ]
https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/pkg_resources/__init__.py#L2844-L2850
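A short usage sketch, assuming the pkg_resources that ships with setuptools (the pip-vendored copy above behaves the same way): clone() copies a Distribution while overriding selected constructor keywords.

```
from pkg_resources import Distribution

d1 = Distribution(project_name='demo', version='1.0')
d2 = d1.clone(version='2.0')        # copy everything, override the version

print(d1.project_name, d1.version)  # demo 1.0
print(d2.project_name, d2.version)  # demo 2.0
```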
openvinotoolkit/openvino
dedcbeafa8b84cccdc55ca64b8da516682b381c7
tools/mo/openvino/tools/mo/utils/broadcasting.py
python
make_equal_rank
(shape_1: np.ndarray, shape_2: np.ndarray)
return shape_1, shape_2
Prepend shape with smaller length with 1. Return updates shapes :param shape_1: first shape :param shape_2: second shape :return: tuple with updated shapes
Prepend shape with smaller length with 1. Return updates shapes :param shape_1: first shape :param shape_2: second shape :return: tuple with updated shapes
[ "Prepend", "shape", "with", "smaller", "length", "with", "1", ".", "Return", "updates", "shapes", ":", "param", "shape_1", ":", "first", "shape", ":", "param", "shape_2", ":", "second", "shape", ":", "return", ":", "tuple", "with", "updated", "shapes" ]
def make_equal_rank(shape_1: np.ndarray, shape_2: np.ndarray): """ Prepend shape with smaller length with 1. Return updates shapes :param shape_1: first shape :param shape_2: second shape :return: tuple with updated shapes """ while len(shape_1) < len(shape_2): shape_1 = shape_insert(shape_1, 0, 1) while len(shape_2) < len(shape_1): shape_2 = shape_insert(shape_2, 0, 1) return shape_1, shape_2
[ "def", "make_equal_rank", "(", "shape_1", ":", "np", ".", "ndarray", ",", "shape_2", ":", "np", ".", "ndarray", ")", ":", "while", "len", "(", "shape_1", ")", "<", "len", "(", "shape_2", ")", ":", "shape_1", "=", "shape_insert", "(", "shape_1", ",", "0", ",", "1", ")", "while", "len", "(", "shape_2", ")", "<", "len", "(", "shape_1", ")", ":", "shape_2", "=", "shape_insert", "(", "shape_2", ",", "0", ",", "1", ")", "return", "shape_1", ",", "shape_2" ]
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/tools/mo/openvino/tools/mo/utils/broadcasting.py#L13-L26
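shape_insert is an OpenVINO-internal helper, so the record above is not runnable on its own. Here is a standalone sketch of the same rank equalization, with np.insert standing in for it:

```
import numpy as np

def make_equal_rank(shape_1, shape_2):
    # Prepend 1s to the shorter shape until both ranks match.
    while len(shape_1) < len(shape_2):
        shape_1 = np.insert(shape_1, 0, 1)
    while len(shape_2) < len(shape_1):
        shape_2 = np.insert(shape_2, 0, 1)
    return shape_1, shape_2

print(make_equal_rank(np.array([3, 4]), np.array([2, 1, 3, 4])))
# (array([1, 1, 3, 4]), array([2, 1, 3, 4]))
```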
nsnam/ns-3-dev-git
efdb2e21f45c0a87a60b47c547b68fa140a7b686
bindings/python/rad_util.py
python
trim
(l)
return l_trimmed
Discard values in list more than 1.5*IQR outside IQR. (IQR is inter-quartile-range) This function uses rad_util.quantile 1.5*IQR -- mild outlier 3*IQR -- extreme outlier See: http://wind.cc.whecn.edu/~pwildman/statnew/section_7_-_exploratory_data_analysis.htm
Discard values in list more than 1.5*IQR outside IQR.
[ "Discard", "values", "in", "list", "more", "than", "1", ".", "5", "*", "IQR", "outside", "IQR", "." ]
def trim(l): """Discard values in list more than 1.5*IQR outside IQR. (IQR is inter-quartile-range) This function uses rad_util.quantile 1.5*IQR -- mild outlier 3*IQR -- extreme outlier See: http://wind.cc.whecn.edu/~pwildman/statnew/section_7_-_exploratory_data_analysis.htm """ l_sort = l[:] l_sort.sort() # Calculate medianscore (based on stats.py lmedianscore by Gary Strangman) if len(l_sort) % 2 == 0: # If even number of scores, average middle 2. index = int(len(l_sort) / 2) # Integer division correct median = float(l_sort[index] + l_sort[index-1]) / 2 else: # int division gives mid value when count from 0 index = int(len(l_sort) / 2) median = l_sort[index] # Calculate IQR. q1 = quantile(l_sort, 0.25) q3 = quantile(l_sort, 0.75) iqr = q3 - q1 iqr_extra = iqr * 1.5 def in_interval(x, i=iqr_extra, q1=q1, q3=q3): return (x >= q1-i and x <= q3+i) l_trimmed = [x for x in l_sort if in_interval(x)] return l_trimmed
[ "def", "trim", "(", "l", ")", ":", "l_sort", "=", "l", "[", ":", "]", "l_sort", ".", "sort", "(", ")", "# Calculate medianscore (based on stats.py lmedianscore by Gary Strangman)", "if", "len", "(", "l_sort", ")", "%", "2", "==", "0", ":", "# If even number of scores, average middle 2.", "index", "=", "int", "(", "len", "(", "l_sort", ")", "/", "2", ")", "# Integer division correct", "median", "=", "float", "(", "l_sort", "[", "index", "]", "+", "l_sort", "[", "index", "-", "1", "]", ")", "/", "2", "else", ":", "# int division gives mid value when count from 0", "index", "=", "int", "(", "len", "(", "l_sort", ")", "/", "2", ")", "median", "=", "l_sort", "[", "index", "]", "# Calculate IQR.", "q1", "=", "quantile", "(", "l_sort", ",", "0.25", ")", "q3", "=", "quantile", "(", "l_sort", ",", "0.75", ")", "iqr", "=", "q3", "-", "q1", "iqr_extra", "=", "iqr", "*", "1.5", "def", "in_interval", "(", "x", ",", "i", "=", "iqr_extra", ",", "q1", "=", "q1", ",", "q3", "=", "q3", ")", ":", "return", "(", "x", ">=", "q1", "-", "i", "and", "x", "<=", "q3", "+", "i", ")", "l_trimmed", "=", "[", "x", "for", "x", "in", "l_sort", "if", "in_interval", "(", "x", ")", "]", "return", "l_trimmed" ]
https://github.com/nsnam/ns-3-dev-git/blob/efdb2e21f45c0a87a60b47c547b68fa140a7b686/bindings/python/rad_util.py#L371-L404
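An equivalent sketch of the same mild-outlier fence using numpy; np.percentile stands in for rad_util.quantile here, whose interpolation rule may differ slightly at the quartile boundaries:

```
import numpy as np

def trim(values):
    q1, q3 = np.percentile(values, [25, 75])
    fence = 1.5 * (q3 - q1)  # mild-outlier fence, 1.5 * IQR
    return [x for x in sorted(values) if q1 - fence <= x <= q3 + fence]

print(trim([1, 2, 3, 4, 5, 100]))  # [1, 2, 3, 4, 5] -- 100 is discarded
```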
BlzFans/wke
b0fa21158312e40c5fbd84682d643022b6c34a93
cygwin/lib/python2.6/plistlib.py
python
writePlistToString
(rootObject)
return f.getvalue()
Return 'rootObject' as a plist-formatted string.
Return 'rootObject' as a plist-formatted string.
[ "Return", "rootObject", "as", "a", "plist", "-", "formatted", "string", "." ]
def writePlistToString(rootObject): """Return 'rootObject' as a plist-formatted string. """ f = StringIO() writePlist(rootObject, f) return f.getvalue()
[ "def", "writePlistToString", "(", "rootObject", ")", ":", "f", "=", "StringIO", "(", ")", "writePlist", "(", "rootObject", ",", "f", ")", "return", "f", ".", "getvalue", "(", ")" ]
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/plistlib.py#L106-L111
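writePlistToString() is the Python 2 spelling of this API; on Python 3 the equivalent entry point is plistlib.dumps(), which returns bytes:

```
import plistlib

data = plistlib.dumps({'Author': 'me', 'Lines': [1, 2, 3]})
print(data.decode('utf-8'))  # an XML property list document
```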
Illumina/strelka
d7377443b62319f7c7bd70c241c4b2df3459e29a
src/python/lib/estimateHardware.py
python
getNodeMemMb
()
return memMb
return total memory in Mbytes linux logic taken from R Kelley's function in IsisWorkflow
return total memory in Mbytes
[ "return", "total", "memory", "in", "Mbytes" ]
def getNodeMemMb(): """ return total memory in Mbytes linux logic taken from R Kelley's function in IsisWorkflow """ memMb = 0 import platform if platform.system().find("Linux") > -1: # # get this from /proc/meminfo # mname="/proc/meminfo" if not os.path.isfile(mname): raise EstException("Can't read memory information from %s" % (mname)) line = open(mname).readline() splat = line.rstrip().split() if len(splat) != 3: raise EstException("Unexpected format in %s" % (mname)) try: memMb = 1+((int(splat[1])-1)/1024) except: raise EstException("Unexpected format in %s" % (mname)) elif platform.system().find("Darwin") > -1: import subprocess cmd=['sysctl', '-n', 'hw.memsize'] proc=subprocess.Popen(cmd,shell=False,stdout=subprocess.PIPE) for line in proc.stdout : memMb=int(line.strip())/(1024*1024) break elif platform.system().find("Windows") > -1: process = os.popen('wmic memorychip get capacity') result = process.read() process.close() totalMem = 0 for m in result.split(" \r\n")[1:-1]: totalMem += int(m) memMb = totalMem / (1024**2) else: raise EstException("Can't determine total memory available on OS: '%s'" % (platform.system())) return memMb
[ "def", "getNodeMemMb", "(", ")", ":", "memMb", "=", "0", "import", "platform", "if", "platform", ".", "system", "(", ")", ".", "find", "(", "\"Linux\"", ")", ">", "-", "1", ":", "#", "# get this from /proc/meminfo", "#", "mname", "=", "\"/proc/meminfo\"", "if", "not", "os", ".", "path", ".", "isfile", "(", "mname", ")", ":", "raise", "EstException", "(", "\"Can't read memory information from %s\"", "%", "(", "mname", ")", ")", "line", "=", "open", "(", "mname", ")", ".", "readline", "(", ")", "splat", "=", "line", ".", "rstrip", "(", ")", ".", "split", "(", ")", "if", "len", "(", "splat", ")", "!=", "3", ":", "raise", "EstException", "(", "\"Unexpected format in %s\"", "%", "(", "mname", ")", ")", "try", ":", "memMb", "=", "1", "+", "(", "(", "int", "(", "splat", "[", "1", "]", ")", "-", "1", ")", "/", "1024", ")", "except", ":", "raise", "EstException", "(", "\"Unexpected format in %s\"", "%", "(", "mname", ")", ")", "elif", "platform", ".", "system", "(", ")", ".", "find", "(", "\"Darwin\"", ")", ">", "-", "1", ":", "import", "subprocess", "cmd", "=", "[", "'sysctl'", ",", "'-n'", ",", "'hw.memsize'", "]", "proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "False", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "for", "line", "in", "proc", ".", "stdout", ":", "memMb", "=", "int", "(", "line", ".", "strip", "(", ")", ")", "/", "(", "1024", "*", "1024", ")", "break", "elif", "platform", ".", "system", "(", ")", ".", "find", "(", "\"Windows\"", ")", ">", "-", "1", ":", "process", "=", "os", ".", "popen", "(", "'wmic memorychip get capacity'", ")", "result", "=", "process", ".", "read", "(", ")", "process", ".", "close", "(", ")", "totalMem", "=", "0", "for", "m", "in", "result", ".", "split", "(", "\" \\r\\n\"", ")", "[", "1", ":", "-", "1", "]", ":", "totalMem", "+=", "int", "(", "m", ")", "memMb", "=", "totalMem", "/", "(", "1024", "**", "2", ")", "else", ":", "raise", "EstException", "(", "\"Can't determine total memory available on OS: '%s'\"", "(", "platform", ".", "system", "(", ")", ")", ")", "return", "memMb" ]
https://github.com/Illumina/strelka/blob/d7377443b62319f7c7bd70c241c4b2df3459e29a/src/python/lib/estimateHardware.py#L122-L168
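A trimmed, Linux-only sketch of the /proc/meminfo arithmetic used above (the full function also covers Darwin and Windows):

```
def get_node_mem_mb():
    # First line of /proc/meminfo looks like: "MemTotal:  16326492 kB"
    with open('/proc/meminfo') as f:
        total_kb = int(f.readline().split()[1])
    return 1 + (total_kb - 1) // 1024  # round kB up to whole MB
```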
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/smtplib.py
python
SMTP.sendmail
(self, from_addr, to_addrs, msg, mail_options=[], rcpt_options=[])
return senderrs
This command performs an entire mail transaction. The arguments are: - from_addr : The address sending this mail. - to_addrs : A list of addresses to send this mail to. A bare string will be treated as a list with 1 address. - msg : The message to send. - mail_options : List of ESMTP options (such as 8bitmime) for the mail command. - rcpt_options : List of ESMTP options (such as DSN commands) for all the rcpt commands. If there has been no previous EHLO or HELO command this session, this method tries ESMTP EHLO first. If the server does ESMTP, message size and each of the specified options will be passed to it. If EHLO fails, HELO will be tried and ESMTP options suppressed. This method will return normally if the mail is accepted for at least one recipient. It returns a dictionary, with one entry for each recipient that was refused. Each entry contains a tuple of the SMTP error code and the accompanying error message sent by the server. This method may raise the following exceptions: SMTPHeloError The server didn't reply properly to the helo greeting. SMTPRecipientsRefused The server rejected ALL recipients (no mail was sent). SMTPSenderRefused The server didn't accept the from_addr. SMTPDataError The server replied with an unexpected error code (other than a refusal of a recipient). Note: the connection will be open even after an exception is raised. Example: >>> import smtplib >>> s=smtplib.SMTP("localhost") >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"] >>> msg = '''\\ ... From: Me@my.org ... Subject: testin'... ... ... This is a test ''' >>> s.sendmail("me@my.org",tolist,msg) { "three@three.org" : ( 550 ,"User unknown" ) } >>> s.quit() In the above example, the message was accepted for delivery to three of the four addresses, and one was rejected, with the error code 550. If all addresses are accepted, then the method will return an empty dictionary.
This command performs an entire mail transaction.
[ "This", "command", "performs", "an", "entire", "mail", "transaction", "." ]
def sendmail(self, from_addr, to_addrs, msg, mail_options=[], rcpt_options=[]): """This command performs an entire mail transaction. The arguments are: - from_addr : The address sending this mail. - to_addrs : A list of addresses to send this mail to. A bare string will be treated as a list with 1 address. - msg : The message to send. - mail_options : List of ESMTP options (such as 8bitmime) for the mail command. - rcpt_options : List of ESMTP options (such as DSN commands) for all the rcpt commands. If there has been no previous EHLO or HELO command this session, this method tries ESMTP EHLO first. If the server does ESMTP, message size and each of the specified options will be passed to it. If EHLO fails, HELO will be tried and ESMTP options suppressed. This method will return normally if the mail is accepted for at least one recipient. It returns a dictionary, with one entry for each recipient that was refused. Each entry contains a tuple of the SMTP error code and the accompanying error message sent by the server. This method may raise the following exceptions: SMTPHeloError The server didn't reply properly to the helo greeting. SMTPRecipientsRefused The server rejected ALL recipients (no mail was sent). SMTPSenderRefused The server didn't accept the from_addr. SMTPDataError The server replied with an unexpected error code (other than a refusal of a recipient). Note: the connection will be open even after an exception is raised. Example: >>> import smtplib >>> s=smtplib.SMTP("localhost") >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"] >>> msg = '''\\ ... From: Me@my.org ... Subject: testin'... ... ... This is a test ''' >>> s.sendmail("me@my.org",tolist,msg) { "three@three.org" : ( 550 ,"User unknown" ) } >>> s.quit() In the above example, the message was accepted for delivery to three of the four addresses, and one was rejected, with the error code 550. If all addresses are accepted, then the method will return an empty dictionary. """ self.ehlo_or_helo_if_needed() esmtp_opts = [] if self.does_esmtp: # Hmmm? what's this? -ddm # self.esmtp_features['7bit']="" if self.has_extn('size'): esmtp_opts.append("size=%d" % len(msg)) for option in mail_options: esmtp_opts.append(option) (code, resp) = self.mail(from_addr, esmtp_opts) if code != 250: self.rset() raise SMTPSenderRefused(code, resp, from_addr) senderrs = {} if isinstance(to_addrs, basestring): to_addrs = [to_addrs] for each in to_addrs: (code, resp) = self.rcpt(each, rcpt_options) if (code != 250) and (code != 251): senderrs[each] = (code, resp) if len(senderrs) == len(to_addrs): # the server refused all our recipients self.rset() raise SMTPRecipientsRefused(senderrs) (code, resp) = self.data(msg) if code != 250: self.rset() raise SMTPDataError(code, resp) #if we got here then somebody got our mail return senderrs
[ "def", "sendmail", "(", "self", ",", "from_addr", ",", "to_addrs", ",", "msg", ",", "mail_options", "=", "[", "]", ",", "rcpt_options", "=", "[", "]", ")", ":", "self", ".", "ehlo_or_helo_if_needed", "(", ")", "esmtp_opts", "=", "[", "]", "if", "self", ".", "does_esmtp", ":", "# Hmmm? what's this? -ddm", "# self.esmtp_features['7bit']=\"\"", "if", "self", ".", "has_extn", "(", "'size'", ")", ":", "esmtp_opts", ".", "append", "(", "\"size=%d\"", "%", "len", "(", "msg", ")", ")", "for", "option", "in", "mail_options", ":", "esmtp_opts", ".", "append", "(", "option", ")", "(", "code", ",", "resp", ")", "=", "self", ".", "mail", "(", "from_addr", ",", "esmtp_opts", ")", "if", "code", "!=", "250", ":", "self", ".", "rset", "(", ")", "raise", "SMTPSenderRefused", "(", "code", ",", "resp", ",", "from_addr", ")", "senderrs", "=", "{", "}", "if", "isinstance", "(", "to_addrs", ",", "basestring", ")", ":", "to_addrs", "=", "[", "to_addrs", "]", "for", "each", "in", "to_addrs", ":", "(", "code", ",", "resp", ")", "=", "self", ".", "rcpt", "(", "each", ",", "rcpt_options", ")", "if", "(", "code", "!=", "250", ")", "and", "(", "code", "!=", "251", ")", ":", "senderrs", "[", "each", "]", "=", "(", "code", ",", "resp", ")", "if", "len", "(", "senderrs", ")", "==", "len", "(", "to_addrs", ")", ":", "# the server refused all our recipients", "self", ".", "rset", "(", ")", "raise", "SMTPRecipientsRefused", "(", "senderrs", ")", "(", "code", ",", "resp", ")", "=", "self", ".", "data", "(", "msg", ")", "if", "code", "!=", "250", ":", "self", ".", "rset", "(", ")", "raise", "SMTPDataError", "(", "code", ",", "resp", ")", "#if we got here then somebody got our mail", "return", "senderrs" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/smtplib.py#L667-L754
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/calendar.py
python
TextCalendar.formatday
(self, day, weekday, width)
return s.center(width)
Returns a formatted day.
Returns a formatted day.
[ "Returns", "a", "formatted", "day", "." ]
def formatday(self, day, weekday, width): """ Returns a formatted day. """ if day == 0: s = '' else: s = '%2i' % day # right-align single-digit days return s.center(width)
[ "def", "formatday", "(", "self", ",", "day", ",", "weekday", ",", "width", ")", ":", "if", "day", "==", "0", ":", "s", "=", "''", "else", ":", "s", "=", "'%2i'", "%", "day", "# right-align single-digit days", "return", "s", ".", "center", "(", "width", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/calendar.py#L272-L280
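A quick check of the cell formatting described above: single-digit days are right-aligned to two characters, and day 0 (padding outside the month) renders as blanks.

```
import calendar

cal = calendar.TextCalendar()
print(repr(cal.formatday(5, 0, 3)))  # ' 5 '
print(repr(cal.formatday(0, 0, 3)))  # '   ' -- padding day outside the month
```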
root-project/root
fcd3583bb14852bf2e8cd2415717cbaac0e75896
main/python/cmdLineUtils.py
python
_rootLsPrintLongLs
(keyList, indent, treeListing)
Prints a list of `TKey`s and some information. The information of each key is printed with the following pattern: TKeyClassName {date time pattern} TKeyName;TKeyCycle TKeyTitle {optional: [current/backup cycle]} An example: ``` $ rootls -l https://root.cern/files/tutorials/hsimple.root TProfile Jun 30 23:59 2018 hprof;1 "Profile of pz versus px" TH1F Jun 30 23:59 2018 hpx;1 "This is the px distribution" TH2F Jun 30 23:59 2018 hpxpy;1 "py vs px" TNtuple Jun 30 23:59 2018 ntuple;1 "Demo ntuple" ```
Prints a list of `TKey`s and some information.
[ "Prints", "a", "list", "of", "TKey", "s", "and", "some", "information", "." ]
def _rootLsPrintLongLs(keyList, indent, treeListing): """Prints a list of `TKey`s and some information. The information of each key is printed with the following pattern: TKeyClassName {date time pattern} TKeyName;TKeyCycle TKeyTitle {optional: [current/backup cycle]} An example: ``` $ rootls -l https://root.cern/files/tutorials/hsimple.root TProfile Jun 30 23:59 2018 hprof;1 "Profile of pz versus px" TH1F Jun 30 23:59 2018 hpx;1 "This is the px distribution" TH2F Jun 30 23:59 2018 hpxpy;1 "py vs px" TNtuple Jun 30 23:59 2018 ntuple;1 "Demo ntuple" ``` """ # Early return if the keyList is empty if not keyList: return maxCharClass = max([len(key.GetClassName()) for key in keyList]) maxCharTime = 12 maxCharName = max([len(key.GetName()) for key in keyList]) dic = { "classWidth": maxCharClass+2, "timeWidth": maxCharTime+2, "nameWidth": maxCharName+2, "titleWidth": 1, "cycleWidth": 1} # Input keyList is a THashList. Convert it to a Python list to make it work # with zip_longest later keyList = [key for key in keyList] # Mimic the logic used in TDirectoryFile::ls(Option_t *option) # For any key in the list, we need to grab the previous one and the next one. # To do this, we use the iterator returned by zip_longest. The three input # lists to zip_longest can be visualized as follows: # # a = ["key_1","key_2","key_3"] # a_lagright = [None] + a[:-1] # a_lagleft = a[1:] # list(zip_longest(a_lagright, a, a_lagleft)) # [(None, 'key_1', 'key_2'), ('key_1', 'key_2', 'key_3'), ('key_2', 'key_3', None)] # # So that for any key, we can have a correct reference to the previous and # following keys of `keyList`. The first key has no previous key and the last # key has no following key, so the respective elements of the zip_longest # iterator are `None`. for previouskey, currentkey, nextkey in zip_longest([None]+keyList[:-1], keyList, keyList[1:]): # If this key is the first one in the list, or if it has a different # name than the previous one, it means that it's the first object of # that kind in the list. if previouskey is None or currentkey.GetName() != previouskey.GetName(): # Then we check the following key. If the current key is not # the last key in the list and if the following key has the same # name, then it means it's another cycle of the same object. # Thus, it's gonna be a backup cycle of the same object. # Otherwise, it's just a key with one cycle so we don't need # to print information two distinguish between different cycles # of the same key. if nextkey is not None and currentkey.GetName() == nextkey.GetName(): cyclestr = "[current cycle]" else: cyclestr = "" else: # This key is a subsequent cycle of a previous key cyclestr = "[backup cycle]" datime = currentkey.GetDatime() time = datime.GetTime() date = datime.GetDate() year = datime.GetYear() time = _prepareTime(time) rec = [ currentkey.GetClassName(), MONTH[int(str(date)[4:6])]+" " + str(date)[6:] + " "+time[:2]+":" + time[2:4]+" "+str(year)+" ", currentkey.GetName()+";"+str(currentkey.GetCycle()), " \""+currentkey.GetTitle()+"\"", " " + cyclestr ] write(LONG_TEMPLATE.format(*rec, **dic), indent, end="\n") if treeListing and isTreeKey(currentkey): tree = currentkey.ReadObj() _recursifTreePrinter(tree, indent+2) tree = tree.GetTree() _printClusters(tree, indent+2) if treeListing and isTHnSparseKey(currentkey): hs = currentkey.ReadObj() hs.Print("all")
[ "def", "_rootLsPrintLongLs", "(", "keyList", ",", "indent", ",", "treeListing", ")", ":", "# Early return if the keyList is empty", "if", "not", "keyList", ":", "return", "maxCharClass", "=", "max", "(", "[", "len", "(", "key", ".", "GetClassName", "(", ")", ")", "for", "key", "in", "keyList", "]", ")", "maxCharTime", "=", "12", "maxCharName", "=", "max", "(", "[", "len", "(", "key", ".", "GetName", "(", ")", ")", "for", "key", "in", "keyList", "]", ")", "dic", "=", "{", "\"classWidth\"", ":", "maxCharClass", "+", "2", ",", "\"timeWidth\"", ":", "maxCharTime", "+", "2", ",", "\"nameWidth\"", ":", "maxCharName", "+", "2", ",", "\"titleWidth\"", ":", "1", ",", "\"cycleWidth\"", ":", "1", "}", "# Input keyList is a THashList. Convert it to a Python list to make it work", "# with zip_longest later", "keyList", "=", "[", "key", "for", "key", "in", "keyList", "]", "# Mimic the logic used in TDirectoryFile::ls(Option_t *option)", "# For any key in the list, we need to grab the previous one and the next one.", "# To do this, we use the iterator returned by zip_longest. The three input", "# lists to zip_longest can be visualized as follows:", "#", "# a = [\"key_1\",\"key_2\",\"key_3\"]", "# a_lagright = [None] + a[:-1]", "# a_lagleft = a[1:]", "# list(zip_longest(a_lagright, a, a_lagleft))", "# [(None, 'key_1', 'key_2'), ('key_1', 'key_2', 'key_3'), ('key_2', 'key_3', None)]", "#", "# So that for any key, we can have a correct reference to the previous and", "# following keys of `keyList`. The first key has no previous key and the last", "# key has no following key, so the respective elements of the zip_longest", "# iterator are `None`.", "for", "previouskey", ",", "currentkey", ",", "nextkey", "in", "zip_longest", "(", "[", "None", "]", "+", "keyList", "[", ":", "-", "1", "]", ",", "keyList", ",", "keyList", "[", "1", ":", "]", ")", ":", "# If this key is the first one in the list, or if it has a different", "# name than the previous one, it means that it's the first object of", "# that kind in the list.", "if", "previouskey", "is", "None", "or", "currentkey", ".", "GetName", "(", ")", "!=", "previouskey", ".", "GetName", "(", ")", ":", "# Then we check the following key. 
If the current key is not", "# the last key in the list and if the following key has the same", "# name, then it means it's another cycle of the same object.", "# Thus, it's gonna be a backup cycle of the same object.", "# Otherwise, it's just a key with one cycle so we don't need", "# to print information two distinguish between different cycles", "# of the same key.", "if", "nextkey", "is", "not", "None", "and", "currentkey", ".", "GetName", "(", ")", "==", "nextkey", ".", "GetName", "(", ")", ":", "cyclestr", "=", "\"[current cycle]\"", "else", ":", "cyclestr", "=", "\"\"", "else", ":", "# This key is a subsequent cycle of a previous key", "cyclestr", "=", "\"[backup cycle]\"", "datime", "=", "currentkey", ".", "GetDatime", "(", ")", "time", "=", "datime", ".", "GetTime", "(", ")", "date", "=", "datime", ".", "GetDate", "(", ")", "year", "=", "datime", ".", "GetYear", "(", ")", "time", "=", "_prepareTime", "(", "time", ")", "rec", "=", "[", "currentkey", ".", "GetClassName", "(", ")", ",", "MONTH", "[", "int", "(", "str", "(", "date", ")", "[", "4", ":", "6", "]", ")", "]", "+", "\" \"", "+", "str", "(", "date", ")", "[", "6", ":", "]", "+", "\" \"", "+", "time", "[", ":", "2", "]", "+", "\":\"", "+", "time", "[", "2", ":", "4", "]", "+", "\" \"", "+", "str", "(", "year", ")", "+", "\" \"", ",", "currentkey", ".", "GetName", "(", ")", "+", "\";\"", "+", "str", "(", "currentkey", ".", "GetCycle", "(", ")", ")", ",", "\" \\\"\"", "+", "currentkey", ".", "GetTitle", "(", ")", "+", "\"\\\"\"", ",", "\" \"", "+", "cyclestr", "]", "write", "(", "LONG_TEMPLATE", ".", "format", "(", "*", "rec", ",", "*", "*", "dic", ")", ",", "indent", ",", "end", "=", "\"\\n\"", ")", "if", "treeListing", "and", "isTreeKey", "(", "currentkey", ")", ":", "tree", "=", "currentkey", ".", "ReadObj", "(", ")", "_recursifTreePrinter", "(", "tree", ",", "indent", "+", "2", ")", "tree", "=", "tree", ".", "GetTree", "(", ")", "_printClusters", "(", "tree", ",", "indent", "+", "2", ")", "if", "treeListing", "and", "isTHnSparseKey", "(", "currentkey", ")", ":", "hs", "=", "currentkey", ".", "ReadObj", "(", ")", "hs", ".", "Print", "(", "\"all\"", ")" ]
https://github.com/root-project/root/blob/fcd3583bb14852bf2e8cd2415717cbaac0e75896/main/python/cmdLineUtils.py#L966-L1057
BlzFans/wke
b0fa21158312e40c5fbd84682d643022b6c34a93
cygwin/lib/python2.6/dircache.py
python
listdir
(path)
return list
List directory contents, using cache.
List directory contents, using cache.
[ "List", "directory", "contents", "using", "cache", "." ]
def listdir(path): """List directory contents, using cache.""" try: cached_mtime, list = cache[path] del cache[path] except KeyError: cached_mtime, list = -1, [] mtime = os.stat(path).st_mtime if mtime != cached_mtime: list = os.listdir(path) list.sort() cache[path] = mtime, list return list
[ "def", "listdir", "(", "path", ")", ":", "try", ":", "cached_mtime", ",", "list", "=", "cache", "[", "path", "]", "del", "cache", "[", "path", "]", "except", "KeyError", ":", "cached_mtime", ",", "list", "=", "-", "1", ",", "[", "]", "mtime", "=", "os", ".", "stat", "(", "path", ")", ".", "st_mtime", "if", "mtime", "!=", "cached_mtime", ":", "list", "=", "os", ".", "listdir", "(", "path", ")", "list", ".", "sort", "(", ")", "cache", "[", "path", "]", "=", "mtime", ",", "list", "return", "list" ]
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/dircache.py#L21-L33
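dircache was removed in Python 3; here is a standalone sketch of the same mtime-keyed caching idea for modern interpreters:

```
import os

_cache = {}

def cached_listdir(path):
    mtime = os.stat(path).st_mtime
    entry = _cache.get(path)
    if entry is None or entry[0] != mtime:
        entry = (mtime, sorted(os.listdir(path)))  # refresh on mtime change
        _cache[path] = entry
    return entry[1]

cached_listdir('.')  # stats the directory and lists it
cached_listdir('.')  # stats it again, but serves the listing from the cache
```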
Z3Prover/z3
d745d03afdfdf638d66093e2bfbacaf87187f35b
src/api/python/z3/z3.py
python
SRem
(a, b)
return BitVecRef(Z3_mk_bvsrem(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
Create the Z3 expression signed remainder. Use the operator % for signed modulus, and URem() for unsigned remainder. >>> x = BitVec('x', 32) >>> y = BitVec('y', 32) >>> SRem(x, y) SRem(x, y) >>> SRem(x, y).sort() BitVec(32) >>> (x % y).sexpr() '(bvsmod x y)' >>> SRem(x, y).sexpr() '(bvsrem x y)'
Create the Z3 expression signed remainder.
[ "Create", "the", "Z3", "expression", "signed", "remainder", "." ]
def SRem(a, b): """Create the Z3 expression signed remainder. Use the operator % for signed modulus, and URem() for unsigned remainder. >>> x = BitVec('x', 32) >>> y = BitVec('y', 32) >>> SRem(x, y) SRem(x, y) >>> SRem(x, y).sort() BitVec(32) >>> (x % y).sexpr() '(bvsmod x y)' >>> SRem(x, y).sexpr() '(bvsrem x y)' """ _check_bv_args(a, b) a, b = _coerce_exprs(a, b) return BitVecRef(Z3_mk_bvsrem(a.ctx_ref(), a.as_ast(), b.as_ast()), a.ctx)
[ "def", "SRem", "(", "a", ",", "b", ")", ":", "_check_bv_args", "(", "a", ",", "b", ")", "a", ",", "b", "=", "_coerce_exprs", "(", "a", ",", "b", ")", "return", "BitVecRef", "(", "Z3_mk_bvsrem", "(", "a", ".", "ctx_ref", "(", ")", ",", "a", ".", "as_ast", "(", ")", ",", "b", ".", "as_ast", "(", ")", ")", ",", "a", ".", "ctx", ")" ]
https://github.com/Z3Prover/z3/blob/d745d03afdfdf638d66093e2bfbacaf87187f35b/src/api/python/z3/z3.py#L4243-L4261
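A quick z3 check of the signed-remainder semantics documented above: the result of SRem takes the sign of the dividend, unlike the floored % (signed modulus) operator.

```
from z3 import BitVecVal, SRem, simplify

a = BitVecVal(-7, 8)
b = BitVecVal(3, 8)
r = simplify(SRem(a, b))
print(r)                   # 255, i.e. -1 in 8-bit two's complement
print(r.as_signed_long())  # -1
print(simplify(a % b))     # 2 -- signed modulus rounds the other way
```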
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/training/sync_replicas_optimizer.py
python
SyncReplicasOptimizer._aggregate_sparse_grad
(self, grad, var, train_ops)
Aggregate sparse gradients. Args: grad: The sparse gradient to aggregate. var: The variable to apply this gradient to. train_ops: The train_ops for the worker to run. Returns: aggregated_grad: Aggregated grad.
Aggregate sparse gradients.
[ "Aggregate", "sparse", "gradients", "." ]
def _aggregate_sparse_grad(self, grad, var, train_ops): """Aggregate sparse gradients. Args: grad: The sparse gradient to aggregate. var: The variable to apply this gradient to. train_ops: The train_ops for the worker to run. Returns: aggregated_grad: Aggregated grad. """ # Sparse gradients have to be inserted as one pair of (value, # indice) as an element instead of the whole "indexedslice" because # their shapes are not deterministic. sparse_grad_queue = (data_flow_ops.FIFOQueue( -1, (grad.values.dtype, grad.indices.dtype), shapes=(var.get_shape().as_list()[1:], ()), shared_name="sparse_grad_q_%s" % var.name)) self._sparse_grad_queues_and_devs.append((sparse_grad_queue, var.device)) # Sparse token is inserted after the "enqueue_many" finishes. This # is needed to make sure enough sparse gradients have been enqueued # before applying them to the variables. sparse_token_queue = (data_flow_ops.FIFOQueue( self._replicas_to_aggregate * 2, types_pb2.DT_INT32, shapes=(), shared_name="sparse_token_q_%s" % var.name)) self._one_element_queue_list.append((sparse_token_queue, var.device)) enqueue_spares_op = sparse_grad_queue.enqueue_many([grad.values, grad.indices]) with ops.control_dependencies([enqueue_spares_op]): train_ops.append(sparse_token_queue.enqueue((1,))) with ops.control_dependencies([sparse_token_queue.dequeue_many( self._replicas_to_aggregate)]): values, indices = sparse_grad_queue.dequeue_many(sparse_grad_queue.size()) concat_grad = ops.IndexedSlices(values, indices, grad.dense_shape) # Sum the gradients of the same variables in the sparse layers so # that each variable is only updated once. Note that with 2 # gradients g1 and g2 from 2 replicas for the same variable, # apply(g1+g2) is different from apply(g1) and then apply(g2) when # the optimizer is complex like Momentum or Adagrad. values = concat_grad.values indices = concat_grad.indices new_indices, indx = array_ops.unique(indices) num_indices = array_ops.shape(new_indices)[0] sum_values = math_ops.unsorted_segment_sum(values, indx, num_indices) return ops.IndexedSlices(sum_values, new_indices, concat_grad.dense_shape)
[ "def", "_aggregate_sparse_grad", "(", "self", ",", "grad", ",", "var", ",", "train_ops", ")", ":", "# Sparse gradients have to be inserted as one pair of (value,", "# indice) as an element instead of the whole \"indexedslice\" because", "# their shapes are not deterministic.", "sparse_grad_queue", "=", "(", "data_flow_ops", ".", "FIFOQueue", "(", "-", "1", ",", "(", "grad", ".", "values", ".", "dtype", ",", "grad", ".", "indices", ".", "dtype", ")", ",", "shapes", "=", "(", "var", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "1", ":", "]", ",", "(", ")", ")", ",", "shared_name", "=", "\"sparse_grad_q_%s\"", "%", "var", ".", "name", ")", ")", "self", ".", "_sparse_grad_queues_and_devs", ".", "append", "(", "(", "sparse_grad_queue", ",", "var", ".", "device", ")", ")", "# Sparse token is inserted after the \"enqueue_many\" finishes. This", "# is needed to make sure enough sparse gradients have been enqueued", "# before applying them to the variables.", "sparse_token_queue", "=", "(", "data_flow_ops", ".", "FIFOQueue", "(", "self", ".", "_replicas_to_aggregate", "*", "2", ",", "types_pb2", ".", "DT_INT32", ",", "shapes", "=", "(", ")", ",", "shared_name", "=", "\"sparse_token_q_%s\"", "%", "var", ".", "name", ")", ")", "self", ".", "_one_element_queue_list", ".", "append", "(", "(", "sparse_token_queue", ",", "var", ".", "device", ")", ")", "enqueue_spares_op", "=", "sparse_grad_queue", ".", "enqueue_many", "(", "[", "grad", ".", "values", ",", "grad", ".", "indices", "]", ")", "with", "ops", ".", "control_dependencies", "(", "[", "enqueue_spares_op", "]", ")", ":", "train_ops", ".", "append", "(", "sparse_token_queue", ".", "enqueue", "(", "(", "1", ",", ")", ")", ")", "with", "ops", ".", "control_dependencies", "(", "[", "sparse_token_queue", ".", "dequeue_many", "(", "self", ".", "_replicas_to_aggregate", ")", "]", ")", ":", "values", ",", "indices", "=", "sparse_grad_queue", ".", "dequeue_many", "(", "sparse_grad_queue", ".", "size", "(", ")", ")", "concat_grad", "=", "ops", ".", "IndexedSlices", "(", "values", ",", "indices", ",", "grad", ".", "dense_shape", ")", "# Sum the gradients of the same variables in the sparse layers so", "# that each variable is only updated once. Note that with 2", "# gradients g1 and g2 from 2 replicas for the same variable,", "# apply(g1+g2) is different from apply(g1) and then apply(g2) when", "# the optimizer is complex like Momentum or Adagrad.", "values", "=", "concat_grad", ".", "values", "indices", "=", "concat_grad", ".", "indices", "new_indices", ",", "indx", "=", "array_ops", ".", "unique", "(", "indices", ")", "num_indices", "=", "array_ops", ".", "shape", "(", "new_indices", ")", "[", "0", "]", "sum_values", "=", "math_ops", ".", "unsorted_segment_sum", "(", "values", ",", "indx", ",", "num_indices", ")", "return", "ops", ".", "IndexedSlices", "(", "sum_values", ",", "new_indices", ",", "concat_grad", ".", "dense_shape", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/training/sync_replicas_optimizer.py#L645-L696
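The part of this record worth internalizing is the final dedup step: duplicate indices from different replicas are folded together with unique plus an unsorted segment sum, so each embedding row receives exactly one update. A minimal NumPy sketch of the same arithmetic, outside the TF graph and with made-up gradient rows:

import numpy as np

# Gradient rows gathered from replicas; row 3 appears twice, so its two
# contributions must be summed before a single apply().
values = np.array([[1.0], [2.0], [4.0]])
indices = np.array([3, 7, 3])

new_indices, inverse = np.unique(indices, return_inverse=True)
summed = np.zeros((len(new_indices), values.shape[1]))
np.add.at(summed, inverse, values)  # NumPy's unsorted segment sum

print(new_indices)  # [3 7]
print(summed)       # [[5.] [2.]] -- duplicates combined, one update per row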
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
media/tools/constrained_network_server/traffic_control.py
python
DeleteConstrainedPort
(config)
Deletes an existing constrained port. Deletes constraints set on a given port and the traffic forwarding rule from the constrained port to a specified server port. The original constrained network configuration used to create the constrained port must be passed in. Args: config: Constraint configuration dictionary, format: port: Port to constrain (integer 1-65535). server_port: Port to redirect traffic on [port] to (integer 1-65535). interface: Network interface name (string). bandwidth: Maximum allowed upload bandwidth (integer in kbit/s). Raises: TrafficControlError: If any operation fails. The message in the exception describes what failed.
Deletes an existing constrained port.
[ "Deletes", "an", "existing", "constrained", "port", "." ]
def DeleteConstrainedPort(config): """Deletes an existing constrained port. Deletes constraints set on a given port and the traffic forwarding rule from the constrained port to a specified server port. The original constrained network configuration used to create the constrained port must be passed in. Args: config: Constraint configuration dictionary, format: port: Port to constrain (integer 1-65535). server_port: Port to redirect traffic on [port] to (integer 1-65535). interface: Network interface name (string). bandwidth: Maximum allowed upload bandwidth (integer in kbit/s). Raises: TrafficControlError: If any operation fails. The message in the exception describes what failed. """ _CheckArgsExist(config, 'interface', 'port', 'server_port') try: # Delete filters first so it frees the class. _DeleteFilter(config['interface'], config['port']) finally: try: # Deleting the class deletes attached qdisc as well. _ConfigureClass('del', config) finally: _DeleteIptableRule(config['interface'], config['port'], config['server_port'])
[ "def", "DeleteConstrainedPort", "(", "config", ")", ":", "_CheckArgsExist", "(", "config", ",", "'interface'", ",", "'port'", ",", "'server_port'", ")", "try", ":", "# Delete filters first so it frees the class.", "_DeleteFilter", "(", "config", "[", "'interface'", "]", ",", "config", "[", "'port'", "]", ")", "finally", ":", "try", ":", "# Deleting the class deletes attached qdisc as well.", "_ConfigureClass", "(", "'del'", ",", "config", ")", "finally", ":", "_DeleteIptableRule", "(", "config", "[", "'interface'", "]", ",", "config", "[", "'port'", "]", ",", "config", "[", "'server_port'", "]", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/media/tools/constrained_network_server/traffic_control.py#L92-L122
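A hypothetical call site, assuming the same config dict that was used to create the constrained port; the underlying tc/iptables helpers are module-private and need root privileges, so the values here are illustrative only:

config = {
    'interface': 'eth0',   # assumed NIC name
    'port': 9000,          # the constrained port being torn down
    'server_port': 8080,   # where its traffic was being redirected
}
try:
    DeleteConstrainedPort(config)
except TrafficControlError as err:
    print('cleanup failed:', err)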
emscripten-core/emscripten
0d413d3c5af8b28349682496edc14656f5700c2f
third_party/ply/example/classcalc/calc.py
python
Calc.p_expression_group
(self, p)
expression : LPAREN expression RPAREN
expression : LPAREN expression RPAREN
[ "expression", ":", "LPAREN", "expression", "RPAREN" ]
def p_expression_group(self, p): 'expression : LPAREN expression RPAREN' p[0] = p[2]
[ "def", "p_expression_group", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "2", "]" ]
https://github.com/emscripten-core/emscripten/blob/0d413d3c5af8b28349682496edc14656f5700c2f/third_party/ply/example/classcalc/calc.py#L133-L135
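For readers unfamiliar with PLY, the docstring is the grammar production itself and p is indexed by symbol position, with p[0] receiving the value of the left-hand side. A stand-in list (not a real YaccProduction) makes the one-liner concrete:

# expression : LPAREN expression RPAREN
#    p[0]        p[1]    p[2]     p[3]
p = [None, '(', 42, ')']  # stand-in for PLY's YaccProduction object
p[0] = p[2]               # forward the inner value, drop the parentheses
assert p[0] == 42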
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/core/einsumfunc.py
python
_find_contraction
(positions, input_sets, output_set)
return (new_result, remaining, idx_removed, idx_contract)
Finds the contraction for a given set of input and output sets. Parameters ---------- positions : iterable Integer positions of terms used in the contraction. input_sets : list List of sets that represent the lhs side of the einsum subscript output_set : set Set that represents the rhs side of the overall einsum subscript Returns ------- new_result : set The indices of the resulting contraction remaining : list List of sets that have not been contracted, the new set is appended to the end of this list idx_removed : set Indices removed from the entire contraction idx_contraction : set The indices used in the current contraction Examples -------- # A simple dot product test case >>> pos = (0, 1) >>> isets = [set('ab'), set('bc')] >>> oset = set('ac') >>> _find_contraction(pos, isets, oset) ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) # A more complex case with additional terms in the contraction >>> pos = (0, 2) >>> isets = [set('abd'), set('ac'), set('bdc')] >>> oset = set('ac') >>> _find_contraction(pos, isets, oset) ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
Finds the contraction for a given set of input and output sets.
[ "Finds", "the", "contraction", "for", "a", "given", "set", "of", "input", "and", "output", "sets", "." ]
def _find_contraction(positions, input_sets, output_set): """ Finds the contraction for a given set of input and output sets. Parameters ---------- positions : iterable Integer positions of terms used in the contraction. input_sets : list List of sets that represent the lhs side of the einsum subscript output_set : set Set that represents the rhs side of the overall einsum subscript Returns ------- new_result : set The indices of the resulting contraction remaining : list List of sets that have not been contracted, the new set is appended to the end of this list idx_removed : set Indices removed from the entire contraction idx_contraction : set The indices used in the current contraction Examples -------- # A simple dot product test case >>> pos = (0, 1) >>> isets = [set('ab'), set('bc')] >>> oset = set('ac') >>> _find_contraction(pos, isets, oset) ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) # A more complex case with additional terms in the contraction >>> pos = (0, 2) >>> isets = [set('abd'), set('ac'), set('bdc')] >>> oset = set('ac') >>> _find_contraction(pos, isets, oset) ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) """ idx_contract = set() idx_remain = output_set.copy() remaining = [] for ind, value in enumerate(input_sets): if ind in positions: idx_contract |= value else: remaining.append(value) idx_remain |= value new_result = idx_remain & idx_contract idx_removed = (idx_contract - new_result) remaining.append(new_result) return (new_result, remaining, idx_removed, idx_contract)
[ "def", "_find_contraction", "(", "positions", ",", "input_sets", ",", "output_set", ")", ":", "idx_contract", "=", "set", "(", ")", "idx_remain", "=", "output_set", ".", "copy", "(", ")", "remaining", "=", "[", "]", "for", "ind", ",", "value", "in", "enumerate", "(", "input_sets", ")", ":", "if", "ind", "in", "positions", ":", "idx_contract", "|=", "value", "else", ":", "remaining", ".", "append", "(", "value", ")", "idx_remain", "|=", "value", "new_result", "=", "idx_remain", "&", "idx_contract", "idx_removed", "=", "(", "idx_contract", "-", "new_result", ")", "remaining", ".", "append", "(", "new_result", ")", "return", "(", "new_result", ",", "remaining", ",", "idx_removed", ",", "idx_contract", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/core/einsumfunc.py#L85-L142
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/mapreduce/mapreduce/output_writers.py
python
_GoogleCloudStorageOutputWriter._create
(cls, writer_spec, filename_suffix)
return cls(writer, writer_spec=writer_spec)
Helper method that actually creates the file in cloud storage.
Helper method that actually creates the file in cloud storage.
[ "Helper", "method", "that", "actually", "creates", "the", "file", "in", "cloud", "storage", "." ]
def _create(cls, writer_spec, filename_suffix): """Helper method that actually creates the file in cloud storage.""" writer = cls._open_file(writer_spec, filename_suffix) return cls(writer, writer_spec=writer_spec)
[ "def", "_create", "(", "cls", ",", "writer_spec", ",", "filename_suffix", ")", ":", "writer", "=", "cls", ".", "_open_file", "(", "writer_spec", ",", "filename_suffix", ")", "return", "cls", "(", "writer", ",", "writer_spec", "=", "writer_spec", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/mapreduce/mapreduce/output_writers.py#L744-L747
mandiant/flare-wmi
b0a5a094ff9ca7d7a1c4fc711dc00c74dec4b6b1
python-cim/cim/objects.py
python
ClassInstance.qualifiers
(self)
return ret
get dict of str to str
get dict of str to str
[ "get", "dict", "of", "str", "to", "str" ]
def qualifiers(self): """ get dict of str to str """ # TODO: remove duplication ret = {} for i in range(self.qualifiers_list.count): q = self.qualifiers_list.qualifiers[i] qk = self.data.get_qualifier_key(q) qv = self.data.get_qualifier_value(q) ret[str(qk)] = qv return ret
[ "def", "qualifiers", "(", "self", ")", ":", "# TODO: remove duplication", "ret", "=", "{", "}", "for", "i", "in", "range", "(", "self", ".", "qualifiers_list", ".", "count", ")", ":", "q", "=", "self", ".", "qualifiers_list", ".", "qualifiers", "[", "i", "]", "qk", "=", "self", ".", "data", ".", "get_qualifier_key", "(", "q", ")", "qv", "=", "self", ".", "data", ".", "get_qualifier_value", "(", "q", ")", "ret", "[", "str", "(", "qk", ")", "]", "=", "qv", "return", "ret" ]
https://github.com/mandiant/flare-wmi/blob/b0a5a094ff9ca7d7a1c4fc711dc00c74dec4b6b1/python-cim/cim/objects.py#L868-L877
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/ed_shelf.py
python
EdShelfDelegate.AddItem
(self, item, name, bmp=wx.NullBitmap)
Add an item to the shelf
Add an item to the shelf
[ "Add", "an", "item", "to", "the", "shelf" ]
def AddItem(self, item, name, bmp=wx.NullBitmap): """Add an item to the shelf""" self._shelf.AddItem(item, name, bmp)
[ "def", "AddItem", "(", "self", ",", "item", ",", "name", ",", "bmp", "=", "wx", ".", "NullBitmap", ")", ":", "self", ".", "_shelf", ".", "AddItem", "(", "item", ",", "name", ",", "bmp", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_shelf.py#L367-L369
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/jit/frontend.py
python
get_class_properties
(cls, self_name)
return properties
Get a list of Property objects representing the properties of a class. Args: cls: The class to get properties of. self_name: The name of the class that the properties should belong to. Returns: A list of Property objects corresponding to the properties of cls. Property here refers to the subclass of TreeView.
Get a list of Property objects representing the properties of a class.
[ "Get", "a", "list", "of", "Property", "objects", "representing", "the", "properties", "of", "a", "class", "." ]
def get_class_properties(cls, self_name): """ Get a list of Property objects representing the properties of a class. Args: cls: The class to get properties of. self_name: The name of the class that the properties should belong to. Returns: A list of Property objects corresponding to the properties of cls. Property here refers to the subclass of TreeView. """ props = inspect.getmembers( cls, predicate=lambda m: isinstance(m, property)) # Any property that should not compiled must be in this list on the Module. unused_properties = getattr(cls, "__jit_unused_properties__", []) # Create Property TreeView objects from inspected property objects. properties = [] for prop in props: if prop[0] not in unused_properties and not should_drop(prop[1].fget): getter = get_jit_def(prop[1].fget, f"__{prop[0]}_getter", self_name=self_name) setter = get_jit_def(prop[1].fset, f"__{prop[0]}_setter", self_name=self_name) if prop[1].fset else None properties.append(Property(getter.range(), Ident(getter.range(), prop[0]), getter, setter)) return properties
[ "def", "get_class_properties", "(", "cls", ",", "self_name", ")", ":", "props", "=", "inspect", ".", "getmembers", "(", "cls", ",", "predicate", "=", "lambda", "m", ":", "isinstance", "(", "m", ",", "property", ")", ")", "# Any property that should not compiled must be in this list on the Module.", "unused_properties", "=", "getattr", "(", "cls", ",", "\"__jit_unused_properties__\"", ",", "[", "]", ")", "# Create Property TreeView objects from inspected property objects.", "properties", "=", "[", "]", "for", "prop", "in", "props", ":", "if", "prop", "[", "0", "]", "not", "in", "unused_properties", "and", "not", "should_drop", "(", "prop", "[", "1", "]", ".", "fget", ")", ":", "getter", "=", "get_jit_def", "(", "prop", "[", "1", "]", ".", "fget", ",", "f\"__{prop[0]}_getter\"", ",", "self_name", "=", "self_name", ")", "setter", "=", "get_jit_def", "(", "prop", "[", "1", "]", ".", "fset", ",", "f\"__{prop[0]}_setter\"", ",", "self_name", "=", "self_name", ")", "if", "prop", "[", "1", "]", ".", "fset", "else", "None", "properties", ".", "append", "(", "Property", "(", "getter", ".", "range", "(", ")", ",", "Ident", "(", "getter", ".", "range", "(", ")", ",", "prop", "[", "0", "]", ")", ",", "getter", ",", "setter", ")", ")", "return", "properties" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/jit/frontend.py#L141-L165
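The discovery half of this is plain inspect machinery; a self-contained sketch with a toy class (the TorchScript compilation of fget/fset via get_jit_def is omitted):

import inspect

class M:
    @property
    def scale(self):
        return 2.0

    @scale.setter
    def scale(self, v):
        self._s = v

props = inspect.getmembers(M, predicate=lambda m: isinstance(m, property))
for name, prop in props:
    # fset may be None for read-only properties, mirroring the guard above
    print(name, prop.fget is not None, prop.fset is not None)  # scale True True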
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/memory_inspector/memory_inspector/core/backends.py
python
Device.EnableMmapTracing
(self, enabled)
Provision the device and make it ready to trace memory maps.
Provision the device and make it ready to trace memory maps.
[ "Provision", "the", "device", "and", "make", "it", "ready", "to", "trace", "memory", "maps", "." ]
def EnableMmapTracing(self, enabled): """Provision the device and make it ready to trace memory maps.""" raise NotImplementedError()
[ "def", "EnableMmapTracing", "(", "self", ",", "enabled", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/memory_inspector/memory_inspector/core/backends.py#L87-L89
cms-sw/cmssw
fd9de012d503d3405420bcbeec0ec879baa57cf2
Validation/RecoTrack/python/plotting/plotting.py
python
PlotFolder.iterSelectionName
(self, plotFolderName, translatedDqmSubFolder)
Iterate over possible selection names (used in output directory name and legend) from the name of PlotterFolder, and a return value of translateSubFolder
Iterate over possible selection names (used in output directory name and legend) from the name of PlotterFolder, and a return value of translateSubFolder
[ "Iterate", "over", "possible", "selection", "names", "(", "used", "in", "output", "directory", "name", "and", "legend", ")", "from", "the", "name", "of", "PlotterFolder", "and", "a", "return", "value", "of", "translateSubFolder" ]
def iterSelectionName(self, plotFolderName, translatedDqmSubFolder): """Iterate over possible selection names (used in output directory name and legend) from the name of PlotterFolder, and a return value of translateSubFolder""" ret = "" if plotFolderName != "": ret += "_"+plotFolderName if translatedDqmSubFolder is not None: ret += "_"+translatedDqmSubFolder yield ret
[ "def", "iterSelectionName", "(", "self", ",", "plotFolderName", ",", "translatedDqmSubFolder", ")", ":", "ret", "=", "\"\"", "if", "plotFolderName", "!=", "\"\"", ":", "ret", "+=", "\"_\"", "+", "plotFolderName", "if", "translatedDqmSubFolder", "is", "not", "None", ":", "ret", "+=", "\"_\"", "+", "translatedDqmSubFolder", "yield", "ret" ]
https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/Validation/RecoTrack/python/plotting/plotting.py#L2675-L2682
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/framework/subscribe.py
python
_subscribe
(tensor, side_effects, control_cache)
return _subscribe_new(tensor, side_effects, control_cache)
Helper method that subscribes a single tensor to a list of side_effects. This method will check if the given tensor has already been subscribed or if it's a tensor returned by a previous call to `subscribe()` and, if so, will reuse the existing identity op, appending the given side effects to the list of existing ones. Args: tensor: The `tf.Tensor` to be subscribed. side_effects: List of side_effect functions, see subscribe for details. control_cache: `_ControlOutputCache` helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it has already been subscribed.
Helper method that subscribes a single tensor to a list of side_effects.
[ "Helper", "method", "that", "subscribes", "a", "single", "tensor", "to", "a", "list", "of", "side_effects", "." ]
def _subscribe(tensor, side_effects, control_cache): """Helper method that subscribes a single tensor to a list of side_effects. This method will check if the given tensor has already been subscribed or if it's a tensor returned by a previous call to `subscribe()` and, if so, will reuse the existing identity op, appending the given side effects to the list of existing ones. Args: tensor: The `tf.Tensor` to be subscribed. side_effects: List of side_effect functions, see subscribe for details. control_cache: `_ControlOutputCache` helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it has already been subscribed. """ # Check if the given tensor has a numpy compatible type (see dtypes.py). # If not, we cannot subscribe it, so we just return the original tensor. if not tensor.dtype.is_numpy_compatible: logging.debug(('Tensor {} has an un-supported {} type and cannot be ' 'subscribed.').format(tensor.name, tensor.dtype)) return tensor if _is_subscribed_identity(tensor): return _subscribe_extend(tensor, side_effects) # Check if the given tensor has already been subscribed by inspecting its # outputs. name_scope = tensor.op.name + '/subscription/Identity' consumers = tensor.consumers() matching_ops = [op for op in consumers if op.name.startswith(name_scope)] assert len(matching_ops) <= 1, ('Op {} must only have one subscription ' 'op connected to it').format(tensor.op.name) if len(matching_ops) == 1: candidate_tensor = matching_ops[0].outputs[0] if _is_subscribed_identity(candidate_tensor): return _subscribe_extend(candidate_tensor, side_effects) return _subscribe_new(tensor, side_effects, control_cache)
[ "def", "_subscribe", "(", "tensor", ",", "side_effects", ",", "control_cache", ")", ":", "# Check if the given tensor has a numpy compatible type (see dtypes.py).", "# If not, we cannot subscribe it, so we just return the original tensor.", "if", "not", "tensor", ".", "dtype", ".", "is_numpy_compatible", ":", "logging", ".", "debug", "(", "(", "'Tensor {} has an un-supported {} type and cannot be '", "'subscribed.'", ")", ".", "format", "(", "tensor", ".", "name", ",", "tensor", ".", "dtype", ")", ")", "return", "tensor", "if", "_is_subscribed_identity", "(", "tensor", ")", ":", "return", "_subscribe_extend", "(", "tensor", ",", "side_effects", ")", "# Check if the given tensor has already been subscribed by inspecting its", "# outputs.", "name_scope", "=", "tensor", ".", "op", ".", "name", "+", "'/subscription/Identity'", "consumers", "=", "tensor", ".", "consumers", "(", ")", "matching_ops", "=", "[", "op", "for", "op", "in", "consumers", "if", "op", ".", "name", ".", "startswith", "(", "name_scope", ")", "]", "assert", "len", "(", "matching_ops", ")", "<=", "1", ",", "(", "'Op {} must only have one subscription '", "'op connected to it'", ")", ".", "format", "(", "tensor", ".", "op", ".", "name", ")", "if", "len", "(", "matching_ops", ")", "==", "1", ":", "candidate_tensor", "=", "matching_ops", "[", "0", "]", ".", "outputs", "[", "0", "]", "if", "_is_subscribed_identity", "(", "candidate_tensor", ")", ":", "return", "_subscribe_extend", "(", "candidate_tensor", ",", "side_effects", ")", "return", "_subscribe_new", "(", "tensor", ",", "side_effects", ",", "control_cache", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/framework/subscribe.py#L217-L256
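Stripped of the graph machinery, the control flow above is "wrap once, then extend": the consumer scan on the /subscription/Identity name scope guarantees at most one wrapper per tensor. An illustrative, framework-free sketch of that idempotency pattern (all names here are invented, not TF API):

class Wrapper:
    def __init__(self, value):
        self.value = value
        self.side_effects = []

def subscribe_once(value, effect, registry):
    # the registry plays the role of the consumer scan above
    wrapper = registry.setdefault(id(value), Wrapper(value))
    wrapper.side_effects.append(effect)  # extend instead of re-wrapping
    return wrapper

registry, t = {}, object()
w1 = subscribe_once(t, 'log', registry)
w2 = subscribe_once(t, 'trace', registry)
assert w1 is w2 and w1.side_effects == ['log', 'trace']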
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
utils/vim-lldb/python-vim-lldb/lldb_controller.py
python
LLDBController.doStep
(self, stepType)
Perform a step command and block the UI for eventDelayStep seconds in order to process events on lldb's event queue. FIXME: if the step does not complete in eventDelayStep seconds, we relinquish control to the main thread to avoid the appearance of a "hang". If this happens, the UI will update whenever; usually when the user moves the cursor. This is somewhat annoying.
Perform a step command and block the UI for eventDelayStep seconds in order to process events on lldb's event queue. FIXME: if the step does not complete in eventDelayStep seconds, we relinquish control to the main thread to avoid the appearance of a "hang". If this happens, the UI will update whenever; usually when the user moves the cursor. This is somewhat annoying.
[ "Perform", "a", "step", "command", "and", "block", "the", "UI", "for", "eventDelayStep", "seconds", "in", "order", "to", "process", "events", "on", "lldb", "s", "event", "queue", ".", "FIXME", ":", "if", "the", "step", "does", "not", "complete", "in", "eventDelayStep", "seconds", "we", "relinquish", "control", "to", "the", "main", "thread", "to", "avoid", "the", "appearance", "of", "a", "hang", ".", "If", "this", "happens", "the", "UI", "will", "update", "whenever", ";", "usually", "when", "the", "user", "moves", "the", "cursor", ".", "This", "is", "somewhat", "annoying", "." ]
def doStep(self, stepType): """ Perform a step command and block the UI for eventDelayStep seconds in order to process events on lldb's event queue. FIXME: if the step does not complete in eventDelayStep seconds, we relinquish control to the main thread to avoid the appearance of a "hang". If this happens, the UI will update whenever; usually when the user moves the cursor. This is somewhat annoying. """ if not self.process: sys.stderr.write("No process to step") return t = self.process.GetSelectedThread() if stepType == StepType.INSTRUCTION: t.StepInstruction(False) if stepType == StepType.INSTRUCTION_OVER: t.StepInstruction(True) elif stepType == StepType.INTO: t.StepInto() elif stepType == StepType.OVER: t.StepOver() elif stepType == StepType.OUT: t.StepOut() self.processPendingEvents(self.eventDelayStep, True)
[ "def", "doStep", "(", "self", ",", "stepType", ")", ":", "if", "not", "self", ".", "process", ":", "sys", ".", "stderr", ".", "write", "(", "\"No process to step\"", ")", "return", "t", "=", "self", ".", "process", ".", "GetSelectedThread", "(", ")", "if", "stepType", "==", "StepType", ".", "INSTRUCTION", ":", "t", ".", "StepInstruction", "(", "False", ")", "if", "stepType", "==", "StepType", ".", "INSTRUCTION_OVER", ":", "t", ".", "StepInstruction", "(", "True", ")", "elif", "stepType", "==", "StepType", ".", "INTO", ":", "t", ".", "StepInto", "(", ")", "elif", "stepType", "==", "StepType", ".", "OVER", ":", "t", ".", "StepOver", "(", ")", "elif", "stepType", "==", "StepType", ".", "OUT", ":", "t", ".", "StepOut", "(", ")", "self", ".", "processPendingEvents", "(", "self", ".", "eventDelayStep", ",", "True", ")" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/utils/vim-lldb/python-vim-lldb/lldb_controller.py#L113-L136
google/or-tools
2cb85b4eead4c38e1c54b48044f92087cf165bce
ortools/constraint_solver/samples/cvrp_reload.py
python
create_demand_evaluator
(data)
return demand_evaluator
Creates callback to get demands at each location.
Creates callback to get demands at each location.
[ "Creates", "callback", "to", "get", "demands", "at", "each", "location", "." ]
def create_demand_evaluator(data): """Creates callback to get demands at each location.""" _demands = data['demands'] def demand_evaluator(manager, from_node): """Returns the demand of the current node""" return _demands[manager.IndexToNode(from_node)] return demand_evaluator
[ "def", "create_demand_evaluator", "(", "data", ")", ":", "_demands", "=", "data", "[", "'demands'", "]", "def", "demand_evaluator", "(", "manager", ",", "from_node", ")", ":", "\"\"\"Returns the demand of the current node\"\"\"", "return", "_demands", "[", "manager", ".", "IndexToNode", "(", "from_node", ")", "]", "return", "demand_evaluator" ]
https://github.com/google/or-tools/blob/2cb85b4eead4c38e1c54b48044f92087cf165bce/ortools/constraint_solver/samples/cvrp_reload.py#L179-L187
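The closure captures _demands once, so the returned callback matches the (manager, index) signature the routing solver expects. A quick check with a fake manager whose IndexToNode is the identity (the real ortools RoutingIndexManager maps solver indices to node ids):

class FakeManager:
    def IndexToNode(self, index):
        return index  # identity mapping, good enough for this sketch

data = {'demands': [0, 1, 1, 2, 4]}
evaluator = create_demand_evaluator(data)
print(evaluator(FakeManager(), 3))  # -> 2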
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_windows.py
python
PrintData.SetPrivData
(*args, **kwargs)
return _windows_.PrintData_SetPrivData(*args, **kwargs)
SetPrivData(self, PyObject data)
SetPrivData(self, PyObject data)
[ "SetPrivData", "(", "self", "PyObject", "data", ")" ]
def SetPrivData(*args, **kwargs): """SetPrivData(self, PyObject data)""" return _windows_.PrintData_SetPrivData(*args, **kwargs)
[ "def", "SetPrivData", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "PrintData_SetPrivData", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L4832-L4834
facebookincubator/BOLT
88c70afe9d388ad430cc150cc158641701397f70
clang/tools/scan-build-py/lib/libscanbuild/analyze.py
python
analyze_build
()
Entry point for analyze-build command.
Entry point for analyze-build command.
[ "Entry", "point", "for", "analyze", "-", "build", "command", "." ]
def analyze_build(): """ Entry point for analyze-build command. """ args = parse_args_for_analyze_build() # will re-assign the report directory as new output with report_directory(args.output, args.keep_empty, args.output_format) as args.output: # Run the analyzer against a compilation db. govern_analyzer_runs(args) # Cover report generation and bug counting. number_of_bugs = document(args) # Set exit status as it was requested. return number_of_bugs if args.status_bugs else 0
[ "def", "analyze_build", "(", ")", ":", "args", "=", "parse_args_for_analyze_build", "(", ")", "# will re-assign the report directory as new output", "with", "report_directory", "(", "args", ".", "output", ",", "args", ".", "keep_empty", ",", "args", ".", "output_format", ")", "as", "args", ".", "output", ":", "# Run the analyzer against a compilation db.", "govern_analyzer_runs", "(", "args", ")", "# Cover report generation and bug counting.", "number_of_bugs", "=", "document", "(", "args", ")", "# Set exit status as it was requested.", "return", "number_of_bugs", "if", "args", ".", "status_bugs", "else", "0" ]
https://github.com/facebookincubator/BOLT/blob/88c70afe9d388ad430cc150cc158641701397f70/clang/tools/scan-build-py/lib/libscanbuild/analyze.py#L80-L91
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cuda/cudadrv/driver.py
python
_device_pointer_attr
(devmem, attr, odata)
Query attribute on the device pointer
Query attribute on the device pointer
[ "Query", "attribute", "on", "the", "device", "pointer" ]
def _device_pointer_attr(devmem, attr, odata): """Query attribute on the device pointer """ error = driver.cuPointerGetAttribute(byref(odata), attr, device_ctypes_pointer(devmem)) driver.check_error(error, "Failed to query pointer attribute")
[ "def", "_device_pointer_attr", "(", "devmem", ",", "attr", ",", "odata", ")", ":", "error", "=", "driver", ".", "cuPointerGetAttribute", "(", "byref", "(", "odata", ")", ",", "attr", ",", "device_ctypes_pointer", "(", "devmem", ")", ")", "driver", ".", "check_error", "(", "error", ",", "\"Failed to query pointer attribute\"", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cuda/cudadrv/driver.py#L1724-L1729
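byref(odata) is the classic ctypes out-parameter pattern: the caller allocates the result object and the C call writes through the pointer. The same round-trip without the CUDA driver, using an invented stand-in for cuPointerGetAttribute:

import ctypes

def fake_get_attribute(out_ptr):
    out_ptr.contents.value = 4096  # the driver writes through the pointer
    return 0                       # success code

odata = ctypes.c_size_t()
err = fake_get_attribute(ctypes.pointer(odata))  # byref() is the lighter form
assert err == 0 and odata.value == 4096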
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
gpu/command_buffer/build_gles2_cmd_buffer.py
python
GLcharHandler.WriteImmediateCmdHelper
(self, func, file)
Overridden from TypeHandler.
Overridden from TypeHandler.
[ "Overridden", "from", "TypeHandler", "." ]
def WriteImmediateCmdHelper(self, func, file): """Overridden from TypeHandler.""" code = """ void %(name)s(%(typed_args)s) { const uint32 data_size = strlen(name); gles2::%(name)s* c = GetImmediateCmdSpace<gles2::%(name)s>(data_size); if (c) { c->Init(%(args)s, data_size); } } """ file.Write(code % { "name": func.name, "typed_args": func.MakeTypedOriginalArgString(""), "args": func.MakeOriginalArgString(""), })
[ "def", "WriteImmediateCmdHelper", "(", "self", ",", "func", ",", "file", ")", ":", "code", "=", "\"\"\" void %(name)s(%(typed_args)s) {\n const uint32 data_size = strlen(name);\n gles2::%(name)s* c = GetImmediateCmdSpace<gles2::%(name)s>(data_size);\n if (c) {\n c->Init(%(args)s, data_size);\n }\n }\n\n\"\"\"", "file", ".", "Write", "(", "code", "%", "{", "\"name\"", ":", "func", ".", "name", ",", "\"typed_args\"", ":", "func", ".", "MakeTypedOriginalArgString", "(", "\"\"", ")", ",", "\"args\"", ":", "func", ".", "MakeOriginalArgString", "(", "\"\"", ")", ",", "}", ")" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/gpu/command_buffer/build_gles2_cmd_buffer.py#L4179-L4194
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/benchmark/tools/gbench/util.py
python
is_json_file
(filename)
return False
Returns 'True' if 'filename' names a valid JSON output file. 'False' otherwise.
Returns 'True' if 'filename' names a valid JSON output file. 'False' otherwise.
[ "Returns", "True", "if", "filename", "names", "a", "valid", "JSON", "output", "file", ".", "False", "otherwise", "." ]
def is_json_file(filename): """ Returns 'True' if 'filename' names a valid JSON output file. 'False' otherwise. """ try: with open(filename, 'r') as f: json.load(f) return True except BaseException: pass return False
[ "def", "is_json_file", "(", "filename", ")", ":", "try", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "json", ".", "load", "(", "f", ")", "return", "True", "except", "BaseException", ":", "pass", "return", "False" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/benchmark/tools/gbench/util.py#L42-L53
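A usage sketch with two temporary files, one valid JSON and one not:

import json, os, tempfile

good = tempfile.NamedTemporaryFile('w', suffix='.json', delete=False)
json.dump({'benchmarks': []}, good)
good.close()
bad = tempfile.NamedTemporaryFile('w', delete=False)
bad.write('not json')
bad.close()

print(is_json_file(good.name))  # True
print(is_json_file(bad.name))   # False
os.unlink(good.name)
os.unlink(bad.name)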
rizonesoft/Notepad3
33cbe20f7ce563541d2a6ceaf22cabeffc826542
scintilla/scripts/Dependencies.py
python
UpdateDependencies
(filepath, dependencies, comment="")
Write a dependencies file if different from dependencies.
Write a dependencies file if different from dependencies.
[ "Write", "a", "dependencies", "file", "if", "different", "from", "dependencies", "." ]
def UpdateDependencies(filepath, dependencies, comment=""): """ Write a dependencies file if different from dependencies. """ FileGenerator.UpdateFile(os.path.abspath(filepath), comment.rstrip() + os.linesep + TextFromDependencies(dependencies))
[ "def", "UpdateDependencies", "(", "filepath", ",", "dependencies", ",", "comment", "=", "\"\"", ")", ":", "FileGenerator", ".", "UpdateFile", "(", "os", ".", "path", ".", "abspath", "(", "filepath", ")", ",", "comment", ".", "rstrip", "(", ")", "+", "os", ".", "linesep", "+", "TextFromDependencies", "(", "dependencies", ")", ")" ]
https://github.com/rizonesoft/Notepad3/blob/33cbe20f7ce563541d2a6ceaf22cabeffc826542/scintilla/scripts/Dependencies.py#L139-L142
hpi-xnor/BMXNet-v2
af2b1859eafc5c721b1397cef02f946aaf2ce20d
example/image-classification/common/util.py
python
get_gpus
()
return range(mx.util.get_gpu_count())
return a list of GPUs
return a list of GPUs
[ "return", "a", "list", "of", "GPUs" ]
def get_gpus(): """ return a list of GPUs """ return range(mx.util.get_gpu_count())
[ "def", "get_gpus", "(", ")", ":", "return", "range", "(", "mx", ".", "util", ".", "get_gpu_count", "(", ")", ")" ]
https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/example/image-classification/common/util.py#L50-L54
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/configure.py
python
host_arch_win
()
return matchup.get(arch, 'ia32')
Host architecture check using environ vars (better way to do this?)
Host architecture check using environ vars (better way to do this?)
[ "Host", "architecture", "check", "using", "environ", "vars", "(", "better", "way", "to", "do", "this?", ")" ]
def host_arch_win(): """Host architecture check using environ vars (better way to do this?)""" observed_arch = os.environ.get('PROCESSOR_ARCHITECTURE', 'x86') arch = os.environ.get('PROCESSOR_ARCHITEW6432', observed_arch) matchup = { 'AMD64' : 'x64', 'x86' : 'ia32', 'arm' : 'arm', 'mips' : 'mips', } return matchup.get(arch, 'ia32')
[ "def", "host_arch_win", "(", ")", ":", "observed_arch", "=", "os", ".", "environ", ".", "get", "(", "'PROCESSOR_ARCHITECTURE'", ",", "'x86'", ")", "arch", "=", "os", ".", "environ", ".", "get", "(", "'PROCESSOR_ARCHITEW6432'", ",", "observed_arch", ")", "matchup", "=", "{", "'AMD64'", ":", "'x64'", ",", "'x86'", ":", "'ia32'", ",", "'arm'", ":", "'arm'", ",", "'mips'", ":", "'mips'", ",", "}", "return", "matchup", ".", "get", "(", "arch", ",", "'ia32'", ")" ]
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/configure.py#L1113-L1126
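Behavior sketch: PROCESSOR_ARCHITEW6432 (set for 32-bit processes on 64-bit Windows) takes precedence over PROCESSOR_ARCHITECTURE, and anything unmapped falls back to 'ia32':

import os

os.environ['PROCESSOR_ARCHITECTURE'] = 'x86'
os.environ['PROCESSOR_ARCHITEW6432'] = 'AMD64'
print(host_arch_win())  # 'x64' -- the WOW64 variable wins

os.environ['PROCESSOR_ARCHITEW6432'] = 'ia64'
print(host_arch_win())  # 'ia32' -- unknown arch hits the fallback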
openvinotoolkit/openvino
dedcbeafa8b84cccdc55ca64b8da516682b381c7
cmake/developer_package/cpplint/cpplint.py
python
_ExpandDirectories
(filenames)
return filtered
Searches a list of filenames and replaces directories in the list with all files descending from those directories. Files with extensions not in the valid extensions list are excluded. Args: filenames: A list of files or directories Returns: A list of all files that are members of filenames or descended from a directory in filenames
Searches a list of filenames and replaces directories in the list with all files descending from those directories. Files with extensions not in the valid extensions list are excluded.
[ "Searches", "a", "list", "of", "filenames", "and", "replaces", "directories", "in", "the", "list", "with", "all", "files", "descending", "from", "those", "directories", ".", "Files", "with", "extensions", "not", "in", "the", "valid", "extensions", "list", "are", "excluded", "." ]
def _ExpandDirectories(filenames): """Searches a list of filenames and replaces directories in the list with all files descending from those directories. Files with extensions not in the valid extensions list are excluded. Args: filenames: A list of files or directories Returns: A list of all files that are members of filenames or descended from a directory in filenames """ expanded = set() for filename in filenames: if not os.path.isdir(filename): expanded.add(filename) continue for root, _, files in os.walk(filename): for loopfile in files: fullname = os.path.join(root, loopfile) if fullname.startswith('.' + os.path.sep): fullname = fullname[len('.' + os.path.sep):] expanded.add(fullname) filtered = [] for filename in expanded: if os.path.splitext(filename)[1][1:] in GetAllExtensions(): filtered.append(filename) return filtered
[ "def", "_ExpandDirectories", "(", "filenames", ")", ":", "expanded", "=", "set", "(", ")", "for", "filename", "in", "filenames", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "filename", ")", ":", "expanded", ".", "add", "(", "filename", ")", "continue", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "filename", ")", ":", "for", "loopfile", "in", "files", ":", "fullname", "=", "os", ".", "path", ".", "join", "(", "root", ",", "loopfile", ")", "if", "fullname", ".", "startswith", "(", "'.'", "+", "os", ".", "path", ".", "sep", ")", ":", "fullname", "=", "fullname", "[", "len", "(", "'.'", "+", "os", ".", "path", ".", "sep", ")", ":", "]", "expanded", ".", "add", "(", "fullname", ")", "filtered", "=", "[", "]", "for", "filename", "in", "expanded", ":", "if", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "[", "1", ":", "]", "in", "GetAllExtensions", "(", ")", ":", "filtered", ".", "append", "(", "filename", ")", "return", "filtered" ]
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/cmake/developer_package/cpplint/cpplint.py#L6540-L6569
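The walk-and-normalize core in isolation (the extension filter via GetAllExtensions() is omitted); stripping a leading './' keeps reported paths consistent with what the user typed on the command line:

import os

expanded = set()
for root, _, files in os.walk('.'):
    for name in files:
        full = os.path.join(root, name)
        if full.startswith('.' + os.sep):
            full = full[len('.' + os.sep):]
        expanded.add(full)
print(sorted(expanded)[:5])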
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/BaseHTTPServer.py
python
BaseHTTPRequestHandler.log_date_time_string
(self)
return s
Return the current time formatted for logging.
Return the current time formatted for logging.
[ "Return", "the", "current", "time", "formatted", "for", "logging", "." ]
def log_date_time_string(self): """Return the current time formatted for logging.""" now = time.time() year, month, day, hh, mm, ss, x, y, z = time.localtime(now) s = "%02d/%3s/%04d %02d:%02d:%02d" % ( day, self.monthname[month], year, hh, mm, ss) return s
[ "def", "log_date_time_string", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "year", ",", "month", ",", "day", ",", "hh", ",", "mm", ",", "ss", ",", "x", ",", "y", ",", "z", "=", "time", ".", "localtime", "(", "now", ")", "s", "=", "\"%02d/%3s/%04d %02d:%02d:%02d\"", "%", "(", "day", ",", "self", ".", "monthname", "[", "month", "]", ",", "year", ",", "hh", ",", "mm", ",", "ss", ")", "return", "s" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/BaseHTTPServer.py#L475-L481
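A strftime one-liner produces the same layout, but %b is locale-dependent; indexing the class's own monthname table keeps log timestamps stable across locales, which is presumably why the stdlib spells the format out by hand:

import time

# locale-dependent equivalent of the hand-rolled format above
print(time.strftime('%d/%b/%Y %H:%M:%S', time.localtime()))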
kushview/Element
1cc16380caa2ab79461246ba758b9de1f46db2a5
waflib/Tools/c_config.py
python
get_cc_version
(conf, cc, gcc=False, icc=False, clang=False)
return k
Runs the preprocessor to determine the gcc/icc/clang version The variables CC_VERSION, DEST_OS, DEST_BINFMT and DEST_CPU will be set in *conf.env* :raise: :py:class:`waflib.Errors.ConfigurationError`
Runs the preprocessor to determine the gcc/icc/clang version
[ "Runs", "the", "preprocessor", "to", "determine", "the", "gcc", "/", "icc", "/", "clang", "version" ]
def get_cc_version(conf, cc, gcc=False, icc=False, clang=False): """ Runs the preprocessor to determine the gcc/icc/clang version The variables CC_VERSION, DEST_OS, DEST_BINFMT and DEST_CPU will be set in *conf.env* :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-dM', '-E', '-'] env = conf.env.env or None try: out, err = conf.cmd_and_log(cmd, output=0, input='\n'.encode(), env=env) except Errors.WafError: conf.fatal('Could not determine the compiler version %r' % cmd) if gcc: if out.find('__INTEL_COMPILER') >= 0: conf.fatal('The intel compiler pretends to be gcc') if out.find('__GNUC__') < 0 and out.find('__clang__') < 0: conf.fatal('Could not determine the compiler type') if icc and out.find('__INTEL_COMPILER') < 0: conf.fatal('Not icc/icpc') if clang and out.find('__clang__') < 0: conf.fatal('Not clang/clang++') if not clang and out.find('__clang__') >= 0: conf.fatal('Could not find gcc/g++ (only Clang), if renamed try eg: CC=gcc48 CXX=g++48 waf configure') k = {} if icc or gcc or clang: out = out.splitlines() for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val def isD(var): return var in k # Some documentation is available at http://predef.sourceforge.net # The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns. if not conf.env.DEST_OS: conf.env.DEST_OS = '' for i in MACRO_TO_DESTOS: if isD(i): conf.env.DEST_OS = MACRO_TO_DESTOS[i] break else: if isD('__APPLE__') and isD('__MACH__'): conf.env.DEST_OS = 'darwin' elif isD('__unix__'): # unix must be tested last as it's a generic fallback conf.env.DEST_OS = 'generic' if isD('__ELF__'): conf.env.DEST_BINFMT = 'elf' elif isD('__WINNT__') or isD('__CYGWIN__') or isD('_WIN32'): conf.env.DEST_BINFMT = 'pe' if not conf.env.IMPLIBDIR: conf.env.IMPLIBDIR = conf.env.LIBDIR # for .lib or .dll.a files conf.env.LIBDIR = conf.env.BINDIR elif isD('__APPLE__'): conf.env.DEST_BINFMT = 'mac-o' if not conf.env.DEST_BINFMT: # Infer the binary format from the os name. conf.env.DEST_BINFMT = Utils.destos_to_binfmt(conf.env.DEST_OS) for i in MACRO_TO_DEST_CPU: if isD(i): conf.env.DEST_CPU = MACRO_TO_DEST_CPU[i] break Logs.debug('ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')])) if icc: ver = k['__INTEL_COMPILER'] conf.env.CC_VERSION = (ver[:-2], ver[-2], ver[-1]) else: if isD('__clang__') and isD('__clang_major__'): conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__']) else: # older clang versions and gcc conf.env.CC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k.get('__GNUC_PATCHLEVEL__', '0')) return k
[ "def", "get_cc_version", "(", "conf", ",", "cc", ",", "gcc", "=", "False", ",", "icc", "=", "False", ",", "clang", "=", "False", ")", ":", "cmd", "=", "cc", "+", "[", "'-dM'", ",", "'-E'", ",", "'-'", "]", "env", "=", "conf", ".", "env", ".", "env", "or", "None", "try", ":", "out", ",", "err", "=", "conf", ".", "cmd_and_log", "(", "cmd", ",", "output", "=", "0", ",", "input", "=", "'\\n'", ".", "encode", "(", ")", ",", "env", "=", "env", ")", "except", "Errors", ".", "WafError", ":", "conf", ".", "fatal", "(", "'Could not determine the compiler version %r'", "%", "cmd", ")", "if", "gcc", ":", "if", "out", ".", "find", "(", "'__INTEL_COMPILER'", ")", ">=", "0", ":", "conf", ".", "fatal", "(", "'The intel compiler pretends to be gcc'", ")", "if", "out", ".", "find", "(", "'__GNUC__'", ")", "<", "0", "and", "out", ".", "find", "(", "'__clang__'", ")", "<", "0", ":", "conf", ".", "fatal", "(", "'Could not determine the compiler type'", ")", "if", "icc", "and", "out", ".", "find", "(", "'__INTEL_COMPILER'", ")", "<", "0", ":", "conf", ".", "fatal", "(", "'Not icc/icpc'", ")", "if", "clang", "and", "out", ".", "find", "(", "'__clang__'", ")", "<", "0", ":", "conf", ".", "fatal", "(", "'Not clang/clang++'", ")", "if", "not", "clang", "and", "out", ".", "find", "(", "'__clang__'", ")", ">=", "0", ":", "conf", ".", "fatal", "(", "'Could not find gcc/g++ (only Clang), if renamed try eg: CC=gcc48 CXX=g++48 waf configure'", ")", "k", "=", "{", "}", "if", "icc", "or", "gcc", "or", "clang", ":", "out", "=", "out", ".", "splitlines", "(", ")", "for", "line", "in", "out", ":", "lst", "=", "shlex", ".", "split", "(", "line", ")", "if", "len", "(", "lst", ")", ">", "2", ":", "key", "=", "lst", "[", "1", "]", "val", "=", "lst", "[", "2", "]", "k", "[", "key", "]", "=", "val", "def", "isD", "(", "var", ")", ":", "return", "var", "in", "k", "# Some documentation is available at http://predef.sourceforge.net", "# The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns.", "if", "not", "conf", ".", "env", ".", "DEST_OS", ":", "conf", ".", "env", ".", "DEST_OS", "=", "''", "for", "i", "in", "MACRO_TO_DESTOS", ":", "if", "isD", "(", "i", ")", ":", "conf", ".", "env", ".", "DEST_OS", "=", "MACRO_TO_DESTOS", "[", "i", "]", "break", "else", ":", "if", "isD", "(", "'__APPLE__'", ")", "and", "isD", "(", "'__MACH__'", ")", ":", "conf", ".", "env", ".", "DEST_OS", "=", "'darwin'", "elif", "isD", "(", "'__unix__'", ")", ":", "# unix must be tested last as it's a generic fallback", "conf", ".", "env", ".", "DEST_OS", "=", "'generic'", "if", "isD", "(", "'__ELF__'", ")", ":", "conf", ".", "env", ".", "DEST_BINFMT", "=", "'elf'", "elif", "isD", "(", "'__WINNT__'", ")", "or", "isD", "(", "'__CYGWIN__'", ")", "or", "isD", "(", "'_WIN32'", ")", ":", "conf", ".", "env", ".", "DEST_BINFMT", "=", "'pe'", "if", "not", "conf", ".", "env", ".", "IMPLIBDIR", ":", "conf", ".", "env", ".", "IMPLIBDIR", "=", "conf", ".", "env", ".", "LIBDIR", "# for .lib or .dll.a files", "conf", ".", "env", ".", "LIBDIR", "=", "conf", ".", "env", ".", "BINDIR", "elif", "isD", "(", "'__APPLE__'", ")", ":", "conf", ".", "env", ".", "DEST_BINFMT", "=", "'mac-o'", "if", "not", "conf", ".", "env", ".", "DEST_BINFMT", ":", "# Infer the binary format from the os name.", "conf", ".", "env", ".", "DEST_BINFMT", "=", "Utils", ".", "destos_to_binfmt", "(", "conf", ".", "env", ".", "DEST_OS", ")", "for", "i", "in", "MACRO_TO_DEST_CPU", ":", "if", "isD", "(", "i", ")", ":", "conf", ".", "env", ".", "DEST_CPU", "=", "MACRO_TO_DEST_CPU", 
"[", "i", "]", "break", "Logs", ".", "debug", "(", "'ccroot: dest platform: '", "+", "' '", ".", "join", "(", "[", "conf", ".", "env", "[", "x", "]", "or", "'?'", "for", "x", "in", "(", "'DEST_OS'", ",", "'DEST_BINFMT'", ",", "'DEST_CPU'", ")", "]", ")", ")", "if", "icc", ":", "ver", "=", "k", "[", "'__INTEL_COMPILER'", "]", "conf", ".", "env", ".", "CC_VERSION", "=", "(", "ver", "[", ":", "-", "2", "]", ",", "ver", "[", "-", "2", "]", ",", "ver", "[", "-", "1", "]", ")", "else", ":", "if", "isD", "(", "'__clang__'", ")", "and", "isD", "(", "'__clang_major__'", ")", ":", "conf", ".", "env", ".", "CC_VERSION", "=", "(", "k", "[", "'__clang_major__'", "]", ",", "k", "[", "'__clang_minor__'", "]", ",", "k", "[", "'__clang_patchlevel__'", "]", ")", "else", ":", "# older clang versions and gcc", "conf", ".", "env", ".", "CC_VERSION", "=", "(", "k", "[", "'__GNUC__'", "]", ",", "k", "[", "'__GNUC_MINOR__'", "]", ",", "k", ".", "get", "(", "'__GNUC_PATCHLEVEL__'", ",", "'0'", ")", ")", "return", "k" ]
https://github.com/kushview/Element/blob/1cc16380caa2ab79461246ba758b9de1f46db2a5/waflib/Tools/c_config.py#L1019-L1104
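The probe itself is easy to reproduce on any machine with a C compiler on PATH (the 'cc' name is an assumption): dump the predefined macros over an empty translation unit and parse them with the same shlex logic as above:

import shlex, subprocess

out = subprocess.run(['cc', '-dM', '-E', '-'], input=b'\n',
                     capture_output=True, check=True).stdout.decode()
k = {}
for line in out.splitlines():
    lst = shlex.split(line)
    if len(lst) > 2:          # '#define NAME VALUE'
        k[lst[1]] = lst[2]

print(k.get('__GNUC__'), k.get('__clang_major__'))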
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage2.py
python
ShardingStage2._fresh_trainable
(self)
Whether to update training parameters.
Whether to update training parameters.
[ "Whether", "to", "update", "training", "parameters", "." ]
def _fresh_trainable(self): """ Whether to update training parameters. """ # Make sure that this is not done while gradients are waiting to be reduced (if no_sync context for instance) if reduce(lambda x, y: x or y, self._grad_reduced, False): logging.warning("Grads waiting to be reduced.") self._trainable_params = list( filter(lambda x: x.trainable, self._all_params)) self._trainable_params.sort(key=lambda x: np.prod(x.shape)) self._trainable_param2rank = {} for optim in self._sharding_optimizers: # Need to be wrappered for Sharding Stage2 Optimizer if len(optim.param_storages.keys()) == 0: optim.update_opt_status() # Get the parameters split by the optimizer according to rank for per_rank_params in optim.dtype_rank_params.values( ): # all the params from all ranks for params in per_rank_params: for param in filter(lambda x: x.trainable, params): self._trainable_param2rank[ param.name] = optim.param2rank[param.name] self._trainable_param2align[ param.name] = optim._param2align[param.name] self._setup_use_grad_storage() # wait next func hook support self._setup_backward_hooks()
[ "def", "_fresh_trainable", "(", "self", ")", ":", "# Make sure that this is not done while gradients are waiting to be reduced (if no_sync context for instance)", "if", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "or", "y", ",", "self", ".", "_grad_reduced", ",", "False", ")", ":", "logging", ".", "warning", "(", "\"Grads waiting to be reduced.\"", ")", "self", ".", "_trainable_params", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", ".", "trainable", ",", "self", ".", "_all_params", ")", ")", "self", ".", "_trainable_params", ".", "sort", "(", "key", "=", "lambda", "x", ":", "np", ".", "prod", "(", "x", ".", "shape", ")", ")", "self", ".", "_trainable_param2rank", "=", "{", "}", "for", "optim", "in", "self", ".", "_sharding_optimizers", ":", "# Need to be wrappered for Sharding Stage2 Optimizer", "if", "len", "(", "optim", ".", "param_storages", ".", "keys", "(", ")", ")", "==", "0", ":", "optim", ".", "update_opt_status", "(", ")", "# Get the parameters split by the optimizer according to rank", "for", "per_rank_params", "in", "optim", ".", "dtype_rank_params", ".", "values", "(", ")", ":", "# all the params from all ranks", "for", "params", "in", "per_rank_params", ":", "for", "param", "in", "filter", "(", "lambda", "x", ":", "x", ".", "trainable", ",", "params", ")", ":", "self", ".", "_trainable_param2rank", "[", "param", ".", "name", "]", "=", "optim", ".", "param2rank", "[", "param", ".", "name", "]", "self", ".", "_trainable_param2align", "[", "param", ".", "name", "]", "=", "optim", ".", "_param2align", "[", "param", ".", "name", "]", "self", ".", "_setup_use_grad_storage", "(", ")", "# wait next func hook support", "self", ".", "_setup_backward_hooks", "(", ")" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_stage2.py#L235-L265
cvxpy/cvxpy
5165b4fb750dfd237de8659383ef24b4b2e33aaf
cvxpy/atoms/norm.py
python
norm2
(x, axis=None)
return norm(x, p=2, axis=axis)
The 2-norm of x. Parameters ---------- x : Expression or numeric constant The value to take the norm of. If `x` is 2D and `axis` is None, this function constructs a matrix norm. Returns ------- Expression An Expression representing the norm.
The 2-norm of x.
[ "The", "2", "-", "norm", "of", "x", "." ]
def norm2(x, axis=None): """The 2-norm of x. Parameters ---------- x : Expression or numeric constant The value to take the norm of. If `x` is 2D and `axis` is None, this function constructs a matrix norm. Returns ------- Expression An Expression representing the norm. """ return norm(x, p=2, axis=axis)
[ "def", "norm2", "(", "x", ",", "axis", "=", "None", ")", ":", "return", "norm", "(", "x", ",", "p", "=", "2", ",", "axis", "=", "axis", ")" ]
https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/atoms/norm.py#L76-L90
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
python
_prepare_sequence_inputs
(inputs, states)
return (length, key, sorted_states, sorted_sequences, sorted_context)
Convert input to tensors and validate shape information. Args: inputs: A `_SequenceInputWrapper` instance. states: A dictionary mapping state names to input constants or tensors. Returns: The tuple (length, key, sorted_states, sorted_sequences, sorted_context), where each value has been checked for valid shape, and the sorted_* dicts are instances of OrderedDict; with key-value pairs sorted by key. Raises: ValueError: if the shapes of inputs.context.values(), states.values(), or inputs.sequences.values() are not fully defined (with the exception of the dimension of any `Tensor` in inputs.sequences.values()). TypeError: if the dtype of length is not int32.
Convert input to tensors and validate shape information.
[ "Convert", "input", "to", "tensors", "and", "validate", "shape", "information", "." ]
def _prepare_sequence_inputs(inputs, states): """Convert input to tensors and validate shape information. Args: inputs: A `_SequenceInputWrapper` instance. states: A dictionary mapping state names to input constants or tensors. Returns: The tuple (length, key, sorted_states, sorted_sequences, sorted_context), where each value has been checked for valid shape, and the sorted_* dicts are instances of OrderedDict; with key-value pairs sorted by key. Raises: ValueError: if the shapes of inputs.context.values(), states.values(), or inputs.sequences.values() are not fully defined (with the exception of the dimension of any `Tensor` in inputs.sequences.values()). TypeError: if the dtype of length is not int32. """ # Convert state initial values to tensors states = dict((k, ops.convert_to_tensor( v, name="state_%s" % k)) for k, v in states.items()) def _assert_fully_defined(label, dict_, ignore_first_dimension=False): start_dimension = 1 if ignore_first_dimension else 0 for k, v in dict_.items(): if not v.get_shape()[start_dimension:].is_fully_defined(): raise ValueError("Shape for %s %s is not fully defined %s: %s" % (label, k, "(ignoring first dimension)" if ignore_first_dimension else "", v.get_shape())) _assert_fully_defined("state", states) _assert_fully_defined("context", inputs.context) # Sequences' first dimension (time) may be variable _assert_fully_defined( "sequence", inputs.sequences, ignore_first_dimension=True) # Get dictionaries' dtypes ordered by name - ordering is important # when switching between dicts and tuples for passing to Barrier. def _sort_by_name(d): return collections.OrderedDict(sorted(d.items(), key=lambda k_v: k_v[0])) sorted_sequences = _sort_by_name(inputs.sequences) sorted_context = _sort_by_name(inputs.context) sorted_states = _sort_by_name(states) length = _check_rank(inputs.length, 0) key = _check_rank(inputs.key, 0) if length.dtype != dtypes.int32: raise TypeError("length dtype must be int32, but received: %s" % length.dtype) if key.dtype != dtypes.string: raise TypeError("key dtype must be string, but received: %s" % key.dtype) return (length, key, sorted_states, sorted_sequences, sorted_context)
[ "def", "_prepare_sequence_inputs", "(", "inputs", ",", "states", ")", ":", "# Convert state initial values to tensors", "states", "=", "dict", "(", "(", "k", ",", "ops", ".", "convert_to_tensor", "(", "v", ",", "name", "=", "\"state_%s\"", "%", "k", ")", ")", "for", "k", ",", "v", "in", "states", ".", "items", "(", ")", ")", "def", "_assert_fully_defined", "(", "label", ",", "dict_", ",", "ignore_first_dimension", "=", "False", ")", ":", "start_dimension", "=", "1", "if", "ignore_first_dimension", "else", "0", "for", "k", ",", "v", "in", "dict_", ".", "items", "(", ")", ":", "if", "not", "v", ".", "get_shape", "(", ")", "[", "start_dimension", ":", "]", ".", "is_fully_defined", "(", ")", ":", "raise", "ValueError", "(", "\"Shape for %s %s is not fully defined %s: %s\"", "%", "(", "label", ",", "k", ",", "\"(ignoring first dimension)\"", "if", "ignore_first_dimension", "else", "\"\"", ",", "v", ".", "get_shape", "(", ")", ")", ")", "_assert_fully_defined", "(", "\"state\"", ",", "states", ")", "_assert_fully_defined", "(", "\"context\"", ",", "inputs", ".", "context", ")", "# Sequences' first dimension (time) may be variable", "_assert_fully_defined", "(", "\"sequence\"", ",", "inputs", ".", "sequences", ",", "ignore_first_dimension", "=", "True", ")", "# Get dictionaries' dtypes ordered by name - ordering is important", "# when switching between dicts and tuples for passing to Barrier.", "def", "_sort_by_name", "(", "d", ")", ":", "return", "collections", ".", "OrderedDict", "(", "sorted", "(", "d", ".", "items", "(", ")", ",", "key", "=", "lambda", "k_v", ":", "k_v", "[", "0", "]", ")", ")", "sorted_sequences", "=", "_sort_by_name", "(", "inputs", ".", "sequences", ")", "sorted_context", "=", "_sort_by_name", "(", "inputs", ".", "context", ")", "sorted_states", "=", "_sort_by_name", "(", "states", ")", "length", "=", "_check_rank", "(", "inputs", ".", "length", ",", "0", ")", "key", "=", "_check_rank", "(", "inputs", ".", "key", ",", "0", ")", "if", "length", ".", "dtype", "!=", "dtypes", ".", "int32", ":", "raise", "TypeError", "(", "\"length dtype must be int32, but received: %s\"", "%", "length", ".", "dtype", ")", "if", "key", ".", "dtype", "!=", "dtypes", ".", "string", ":", "raise", "TypeError", "(", "\"key dtype must be string, but received: %s\"", "%", "key", ".", "dtype", ")", "return", "(", "length", ",", "key", ",", "sorted_states", ",", "sorted_sequences", ",", "sorted_context", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py#L298-L352
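The sort-by-name detail matters because the Barrier round-trips these dicts through positional tuples; sorting by key fixes one canonical order on both sides. The helper in isolation:

import collections

def _sort_by_name(d):
    return collections.OrderedDict(sorted(d.items(), key=lambda kv: kv[0]))

states = {'lstm_h': 0, 'attention': 1, 'lstm_c': 2}
print(list(_sort_by_name(states)))  # ['attention', 'lstm_c', 'lstm_h']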
macchina-io/macchina.io
ef24ba0e18379c3dd48fb84e6dbf991101cb8db0
platform/JS/V8/tools/gyp/pylib/gyp/win_tool.py
python
WinTool.ExecStamp
(self, path)
Simple stamp command.
Simple stamp command.
[ "Simple", "stamp", "command", "." ]
def ExecStamp(self, path): """Simple stamp command.""" open(path, 'w').close()
[ "def", "ExecStamp", "(", "self", ",", "path", ")", ":", "open", "(", "path", ",", "'w'", ")", ".", "close", "(", ")" ]
https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/tools/gyp/pylib/gyp/win_tool.py#L85-L87
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/protobuf/py3/google/protobuf/json_format.py
python
_Printer._AnyMessageToJsonObject
(self, message)
return self._RegularMessageToJsonObject(sub_message, js)
Converts Any message according to Proto3 JSON Specification.
Converts Any message according to Proto3 JSON Specification.
[ "Converts", "Any", "message", "according", "to", "Proto3", "JSON", "Specification", "." ]
def _AnyMessageToJsonObject(self, message): """Converts Any message according to Proto3 JSON Specification.""" if not message.ListFields(): return {} # Must print @type first, use OrderedDict instead of {} js = OrderedDict() type_url = message.type_url js['@type'] = type_url sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool) sub_message.ParseFromString(message.value) message_descriptor = sub_message.DESCRIPTOR full_name = message_descriptor.full_name if _IsWrapperMessage(message_descriptor): js['value'] = self._WrapperMessageToJsonObject(sub_message) return js if full_name in _WKTJSONMETHODS: js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0], sub_message)(self) return js return self._RegularMessageToJsonObject(sub_message, js)
[ "def", "_AnyMessageToJsonObject", "(", "self", ",", "message", ")", ":", "if", "not", "message", ".", "ListFields", "(", ")", ":", "return", "{", "}", "# Must print @type first, use OrderedDict instead of {}", "js", "=", "OrderedDict", "(", ")", "type_url", "=", "message", ".", "type_url", "js", "[", "'@type'", "]", "=", "type_url", "sub_message", "=", "_CreateMessageFromTypeUrl", "(", "type_url", ",", "self", ".", "descriptor_pool", ")", "sub_message", ".", "ParseFromString", "(", "message", ".", "value", ")", "message_descriptor", "=", "sub_message", ".", "DESCRIPTOR", "full_name", "=", "message_descriptor", ".", "full_name", "if", "_IsWrapperMessage", "(", "message_descriptor", ")", ":", "js", "[", "'value'", "]", "=", "self", ".", "_WrapperMessageToJsonObject", "(", "sub_message", ")", "return", "js", "if", "full_name", "in", "_WKTJSONMETHODS", ":", "js", "[", "'value'", "]", "=", "methodcaller", "(", "_WKTJSONMETHODS", "[", "full_name", "]", "[", "0", "]", ",", "sub_message", ")", "(", "self", ")", "return", "js", "return", "self", ".", "_RegularMessageToJsonObject", "(", "sub_message", ",", "js", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/protobuf/py3/google/protobuf/json_format.py#L325-L344
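The wrapper-message branch above is easy to observe end to end through the public json_format API. A minimal sketch using stock protobuf modules:

from google.protobuf import json_format
from google.protobuf.any_pb2 import Any
from google.protobuf.wrappers_pb2 import Int32Value

any_msg = Any()
any_msg.Pack(Int32Value(value=42))  # sets type_url and the serialized payload
print(json_format.MessageToJson(any_msg))
# {
#   "@type": "type.googleapis.com/google.protobuf.Int32Value",
#   "value": 42
# }

Because Int32Value is a wrapper message, the payload collapses to the bare JSON number 42 rather than a nested object.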
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/propgrid.py
python
PropertyGridInterface.Expand
(*args, **kwargs)
return _propgrid.PropertyGridInterface_Expand(*args, **kwargs)
Expand(self, PGPropArg id) -> bool
Expand(self, PGPropArg id) -> bool
[ "Expand", "(", "self", "PGPropArg", "id", ")", "-", ">", "bool" ]
def Expand(*args, **kwargs): """Expand(self, PGPropArg id) -> bool""" return _propgrid.PropertyGridInterface_Expand(*args, **kwargs)
[ "def", "Expand", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_propgrid", ".", "PropertyGridInterface_Expand", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L1155-L1157
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/grid.py
python
GridCellAttr.GetFont
(*args, **kwargs)
return _grid.GridCellAttr_GetFont(*args, **kwargs)
GetFont(self) -> Font
GetFont(self) -> Font
[ "GetFont", "(", "self", ")", "-", ">", "Font" ]
def GetFont(*args, **kwargs): """GetFont(self) -> Font""" return _grid.GridCellAttr_GetFont(*args, **kwargs)
[ "def", "GetFont", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_grid", ".", "GridCellAttr_GetFont", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/grid.py#L623-L625
qbittorrent/qBittorrent
78eaa49cd6b3b59c064cd461fe6d30eceaeac770
src/searchengine/nova3/sgmllib3.py
python
SGMLParser.handle_charref
(self, name)
Handle character reference, no need to override.
Handle character reference, no need to override.
[ "Handle", "character", "reference", "no", "need", "to", "override", "." ]
def handle_charref(self, name): """Handle character reference, no need to override.""" replacement = self.convert_charref(name) if replacement is None: self.unknown_charref(name) else: self.handle_data(replacement)
[ "def", "handle_charref", "(", "self", ",", "name", ")", ":", "replacement", "=", "self", ".", "convert_charref", "(", "name", ")", "if", "replacement", "is", "None", ":", "self", ".", "unknown_charref", "(", "name", ")", "else", ":", "self", ".", "handle_data", "(", "replacement", ")" ]
https://github.com/qbittorrent/qBittorrent/blob/78eaa49cd6b3b59c064cd461fe6d30eceaeac770/src/searchengine/nova3/sgmllib3.py#L400-L406
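A small usage sketch, assuming this vendored sgmllib3.py is importable as a module: numeric references that convert_charref can decode (codepoints 0-255 by default) are folded into the data stream; anything else falls through to unknown_charref.

from sgmllib3 import SGMLParser

class TextCollector(SGMLParser):
    def __init__(self):
        SGMLParser.__init__(self)
        self.parts = []

    def handle_data(self, data):
        self.parts.append(data)

p = TextCollector()
p.feed('A&#66;C')  # &#66; decodes to 'B' via convert_charref
p.close()
print(''.join(p.parts))  # -> ABC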
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
python
_Stream._init_write_gz
(self)
Initialize for writing with gzip compression.
Initialize for writing with gzip compression.
[ "Initialize", "for", "writing", "with", "gzip", "compression", "." ]
def _init_write_gz(self): """Initialize for writing with gzip compression. """ self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0) timestamp = struct.pack("<L", int(time.time())) self.__write(b"\037\213\010\010" + timestamp + b"\002\377") if self.name.endswith(".gz"): self.name = self.name[:-3] # RFC1952 says we must use ISO-8859-1 for the FNAME field. self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
[ "def", "_init_write_gz", "(", "self", ")", ":", "self", ".", "cmp", "=", "self", ".", "zlib", ".", "compressobj", "(", "9", ",", "self", ".", "zlib", ".", "DEFLATED", ",", "-", "self", ".", "zlib", ".", "MAX_WBITS", ",", "self", ".", "zlib", ".", "DEF_MEM_LEVEL", ",", "0", ")", "timestamp", "=", "struct", ".", "pack", "(", "\"<L\"", ",", "int", "(", "time", ".", "time", "(", ")", ")", ")", "self", ".", "__write", "(", "b\"\\037\\213\\010\\010\"", "+", "timestamp", "+", "b\"\\002\\377\"", ")", "if", "self", ".", "name", ".", "endswith", "(", "\".gz\"", ")", ":", "self", ".", "name", "=", "self", ".", "name", "[", ":", "-", "3", "]", "# RFC1952 says we must use ISO-8859-1 for the FNAME field.", "self", ".", "__write", "(", "self", ".", "name", ".", "encode", "(", "\"iso-8859-1\"", ",", "\"replace\"", ")", "+", "NUL", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L455-L467
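The header bytes are worth unpacking: magic \037\213, CM=8 (deflate), FLG=0x08 (FNAME present), a 4-byte little-endian mtime, XFL=2 ("maximum compression"), OS=0xff (unknown), then the NUL-terminated name in ISO-8859-1 as RFC 1952 requires. A self-contained sketch that builds the same member shape (the CRC32/ISIZE trailer is written elsewhere in _Stream; it is included here only to produce a complete, readable file):

import gzip, io, struct, time, zlib

def gzip_member(name, payload):
    # Same header layout as _init_write_gz above.
    header = b"\037\213\010\010" + struct.pack("<L", int(time.time())) + b"\002\377"
    header += name.encode("iso-8859-1", "replace") + b"\x00"
    # Raw deflate body (negative wbits suppresses the zlib wrapper).
    comp = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
    body = comp.compress(payload) + comp.flush()
    # RFC 1952 trailer: CRC32 and uncompressed size, both little-endian.
    trailer = struct.pack("<LL", zlib.crc32(payload) & 0xffffffff, len(payload) & 0xffffffff)
    return header + body + trailer

blob = gzip_member("example.txt", b"hello world")
print(gzip.GzipFile(fileobj=io.BytesIO(blob)).read())  # -> b'hello world'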
dolphin-emu/dolphin
b4c7f2b1e834ce5ea4b2301f9d4fb07c11afeabb
Externals/fmt/support/docopt.py
python
parse_shorts
(tokens, options)
return parsed
shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;
shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;
[ "shorts", "::", "=", "-", "(", "chars", ")", "*", "[", "[", "]", "chars", "]", ";" ]
def parse_shorts(tokens, options): """shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;""" token = tokens.move() assert token.startswith('-') and not token.startswith('--') left = token.lstrip('-') parsed = [] while left != '': short, left = '-' + left[0], left[1:] similar = [o for o in options if o.short == short] if len(similar) > 1: raise tokens.error('%s is specified ambiguously %d times' % (short, len(similar))) elif len(similar) < 1: o = Option(short, None, 0) options.append(o) if tokens.error is DocoptExit: o = Option(short, None, 0, True) else: # why copying is necessary here? o = Option(short, similar[0].long, similar[0].argcount, similar[0].value) value = None if o.argcount != 0: if left == '': if tokens.current() in [None, '--']: raise tokens.error('%s requires argument' % short) value = tokens.move() else: value = left left = '' if tokens.error is DocoptExit: o.value = value if value is not None else True parsed.append(o) return parsed
[ "def", "parse_shorts", "(", "tokens", ",", "options", ")", ":", "token", "=", "tokens", ".", "move", "(", ")", "assert", "token", ".", "startswith", "(", "'-'", ")", "and", "not", "token", ".", "startswith", "(", "'--'", ")", "left", "=", "token", ".", "lstrip", "(", "'-'", ")", "parsed", "=", "[", "]", "while", "left", "!=", "''", ":", "short", ",", "left", "=", "'-'", "+", "left", "[", "0", "]", ",", "left", "[", "1", ":", "]", "similar", "=", "[", "o", "for", "o", "in", "options", "if", "o", ".", "short", "==", "short", "]", "if", "len", "(", "similar", ")", ">", "1", ":", "raise", "tokens", ".", "error", "(", "'%s is specified ambiguously %d times'", "%", "(", "short", ",", "len", "(", "similar", ")", ")", ")", "elif", "len", "(", "similar", ")", "<", "1", ":", "o", "=", "Option", "(", "short", ",", "None", ",", "0", ")", "options", ".", "append", "(", "o", ")", "if", "tokens", ".", "error", "is", "DocoptExit", ":", "o", "=", "Option", "(", "short", ",", "None", ",", "0", ",", "True", ")", "else", ":", "# why copying is necessary here?", "o", "=", "Option", "(", "short", ",", "similar", "[", "0", "]", ".", "long", ",", "similar", "[", "0", "]", ".", "argcount", ",", "similar", "[", "0", "]", ".", "value", ")", "value", "=", "None", "if", "o", ".", "argcount", "!=", "0", ":", "if", "left", "==", "''", ":", "if", "tokens", ".", "current", "(", ")", "in", "[", "None", ",", "'--'", "]", ":", "raise", "tokens", ".", "error", "(", "'%s requires argument'", "%", "short", ")", "value", "=", "tokens", ".", "move", "(", ")", "else", ":", "value", "=", "left", "left", "=", "''", "if", "tokens", ".", "error", "is", "DocoptExit", ":", "o", ".", "value", "=", "value", "if", "value", "is", "not", "None", "else", "True", "parsed", ".", "append", "(", "o", ")", "return", "parsed" ]
https://github.com/dolphin-emu/dolphin/blob/b4c7f2b1e834ce5ea4b2301f9d4fb07c11afeabb/Externals/fmt/support/docopt.py#L334-L366
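The stacking behavior - several flag characters after one '-', with an argument-taking option allowed to consume either the rest of the token or the next token - shows up directly at the public entry point. A minimal sketch, assuming this vendored file is importable as docopt:

from docopt import docopt

usage = '''Usage: prog [-v] [-o FILE]

Options:
  -v        verbose output
  -o FILE   write results to FILE
'''

args = docopt(usage, argv=['-vo', 'report.txt'])
print(args['-v'], args['-o'])  # -> True report.txt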
neo-ai/neo-ai-dlr
bf397aa0367a5207654c00d2985f900d94ad1543
python/dlr/counter/deviceinfo.py
python
DeviceInfo.get_info
(self)
return dict_device
Prepare a dictionary of data members in sequence. 1. Machine 2. Architecture 3. Operating system 4. Machine name 5. Operating system distribution 6. UUID Parameters ---------- self Returns ------- dictionary: return a dictionary of data members
Prepare a dictionary of data members in sequence. 1. Machine 2. Architecture 3. Operating system 4. Machine name 5. Operating system distribution 6. UUID Parameters ---------- self
[ "Prepare", "a", "dictionary", "of", "data", "members", "in", "sequence", ".", "1", ".", "Machine", "2", ".", "Architecture", "3", ".", "Operating", "system", "4", ".", "Machine", "name", "5", ".", "Operating", "system", "distribution", "6", ".", "UUID", "Parameters", "----------", "self" ]
def get_info(self): """ Prepare a dictionary of data members in sequence. 1. Machine 2. Architecture 3. Operating system 4. Machine name 5. Operating system distribution 6. UUID Parameters ---------- self Returns ------- dictionary: return a dictionary of data members """ dict_device = { "uuid": self.uuid, "machine": self.machine, "arch": self.arch, "os": self.osname, "os distribution": self.dist } return dict_device
[ "def", "get_info", "(", "self", ")", ":", "dict_device", "=", "{", "\"uuid\"", ":", "self", ".", "uuid", ",", "\"machine\"", ":", "self", ".", "machine", ",", "\"arch\"", ":", "self", ".", "arch", ",", "\"os\"", ":", "self", ".", "osname", ",", "\"os distribution\"", ":", "self", ".", "dist", "}", "return", "dict_device" ]
https://github.com/neo-ai/neo-ai-dlr/blob/bf397aa0367a5207654c00d2985f900d94ad1543/python/dlr/counter/deviceinfo.py#L15-L41
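The result is a flat, JSON-friendly mapping. A hypothetical call (the field values depend entirely on the probing done in DeviceInfo's constructor, which is outside this excerpt):

info = DeviceInfo().get_info()
# e.g. {'uuid': '3f2a...', 'machine': 'x86_64', 'arch': '64bit',
#       'os': 'Linux', 'os distribution': 'Ubuntu 20.04'}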
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/learn/python/learn/models.py
python
linear_regression
(x, y, init_mean=None, init_stddev=1.0)
Creates linear regression TensorFlow subgraph. Args: x: tensor or placeholder for input features. y: tensor or placeholder for labels. init_mean: the mean value to use for initialization. init_stddev: the standard deviation to use for initialization. Returns: Predictions and loss tensors. Side effects: The variables linear_regression.weights and linear_regression.bias are initialized as follows. If init_mean is not None, then initialization will be done using a random normal initializer with the given init_mean and init_stddev. (These may be set to 0.0 each if a zero initialization is desirable for convex use cases.) If init_mean is None, then the uniform_unit_scaling_initializer will be used.
Creates linear regression TensorFlow subgraph.
[ "Creates", "linear", "regression", "TensorFlow", "subgraph", "." ]
def linear_regression(x, y, init_mean=None, init_stddev=1.0): """Creates linear regression TensorFlow subgraph. Args: x: tensor or placeholder for input features. y: tensor or placeholder for labels. init_mean: the mean value to use for initialization. init_stddev: the standard deviation to use for initialization. Returns: Predictions and loss tensors. Side effects: The variables linear_regression.weights and linear_regression.bias are initialized as follows. If init_mean is not None, then initialization will be done using a random normal initializer with the given init_mean and init_stddev. (These may be set to 0.0 each if a zero initialization is desirable for convex use cases.) If init_mean is None, then the uniform_unit_scaling_initializer will be used. """ with vs.variable_scope('linear_regression'): scope_name = vs.get_variable_scope().name summary.histogram('%s.x' % scope_name, x) summary.histogram('%s.y' % scope_name, y) dtype = x.dtype.base_dtype y_shape = y.get_shape() if len(y_shape) == 1: output_shape = 1 else: output_shape = y_shape[1] # Set up the requested initialization. if init_mean is None: weights = vs.get_variable( 'weights', [x.get_shape()[1], output_shape], dtype=dtype) bias = vs.get_variable('bias', [output_shape], dtype=dtype) else: weights = vs.get_variable( 'weights', [x.get_shape()[1], output_shape], initializer=init_ops.random_normal_initializer( init_mean, init_stddev, dtype=dtype), dtype=dtype) bias = vs.get_variable( 'bias', [output_shape], initializer=init_ops.random_normal_initializer( init_mean, init_stddev, dtype=dtype), dtype=dtype) summary.histogram('%s.weights' % scope_name, weights) summary.histogram('%s.bias' % scope_name, bias) return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
[ "def", "linear_regression", "(", "x", ",", "y", ",", "init_mean", "=", "None", ",", "init_stddev", "=", "1.0", ")", ":", "with", "vs", ".", "variable_scope", "(", "'linear_regression'", ")", ":", "scope_name", "=", "vs", ".", "get_variable_scope", "(", ")", ".", "name", "summary", ".", "histogram", "(", "'%s.x'", "%", "scope_name", ",", "x", ")", "summary", ".", "histogram", "(", "'%s.y'", "%", "scope_name", ",", "y", ")", "dtype", "=", "x", ".", "dtype", ".", "base_dtype", "y_shape", "=", "y", ".", "get_shape", "(", ")", "if", "len", "(", "y_shape", ")", "==", "1", ":", "output_shape", "=", "1", "else", ":", "output_shape", "=", "y_shape", "[", "1", "]", "# Set up the requested initialization.", "if", "init_mean", "is", "None", ":", "weights", "=", "vs", ".", "get_variable", "(", "'weights'", ",", "[", "x", ".", "get_shape", "(", ")", "[", "1", "]", ",", "output_shape", "]", ",", "dtype", "=", "dtype", ")", "bias", "=", "vs", ".", "get_variable", "(", "'bias'", ",", "[", "output_shape", "]", ",", "dtype", "=", "dtype", ")", "else", ":", "weights", "=", "vs", ".", "get_variable", "(", "'weights'", ",", "[", "x", ".", "get_shape", "(", ")", "[", "1", "]", ",", "output_shape", "]", ",", "initializer", "=", "init_ops", ".", "random_normal_initializer", "(", "init_mean", ",", "init_stddev", ",", "dtype", "=", "dtype", ")", ",", "dtype", "=", "dtype", ")", "bias", "=", "vs", ".", "get_variable", "(", "'bias'", ",", "[", "output_shape", "]", ",", "initializer", "=", "init_ops", ".", "random_normal_initializer", "(", "init_mean", ",", "init_stddev", ",", "dtype", "=", "dtype", ")", ",", "dtype", "=", "dtype", ")", "summary", ".", "histogram", "(", "'%s.weights'", "%", "scope_name", ",", "weights", ")", "summary", ".", "histogram", "(", "'%s.bias'", "%", "scope_name", ",", "bias", ")", "return", "losses_ops", ".", "mean_squared_error_regressor", "(", "x", ",", "y", ",", "weights", ",", "bias", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/learn/python/learn/models.py#L68-L116
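A minimal graph-mode sketch of wiring this subgraph up (TF 1.x semantics; per the docstring, the call returns a (predictions, loss) pair, so it can feed an optimizer directly):

import tensorflow as tf
from tensorflow.contrib.learn.python.learn import models

x = tf.placeholder(tf.float32, [None, 3])  # 3 input features
y = tf.placeholder(tf.float32, [None, 1])  # regression target
predictions, loss = models.linear_regression(x, y, init_mean=0.0, init_stddev=0.1)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)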
lawy623/SVS
b7c7ae367c82a4797ff4a896a2ff304f02e7f724
caffe/scripts/cpp_lint.py
python
FindStartOfExpressionInLine
(line, endpos, depth, startchar, endchar)
return (-1, depth)
Find position at the matching startchar. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differ by 1. Args: line: a CleansedLines line. endpos: start searching at this position. depth: nesting level at endpos. startchar: expression opening character. endchar: expression closing character. Returns: On finding matching startchar: (index at matching startchar, 0) Otherwise: (-1, new depth at beginning of this line)
Find position at the matching startchar.
[ "Find", "position", "at", "the", "matching", "startchar", "." ]
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar): """Find position at the matching startchar. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differ by 1. Args: line: a CleansedLines line. endpos: start searching at this position. depth: nesting level at endpos. startchar: expression opening character. endchar: expression closing character. Returns: On finding matching startchar: (index at matching startchar, 0) Otherwise: (-1, new depth at beginning of this line) """ for i in xrange(endpos, -1, -1): if line[i] == endchar: depth += 1 elif line[i] == startchar: depth -= 1 if depth == 0: return (i, 0) return (-1, depth)
[ "def", "FindStartOfExpressionInLine", "(", "line", ",", "endpos", ",", "depth", ",", "startchar", ",", "endchar", ")", ":", "for", "i", "in", "xrange", "(", "endpos", ",", "-", "1", ",", "-", "1", ")", ":", "if", "line", "[", "i", "]", "==", "endchar", ":", "depth", "+=", "1", "elif", "line", "[", "i", "]", "==", "startchar", ":", "depth", "-=", "1", "if", "depth", "==", "0", ":", "return", "(", "i", ",", "0", ")", "return", "(", "-", "1", ",", "depth", ")" ]
https://github.com/lawy623/SVS/blob/b7c7ae367c82a4797ff4a896a2ff304f02e7f724/caffe/scripts/cpp_lint.py#L1300-L1324
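The calling convention (endpos indexes the closing character itself, depth starts at 0) is easiest to see on a concrete line. A Python 3 transcription for illustration, since the original targets Python 2's xrange:

def find_start(line, endpos, depth, startchar, endchar):
    # Scan right-to-left, counting closers up and openers down.
    for i in range(endpos, -1, -1):
        if line[i] == endchar:
            depth += 1
        elif line[i] == startchar:
            depth -= 1
            if depth == 0:
                return (i, 0)
    return (-1, depth)

line = 'foo(bar(baz), qux)'
print(find_start(line, len(line) - 1, 0, '(', ')'))  # -> (3, 0): the '(' after 'foo'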
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py
python
xmlNs.newNsProp
(self, node, name, value)
return __tmp
Create a new property tagged with a namespace and carried by a node.
Create a new property tagged with a namespace and carried by a node.
[ "Create", "a", "new", "property", "tagged", "with", "a", "namespace", "and", "carried", "by", "a", "node", "." ]
def newNsProp(self, node, name, value): """Create a new property tagged with a namespace and carried by a node. """ if node is None: node__o = None else: node__o = node._o ret = libxml2mod.xmlNewNsProp(node__o, self._o, name, value) if ret is None:raise treeError('xmlNewNsProp() failed') __tmp = xmlAttr(_obj=ret) return __tmp
[ "def", "newNsProp", "(", "self", ",", "node", ",", "name", ",", "value", ")", ":", "if", "node", "is", "None", ":", "node__o", "=", "None", "else", ":", "node__o", "=", "node", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlNewNsProp", "(", "node__o", ",", "self", ".", "_o", ",", "name", ",", "value", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlNewNsProp() failed'", ")", "__tmp", "=", "xmlAttr", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L5957-L5965
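A usage sketch, assuming the bundled libxml2 Python bindings are built and importable:

import libxml2

doc = libxml2.newDoc("1.0")
root = libxml2.newNode("root")
doc.setRootElement(root)
ns = root.newNs("http://example.org/ns", "ex")  # returns an xmlNs wrapper
ns.newNsProp(root, "id", "42")                  # attaches ex:id="42" to <root>
print(doc.serialize())  # <root xmlns:ex="http://example.org/ns" ex:id="42"/>
doc.freeDoc()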
apiaryio/snowcrash
b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3
tools/gyp/pylib/gyp/generator/msvs.py
python
_GetIncludeDirs
(config)
return include_dirs, midl_include_dirs, resource_include_dirs
Returns the list of directories to be used for #include directives. Arguments: config: The dictionary that defines the special processing to be done for this configuration. Returns: The list of directory paths.
Returns the list of directories to be used for #include directives.
[ "Returns", "the", "list", "of", "directories", "to", "be", "used", "for", "#include", "directives", "." ]
def _GetIncludeDirs(config): """Returns the list of directories to be used for #include directives. Arguments: config: The dictionary that defines the special processing to be done for this configuration. Returns: The list of directory paths. """ # TODO(bradnelson): include_dirs should really be flexible enough not to # require this sort of thing. include_dirs = ( config.get('include_dirs', []) + config.get('msvs_system_include_dirs', [])) midl_include_dirs = ( config.get('midl_include_dirs', []) + config.get('msvs_system_include_dirs', [])) resource_include_dirs = config.get('resource_include_dirs', include_dirs) include_dirs = _FixPaths(include_dirs) midl_include_dirs = _FixPaths(midl_include_dirs) resource_include_dirs = _FixPaths(resource_include_dirs) return include_dirs, midl_include_dirs, resource_include_dirs
[ "def", "_GetIncludeDirs", "(", "config", ")", ":", "# TODO(bradnelson): include_dirs should really be flexible enough not to", "# require this sort of thing.", "include_dirs", "=", "(", "config", ".", "get", "(", "'include_dirs'", ",", "[", "]", ")", "+", "config", ".", "get", "(", "'msvs_system_include_dirs'", ",", "[", "]", ")", ")", "midl_include_dirs", "=", "(", "config", ".", "get", "(", "'midl_include_dirs'", ",", "[", "]", ")", "+", "config", ".", "get", "(", "'msvs_system_include_dirs'", ",", "[", "]", ")", ")", "resource_include_dirs", "=", "config", ".", "get", "(", "'resource_include_dirs'", ",", "include_dirs", ")", "include_dirs", "=", "_FixPaths", "(", "include_dirs", ")", "midl_include_dirs", "=", "_FixPaths", "(", "midl_include_dirs", ")", "resource_include_dirs", "=", "_FixPaths", "(", "resource_include_dirs", ")", "return", "include_dirs", ",", "midl_include_dirs", ",", "resource_include_dirs" ]
https://github.com/apiaryio/snowcrash/blob/b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3/tools/gyp/pylib/gyp/generator/msvs.py#L1188-L1209
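An illustrative config (hypothetical values) showing how the three lists are derived; note that the resource fallback happens after include_dirs has already been extended with the system directories:

config = {
    'include_dirs': ['include', 'third_party'],
    'msvs_system_include_dirs': ['$(WindowsSDKDir)/Include'],
    # no 'resource_include_dirs' key, so resources fall back to include_dirs
}
include, midl, resource = _GetIncludeDirs(config)
# include and midl both end with the system directory appended;
# resource mirrors include because of the fallback, and all three
# have been passed through _FixPaths for MSVS-style separators.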
openthread/openthread
9fcdbed9c526c70f1556d1ed84099c1535c7cd32
tools/otci/otci/otci.py
python
OTCI.srp_server_disable
(self)
Disable SRP server.
Disable SRP server.
[ "Disable", "SRP", "server", "." ]
def srp_server_disable(self): """Disable SRP server.""" self.execute_command('srp server disable')
[ "def", "srp_server_disable", "(", "self", ")", ":", "self", ".", "execute_command", "(", "'srp server disable'", ")" ]
https://github.com/openthread/openthread/blob/9fcdbed9c526c70f1556d1ed84099c1535c7cd32/tools/otci/otci/otci.py#L909-L911
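A hedged usage sketch; node stands for an already-connected OTCI instance, and the enable counterpart lives alongside this method in the same class:

node.srp_server_enable()   # issues 'srp server enable' over the CLI
# ... SRP clients register services ...
node.srp_server_disable()  # issues 'srp server disable'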
trailofbits/sienna-locomotive
09bc1a0bea7d7a33089422c62e0d3c715ecb7ce0
breakpad/src/tools/python/filter_syms.py
python
SymbolFileParser._ParseRecord
(self, record)
Parses a single Breakpad symbol record - a single line from the symbol file. Returns: The modified string to write to the output file, or None if no line should be written.
Parses a single Breakpad symbol record - a single line from the symbol file.
[ "Parses", "a", "single", "Breakpad", "symbol", "record", "-", "a", "single", "line", "from", "the", "symbol", "file", "." ]
def _ParseRecord(self, record): """Parses a single Breakpad symbol record - a single line from the symbol file. Returns: The modified string to write to the output file, or None if no line should be written. """ record_type = record.partition(' ')[0] if record_type == 'FILE': return self._ParseFileRecord(record) elif self._IsLineRecord(record_type): return self._ParseLineRecord(record) else: # Simply pass the record through unaltered. return record
[ "def", "_ParseRecord", "(", "self", ",", "record", ")", ":", "record_type", "=", "record", ".", "partition", "(", "' '", ")", "[", "0", "]", "if", "record_type", "==", "'FILE'", ":", "return", "self", ".", "_ParseFileRecord", "(", "record", ")", "elif", "self", ".", "_IsLineRecord", "(", "record_type", ")", ":", "return", "self", ".", "_ParseLineRecord", "(", "record", ")", "else", ":", "# Simply pass the record through unaltered.", "return", "record" ]
https://github.com/trailofbits/sienna-locomotive/blob/09bc1a0bea7d7a33089422c62e0d3c715ecb7ce0/breakpad/src/tools/python/filter_syms.py#L97-L112
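The dispatch keys off the first whitespace-delimited token. Hypothetical Breakpad records make the three paths concrete:

records = [
    'MODULE windows x86 0123456789ABCDEF0 app.pdb',  # passed through unchanged
    'FILE 1 c:/src/foo.cc',                          # handled by _ParseFileRecord
    '24a1 8 42 1',                                   # line record: addr size line file-id
    'FUNC 24a0 10 0 Foo::Bar()',                     # passed through unchanged
]
for r in records:
    print(r.partition(' ')[0])  # -> MODULE, FILE, 24a1, FUNC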