Dataset schema (one record per extracted Python function). String columns are
listed with their minimum and maximum lengths; "1 value" columns are constant
across the dataset:

  nwo                 string, 5 to 86 chars      (repository "owner/name")
  sha                 string, 40 chars           (commit hash)
  path                string, 4 to 189 chars     (file path within the repository)
  language            string, 1 value            (always "python")
  identifier          string, 1 to 94 chars      (function or method name)
  parameters          string, 2 to 4.03k chars
  argument_list       string, 1 value
  return_statement    string, 0 to 11.5k chars
  docstring           string, 1 to 33.2k chars
  docstring_summary   string, 0 to 5.15k chars
  docstring_tokens    sequence                   (tokenized docstring)
  function            string, 34 to 151k chars   (full source of the function)
  function_tokens     sequence                   (tokenized source)
  url                 string, 90 to 278 chars    (GitHub permalink with line range)
pgRouting/osm2pgrouting
8491929fc4037d308f271e84d59bb96da3c28aa2
tools/cpplint.py
python
_SetCountingStyle
(level)
Sets the module's counting options.
Sets the module's counting options.
[ "Sets", "the", "module", "s", "counting", "options", "." ]
def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)
[ "def", "_SetCountingStyle", "(", "level", ")", ":", "_cpplint_state", ".", "SetCountingStyle", "(", "level", ")" ]
https://github.com/pgRouting/osm2pgrouting/blob/8491929fc4037d308f271e84d59bb96da3c28aa2/tools/cpplint.py#L869-L871
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/idl/idl/generator.py
python
_CppHeaderFileWriter.gen_invocation_base_class_declaration
(self, command)
Generate the InvocationBaseGen class for a command's base class.
Generate the InvocationBaseGen class for a command's base class.
[ "Generate", "the", "InvocationBaseGen", "class", "for", "a", "command", "s", "base", "class", "." ]
def gen_invocation_base_class_declaration(self, command):
    # type: (ast.Command) -> None
    """Generate the InvocationBaseGen class for a command's base class."""
    class_declaration = 'class InvocationBaseGen : public _TypedCommandInvocationBase {'
    with writer.IndentedScopedBlock(self._writer, class_declaration, '};'):
        # public requires special indentation that aligns with the class definition.
        self._writer.unindent()
        self._writer.write_line('public:')
        self._writer.indent()

        # Inherit base constructor.
        self._writer.write_line(
            'using _TypedCommandInvocationBase::_TypedCommandInvocationBase;')

        self._writer.write_line('virtual Reply typedRun(OperationContext* opCtx) = 0;')

        if command.access_checks == []:
            self._writer.write_line(
                'void doCheckAuthorization(OperationContext* opCtx) const final {}')
[ "def", "gen_invocation_base_class_declaration", "(", "self", ",", "command", ")", ":", "# type: (ast.Command) -> None", "class_declaration", "=", "'class InvocationBaseGen : public _TypedCommandInvocationBase {'", "with", "writer", ".", "IndentedScopedBlock", "(", "self", ".", "_writer", ",", "class_declaration", ",", "'};'", ")", ":", "# public requires special indentation that aligns with the class definition.", "self", ".", "_writer", ".", "unindent", "(", ")", "self", ".", "_writer", ".", "write_line", "(", "'public:'", ")", "self", ".", "_writer", ".", "indent", "(", ")", "# Inherit base constructor.", "self", ".", "_writer", ".", "write_line", "(", "'using _TypedCommandInvocationBase::_TypedCommandInvocationBase;'", ")", "self", ".", "_writer", ".", "write_line", "(", "'virtual Reply typedRun(OperationContext* opCtx) = 0;'", ")", "if", "command", ".", "access_checks", "==", "[", "]", ":", "self", ".", "_writer", ".", "write_line", "(", "'void doCheckAuthorization(OperationContext* opCtx) const final {}'", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/idl/idl/generator.py#L893-L911
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/ipython/py2/IPython/core/completerlib.py
python
quick_completer
(cmd, completions)
Easily create a trivial completer for a command. Takes either a list of completions, or all completions in string (that will be split on whitespace). Example:: [d:\ipython]|1> import ipy_completers [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz']) [d:\ipython]|3> foo b<TAB> bar baz [d:\ipython]|3> foo ba
Easily create a trivial completer for a command.
[ "Easily", "create", "a", "trivial", "completer", "for", "a", "command", "." ]
def quick_completer(cmd, completions):
    """ Easily create a trivial completer for a command.

    Takes either a list of completions, or all completions in string
    (that will be split on whitespace).

    Example::

        [d:\ipython]|1> import ipy_completers
        [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
        [d:\ipython]|3> foo b<TAB>
        bar baz
        [d:\ipython]|3> foo ba
    """

    if isinstance(completions, string_types):
        completions = completions.split()

    def do_complete(self, event):
        return completions

    get_ipython().set_hook('complete_command', do_complete, str_key=cmd)
[ "def", "quick_completer", "(", "cmd", ",", "completions", ")", ":", "if", "isinstance", "(", "completions", ",", "string_types", ")", ":", "completions", "=", "completions", ".", "split", "(", ")", "def", "do_complete", "(", "self", ",", "event", ")", ":", "return", "completions", "get_ipython", "(", ")", ".", "set_hook", "(", "'complete_command'", ",", "do_complete", ",", "str_key", "=", "cmd", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py2/IPython/core/completerlib.py#L239-L260
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/Blast/3rdParty/assimp/port/PyAssimp/scripts/transformations.py
python
Arcball.getconstrain
(self)
return self._constrain
Return state of constrain to axis mode.
Return state of constrain to axis mode.
[ "Return", "state", "of", "constrain", "to", "axis", "mode", "." ]
def getconstrain(self):
    """Return state of constrain to axis mode."""
    return self._constrain
[ "def", "getconstrain", "(", "self", ")", ":", "return", "self", ".", "_constrain" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/Blast/3rdParty/assimp/port/PyAssimp/scripts/transformations.py#L1431-L1433
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib2to3/pytree.py
python
Base.__eq__
(self, other)
return self._eq(other)
Compare two nodes for equality. This calls the method _eq().
Compare two nodes for equality.
[ "Compare", "two", "nodes", "for", "equality", "." ]
def __eq__(self, other):
    """
    Compare two nodes for equality.

    This calls the method _eq().
    """
    if self.__class__ is not other.__class__:
        return NotImplemented
    return self._eq(other)
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "if", "self", ".", "__class__", "is", "not", "other", ".", "__class__", ":", "return", "NotImplemented", "return", "self", ".", "_eq", "(", "other", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib2to3/pytree.py#L55-L63
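The interesting detail above is returning NotImplemented instead of False when the classes differ: it tells Python to try the reflected comparison on the other operand before giving up. A minimal self-contained sketch of the same pattern (the Point class here is hypothetical, not part of lib2to3):

    class Point:
        def __init__(self, x):
            self.x = x

        def __eq__(self, other):
            if self.__class__ is not other.__class__:
                return NotImplemented  # let Python try the other operand's __eq__
            return self.x == other.x

    print(Point(1) == Point(1))  # True
    print(Point(1) == "point")   # False: falls back to the default comparison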
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py
python
XCObject.Print
(self, file=sys.stdout)
Prints a representation of this object to file, adhering to Xcode output formatting.
Prints a representation of this object to file, adhering to Xcode output formatting.
[ "Prints", "a", "representation", "of", "this", "object", "to", "file", "adhering", "to", "Xcode", "output", "formatting", "." ]
def Print(self, file=sys.stdout):
    """Prints a representation of this object to file, adhering to Xcode output
    formatting.
    """
    self.VerifyHasRequiredProperties()

    if self._should_print_single_line:
        # When printing an object in a single line, Xcode doesn't put any space
        # between the beginning of a dictionary (or presumably a list) and the
        # first contained item, so you wind up with snippets like
        #   ...CDEF = {isa = PBXFileReference; fileRef = 0123...
        # If it were me, I would have put a space in there after the opening
        # curly, but I guess this is just another one of those inconsistencies
        # between how Xcode prints PBXFileReference and PBXBuildFile objects as
        # compared to other objects.  Mimic Xcode's behavior here by using an
        # empty string for sep.
        sep = ""
        end_tabs = 0
    else:
        sep = "\n"
        end_tabs = 2

    # Start the object.  For example, '\t\tPBXProject = {\n'.
    self._XCPrint(file, 2, self._XCPrintableValue(2, self) + " = {" + sep)

    # "isa" isn't in the _properties dictionary, it's an intrinsic property
    # of the class which the object belongs to.  Xcode always outputs "isa"
    # as the first element of an object dictionary.
    self._XCKVPrint(file, 3, "isa", self.__class__.__name__)

    # The remaining elements of an object dictionary are sorted alphabetically.
    for property, value in sorted(self._properties.items()):
        self._XCKVPrint(file, 3, property, value)

    # End the object.
    self._XCPrint(file, end_tabs, "};\n")
[ "def", "Print", "(", "self", ",", "file", "=", "sys", ".", "stdout", ")", ":", "self", ".", "VerifyHasRequiredProperties", "(", ")", "if", "self", ".", "_should_print_single_line", ":", "# When printing an object in a single line, Xcode doesn't put any space", "# between the beginning of a dictionary (or presumably a list) and the", "# first contained item, so you wind up with snippets like", "# ...CDEF = {isa = PBXFileReference; fileRef = 0123...", "# If it were me, I would have put a space in there after the opening", "# curly, but I guess this is just another one of those inconsistencies", "# between how Xcode prints PBXFileReference and PBXBuildFile objects as", "# compared to other objects. Mimic Xcode's behavior here by using an", "# empty string for sep.", "sep", "=", "\"\"", "end_tabs", "=", "0", "else", ":", "sep", "=", "\"\\n\"", "end_tabs", "=", "2", "# Start the object. For example, '\\t\\tPBXProject = {\\n'.", "self", ".", "_XCPrint", "(", "file", ",", "2", ",", "self", ".", "_XCPrintableValue", "(", "2", ",", "self", ")", "+", "\" = {\"", "+", "sep", ")", "# \"isa\" isn't in the _properties dictionary, it's an intrinsic property", "# of the class which the object belongs to. Xcode always outputs \"isa\"", "# as the first element of an object dictionary.", "self", ".", "_XCKVPrint", "(", "file", ",", "3", ",", "\"isa\"", ",", "self", ".", "__class__", ".", "__name__", ")", "# The remaining elements of an object dictionary are sorted alphabetically.", "for", "property", ",", "value", "in", "sorted", "(", "self", ".", "_properties", ".", "items", "(", ")", ")", ":", "self", ".", "_XCKVPrint", "(", "file", ",", "3", ",", "property", ",", "value", ")", "# End the object.", "self", ".", "_XCPrint", "(", "file", ",", "end_tabs", ",", "\"};\\n\"", ")" ]
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py#L722-L758
ApolloAuto/apollo
463fb82f9e979d02dcb25044e60931293ab2dba0
modules/tools/prediction/data_pipelines/cruise_h5_preprocessing.py
python
getListOfFiles
(dirName)
return allFiles
Given a directory (dirName), return a list containing the full-path of all files inside that directory (including all hierarchy).
Given a directory (dirName), return a list containing the full-path of all files inside that directory (including all hierarchy).
[ "Given", "a", "directory", "(", "dirName", ")", "return", "a", "list", "containing", "the", "full", "-", "path", "of", "all", "files", "inside", "that", "directory", "(", "including", "all", "hierarchy", ")", "." ]
def getListOfFiles(dirName):
    '''
    Given a directory (dirName), return a list containing the full-path
    of all files inside that directory (including all hierarchy).
    '''
    listOfFiles = os.listdir(dirName)
    allFiles = list()
    for entry in listOfFiles:
        fullPath = os.path.join(dirName, entry)
        if os.path.isdir(fullPath):
            allFiles = allFiles + getListOfFiles(fullPath)
        else:
            allFiles.append(fullPath)
    return allFiles
[ "def", "getListOfFiles", "(", "dirName", ")", ":", "listOfFiles", "=", "os", ".", "listdir", "(", "dirName", ")", "allFiles", "=", "list", "(", ")", "for", "entry", "in", "listOfFiles", ":", "fullPath", "=", "os", ".", "path", ".", "join", "(", "dirName", ",", "entry", ")", "if", "os", ".", "path", ".", "isdir", "(", "fullPath", ")", ":", "allFiles", "=", "allFiles", "+", "getListOfFiles", "(", "fullPath", ")", "else", ":", "allFiles", ".", "append", "(", "fullPath", ")", "return", "allFiles" ]
https://github.com/ApolloAuto/apollo/blob/463fb82f9e979d02dcb25044e60931293ab2dba0/modules/tools/prediction/data_pipelines/cruise_h5_preprocessing.py#L33-L48
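The explicit recursion above is equivalent to a flat walk of the directory tree. A hedged alternative sketch using only the standard library's os.walk (illustrative, not part of the Apollo pipeline; the helper name is made up):

    import os

    def get_list_of_files_walk(dir_name):
        # os.walk already descends into every subdirectory,
        # so no manual recursion is needed.
        return [os.path.join(root, name)
                for root, _dirs, files in os.walk(dir_name)
                for name in files]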
eomahony/Numberjack
53fa9e994a36f881ffd320d8d04158097190aad8
Numberjack/__init__.py
python
Predicate.solution
(self, solver=None)
return output
.. deprecated:: 1.1 Instead you should use :func:`Expression.get_value` and call :func:`str` on that. Returns a string containing the valuation of the predicate. :param `NBJ_STD_Solver` solver: If specified, the solver from which the state will be sourced, if `None` then the most recently loaded solver is used. :rtype: str
.. deprecated:: 1.1 Instead you should use :func:`Expression.get_value` and call :func:`str` on that.
[ "..", "deprecated", "::", "1", ".", "1", "Instead", "you", "should", "use", ":", "func", ":", "Expression", ".", "get_value", "and", "call", ":", "func", ":", "str", "on", "that", "." ]
def solution(self, solver=None):
    """
    .. deprecated:: 1.1
        Instead you should use :func:`Expression.get_value` and call
        :func:`str` on that.

    Returns a string containing the valuation of the predicate.

    :param `NBJ_STD_Solver` solver: If specified, the solver from which the
        state will be sourced, if `None` then the most recently loaded
        solver is used.
    :rtype: str
    """
    save_str = Expression.__str__
    Expression.__str__ = lambda x: x.solution(solver)
    output = self.__str__()
    Expression.__str__ = save_str
    return output
[ "def", "solution", "(", "self", ",", "solver", "=", "None", ")", ":", "save_str", "=", "Expression", ".", "__str__", "Expression", ".", "__str__", "=", "lambda", "x", ":", "x", ".", "solution", "(", "solver", ")", "output", "=", "self", ".", "__str__", "(", ")", "Expression", ".", "__str__", "=", "save_str", "return", "output" ]
https://github.com/eomahony/Numberjack/blob/53fa9e994a36f881ffd320d8d04158097190aad8/Numberjack/__init__.py#L1492-L1509
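solution() works by temporarily monkey-patching Expression.__str__ and restoring it afterwards; if __str__ raised midway, the patched version would leak. A hedged sketch of the same swap-and-restore with try/finally (illustrative only, assuming the same Expression class):

    def solution_safe(self, solver=None):
        save_str = Expression.__str__
        Expression.__str__ = lambda x: x.solution(solver)
        try:
            return self.__str__()
        finally:
            # Restore the original __str__ even if formatting raises.
            Expression.__str__ = save_str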
freeorion/freeorion
c266a40eccd3a99a17de8fe57c36ef6ba3771665
default/python/AI/AIstate.py
python
AIstate.log_peace_request
(self, initiating_empire_id, recipient_empire_id)
Keep a record of peace requests made or received by this empire.
Keep a record of peace requests made or received by this empire.
[ "Keep", "a", "record", "of", "peace", "requests", "made", "or", "received", "by", "this", "empire", "." ]
def log_peace_request(self, initiating_empire_id, recipient_empire_id):
    """Keep a record of peace requests made or received by this empire."""
    peace_requests = self.diplomatic_logs.setdefault("peace_requests", {})
    log_index = (initiating_empire_id, recipient_empire_id)
    peace_requests.setdefault(log_index, []).append(fo.currentTurn())
[ "def", "log_peace_request", "(", "self", ",", "initiating_empire_id", ",", "recipient_empire_id", ")", ":", "peace_requests", "=", "self", ".", "diplomatic_logs", ".", "setdefault", "(", "\"peace_requests\"", ",", "{", "}", ")", "log_index", "=", "(", "initiating_empire_id", ",", "recipient_empire_id", ")", "peace_requests", ".", "setdefault", "(", "log_index", ",", "[", "]", ")", ".", "append", "(", "fo", ".", "currentTurn", "(", ")", ")" ]
https://github.com/freeorion/freeorion/blob/c266a40eccd3a99a17de8fe57c36ef6ba3771665/default/python/AI/AIstate.py#L1060-L1065
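The body relies on chained dict.setdefault calls to create the nested log structure on first use. A standalone illustration with plain dicts (no freeorion imports; the empire ids and turn numbers are made up):

    logs = {}
    peace_requests = logs.setdefault("peace_requests", {})
    peace_requests.setdefault((1, 2), []).append(5)  # turn 5: empire 1 -> empire 2
    peace_requests.setdefault((1, 2), []).append(8)  # turn 8: repeated request
    print(logs)  # {'peace_requests': {(1, 2): [5, 8]}}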
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/models/embedding/word2vec_optimized.py
python
Word2Vec.nearby
(self, words, num=20)
Prints out nearby words given a list of words.
Prints out nearby words given a list of words.
[ "Prints", "out", "nearby", "words", "given", "a", "list", "of", "words", "." ]
def nearby(self, words, num=20):
  """Prints out nearby words given a list of words."""
  ids = np.array([self._word2id.get(x, 0) for x in words])
  vals, idx = self._session.run(
      [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
  for i in xrange(len(words)):
    print("\n%s\n=====================================" % (words[i]))
    for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
      print("%-20s %6.4f" % (self._id2word[neighbor], distance))
[ "def", "nearby", "(", "self", ",", "words", ",", "num", "=", "20", ")", ":", "ids", "=", "np", ".", "array", "(", "[", "self", ".", "_word2id", ".", "get", "(", "x", ",", "0", ")", "for", "x", "in", "words", "]", ")", "vals", ",", "idx", "=", "self", ".", "_session", ".", "run", "(", "[", "self", ".", "_nearby_val", ",", "self", ".", "_nearby_idx", "]", ",", "{", "self", ".", "_nearby_word", ":", "ids", "}", ")", "for", "i", "in", "xrange", "(", "len", "(", "words", ")", ")", ":", "print", "(", "\"\\n%s\\n=====================================\"", "%", "(", "words", "[", "i", "]", ")", ")", "for", "(", "neighbor", ",", "distance", ")", "in", "zip", "(", "idx", "[", "i", ",", ":", "num", "]", ",", "vals", "[", "i", ",", ":", "num", "]", ")", ":", "print", "(", "\"%-20s %6.4f\"", "%", "(", "self", ".", "_id2word", "[", "neighbor", "]", ",", "distance", ")", ")" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/models/embedding/word2vec_optimized.py#L387-L395
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/aui/auibar.py
python
AuiToolBar.GetToolBarFits
(self)
return self.GetToolFitsByIndex(len(self._items) - 1)
Returns whether the :class:`AuiToolBar` size fits in a specified size.
Returns whether the :class:`AuiToolBar` size fits in a specified size.
[ "Returns", "whether", "the", ":", "class", ":", "AuiToolBar", "size", "fits", "in", "a", "specified", "size", "." ]
def GetToolBarFits(self):
    """ Returns whether the :class:`AuiToolBar` size fits in a specified size. """

    if len(self._items) == 0:
        # empty toolbar always 'fits'
        return True

    # entire toolbar content fits if the last tool fits
    return self.GetToolFitsByIndex(len(self._items) - 1)
[ "def", "GetToolBarFits", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_items", ")", "==", "0", ":", "# empty toolbar always 'fits'", "return", "True", "# entire toolbar content fits if the last tool fits", "return", "self", ".", "GetToolFitsByIndex", "(", "len", "(", "self", ".", "_items", ")", "-", "1", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/aui/auibar.py#L2965-L2973
chromiumembedded/cef
80caf947f3fe2210e5344713c5281d8af9bdc295
tools/file_util.py
python
copy_dir
(src, dst, quiet=True)
Copy a directory tree.
Copy a directory tree.
[ "Copy", "a", "directory", "tree", "." ]
def copy_dir(src, dst, quiet=True):
  """ Copy a directory tree. """
  try:
    remove_dir(dst, quiet)
    shutil.copytree(src, dst)
    if not quiet:
      sys.stdout.write('Transferring ' + src + ' directory.\n')
  except IOError as e:
    (errno, strerror) = e.args
    sys.stderr.write('Failed to copy directory from ' + src + ' to ' + dst +
                     ': ' + strerror)
    raise
[ "def", "copy_dir", "(", "src", ",", "dst", ",", "quiet", "=", "True", ")", ":", "try", ":", "remove_dir", "(", "dst", ",", "quiet", ")", "shutil", ".", "copytree", "(", "src", ",", "dst", ")", "if", "not", "quiet", ":", "sys", ".", "stdout", ".", "write", "(", "'Transferring '", "+", "src", "+", "' directory.\\n'", ")", "except", "IOError", "as", "e", ":", "(", "errno", ",", "strerror", ")", "=", "e", ".", "args", "sys", ".", "stderr", ".", "write", "(", "'Failed to copy directory from '", "+", "src", "+", "' to '", "+", "dst", "+", "': '", "+", "strerror", ")", "raise" ]
https://github.com/chromiumembedded/cef/blob/80caf947f3fe2210e5344713c5281d8af9bdc295/tools/file_util.py#L118-L129
runtimejs/runtime
0a6e84c30823d35a4548d6634166784260ae7b74
deps/v8/tools/jsmin.py
python
JavaScriptMinifier.CharFromNumber
(self, number)
return chr(number + 65)
A single-digit base-52 encoding using a-zA-Z.
A single-digit base-52 encoding using a-zA-Z.
[ "A", "single", "-", "digit", "base", "-", "52", "encoding", "using", "a", "-", "zA", "-", "Z", "." ]
def CharFromNumber(self, number):
  """A single-digit base-52 encoding using a-zA-Z."""
  if number < 26:
    return chr(number + 97)
  number -= 26
  return chr(number + 65)
[ "def", "CharFromNumber", "(", "self", ",", "number", ")", ":", "if", "number", "<", "26", ":", "return", "chr", "(", "number", "+", "97", ")", "number", "-=", "26", "return", "chr", "(", "number", "+", "65", ")" ]
https://github.com/runtimejs/runtime/blob/0a6e84c30823d35a4548d6634166784260ae7b74/deps/v8/tools/jsmin.py#L135-L140
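A quick check of the encoding, with the same logic lifted out of the class: 0-25 map to 'a'-'z' (ord('a') is 97) and 26-51 map to 'A'-'Z' (ord('A') is 65).

    def char_from_number(number):
        if number < 26:
            return chr(number + 97)  # 0..25 -> 'a'..'z'
        number -= 26
        return chr(number + 65)      # 26..51 -> 'A'..'Z'

    assert [char_from_number(n) for n in (0, 25, 26, 51)] == ['a', 'z', 'A', 'Z']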
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
tools/usb_gadget/echo_gadget.py
python
EchoCompositeFeature.ReceivePacket
(self, endpoint, data)
Echo a packet back to the host. Args: endpoint: Incoming endpoint (must be an OUT pipe). data: Packet data.
Echo a packet back to the host.
[ "Echo", "a", "packet", "back", "to", "the", "host", "." ]
def ReceivePacket(self, endpoint, data):
  """Echo a packet back to the host.

  Args:
    endpoint: Incoming endpoint (must be an OUT pipe).
    data: Packet data.
  """
  assert endpoint & usb_constants.Dir.IN == 0
  self.SendPacket(endpoint | usb_constants.Dir.IN, data)
[ "def", "ReceivePacket", "(", "self", ",", "endpoint", ",", "data", ")", ":", "assert", "endpoint", "&", "usb_constants", ".", "Dir", ".", "IN", "==", "0", "self", ".", "SendPacket", "(", "endpoint", "|", "usb_constants", ".", "Dir", ".", "IN", ",", "data", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/usb_gadget/echo_gadget.py#L177-L186
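The assert and the | rely on the USB convention that the direction bit is the high bit of the endpoint address (0x80): clear for OUT, set for IN. A worked example of the masking with the constant written out (assuming usb_constants.Dir.IN holds 0x80):

    DIR_IN = 0x80                     # assumed value of usb_constants.Dir.IN

    out_endpoint = 0x02               # OUT pipe: direction bit clear
    assert out_endpoint & DIR_IN == 0
    in_endpoint = out_endpoint | DIR_IN
    assert in_endpoint == 0x82        # same endpoint number, IN direction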
duckdb/duckdb
e252305418ef420d3989a63afc9ec3f570db3980
scripts/run-clang-tidy.py
python
apply_fixes
(args, tmpdir)
Calls clang-apply-fixes on a given directory.
Calls clang-apply-fixes on a given directory.
[ "Calls", "clang", "-", "apply", "-", "fixes", "on", "a", "given", "directory", "." ]
def apply_fixes(args, tmpdir):
  """Calls clang-apply-fixes on a given directory."""
  invocation = [args.clang_apply_replacements_binary]
  if args.format:
    invocation.append('-format')
  if args.style:
    invocation.append('-style=' + args.style)
  invocation.append(tmpdir)
  subprocess.call(invocation)
[ "def", "apply_fixes", "(", "args", ",", "tmpdir", ")", ":", "invocation", "=", "[", "args", ".", "clang_apply_replacements_binary", "]", "if", "args", ".", "format", ":", "invocation", ".", "append", "(", "'-format'", ")", "if", "args", ".", "style", ":", "invocation", ".", "append", "(", "'-style='", "+", "args", ".", "style", ")", "invocation", ".", "append", "(", "tmpdir", ")", "subprocess", ".", "call", "(", "invocation", ")" ]
https://github.com/duckdb/duckdb/blob/e252305418ef420d3989a63afc9ec3f570db3980/scripts/run-clang-tidy.py#L145-L153
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/utils/data/dataset.py
python
random_split
(dataset: Dataset[T], lengths: Sequence[int], generator: Optional[Generator] = default_generator)
return [Subset(dataset, indices[offset - length : offset]) for offset, length in zip(_accumulate(lengths), lengths)]
Randomly split a dataset into non-overlapping new datasets of given lengths. Optionally fix the generator for reproducible results, e.g.: >>> random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(42)) Args: dataset (Dataset): Dataset to be split lengths (sequence): lengths of splits to be produced generator (Generator): Generator used for the random permutation.
Randomly split a dataset into non-overlapping new datasets of given lengths. Optionally fix the generator for reproducible results, e.g.:
[ "Randomly", "split", "a", "dataset", "into", "non", "-", "overlapping", "new", "datasets", "of", "given", "lengths", ".", "Optionally", "fix", "the", "generator", "for", "reproducible", "results", "e", ".", "g", ".", ":" ]
def random_split(dataset: Dataset[T], lengths: Sequence[int],
                 generator: Optional[Generator] = default_generator) -> List[Subset[T]]:
    r"""
    Randomly split a dataset into non-overlapping new datasets of given lengths.
    Optionally fix the generator for reproducible results, e.g.:

    >>> random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(42))

    Args:
        dataset (Dataset): Dataset to be split
        lengths (sequence): lengths of splits to be produced
        generator (Generator): Generator used for the random permutation.
    """
    # Cannot verify that dataset is Sized
    if sum(lengths) != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")

    indices = randperm(sum(lengths), generator=generator).tolist()
    return [Subset(dataset, indices[offset - length : offset])
            for offset, length in zip(_accumulate(lengths), lengths)]
[ "def", "random_split", "(", "dataset", ":", "Dataset", "[", "T", "]", ",", "lengths", ":", "Sequence", "[", "int", "]", ",", "generator", ":", "Optional", "[", "Generator", "]", "=", "default_generator", ")", "->", "List", "[", "Subset", "[", "T", "]", "]", ":", "# Cannot verify that dataset is Sized", "if", "sum", "(", "lengths", ")", "!=", "len", "(", "dataset", ")", ":", "raise", "ValueError", "(", "\"Sum of input lengths does not equal the length of the input dataset!\"", ")", "indices", "=", "randperm", "(", "sum", "(", "lengths", ")", ",", "generator", "=", "generator", ")", ".", "tolist", "(", ")", "return", "[", "Subset", "(", "dataset", ",", "indices", "[", "offset", "-", "length", ":", "offset", "]", ")", "for", "offset", ",", "length", "in", "zip", "(", "_accumulate", "(", "lengths", ")", ",", "lengths", ")", "]" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/utils/data/dataset.py#L404-L422
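A short usage sketch that follows the docstring's own example (assumes torch is installed; the split sizes and seed come from the docstring):

    import torch
    from torch.utils.data import random_split

    generator = torch.Generator().manual_seed(42)
    train, val = random_split(range(10), [3, 7], generator=generator)
    print(len(train), len(val))    # 3 7
    print(list(train), list(val))  # disjoint index subsets covering 0..9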
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_windows.py
python
StandardDialogLayoutAdapter.DoFitWithScrolling
(*args, **kwargs)
return _windows_.StandardDialogLayoutAdapter_DoFitWithScrolling(*args, **kwargs)
DoFitWithScrolling(Dialog dialog, ScrolledWindow scrolledWindow) -> bool
DoFitWithScrolling(Dialog dialog, ScrolledWindow scrolledWindow) -> bool
[ "DoFitWithScrolling", "(", "Dialog", "dialog", "ScrolledWindow", "scrolledWindow", ")", "-", ">", "bool" ]
def DoFitWithScrolling(*args, **kwargs):
    """DoFitWithScrolling(Dialog dialog, ScrolledWindow scrolledWindow) -> bool"""
    return _windows_.StandardDialogLayoutAdapter_DoFitWithScrolling(*args, **kwargs)
[ "def", "DoFitWithScrolling", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "StandardDialogLayoutAdapter_DoFitWithScrolling", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_windows.py#L1018-L1020
redpony/cdec
f7c4899b174d86bc70b40b1cae68dcad364615cb
python/cdec/configobj.py
python
ConfigObj.reset
(self)
Clear ConfigObj instance and restore to 'freshly created' state.
Clear ConfigObj instance and restore to 'freshly created' state.
[ "Clear", "ConfigObj", "instance", "and", "restore", "to", "freshly", "created", "state", "." ]
def reset(self):
    """Clear ConfigObj instance and restore to 'freshly created' state."""
    self.clear()
    self._initialise()
    # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
    #        requires an empty dictionary
    self.configspec = None
    # Just to be sure ;-)
    self._original_configspec = None
[ "def", "reset", "(", "self", ")", ":", "self", ".", "clear", "(", ")", "self", ".", "_initialise", "(", ")", "# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)", "# requires an empty dictionary", "self", ".", "configspec", "=", "None", "# Just to be sure ;-)", "self", ".", "_original_configspec", "=", "None" ]
https://github.com/redpony/cdec/blob/f7c4899b174d86bc70b40b1cae68dcad364615cb/python/cdec/configobj.py#L2323-L2331
envoyproxy/envoy
65541accdafe255e72310b4298d646e091da2d80
tools/protoxform/protoprint.py
python
format_service_method
(type_context, method)
return '%srpc %s(%s%s%s) returns (%s%s) {%s}\n' % (
    leading_comment, method.name, trailing_comment,
    format_streaming(method.client_streaming),
    normalize_field_type_name(type_context, method.input_type),
    format_streaming(method.server_streaming),
    normalize_field_type_name(type_context, method.output_type),
    format_options(method.options))
Format a service MethodDescriptorProto. Args: type_context: contextual information for method. method: MethodDescriptorProto proto. Returns: Formatted service method as string.
Format a service MethodDescriptorProto.
[ "Format", "a", "service", "MethodDescriptorProto", "." ]
def format_service_method(type_context, method):
    """Format a service MethodDescriptorProto.

    Args:
        type_context: contextual information for method.
        method: MethodDescriptorProto proto.

    Returns:
        Formatted service method as string.
    """

    def format_streaming(s):
        return 'stream ' if s else ''

    leading_comment, trailing_comment = format_type_context_comments(type_context)
    return '%srpc %s(%s%s%s) returns (%s%s) {%s}\n' % (
        leading_comment, method.name, trailing_comment,
        format_streaming(method.client_streaming),
        normalize_field_type_name(type_context, method.input_type),
        format_streaming(method.server_streaming),
        normalize_field_type_name(type_context, method.output_type),
        format_options(method.options))
[ "def", "format_service_method", "(", "type_context", ",", "method", ")", ":", "def", "format_streaming", "(", "s", ")", ":", "return", "'stream '", "if", "s", "else", "''", "leading_comment", ",", "trailing_comment", "=", "format_type_context_comments", "(", "type_context", ")", "return", "'%srpc %s(%s%s%s) returns (%s%s) {%s}\\n'", "%", "(", "leading_comment", ",", "method", ".", "name", ",", "trailing_comment", ",", "format_streaming", "(", "method", ".", "client_streaming", ")", ",", "normalize_field_type_name", "(", "type_context", ",", "method", ".", "input_type", ")", ",", "format_streaming", "(", "method", ".", "server_streaming", ")", ",", "normalize_field_type_name", "(", "type_context", ",", "method", ".", "output_type", ")", ",", "format_options", "(", "method", ".", "options", ")", ")" ]
https://github.com/envoyproxy/envoy/blob/65541accdafe255e72310b4298d646e091da2d80/tools/protoxform/protoprint.py#L456-L475
wheybags/freeablo
921ac20be95828460ccc184a9de11eca5c7c0519
extern/fmt/support/docopt.py
python
parse_argv
(tokens, options, options_first=False)
return parsed
Parse command-line argument vector. If options_first: argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ; else: argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
Parse command-line argument vector.
[ "Parse", "command", "-", "line", "argument", "vector", "." ]
def parse_argv(tokens, options, options_first=False):
    """Parse command-line argument vector.

    If options_first:
        argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
    else:
        argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;

    """
    parsed = []
    while tokens.current() is not None:
        if tokens.current() == '--':
            return parsed + [Argument(None, v) for v in tokens]
        elif tokens.current().startswith('--'):
            parsed += parse_long(tokens, options)
        elif tokens.current().startswith('-') and tokens.current() != '-':
            parsed += parse_shorts(tokens, options)
        elif options_first:
            return parsed + [Argument(None, v) for v in tokens]
        else:
            parsed.append(Argument(None, tokens.move()))
    return parsed
[ "def", "parse_argv", "(", "tokens", ",", "options", ",", "options_first", "=", "False", ")", ":", "parsed", "=", "[", "]", "while", "tokens", ".", "current", "(", ")", "is", "not", "None", ":", "if", "tokens", ".", "current", "(", ")", "==", "'--'", ":", "return", "parsed", "+", "[", "Argument", "(", "None", ",", "v", ")", "for", "v", "in", "tokens", "]", "elif", "tokens", ".", "current", "(", ")", ".", "startswith", "(", "'--'", ")", ":", "parsed", "+=", "parse_long", "(", "tokens", ",", "options", ")", "elif", "tokens", ".", "current", "(", ")", ".", "startswith", "(", "'-'", ")", "and", "tokens", ".", "current", "(", ")", "!=", "'-'", ":", "parsed", "+=", "parse_shorts", "(", "tokens", ",", "options", ")", "elif", "options_first", ":", "return", "parsed", "+", "[", "Argument", "(", "None", ",", "v", ")", "for", "v", "in", "tokens", "]", "else", ":", "parsed", ".", "append", "(", "Argument", "(", "None", ",", "tokens", ".", "move", "(", ")", ")", ")", "return", "parsed" ]
https://github.com/wheybags/freeablo/blob/921ac20be95828460ccc184a9de11eca5c7c0519/extern/fmt/support/docopt.py#L428-L449
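A minimal standalone sketch of the same dispatch order, to make the grammar concrete (hypothetical helper; the real function delegates to parse_long, parse_shorts, and docopt's Tokens stream):

    def classify(argv):
        # Label each token the way parse_argv would dispatch it.
        labels = []
        it = iter(argv)
        for tok in it:
            if tok == '--':
                # everything after '--' is treated as a positional argument
                labels.append((tok, 'separator'))
                labels.extend((rest, 'argument') for rest in it)
            elif tok.startswith('--'):
                labels.append((tok, 'long option'))
            elif tok.startswith('-') and tok != '-':
                labels.append((tok, 'short option(s)'))
            else:
                labels.append((tok, 'argument'))
        return labels

    print(classify(['--verbose', 'file1', '--', '--not-an-option']))
    # [('--verbose', 'long option'), ('file1', 'argument'),
    #  ('--', 'separator'), ('--not-an-option', 'argument')]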
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/distributed/elastic/rendezvous/api.py
python
RendezvousHandler.next_rendezvous
( self, )
Main entry-point into the rendezvous barrier. Blocks until the rendezvous is complete and the current process is included in the formed worker group, or a timeout occurs, or the rendezvous was marked closed. Returns: A tuple of :py:class:`torch.distributed.Store`, ``rank``, and ``world size``. Raises: RendezvousClosedError: The rendezvous is closed. RendezvousConnectionError: The connection to the rendezvous backend has failed. RendezvousStateError: The rendezvous state is corrupt. RendezvousTimeoutError: The rendezvous did not complete on time.
Main entry-point into the rendezvous barrier.
[ "Main", "entry", "-", "point", "into", "the", "rendezvous", "barrier", "." ]
def next_rendezvous(
    self,
) -> Tuple[Store, int, int]:
    """Main entry-point into the rendezvous barrier.

    Blocks until the rendezvous is complete and the current process is
    included in the formed worker group, or a timeout occurs, or the
    rendezvous was marked closed.

    Returns:
        A tuple of :py:class:`torch.distributed.Store`, ``rank``, and
        ``world size``.

    Raises:
        RendezvousClosedError:
            The rendezvous is closed.
        RendezvousConnectionError:
            The connection to the rendezvous backend has failed.
        RendezvousStateError:
            The rendezvous state is corrupt.
        RendezvousTimeoutError:
            The rendezvous did not complete on time.
    """
[ "def", "next_rendezvous", "(", "self", ",", ")", "->", "Tuple", "[", "Store", ",", "int", ",", "int", "]", ":" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/elastic/rendezvous/api.py#L47-L69
vslavik/poedit
f7a9daa0a10037e090aa0a86f5ce0f24ececdf6a
deps/boost/tools/build/src/build/targets.py
python
BasicTarget.sources
(self)
return self.source_targets_
Returns the list of AbstractTargets which are used as sources. The extra properties specified for sources are not represented. The only use of this rule at the moment is the '--dump-tests' feature of the test system.
Returns the list of AbstractTargets which are used as sources. The extra properties specified for sources are not represented. The only use of this rule at the moment is the '--dump-tests' feature of the test system.
[ "Returns", "the", "list", "of", "AbstractTargets", "which", "are", "used", "as", "sources", ".", "The", "extra", "properties", "specified", "for", "sources", "are", "not", "represented", ".", "The", "only", "use", "of", "this", "rule", "at", "the", "moment", "is", "the", "--", "dump", "-", "tests", "feature", "of", "the", "test", "system", "." ]
def sources(self):
    """ Returns the list of AbstractTargets which are used as sources.
        The extra properties specified for sources are not represented.
        The only use of this rule at the moment is the '--dump-tests'
        feature of the test system.
    """
    if self.source_targets_ == None:
        self.source_targets_ = []
        for s in self.sources_:
            self.source_targets_.append(resolve_reference(s, self.project_)[0])

    return self.source_targets_
[ "def", "sources", "(", "self", ")", ":", "if", "self", ".", "source_targets_", "==", "None", ":", "self", ".", "source_targets_", "=", "[", "]", "for", "s", "in", "self", ".", "sources_", ":", "self", ".", "source_targets_", ".", "append", "(", "resolve_reference", "(", "s", ",", "self", ".", "project_", ")", "[", "0", "]", ")", "return", "self", ".", "source_targets_" ]
https://github.com/vslavik/poedit/blob/f7a9daa0a10037e090aa0a86f5ce0f24ececdf6a/deps/boost/tools/build/src/build/targets.py#L939-L950
cyberbotics/webots
af7fa7d68dcf7b4550f1f2e132092b41e83698fc
resources/web/server/session_server.py
python
ClientWebSocketHandler.check_origin
(self, origin)
return True
Allow to run the server on the same computer as the client.
Allow to run the server on the same computer as the client.
[ "Allow", "to", "run", "the", "server", "on", "the", "same", "computer", "as", "the", "client", "." ]
def check_origin(self, origin):
    """Allow to run the server on the same computer as the client."""
    return True
[ "def", "check_origin", "(", "self", ",", "origin", ")", ":", "return", "True" ]
https://github.com/cyberbotics/webots/blob/af7fa7d68dcf7b4550f1f2e132092b41e83698fc/resources/web/server/session_server.py#L133-L135
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/strings.py
python
str_slice
(arr, start=None, stop=None, step=None)
return _na_map(f, arr, dtype=str)
Slice substrings from each element in the Series or Index. Parameters ---------- start : int, optional Start position for slice operation. stop : int, optional Stop position for slice operation. step : int, optional Step size for slice operation. Returns ------- Series or Index of object Series or Index from sliced substring from original string object. See Also -------- Series.str.slice_replace : Replace a slice with a string. Series.str.get : Return element at position. Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i` being the position. Examples -------- >>> s = pd.Series(["koala", "fox", "chameleon"]) >>> s 0 koala 1 fox 2 chameleon dtype: object >>> s.str.slice(start=1) 0 oala 1 ox 2 hameleon dtype: object >>> s.str.slice(start=-1) 0 a 1 x 2 n dtype: object >>> s.str.slice(stop=2) 0 ko 1 fo 2 ch dtype: object >>> s.str.slice(step=2) 0 kaa 1 fx 2 caeen dtype: object >>> s.str.slice(start=0, stop=5, step=3) 0 kl 1 f 2 cm dtype: object Equivalent behaviour to: >>> s.str[0:5:3] 0 kl 1 f 2 cm dtype: object
Slice substrings from each element in the Series or Index.
[ "Slice", "substrings", "from", "each", "element", "in", "the", "Series", "or", "Index", "." ]
def str_slice(arr, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series or Index.

    Parameters
    ----------
    start : int, optional
        Start position for slice operation.
    stop : int, optional
        Stop position for slice operation.
    step : int, optional
        Step size for slice operation.

    Returns
    -------
    Series or Index of object
        Series or Index from sliced substring from original string object.

    See Also
    --------
    Series.str.slice_replace : Replace a slice with a string.
    Series.str.get : Return element at position.
        Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
        being the position.

    Examples
    --------
    >>> s = pd.Series(["koala", "fox", "chameleon"])
    >>> s
    0        koala
    1          fox
    2    chameleon
    dtype: object

    >>> s.str.slice(start=1)
    0        oala
    1          ox
    2    hameleon
    dtype: object

    >>> s.str.slice(start=-1)
    0    a
    1    x
    2    n
    dtype: object

    >>> s.str.slice(stop=2)
    0    ko
    1    fo
    2    ch
    dtype: object

    >>> s.str.slice(step=2)
    0      kaa
    1       fx
    2    caeen
    dtype: object

    >>> s.str.slice(start=0, stop=5, step=3)
    0    kl
    1     f
    2    cm
    dtype: object

    Equivalent behaviour to:

    >>> s.str[0:5:3]
    0    kl
    1     f
    2    cm
    dtype: object
    """
    obj = slice(start, stop, step)
    f = lambda x: x[obj]
    return _na_map(f, arr, dtype=str)
[ "def", "str_slice", "(", "arr", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "step", "=", "None", ")", ":", "obj", "=", "slice", "(", "start", ",", "stop", ",", "step", ")", "f", "=", "lambda", "x", ":", "x", "[", "obj", "]", "return", "_na_map", "(", "f", ",", "arr", ",", "dtype", "=", "str", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/strings.py#L1508-L1582
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/ffmpeg/ffmpeg_ops.py
python
encode_audio
(audio, file_format=None, samples_per_second=None)
return gen_encode_audio_op_py.encode_audio_v2( audio, file_format=file_format, samples_per_second=samples_per_second, bits_per_second=192000)
Creates an op that encodes an audio file using sampled audio from a tensor. Args: audio: A rank-2 `Tensor` that has time along dimension 0 and channels along dimension 1. Dimension 0 is `samples_per_second * length_in_seconds` long. file_format: The type of file to encode, as a string or rank-0 string tensor. "wav" is the only supported format. samples_per_second: The number of samples in the audio tensor per second of audio, as an `int` or rank-0 `int32` tensor. Returns: A scalar tensor that contains the encoded audio in the specified file format.
Creates an op that encodes an audio file using sampled audio from a tensor.
[ "Creates", "an", "op", "that", "encodes", "an", "audio", "file", "using", "sampled", "audio", "from", "a", "tensor", "." ]
def encode_audio(audio, file_format=None, samples_per_second=None):
  """Creates an op that encodes an audio file using sampled audio from a tensor.

  Args:
    audio: A rank-2 `Tensor` that has time along dimension 0 and
        channels along dimension 1. Dimension 0 is `samples_per_second *
        length_in_seconds` long.
    file_format: The type of file to encode, as a string or rank-0
        string tensor. "wav" is the only supported format.
    samples_per_second: The number of samples in the audio tensor per
        second of audio, as an `int` or rank-0 `int32` tensor.

  Returns:
    A scalar tensor that contains the encoded audio in the specified file
    format.
  """
  return gen_encode_audio_op_py.encode_audio_v2(
      audio,
      file_format=file_format,
      samples_per_second=samples_per_second,
      bits_per_second=192000)
[ "def", "encode_audio", "(", "audio", ",", "file_format", "=", "None", ",", "samples_per_second", "=", "None", ")", ":", "return", "gen_encode_audio_op_py", ".", "encode_audio_v2", "(", "audio", ",", "file_format", "=", "file_format", ",", "samples_per_second", "=", "samples_per_second", ",", "bits_per_second", "=", "192000", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/ffmpeg/ffmpeg_ops.py#L68-L88
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/nn/probability/distribution/bernoulli.py
python
Bernoulli._var
(self, probs1=None)
return self.exp(self.log(probs0) + self.log(probs1))
.. math:: VAR(B) = probs1 * probs0
.. math:: VAR(B) = probs1 * probs0
[ "..", "math", "::", "VAR", "(", "B", ")", "=", "probs1", "*", "probs0" ]
def _var(self, probs1=None):
    r"""
    .. math::
        VAR(B) = probs1 * probs0
    """
    probs1 = self._check_param_type(probs1)
    probs0 = 1.0 - probs1
    return self.exp(self.log(probs0) + self.log(probs1))
[ "def", "_var", "(", "self", ",", "probs1", "=", "None", ")", ":", "probs1", "=", "self", ".", "_check_param_type", "(", "probs1", ")", "probs0", "=", "1.0", "-", "probs1", "return", "self", ".", "exp", "(", "self", ".", "log", "(", "probs0", ")", "+", "self", ".", "log", "(", "probs1", ")", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/probability/distribution/bernoulli.py#L221-L228
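The return line computes probs0 * probs1 by going through logs, exp(log(probs0) + log(probs1)) = probs0 * probs1, which is the standard Bernoulli variance p * (1 - p). A plain-Python check of the identity (no MindSpore needed):

    import math

    p1 = 0.3
    p0 = 1.0 - p1
    var = math.exp(math.log(p0) + math.log(p1))
    assert abs(var - p1 * (1.0 - p1)) < 1e-12  # both equal 0.21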
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/clang/bindings/python/clang/cindex.py
python
Cursor.is_move_constructor
(self)
return conf.lib.clang_CXXConstructor_isMoveConstructor(self)
Returns True if the cursor refers to a C++ move constructor.
Returns True if the cursor refers to a C++ move constructor.
[ "Returns", "True", "if", "the", "cursor", "refers", "to", "a", "C", "++", "move", "constructor", "." ]
def is_move_constructor(self):
    """Returns True if the cursor refers to a C++ move constructor.
    """
    return conf.lib.clang_CXXConstructor_isMoveConstructor(self)
[ "def", "is_move_constructor", "(", "self", ")", ":", "return", "conf", ".", "lib", ".", "clang_CXXConstructor_isMoveConstructor", "(", "self", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/clang/bindings/python/clang/cindex.py#L1367-L1370
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/error.py
python
Error.__repr__
(self)
return self.to_str()
For `print` and `pprint`
For `print` and `pprint`
[ "For", "print", "and", "pprint" ]
def __repr__(self):
    """For `print` and `pprint`"""
    return self.to_str()
[ "def", "__repr__", "(", "self", ")", ":", "return", "self", ".", "to_str", "(", ")" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/error.py#L103-L105
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/framework/ops.py
python
Graph._is_function
(self, name)
return name in self._functions
Tests whether 'name' is registered in this graph's function library. Args: name: string op name. Returns: bool indicating whether or not 'name' is registered in function library.
Tests whether 'name' is registered in this graph's function library.
[ "Tests", "whether", "name", "is", "registered", "in", "this", "graph", "s", "function", "library", "." ]
def _is_function(self, name):
  """Tests whether 'name' is registered in this graph's function library.

  Args:
    name: string op name.

  Returns:
    bool indicating whether or not 'name' is registered in function library.
  """
  return name in self._functions
[ "def", "_is_function", "(", "self", ",", "name", ")", ":", "return", "name", "in", "self", ".", "_functions" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/framework/ops.py#L2256-L2264
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_core.py
python
FileSystem_FileNameToURL
(*args, **kwargs)
return _core_.FileSystem_FileNameToURL(*args, **kwargs)
FileSystem_FileNameToURL(String filename) -> String
FileSystem_FileNameToURL(String filename) -> String
[ "FileSystem_FileNameToURL", "(", "String", "filename", ")", "-", ">", "String" ]
def FileSystem_FileNameToURL(*args, **kwargs):
    """FileSystem_FileNameToURL(String filename) -> String"""
    return _core_.FileSystem_FileNameToURL(*args, **kwargs)
[ "def", "FileSystem_FileNameToURL", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "FileSystem_FileNameToURL", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L2476-L2478
ZintrulCre/LeetCode_Archiver
de23e16ead29336b5ee7aa1898a392a5d6463d27
LeetCode/python/709.py
python
Solution.toLowerCase
(self, str)
return str.lower()
:type str: str :rtype: str
:type str: str :rtype: str
[ ":", "type", "str", ":", "str", ":", "rtype", ":", "str" ]
def toLowerCase(self, str):
    """
    :type str: str
    :rtype: str
    """
    return str.lower()
[ "def", "toLowerCase", "(", "self", ",", "str", ")", ":", "return", "str", ".", "lower", "(", ")" ]
https://github.com/ZintrulCre/LeetCode_Archiver/blob/de23e16ead29336b5ee7aa1898a392a5d6463d27/LeetCode/python/709.py#L2-L7
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/models/rnn/ptb/reader.py
python
ptb_producer
(raw_data, batch_size, num_steps, name=None)
Iterate on the raw PTB data. This chunks up raw_data into batches of examples and returns Tensors that are drawn from these batches. Args: raw_data: one of the raw data outputs from ptb_raw_data. batch_size: int, the batch size. num_steps: int, the number of unrolls. name: the name of this operation (optional). Returns: A pair of Tensors, each shaped [batch_size, num_steps]. The second element of the tuple is the same data time-shifted to the right by one. Raises: tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
Iterate on the raw PTB data.
[ "Iterate", "on", "the", "raw", "PTB", "data", "." ]
def ptb_producer(raw_data, batch_size, num_steps, name=None):
  """Iterate on the raw PTB data.

  This chunks up raw_data into batches of examples and returns Tensors that
  are drawn from these batches.

  Args:
    raw_data: one of the raw data outputs from ptb_raw_data.
    batch_size: int, the batch size.
    num_steps: int, the number of unrolls.
    name: the name of this operation (optional).

  Returns:
    A pair of Tensors, each shaped [batch_size, num_steps]. The second element
    of the tuple is the same data time-shifted to the right by one.

  Raises:
    tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
  """
  with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    data = tf.reshape(raw_data[0 : batch_size * batch_len],
                      [batch_size, batch_len])

    epoch_size = (batch_len - 1) // num_steps
    assertion = tf.assert_positive(
        epoch_size,
        message="epoch_size == 0, decrease batch_size or num_steps")
    with tf.control_dependencies([assertion]):
      epoch_size = tf.identity(epoch_size, name="epoch_size")

    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])
    y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])
    return x, y
[ "def", "ptb_producer", "(", "raw_data", ",", "batch_size", ",", "num_steps", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", ",", "\"PTBProducer\"", ",", "[", "raw_data", ",", "batch_size", ",", "num_steps", "]", ")", ":", "raw_data", "=", "tf", ".", "convert_to_tensor", "(", "raw_data", ",", "name", "=", "\"raw_data\"", ",", "dtype", "=", "tf", ".", "int32", ")", "data_len", "=", "tf", ".", "size", "(", "raw_data", ")", "batch_len", "=", "data_len", "//", "batch_size", "data", "=", "tf", ".", "reshape", "(", "raw_data", "[", "0", ":", "batch_size", "*", "batch_len", "]", ",", "[", "batch_size", ",", "batch_len", "]", ")", "epoch_size", "=", "(", "batch_len", "-", "1", ")", "//", "num_steps", "assertion", "=", "tf", ".", "assert_positive", "(", "epoch_size", ",", "message", "=", "\"epoch_size == 0, decrease batch_size or num_steps\"", ")", "with", "tf", ".", "control_dependencies", "(", "[", "assertion", "]", ")", ":", "epoch_size", "=", "tf", ".", "identity", "(", "epoch_size", ",", "name", "=", "\"epoch_size\"", ")", "i", "=", "tf", ".", "train", ".", "range_input_producer", "(", "epoch_size", ",", "shuffle", "=", "False", ")", ".", "dequeue", "(", ")", "x", "=", "tf", ".", "slice", "(", "data", ",", "[", "0", ",", "i", "*", "num_steps", "]", ",", "[", "batch_size", ",", "num_steps", "]", ")", "y", "=", "tf", ".", "slice", "(", "data", ",", "[", "0", ",", "i", "*", "num_steps", "+", "1", "]", ",", "[", "batch_size", ",", "num_steps", "]", ")", "return", "x", ",", "y" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/models/rnn/ptb/reader.py#L81-L118
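The shape bookkeeping is easiest to see with small concrete numbers; a plain-Python walk-through of the arithmetic (the values are made up, no TensorFlow required):

    raw_len, batch_size, num_steps = 1000, 20, 35

    batch_len = raw_len // batch_size          # 50: columns per batch row
    epoch_size = (batch_len - 1) // num_steps  # 1: minibatches per epoch
    print(batch_len, epoch_size)               # 50 1

    # Iteration i reads x = data[:, i*num_steps : (i+1)*num_steps] and y the
    # same window shifted right by one, so y[t] is the target for x[t].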
InsightSoftwareConsortium/ITK
87acfce9a93d928311c38bc371b666b515b9f19d
Modules/ThirdParty/pygccxml/src/pygccxml/declarations/namespace.py
python
namespace_t.free_operators
( self, name=None, function=None, symbol=None, return_type=None, arg_types=None, header_dir=None, header_file=None, recursive=None, allow_empty=None)
return (
    self._find_multiple(
        scopedef.scopedef_t._impl_matchers[namespace_t.free_operator],
        name=self._build_operator_name(name, function, symbol),
        symbol=symbol,
        function=self._build_operator_function(name, function),
        decl_type=self._impl_decl_types[namespace_t.free_operator],
        return_type=return_type,
        arg_types=arg_types,
        header_dir=header_dir,
        header_file=header_file,
        recursive=recursive,
        allow_empty=allow_empty)
)
Returns a set of free operator declarations that match a defined criteria.
Returns a set of free operator declarations that match a defined criteria.
[ "Returns", "a", "set", "of", "free", "operator", "declarations", "that", "match", "a", "defined", "criteria", "." ]
def free_operators(
        self,
        name=None,
        function=None,
        symbol=None,
        return_type=None,
        arg_types=None,
        header_dir=None,
        header_file=None,
        recursive=None,
        allow_empty=None):
    """
    Returns a set of free operator declarations that match
    a defined criteria.
    """
    return (
        self._find_multiple(
            scopedef.scopedef_t._impl_matchers[namespace_t.free_operator],
            name=self._build_operator_name(name, function, symbol),
            symbol=symbol,
            function=self._build_operator_function(name, function),
            decl_type=self._impl_decl_types[namespace_t.free_operator],
            return_type=return_type,
            arg_types=arg_types,
            header_dir=header_dir,
            header_file=header_file,
            recursive=recursive,
            allow_empty=allow_empty)
    )
[ "def", "free_operators", "(", "self", ",", "name", "=", "None", ",", "function", "=", "None", ",", "symbol", "=", "None", ",", "return_type", "=", "None", ",", "arg_types", "=", "None", ",", "header_dir", "=", "None", ",", "header_file", "=", "None", ",", "recursive", "=", "None", ",", "allow_empty", "=", "None", ")", ":", "return", "(", "self", ".", "_find_multiple", "(", "scopedef", ".", "scopedef_t", ".", "_impl_matchers", "[", "namespace_t", ".", "free_operator", "]", ",", "name", "=", "self", ".", "_build_operator_name", "(", "name", ",", "function", ",", "symbol", ")", ",", "symbol", "=", "symbol", ",", "function", "=", "self", ".", "_build_operator_function", "(", "name", ",", "function", ")", ",", "decl_type", "=", "self", ".", "_impl_decl_types", "[", "namespace_t", ".", "free_operator", "]", ",", "return_type", "=", "return_type", ",", "arg_types", "=", "arg_types", ",", "header_dir", "=", "header_dir", ",", "header_file", "=", "header_file", ",", "recursive", "=", "recursive", ",", "allow_empty", "=", "allow_empty", ")", ")" ]
https://github.com/InsightSoftwareConsortium/ITK/blob/87acfce9a93d928311c38bc371b666b515b9f19d/Modules/ThirdParty/pygccxml/src/pygccxml/declarations/namespace.py#L232-L262
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/importlib/_bootstrap_external.py
python
ExtensionFileLoader.get_filename
(self, fullname)
return self.path
Return the path to the source file as found by the finder.
Return the path to the source file as found by the finder.
[ "Return", "the", "path", "to", "the", "source", "file", "as", "found", "by", "the", "finder", "." ]
def get_filename(self, fullname):
    """Return the path to the source file as found by the finder."""
    return self.path
[ "def", "get_filename", "(", "self", ",", "fullname", ")", ":", "return", "self", ".", "path" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/importlib/_bootstrap_external.py#L1200-L1202
rapidsai/cudf
d5b2448fc69f17509304d594f029d0df56984962
python/cudf/cudf/core/df_protocol.py
python
from_dataframe
( df: DataFrameObject, allow_copy: bool = False )
return _from_dataframe(df.__dataframe__(allow_copy=allow_copy))
Construct a cudf DataFrame from ``df`` if it supports ``__dataframe__``
Construct a cudf DataFrame from ``df`` if it supports ``__dataframe__``
[ "Construct", "a", "cudf", "DataFrame", "from", "df", "if", "it", "supports", "__dataframe__" ]
def from_dataframe(
    df: DataFrameObject, allow_copy: bool = False
) -> _CuDFDataFrame:
    """
    Construct a cudf DataFrame from ``df`` if it supports ``__dataframe__``
    """
    if isinstance(df, cudf.DataFrame):
        return df

    if not hasattr(df, "__dataframe__"):
        raise ValueError("`df` does not support __dataframe__")

    return _from_dataframe(df.__dataframe__(allow_copy=allow_copy))
[ "def", "from_dataframe", "(", "df", ":", "DataFrameObject", ",", "allow_copy", ":", "bool", "=", "False", ")", "->", "_CuDFDataFrame", ":", "if", "isinstance", "(", "df", ",", "cudf", ".", "DataFrame", ")", ":", "return", "df", "if", "not", "hasattr", "(", "df", ",", "\"__dataframe__\"", ")", ":", "raise", "ValueError", "(", "\"`df` does not support __dataframe__\"", ")", "return", "_from_dataframe", "(", "df", ".", "__dataframe__", "(", "allow_copy", "=", "allow_copy", ")", ")" ]
https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/cudf/cudf/core/df_protocol.py#L640-L652
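A hedged usage sketch of the interchange path (assumes pandas and cudf are installed, that the pandas version implements __dataframe__, and that the import path matches the file shown above):

    import pandas as pd
    from cudf.core.df_protocol import from_dataframe

    pdf = pd.DataFrame({"a": [1, 2, 3], "b": [0.5, 1.5, 2.5]})
    gdf = from_dataframe(pdf, allow_copy=True)  # copies host data to the device
    print(type(gdf))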
google/fhir
d77f57706c1a168529b0b87ca7ccb1c0113e83c2
py/google/fhir/json_format/_json_printer.py
python
JsonPrinter._print_list
(self, values: List[Any], print_func: Callable[[Any], None])
Adds the printed JSON list representation of values to _output. Args: values: The values to print as a JSON list. print_func: A function responsible for printing a single value.
Adds the printed JSON list representation of values to _output.
[ "Adds", "the", "printed", "JSON", "list", "representation", "of", "values", "to", "_output", "." ]
def _print_list(self, values: List[Any],
                print_func: Callable[[Any], None]) -> None:
  """Adds the printed JSON list representation of values to _output.

  Args:
    values: The values to print as a JSON list.
    print_func: A function responsible for printing a single value.
  """
  self.generator.open_json_list()
  field_size = len(values)
  for i in range(field_size):
    print_func(values[i])
    if i < (field_size - 1):
      self.generator.push(',')
      self.generator.add_newline()
  self.generator.close_json_list()
[ "def", "_print_list", "(", "self", ",", "values", ":", "List", "[", "Any", "]", ",", "print_func", ":", "Callable", "[", "[", "Any", "]", ",", "None", "]", ")", "->", "None", ":", "self", ".", "generator", ".", "open_json_list", "(", ")", "field_size", "=", "len", "(", "values", ")", "for", "i", "in", "range", "(", "field_size", ")", ":", "print_func", "(", "values", "[", "i", "]", ")", "if", "i", "<", "(", "field_size", "-", "1", ")", ":", "self", ".", "generator", ".", "push", "(", "','", ")", "self", ".", "generator", ".", "add_newline", "(", ")", "self", ".", "generator", ".", "close_json_list", "(", ")" ]
https://github.com/google/fhir/blob/d77f57706c1a168529b0b87ca7ccb1c0113e83c2/py/google/fhir/json_format/_json_printer.py#L199-L216
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/mailbox.py
python
MH.set_sequences
(self, sequences)
Set sequences using the given name-to-key-list dictionary.
Set sequences using the given name-to-key-list dictionary.
[ "Set", "sequences", "using", "the", "given", "name", "-", "to", "-", "key", "-", "list", "dictionary", "." ]
def set_sequences(self, sequences): """Set sequences using the given name-to-key-list dictionary.""" f = open(os.path.join(self._path, '.mh_sequences'), 'r+', encoding='ASCII') try: os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC)) for name, keys in sequences.items(): if len(keys) == 0: continue f.write(name + ':') prev = None completing = False for key in sorted(set(keys)): if key - 1 == prev: if not completing: completing = True f.write('-') elif completing: completing = False f.write('%s %s' % (prev, key)) else: f.write(' %s' % key) prev = key if completing: f.write(str(prev) + '\n') else: f.write('\n') finally: _sync_close(f)
[ "def", "set_sequences", "(", "self", ",", "sequences", ")", ":", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_path", ",", "'.mh_sequences'", ")", ",", "'r+'", ",", "encoding", "=", "'ASCII'", ")", "try", ":", "os", ".", "close", "(", "os", ".", "open", "(", "f", ".", "name", ",", "os", ".", "O_WRONLY", "|", "os", ".", "O_TRUNC", ")", ")", "for", "name", ",", "keys", "in", "sequences", ".", "items", "(", ")", ":", "if", "len", "(", "keys", ")", "==", "0", ":", "continue", "f", ".", "write", "(", "name", "+", "':'", ")", "prev", "=", "None", "completing", "=", "False", "for", "key", "in", "sorted", "(", "set", "(", "keys", ")", ")", ":", "if", "key", "-", "1", "==", "prev", ":", "if", "not", "completing", ":", "completing", "=", "True", "f", ".", "write", "(", "'-'", ")", "elif", "completing", ":", "completing", "=", "False", "f", ".", "write", "(", "'%s %s'", "%", "(", "prev", ",", "key", ")", ")", "else", ":", "f", ".", "write", "(", "' %s'", "%", "key", ")", "prev", "=", "key", "if", "completing", ":", "f", ".", "write", "(", "str", "(", "prev", ")", "+", "'\\n'", ")", "else", ":", "f", ".", "write", "(", "'\\n'", ")", "finally", ":", "_sync_close", "(", "f", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/mailbox.py#L1167-L1194
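The sequence file written by `set_sequences` is usually reached through the public `mailbox` API; a sketch assuming a scratch mailbox path:

    import mailbox

    mh = mailbox.MH("/tmp/demo_mh", create=True)  # hypothetical location
    key = mh.add(mailbox.MHMessage("Subject: hi\n\nbody\n"))

    seqs = mh.get_sequences()
    seqs.setdefault("flagged", []).append(key)
    mh.set_sequences(seqs)  # rewrites .mh_sequences, collapsing runs as 'a-b'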
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/thrift/transport/TZlibTransport.py
python
TZlibTransport.listen
(self)
Invoke the underlying transport's listen() method
Invoke the underlying transport's listen() method
[ "Invoke", "the", "underlying", "transport", "s", "listen", "()", "method" ]
def listen(self): """Invoke the underlying transport's listen() method""" self.__trans.listen()
[ "def", "listen", "(", "self", ")", ":", "self", ".", "__trans", ".", "listen", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/thrift/transport/TZlibTransport.py#L167-L169
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/html2.py
python
WebView.IsEditable
(*args, **kwargs)
return _html2.WebView_IsEditable(*args, **kwargs)
IsEditable(self) -> bool
IsEditable(self) -> bool
[ "IsEditable", "(", "self", ")", "-", ">", "bool" ]
def IsEditable(*args, **kwargs): """IsEditable(self) -> bool""" return _html2.WebView_IsEditable(*args, **kwargs)
[ "def", "IsEditable", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_html2", ".", "WebView_IsEditable", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/html2.py#L175-L177
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Draft/draftgeoutils/circles_incomplete.py
python
circleFrom3tan
(tan1, tan2, tan3)
Circle from three tangents. The tangents should be edges, and they may be either straight line edges or circular edges, so eight combinations are possible.
Circle from three tangents.
[ "Circle", "from", "three", "tangents", "." ]
def circleFrom3tan(tan1, tan2, tan3): """Circle from three tangents. The tangents should be edges, and they may be either straight line edges or circular edges, so eight combinations are possible. """ tan1IsLine = (geomType(tan1) == "Line") tan2IsLine = (geomType(tan2) == "Line") tan3IsLine = (geomType(tan3) == "Line") tan1IsCircle = (geomType(tan1) == "Circle") tan2IsCircle = (geomType(tan2) == "Circle") tan3IsCircle = (geomType(tan3) == "Circle") if tan1IsLine and tan2IsLine and tan3IsLine: return circleFrom3LineTangents(tan1, tan2, tan3) elif tan1IsCircle and tan2IsCircle and tan3IsCircle: return circleFrom3CircleTangents(tan1, tan2, tan3) elif tan1IsCircle and tan2IsLine and tan3IsLine: return circleFrom1Circle2Lines(tan1, tan2, tan3) elif tan1IsLine and tan2IsCircle and tan3IsLine: return circleFrom1Circle2Lines(tan2, tan1, tan3) elif tan1IsLine and tan2IsLine and tan3IsCircle: return circleFrom1Circle2Lines(tan3, tan1, tan2) elif tan1IsLine and tan2IsCircle and tan3IsCircle: return circleFrom2Circle1Lines(tan2, tan3, tan1) elif tan1IsCircle and tan2IsLine and tan3IsCircle: return circleFrom2Circle1Lines(tan1, tan3, tan2) elif tan1IsCircle and tan2IsCircle and tan3IsLine: return circleFrom2Circle1Lines(tan1, tan2, tan3)
[ "def", "circleFrom3tan", "(", "tan1", ",", "tan2", ",", "tan3", ")", ":", "tan1IsLine", "=", "(", "geomType", "(", "tan1", ")", "==", "\"Line\"", ")", "tan2IsLine", "=", "(", "geomType", "(", "tan2", ")", "==", "\"Line\"", ")", "tan3IsLine", "=", "(", "geomType", "(", "tan3", ")", "==", "\"Line\"", ")", "tan1IsCircle", "=", "(", "geomType", "(", "tan1", ")", "==", "\"Circle\"", ")", "tan2IsCircle", "=", "(", "geomType", "(", "tan2", ")", "==", "\"Circle\"", ")", "tan3IsCircle", "=", "(", "geomType", "(", "tan3", ")", "==", "\"Circle\"", ")", "if", "tan1IsLine", "and", "tan2IsLine", "and", "tan3IsLine", ":", "return", "circleFrom3LineTangents", "(", "tan1", ",", "tan2", ",", "tan3", ")", "elif", "tan1IsCircle", "and", "tan2IsCircle", "and", "tan3IsCircle", ":", "return", "circleFrom3CircleTangents", "(", "tan1", ",", "tan2", ",", "tan3", ")", "elif", "tan1IsCircle", "and", "tan2IsLine", "and", "tan3IsLine", ":", "return", "circleFrom1Circle2Lines", "(", "tan1", ",", "tan2", ",", "tan3", ")", "elif", "tan1IsLine", "and", "tan2IsCircle", "and", "tan3IsLine", ":", "return", "circleFrom1Circle2Lines", "(", "tan2", ",", "tan1", ",", "tan3", ")", "elif", "tan1IsLine", "and", "tan2IsLine", "and", "tan3IsCircle", ":", "return", "circleFrom1Circle2Lines", "(", "tan3", ",", "tan1", ",", "tan2", ")", "elif", "tan1IsLine", "and", "tan2IsCircle", "and", "tan3IsCircle", ":", "return", "circleFrom2Circle1Lines", "(", "tan2", ",", "tan3", ",", "tan1", ")", "elif", "tan1IsCircle", "and", "tan2IsLine", "and", "tan3IsCircle", ":", "return", "circleFrom2Circle1Lines", "(", "tan1", ",", "tan3", ",", "tan2", ")", "elif", "tan1IsCircle", "and", "tan2IsCircle", "and", "tan3IsLine", ":", "return", "circleFrom2Circle1Lines", "(", "tan1", ",", "tan2", ",", "tan3", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftgeoutils/circles_incomplete.py#L184-L220
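A sketch of calling the dispatcher with three straight edges; the import path and the `Part.makeLine` producer are assumptions about a running FreeCAD session, and for three line tangents the helper may return several candidate circles (incircle plus excircles):

    import Part  # FreeCAD's geometry module
    from draftgeoutils.circles_incomplete import circleFrom3tan  # path assumption

    # Three line edges forming a triangle; geomType() reports them as "Line".
    e1 = Part.makeLine((0, 0, 0), (4, 0, 0))
    e2 = Part.makeLine((4, 0, 0), (0, 3, 0))
    e3 = Part.makeLine((0, 3, 0), (0, 0, 0))
    result = circleFrom3tan(e1, e2, e3)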
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/prompt-toolkit/py3/prompt_toolkit/input/win32.py
python
ConsoleInputReader._event_to_key_presses
(self, ev: KEY_EVENT_RECORD)
For this `KEY_EVENT_RECORD`, return a list of `KeyPress` instances.
For this `KEY_EVENT_RECORD`, return a list of `KeyPress` instances.
[ "For", "this", "KEY_EVENT_RECORD", "return", "a", "list", "of", "KeyPress", "instances", "." ]
def _event_to_key_presses(self, ev: KEY_EVENT_RECORD) -> List[KeyPress]: """ For this `KEY_EVENT_RECORD`, return a list of `KeyPress` instances. """ assert type(ev) == KEY_EVENT_RECORD and ev.KeyDown result: Optional[KeyPress] = None control_key_state = ev.ControlKeyState u_char = ev.uChar.UnicodeChar # Use surrogatepass because u_char may be an unmatched surrogate ascii_char = u_char.encode("utf-8", "surrogatepass") # NOTE: We don't use `ev.uChar.AsciiChar`. That appears to be the # unicode code point truncated to 1 byte. See also: # https://github.com/ipython/ipython/issues/10004 # https://github.com/jonathanslenders/python-prompt-toolkit/issues/389 if u_char == "\x00": if ev.VirtualKeyCode in self.keycodes: result = KeyPress(self.keycodes[ev.VirtualKeyCode], "") else: if ascii_char in self.mappings: if self.mappings[ascii_char] == Keys.ControlJ: u_char = ( "\n" # Windows sends \n, turn into \r for unix compatibility. ) result = KeyPress(self.mappings[ascii_char], u_char) else: result = KeyPress(u_char, u_char) # First we handle Shift-Control-Arrow/Home/End (need to do this first) if ( ( control_key_state & self.LEFT_CTRL_PRESSED or control_key_state & self.RIGHT_CTRL_PRESSED ) and control_key_state & self.SHIFT_PRESSED and result ): mapping: Dict[str, str] = { Keys.Left: Keys.ControlShiftLeft, Keys.Right: Keys.ControlShiftRight, Keys.Up: Keys.ControlShiftUp, Keys.Down: Keys.ControlShiftDown, Keys.Home: Keys.ControlShiftHome, Keys.End: Keys.ControlShiftEnd, Keys.Insert: Keys.ControlShiftInsert, Keys.PageUp: Keys.ControlShiftPageUp, Keys.PageDown: Keys.ControlShiftPageDown, } result.key = mapping.get(result.key, result.key) # Correctly handle Control-Arrow/Home/End and Control-Insert/Delete keys. if ( control_key_state & self.LEFT_CTRL_PRESSED or control_key_state & self.RIGHT_CTRL_PRESSED ) and result: mapping = { Keys.Left: Keys.ControlLeft, Keys.Right: Keys.ControlRight, Keys.Up: Keys.ControlUp, Keys.Down: Keys.ControlDown, Keys.Home: Keys.ControlHome, Keys.End: Keys.ControlEnd, Keys.Insert: Keys.ControlInsert, Keys.Delete: Keys.ControlDelete, Keys.PageUp: Keys.ControlPageUp, Keys.PageDown: Keys.ControlPageDown, } result.key = mapping.get(result.key, result.key) # Turn 'Tab' into 'BackTab' when shift was pressed. # Also handle other shift-key combination if control_key_state & self.SHIFT_PRESSED and result: mapping = { Keys.Tab: Keys.BackTab, Keys.Left: Keys.ShiftLeft, Keys.Right: Keys.ShiftRight, Keys.Up: Keys.ShiftUp, Keys.Down: Keys.ShiftDown, Keys.Home: Keys.ShiftHome, Keys.End: Keys.ShiftEnd, Keys.Insert: Keys.ShiftInsert, Keys.Delete: Keys.ShiftDelete, Keys.PageUp: Keys.ShiftPageUp, Keys.PageDown: Keys.ShiftPageDown, } result.key = mapping.get(result.key, result.key) # Turn 'Space' into 'ControlSpace' when control was pressed. if ( ( control_key_state & self.LEFT_CTRL_PRESSED or control_key_state & self.RIGHT_CTRL_PRESSED ) and result and result.data == " " ): result = KeyPress(Keys.ControlSpace, " ") # Turn Control-Enter into META-Enter. (On a vt100 terminal, we cannot # detect this combination. But it's really practical on Windows.) if ( ( control_key_state & self.LEFT_CTRL_PRESSED or control_key_state & self.RIGHT_CTRL_PRESSED ) and result and result.key == Keys.ControlJ ): return [KeyPress(Keys.Escape, ""), result] # Return result. If alt was pressed, prefix the result with an # 'Escape' key, just like unix VT100 terminals do. # NOTE: Only replace the left alt with escape. The right alt key often # acts as altgr and is used in many non US keyboard layouts for # typing some special characters, like a backslash. We don't want # all backslashes to be prefixed with escape. (Esc-\ has a # meaning in E-macs, for instance.) if result: meta_pressed = control_key_state & self.LEFT_ALT_PRESSED if meta_pressed: return [KeyPress(Keys.Escape, ""), result] else: return [result] else: return []
[ "def", "_event_to_key_presses", "(", "self", ",", "ev", ":", "KEY_EVENT_RECORD", ")", "->", "List", "[", "KeyPress", "]", ":", "assert", "type", "(", "ev", ")", "==", "KEY_EVENT_RECORD", "and", "ev", ".", "KeyDown", "result", ":", "Optional", "[", "KeyPress", "]", "=", "None", "control_key_state", "=", "ev", ".", "ControlKeyState", "u_char", "=", "ev", ".", "uChar", ".", "UnicodeChar", "# Use surrogatepass because u_char may be an unmatched surrogate", "ascii_char", "=", "u_char", ".", "encode", "(", "\"utf-8\"", ",", "\"surrogatepass\"", ")", "# NOTE: We don't use `ev.uChar.AsciiChar`. That appears to be the", "# unicode code point truncated to 1 byte. See also:", "# https://github.com/ipython/ipython/issues/10004", "# https://github.com/jonathanslenders/python-prompt-toolkit/issues/389", "if", "u_char", "==", "\"\\x00\"", ":", "if", "ev", ".", "VirtualKeyCode", "in", "self", ".", "keycodes", ":", "result", "=", "KeyPress", "(", "self", ".", "keycodes", "[", "ev", ".", "VirtualKeyCode", "]", ",", "\"\"", ")", "else", ":", "if", "ascii_char", "in", "self", ".", "mappings", ":", "if", "self", ".", "mappings", "[", "ascii_char", "]", "==", "Keys", ".", "ControlJ", ":", "u_char", "=", "(", "\"\\n\"", "# Windows sends \\n, turn into \\r for unix compatibility.", ")", "result", "=", "KeyPress", "(", "self", ".", "mappings", "[", "ascii_char", "]", ",", "u_char", ")", "else", ":", "result", "=", "KeyPress", "(", "u_char", ",", "u_char", ")", "# First we handle Shift-Control-Arrow/Home/End (need to do this first)", "if", "(", "(", "control_key_state", "&", "self", ".", "LEFT_CTRL_PRESSED", "or", "control_key_state", "&", "self", ".", "RIGHT_CTRL_PRESSED", ")", "and", "control_key_state", "&", "self", ".", "SHIFT_PRESSED", "and", "result", ")", ":", "mapping", ":", "Dict", "[", "str", ",", "str", "]", "=", "{", "Keys", ".", "Left", ":", "Keys", ".", "ControlShiftLeft", ",", "Keys", ".", "Right", ":", "Keys", ".", "ControlShiftRight", ",", "Keys", ".", "Up", ":", "Keys", ".", "ControlShiftUp", ",", "Keys", ".", "Down", ":", "Keys", ".", "ControlShiftDown", ",", "Keys", ".", "Home", ":", "Keys", ".", "ControlShiftHome", ",", "Keys", ".", "End", ":", "Keys", ".", "ControlShiftEnd", ",", "Keys", ".", "Insert", ":", "Keys", ".", "ControlShiftInsert", ",", "Keys", ".", "PageUp", ":", "Keys", ".", "ControlShiftPageUp", ",", "Keys", ".", "PageDown", ":", "Keys", ".", "ControlShiftPageDown", ",", "}", "result", ".", "key", "=", "mapping", ".", "get", "(", "result", ".", "key", ",", "result", ".", "key", ")", "# Correctly handle Control-Arrow/Home/End and Control-Insert/Delete keys.", "if", "(", "control_key_state", "&", "self", ".", "LEFT_CTRL_PRESSED", "or", "control_key_state", "&", "self", ".", "RIGHT_CTRL_PRESSED", ")", "and", "result", ":", "mapping", "=", "{", "Keys", ".", "Left", ":", "Keys", ".", "ControlLeft", ",", "Keys", ".", "Right", ":", "Keys", ".", "ControlRight", ",", "Keys", ".", "Up", ":", "Keys", ".", "ControlUp", ",", "Keys", ".", "Down", ":", "Keys", ".", "ControlDown", ",", "Keys", ".", "Home", ":", "Keys", ".", "ControlHome", ",", "Keys", ".", "End", ":", "Keys", ".", "ControlEnd", ",", "Keys", ".", "Insert", ":", "Keys", ".", "ControlInsert", ",", "Keys", ".", "Delete", ":", "Keys", ".", "ControlDelete", ",", "Keys", ".", "PageUp", ":", "Keys", ".", "ControlPageUp", ",", "Keys", ".", "PageDown", ":", "Keys", ".", "ControlPageDown", ",", "}", "result", ".", "key", "=", "mapping", ".", "get", "(", "result", ".", "key", ",", "result", ".", "key", ")", "# Turn 'Tab' into 'BackTab' when shift was pressed.", "# Also handle other shift-key combination", "if", "control_key_state", "&", "self", ".", "SHIFT_PRESSED", "and", "result", ":", "mapping", "=", "{", "Keys", ".", "Tab", ":", "Keys", ".", "BackTab", ",", "Keys", ".", "Left", ":", "Keys", ".", "ShiftLeft", ",", "Keys", ".", "Right", ":", "Keys", ".", "ShiftRight", ",", "Keys", ".", "Up", ":", "Keys", ".", "ShiftUp", ",", "Keys", ".", "Down", ":", "Keys", ".", "ShiftDown", ",", "Keys", ".", "Home", ":", "Keys", ".", "ShiftHome", ",", "Keys", ".", "End", ":", "Keys", ".", "ShiftEnd", ",", "Keys", ".", "Insert", ":", "Keys", ".", "ShiftInsert", ",", "Keys", ".", "Delete", ":", "Keys", ".", "ShiftDelete", ",", "Keys", ".", "PageUp", ":", "Keys", ".", "ShiftPageUp", ",", "Keys", ".", "PageDown", ":", "Keys", ".", "ShiftPageDown", ",", "}", "result", ".", "key", "=", "mapping", ".", "get", "(", "result", ".", "key", ",", "result", ".", "key", ")", "# Turn 'Space' into 'ControlSpace' when control was pressed.", "if", "(", "(", "control_key_state", "&", "self", ".", "LEFT_CTRL_PRESSED", "or", "control_key_state", "&", "self", ".", "RIGHT_CTRL_PRESSED", ")", "and", "result", "and", "result", ".", "data", "==", "\" \"", ")", ":", "result", "=", "KeyPress", "(", "Keys", ".", "ControlSpace", ",", "\" \"", ")", "# Turn Control-Enter into META-Enter. (On a vt100 terminal, we cannot", "# detect this combination. But it's really practical on Windows.)", "if", "(", "(", "control_key_state", "&", "self", ".", "LEFT_CTRL_PRESSED", "or", "control_key_state", "&", "self", ".", "RIGHT_CTRL_PRESSED", ")", "and", "result", "and", "result", ".", "key", "==", "Keys", ".", "ControlJ", ")", ":", "return", "[", "KeyPress", "(", "Keys", ".", "Escape", ",", "\"\"", ")", ",", "result", "]", "# Return result. If alt was pressed, prefix the result with an", "# 'Escape' key, just like unix VT100 terminals do.", "# NOTE: Only replace the left alt with escape. The right alt key often", "# acts as altgr and is used in many non US keyboard layouts for", "# typing some special characters, like a backslash. We don't want", "# all backslashes to be prefixed with escape. (Esc-\\ has a", "# meaning in E-macs, for instance.)", "if", "result", ":", "meta_pressed", "=", "control_key_state", "&", "self", ".", "LEFT_ALT_PRESSED", "if", "meta_pressed", ":", "return", "[", "KeyPress", "(", "Keys", ".", "Escape", ",", "\"\"", ")", ",", "result", "]", "else", ":", "return", "[", "result", "]", "else", ":", "return", "[", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py3/prompt_toolkit/input/win32.py#L387-L517
OSGeo/gdal
3748fc4ba4fba727492774b2b908a2130c864a83
swig/python/osgeo/ogr.py
python
RegisterAll
(*args)
return _ogr.RegisterAll(*args)
r"""RegisterAll()
r"""RegisterAll()
[ "r", "RegisterAll", "()" ]
def RegisterAll(*args): r"""RegisterAll()""" return _ogr.RegisterAll(*args)
[ "def", "RegisterAll", "(", "*", "args", ")", ":", "return", "_ogr", ".", "RegisterAll", "(", "*", "args", ")" ]
https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/osgeo/ogr.py#L7648-L7650
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/ops/data_flow_ops.py
python
QueueBase.dequeue_up_to
(self, n, name=None)
return self._dequeue_return_value(ret)
Dequeues and concatenates `n` elements from this queue. **Note** This operation is not supported by all queues. If a queue does not support DequeueUpTo, then a `tf.errors.UnimplementedError` is raised. This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. If the queue has not been closed, all of the components in the dequeued tuple will have size `n` in the 0th dimension. If the queue is closed and there are more than `0` but fewer than `n` elements remaining, then instead of raising a `tf.errors.OutOfRangeError` like @{tf.QueueBase.dequeue_many}, less than `n` elements are returned immediately. If the queue is closed and there are `0` elements left in the queue, then a `tf.errors.OutOfRangeError` is raised just like in `dequeue_many`. Otherwise the behavior is identical to `dequeue_many`. Args: n: A scalar `Tensor` containing the number of elements to dequeue. name: A name for the operation (optional). Returns: The tuple of concatenated tensors that was dequeued.
Dequeues and concatenates `n` elements from this queue.
[ "Dequeues", "and", "concatenates", "n", "elements", "from", "this", "queue", "." ]
def dequeue_up_to(self, n, name=None): """Dequeues and concatenates `n` elements from this queue. **Note** This operation is not supported by all queues. If a queue does not support DequeueUpTo, then a `tf.errors.UnimplementedError` is raised. This operation concatenates queue-element component tensors along the 0th dimension to make a single component tensor. If the queue has not been closed, all of the components in the dequeued tuple will have size `n` in the 0th dimension. If the queue is closed and there are more than `0` but fewer than `n` elements remaining, then instead of raising a `tf.errors.OutOfRangeError` like @{tf.QueueBase.dequeue_many}, less than `n` elements are returned immediately. If the queue is closed and there are `0` elements left in the queue, then a `tf.errors.OutOfRangeError` is raised just like in `dequeue_many`. Otherwise the behavior is identical to `dequeue_many`. Args: n: A scalar `Tensor` containing the number of elements to dequeue. name: A name for the operation (optional). Returns: The tuple of concatenated tensors that was dequeued. """ if name is None: name = "%s_DequeueUpTo" % self._name ret = gen_data_flow_ops._queue_dequeue_up_to_v2( self._queue_ref, n=n, component_types=self._dtypes, name=name) # NOTE(mrry): Not using a shape function because we need access to # the Queue object. if context.in_graph_mode(): op = ret[0].op for output, shape in zip(op.values(), self._shapes): output.set_shape(tensor_shape.TensorShape([None]).concatenate(shape)) return self._dequeue_return_value(ret)
[ "def", "dequeue_up_to", "(", "self", ",", "n", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "\"%s_DequeueUpTo\"", "%", "self", ".", "_name", "ret", "=", "gen_data_flow_ops", ".", "_queue_dequeue_up_to_v2", "(", "self", ".", "_queue_ref", ",", "n", "=", "n", ",", "component_types", "=", "self", ".", "_dtypes", ",", "name", "=", "name", ")", "# NOTE(mrry): Not using a shape function because we need access to", "# the Queue object.", "if", "context", ".", "in_graph_mode", "(", ")", ":", "op", "=", "ret", "[", "0", "]", ".", "op", "for", "output", ",", "shape", "in", "zip", "(", "op", ".", "values", "(", ")", ",", "self", ".", "_shapes", ")", ":", "output", ".", "set_shape", "(", "tensor_shape", ".", "TensorShape", "(", "[", "None", "]", ")", ".", "concatenate", "(", "shape", ")", ")", "return", "self", ".", "_dequeue_return_value", "(", "ret", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/data_flow_ops.py#L483-L522
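A graph-mode sketch of the closed-queue behavior described in the docstring, assuming a TensorFlow 1.x release where `tf.FIFOQueue` and `tf.Session` exist:

    import tensorflow as tf  # 1.x assumed

    q = tf.FIFOQueue(capacity=10, dtypes=[tf.int32], shapes=[[]])
    enq = q.enqueue_many([[1, 2, 3]])
    close = q.close()
    deq = q.dequeue_up_to(5)  # asks for 5; only 3 remain after close

    with tf.Session() as sess:
        sess.run([enq, close])
        print(sess.run(deq))  # -> [1 2 3], no OutOfRangeError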
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/arrays/sparse.py
python
_sanitize_values
(arr)
return arr
return an ndarray for our input, in a platform independent manner
return an ndarray for our input, in a platform independent manner
[ "return", "an", "ndarray", "for", "our", "input", "in", "a", "platform", "independent", "manner" ]
def _sanitize_values(arr): """ return an ndarray for our input, in a platform independent manner """ if hasattr(arr, 'values'): arr = arr.values else: # scalar if is_scalar(arr): arr = [arr] # ndarray if isinstance(arr, np.ndarray): pass elif is_list_like(arr) and len(arr) > 0: arr = maybe_convert_platform(arr) else: arr = np.asarray(arr) return arr
[ "def", "_sanitize_values", "(", "arr", ")", ":", "if", "hasattr", "(", "arr", ",", "'values'", ")", ":", "arr", "=", "arr", ".", "values", "else", ":", "# scalar", "if", "is_scalar", "(", "arr", ")", ":", "arr", "=", "[", "arr", "]", "# ndarray", "if", "isinstance", "(", "arr", ",", "np", ".", "ndarray", ")", ":", "pass", "elif", "is_list_like", "(", "arr", ")", "and", "len", "(", "arr", ")", ">", "0", ":", "arr", "=", "maybe_convert_platform", "(", "arr", ")", "else", ":", "arr", "=", "np", ".", "asarray", "(", "arr", ")", "return", "arr" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/arrays/sparse.py#L1796-L1820
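The contract in miniature; this is a private pandas helper, so the import below mirrors the record's module path and is not a supported entry point:

    import numpy as np
    import pandas as pd
    from pandas.core.arrays.sparse import _sanitize_values  # private helper

    a = _sanitize_values(3)                  # scalar -> array([3])
    b = _sanitize_values(pd.Series([1, 2]))  # Series -> its .values ndarray
    c = _sanitize_values([])                 # empty list -> np.asarray([])
    assert all(isinstance(x, np.ndarray) for x in (a, b, c))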
weolar/miniblink49
1c4678db0594a4abde23d3ebbcc7cd13c3170777
v8_5_1/tools/run_perf.py
python
RunnableConfig.ChangeCWD
(self, suite_path)
Changes the cwd to the path defined in the current graph. The tests are supposed to be relative to the suite configuration.
Changes the cwd to the path defined in the current graph.
[ "Changes", "the", "cwd", "to", "the", "path", "defined", "in", "the", "current", "graph", "." ]
def ChangeCWD(self, suite_path): """Changes the cwd to the path defined in the current graph. The tests are supposed to be relative to the suite configuration. """ suite_dir = os.path.abspath(os.path.dirname(suite_path)) bench_dir = os.path.normpath(os.path.join(*self.path)) os.chdir(os.path.join(suite_dir, bench_dir))
[ "def", "ChangeCWD", "(", "self", ",", "suite_path", ")", ":", "suite_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "suite_path", ")", ")", "bench_dir", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "*", "self", ".", "path", ")", ")", "os", ".", "chdir", "(", "os", ".", "path", ".", "join", "(", "suite_dir", ",", "bench_dir", ")", ")" ]
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/v8_5_1/tools/run_perf.py#L447-L454
metashell/metashell
f4177e4854ea00c8dbc722cadab26ef413d798ea
3rd/templight/compiler-rt/lib/sanitizer_common/scripts/cpplint.py
python
ParseArguments
(args)
return filenames
Parses the command line arguments. This may set the output format and verbosity level as side-effects. Args: args: The command line arguments: Returns: The list of filenames to lint.
Parses the command line arguments.
[ "Parses", "the", "command", "line", "arguments", "." ]
def ParseArguments(args): """Parses the command line arguments. This may set the output format and verbosity level as side-effects. Args: args: The command line arguments: Returns: The list of filenames to lint. """ try: (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 'counting=', 'filter=', 'root=', 'linelength=', 'extensions=', 'headers=', 'quiet']) except getopt.GetoptError: PrintUsage('Invalid arguments.') verbosity = _VerboseLevel() output_format = _OutputFormat() filters = '' quiet = _Quiet() counting_style = '' for (opt, val) in opts: if opt == '--help': PrintUsage(None) elif opt == '--output': if val not in ('emacs', 'vs7', 'eclipse'): PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.') output_format = val elif opt == '--quiet': quiet = True elif opt == '--verbose': verbosity = int(val) elif opt == '--filter': filters = val if not filters: PrintCategories() elif opt == '--counting': if val not in ('total', 'toplevel', 'detailed'): PrintUsage('Valid counting options are total, toplevel, and detailed') counting_style = val elif opt == '--root': global _root _root = val elif opt == '--linelength': global _line_length try: _line_length = int(val) except ValueError: PrintUsage('Line length must be digits.') elif opt == '--extensions': global _valid_extensions try: _valid_extensions = set(val.split(',')) except ValueError: PrintUsage('Extensions must be comma separated list.') elif opt == '--headers': ProcessHppHeadersOption(val) if not filenames: PrintUsage('No files were specified.') _SetOutputFormat(output_format) _SetQuiet(quiet) _SetVerboseLevel(verbosity) _SetFilters(filters) _SetCountingStyle(counting_style) return filenames
[ "def", "ParseArguments", "(", "args", ")", ":", "try", ":", "(", "opts", ",", "filenames", ")", "=", "getopt", ".", "getopt", "(", "args", ",", "''", ",", "[", "'help'", ",", "'output='", ",", "'verbose='", ",", "'counting='", ",", "'filter='", ",", "'root='", ",", "'linelength='", ",", "'extensions='", ",", "'headers='", ",", "'quiet'", "]", ")", "except", "getopt", ".", "GetoptError", ":", "PrintUsage", "(", "'Invalid arguments.'", ")", "verbosity", "=", "_VerboseLevel", "(", ")", "output_format", "=", "_OutputFormat", "(", ")", "filters", "=", "''", "quiet", "=", "_Quiet", "(", ")", "counting_style", "=", "''", "for", "(", "opt", ",", "val", ")", "in", "opts", ":", "if", "opt", "==", "'--help'", ":", "PrintUsage", "(", "None", ")", "elif", "opt", "==", "'--output'", ":", "if", "val", "not", "in", "(", "'emacs'", ",", "'vs7'", ",", "'eclipse'", ")", ":", "PrintUsage", "(", "'The only allowed output formats are emacs, vs7 and eclipse.'", ")", "output_format", "=", "val", "elif", "opt", "==", "'--quiet'", ":", "quiet", "=", "True", "elif", "opt", "==", "'--verbose'", ":", "verbosity", "=", "int", "(", "val", ")", "elif", "opt", "==", "'--filter'", ":", "filters", "=", "val", "if", "not", "filters", ":", "PrintCategories", "(", ")", "elif", "opt", "==", "'--counting'", ":", "if", "val", "not", "in", "(", "'total'", ",", "'toplevel'", ",", "'detailed'", ")", ":", "PrintUsage", "(", "'Valid counting options are total, toplevel, and detailed'", ")", "counting_style", "=", "val", "elif", "opt", "==", "'--root'", ":", "global", "_root", "_root", "=", "val", "elif", "opt", "==", "'--linelength'", ":", "global", "_line_length", "try", ":", "_line_length", "=", "int", "(", "val", ")", "except", "ValueError", ":", "PrintUsage", "(", "'Line length must be digits.'", ")", "elif", "opt", "==", "'--extensions'", ":", "global", "_valid_extensions", "try", ":", "_valid_extensions", "=", "set", "(", "val", ".", "split", "(", "','", ")", ")", "except", "ValueError", ":", "PrintUsage", "(", "'Extensions must be comma separated list.'", ")", "elif", "opt", "==", "'--headers'", ":", "ProcessHppHeadersOption", "(", "val", ")", "if", "not", "filenames", ":", "PrintUsage", "(", "'No files were specified.'", ")", "_SetOutputFormat", "(", "output_format", ")", "_SetQuiet", "(", "quiet", ")", "_SetVerboseLevel", "(", "verbosity", ")", "_SetFilters", "(", "filters", ")", "_SetCountingStyle", "(", "counting_style", ")", "return", "filenames" ]
https://github.com/metashell/metashell/blob/f4177e4854ea00c8dbc722cadab26ef413d798ea/3rd/templight/compiler-rt/lib/sanitizer_common/scripts/cpplint.py#L6145-L6220
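The parser above is normally fed `sys.argv[1:]`; a direct-call sketch, assuming the file is importable as `cpplint` (equivalent to `python cpplint.py --linelength=100 --filter=-whitespace foo.cc`):

    import cpplint  # assumes cpplint.py is on sys.path

    names = cpplint.ParseArguments(
        ["--linelength=100", "--filter=-whitespace,+build/include", "foo.cc"])
    print(names)  # ['foo.cc']; verbosity, filters, etc. are set as side effects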
COVESA/ramses
86cac72b86dab4082c4d404d884db7e4ba0ed7b8
scripts/code_style_checker/check_file_attributes.py
python
check_file_attributes
(filename)
Check for unwanted file attributes
Check for unwanted file attributes
[ "Check", "for", "unwanted", "file", "attributes" ]
def check_file_attributes(filename): """ Check for unwanted file attributes """ mode = os.stat(filename).st_mode if bool(mode & stat.S_IXUSR) or bool(mode & stat.S_IXGRP) or bool(mode & stat.S_IXOTH): cc.log_warning("check_file_attributes", filename, 0, "may not have file executable bits set", "")
[ "def", "check_file_attributes", "(", "filename", ")", ":", "mode", "=", "os", ".", "stat", "(", "filename", ")", ".", "st_mode", "if", "bool", "(", "mode", "&", "stat", ".", "S_IXUSR", ")", "or", "bool", "(", "mode", "&", "stat", ".", "S_IXGRP", ")", "or", "bool", "(", "mode", "&", "stat", ".", "S_IXOTH", ")", ":", "cc", ".", "log_warning", "(", "\"check_file_attributes\"", ",", "filename", ",", "0", ",", "\"may not have file executable bits set\"", ",", "\"\"", ")" ]
https://github.com/COVESA/ramses/blob/86cac72b86dab4082c4d404d884db7e4ba0ed7b8/scripts/code_style_checker/check_file_attributes.py#L16-L21
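The same stat-bit test, inlined as a standalone check; any tracked filename works in place of the illustrative one:

    import os
    import stat

    mode = os.stat("setup.py").st_mode  # illustrative file
    executable = bool(mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
    # check_file_attributes warns whenever `executable` is True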
nnrg/opennero
43e12a1bcba6e228639db3886fec1dc47ddc24cb
mods/Maze/mazer.py
python
Maze.xy_bounds
(self, x, y)
return self.rc_bounds(r,c)
check in bounds x y
check in bounds x y
[ "check", "in", "bounds", "x", "y" ]
def xy_bounds(self, x, y): "check in bounds x y" (r,c) = self.xy2rc(x,y) return self.rc_bounds(r,c)
[ "def", "xy_bounds", "(", "self", ",", "x", ",", "y", ")", ":", "(", "r", ",", "c", ")", "=", "self", ".", "xy2rc", "(", "x", ",", "y", ")", "return", "self", ".", "rc_bounds", "(", "r", ",", "c", ")" ]
https://github.com/nnrg/opennero/blob/43e12a1bcba6e228639db3886fec1dc47ddc24cb/mods/Maze/mazer.py#L73-L76
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/kfac/python/ops/fisher_blocks.py
python
KroneckerProductFB.full_fisher_block
(self)
return self._renorm_coeff * utils.kronecker_product(left_factor, right_factor)
Explicitly constructs the full Fisher block. Used for testing purposes. (In general, the result may be very large.) Returns: The full Fisher block.
Explicitly constructs the full Fisher block.
[ "Explicitly", "constructs", "the", "full", "Fisher", "block", "." ]
def full_fisher_block(self): """Explicitly constructs the full Fisher block. Used for testing purposes. (In general, the result may be very large.) Returns: The full Fisher block. """ left_factor = self._input_factor.get_cov() right_factor = self._output_factor.get_cov() return self._renorm_coeff * utils.kronecker_product(left_factor, right_factor)
[ "def", "full_fisher_block", "(", "self", ")", ":", "left_factor", "=", "self", ".", "_input_factor", ".", "get_cov", "(", ")", "right_factor", "=", "self", ".", "_output_factor", ".", "get_cov", "(", ")", "return", "self", ".", "_renorm_coeff", "*", "utils", ".", "kronecker_product", "(", "left_factor", ",", "right_factor", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/kfac/python/ops/fisher_blocks.py#L486-L497
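The Kronecker structure behind `full_fisher_block`, shown with NumPy stand-ins for the two factors and the renormalization coefficient:

    import numpy as np

    # If A is d_in x d_in and B is d_out x d_out, the full block is the
    # (d_in*d_out) x (d_in*d_out) matrix coeff * kron(A, B).
    A = np.array([[2.0, 0.0], [0.0, 1.0]])  # stand-in for the input factor
    B = np.array([[1.0, 0.5], [0.5, 1.0]])  # stand-in for the output factor
    coeff = 0.25                            # stand-in for _renorm_coeff
    full_block = coeff * np.kron(A, B)      # shape (4, 4)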
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/vis/visualization.py
python
setPlotDuration
(name : str, time : float)
Sets the plot duration.
Sets the plot duration.
[ "Sets", "the", "plot", "duration", "." ]
def setPlotDuration(name : str, time : float) -> None: """Sets the plot duration.""" setAttribute(name,'duration',time)
[ "def", "setPlotDuration", "(", "name", ":", "str", ",", "time", ":", "float", ")", "->", "None", ":", "setAttribute", "(", "name", ",", "'duration'", ",", "time", ")" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/vis/visualization.py#L1630-L1632
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py
python
xpathParserContext.xpathCeilingFunction
(self, nargs)
Implement the ceiling() XPath function number ceiling(number) The ceiling function returns the smallest (closest to negative infinity) number that is not less than the argument and that is an integer.
Implement the ceiling() XPath function number ceiling(number) The ceiling function returns the smallest (closest to negative infinity) number that is not less than the argument and that is an integer.
[ "Implement", "the", "ceiling", "()", "XPath", "function", "number", "ceiling", "(", "number", ")", "The", "ceiling", "function", "returns", "the", "smallest", "(", "closest", "to", "negative", "infinity", ")", "number", "that", "is", "not", "less", "than", "the", "argument", "and", "that", "is", "an", "integer", "." ]
def xpathCeilingFunction(self, nargs): """Implement the ceiling() XPath function number ceiling(number) The ceiling function returns the smallest (closest to negative infinity) number that is not less than the argument and that is an integer. """ libxml2mod.xmlXPathCeilingFunction(self._o, nargs)
[ "def", "xpathCeilingFunction", "(", "self", ",", "nargs", ")", ":", "libxml2mod", ".", "xmlXPathCeilingFunction", "(", "self", ".", "_o", ",", "nargs", ")" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py#L6663-L6668
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/msvccompiler.py
python
read_keys
(base, key)
return L
Return list of registry keys.
Return list of registry keys.
[ "Return", "list", "of", "registry", "keys", "." ]
def read_keys(base, key): """Return list of registry keys.""" try: handle = RegOpenKeyEx(base, key) except RegError: return None L = [] i = 0 while True: try: k = RegEnumKey(handle, i) except RegError: break L.append(k) i += 1 return L
[ "def", "read_keys", "(", "base", ",", "key", ")", ":", "try", ":", "handle", "=", "RegOpenKeyEx", "(", "base", ",", "key", ")", "except", "RegError", ":", "return", "None", "L", "=", "[", "]", "i", "=", "0", "while", "True", ":", "try", ":", "k", "=", "RegEnumKey", "(", "handle", ",", "i", ")", "except", "RegError", ":", "break", "L", ".", "append", "(", "k", ")", "i", "+=", "1", "return", "L" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/msvccompiler.py#L55-L70
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_windows.py
python
FileDialog.SetDirectory
(*args, **kwargs)
return _windows_.FileDialog_SetDirectory(*args, **kwargs)
SetDirectory(self, String dir) Sets the default directory.
SetDirectory(self, String dir)
[ "SetDirectory", "(", "self", "String", "dir", ")" ]
def SetDirectory(*args, **kwargs): """ SetDirectory(self, String dir) Sets the default directory. """ return _windows_.FileDialog_SetDirectory(*args, **kwargs)
[ "def", "SetDirectory", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "FileDialog_SetDirectory", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_windows.py#L3159-L3165
bayandin/chromedriver
d40a2092b50f2fca817221eeb5ea093e0e642c10
util.py
python
FindProbableFreePorts
()
Get a generator returning random free ports on the system. Note that this function has an inherent race condition: some other process may bind to the port after we return it, so it may no longer be free by then. The workaround is to do this inside a retry loop. Do not use this function if there is any alternative.
Get a generator returning random free ports on the system.
[ "Get", "a", "generator", "returning", "random", "free", "ports", "on", "the", "system", "." ]
def FindProbableFreePorts(): """Get a generator returning random free ports on the system. Note that this function has an inherent race condition: some other process may bind to the port after we return it, so it may no longer be free by then. The workaround is to do this inside a retry loop. Do not use this function if there is any alternative. """ # This is the range of dynamic ports. See RFC6335 page 10. dynamic_ports = list(range(49152, 65535)) random.shuffle(dynamic_ports) for port in dynamic_ports: try: socket.create_connection(('127.0.0.1', port), 0.2).close() except socket.error: # If we can't connect to the port, then clearly nothing is listening on # it. yield port raise RuntimeError('Cannot find open port')
[ "def", "FindProbableFreePorts", "(", ")", ":", "# This is the range of dynamic ports. See RFC6335 page 10.", "dynamic_ports", "=", "list", "(", "range", "(", "49152", ",", "65535", ")", ")", "random", ".", "shuffle", "(", "dynamic_ports", ")", "for", "port", "in", "dynamic_ports", ":", "try", ":", "socket", ".", "create_connection", "(", "(", "'127.0.0.1'", ",", "port", ")", ",", "0.2", ")", ".", "close", "(", ")", "except", "socket", ".", "error", ":", "# If we can't connect to the port, then clearly nothing is listening on", "# it.", "yield", "port", "raise", "RuntimeError", "(", "'Cannot find open port'", ")" ]
https://github.com/bayandin/chromedriver/blob/d40a2092b50f2fca817221eeb5ea093e0e642c10/util.py#L249-L268
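The retry loop the docstring asks for, sketched around a bind attempt; the `util` import path is an assumption:

    import socket
    from util import FindProbableFreePorts  # path assumption

    def bind_somewhere():
        for port in FindProbableFreePorts():
            sock = socket.socket()
            try:
                sock.bind(("127.0.0.1", port))  # may still race, hence the loop
                return sock, port
            except OSError:
                sock.close()  # lost the race; try the next candidate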
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_windows.py
python
PrintDialogData.GetSelection
(*args, **kwargs)
return _windows_.PrintDialogData_GetSelection(*args, **kwargs)
GetSelection(self) -> bool
GetSelection(self) -> bool
[ "GetSelection", "(", "self", ")", "-", ">", "bool" ]
def GetSelection(*args, **kwargs): """GetSelection(self) -> bool""" return _windows_.PrintDialogData_GetSelection(*args, **kwargs)
[ "def", "GetSelection", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "PrintDialogData_GetSelection", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_windows.py#L5066-L5068
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
tools/android/loading/sandwich_prefetch.py
python
_ExtractDiscoverableUrls
( original_headers_path, loading_trace_path, subresource_discoverer)
return whitelisted_urls
Extracts discoverable resource urls from a loading trace according to a sub-resource discoverer. Args: original_headers_path: Path of JSON containing the original headers. loading_trace_path: Path of the loading trace recorded at original cache creation. subresource_discoverer: The sub-resources discoverer that should white-list the resources to keep in cache for the NoState-Prefetch benchmarks. Returns: A set of urls.
Extracts discoverable resource urls from a loading trace according to a sub-resource discoverer.
[ "Extracts", "discoverable", "resource", "urls", "from", "a", "loading", "trace", "according", "to", "a", "sub", "-", "resource", "discoverer", "." ]
def _ExtractDiscoverableUrls( original_headers_path, loading_trace_path, subresource_discoverer): """Extracts discoverable resource urls from a loading trace according to a sub-resource discoverer. Args: original_headers_path: Path of JSON containing the original headers. loading_trace_path: Path of the loading trace recorded at original cache creation. subresource_discoverer: The sub-resources discoverer that should white-list the resources to keep in cache for the NoState-Prefetch benchmarks. Returns: A set of urls. """ assert subresource_discoverer in SUBRESOURCE_DISCOVERERS, \ 'unknown prefetch simulation {}'.format(subresource_discoverer) logging.info('loading %s', loading_trace_path) trace = loading_trace.LoadingTrace.FromJsonFile(loading_trace_path) dependencies_lens = RequestDependencyLens(trace) # Build the list of discovered requests according to the desired simulation. discovered_requests = [] if subresource_discoverer == Discoverer.HTMLPreloadScannerStore: requests = _DiscoverRequests( dependencies_lens, Discoverer.HTMLPreloadScanner) discovered_requests = _PruneOutOriginalNoStoreRequests( original_headers_path, requests) else: discovered_requests = _DiscoverRequests( dependencies_lens, subresource_discoverer) whitelisted_urls = set() for request in sandwich_utils.FilterOutDataAndIncompleteRequests( discovered_requests): logging.debug('white-listing %s', request.url) whitelisted_urls.add(request.url) logging.info('number of white-listed resources: %d', len(whitelisted_urls)) return whitelisted_urls
[ "def", "_ExtractDiscoverableUrls", "(", "original_headers_path", ",", "loading_trace_path", ",", "subresource_discoverer", ")", ":", "assert", "subresource_discoverer", "in", "SUBRESOURCE_DISCOVERERS", ",", "'unknown prefetch simulation {}'", ".", "format", "(", "subresource_discoverer", ")", "logging", ".", "info", "(", "'loading %s'", ",", "loading_trace_path", ")", "trace", "=", "loading_trace", ".", "LoadingTrace", ".", "FromJsonFile", "(", "loading_trace_path", ")", "dependencies_lens", "=", "RequestDependencyLens", "(", "trace", ")", "# Build the list of discovered requests according to the desired simulation.", "discovered_requests", "=", "[", "]", "if", "subresource_discoverer", "==", "Discoverer", ".", "HTMLPreloadScannerStore", ":", "requests", "=", "_DiscoverRequests", "(", "dependencies_lens", ",", "Discoverer", ".", "HTMLPreloadScanner", ")", "discovered_requests", "=", "_PruneOutOriginalNoStoreRequests", "(", "original_headers_path", ",", "requests", ")", "else", ":", "discovered_requests", "=", "_DiscoverRequests", "(", "dependencies_lens", ",", "subresource_discoverer", ")", "whitelisted_urls", "=", "set", "(", ")", "for", "request", "in", "sandwich_utils", ".", "FilterOutDataAndIncompleteRequests", "(", "discovered_requests", ")", ":", "logging", ".", "debug", "(", "'white-listing %s'", ",", "request", ".", "url", ")", "whitelisted_urls", ".", "add", "(", "request", ".", "url", ")", "logging", ".", "info", "(", "'number of white-listed resources: %d'", ",", "len", "(", "whitelisted_urls", ")", ")", "return", "whitelisted_urls" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/android/loading/sandwich_prefetch.py#L173-L211
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/roc/target.py
python
gen_arg_access_qual_md
(fn)
return lc.MetaData.get(mod, [name] + consts)
Generate kernel_arg_access_qual metadata
Generate kernel_arg_access_qual metadata
[ "Generate", "kernel_arg_access_qual", "metadata" ]
def gen_arg_access_qual_md(fn): """ Generate kernel_arg_access_qual metadata """ mod = fn.module consts = [lc.MetaDataString.get(mod, "none")] * len(fn.args) name = lc.MetaDataString.get(mod, "kernel_arg_access_qual") return lc.MetaData.get(mod, [name] + consts)
[ "def", "gen_arg_access_qual_md", "(", "fn", ")", ":", "mod", "=", "fn", ".", "module", "consts", "=", "[", "lc", ".", "MetaDataString", ".", "get", "(", "mod", ",", "\"none\"", ")", "]", "*", "len", "(", "fn", ".", "args", ")", "name", "=", "lc", ".", "MetaDataString", ".", "get", "(", "mod", ",", "\"kernel_arg_access_qual\"", ")", "return", "lc", ".", "MetaData", ".", "get", "(", "mod", ",", "[", "name", "]", "+", "consts", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/roc/target.py#L270-L277
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_socket.py
python
SocketSerial.read
(self, size=1)
return bytes(data)
Read size bytes from the serial port. If a timeout is set it may return fewer characters than requested. With no timeout it will block until the requested number of bytes is read.
Read size bytes from the serial port. If a timeout is set it may return fewer characters than requested. With no timeout it will block until the requested number of bytes is read.
[ "Read", "size", "bytes", "from", "the", "serial", "port", ".", "If", "a", "timeout", "is", "set", "it", "may", "return", "fewer", "characters", "than", "requested", ".", "With", "no", "timeout", "it", "will", "block", "until", "the", "requested", "number", "of", "bytes", "is", "read", "." ]
def read(self, size=1): """Read size bytes from the serial port. If a timeout is set it may return fewer characters than requested. With no timeout it will block until the requested number of bytes is read.""" if not self._isOpen: raise portNotOpenError data = bytearray() if self._timeout is not None: timeout = time.time() + self._timeout else: timeout = None while len(data) < size and (timeout is None or time.time() < timeout): try: # an implementation with internal buffer would be better # performing... t = time.time() block = self._socket.recv(size - len(data)) duration = time.time() - t if block: data.extend(block) else: # no data -> EOF (connection probably closed) break except socket.timeout: # just need to get out of recv from time to time to check if # still alive continue except socket.error, e: # connection fails -> terminate loop raise SerialException('connection failed (%s)' % e) return bytes(data)
[ "def", "read", "(", "self", ",", "size", "=", "1", ")", ":", "if", "not", "self", ".", "_isOpen", ":", "raise", "portNotOpenError", "data", "=", "bytearray", "(", ")", "if", "self", ".", "_timeout", "is", "not", "None", ":", "timeout", "=", "time", ".", "time", "(", ")", "+", "self", ".", "_timeout", "else", ":", "timeout", "=", "None", "while", "len", "(", "data", ")", "<", "size", "and", "(", "timeout", "is", "None", "or", "time", ".", "time", "(", ")", "<", "timeout", ")", ":", "try", ":", "# an implementation with internal buffer would be better", "# performing...", "t", "=", "time", ".", "time", "(", ")", "block", "=", "self", ".", "_socket", ".", "recv", "(", "size", "-", "len", "(", "data", ")", ")", "duration", "=", "time", ".", "time", "(", ")", "-", "t", "if", "block", ":", "data", ".", "extend", "(", "block", ")", "else", ":", "# no data -> EOF (connection probably closed)", "break", "except", "socket", ".", "timeout", ":", "# just need to get out of recv from time to time to check if", "# still alive", "continue", "except", "socket", ".", "error", ",", "e", ":", "# connection fails -> terminate loop", "raise", "SerialException", "(", "'connection failed (%s)'", "%", "e", ")", "return", "bytes", "(", "data", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/pyserial/serial/urlhandler/protocol_socket.py#L134-L163
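The record's body is Python 2 (`except socket.error, e:`). The same deadline-bounded read loop in Python 3 syntax, as a standalone sketch rather than the library's own code:

    import socket
    import time
    from typing import Optional

    def read_exactly(sock: socket.socket, size: int,
                     timeout: Optional[float]) -> bytes:
        """Read up to size bytes, giving up once the deadline passes."""
        deadline = None if timeout is None else time.time() + timeout
        data = bytearray()
        while len(data) < size and (deadline is None or time.time() < deadline):
            try:
                block = sock.recv(size - len(data))
            except socket.timeout:
                continue  # periodic wakeup, mirroring the original loop
            if not block:
                break  # EOF: peer closed the connection
            data.extend(block)
        return bytes(data)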
wujixiu/helmet-detection
8eff5c59ddfba5a29e0b76aeb48babcb49246178
hardhat-wearing-detection/SSD-RPA/python/caffe/draw.py
python
draw_net
(caffe_net, rankdir, ext='png', phase=None)
return get_pydot_graph(caffe_net, rankdir, phase=phase).create(format=ext)
Draws a caffe net and returns the image string encoded using the given extension. Parameters ---------- caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer. ext : string, optional The image extension (the default is 'png'). phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional Include layers from this network phase. If None, include all layers. (the default is None) Returns ------- string : Postscript representation of the graph.
Draws a caffe net and returns the image string encoded using the given extension.
[ "Draws", "a", "caffe", "net", "and", "returns", "the", "image", "string", "encoded", "using", "the", "given", "extension", "." ]
def draw_net(caffe_net, rankdir, ext='png', phase=None): """Draws a caffe net and returns the image string encoded using the given extension. Parameters ---------- caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer. ext : string, optional The image extension (the default is 'png'). phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional Include layers from this network phase. If None, include all layers. (the default is None) Returns ------- string : Postscript representation of the graph. """ return get_pydot_graph(caffe_net, rankdir, phase=phase).create(format=ext)
[ "def", "draw_net", "(", "caffe_net", ",", "rankdir", ",", "ext", "=", "'png'", ",", "phase", "=", "None", ")", ":", "return", "get_pydot_graph", "(", "caffe_net", ",", "rankdir", ",", "phase", "=", "phase", ")", ".", "create", "(", "format", "=", "ext", ")" ]
https://github.com/wujixiu/helmet-detection/blob/8eff5c59ddfba5a29e0b76aeb48babcb49246178/hardhat-wearing-detection/SSD-RPA/python/caffe/draw.py#L205-L223
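A sketch of rendering a network with the function above; the prototxt filename is hypothetical, and `text_format` is the usual way to load a NetParameter:

    from caffe.proto import caffe_pb2
    from google.protobuf import text_format
    import caffe.draw

    net = caffe_pb2.NetParameter()
    with open("deploy.prototxt") as fh:  # hypothetical model file
        text_format.Merge(fh.read(), net)

    png_bytes = caffe.draw.draw_net(net, "LR", ext="png")  # left-to-right layout
    with open("net.png", "wb") as fh:
        fh.write(png_bytes)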
wyrover/book-code
7f4883d9030d553bc6bcfa3da685e34789839900
3rdparty/protobuf/python/google/protobuf/internal/decoder.py
python
_SkipFixed64
(buffer, pos, end)
return pos
Skip a fixed64 value. Returns the new position.
Skip a fixed64 value. Returns the new position.
[ "Skip", "a", "fixed64", "value", ".", "Returns", "the", "new", "position", "." ]
def _SkipFixed64(buffer, pos, end): """Skip a fixed64 value. Returns the new position.""" pos += 8 if pos > end: raise _DecodeError('Truncated message.') return pos
[ "def", "_SkipFixed64", "(", "buffer", ",", "pos", ",", "end", ")", ":", "pos", "+=", "8", "if", "pos", ">", "end", ":", "raise", "_DecodeError", "(", "'Truncated message.'", ")", "return", "pos" ]
https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/internal/decoder.py#L777-L783
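Because a fixed64 field occupies exactly eight payload bytes, skipping is pure bounds-checked pointer arithmetic; a quick trace over a 12-byte buffer:

    buf = b"\x00" * 12
    pos = _SkipFixed64(buf, 0, len(buf))  # -> 8; the bytes are never inspected
    # A second call would need bytes 8..16, but only 12 exist:
    # _SkipFixed64(buf, pos, len(buf)) raises _DecodeError('Truncated message.')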
ouster-lidar/ouster_example
13ea8e8b8a4951fb630dbc9108666995c8443bf6
python/src/ouster/client/data.py
python
ImuPacket.__init__
(self, data: BufferT, info: SensorInfo, timestamp: Optional[float] = None)
This will always alias the supplied buffer-like object. Pass in a copy to avoid unintentional aliasing. Args: data: Buffer containing the packet payload info: Metadata associated with the sensor packet stream timestamp: A capture timestamp, in microseconds Raises: ValueError: If the buffer is smaller than the size specified by the packet format
This will always alias the supplied buffer-like object. Pass in a copy to avoid unintentional aliasing.
[ "This", "will", "always", "alias", "the", "supplied", "buffer", "-", "like", "object", ".", "Pass", "in", "a", "copy", "to", "avoid", "unintentional", "aliasing", "." ]
def __init__(self, data: BufferT, info: SensorInfo, timestamp: Optional[float] = None) -> None: """ This will always alias the supplied buffer-like object. Pass in a copy to avoid unintentional aliasing. Args: data: Buffer containing the packet payload info: Metadata associated with the sensor packet stream timestamp: A capture timestamp, in microseconds Raises: ValueError: If the buffer is smaller than the size specified by the packet format """ self._pf = _client.PacketFormat.from_info(info) self._data = np.frombuffer(data, dtype=np.uint8, count=self._pf.imu_packet_size) self.capture_timestamp = timestamp
[ "def", "__init__", "(", "self", ",", "data", ":", "BufferT", ",", "info", ":", "SensorInfo", ",", "timestamp", ":", "Optional", "[", "float", "]", "=", "None", ")", "->", "None", ":", "self", ".", "_pf", "=", "_client", ".", "PacketFormat", ".", "from_info", "(", "info", ")", "self", ".", "_data", "=", "np", ".", "frombuffer", "(", "data", ",", "dtype", "=", "np", ".", "uint8", ",", "count", "=", "self", ".", "_pf", ".", "imu_packet_size", ")", "self", ".", "capture_timestamp", "=", "timestamp" ]
https://github.com/ouster-lidar/ouster_example/blob/13ea8e8b8a4951fb630dbc9108666995c8443bf6/python/src/ouster/client/data.py#L27-L50
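The aliasing warning in the docstring, reproduced in miniature; `np.frombuffer` views the caller's memory rather than copying it:

    import numpy as np

    raw = bytearray(48)  # stand-in for one packet payload
    view = np.frombuffer(raw, dtype=np.uint8, count=48)  # aliases raw
    raw[0] = 0xFF
    assert view[0] == 0xFF  # the mutation is visible through the view

    decoupled = np.frombuffer(bytes(raw), dtype=np.uint8)  # copy first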
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pkg_resources.py
python
file_ns_handler
(importer, path_item, packageName, module)
Compute an ns-package subpath for a filesystem or zipfile importer
Compute an ns-package subpath for a filesystem or zipfile importer
[ "Compute", "an", "ns", "-", "package", "subpath", "for", "a", "filesystem", "or", "zipfile", "importer" ]
def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item)==normalized: break else: # Only return the path if it's not already there return subpath
[ "def", "file_ns_handler", "(", "importer", ",", "path_item", ",", "packageName", ",", "module", ")", ":", "subpath", "=", "os", ".", "path", ".", "join", "(", "path_item", ",", "packageName", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ")", "normalized", "=", "_normalize_cached", "(", "subpath", ")", "for", "item", "in", "module", ".", "__path__", ":", "if", "_normalize_cached", "(", "item", ")", "==", "normalized", ":", "break", "else", ":", "# Only return the path if it's not already there", "return", "subpath" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pkg_resources.py#L1957-L1967
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/debug/cli/curses_ui.py
python
CursesUI._screen_terminate
(self)
Terminate the curses screen.
Terminate the curses screen.
[ "Terminate", "the", "curses", "screen", "." ]
def _screen_terminate(self): """Terminate the curses screen.""" self._stdscr.keypad(0) curses.nocbreak() curses.echo() curses.endwin()
[ "def", "_screen_terminate", "(", "self", ")", ":", "self", ".", "_stdscr", ".", "keypad", "(", "0", ")", "curses", ".", "nocbreak", "(", ")", "curses", ".", "echo", "(", ")", "curses", ".", "endwin", "(", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/debug/cli/curses_ui.py#L194-L200
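`_screen_terminate` undoes the standard curses initialization sequence; the conventional pairing, sketched with the stock `curses` module:

    import curses

    stdscr = curses.initscr()
    curses.noecho(); curses.cbreak(); stdscr.keypad(True)  # setup half
    try:
        stdscr.addstr(0, 0, "press any key"); stdscr.getkey()
    finally:
        # teardown half, matching _screen_terminate step for step
        stdscr.keypad(False); curses.nocbreak(); curses.echo(); curses.endwin()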
Kitware/ParaView
f760af9124ff4634b23ebbeab95a4f56e0261955
ThirdParty/cinema/paraview/tpl/cinema_python/adaptors/paraview/pv_introspect.py
python
get_pipeline
()
return proxies
sanitizes the pipeline graph
sanitizes the pipeline graph
[ "sanitizes", "the", "pipeline", "graph" ]
def get_pipeline(): """sanitizes the pipeline graph""" proxies = inspect(skip_invisible=False) for proxy in proxies: source = paraview.simple.FindSource(proxy['name']) numberOfProducers = source.GetNumberOfProducers() if proxy['parent'] == '0' and numberOfProducers > 0: # this proxy is the result of a merge parents = [] for i in xrange(numberOfProducers): parents.append( source.GetProducerProxy(i).GetGlobalIDAsString()) proxy['parents'] = parents else: proxy['parents'] = [proxy['parent']] del proxy['parent'] for proxy in proxies: proxy['children'] = [p['id'] for p in proxies if proxy['id'] in p['parents']] return proxies
[ "def", "get_pipeline", "(", ")", ":", "proxies", "=", "inspect", "(", "skip_invisible", "=", "False", ")", "for", "proxy", "in", "proxies", ":", "source", "=", "paraview", ".", "simple", ".", "FindSource", "(", "proxy", "[", "'name'", "]", ")", "numberOfProducers", "=", "source", ".", "GetNumberOfProducers", "(", ")", "if", "proxy", "[", "'parent'", "]", "==", "'0'", "and", "numberOfProducers", ">", "0", ":", "# this proxy is the result of a merge", "parents", "=", "[", "]", "for", "i", "in", "xrange", "(", "numberOfProducers", ")", ":", "parents", ".", "append", "(", "source", ".", "GetProducerProxy", "(", "i", ")", ".", "GetGlobalIDAsString", "(", ")", ")", "proxy", "[", "'parents'", "]", "=", "parents", "else", ":", "proxy", "[", "'parents'", "]", "=", "[", "proxy", "[", "'parent'", "]", "]", "del", "proxy", "[", "'parent'", "]", "for", "proxy", "in", "proxies", ":", "proxy", "[", "'children'", "]", "=", "[", "p", "[", "'id'", "]", "for", "p", "in", "proxies", "if", "proxy", "[", "'id'", "]", "in", "p", "[", "'parents'", "]", "]", "return", "proxies" ]
https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/ThirdParty/cinema/paraview/tpl/cinema_python/adaptors/paraview/pv_introspect.py#L192-L211
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py
python
ResourceManager.resource_filename
(self, package_or_requirement, resource_name)
return get_provider(package_or_requirement).get_resource_filename( self, resource_name )
Return a true filesystem path for specified resource
Return a true filesystem path for specified resource
[ "Return", "a", "true", "filesystem", "path", "for", "specified", "resource" ]
def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name )
[ "def", "resource_filename", "(", "self", ",", "package_or_requirement", ",", "resource_name", ")", ":", "return", "get_provider", "(", "package_or_requirement", ")", ".", "get_resource_filename", "(", "self", ",", "resource_name", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py#L1143-L1147
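A minimal usage sketch of the API in the record above; the package and resource names are hypothetical:

from pkg_resources import resource_filename  # module-level alias bound to a ResourceManager

# "mypackage" and "data/config.json" are made-up names for illustration
path = resource_filename("mypackage", "data/config.json")
print(path)  # a real filesystem path, extracted from a zip/egg if necessary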
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py2/setuptools/_vendor/six.py
python
_SixMetaPathImporter.get_code
(self, fullname)
return None
Return None Required, if is_package is implemented
Return None
[ "Return", "None" ]
def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None
[ "def", "get_code", "(", "self", ",", "fullname", ")", ":", "self", ".", "__get_module", "(", "fullname", ")", "# eventually raises ImportError", "return", "None" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/_vendor/six.py#L218-L223
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/third_party/lib_aarch64/python2.7/dist-packages/diagnostic_updater/_diagnostic_updater.py
python
Updater.publish
(self, msg)
Publishes a single diagnostic status or a vector of diagnostic statuses.
Publishes a single diagnostic status or a vector of diagnostic statuses.
[ "Publishes", "a", "single", "diagnostic", "status", "or", "a", "vector", "of", "diagnostic", "statuses", "." ]
def publish(self, msg): """Publishes a single diagnostic status or a vector of diagnostic statuses.""" if not type(msg) is list: msg = [msg] for stat in msg: stat.name = rospy.get_name()[1:]+ ": " + stat.name da = DiagnosticArray() da.status = msg da.header.stamp = rospy.Time.now() # Add timestamp for ROS 0.10 self.publisher.publish(da)
[ "def", "publish", "(", "self", ",", "msg", ")", ":", "if", "not", "type", "(", "msg", ")", "is", "list", ":", "msg", "=", "[", "msg", "]", "for", "stat", "in", "msg", ":", "stat", ".", "name", "=", "rospy", ".", "get_name", "(", ")", "[", "1", ":", "]", "+", "\": \"", "+", "stat", ".", "name", "da", "=", "DiagnosticArray", "(", ")", "da", ".", "status", "=", "msg", "da", ".", "header", ".", "stamp", "=", "rospy", ".", "Time", ".", "now", "(", ")", "# Add timestamp for ROS 0.10", "self", ".", "publisher", ".", "publish", "(", "da", ")" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_aarch64/python2.7/dist-packages/diagnostic_updater/_diagnostic_updater.py#L325-L336
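A sketch of how the publish method above is typically driven; it assumes a running ROS master, and the node name is hypothetical:

import rospy
from diagnostic_msgs.msg import DiagnosticStatus
from diagnostic_updater import Updater

rospy.init_node("sensor_monitor")  # hypothetical node name
updater = Updater()
updater.setHardwareID("none")

stat = DiagnosticStatus(name="temperature", level=DiagnosticStatus.OK, message="within range")
updater.publish(stat)  # also accepts a list; each status name is prefixed with the node name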
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/idlelib/sidebar.py
python
EndLineDelegator.__init__
(self, changed_callback)
changed_callback - Callable, will be called after insert or delete operations with the current end line number.
changed_callback - Callable, will be called after insert or delete operations with the current end line number.
[ "changed_callback", "-", "Callable", "will", "be", "called", "after", "insert", "or", "delete", "operations", "with", "the", "current", "end", "line", "number", "." ]
def __init__(self, changed_callback): """ changed_callback - Callable, will be called after insert or delete operations with the current end line number. """ Delegator.__init__(self) self.changed_callback = changed_callback
[ "def", "__init__", "(", "self", ",", "changed_callback", ")", ":", "Delegator", ".", "__init__", "(", "self", ")", "self", ".", "changed_callback", "=", "changed_callback" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/idlelib/sidebar.py#L124-L131
raspberrypi/tools
13474ee775d0c5ec8a7da4fb0a9fa84187abfc87
arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian-x64/share/gdb/python/gdb/command/explore.py
python
ReferenceExplorer.explore_type
(name, datatype, is_child)
return False
Function to explore pointer types. See Explorer.explore_type for more information.
Function to explore pointer types. See Explorer.explore_type for more information.
[ "Function", "to", "explore", "pointer", "types", ".", "See", "Explorer", ".", "explore_type", "for", "more", "information", "." ]
def explore_type(name, datatype, is_child): """Function to explore pointer types. See Explorer.explore_type for more information. """ target_type = datatype.target() Explorer.explore_type(name, target_type, is_child) return False
[ "def", "explore_type", "(", "name", ",", "datatype", ",", "is_child", ")", ":", "target_type", "=", "datatype", ".", "target", "(", ")", "Explorer", ".", "explore_type", "(", "name", ",", "target_type", ",", "is_child", ")", "return", "False" ]
https://github.com/raspberrypi/tools/blob/13474ee775d0c5ec8a7da4fb0a9fa84187abfc87/arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian-x64/share/gdb/python/gdb/command/explore.py#L313-L319
gimli-org/gimli
17aa2160de9b15ababd9ef99e89b1bc3277bbb23
pygimli/viewer/mpl/utils.py
python
wait
(**kwargs)
TODO WRITEME.
TODO WRITEME.
[ "TODO", "WRITEME", "." ]
def wait(**kwargs): """TODO WRITEME.""" # plt.pause seems to be broken in mpl:2.1 # ax.canvas.draw_onIdle() updateAxes(plt.gca()) kp = kwargs.pop('untilKeyPressed', False) if kp == True: plt.waitforbuttonpress(**kwargs) else: plt.show(**kwargs)
[ "def", "wait", "(", "*", "*", "kwargs", ")", ":", "# plt.pause seems to be broken in mpl:2.1", "# ax.canvas.draw_onIdle()", "updateAxes", "(", "plt", ".", "gca", "(", ")", ")", "kp", "=", "kwargs", ".", "pop", "(", "'untilKeyPressed'", ",", "False", ")", "if", "kp", "==", "True", ":", "plt", ".", "waitforbuttonpress", "(", "*", "*", "kwargs", ")", "else", ":", "plt", ".", "show", "(", "*", "*", "kwargs", ")" ]
https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/viewer/mpl/utils.py#L63-L72
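A sketch of the intended call pattern, assuming a figure has already been drawn with matplotlib (the import path follows the file location in the record above):

import matplotlib.pyplot as plt
from pygimli.viewer.mpl.utils import wait

plt.plot([0, 1], [0, 1])
wait(untilKeyPressed=True)  # returns on a key press instead of waiting for the window to close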
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/shortcuteditor.py
python
ShortcutEvent.__init__
(self, evtType, evtId, **kwargs)
Default class constructor. For internal use: do not call it in your code! :param integer `evtType`: the event type; :param integer `evtId`: the event identifier.
Default class constructor. For internal use: do not call it in your code!
[ "Default", "class", "constructor", ".", "For", "internal", "use", ":", "do", "not", "call", "it", "in", "your", "code!" ]
def __init__(self, evtType, evtId, **kwargs): """ Default class constructor. For internal use: do not call it in your code! :param integer `evtType`: the event type; :param integer `evtId`: the event identifier. """ wx.PyCommandEvent.__init__(self, evtType, evtId, **kwargs)
[ "def", "__init__", "(", "self", ",", "evtType", ",", "evtId", ",", "*", "*", "kwargs", ")", ":", "wx", ".", "PyCommandEvent", ".", "__init__", "(", "self", ",", "evtType", ",", "evtId", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/shortcuteditor.py#L1077-L1086
gem5/gem5
141cc37c2d4b93959d4c249b8f7e6a8b2ef75338
src/python/gem5/components/memory/multi_channel.py
python
DualChannelDDR4_2400
( size: Optional[str] = None, )
return ChanneledMemory( DDR4_2400_8x8, 2, 64, size=size, )
A dual channel memory system using DDR4_2400_8x8 based DIMM
A dual channel memory system using DDR4_2400_8x8 based DIMM
[ "A", "dual", "channel", "memory", "system", "using", "DDR4_2400_8x8", "based", "DIMM" ]
def DualChannelDDR4_2400( size: Optional[str] = None, ) -> AbstractMemorySystem: """ A dual channel memory system using DDR4_2400_8x8 based DIMM """ return ChanneledMemory( DDR4_2400_8x8, 2, 64, size=size, )
[ "def", "DualChannelDDR4_2400", "(", "size", ":", "Optional", "[", "str", "]", "=", "None", ",", ")", "->", "AbstractMemorySystem", ":", "return", "ChanneledMemory", "(", "DDR4_2400_8x8", ",", "2", ",", "64", ",", "size", "=", "size", ",", ")" ]
https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/src/python/gem5/components/memory/multi_channel.py#L63-L74
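A minimal sketch of using this factory in a gem5 standard-library script; the import path mirrors the record's file location, and the board wiring is elided:

from gem5.components.memory.multi_channel import DualChannelDDR4_2400

memory = DualChannelDDR4_2400(size="2GB")  # two DDR4_2400_8x8 channels, 64-byte interleaving
# the returned AbstractMemorySystem is then handed to a board, e.g.
# SimpleBoard(clk_freq="3GHz", processor=..., memory=memory, cache_hierarchy=...)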
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/base.py
python
IndexOpsMixin.argmin
(self, axis=None, skipna=True, *args, **kwargs)
return nanops.nanargmin(self._values, skipna=skipna)
Return a ndarray of the minimum argument indexer. Parameters ---------- axis : {None} Dummy argument for consistency with Series. skipna : bool, default True Returns ------- numpy.ndarray See Also -------- numpy.ndarray.argmin
Return a ndarray of the minimum argument indexer.
[ "Return", "a", "ndarray", "of", "the", "minimum", "argument", "indexer", "." ]
def argmin(self, axis=None, skipna=True, *args, **kwargs): """ Return a ndarray of the minimum argument indexer. Parameters ---------- axis : {None} Dummy argument for consistency with Series. skipna : bool, default True Returns ------- numpy.ndarray See Also -------- numpy.ndarray.argmin """ nv.validate_minmax_axis(axis) nv.validate_argmax_with_skipna(skipna, args, kwargs) return nanops.nanargmin(self._values, skipna=skipna)
[ "def", "argmin", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_minmax_axis", "(", "axis", ")", "nv", ".", "validate_argmax_with_skipna", "(", "skipna", ",", "args", ",", "kwargs", ")", "return", "nanops", ".", "nanargmin", "(", "self", ".", "_values", ",", "skipna", "=", "skipna", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/base.py#L979-L999
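A usage sketch: on recent pandas this returns the positional index of the minimum, unlike idxmin, which returns the label:

import pandas as pd

s = pd.Series([3.0, 1.0, 2.0], index=["a", "b", "c"])
print(s.argmin())  # 1   (position of the smallest value)
print(s.idxmin())  # 'b' (label of the smallest value)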
psi4/psi4
be533f7f426b6ccc263904e55122899b16663395
psi4/share/psi4/fsapt/fsapt.py
python
fragment_d3_disp
(d3disp: np.ndarray, frags: Dict[str, Dict[str, List[str]]])
return D3frags
Fragments atomic pairwise dispersion contributions from DFTD3 for inclusion in F-SAPT-D. Arguments --------- d3disp : numpy.ndarray[float] (NA, NB) array of atom-pairwise dispersion computed by DFTD3 frags : Dict[str, Dict[str, List[str]]] Dictionary containing fragment information read from `fA.dat` and `fB.dat` Returns ------- D3frags : Dict[str, Dict[str, float]] Dictionary containing reduced order-2 dispersion interactions between fragments
Fragments atomic pairwise dispersion contributions from DFTD3 for inclusion in F-SAPT-D. Arguments --------- d3disp : numpy.ndarray[float] (NA, NB) array of atom-pairwise dispersion computed by DFTD3 frags : Dict[str, Dict[str, List[str]]] Dictionary containing fragment information read from `fA.dat` and `fB.dat` Returns ------- D3frags : Dict[str, Dict[str, float]] Dictionary containing reduced order-2 dispersion interactions between fragments
[ "Fragments", "atomic", "pairwise", "dispersion", "contributions", "from", "DFTD3", "for", "inclusion", "in", "F", "-", "SAPT", "-", "D", ".", "Arguments", "---------", "d3disp", ":", "numpy", ".", "ndarray", "[", "float", "]", "(", "NA", "NB", ")", "array", "of", "atom", "-", "pairwise", "dispersion", "computed", "by", "DFTD3", "frags", ":", "Dict", "[", "str", "Dict", "[", "str", "List", "[", "str", "]]]", "Dictionary", "containing", "fragment", "information", "read", "from", "fA", ".", "dat", "and", "fB", ".", "dat", "Returns", "-------", "D3frags", ":", "Dict", "[", "str", "Dict", "[", "str", "float", "]]", "Dictionary", "containing", "reduced", "order", "-", "2", "dispersion", "interactions", "between", "fragments" ]
def fragment_d3_disp(d3disp: np.ndarray, frags: Dict[str, Dict[str, List[str]]]) -> Dict[str, Dict[str, float]]: """Fragments atomic pairwise dispersion contributions from DFTD3 for inclusion in F-SAPT-D. Arguments --------- d3disp : numpy.ndarray[float] (NA, NB) array of atom-pairwise dispersion computed by DFTD3 frags : Dict[str, Dict[str, List[str]]] Dictionary containing fragment information read from `fA.dat` and `fB.dat` Returns ------- D3frags : Dict[str, Dict[str, float]] Dictionary containing reduced order-2 dispersion interactions between fragments """ # Iterate over fragments, pull out relevant contributions to each D3frags = {} for fA, idA in frags['A'].items(): if 'Link' in fA: continue idA = np.array(idA) D3frags[fA] = {} for fB, idB in frags['B'].items(): if 'Link' in fB: continue fe = 0.0 for i in idA: for j in idB: fe += d3disp[i][j] # Energies read are already in kcal/mol! D3frags[fA][fB] = fe return D3frags
[ "def", "fragment_d3_disp", "(", "d3disp", ":", "np", ".", "ndarray", ",", "frags", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", ")", "->", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "float", "]", "]", ":", "# Iterate over fragments, pull out relevant contributions to each", "D3frags", "=", "{", "}", "for", "fA", ",", "idA", "in", "frags", "[", "'A'", "]", ".", "items", "(", ")", ":", "if", "'Link'", "in", "fA", ":", "continue", "idA", "=", "np", ".", "array", "(", "idA", ")", "D3frags", "[", "fA", "]", "=", "{", "}", "for", "fB", ",", "idB", "in", "frags", "[", "'B'", "]", ".", "items", "(", ")", ":", "if", "'Link'", "in", "fB", ":", "continue", "fe", "=", "0.0", "for", "i", "in", "idA", ":", "for", "j", "in", "idB", ":", "fe", "+=", "d3disp", "[", "i", "]", "[", "j", "]", "# Energies read are already in kcal/mol!", "D3frags", "[", "fA", "]", "[", "fB", "]", "=", "fe", "return", "D3frags" ]
https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/share/psi4/fsapt/fsapt.py#L436-L469
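A worked toy example with the function above in scope; note the fragment atom lists must be integer indices into d3disp (despite the List[str] annotation), and fragments whose names contain 'Link' are skipped:

import numpy as np

d3disp = np.array([[0.1, 0.2, 0.0],
                   [0.0, 0.3, 0.1],
                   [0.2, 0.0, 0.4]])  # toy (NA, NB) pairwise dispersion, kcal/mol
frags = {'A': {'Methyl': [0, 1], 'Link-1': [2]},
         'B': {'Hydroxyl': [0, 2]}}
print(fragment_d3_disp(d3disp, frags))  # {'Methyl': {'Hydroxyl': 0.2}}, i.e. 0.1 + 0.0 + 0.0 + 0.1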
ApolloAuto/apollo
463fb82f9e979d02dcb25044e60931293ab2dba0
cyber/python/cyber_py3/cyber_time.py
python
Time.sleep_until
(self, cyber_time)
return NotImplemented
sleep until time.
sleep until time.
[ "sleep", "until", "time", "." ]
def sleep_until(self, cyber_time): """ sleep until time. """ if isinstance(cyber_time, Time): return _CYBER_TIME.PyTime_sleep_until(self.time, cyber_time.to_nsec()) return NotImplemented
[ "def", "sleep_until", "(", "self", ",", "cyber_time", ")", ":", "if", "isinstance", "(", "cyber_time", ",", "Time", ")", ":", "return", "_CYBER_TIME", ".", "PyTime_sleep_until", "(", "self", ".", "time", ",", "cyber_time", ".", "to_nsec", "(", ")", ")", "return", "NotImplemented" ]
https://github.com/ApolloAuto/apollo/blob/463fb82f9e979d02dcb25044e60931293ab2dba0/cyber/python/cyber_py3/cyber_time.py#L163-L170
traveller59/spconv
647927ce6b64dc51fbec4eb50c7194f8ca5007e5
spconv/pytorch/utils.py
python
PointToVoxel.__call__
(self, pc: torch.Tensor, clear_voxels: bool = True, empty_mean: bool = False)
return res[0], res[1], res[2]
generate voxels/indices/num_point_per_voxel/pc_voxel_ids from point cloud. This function doesn't return pc_voxel_id for backward compatibility. pc_voxel_id will be added in spconv 2.2. Args: pc: [N, 3+] point cloud. clear_voxels: if True, call zero on voxels empty_mean: if True, fill empty locations of voxels with mean. Returns: voxels: voxels indices: quantized coords num_per_voxel: number of points in a voxel
generate voxels/indices/num_point_per_voxel/pc_voxel_ids from point cloud. This function doesn't return pc_voxel_id for backward compatibility. pc_voxel_id will be added in spconv 2.2. Args: pc: [N, 3+] point cloud. clear_voxels: if True, call zero on voxels empty_mean: if True, fill empty locations of voxels with mean. Returns: voxels: voxels indices: quantized coords num_per_voxel: number of points in a voxel
[ "generate", "voxels", "/", "indices", "/", "num_point_per_voxel", "/", "pc_voxel_ids", "from", "point", "cloud", ".", "This", "function", "doesn", "t", "return", "pc_voxel_id", "for", "backward", "compatibility", "pc_voxel_id", "will", "be", "added", "in", "spconv", "2", ".", "2", ".", "Args", ":", "pc", ":", "[", "N", "3", "+", "]", "point", "cloud", ".", "clear_voxels", ":", "if", "True", "call", "zero", "on", "voxels", "empty_mean", ":", "if", "True", "fill", "empty", "locations", "of", "voxels", "with", "mean", ".", "Returns", ":", "voxels", ":", "voxels", "indices", ":", "quantized", "coords", "num_per_voxel", ":", "number", "of", "points", "in", "a", "voxel" ]
def __call__(self, pc: torch.Tensor, clear_voxels: bool = True, empty_mean: bool = False): """generate voxels/indices/num_point_per_voxel/pc_voxel_ids from point cloud. This function doesn't return pc_voxel_id for backward compatibility. pc_voxel_id will be added in spconv 2.2. Args: pc: [N, 3+] point cloud. clear_voxels: if True, call zero on voxels empty_mean: if True, fill empty locations of voxels with mean. Returns: voxels: voxels indices: quantized coords num_per_voxel: number of points in a voxel """ res = self.generate_voxel_with_id(pc, clear_voxels, empty_mean) return res[0], res[1], res[2]
[ "def", "__call__", "(", "self", ",", "pc", ":", "torch", ".", "Tensor", ",", "clear_voxels", ":", "bool", "=", "True", ",", "empty_mean", ":", "bool", "=", "False", ")", ":", "res", "=", "self", ".", "generate_voxel_with_id", "(", "pc", ",", "clear_voxels", ",", "empty_mean", ")", "return", "res", "[", "0", "]", ",", "res", "[", "1", "]", ",", "res", "[", "2", "]" ]
https://github.com/traveller59/spconv/blob/647927ce6b64dc51fbec4eb50c7194f8ca5007e5/spconv/pytorch/utils.py#L70-L89
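A sketch of constructing the generator and invoking the __call__ documented above; the constructor arguments follow the spconv 2.x README and should be treated as assumptions:

import torch
from spconv.pytorch.utils import PointToVoxel

gen = PointToVoxel(vsize_xyz=[0.1, 0.1, 0.1],
                   coors_range_xyz=[-80, -80, -2, 80, 80, 6],
                   num_point_features=3,
                   max_num_voxels=5000,
                   max_num_points_per_voxel=5)
pc = torch.rand(1000, 3) * 10 - 5           # random [N, 3] point cloud
voxels, indices, num_per_voxel = gen(pc)    # the three-tuple described above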
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/platform/resource_loader.py
python
get_path_to_datafile
(path)
return _os.path.join(data_files_path, path)
Get the path to the specified file in the data dependencies. The path is relative to tensorflow/ Args: path: a string resource path relative to tensorflow/ Returns: The path to the specified file present in the data attribute of py_test or py_binary. Raises: IOError: If the path is not found, or the resource can't be opened.
Get the path to the specified file in the data dependencies.
[ "Get", "the", "path", "to", "the", "specified", "file", "in", "the", "data", "dependencies", "." ]
def get_path_to_datafile(path): """Get the path to the specified file in the data dependencies. The path is relative to tensorflow/ Args: path: a string resource path relative to tensorflow/ Returns: The path to the specified file present in the data attribute of py_test or py_binary. Raises: IOError: If the path is not found, or the resource can't be opened. """ data_files_path = _os.path.dirname(_inspect.getfile(_sys._getframe(1))) return _os.path.join(data_files_path, path)
[ "def", "get_path_to_datafile", "(", "path", ")", ":", "data_files_path", "=", "_os", ".", "path", ".", "dirname", "(", "_inspect", ".", "getfile", "(", "_sys", ".", "_getframe", "(", "1", ")", ")", ")", "return", "_os", ".", "path", ".", "join", "(", "data_files_path", ",", "path", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/platform/resource_loader.py#L104-L120
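A usage sketch: because of the _getframe(1) lookup, the path resolves relative to the caller's source directory; the data file name here is hypothetical:

from tensorflow.python.platform import resource_loader

path = resource_loader.get_path_to_datafile("testdata/example.txt")
with open(path) as f:
    print(f.read())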
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
scripts/SANS/sans/algorithm_detail/batch_execution.py
python
get_all_names_to_save
(reduction_packages, save_can)
return names_to_save
Extracts all the output names from a list of reduction packages. :param reduction_packages: a list of reduction packages :param save_can: a bool, whether or not to save unsubtracted can workspace :return: a list of workspace names to save.
Extracts all the output names from a list of reduction packages.
[ "Extracts", "all", "the", "output", "names", "from", "a", "list", "of", "reduction", "packages", "." ]
def get_all_names_to_save(reduction_packages, save_can): """ Extracts all the output names from a list of reduction packages. :param reduction_packages: a list of reduction packages :param save_can: a bool, whether or not to save unsubtracted can workspace :return: a list of workspace names to save. """ def get_ws_names_from_group(ws_group): return [ws.name() for ws in ws_group] names_to_save = [] for reduction_package in reduction_packages: reduced_lab = reduction_package.reduced_lab reduced_hab = reduction_package.reduced_hab reduced_merged = reduction_package.reduced_merged reduced_lab_can = reduction_package.reduced_lab_can reduced_hab_can = reduction_package.reduced_hab_can reduced_lab_sample = reduction_package.reduced_lab_sample reduced_hab_sample = reduction_package.reduced_hab_sample trans_name = get_transmission_names_to_save(reduction_package, False) trans_can_name = get_transmission_names_to_save(reduction_package, True) if save_can: if reduced_merged: names_to_save.append((get_ws_names_from_group(reduced_merged), trans_name, trans_can_name)) if reduced_lab: names_to_save.append((get_ws_names_from_group(reduced_lab), trans_name, trans_can_name)) if reduced_hab: names_to_save.append((get_ws_names_from_group(reduced_hab), trans_name, trans_can_name)) if reduced_lab_can: names_to_save.append((get_ws_names_from_group(reduced_lab_can), [], trans_can_name)) if reduced_hab_can: names_to_save.append((get_ws_names_from_group(reduced_hab_can), [], trans_can_name)) if reduced_lab_sample: names_to_save.append((get_ws_names_from_group(reduced_lab_sample), trans_name, [])) if reduced_hab_sample: names_to_save.append((get_ws_names_from_group(reduced_hab_sample), trans_name, [])) # If we have a merged reduction then store the merged workspaces elif reduced_merged: names_to_save.append((get_ws_names_from_group(reduced_merged), trans_name, trans_can_name)) else: if reduced_lab: names_to_save.append((get_ws_names_from_group(reduced_lab), trans_name, trans_can_name)) if reduced_hab: names_to_save.append((get_ws_names_from_group(reduced_hab), trans_name, trans_can_name)) return names_to_save
[ "def", "get_all_names_to_save", "(", "reduction_packages", ",", "save_can", ")", ":", "def", "get_ws_names_from_group", "(", "ws_group", ")", ":", "return", "[", "ws", ".", "name", "(", ")", "for", "ws", "in", "ws_group", "]", "names_to_save", "=", "[", "]", "for", "reduction_package", "in", "reduction_packages", ":", "reduced_lab", "=", "reduction_package", ".", "reduced_lab", "reduced_hab", "=", "reduction_package", ".", "reduced_hab", "reduced_merged", "=", "reduction_package", ".", "reduced_merged", "reduced_lab_can", "=", "reduction_package", ".", "reduced_lab_can", "reduced_hab_can", "=", "reduction_package", ".", "reduced_hab_can", "reduced_lab_sample", "=", "reduction_package", ".", "reduced_lab_sample", "reduced_hab_sample", "=", "reduction_package", ".", "reduced_hab_sample", "trans_name", "=", "get_transmission_names_to_save", "(", "reduction_package", ",", "False", ")", "trans_can_name", "=", "get_transmission_names_to_save", "(", "reduction_package", ",", "True", ")", "if", "save_can", ":", "if", "reduced_merged", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_merged", ")", ",", "trans_name", ",", "trans_can_name", ")", ")", "if", "reduced_lab", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_lab", ")", ",", "trans_name", ",", "trans_can_name", ")", ")", "if", "reduced_hab", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_hab", ")", ",", "trans_name", ",", "trans_can_name", ")", ")", "if", "reduced_lab_can", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_lab_can", ")", ",", "[", "]", ",", "trans_can_name", ")", ")", "if", "reduced_hab_can", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_hab_can", ")", ",", "[", "]", ",", "trans_can_name", ")", ")", "if", "reduced_lab_sample", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_lab_sample", ")", ",", "trans_name", ",", "[", "]", ")", ")", "if", "reduced_hab_sample", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_hab_sample", ")", ",", "trans_name", ",", "[", "]", ")", ")", "# If we have a merged reduction then store the merged workspaces", "elif", "reduced_merged", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_merged", ")", ",", "trans_name", ",", "trans_can_name", ")", ")", "else", ":", "if", "reduced_lab", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_lab", ")", ",", "trans_name", ",", "trans_can_name", ")", ")", "if", "reduced_hab", ":", "names_to_save", ".", "append", "(", "(", "get_ws_names_from_group", "(", "reduced_hab", ")", ",", "trans_name", ",", "trans_can_name", ")", ")", "return", "names_to_save" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/sans/algorithm_detail/batch_execution.py#L1233-L1282
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/lib/_datasource.py
python
_check_mode
(mode, encoding, newline)
Check mode and that encoding and newline are compatible. Parameters ---------- mode : str File open mode. encoding : str File encoding. newline : str Newline for text files.
Check mode and that encoding and newline are compatible.
[ "Check", "mode", "and", "that", "encoding", "and", "newline", "are", "compatible", "." ]
def _check_mode(mode, encoding, newline): """Check mode and that encoding and newline are compatible. Parameters ---------- mode : str File open mode. encoding : str File encoding. newline : str Newline for text files. """ if "t" in mode: if "b" in mode: raise ValueError("Invalid mode: %r" % (mode,)) else: if encoding is not None: raise ValueError("Argument 'encoding' not supported in binary mode") if newline is not None: raise ValueError("Argument 'newline' not supported in binary mode")
[ "def", "_check_mode", "(", "mode", ",", "encoding", ",", "newline", ")", ":", "if", "\"t\"", "in", "mode", ":", "if", "\"b\"", "in", "mode", ":", "raise", "ValueError", "(", "\"Invalid mode: %r\"", "%", "(", "mode", ",", ")", ")", "else", ":", "if", "encoding", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Argument 'encoding' not supported in binary mode\"", ")", "if", "newline", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Argument 'newline' not supported in binary mode\"", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/lib/_datasource.py#L46-L66
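The accepted and rejected combinations, derived directly from the code above:

_check_mode("rt", encoding="utf-8", newline="")  # ok: text mode may set encoding/newline
_check_mode("rb", encoding=None, newline=None)   # ok: plain binary mode
for bad in (("rbt", None, None), ("rb", "utf-8", None)):
    try:
        _check_mode(*bad)
    except ValueError as e:
        print(e)  # invalid mode / encoding not supported in binary mode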
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py
python
NinjaWriter.GetSortedXcodePostbuildEnv
(self)
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
Returns the variables Xcode would set for postbuild steps.
Returns the variables Xcode would set for postbuild steps.
[ "Returns", "the", "variables", "Xcode", "would", "set", "for", "postbuild", "steps", "." ]
def GetSortedXcodePostbuildEnv(self): """Returns the variables Xcode would set for postbuild steps.""" postbuild_settings = {} # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack. # TODO(thakis): It would be nice to have some general mechanism instead. strip_save_file = self.xcode_settings.GetPerTargetSetting( "CHROMIUM_STRIP_SAVE_FILE" ) if strip_save_file: postbuild_settings["CHROMIUM_STRIP_SAVE_FILE"] = strip_save_file return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
[ "def", "GetSortedXcodePostbuildEnv", "(", "self", ")", ":", "postbuild_settings", "=", "{", "}", "# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.", "# TODO(thakis): It would be nice to have some general mechanism instead.", "strip_save_file", "=", "self", ".", "xcode_settings", ".", "GetPerTargetSetting", "(", "\"CHROMIUM_STRIP_SAVE_FILE\"", ")", "if", "strip_save_file", ":", "postbuild_settings", "[", "\"CHROMIUM_STRIP_SAVE_FILE\"", "]", "=", "strip_save_file", "return", "self", ".", "GetSortedXcodeEnv", "(", "additional_settings", "=", "postbuild_settings", ")" ]
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py#L1704-L1714
cmu-db/noisepage
79276e68fe83322f1249e8a8be96bd63c583ae56
build-support/cpplint.py
python
CheckVlogArguments
(filename, clean_lines, linenum, error)
Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Checks that VLOG() is only used for defining a logging level.
[ "Checks", "that", "VLOG", "()", "is", "only", "used", "for", "defining", "a", "logging", "level", "." ]
def CheckVlogArguments(filename, clean_lines, linenum, error): """Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): error(filename, linenum, 'runtime/vlog', 5, 'VLOG() should be used with numeric verbosity level. ' 'Use LOG() if you want symbolic severity levels.')
[ "def", "CheckVlogArguments", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "if", "Search", "(", "r'\\bVLOG\\((INFO|ERROR|WARNING|DFATAL|FATAL)\\)'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/vlog'", ",", "5", ",", "'VLOG() should be used with numeric verbosity level. '", "'Use LOG() if you want symbolic severity levels.'", ")" ]
https://github.com/cmu-db/noisepage/blob/79276e68fe83322f1249e8a8be96bd63c583ae56/build-support/cpplint.py#L2377-L2393
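The check hinges on the single regex shown above; a standalone demonstration of what it flags:

import re

pattern = r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)'
print(bool(re.search(pattern, 'VLOG(ERROR) << "oops";')))  # True  -> reported
print(bool(re.search(pattern, 'VLOG(2) << "detail";')))    # False -> allowed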
rsummers11/CADLab
976ed959a0b5208bb4173127a7ef732ac73a9b6f
lesion_detector_3DCE/rcnn/dataset/pascal_voc.py
python
PascalVOC.selective_search_roidb
(self, gt_roidb, append_gt=False)
return roidb
get selective search roidb and ground truth roidb :param gt_roidb: ground truth roidb :param append_gt: append ground truth :return: roidb of selective search
get selective search roidb and ground truth roidb :param gt_roidb: ground truth roidb :param append_gt: append ground truth :return: roidb of selective search
[ "get", "selective", "search", "roidb", "and", "ground", "truth", "roidb", ":", "param", "gt_roidb", ":", "ground", "truth", "roidb", ":", "param", "append_gt", ":", "append", "ground", "truth", ":", "return", ":", "roidb", "of", "selective", "search" ]
def selective_search_roidb(self, gt_roidb, append_gt=False): """ get selective search roidb and ground truth roidb :param gt_roidb: ground truth roidb :param append_gt: append ground truth :return: roidb of selective search """ cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl') if os.path.exists(cache_file): with open(cache_file, 'rb') as fid: roidb = cPickle.load(fid) logger.info('%s ss roidb loaded from %s' % (self.name, cache_file)) return roidb if append_gt: logger.info('%s appending ground truth annotations' % self.name) ss_roidb = self.load_selective_search_roidb(gt_roidb) roidb = IMDB.merge_roidbs(gt_roidb, ss_roidb) else: roidb = self.load_selective_search_roidb(gt_roidb) with open(cache_file, 'wb') as fid: cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL) logger.info('%s wrote ss roidb to %s' % (self.name, cache_file)) return roidb
[ "def", "selective_search_roidb", "(", "self", ",", "gt_roidb", ",", "append_gt", "=", "False", ")", ":", "cache_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "cache_path", ",", "self", ".", "name", "+", "'_ss_roidb.pkl'", ")", "if", "os", ".", "path", ".", "exists", "(", "cache_file", ")", ":", "with", "open", "(", "cache_file", ",", "'rb'", ")", "as", "fid", ":", "roidb", "=", "cPickle", ".", "load", "(", "fid", ")", "logger", ".", "info", "(", "'%s ss roidb loaded from %s'", "%", "(", "self", ".", "name", ",", "cache_file", ")", ")", "return", "roidb", "if", "append_gt", ":", "logger", ".", "info", "(", "'%s appending ground truth annotations'", "%", "self", ".", "name", ")", "ss_roidb", "=", "self", ".", "load_selective_search_roidb", "(", "gt_roidb", ")", "roidb", "=", "IMDB", ".", "merge_roidbs", "(", "gt_roidb", ",", "ss_roidb", ")", "else", ":", "roidb", "=", "self", ".", "load_selective_search_roidb", "(", "gt_roidb", ")", "with", "open", "(", "cache_file", ",", "'wb'", ")", "as", "fid", ":", "cPickle", ".", "dump", "(", "roidb", ",", "fid", ",", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "logger", ".", "info", "(", "'%s wrote ss roidb to %s'", "%", "(", "self", ".", "name", ",", "cache_file", ")", ")", "return", "roidb" ]
https://github.com/rsummers11/CADLab/blob/976ed959a0b5208bb4173127a7ef732ac73a9b6f/lesion_detector_3DCE/rcnn/dataset/pascal_voc.py#L160-L184
SequoiaDB/SequoiaDB
2894ed7e5bd6fe57330afc900cf76d0ff0df9f64
driver/python/pysequoiadb/replicagroup.py
python
replicagroup.is_catalog
(self)
return iscatalog
Test whether current replica group is catalog replica group. Return values: bool Exceptions: pysequoiadb.error.SDBBaseError
Test whether current replica group is catalog replica group.
[ "Test", "whether", "current", "replica", "group", "is", "catalog", "replica", "group", "." ]
def is_catalog(self): """Test whether current replica group is catalog replica group. Return values: bool Exceptions: pysequoiadb.error.SDBBaseError """ iscatalog = False rc, is_cata = sdb.gp_is_catalog(self._group) raise_if_error(rc, "Failed to check if is catalog") if TRUE == is_cata: iscatalog = True return iscatalog
[ "def", "is_catalog", "(", "self", ")", ":", "iscatalog", "=", "False", "rc", ",", "is_cata", "=", "sdb", ".", "gp_is_catalog", "(", "self", ".", "_group", ")", "raise_if_error", "(", "rc", ",", "\"Failed to check if is catalog\"", ")", "if", "TRUE", "==", "is_cata", ":", "iscatalog", "=", "True", "return", "iscatalog" ]
https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/driver/python/pysequoiadb/replicagroup.py#L310-L323
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
setup.py
python
update_permissions
(path)
Update permissions. Args: path (str): Target directory path.
Update permissions.
[ "Update", "permissions", "." ]
def update_permissions(path): """ Update permissions. Args: path (str): Target directory path. """ if platform.system() == "Windows": return for dirpath, dirnames, filenames in os.walk(path): for dirname in dirnames: dir_fullpath = os.path.join(dirpath, dirname) os.chmod(dir_fullpath, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC | stat.S_IRGRP | stat.S_IXGRP) for filename in filenames: file_fullpath = os.path.join(dirpath, filename) os.chmod(file_fullpath, stat.S_IREAD)
[ "def", "update_permissions", "(", "path", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "\"Windows\"", ":", "return", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "dirname", "in", "dirnames", ":", "dir_fullpath", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "dirname", ")", "os", ".", "chmod", "(", "dir_fullpath", ",", "stat", ".", "S_IREAD", "|", "stat", ".", "S_IWRITE", "|", "stat", ".", "S_IEXEC", "|", "stat", ".", "S_IRGRP", "|", "stat", ".", "S_IXGRP", ")", "for", "filename", "in", "filenames", ":", "file_fullpath", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")", "os", ".", "chmod", "(", "file_fullpath", ",", "stat", ".", "S_IREAD", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/setup.py#L140-L157
snap-stanford/snap-python
d53c51b0a26aa7e3e7400b014cdf728948fde80a
setup/snap.py
python
TUCh.LoadXml
(self, *args)
return _snap.TUCh_LoadXml(self, *args)
LoadXml(TUCh self, PXmlTok const & XmlTok, TStr Nm) Parameters: XmlTok: PXmlTok const & Nm: TStr const &
LoadXml(TUCh self, PXmlTok const & XmlTok, TStr Nm)
[ "LoadXml", "(", "TUCh", "self", "PXmlTok", "const", "&", "XmlTok", "TStr", "Nm", ")" ]
def LoadXml(self, *args): """ LoadXml(TUCh self, PXmlTok const & XmlTok, TStr Nm) Parameters: XmlTok: PXmlTok const & Nm: TStr const & """ return _snap.TUCh_LoadXml(self, *args)
[ "def", "LoadXml", "(", "self", ",", "*", "args", ")", ":", "return", "_snap", ".", "TUCh_LoadXml", "(", "self", ",", "*", "args", ")" ]
https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L12757-L12766
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/computation/pytables.py
python
BinOp.metadata
(self)
return getattr(self.queryables.get(self.lhs), "metadata", None)
the metadata of my field
the metadata of my field
[ "the", "metadata", "of", "my", "field" ]
def metadata(self): """ the metadata of my field """ return getattr(self.queryables.get(self.lhs), "metadata", None)
[ "def", "metadata", "(", "self", ")", ":", "return", "getattr", "(", "self", ".", "queryables", ".", "get", "(", "self", ".", "lhs", ")", ",", "\"metadata\"", ",", "None", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/computation/pytables.py#L168-L170
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/utils/temp_dir.py
python
AdjacentTempDirectory._generate_names
(cls, name)
Generates a series of temporary names. The algorithm replaces the leading characters in the name with ones that are valid filesystem characters, but are not valid package names (for both Python and pip definitions of package).
Generates a series of temporary names.
[ "Generates", "a", "series", "of", "temporary", "names", "." ]
def _generate_names(cls, name): # type: (str) -> Iterator[str] """Generates a series of temporary names. The algorithm replaces the leading characters in the name with ones that are valid filesystem characters, but are not valid package names (for both Python and pip definitions of package). """ for i in range(1, len(name)): for candidate in itertools.combinations_with_replacement( cls.LEADING_CHARS, i - 1): new_name = '~' + ''.join(candidate) + name[i:] if new_name != name: yield new_name # If we make it this far, we will have to make a longer name for i in range(len(cls.LEADING_CHARS)): for candidate in itertools.combinations_with_replacement( cls.LEADING_CHARS, i): new_name = '~' + ''.join(candidate) + name if new_name != name: yield new_name
[ "def", "_generate_names", "(", "cls", ",", "name", ")", ":", "# type: (str) -> Iterator[str]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "name", ")", ")", ":", "for", "candidate", "in", "itertools", ".", "combinations_with_replacement", "(", "cls", ".", "LEADING_CHARS", ",", "i", "-", "1", ")", ":", "new_name", "=", "'~'", "+", "''", ".", "join", "(", "candidate", ")", "+", "name", "[", "i", ":", "]", "if", "new_name", "!=", "name", ":", "yield", "new_name", "# If we make it this far, we will have to make a longer name", "for", "i", "in", "range", "(", "len", "(", "cls", ".", "LEADING_CHARS", ")", ")", ":", "for", "candidate", "in", "itertools", ".", "combinations_with_replacement", "(", "cls", ".", "LEADING_CHARS", ",", "i", ")", ":", "new_name", "=", "'~'", "+", "''", ".", "join", "(", "candidate", ")", "+", "name", "if", "new_name", "!=", "name", ":", "yield", "new_name" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/utils/temp_dir.py#L473-L517
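A self-contained sketch of the generation order; LEADING_CHARS is an assumption here, since the class constant is not shown in the record:

import itertools

LEADING_CHARS = "-~.=%0123456789"  # assumed value of the class constant

def generate_names(name, leading=LEADING_CHARS):
    # mirrors the method above, lifted out of the class for demonstration
    for i in range(1, len(name)):
        for candidate in itertools.combinations_with_replacement(leading, i - 1):
            new_name = '~' + ''.join(candidate) + name[i:]
            if new_name != name:
                yield new_name
    for i in range(len(leading)):
        for candidate in itertools.combinations_with_replacement(leading, i):
            new_name = '~' + ''.join(candidate) + name
            if new_name != name:
                yield new_name

gen = generate_names("pip-build")
print([next(gen) for _ in range(3)])  # ['~ip-build', '~-p-build', '~~p-build']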
p4lang/p4c
3272e79369f20813cc1a555a5eb26f44432f84a4
tools/cpplint.py
python
_IsType
(clean_lines, nesting_state, expr)
return False
Check if expression looks like a type name, returns true if so. Args: clean_lines: A CleansedLines instance containing the file. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. expr: The expression to check. Returns: True, if token looks like a type.
Check if expression looks like a type name, returns true if so.
[ "Check", "if", "expression", "looks", "like", "a", "type", "name", "returns", "true", "if", "so", "." ]
def _IsType(clean_lines, nesting_state, expr): """Check if expression looks like a type name, returns true if so. Args: clean_lines: A CleansedLines instance containing the file. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. expr: The expression to check. Returns: True, if token looks like a type. """ # Keep only the last token in the expression last_word = Match(r'^.*(\b\S+)$', expr) if last_word: token = last_word.group(1) else: token = expr # Match native types and stdint types if _TYPES.match(token): return True # Try a bit harder to match templated types. Walk up the nesting # stack until we find something that resembles a typename # declaration for what we are looking for. typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) + r'\b') block_index = len(nesting_state.stack) - 1 while block_index >= 0: if isinstance(nesting_state.stack[block_index], _NamespaceInfo): return False # Found where the opening brace is. We want to scan from this # line up to the beginning of the function, minus a few lines. # template <typename Type1, // stop scanning here # ...> # class C # : public ... { // start scanning here last_line = nesting_state.stack[block_index].starting_linenum next_block_start = 0 if block_index > 0: next_block_start = nesting_state.stack[block_index - 1].starting_linenum first_line = last_line while first_line >= next_block_start: if clean_lines.elided[first_line].find('template') >= 0: break first_line -= 1 if first_line < next_block_start: # Didn't find any "template" keyword before reaching the next block, # there are probably no template things to check for this block block_index -= 1 continue # Look for typename in the specified range for i in xrange(first_line, last_line + 1, 1): if Search(typename_pattern, clean_lines.elided[i]): return True block_index -= 1 return False
[ "def", "_IsType", "(", "clean_lines", ",", "nesting_state", ",", "expr", ")", ":", "# Keep only the last token in the expression", "last_word", "=", "Match", "(", "r'^.*(\\b\\S+)$'", ",", "expr", ")", "if", "last_word", ":", "token", "=", "last_word", ".", "group", "(", "1", ")", "else", ":", "token", "=", "expr", "# Match native types and stdint types", "if", "_TYPES", ".", "match", "(", "token", ")", ":", "return", "True", "# Try a bit harder to match templated types. Walk up the nesting", "# stack until we find something that resembles a typename", "# declaration for what we are looking for.", "typename_pattern", "=", "(", "r'\\b(?:typename|class|struct)\\s+'", "+", "re", ".", "escape", "(", "token", ")", "+", "r'\\b'", ")", "block_index", "=", "len", "(", "nesting_state", ".", "stack", ")", "-", "1", "while", "block_index", ">=", "0", ":", "if", "isinstance", "(", "nesting_state", ".", "stack", "[", "block_index", "]", ",", "_NamespaceInfo", ")", ":", "return", "False", "# Found where the opening brace is. We want to scan from this", "# line up to the beginning of the function, minus a few lines.", "# template <typename Type1, // stop scanning here", "# ...>", "# class C", "# : public ... { // start scanning here", "last_line", "=", "nesting_state", ".", "stack", "[", "block_index", "]", ".", "starting_linenum", "next_block_start", "=", "0", "if", "block_index", ">", "0", ":", "next_block_start", "=", "nesting_state", ".", "stack", "[", "block_index", "-", "1", "]", ".", "starting_linenum", "first_line", "=", "last_line", "while", "first_line", ">=", "next_block_start", ":", "if", "clean_lines", ".", "elided", "[", "first_line", "]", ".", "find", "(", "'template'", ")", ">=", "0", ":", "break", "first_line", "-=", "1", "if", "first_line", "<", "next_block_start", ":", "# Didn't find any \"template\" keyword before reaching the next block,", "# there are probably no template things to check for this block", "block_index", "-=", "1", "continue", "# Look for typename in the specified range", "for", "i", "in", "xrange", "(", "first_line", ",", "last_line", "+", "1", ",", "1", ")", ":", "if", "Search", "(", "typename_pattern", ",", "clean_lines", ".", "elided", "[", "i", "]", ")", ":", "return", "True", "block_index", "-=", "1", "return", "False" ]
https://github.com/p4lang/p4c/blob/3272e79369f20813cc1a555a5eb26f44432f84a4/tools/cpplint.py#L3993-L4053
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/xrc.py
python
XmlResourceHandler.GetStyle
(*args, **kwargs)
return _xrc.XmlResourceHandler_GetStyle(*args, **kwargs)
GetStyle(self, String param=StyleString, int defaults=0) -> int
GetStyle(self, String param=StyleString, int defaults=0) -> int
[ "GetStyle", "(", "self", "String", "param", "=", "StyleString", "int", "defaults", "=", "0", ")", "-", ">", "int" ]
def GetStyle(*args, **kwargs): """GetStyle(self, String param=StyleString, int defaults=0) -> int""" return _xrc.XmlResourceHandler_GetStyle(*args, **kwargs)
[ "def", "GetStyle", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_xrc", ".", "XmlResourceHandler_GetStyle", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/xrc.py#L655-L657
microsoft/ELL
a1d6bacc37a14879cc025d9be2ba40b1a0632315
tools/utilities/pythonlibs/modelHelpers.py
python
get_mean_duration
(accumulated, duration, maxAccumulatedEntries=30)
return mean
Add a duration to an array and calculate the mean duration.
Add a duration to an array and calculate the mean duration.
[ "Add", "a", "duration", "to", "an", "array", "and", "calculate", "the", "mean", "duration", "." ]
def get_mean_duration(accumulated, duration, maxAccumulatedEntries=30): """ Add a duration to an array and calculate the mean duration. """ accumulated.append(duration) if (len(accumulated) > maxAccumulatedEntries): accumulated.pop(0) durations = np.array(accumulated) mean = np.mean(durations) return mean
[ "def", "get_mean_duration", "(", "accumulated", ",", "duration", ",", "maxAccumulatedEntries", "=", "30", ")", ":", "accumulated", ".", "append", "(", "duration", ")", "if", "(", "len", "(", "accumulated", ")", ">", "maxAccumulatedEntries", ")", ":", "accumulated", ".", "pop", "(", "0", ")", "durations", "=", "np", ".", "array", "(", "accumulated", ")", "mean", "=", "np", ".", "mean", "(", "durations", ")", "return", "mean" ]
https://github.com/microsoft/ELL/blob/a1d6bacc37a14879cc025d9be2ba40b1a0632315/tools/utilities/pythonlibs/modelHelpers.py#L80-L88
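A usage sketch: callers keep the accumulator list alive between frames and read back the rolling mean:

durations = []
for frame_time in (0.03, 0.05, 0.04):
    mean = get_mean_duration(durations, frame_time)
print(round(mean, 4))  # 0.04, the mean over the (at most 30) retained samples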
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/ros_comm/rospy/src/rospy/impl/udpros.py
python
UDPROSTransport.receive_once
(self)
block until messages are read off of socket @return: list of newly received messages @rtype: [Msg] @raise TransportException: if unable to receive message due to error
block until messages are read off of socket
[ "block", "until", "messages", "are", "read", "off", "of", "socket" ]
def receive_once(self): """ block until messages are read off of socket @return: list of newly received messages @rtype: [Msg] @raise TransportException: if unable to receive message due to error """ pass
[ "def", "receive_once", "(", "self", ")", ":", "pass" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/ros_comm/rospy/src/rospy/impl/udpros.py#L282-L289
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/distutils/filelist.py
python
FileList.exclude_pattern
(self, pattern, anchor=1, prefix=None, is_regex=0)
return files_found
Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return 1 if files are found.
Remove strings (presumably filenames) from 'files' that match 'pattern'.
[ "Remove", "strings", "(", "presumably", "filenames", ")", "from", "files", "that", "match", "pattern", "." ]
def exclude_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): """Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return 1 if files are found. """ files_found = 0 pattern_re = translate_pattern(pattern, anchor, prefix, is_regex) self.debug_print("exclude_pattern: applying regex r'%s'" % pattern_re.pattern) for i in range(len(self.files)-1, -1, -1): if pattern_re.search(self.files[i]): self.debug_print(" removing " + self.files[i]) del self.files[i] files_found = 1 return files_found
[ "def", "exclude_pattern", "(", "self", ",", "pattern", ",", "anchor", "=", "1", ",", "prefix", "=", "None", ",", "is_regex", "=", "0", ")", ":", "files_found", "=", "0", "pattern_re", "=", "translate_pattern", "(", "pattern", ",", "anchor", ",", "prefix", ",", "is_regex", ")", "self", ".", "debug_print", "(", "\"exclude_pattern: applying regex r'%s'\"", "%", "pattern_re", ".", "pattern", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "files", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "if", "pattern_re", ".", "search", "(", "self", ".", "files", "[", "i", "]", ")", ":", "self", ".", "debug_print", "(", "\" removing \"", "+", "self", ".", "files", "[", "i", "]", ")", "del", "self", ".", "files", "[", "i", "]", "files_found", "=", "1", "return", "files_found" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/distutils/filelist.py#L232-L250
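A usage sketch against the standard distutils API:

from distutils.filelist import FileList

fl = FileList()
fl.files = ['a.py', 'a.pyc', 'docs/b.txt']
found = fl.exclude_pattern('*.pyc')  # anchored glob; '*' does not cross '/'
print(found, fl.files)               # 1 ['a.py', 'docs/b.txt']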
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/_compat.py
python
with_metaclass
(meta, *bases)
return type.__new__(metaclass, 'temporary_class', (), {})
Create a base class with a metaclass.
Create a base class with a metaclass.
[ "Create", "a", "base", "class", "with", "a", "metaclass", "." ]
def with_metaclass(meta, *bases): """ Create a base class with a metaclass. """ # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {})
[ "def", "with_metaclass", "(", "meta", ",", "*", "bases", ")", ":", "# This requires a bit of explanation: the basic idea is to make a dummy", "# metaclass for one level of class instantiation that replaces itself with", "# the actual metaclass.", "class", "metaclass", "(", "meta", ")", ":", "def", "__new__", "(", "cls", ",", "name", ",", "this_bases", ",", "d", ")", ":", "return", "meta", "(", "name", ",", "bases", ",", "d", ")", "return", "type", ".", "__new__", "(", "metaclass", ",", "'temporary_class'", ",", "(", ")", ",", "{", "}", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/_compat.py#L20-L30
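The classic Python 2/3-compatible call pattern this helper enables, with a toy metaclass:

class Meta(type):
    def __new__(mcls, name, bases, namespace):
        namespace.setdefault('created_by', mcls.__name__)
        return super(Meta, mcls).__new__(mcls, name, bases, namespace)

class Base(object):
    pass

class MyClass(with_metaclass(Meta, Base)):
    pass

print(type(MyClass).__name__, MyClass.created_by)  # Meta Meta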
xbmc/xbmc
091211a754589fc40a2a1f239b0ce9f4ee138268
addons/service.xbmc.versioncheck/resources/lib/version_check/common.py
python
upgrade_message
(msg)
Prompt user with upgrade suggestion message :param msg: string id for prompt message :type msg: int
Prompt user with upgrade suggestion message
[ "Prompt", "user", "with", "upgrade", "suggestion", "message" ]
def upgrade_message(msg): """ Prompt user with upgrade suggestion message :param msg: string id for prompt message :type msg: int """ wait_for_end_of_video() if ADDON.getSetting('lastnotified_version') < ADDON_VERSION: xbmcgui.Dialog().ok( ADDON_NAME, '[CR]'.join([localise(msg), localise(32001), localise(32002)]) ) else: log('Already notified one time for upgrading.')
[ "def", "upgrade_message", "(", "msg", ")", ":", "wait_for_end_of_video", "(", ")", "if", "ADDON", ".", "getSetting", "(", "'lastnotified_version'", ")", "<", "ADDON_VERSION", ":", "xbmcgui", ".", "Dialog", "(", ")", ".", "ok", "(", "ADDON_NAME", ",", "'[CR]'", ".", "join", "(", "[", "localise", "(", "msg", ")", ",", "localise", "(", "32001", ")", ",", "localise", "(", "32002", ")", "]", ")", ")", "else", ":", "log", "(", "'Already notified one time for upgrading.'", ")" ]
https://github.com/xbmc/xbmc/blob/091211a754589fc40a2a1f239b0ce9f4ee138268/addons/service.xbmc.versioncheck/resources/lib/version_check/common.py#L166-L180
larroy/clearskies_core
3574ddf0edc8555454c7044126e786a6c29444dc
tools/gyp/pylib/gyp/xcodeproj_file.py
python
XCConfigurationList.ConfigurationNamed
(self, name)
Convenience accessor to obtain an XCBuildConfiguration by name.
Convenience accessor to obtain an XCBuildConfiguration by name.
[ "Convenience", "accessor", "to", "obtain", "an", "XCBuildConfiguration", "by", "name", "." ]
def ConfigurationNamed(self, name): """Convenience accessor to obtain an XCBuildConfiguration by name.""" for configuration in self._properties['buildConfigurations']: if configuration._properties['name'] == name: return configuration raise KeyError, name
[ "def", "ConfigurationNamed", "(", "self", ",", "name", ")", ":", "for", "configuration", "in", "self", ".", "_properties", "[", "'buildConfigurations'", "]", ":", "if", "configuration", ".", "_properties", "[", "'name'", "]", "==", "name", ":", "return", "configuration", "raise", "KeyError", ",", "name" ]
https://github.com/larroy/clearskies_core/blob/3574ddf0edc8555454c7044126e786a6c29444dc/tools/gyp/pylib/gyp/xcodeproj_file.py#L1603-L1609
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/jira/client.py
python
JIRA.client_info
(self)
return self._options['server']
Get the server this client is connected to.
Get the server this client is connected to.
[ "Get", "the", "server", "this", "client", "is", "connected", "to", "." ]
def client_info(self): """Get the server this client is connected to.""" return self._options['server']
[ "def", "client_info", "(", "self", ")", ":", "return", "self", ".", "_options", "[", "'server'", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/jira/client.py#L639-L641
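A minimal sketch of client_info in use; the server URL is a placeholder, and real use would normally also pass authentication options:
from jira import JIRA

jira = JIRA(server='https://jira.example.com')
print(jira.client_info())  # -> 'https://jira.example.com'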
OSVR/OSVR-Core
495648e4c94d6e8a1ffb74aa00b69deada1a9b51
vendor/eigen/debug/gdb/printers.py
python
EigenQuaternionPrinter.__init__
(self, val)
Extract all the necessary information
Extract all the necessary information
[ "Extract", "all", "the", "necessary", "information" ]
def __init__(self, val): "Extract all the necessary information" # The gdb extension does not support value template arguments - need to extract them by hand type = val.type if type.code == gdb.TYPE_CODE_REF: type = type.target() self.type = type.unqualified().strip_typedefs() self.innerType = self.type.template_argument(0) self.val = val # Quaternions have a struct as their storage, so we need to walk through this self.data = self.val['m_coeffs']['m_storage']['m_data']['array'] self.data = self.data.cast(self.innerType.pointer())
[ "def", "__init__", "(", "self", ",", "val", ")", ":", "# The gdb extension does not support value template arguments - need to extract them by hand", "type", "=", "val", ".", "type", "if", "type", ".", "code", "==", "gdb", ".", "TYPE_CODE_REF", ":", "type", "=", "type", ".", "target", "(", ")", "self", ".", "type", "=", "type", ".", "unqualified", "(", ")", ".", "strip_typedefs", "(", ")", "self", ".", "innerType", "=", "self", ".", "type", ".", "template_argument", "(", "0", ")", "self", ".", "val", "=", "val", "# Quaternions have a struct as their storage, so we need to walk through this", "self", ".", "data", "=", "self", ".", "val", "[", "'m_coeffs'", "]", "[", "'m_storage'", "]", "[", "'m_data'", "]", "[", "'array'", "]", "self", ".", "data", "=", "self", ".", "data", ".", "cast", "(", "self", ".", "innerType", ".", "pointer", "(", ")", ")" ]
https://github.com/OSVR/OSVR-Core/blob/495648e4c94d6e8a1ffb74aa00b69deada1a9b51/vendor/eigen/debug/gdb/printers.py#L135-L147
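A sketch of how a printer like this is usually wired into gdb's lookup chain; the lookup function name and the regex are assumptions modeled on Eigen's printers.py rather than code from this record:
import re
import gdb

def hypothetical_eigen_lookup(val):
    # Dispatch Eigen::Quaternion values to the printer defined above.
    typename = str(val.type.strip_typedefs())
    if re.match(r'^Eigen::Quaternion<.*>$', typename):
        return EigenQuaternionPrinter(val)
    return None

gdb.pretty_printers.append(hypothetical_eigen_lookup)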
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/botocore/vendored/requests/sessions.py
python
SessionRedirectMixin.resolve_redirects
(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, **adapter_kwargs)
Receives a Response. Returns a generator of Responses.
Receives a Response. Returns a generator of Responses.
[ "Receives", "a", "Response", ".", "Returns", "a", "generator", "of", "Responses", "." ]
def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, **adapter_kwargs): """Receives a Response. Returns a generator of Responses.""" i = 0 hist = [] # keep track of history while resp.is_redirect: prepared_request = req.copy() if i > 0: # Update history and keep track of redirects. hist.append(resp) new_hist = list(hist) resp.history = new_hist try: resp.content # Consume socket so it can be released except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if i >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects) # Release the connection back into the pool. resp.close() url = resp.headers['location'] method = req.method # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = '%s:%s' % (parsed_rurl.scheme, url) # The scheme should be lower case... parsed = urlparse(url) url = parsed.geturl() # Facilitate relative 'location' headers, as allowed by RFC 7231. # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) # Cache the url, unless it redirects to itself. if resp.is_permanent_redirect and req.url != prepared_request.url: self.redirect_cache[req.url] = prepared_request.url # http://tools.ietf.org/html/rfc7231#section-6.4.4 if (resp.status_code == codes.see_other and method != 'HEAD'): method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. if resp.status_code == codes.found and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. if resp.status_code == codes.moved and method == 'POST': method = 'GET' prepared_request.method = method # https://github.com/kennethreitz/requests/issues/1084 if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): if 'Content-Length' in prepared_request.headers: del prepared_request.headers['Content-Length'] prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared # request, use the old one that we haven't yet touched. extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) prepared_request._cookies.update(self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) # Rebuild auth and proxy information. proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) # Override the original request. req = prepared_request resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) i += 1 yield resp
[ "def", "resolve_redirects", "(", "self", ",", "resp", ",", "req", ",", "stream", "=", "False", ",", "timeout", "=", "None", ",", "verify", "=", "True", ",", "cert", "=", "None", ",", "proxies", "=", "None", ",", "*", "*", "adapter_kwargs", ")", ":", "i", "=", "0", "hist", "=", "[", "]", "# keep track of history", "while", "resp", ".", "is_redirect", ":", "prepared_request", "=", "req", ".", "copy", "(", ")", "if", "i", ">", "0", ":", "# Update history and keep track of redirects.", "hist", ".", "append", "(", "resp", ")", "new_hist", "=", "list", "(", "hist", ")", "resp", ".", "history", "=", "new_hist", "try", ":", "resp", ".", "content", "# Consume socket so it can be released", "except", "(", "ChunkedEncodingError", ",", "ContentDecodingError", ",", "RuntimeError", ")", ":", "resp", ".", "raw", ".", "read", "(", "decode_content", "=", "False", ")", "if", "i", ">=", "self", ".", "max_redirects", ":", "raise", "TooManyRedirects", "(", "'Exceeded %s redirects.'", "%", "self", ".", "max_redirects", ")", "# Release the connection back into the pool.", "resp", ".", "close", "(", ")", "url", "=", "resp", ".", "headers", "[", "'location'", "]", "method", "=", "req", ".", "method", "# Handle redirection without scheme (see: RFC 1808 Section 4)", "if", "url", ".", "startswith", "(", "'//'", ")", ":", "parsed_rurl", "=", "urlparse", "(", "resp", ".", "url", ")", "url", "=", "'%s:%s'", "%", "(", "parsed_rurl", ".", "scheme", ",", "url", ")", "# The scheme should be lower case...", "parsed", "=", "urlparse", "(", "url", ")", "url", "=", "parsed", ".", "geturl", "(", ")", "# Facilitate relative 'location' headers, as allowed by RFC 7231.", "# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')", "# Compliant with RFC3986, we percent encode the url.", "if", "not", "parsed", ".", "netloc", ":", "url", "=", "urljoin", "(", "resp", ".", "url", ",", "requote_uri", "(", "url", ")", ")", "else", ":", "url", "=", "requote_uri", "(", "url", ")", "prepared_request", ".", "url", "=", "to_native_string", "(", "url", ")", "# Cache the url, unless it redirects to itself.", "if", "resp", ".", "is_permanent_redirect", "and", "req", ".", "url", "!=", "prepared_request", ".", "url", ":", "self", ".", "redirect_cache", "[", "req", ".", "url", "]", "=", "prepared_request", ".", "url", "# http://tools.ietf.org/html/rfc7231#section-6.4.4", "if", "(", "resp", ".", "status_code", "==", "codes", ".", "see_other", "and", "method", "!=", "'HEAD'", ")", ":", "method", "=", "'GET'", "# Do what the browsers do, despite standards...", "# First, turn 302s into GETs.", "if", "resp", ".", "status_code", "==", "codes", ".", "found", "and", "method", "!=", "'HEAD'", ":", "method", "=", "'GET'", "# Second, if a POST is responded to with a 301, turn it into a GET.", "# This bizarre behaviour is explained in Issue 1704.", "if", "resp", ".", "status_code", "==", "codes", ".", "moved", "and", "method", "==", "'POST'", ":", "method", "=", "'GET'", "prepared_request", ".", "method", "=", "method", "# https://github.com/kennethreitz/requests/issues/1084", "if", "resp", ".", "status_code", "not", "in", "(", "codes", ".", "temporary_redirect", ",", "codes", ".", "permanent_redirect", ")", ":", "if", "'Content-Length'", "in", "prepared_request", ".", "headers", ":", "del", "prepared_request", ".", "headers", "[", "'Content-Length'", "]", "prepared_request", ".", "body", "=", "None", "headers", "=", "prepared_request", ".", "headers", "try", ":", "del", "headers", "[", "'Cookie'", "]", "except", "KeyError", ":", "pass", "# 
Extract any cookies sent on the response to the cookiejar", "# in the new request. Because we've mutated our copied prepared", "# request, use the old one that we haven't yet touched.", "extract_cookies_to_jar", "(", "prepared_request", ".", "_cookies", ",", "req", ",", "resp", ".", "raw", ")", "prepared_request", ".", "_cookies", ".", "update", "(", "self", ".", "cookies", ")", "prepared_request", ".", "prepare_cookies", "(", "prepared_request", ".", "_cookies", ")", "# Rebuild auth and proxy information.", "proxies", "=", "self", ".", "rebuild_proxies", "(", "prepared_request", ",", "proxies", ")", "self", ".", "rebuild_auth", "(", "prepared_request", ",", "resp", ")", "# Override the original request.", "req", "=", "prepared_request", "resp", "=", "self", ".", "send", "(", "req", ",", "stream", "=", "stream", ",", "timeout", "=", "timeout", ",", "verify", "=", "verify", ",", "cert", "=", "cert", ",", "proxies", "=", "proxies", ",", "allow_redirects", "=", "False", ",", "*", "*", "adapter_kwargs", ")", "extract_cookies_to_jar", "(", "self", ".", "cookies", ",", "prepared_request", ",", "resp", ".", "raw", ")", "i", "+=", "1", "yield", "resp" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/vendored/requests/sessions.py#L92-L202
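resolve_redirects is internal plumbing: Session.send drives the generator when allow_redirects is enabled and collects each yielded hop into response.history. A small observational sketch, with a placeholder URL:
import requests

session = requests.Session()
resp = session.get('http://example.com/some-redirecting-path', allow_redirects=True)
for hop in resp.history:  # earlier responses in the redirect chain
    print(hop.status_code, hop.headers.get('location'))
print(resp.status_code, resp.url)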
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/aui.py
python
AuiToolBarItem.SetMinSize
(*args, **kwargs)
return _aui.AuiToolBarItem_SetMinSize(*args, **kwargs)
SetMinSize(self, Size s)
SetMinSize(self, Size s)
[ "SetMinSize", "(", "self", "Size", "s", ")" ]
def SetMinSize(*args, **kwargs): """SetMinSize(self, Size s)""" return _aui.AuiToolBarItem_SetMinSize(*args, **kwargs)
[ "def", "SetMinSize", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_aui", ".", "AuiToolBarItem_SetMinSize", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/aui.py#L1817-L1819
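A hedged wxPython Classic sketch around AuiToolBarItem.SetMinSize; `frame` and `bitmap` are assumed to exist, and AddTool's exact overload varies between wx builds:
import wx
import wx.aui

# Assumes an existing wx.Frame (`frame`) and a wx.Bitmap (`bitmap`).
toolbar = wx.aui.AuiToolBar(frame)
item = toolbar.AddTool(wx.ID_ANY, 'Run', bitmap)
item.SetMinSize(wx.Size(32, 32))
toolbar.Realize()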
TheLegendAli/DeepLab-Context
fb04e9e2fc2682490ad9f60533b9d6c4c0e0479c
python/caffe/pycaffe.py
python
_Net_set_input_scale
(self, input_, scale)
Set the scale of preprocessed inputs s.t. the blob = blob * scale. N.B. input_scale is done AFTER mean subtraction and other preprocessing while raw_scale is done BEFORE. Take input_: which input to assign this scale factor scale: scale coefficient
Set the scale of preprocessed inputs s.t. the blob = blob * scale. N.B. input_scale is done AFTER mean subtraction and other preprocessing while raw_scale is done BEFORE.
[ "Set", "the", "scale", "of", "preprocessed", "inputs", "s", ".", "t", ".", "the", "blob", "=", "blob", "*", "scale", ".", "N", ".", "B", ".", "input_scale", "is", "done", "AFTER", "mean", "subtraction", "and", "other", "preprocessing", "while", "raw_scale", "is", "done", "BEFORE", "." ]
def _Net_set_input_scale(self, input_, scale): """ Set the scale of preprocessed inputs s.t. the blob = blob * scale. N.B. input_scale is done AFTER mean subtraction and other preprocessing while raw_scale is done BEFORE. Take input_: which input to assign this scale factor scale: scale coefficient """ if input_ not in self.inputs: raise Exception('Input not in {}'.format(self.inputs)) self.input_scale[input_] = scale
[ "def", "_Net_set_input_scale", "(", "self", ",", "input_", ",", "scale", ")", ":", "if", "input_", "not", "in", "self", ".", "inputs", ":", "raise", "Exception", "(", "'Input not in {}'", ".", "format", "(", "self", ".", "inputs", ")", ")", "self", ".", "input_scale", "[", "input_", "]", "=", "scale" ]
https://github.com/TheLegendAli/DeepLab-Context/blob/fb04e9e2fc2682490ad9f60533b9d6c4c0e0479c/python/caffe/pycaffe.py#L230-L242
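A sketch in the old pycaffe style, where _Net_set_input_scale is bound to Net as set_input_scale; the model file names, the 'data' blob name, and the 1/255 scale are illustrative assumptions:
import caffe

# Illustrative model files; the scale is applied AFTER mean subtraction,
# unlike raw_scale, which is applied BEFORE.
net = caffe.Net('deploy.prototxt', 'weights.caffemodel')
net.set_input_scale('data', 1.0 / 255)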