Dataset schema (column name, feature type, and observed string-length range or class count):

| column            | type          | lengths / values |
|-------------------|---------------|------------------|
| nwo               | string        | 5 to 86          |
| sha               | string        | 40 to 40         |
| path              | string        | 4 to 189         |
| language          | string class  | 1 value          |
| identifier        | string        | 1 to 94          |
| parameters        | string        | 2 to 4.03k       |
| argument_list     | string class  | 1 value          |
| return_statement  | string        | 0 to 11.5k       |
| docstring         | string        | 1 to 33.2k       |
| docstring_summary | string        | 0 to 5.15k       |
| docstring_tokens  | sequence      | n/a              |
| function          | string        | 34 to 151k       |
| function_tokens   | sequence      | n/a              |
| url               | string        | 90 to 278        |
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2.py
python
catalog.add
(self, type, orig, replace)
return ret
Add an entry in the catalog, it may overwrite existing but different entries.
Add an entry in the catalog, it may overwrite existing but different entries.
```python
def add(self, type, orig, replace):
    """Add an entry in the catalog, it may overwrite existing but
       different entries. """
    ret = libxml2mod.xmlACatalogAdd(self._o, type, orig, replace)
    return ret
```
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L5661-L5665
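For the libxml2 row above, a hedged usage sketch: `loadACatalog` and the `system` entry type are assumptions drawn from libxml2's catalog API, and every path and URI here is a placeholder.

```python
# Sketch only: loadACatalog and the "system" entry type are assumptions
# drawn from libxml2's catalog API; paths and URIs are placeholders.
import libxml2

catal = libxml2.loadACatalog('/etc/xml/catalog')
ret = catal.add('system', 'http://example.com/app.dtd', 'file:///tmp/app.dtd')
print(ret)  # 0 on success, -1 on failure (xmlACatalogAdd convention)
```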
NREL/EnergyPlus
fadc5973b85c70e8cc923efb69c144e808a26078
src/EnergyPlus/api/datatransfer.py
python
DataExchange.set_global_value
(self, state: c_void_p, handle: int, value: float)
Set the current value of a plugin global variable in a running simulation. This is only used for Python Plugin applications! Global variables are used as a way to share data between running Python Plugins. First a global variable must be declared in the input file using the PythonPlugin:GlobalVariables object. Once a name has been declared, it can be accessed in the Plugin by getting a handle to the variable using the get_global_handle function, then using the get_global_value and this set_global_value functions as needed. Note all global variables are floating point values. :param state: An active EnergyPlus "state" that is returned from a call to `api.state_manager.new_state()`. :param handle: An integer returned from the `get_global_handle` function. :param value: Floating point value to assign to the global variable
Set the current value of a plugin global variable in a running simulation. This is only used for Python Plugin applications!
```python
def set_global_value(self, state: c_void_p, handle: int, value: float) -> None:
    """
    Set the current value of a plugin global variable in a running simulation.
    This is only used for Python Plugin applications!

    Global variables are used as a way to share data between running Python
    Plugins.  First a global variable must be declared in the input file using
    the PythonPlugin:GlobalVariables object.  Once a name has been declared, it
    can be accessed in the Plugin by getting a handle to the variable using the
    get_global_handle function, then using the get_global_value and this
    set_global_value functions as needed.  Note all global variables are
    floating point values.

    :param state: An active EnergyPlus "state" that is returned from a call to
        `api.state_manager.new_state()`.
    :param handle: An integer returned from the `get_global_handle` function.
    :param value: Floating point value to assign to the global variable
    """
    if not self.running_as_python_plugin:
        raise EnergyPlusException("set_global_handle is only available as part of a Python Plugin workflow")
    if not is_number(handle):
        raise EnergyPlusException(
            "`set_global_value` expects `variable_handle` as an `int`, not "
            "'{}'".format(handle))
    if not is_number(value):
        raise EnergyPlusException(
            "`get_global_value` expects `value` as a `float`, not "
            "'{}'".format(value))
    self.api.setPluginGlobalVariableValue(state, handle, value)
```
https://github.com/NREL/EnergyPlus/blob/fadc5973b85c70e8cc923efb69c144e808a26078/src/EnergyPlus/api/datatransfer.py#L681-L706
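The docstring above spells out a handle-then-set workflow; here is a minimal sketch of how a plugin might call it. The callback name, the variable name, and the `EnergyPlusPlugin` scaffolding are assumptions; only the two exchange calls mirror the documented flow.

```python
# Hypothetical fragment of an EnergyPlus Python Plugin class;
# "shared_counter" must be declared via a PythonPlugin:GlobalVariables object.
def on_end_of_zone_timestep(self, state):
    handle = self.api.exchange.get_global_handle(state, "shared_counter")
    self.api.exchange.set_global_value(state, handle, 42.0)
    return 0  # plugin callbacks return 0 on success
```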
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_windows.py
python
StatusBar.SetStatusStyles
(*args, **kwargs)
return _windows_.StatusBar_SetStatusStyles(*args, **kwargs)
SetStatusStyles(self, int styles)
SetStatusStyles(self, int styles)
```python
def SetStatusStyles(*args, **kwargs):
    """SetStatusStyles(self, int styles)"""
    return _windows_.StatusBar_SetStatusStyles(*args, **kwargs)
```
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_windows.py#L1271-L1273
facebook/wangle
2e7e3fbb3a15c4986d6fe0e36c31daeeba614ce3
build/fbcode_builder/getdeps/cargo.py
python
CargoBuilder._resolve_config
(self)
return "\n".join(config)
Returns a configuration to be put inside root Cargo.toml file which patches the dependencies git code with local getdeps versions. See https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section
Returns a configuration to be put inside root Cargo.toml file which patches the dependencies git code with local getdeps versions. See https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section
```python
def _resolve_config(self):
    """
    Returns a configuration to be put inside root Cargo.toml file which
    patches the dependencies git code with local getdeps versions.
    See https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section
    """
    dep_to_git = self._resolve_dep_to_git()
    dep_to_crates = CargoBuilder._resolve_dep_to_crates(
        self.build_source_dir(), dep_to_git
    )

    config = []
    for name in sorted(dep_to_git.keys()):
        git_conf = dep_to_git[name]
        crates = sorted(dep_to_crates.get(name, []))
        if not crates:
            continue  # nothing to patch, move along
        crates_patches = [
            '{} = {{ path = "{}" }}'.format(
                crate,
                CargoBuilder._resolve_crate_to_path(crate, git_conf).replace(
                    "\\", "\\\\"
                ),
            )
            for crate in crates
        ]

        config.append(
            '[patch."{0}"]\n'.format(git_conf["repo_url"])
            + "\n".join(crates_patches)
        )
    return "\n".join(config)
```
https://github.com/facebook/wangle/blob/2e7e3fbb3a15c4986d6fe0e36c31daeeba614ce3/build/fbcode_builder/getdeps/cargo.py#L187-L218
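For illustration, the string this method returns has the shape below; the repository URL and local path are made up. Each `[patch]` table redirects one git dependency to a local getdeps checkout.

```python
# Illustrative output only; the URL and path are placeholders.
expected = (
    '[patch."https://github.com/example/dep.git"]\n'
    'examplecrate = { path = "/build/getdeps/repos/dep/examplecrate" }'
)
```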
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/tools/configure.d/nodedownload.py
python
unpack
(packedfile, parent_path)
Unpacks packedfile into parent_path. Assumes .zip. Returns parent_path
Unpacks packedfile into parent_path. Assumes .zip. Returns parent_path
```python
def unpack(packedfile, parent_path):
    """Unpacks packedfile into parent_path. Assumes .zip. Returns parent_path"""
    if zipfile.is_zipfile(packedfile):
        with contextlib.closing(zipfile.ZipFile(packedfile, 'r')) as icuzip:
            print(' Extracting zipfile: %s' % packedfile)
            icuzip.extractall(parent_path)
            return parent_path
    elif tarfile.is_tarfile(packedfile):
        with contextlib.closing(tarfile.TarFile.open(packedfile, 'r')) as icuzip:
            print(' Extracting tarfile: %s' % packedfile)
            icuzip.extractall(parent_path)
            return parent_path
    else:
        packedsuffix = packedfile.lower().split('.')[-1]  # .zip, .tgz etc
        raise Exception('Error: Don\'t know how to unpack %s with extension %s' % (packedfile, packedsuffix))
```
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/tools/configure.d/nodedownload.py#L71-L85
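A quick usage sketch; the archive name and destination directory are placeholders. Note that although the docstring says .zip, the code also accepts tarfiles and raises for anything else.

```python
# Placeholders throughout; unpack() returns the destination directory.
extracted_root = unpack('icu4c-src.zip', '/tmp/icu-deps')
# unpack('archive.rar', '/tmp') would raise, since .rar is unhandled.
```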
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/retdec-3.2/scripts/type_extractor/type_extractor/json_types.py
python
get_array_dimensions
(dimensions)
return [int(d) if d.isdigit() else d for d in dimensions]
Returns list of all dimensions.
Returns list of all dimensions.
```python
def get_array_dimensions(dimensions):
    """Returns list of all dimensions."""
    dimensions = re.sub(r'^\[|\]$', '', dimensions).split('][')
    return [int(d) if d.isdigit() else d for d in dimensions]
```
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/retdec-3.2/scripts/type_extractor/type_extractor/json_types.py#L489-L492
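A behavior sketch with illustrative inputs: numeric dimensions come back as ints, symbolic ones stay strings.

```python
assert get_array_dimensions('[10][20]') == [10, 20]
assert get_array_dimensions('[N][4]') == ['N', 4]
```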
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/sets.py
python
BaseSet.__or__
(self, other)
return self.union(other)
Return the union of two sets as a new set. (I.e. all elements that are in either set.)
Return the union of two sets as a new set.
```python
def __or__(self, other):
    """Return the union of two sets as a new set.

    (I.e. all elements that are in either set.)
    """
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.union(other)
```
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/sets.py#L178-L185
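A short sketch against the long-deprecated Python 2 `sets` module: returning `NotImplemented` for a non-`BaseSet` operand makes Python fall back to the right operand and, failing that, raise `TypeError`.

```python
# Python 2 only; the sets module was removed in Python 3.
from sets import Set

print(Set([1, 2]) | Set([2, 3]))  # Set([1, 2, 3])
Set([1, 2]) | [2, 3]              # raises TypeError via NotImplemented
```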
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/aui.py
python
AuiPaneInfo.Floatable
(*args, **kwargs)
return _aui.AuiPaneInfo_Floatable(*args, **kwargs)
Floatable(self, bool b=True) -> AuiPaneInfo
Floatable(self, bool b=True) -> AuiPaneInfo
```python
def Floatable(*args, **kwargs):
    """Floatable(self, bool b=True) -> AuiPaneInfo"""
    return _aui.AuiPaneInfo_Floatable(*args, **kwargs)
```
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/aui.py#L493-L495
coinapi/coinapi-sdk
854f21e7f69ea8599ae35c5403565cf299d8b795
oeml-sdk/python/openapi_client/model/order_execution_reports.py
python
OrderExecutionReports.__init__
(self, *args, **kwargs)
OrderExecutionReports - a model defined in OpenAPI Note that value can be passed either in args or in kwargs, but not in both. Args: args[0] ([OrderExecutionReport]): Collection of order execution reports for currently opened orders.. # noqa: E501 Keyword Args: value ([OrderExecutionReport]): Collection of order execution reports for currently opened orders.. # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)
OrderExecutionReports - a model defined in OpenAPI
```python
def __init__(self, *args, **kwargs):
    """OrderExecutionReports - a model defined in OpenAPI

    Note that value can be passed either in args or in kwargs, but not in both.

    Args:
        args[0] ([OrderExecutionReport]): Collection of order execution reports for currently opened orders..  # noqa: E501

    Keyword Args:
        value ([OrderExecutionReport]): Collection of order execution reports for currently opened orders..  # noqa: E501
        _check_type (bool): if True, values for parameters in openapi_types
            will be type checked and a TypeError will be
            raised if the wrong type is input.
            Defaults to True
        _path_to_item (tuple/list): This is a list of keys or values to
            drill down to the model in received_data
            when deserializing a response
        _spec_property_naming (bool): True if the variable names in the input
            data are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data are pythonic names,
            e.g. snake case (default)
        _configuration (Configuration): the instance to use when
            deserializing a file_type parameter.
            If passed, type conversion is attempted
            If omitted no type conversion is done.
        _visited_composed_classes (tuple): This stores a tuple of
            classes that we have traveled through so that
            if we see that class again we will not use its
            discriminator again.
            When traveling through a discriminator, the
            composed schema that is traveled through is added
            to this set.
            For example if Animal has a discriminator
            petType and we pass in "Dog", and the class Dog
            allOf includes Animal, we move through Animal
            once using the discriminator, and pick Dog.
            Then in Dog, we will make an instance of the
            Animal class but this time we won't travel
            through its discriminator because we passed in
            _visited_composed_classes = (Animal,)
    """
    # required up here when default value is not given
    _path_to_item = kwargs.pop('_path_to_item', ())

    if 'value' in kwargs:
        value = kwargs.pop('value')
    elif args:
        args = list(args)
        value = args.pop(0)
    else:
        raise ApiTypeError(
            "value is required, but not passed in args or kwargs and doesn't have default",
            path_to_item=_path_to_item,
            valid_classes=(self.__class__,),
        )

    _check_type = kwargs.pop('_check_type', True)
    _spec_property_naming = kwargs.pop('_spec_property_naming', False)
    _configuration = kwargs.pop('_configuration', None)
    _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

    if args:
        raise ApiTypeError(
            "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                args,
                self.__class__.__name__,
            ),
            path_to_item=_path_to_item,
            valid_classes=(self.__class__,),
        )

    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
    self.value = value
    if kwargs:
        raise ApiTypeError(
            "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                kwargs,
                self.__class__.__name__,
            ),
            path_to_item=_path_to_item,
            valid_classes=(self.__class__,),
        )
```
https://github.com/coinapi/coinapi-sdk/blob/854f21e7f69ea8599ae35c5403565cf299d8b795/oeml-sdk/python/openapi_client/model/order_execution_reports.py#L104-L190
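A hedged construction sketch for the model above; the inner `OrderExecutionReport` items are omitted, and only the value-in-args-or-kwargs rule from the docstring is exercised.

```python
# Sketch: value may be passed positionally or as value=..., never both.
reports = OrderExecutionReports([])        # positional
reports = OrderExecutionReports(value=[])  # keyword
# OrderExecutionReports() raises ApiTypeError: value is required.
```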
microsoft/checkedc-clang
a173fefde5d7877b7750e7ce96dd08cf18baebf2
compiler-rt/lib/sanitizer_common/scripts/cpplint.py
python
CheckSectionSpacing
(filename, clean_lines, class_info, linenum, error)
Checks for additional blank line issues related to sections. Currently the only thing checked here is blank line before protected/private. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. class_info: A _ClassInfo objects. linenum: The number of the line to check. error: The function to call with any errors found.
Checks for additional blank line issues related to sections.
```python
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Skip checks if the class is small, where small means 25 lines or less.
  # 25 lines seems like a good cutoff since that's the usual height of
  # terminals, and any class that can't fit in one screen can't really
  # be considered "small".
  #
  # Also skip checks if we are on the first line.  This accounts for
  # classes that look like
  #   class Foo { public: ... };
  #
  # If we didn't find the end of the class, last_line would be zero,
  # and the check will be skipped by the first condition.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return

  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if matched:
    # Issue warning if the line before public/protected/private was
    # not a blank line, but don't do this if the previous line contains
    # "class" or "struct".  This can happen two ways:
    #  - We are at the beginning of the class.
    #  - We are forward-declaring an inner class that is semantically
    #    private, but needed to be public for implementation reasons.
    # Also ignores cases where the previous line ends with a backslash as can be
    # common when defining classes in C macros.
    prev_line = clean_lines.lines[linenum - 1]
    if (not IsBlankLine(prev_line) and
        not Search(r'\b(class|struct)\b', prev_line) and
        not Search(r'\\$', prev_line)):
      # Try a bit harder to find the beginning of the class.  This is to
      # account for multi-line base-specifier lists, e.g.:
      #   class Derived
      #       : public Base {
      end_class_head = class_info.starting_linenum
      for i in range(class_info.starting_linenum, linenum):
        if Search(r'\{\s*$', clean_lines.lines[i]):
          end_class_head = i
          break
      if end_class_head < linenum - 1:
        error(filename, linenum, 'whitespace/blank_line', 3,
              '"%s:" should be preceded by a blank line' % matched.group(1))
```
https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/compiler-rt/lib/sanitizer_common/scripts/cpplint.py#L3658-L3710
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_misc.py
python
MimeTypesManager.EnumAllFileTypes
(*args, **kwargs)
return _misc_.MimeTypesManager_EnumAllFileTypes(*args, **kwargs)
EnumAllFileTypes(self) -> PyObject
EnumAllFileTypes(self) -> PyObject
```python
def EnumAllFileTypes(*args, **kwargs):
    """EnumAllFileTypes(self) -> PyObject"""
    return _misc_.MimeTypesManager_EnumAllFileTypes(*args, **kwargs)
```
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L2677-L2679
anestisb/oatdump_plus
ba858c1596598f0d9ae79c14d08c708cecc50af3
tools/common/common.py
python
GetJackClassPath
()
return libdir + '/core-libart-hostdex_intermediates/classes.jack:' + libdir + '/core-oj-hostdex_intermediates/classes.jack'
Returns Jack's classpath.
Returns Jack's classpath.
```python
def GetJackClassPath():
    """Returns Jack's classpath."""
    top = GetEnvVariableOrError('ANDROID_BUILD_TOP')
    libdir = top + '/out/host/common/obj/JAVA_LIBRARIES'
    return libdir + '/core-libart-hostdex_intermediates/classes.jack:' \
         + libdir + '/core-oj-hostdex_intermediates/classes.jack'
```
https://github.com/anestisb/oatdump_plus/blob/ba858c1596598f0d9ae79c14d08c708cecc50af3/tools/common/common.py#L119-L124
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/python2_version/klampt/model/trajectory.py
python
HermiteTrajectory.stackDofs
(self,trajs,strict=True)
Stacks the degrees of freedom of multiple trajectories together. The result is contained in self. All evaluations are assumed to take place with the 'halt' endBehavior. Args: trajs (list or tuple of HermiteTrajectory): the trajectories to stack strict (bool, optional): ignored. Will always warn for invalid classes. Returns: None
Stacks the degrees of freedom of multiple trajectories together. The result is contained in self.
```python
def stackDofs(self, trajs, strict=True):
    """Stacks the degrees of freedom of multiple trajectories together.
    The result is contained in self.

    All evaluations are assumed to take place with the 'halt' endBehavior.

    Args:
        trajs (list or tuple of HermiteTrajectory): the trajectories to stack
        strict (bool, optional): ignored. Will always warn for invalid classes.

    Returns:
        None
    """
    if not isinstance(trajs, (list, tuple)):
        raise ValueError("HermiteTrajectory.stackDofs takes in a list of trajectories as input")
    for traj in trajs:
        if not isinstance(traj, HermiteTrajectory):
            raise ValueError("Can't stack non-HermiteTrajectory objects into a HermiteTrajectory")
    alltimes = set()
    for traj in trajs:
        for t in traj.times:
            alltimes.add(t)
    self.times = sorted(alltimes)
    stacktrajs = [traj.remesh(self.times) for traj in trajs]
    for traj in stacktrajs:
        assert len(traj.milestones) == len(self.times)
    self.milestones = []
    for i, t in enumerate(self.times):
        q = []
        v = []
        for traj in stacktrajs:
            n = len(traj.milestones[i]) // 2
            q += list(traj.milestones[i][:n])
            v += list(traj.milestones[i][n:])
        self.milestones.append(q + v)
```
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/model/trajectory.py#L1036-L1072
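An illustrative call under an assumed `HermiteTrajectory(times, milestones, dmilestones)` constructor; all values are placeholders. Stacking a 1-DOF x-trajectory and a 1-DOF y-trajectory yields a 2-DOF trajectory whose milestones store `[qx, qy, vx, vy]`.

```python
# Hypothetical values; the constructor signature is assumed from klampt.
from klampt.model.trajectory import HermiteTrajectory

x_traj = HermiteTrajectory([0.0, 1.0], [[0.0], [1.0]], [[0.0], [0.0]])
y_traj = HermiteTrajectory([0.0, 1.0], [[0.0], [2.0]], [[0.0], [0.0]])

xy = HermiteTrajectory()
xy.stackDofs([x_traj, y_traj])  # milestones now hold [qx, qy, vx, vy]
```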
smilehao/xlua-framework
a03801538be2b0e92d39332d445b22caca1ef61f
ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/google/protobuf/text_format.py
python
PrintFieldValue
(field, value, out, indent=0, as_utf8=False, as_one_line=False)
Print a single field value (not including name). For repeated fields, the value should be a single element.
Print a single field value (not including name). For repeated fields, the value should be a single element.
```python
def PrintFieldValue(field, value, out, indent=0,
                    as_utf8=False, as_one_line=False):
  """Print a single field value (not including name).  For repeated fields,
  the value should be a single element."""

  if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    if as_one_line:
      out.write(' { ')
      PrintMessage(value, out, indent, as_utf8, as_one_line)
      out.write('}')
    else:
      out.write(' {\n')
      PrintMessage(value, out, indent + 2, as_utf8, as_one_line)
      out.write(' ' * indent + '}')
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
    enum_value = field.enum_type.values_by_number.get(value, None)
    if enum_value is not None:
      out.write(enum_value.name)
    else:
      out.write(str(value))
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
    out.write('\"')
    if type(value) is unicode:
      out.write(_CEscape(value.encode('utf-8'), as_utf8))
    else:
      out.write(_CEscape(value, as_utf8))
    out.write('\"')
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
    if value:
      out.write("true")
    else:
      out.write("false")
  else:
    out.write(str(value))
```
https://github.com/smilehao/xlua-framework/blob/a03801538be2b0e92d39332d445b22caca1ef61f/ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/google/protobuf/text_format.py#L110-L143
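A small sketch of calling this helper directly; `example_pb2` and its `Example` message are hypothetical, and the call needs a `FieldDescriptor` plus a writable stream.

```python
# Hypothetical generated module; only PrintFieldValue mirrors the code above.
import sys
from google.protobuf import text_format
import example_pb2  # placeholder

msg = example_pb2.Example(name='hello')
field = msg.DESCRIPTOR.fields_by_name['name']
text_format.PrintFieldValue(field, msg.name, sys.stdout)  # writes "hello"
```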
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scikit-learn/py2/sklearn/pipeline.py
python
Pipeline.predict
(self, X)
return self.steps[-1][-1].predict(Xt)
Apply transforms to the data, and predict with the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_pred : array-like
Apply transforms to the data, and predict with the final estimator
```python
def predict(self, X):
    """Apply transforms to the data, and predict with the final estimator

    Parameters
    ----------
    X : iterable
        Data to predict on. Must fulfill input requirements of first step
        of the pipeline.

    Returns
    -------
    y_pred : array-like
    """
    Xt = X
    for name, transform in self.steps[:-1]:
        if transform is not None:
            Xt = transform.transform(Xt)
    return self.steps[-1][-1].predict(Xt)
```
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/pipeline.py#L310-L327
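Typical scikit-learn usage; the scaler and classifier choices are illustrative. `predict` pushes the input through every transform before the final estimator.

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X, y = load_iris(return_X_y=True)
pipe = Pipeline([('scale', StandardScaler()), ('clf', LogisticRegression())])
pipe.fit(X, y)
y_pred = pipe.predict(X)  # X is scaled by StandardScaler, then classified
```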
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/common/vitis_quantize_registry.py
python
VitisQuantizeRegistry.get_input_quantize_config
(self)
return config
Get input quantize config.
Get input quantize config.
```python
def get_input_quantize_config(self):
  """Get input quantize config."""
  config = self._configs['input_quantize_config']

  input_bit = self.get_input_bit()
  if input_bit:
    config['input_quantizer']['quantizer_params']['bit_width'] = input_bit
    logger.debug('Override default bit_width: input -> {}'.format(input_bit))

  symmetry = self.get_symmetry()
  if symmetry is not None:
    config['input_quantizer']['quantizer_params']['symmetry'] = symmetry
    logger.debug('Override default symmetry: input -> {}'.format(symmetry))

  return config
```
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/common/vitis_quantize_registry.py#L160-L171
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/ultimatelistctrl.py
python
UltimateListHeaderData.GetText
(self)
return self._text
Returns the header/footer item text.
Returns the header/footer item text.
```python
def GetText(self):
    """ Returns the header/footer item text. """
    return self._text
```
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ultimatelistctrl.py#L3221-L3224
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
build/linux/rewrite_dirs.py
python
RewritePath
(path, opts)
Rewrites a path by stripping the prefix and prepending the sysroot.
Rewrites a path by stripping the prefix and prepending the sysroot.
```python
def RewritePath(path, opts):
  """Rewrites a path by stripping the prefix and prepending the sysroot."""
  sysroot = opts.sysroot
  prefix = opts.strip_prefix
  if os.path.isabs(path) and not path.startswith(sysroot):
    if path.startswith(prefix):
      path = path[len(prefix):]
    path = path.lstrip('/')
    return os.path.join(sysroot, path)
  else:
    return path
```
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/build/linux/rewrite_dirs.py#L22-L32
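A behavior sketch with a stand-in `opts` object (the real script builds it with optparse): absolute paths under the prefix are relocated into the sysroot, everything else passes through.

```python
from types import SimpleNamespace

opts = SimpleNamespace(sysroot='/build/sysroot', strip_prefix='/usr')
assert RewritePath('/usr/lib/libfoo.so', opts) == '/build/sysroot/lib/libfoo.so'
assert RewritePath('relative/include', opts) == 'relative/include'
```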
potassco/clingo
e0c91d8f95cc28de1c480a871f9c97c30de83d40
libpyclingo/clingo/ast.py
python
Pool
(location: Location, arguments: Sequence[AST])
return AST(p_ast[0])
Construct an AST node of type `ASTType.Pool`.
Construct an AST node of type `ASTType.Pool`.
```python
def Pool(location: Location, arguments: Sequence[AST]) -> AST:
    '''
    Construct an AST node of type `ASTType.Pool`.
    '''
    p_ast = _ffi.new('clingo_ast_t**')
    c_location = _c_location(location)
    _handle_error(_lib.clingo_ast_build(
        _lib.clingo_ast_type_pool, p_ast,
        c_location[0],
        _ffi.new('clingo_ast_t*[]', [x._rep for x in arguments]),
        _ffi.cast('size_t', len(arguments))))
    return AST(p_ast[0])
```
https://github.com/potassco/clingo/blob/e0c91d8f95cc28de1c480a871f9c97c30de83d40/libpyclingo/clingo/ast.py#L1287-L1298
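A hedged sketch of building a pool term with the public clingo API; clingo >= 5.5 is assumed for `Position`, `Location`, and `SymbolicTerm`.

```python
# Assumed clingo >= 5.5 Python API; builds the pool term "(1;2)".
from clingo.ast import Location, Position, Pool, SymbolicTerm
from clingo.symbol import Number

pos = Position('<example>', 1, 1)
loc = Location(pos, pos)
pool = Pool(loc, [SymbolicTerm(loc, Number(1)), SymbolicTerm(loc, Number(2))])
print(pool)  # roughly "(1;2)"
```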
openvinotoolkit/openvino
dedcbeafa8b84cccdc55ca64b8da516682b381c7
.ci/openvino-onnx/watchdog/src/git_wrapper.py
python
GitWrapper.get_git_time
(self)
return datetime_object
Retrieve time from GitHub. Used to reliably determine time during Watchdog run. :return: Datetime object describing current time :rtype: datetime
Retrieve time from GitHub.
```python
def get_git_time(self):
    """Retrieve time from GitHub.

    Used to reliably determine time during Watchdog run.

    :return: Datetime object describing current time
    :rtype: datetime
    """
    try:
        datetime_object = self._get_git_time()
    except ValueError as e:
        raise GitWrapperError(str(e))
    except GithubException as e:
        message = 'GitHub Exception during API status retrieval. Exception: {}'.format(str(e))
        raise GitWrapperError(message)
    except timeout_decorator.TimeoutError:
        message = 'GitHub Exception during API status retrieval. Timeout during API request.'
        raise GitWrapperError(message)
    return datetime_object
```
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/.ci/openvino-onnx/watchdog/src/git_wrapper.py#L44-L62
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/fileinput.py
python
close
()
Close the sequence.
Close the sequence.
```python
def close():
    """Close the sequence."""
    global _state
    state = _state
    _state = None
    if state:
        state.close()
```
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/fileinput.py#L106-L112
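The module-level `close()` pairs with `fileinput.input()`, which creates the global `_state`; a typical loop (the file name is a placeholder):

```python
import fileinput

for line in fileinput.input(['notes.txt']):
    print(fileinput.filename(), fileinput.lineno(), line.rstrip())
fileinput.close()  # tears down the module-level _state
```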
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/python/requests/requests/sessions.py
python
Session.get
(self, url, **kwargs)
return self.request('GET', url, **kwargs)
Sends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes.
Sends a GET request. Returns :class:`Response` object.
```python
def get(self, url, **kwargs):
    """Sends a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """

    kwargs.setdefault('allow_redirects', True)
    return self.request('GET', url, **kwargs)
```
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/requests/requests/sessions.py#L465-L473
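Standard requests usage; `allow_redirects` defaults to True exactly as the code sets it.

```python
import requests

with requests.Session() as s:
    resp = s.get('https://example.com', timeout=5)
    print(resp.status_code, resp.url)
```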
ivansafrin/Polycode
37a40fefe194ec7f6e9d1257f3bb3517b0a168bc
Bindings/Scripts/create_lua_library/zipfile.py
python
ZipFile.write
(self, filename, arcname=None, compress_type=None)
Put the bytes from filename into the archive under the name arcname.
Put the bytes from filename into the archive under the name arcname.
```python
def write(self, filename, arcname=None, compress_type=None):
    """Put the bytes from filename into the archive under the name
    arcname."""
    if not self.fp:
        raise RuntimeError(
              "Attempt to write to ZIP archive that was already closed")

    st = os.stat(filename)
    isdir = stat.S_ISDIR(st.st_mode)
    mtime = time.localtime(st.st_mtime)
    date_time = mtime[0:6]
    # Create ZipInfo instance to store file information
    if arcname is None:
        arcname = filename
    arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
    while arcname[0] in (os.sep, os.altsep):
        arcname = arcname[1:]
    if isdir:
        arcname += '/'
    zinfo = ZipInfo(arcname, date_time)
    zinfo.external_attr = (st[0] & 0xFFFF) << 16L      # Unix attributes
    if compress_type is None:
        zinfo.compress_type = self.compression
    else:
        zinfo.compress_type = compress_type

    zinfo.file_size = st.st_size
    zinfo.flag_bits = 0x00
    zinfo.header_offset = self.fp.tell()    # Start of header bytes

    self._writecheck(zinfo)
    self._didModify = True

    if isdir:
        zinfo.file_size = 0
        zinfo.compress_size = 0
        zinfo.CRC = 0
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
        self.fp.write(zinfo.FileHeader())
        return

    with open(filename, "rb") as fp:
        # Must overwrite CRC and sizes with correct data later
        zinfo.CRC = CRC = 0
        zinfo.compress_size = compress_size = 0
        zinfo.file_size = file_size = 0
        self.fp.write(zinfo.FileHeader())
        if zinfo.compress_type == ZIP_DEFLATED:
            cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                    zlib.DEFLATED, -15)
        else:
            cmpr = None
        while 1:
            buf = fp.read(1024 * 8)
            if not buf:
                break
            file_size = file_size + len(buf)
            CRC = crc32(buf, CRC) & 0xffffffff
            if cmpr:
                buf = cmpr.compress(buf)
                compress_size = compress_size + len(buf)
            self.fp.write(buf)
    if cmpr:
        buf = cmpr.flush()
        compress_size = compress_size + len(buf)
        self.fp.write(buf)
        zinfo.compress_size = compress_size
    else:
        zinfo.compress_size = file_size
    zinfo.CRC = CRC
    zinfo.file_size = file_size
    # Seek backwards and write CRC and file sizes
    position = self.fp.tell()       # Preserve current position in file
    self.fp.seek(zinfo.header_offset + 14, 0)
    self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
                              zinfo.file_size))
    self.fp.seek(position, 0)
    self.filelist.append(zinfo)
    self.NameToInfo[zinfo.filename] = zinfo
```
https://github.com/ivansafrin/Polycode/blob/37a40fefe194ec7f6e9d1257f3bb3517b0a168bc/Bindings/Scripts/create_lua_library/zipfile.py#L1042-L1121
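Standard-library usage of the method above (Python 2-era module; file names are placeholders):

```python
import zipfile

zf = zipfile.ZipFile('bundle.zip', 'w', zipfile.ZIP_DEFLATED)
zf.write('report.txt', arcname='docs/report.txt')  # store under a new name
zf.close()
```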
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
examples/python/globals.py
python
globals
(command_args)
Extract all globals from any arguments which must be paths to object files.
Extract all globals from any arguments which must be paths to object files.
```python
def globals(command_args):
    '''Extract all globals from any arguments which must be paths to object files.'''
    usage = "usage: %prog [options] <PATH> [PATH ...]"
    description = '''This command will find all globals in the specified object file and return an list() of lldb.SBValue objects (which might be empty).'''
    parser = optparse.OptionParser(
        description=description,
        prog='globals',
        usage=usage)
    parser.add_option(
        '-v',
        '--verbose',
        action='store_true',
        dest='verbose',
        help='display verbose debug info',
        default=False)
    parser.add_option(
        '-a',
        '--arch',
        type='string',
        metavar='arch',
        dest='arch',
        help='Specify an architecture (or triple) to use when extracting from a file.')
    parser.add_option(
        '-p',
        '--platform',
        type='string',
        metavar='platform',
        dest='platform',
        help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".')
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        return
    for path in args:
        get_globals(path, options)
```
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/examples/python/globals.py#L67-L102
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/nn_impl.py
python
l2_normalize_v2
(x, axis=None, epsilon=1e-12, name=None)
Normalizes along dimension `axis` using an L2 norm. For a 1-D tensor with `axis = 0`, computes output = x / sqrt(max(sum(x**2), epsilon)) For `x` with more dimensions, independently normalizes each 1-D slice along dimension `axis`. Args: x: A `Tensor`. axis: Dimension along which to normalize. A scalar or a vector of integers. epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`. name: A name for this operation (optional). Returns: A `Tensor` with the same shape as `x`.
Normalizes along dimension `axis` using an L2 norm.
[ "Normalizes", "along", "dimension", "axis", "using", "an", "L2", "norm", "." ]
def l2_normalize_v2(x, axis=None, epsilon=1e-12, name=None): """Normalizes along dimension `axis` using an L2 norm. For a 1-D tensor with `axis = 0`, computes output = x / sqrt(max(sum(x**2), epsilon)) For `x` with more dimensions, independently normalizes each 1-D slice along dimension `axis`. Args: x: A `Tensor`. axis: Dimension along which to normalize. A scalar or a vector of integers. epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`. name: A name for this operation (optional). Returns: A `Tensor` with the same shape as `x`. """ with ops.name_scope(name, "l2_normalize", [x]) as name: x = ops.convert_to_tensor(x, name="x") square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True) x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon)) return math_ops.multiply(x, x_inv_norm, name=name)
[ "def", "l2_normalize_v2", "(", "x", ",", "axis", "=", "None", ",", "epsilon", "=", "1e-12", ",", "name", "=", "None", ")", ":", "with", "ops", ".", "name_scope", "(", "name", ",", "\"l2_normalize\"", ",", "[", "x", "]", ")", "as", "name", ":", "x", "=", "ops", ".", "convert_to_tensor", "(", "x", ",", "name", "=", "\"x\"", ")", "square_sum", "=", "math_ops", ".", "reduce_sum", "(", "math_ops", ".", "square", "(", "x", ")", ",", "axis", ",", "keepdims", "=", "True", ")", "x_inv_norm", "=", "math_ops", ".", "rsqrt", "(", "math_ops", ".", "maximum", "(", "square_sum", ",", "epsilon", ")", ")", "return", "math_ops", ".", "multiply", "(", "x", ",", "x_inv_norm", ",", "name", "=", "name", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/nn_impl.py#L620-L645
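A small NumPy sketch (not the TensorFlow code above) that reproduces the documented formula output = x / sqrt(max(sum(x**2), epsilon)) so the result can be checked by hand:

import numpy as np

x = np.array([3.0, 4.0])
square_sum = np.sum(np.square(x))                    # 25.0
x_inv_norm = 1.0 / np.sqrt(max(square_sum, 1e-12))   # 1/5
print(x * x_inv_norm)                                # [0.6 0.8] -- unit L2 norm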
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py
python
split_when
(iterable, pred, maxsplit=-1)
Split *iterable* into pieces based on the output of *pred*. *pred* should be a function that takes successive pairs of items and returns ``True`` if the iterable should be split in between them. For example, to find runs of increasing numbers, split the iterable when element ``i`` is larger than element ``i + 1``: >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y)) [[1, 2, 3, 3], [2, 5], [2, 4], [2]] At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], ... lambda x, y: x > y, maxsplit=2)) [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
Split *iterable* into pieces based on the output of *pred*. *pred* should be a function that takes successive pairs of items and returns ``True`` if the iterable should be split in between them.
[ "Split", "*", "iterable", "*", "into", "pieces", "based", "on", "the", "output", "of", "*", "pred", "*", ".", "*", "pred", "*", "should", "be", "a", "function", "that", "takes", "successive", "pairs", "of", "items", "and", "returns", "True", "if", "the", "iterable", "should", "be", "split", "in", "between", "them", "." ]
def split_when(iterable, pred, maxsplit=-1): """Split *iterable* into pieces based on the output of *pred*. *pred* should be a function that takes successive pairs of items and returns ``True`` if the iterable should be split in between them. For example, to find runs of increasing numbers, split the iterable when element ``i`` is larger than element ``i + 1``: >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y)) [[1, 2, 3, 3], [2, 5], [2, 4], [2]] At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, then there is no limit on the number of splits: >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], ... lambda x, y: x > y, maxsplit=2)) [[1, 2, 3, 3], [2, 5], [2, 4, 2]] """ if maxsplit == 0: yield list(iterable) return it = iter(iterable) try: cur_item = next(it) except StopIteration: return buf = [cur_item] for next_item in it: if pred(cur_item, next_item): yield buf if maxsplit == 1: yield [next_item] + list(it) return buf = [] maxsplit -= 1 buf.append(next_item) cur_item = next_item yield buf
[ "def", "split_when", "(", "iterable", ",", "pred", ",", "maxsplit", "=", "-", "1", ")", ":", "if", "maxsplit", "==", "0", ":", "yield", "list", "(", "iterable", ")", "return", "it", "=", "iter", "(", "iterable", ")", "try", ":", "cur_item", "=", "next", "(", "it", ")", "except", "StopIteration", ":", "return", "buf", "=", "[", "cur_item", "]", "for", "next_item", "in", "it", ":", "if", "pred", "(", "cur_item", ",", "next_item", ")", ":", "yield", "buf", "if", "maxsplit", "==", "1", ":", "yield", "[", "next_item", "]", "+", "list", "(", "it", ")", "return", "buf", "=", "[", "]", "maxsplit", "-=", "1", "buf", ".", "append", "(", "next_item", ")", "cur_item", "=", "next_item", "yield", "buf" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py#L1278-L1320
tiann/android-native-debug
198903ed9346dc4a74327a63cb98d449b97d8047
app/source/art/tools/cpplint.py
python
_CppLintState.PrintErrorCounts
(self)
Print a summary of errors by category, and the total.
Print a summary of errors by category, and the total.
[ "Print", "a", "summary", "of", "errors", "by", "category", "and", "the", "total", "." ]
def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count)
[ "def", "PrintErrorCounts", "(", "self", ")", ":", "for", "category", ",", "count", "in", "self", ".", "errors_by_category", ".", "iteritems", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'Category \\'%s\\' errors found: %d\\n'", "%", "(", "category", ",", "count", ")", ")", "sys", ".", "stderr", ".", "write", "(", "'Total errors found: %d\\n'", "%", "self", ".", "error_count", ")" ]
https://github.com/tiann/android-native-debug/blob/198903ed9346dc4a74327a63cb98d449b97d8047/app/source/art/tools/cpplint.py#L621-L626
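The body above relies on dict.iteritems, which exists only on Python 2; a hedged Python 3 sketch of the same report, with sample data standing in for the real counters:

import sys

errors_by_category = {'whitespace/indent': 3, 'build/include': 1}  # sample counters
for category, count in errors_by_category.items():  # .items() replaces .iteritems()
    sys.stderr.write("Category '%s' errors found: %d\n" % (category, count))
sys.stderr.write('Total errors found: %d\n' % sum(errors_by_category.values()))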
bitsai/book-exercises
3a52330c6066090662bc2123d03aac270ef3ed10
Learn Python the Hard Way/ex25.py
python
sort_words
(words)
return sorted(words)
Sorts the words.
Sorts the words.
[ "Sorts", "the", "words", "." ]
def sort_words(words): """Sorts the words.""" return sorted(words)
[ "def", "sort_words", "(", "words", ")", ":", "return", "sorted", "(", "words", ")" ]
https://github.com/bitsai/book-exercises/blob/3a52330c6066090662bc2123d03aac270ef3ed10/Learn Python the Hard Way/ex25.py#L6-L8
echronos/echronos
c996f1d2c8af6c6536205eb319c1bf1d4d84569c
external_tools/ply_info/example/BASIC/basparse.py
python
p_command_for
(p)
command : FOR ID EQUALS expr TO expr optstep
command : FOR ID EQUALS expr TO expr optstep
[ "command", ":", "FOR", "ID", "EQUALS", "expr", "TO", "expr", "optstep" ]
def p_command_for(p): '''command : FOR ID EQUALS expr TO expr optstep''' p[0] = ('FOR',p[2],p[4],p[6],p[7])
[ "def", "p_command_for", "(", "p", ")", ":", "p", "[", "0", "]", "=", "(", "'FOR'", ",", "p", "[", "2", "]", ",", "p", "[", "4", "]", ",", "p", "[", "6", "]", ",", "p", "[", "7", "]", ")" ]
https://github.com/echronos/echronos/blob/c996f1d2c8af6c6536205eb319c1bf1d4d84569c/external_tools/ply_info/example/BASIC/basparse.py#L164-L166
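A hedged illustration of the tuple this grammar action builds; the exact expr representation depends on the rest of the grammar, so plain integers stand in for the reduced expressions here:

# Stand-in for the parser's production slice for: FOR I = 1 TO 10 STEP 2
p = [None, 'FOR', 'I', '=', 1, 'TO', 10, 2]
result = ('FOR', p[2], p[4], p[6], p[7])
print(result)  # ('FOR', 'I', 1, 10, 2)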
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py2/scipy/signal/ltisys.py
python
impulse2
(system, X0=None, T=None, N=None, **kwargs)
return Tr, Yr
Impulse response of a single-input, continuous-time linear system. Parameters ---------- system : an instance of the LTI class or a tuple of array_like describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `lti`) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) X0 : 1-D array_like, optional The initial condition of the state vector. Default: 0 (the zero vector). T : 1-D array_like, optional The time steps at which the input is defined and at which the output is desired. If `T` is not given, the function will generate a set of time samples automatically. N : int, optional Number of time points to compute. Default: 100. kwargs : various types Additional keyword arguments are passed on to the function `scipy.signal.lsim2`, which in turn passes them on to `scipy.integrate.odeint`; see the latter's documentation for information about these arguments. Returns ------- T : ndarray The time values for the output. yout : ndarray The output response of the system. See Also -------- impulse, lsim2, integrate.odeint Notes ----- The solution is generated by calling `scipy.signal.lsim2`, which uses the differential equation solver `scipy.integrate.odeint`. If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.8.0 Examples -------- Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t) >>> from scipy import signal >>> system = ([1.0], [1.0, 2.0, 1.0]) >>> t, y = signal.impulse2(system) >>> import matplotlib.pyplot as plt >>> plt.plot(t, y)
Impulse response of a single-input, continuous-time linear system.
[ "Impulse", "response", "of", "a", "single", "-", "input", "continuous", "-", "time", "linear", "system", "." ]
def impulse2(system, X0=None, T=None, N=None, **kwargs): """ Impulse response of a single-input, continuous-time linear system. Parameters ---------- system : an instance of the LTI class or a tuple of array_like describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `lti`) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) X0 : 1-D array_like, optional The initial condition of the state vector. Default: 0 (the zero vector). T : 1-D array_like, optional The time steps at which the input is defined and at which the output is desired. If `T` is not given, the function will generate a set of time samples automatically. N : int, optional Number of time points to compute. Default: 100. kwargs : various types Additional keyword arguments are passed on to the function `scipy.signal.lsim2`, which in turn passes them on to `scipy.integrate.odeint`; see the latter's documentation for information about these arguments. Returns ------- T : ndarray The time values for the output. yout : ndarray The output response of the system. See Also -------- impulse, lsim2, integrate.odeint Notes ----- The solution is generated by calling `scipy.signal.lsim2`, which uses the differential equation solver `scipy.integrate.odeint`. If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.8.0 Examples -------- Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t) >>> from scipy import signal >>> system = ([1.0], [1.0, 2.0, 1.0]) >>> t, y = signal.impulse2(system) >>> import matplotlib.pyplot as plt >>> plt.plot(t, y) """ if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('impulse2 can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_ss() B = sys.B if B.shape[-1] != 1: raise ValueError("impulse2() requires a single-input system.") B = B.squeeze() if X0 is None: X0 = zeros_like(B) if N is None: N = 100 if T is None: T = _default_response_times(sys.A, N) # Move the impulse in the input to the initial conditions, and then # solve using lsim2(). ic = B + X0 Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs) return Tr, Yr
[ "def", "impulse2", "(", "system", ",", "X0", "=", "None", ",", "T", "=", "None", ",", "N", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "system", ",", "lti", ")", ":", "sys", "=", "system", ".", "_as_ss", "(", ")", "elif", "isinstance", "(", "system", ",", "dlti", ")", ":", "raise", "AttributeError", "(", "'impulse2 can only be used with continuous-time '", "'systems.'", ")", "else", ":", "sys", "=", "lti", "(", "*", "system", ")", ".", "_as_ss", "(", ")", "B", "=", "sys", ".", "B", "if", "B", ".", "shape", "[", "-", "1", "]", "!=", "1", ":", "raise", "ValueError", "(", "\"impulse2() requires a single-input system.\"", ")", "B", "=", "B", ".", "squeeze", "(", ")", "if", "X0", "is", "None", ":", "X0", "=", "zeros_like", "(", "B", ")", "if", "N", "is", "None", ":", "N", "=", "100", "if", "T", "is", "None", ":", "T", "=", "_default_response_times", "(", "sys", ".", "A", ",", "N", ")", "# Move the impulse in the input to the initial conditions, and then", "# solve using lsim2().", "ic", "=", "B", "+", "X0", "Tr", ",", "Yr", ",", "Xr", "=", "lsim2", "(", "sys", ",", "T", "=", "T", ",", "X0", "=", "ic", ",", "*", "*", "kwargs", ")", "return", "Tr", ",", "Yr" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/signal/ltisys.py#L2126-L2212
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_controls.py
python
Slider.GetThumbLength
(*args, **kwargs)
return _controls_.Slider_GetThumbLength(*args, **kwargs)
GetThumbLength(self) -> int
GetThumbLength(self) -> int
[ "GetThumbLength", "(", "self", ")", "-", ">", "int" ]
def GetThumbLength(*args, **kwargs): """GetThumbLength(self) -> int""" return _controls_.Slider_GetThumbLength(*args, **kwargs)
[ "def", "GetThumbLength", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "Slider_GetThumbLength", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_controls.py#L2891-L2893
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
tools/valgrind/gdb_helper.py
python
AddressTable.Add
(self, binary, address)
Register a lookup request.
Register a lookup request.
[ "Register", "a", "lookup", "request", "." ]
def Add(self, binary, address): ''' Register a lookup request. ''' if binary == '': logging.warn('adding address %s in empty binary?' % address) if binary in self._binaries: self._binaries[binary].append(address) else: self._binaries[binary] = [address] self._all_resolved = False
[ "def", "Add", "(", "self", ",", "binary", ",", "address", ")", ":", "if", "binary", "==", "''", ":", "logging", ".", "warn", "(", "'adding address %s in empty binary?'", "%", "address", ")", "if", "binary", "in", "self", ".", "_binaries", ":", "self", ".", "_binaries", "[", "binary", "]", ".", "append", "(", "address", ")", "else", ":", "self", ".", "_binaries", "[", "binary", "]", "=", "[", "address", "]", "self", ".", "_all_resolved", "=", "False" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/tools/valgrind/gdb_helper.py#L58-L66
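A usage sketch, assuming the class's constructor initializes _binaries to an empty dict and _all_resolved appropriately; the paths and addresses below are made up:

table = AddressTable()
table.Add('/usr/lib/libfoo.so', '0x7f0000001234')  # starts a new bucket for this binary
table.Add('/usr/lib/libfoo.so', '0x7f0000005678')  # appends to the same bucket
table.Add('', '0xdeadbeef')                        # logs 'adding address ... in empty binary?'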
SmileiPIC/Smilei
07dcb51200029e10f626e1546558c1ae7599c8b1
happi/_SmileiSimulation.py
python
SmileiSimulation.fieldInfo
(self, diag)
return dict( diagNumber=diagNumber, diagName=diagName, fields=fields )
Information on a specific Field diagnostic Parameters: ----------- diag: the number or name of a Field diagnostic Returns: -------- A dictionary containing: * "diagNumber": the diagnostic number * "diagName": the diagnostic name * "fields": list of the available fields in this diagnostic. In the case of `AMcylindrical` geometry, this is a dictionary with a list of modes for each field.
Information on a specific Field diagnostic Parameters: ----------- diag: the number or name of a Field diagnostic Returns: -------- A dictionary containing: * "diagNumber": the diagnostic number * "diagName": the diagnostic name * "fields": list of the available fields in this diagnostic. In the case of `AMcylindrical` geometry, this is a dictionary with a list of modes for each field.
[ "Information", "on", "a", "specific", "Field", "diagnostic", "Parameters", ":", "-----------", "diag", ":", "the", "number", "or", "name", "of", "a", "Field", "diagnostic", "Returns", ":", "--------", "A", "dictionnary", "containing", ":", "*", "diagNumber", ":", "the", "diagnostic", "number", "*", "diagName", ":", "the", "diagnostic", "name", "*", "fields", ":", "list", "of", "the", "available", "fields", "in", "this", "diagnostic", ".", "In", "the", "case", "of", "AMcylindrical", "geometry", "this", "is", "a", "dictionnary", "with", "a", "list", "of", "modes", "for", "each", "field", "." ]
def fieldInfo(self, diag): """ Information on a specific Field diagnostic Parameters: ----------- diag: the number or name of a Field diagnostic Returns: -------- A dictionnary containing: * "diagNumber": the diagnostic number * "diagName": the diagnostic name * "fields": list of the available fields in this diagnostic. In the case of `AMcylindrical` geometry, this is a dictionnary with a list of modes for each field. """ diag_numbers, diag_names = self.getDiags("Fields") if type(diag) is str: if diag not in diag_names: raise Exception("No Field diagnostic `"+diag+"` found") i = diag_names.index( diag ) else: if diag not in diag_numbers: raise Exception("No Field diagnostic #"+str(diag)+" found") i = diag_numbers.index( diag ) diagNumber = diag_numbers[i] diagName = diag_names[i] raw_fields = set() for path in self._results_path: file = path+self._os.sep+'Fields'+str(diagNumber)+'.h5' try: f = self._h5py.File(file, 'r') except Exception as e: continue values = f["data"].values() if len(values)==0: continue these_fields = set(next(iter(values)).keys()) raw_fields = (raw_fields & these_fields) or these_fields f.close() # Case of a cylindrical geometry if self.cylindrical: from ._Diagnostics.Field import Field fields = {} for f in raw_fields: fname, imode, envelope = Field._cylindricalMode(f) if fname not in fields: fields[fname] = [] fields[fname] += [int(imode)] else: fields = sorted(list(raw_fields)) return dict( diagNumber=diagNumber, diagName=diagName, fields=fields )
[ "def", "fieldInfo", "(", "self", ",", "diag", ")", ":", "diag_numbers", ",", "diag_names", "=", "self", ".", "getDiags", "(", "\"Fields\"", ")", "if", "type", "(", "diag", ")", "is", "str", ":", "if", "diag", "not", "in", "diag_names", ":", "raise", "Exception", "(", "\"No Field diagnostic `\"", "+", "diag", "+", "\"` found\"", ")", "i", "=", "diag_names", ".", "index", "(", "diag", ")", "else", ":", "if", "diag", "not", "in", "diag_numbers", ":", "raise", "Exception", "(", "\"No Field diagnostic #\"", "+", "str", "(", "diag", ")", "+", "\" found\"", ")", "i", "=", "diag_numbers", ".", "index", "(", "diag", ")", "diagNumber", "=", "diag_numbers", "[", "i", "]", "diagName", "=", "diag_names", "[", "i", "]", "raw_fields", "=", "set", "(", ")", "for", "path", "in", "self", ".", "_results_path", ":", "file", "=", "path", "+", "self", ".", "_os", ".", "sep", "+", "'Fields'", "+", "str", "(", "diagNumber", ")", "+", "'.h5'", "try", ":", "f", "=", "self", ".", "_h5py", ".", "File", "(", "file", ",", "'r'", ")", "except", "Exception", "as", "e", ":", "continue", "values", "=", "f", "[", "\"data\"", "]", ".", "values", "(", ")", "if", "len", "(", "values", ")", "==", "0", ":", "continue", "these_fields", "=", "set", "(", "next", "(", "iter", "(", "values", ")", ")", ".", "keys", "(", ")", ")", "raw_fields", "=", "(", "raw_fields", "&", "these_fields", ")", "or", "these_fields", "f", ".", "close", "(", ")", "# Case of a cylindrical geometry", "if", "self", ".", "cylindrical", ":", "from", ".", "_Diagnostics", ".", "Field", "import", "Field", "fields", "=", "{", "}", "for", "f", "in", "raw_fields", ":", "fname", ",", "imode", ",", "envelope", "=", "Field", ".", "_cylindricalMode", "(", "f", ")", "if", "fname", "not", "in", "fields", ":", "fields", "[", "fname", "]", "=", "[", "]", "fields", "[", "fname", "]", "+=", "[", "int", "(", "imode", ")", "]", "else", ":", "fields", "=", "sorted", "(", "list", "(", "raw_fields", ")", ")", "return", "dict", "(", "diagNumber", "=", "diagNumber", ",", "diagName", "=", "diagName", ",", "fields", "=", "fields", ")" ]
https://github.com/SmileiPIC/Smilei/blob/07dcb51200029e10f626e1546558c1ae7599c8b1/happi/_SmileiSimulation.py#L227-L281
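A hedged usage sketch with happi; the results path is hypothetical:

import happi

S = happi.Open('/path/to/smilei/run')       # hypothetical results directory
info = S.fieldInfo(0)                       # by number; S.fieldInfo("my diag") works by name
print(info['diagNumber'], info['diagName'])
print(info['fields'])                       # a list, or {field: [modes]} in AMcylindrical geometry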
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/oauthlib/oauth2/rfc6749/request_validator.py
python
RequestValidator.get_default_redirect_uri
(self, client_id, request, *args, **kwargs)
Get the default redirect URI for the client. :param client_id: Unicode client identifier :param request: The HTTP Request (oauthlib.common.Request) :rtype: The default redirect URI for the client Method is used by: - Authorization Code Grant - Implicit Grant
Get the default redirect URI for the client.
[ "Get", "the", "default", "redirect", "URI", "for", "the", "client", "." ]
def get_default_redirect_uri(self, client_id, request, *args, **kwargs): """Get the default redirect URI for the client. :param client_id: Unicode client identifier :param request: The HTTP Request (oauthlib.common.Request) :rtype: The default redirect URI for the client Method is used by: - Authorization Code Grant - Implicit Grant """ raise NotImplementedError('Subclasses must implement this method.')
[ "def", "get_default_redirect_uri", "(", "self", ",", "client_id", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError", "(", "'Subclasses must implement this method.'", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/oauthlib/oauth2/rfc6749/request_validator.py#L108-L119
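A minimal subclass sketch; the client store and its attribute are hypothetical placeholders for whatever persistence layer an implementation uses:

class MyValidator(RequestValidator):
    def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
        client = my_client_store.lookup(client_id)  # hypothetical storage call
        return client.default_redirect_uri          # hypothetical attribute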
microsoft/DirectXShaderCompiler
8348ff8d9e0287610ba05d3a828e10af981a1c05
utils/llvm-build/llvmbuild/main.py
python
add_magic_target_components
(parser, project, opts)
add_magic_target_components(project, opts) -> None Add the "magic" target based components to the project, which can only be determined based on the target configuration options. This currently is responsible for populating the required_libraries list of the "all-targets", "Native", "NativeCodeGen", and "Engine" components.
add_magic_target_components(project, opts) -> None
[ "add_magic_target_components", "(", "project", "opts", ")", "-", ">", "None" ]
def add_magic_target_components(parser, project, opts): """add_magic_target_components(project, opts) -> None Add the "magic" target based components to the project, which can only be determined based on the target configuration options. This currently is responsible for populating the required_libraries list of the "all-targets", "Native", "NativeCodeGen", and "Engine" components. """ # Determine the available targets. available_targets = dict((ci.name,ci) for ci in project.component_infos if ci.type_name == 'TargetGroup') # Find the configured native target. # We handle a few special cases of target names here for historical # reasons, as these are the names configure currently comes up with. native_target_name = { 'x86' : 'X86', 'x86_64' : 'X86', 'Unknown' : None }.get(opts.native_target, opts.native_target) if native_target_name is None: native_target = None else: native_target = available_targets.get(native_target_name) if native_target is None: parser.error("invalid native target: %r (not in project)" % ( opts.native_target,)) if native_target.type_name != 'TargetGroup': parser.error("invalid native target: %r (not a target)" % ( opts.native_target,)) # Find the list of targets to enable. if opts.enable_targets is None: enable_targets = available_targets.values() else: # We support both space separated and semi-colon separated lists. if opts.enable_targets == '': enable_target_names = [] elif ' ' in opts.enable_targets: enable_target_names = opts.enable_targets.split() else: enable_target_names = opts.enable_targets.split(';') enable_targets = [] for name in enable_target_names: if name == "None": continue # HLSL Change target = available_targets.get(name) if target is None: parser.error("invalid target to enable: %r (not in project)" % ( name,)) if target.type_name != 'TargetGroup': parser.error("invalid target to enable: %r (not a target)" % ( name,)) enable_targets.append(target) # Find the special library groups we are going to populate. We enforce that # these appear in the project (instead of just adding them) so that they at # least have an explicit representation in the project LLVMBuild files (and # comments explaining how they are populated). def find_special_group(name): info = info_map.get(name) if info is None: fatal("expected project to contain special %r component" % ( name,)) if info.type_name != 'LibraryGroup': fatal("special component %r should be a LibraryGroup" % ( name,)) if info.required_libraries: fatal("special component %r must have empty %r list" % ( name, 'required_libraries')) if info.add_to_library_groups: fatal("special component %r must have empty %r list" % ( name, 'add_to_library_groups')) info._is_special_group = True return info info_map = dict((ci.name, ci) for ci in project.component_infos) all_targets = find_special_group('all-targets') native_group = find_special_group('Native') native_codegen_group = find_special_group('NativeCodeGen') engine_group = find_special_group('Engine') # Set the enabled bit in all the target groups, and append to the # all-targets list. for ci in enable_targets: all_targets.required_libraries.append(ci.name) ci.enabled = True # If we have a native target, then that defines the native and # native_codegen libraries. if native_target and native_target.enabled: native_group.required_libraries.append(native_target.name) native_codegen_group.required_libraries.append( '%sCodeGen' % native_target.name) # If we have a native target with a JIT, use that for the engine. Otherwise, # use the interpreter. if native_target and native_target.enabled and native_target.has_jit: engine_group.required_libraries.append('MCJIT') engine_group.required_libraries.append(native_group.name) else: engine_group.required_libraries.append('Interpreter')
[ "def", "add_magic_target_components", "(", "parser", ",", "project", ",", "opts", ")", ":", "# Determine the available targets.", "available_targets", "=", "dict", "(", "(", "ci", ".", "name", ",", "ci", ")", "for", "ci", "in", "project", ".", "component_infos", "if", "ci", ".", "type_name", "==", "'TargetGroup'", ")", "# Find the configured native target.", "# We handle a few special cases of target names here for historical", "# reasons, as these are the names configure currently comes up with.", "native_target_name", "=", "{", "'x86'", ":", "'X86'", ",", "'x86_64'", ":", "'X86'", ",", "'Unknown'", ":", "None", "}", ".", "get", "(", "opts", ".", "native_target", ",", "opts", ".", "native_target", ")", "if", "native_target_name", "is", "None", ":", "native_target", "=", "None", "else", ":", "native_target", "=", "available_targets", ".", "get", "(", "native_target_name", ")", "if", "native_target", "is", "None", ":", "parser", ".", "error", "(", "\"invalid native target: %r (not in project)\"", "%", "(", "opts", ".", "native_target", ",", ")", ")", "if", "native_target", ".", "type_name", "!=", "'TargetGroup'", ":", "parser", ".", "error", "(", "\"invalid native target: %r (not a target)\"", "%", "(", "opts", ".", "native_target", ",", ")", ")", "# Find the list of targets to enable.", "if", "opts", ".", "enable_targets", "is", "None", ":", "enable_targets", "=", "available_targets", ".", "values", "(", ")", "else", ":", "# We support both space separated and semi-colon separated lists.", "if", "opts", ".", "enable_targets", "==", "''", ":", "enable_target_names", "=", "[", "]", "elif", "' '", "in", "opts", ".", "enable_targets", ":", "enable_target_names", "=", "opts", ".", "enable_targets", ".", "split", "(", ")", "else", ":", "enable_target_names", "=", "opts", ".", "enable_targets", ".", "split", "(", "';'", ")", "enable_targets", "=", "[", "]", "for", "name", "in", "enable_target_names", ":", "if", "name", "==", "\"None\"", ":", "continue", "# HLSL Change", "target", "=", "available_targets", ".", "get", "(", "name", ")", "if", "target", "is", "None", ":", "parser", ".", "error", "(", "\"invalid target to enable: %r (not in project)\"", "%", "(", "name", ",", ")", ")", "if", "target", ".", "type_name", "!=", "'TargetGroup'", ":", "parser", ".", "error", "(", "\"invalid target to enable: %r (not a target)\"", "%", "(", "name", ",", ")", ")", "enable_targets", ".", "append", "(", "target", ")", "# Find the special library groups we are going to populate. 
We enforce that", "# these appear in the project (instead of just adding them) so that they at", "# least have an explicit representation in the project LLVMBuild files (and", "# comments explaining how they are populated).", "def", "find_special_group", "(", "name", ")", ":", "info", "=", "info_map", ".", "get", "(", "name", ")", "if", "info", "is", "None", ":", "fatal", "(", "\"expected project to contain special %r component\"", "%", "(", "name", ",", ")", ")", "if", "info", ".", "type_name", "!=", "'LibraryGroup'", ":", "fatal", "(", "\"special component %r should be a LibraryGroup\"", "%", "(", "name", ",", ")", ")", "if", "info", ".", "required_libraries", ":", "fatal", "(", "\"special component %r must have empty %r list\"", "%", "(", "name", ",", "'required_libraries'", ")", ")", "if", "info", ".", "add_to_library_groups", ":", "fatal", "(", "\"special component %r must have empty %r list\"", "%", "(", "name", ",", "'add_to_library_groups'", ")", ")", "info", ".", "_is_special_group", "=", "True", "return", "info", "info_map", "=", "dict", "(", "(", "ci", ".", "name", ",", "ci", ")", "for", "ci", "in", "project", ".", "component_infos", ")", "all_targets", "=", "find_special_group", "(", "'all-targets'", ")", "native_group", "=", "find_special_group", "(", "'Native'", ")", "native_codegen_group", "=", "find_special_group", "(", "'NativeCodeGen'", ")", "engine_group", "=", "find_special_group", "(", "'Engine'", ")", "# Set the enabled bit in all the target groups, and append to the", "# all-targets list.", "for", "ci", "in", "enable_targets", ":", "all_targets", ".", "required_libraries", ".", "append", "(", "ci", ".", "name", ")", "ci", ".", "enabled", "=", "True", "# If we have a native target, then that defines the native and", "# native_codegen libraries.", "if", "native_target", "and", "native_target", ".", "enabled", ":", "native_group", ".", "required_libraries", ".", "append", "(", "native_target", ".", "name", ")", "native_codegen_group", ".", "required_libraries", ".", "append", "(", "'%sCodeGen'", "%", "native_target", ".", "name", ")", "# If we have a native target with a JIT, use that for the engine. Otherwise,", "# use the interpreter.", "if", "native_target", "and", "native_target", ".", "enabled", "and", "native_target", ".", "has_jit", ":", "engine_group", ".", "required_libraries", ".", "append", "(", "'MCJIT'", ")", "engine_group", ".", "required_libraries", ".", "append", "(", "native_group", ".", "name", ")", "else", ":", "engine_group", ".", "required_libraries", ".", "append", "(", "'Interpreter'", ")" ]
https://github.com/microsoft/DirectXShaderCompiler/blob/8348ff8d9e0287610ba05d3a828e10af981a1c05/utils/llvm-build/llvmbuild/main.py#L708-L815
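The historical-name normalization at the top of the function, shown in isolation; the dict.get(key, default) idiom passes unknown names through unchanged:

mapping = {'x86': 'X86', 'x86_64': 'X86', 'Unknown': None}
print(mapping.get('x86_64', 'x86_64'))    # 'X86' -- remapped for historical reasons
print(mapping.get('AArch64', 'AArch64'))  # 'AArch64' -- unknown names pass through
print(mapping.get('Unknown', 'Unknown'))  # None -- no native target configured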
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/plugins/codebrowser/codebrowser/tagload.py
python
_TagLoader.GetGenerator
(self, lang_id)
return None
Get the tag generator method for the given language id @param lang_id: Editra language identifier id @return: Generator Method or None
Get the tag generator method for the given language id @param lang_id: Editra language identifier id @return: Generator Method or None
[ "Get", "the", "tag", "generator", "method", "for", "the", "given", "language", "id", "@param", "lang_id", ":", "Editra", "language", "identifier", "id", "@return", ":", "Generator", "Method", "or", "None" ]
def GetGenerator(self, lang_id): """Get the tag generator method for the given language id @param lang_id: Editra language identifier id @return: Generator Method or None """ if lang_id in LOAD_MAP: modname = LOAD_MAP[lang_id] self.LoadModule(modname) if modname in _TagLoader._loaded: return TagLoader._loaded[modname].GenerateTags return None
[ "def", "GetGenerator", "(", "self", ",", "lang_id", ")", ":", "if", "lang_id", "in", "LOAD_MAP", ":", "modname", "=", "LOAD_MAP", "[", "lang_id", "]", "self", ".", "LoadModule", "(", "modname", ")", "if", "modname", "in", "_TagLoader", ".", "_loaded", ":", "return", "TagLoader", ".", "_loaded", "[", "modname", "]", ".", "GenerateTags", "return", "None" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/plugins/codebrowser/codebrowser/tagload.py#L84-L95
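Note the body looks up both _TagLoader._loaded and TagLoader._loaded; this only works if TagLoader resolves to an alias or instance of _TagLoader elsewhere in the module. A hedged usage sketch, with the language id and buffer hypothetical:

loader = _TagLoader()                    # assumes a default-constructible loader
gen = loader.GetGenerator(some_lang_id)  # some_lang_id: an Editra language identifier
if gen is not None:
    tags = gen(buffer)                   # GenerateTags signature assumed from the plugin API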
ceph/ceph
959663007321a369c83218414a29bd9dbc8bda3a
src/pybind/mgr/balancer/module.py
python
Module.plan_rm
(self, plan: str)
return (0, '', '')
Discard an optimization plan
Discard an optimization plan
[ "Discard", "an", "optimization", "plan" ]
def plan_rm(self, plan: str) -> Tuple[int, str, str]: """ Discard an optimization plan """ if plan in self.plans: del self.plans[plan] return (0, '', '')
[ "def", "plan_rm", "(", "self", ",", "plan", ":", "str", ")", "->", "Tuple", "[", "int", ",", "str", ",", "str", "]", ":", "if", "plan", "in", "self", ".", "plans", ":", "del", "self", ".", "plans", "[", "plan", "]", "return", "(", "0", ",", "''", ",", "''", ")" ]
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/pybind/mgr/balancer/module.py#L561-L567
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/nn/utils/spectral_norm.py
python
remove_spectral_norm
(module: T_module, name: str = 'weight')
return module
r"""Removes the spectral normalization reparameterization from a module. Args: module (Module): containing module name (str, optional): name of weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m)
r"""Removes the spectral normalization reparameterization from a module.
[ "r", "Removes", "the", "spectral", "normalization", "reparameterization", "from", "a", "module", "." ]
def remove_spectral_norm(module: T_module, name: str = 'weight') -> T_module: r"""Removes the spectral normalization reparameterization from a module. Args: module (Module): containing module name (str, optional): name of weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m) """ for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, SpectralNorm) and hook.name == name: hook.remove(module) del module._forward_pre_hooks[k] break else: raise ValueError("spectral_norm of '{}' not found in {}".format( name, module)) for k, hook in module._state_dict_hooks.items(): if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name: del module._state_dict_hooks[k] break for k, hook in module._load_state_dict_pre_hooks.items(): if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name: del module._load_state_dict_pre_hooks[k] break return module
[ "def", "remove_spectral_norm", "(", "module", ":", "T_module", ",", "name", ":", "str", "=", "'weight'", ")", "->", "T_module", ":", "for", "k", ",", "hook", "in", "module", ".", "_forward_pre_hooks", ".", "items", "(", ")", ":", "if", "isinstance", "(", "hook", ",", "SpectralNorm", ")", "and", "hook", ".", "name", "==", "name", ":", "hook", ".", "remove", "(", "module", ")", "del", "module", ".", "_forward_pre_hooks", "[", "k", "]", "break", "else", ":", "raise", "ValueError", "(", "\"spectral_norm of '{}' not found in {}\"", ".", "format", "(", "name", ",", "module", ")", ")", "for", "k", ",", "hook", "in", "module", ".", "_state_dict_hooks", ".", "items", "(", ")", ":", "if", "isinstance", "(", "hook", ",", "SpectralNormStateDictHook", ")", "and", "hook", ".", "fn", ".", "name", "==", "name", ":", "del", "module", ".", "_state_dict_hooks", "[", "k", "]", "break", "for", "k", ",", "hook", "in", "module", ".", "_load_state_dict_pre_hooks", ".", "items", "(", ")", ":", "if", "isinstance", "(", "hook", ",", "SpectralNormLoadStateDictPreHook", ")", "and", "hook", ".", "fn", ".", "name", "==", "name", ":", "del", "module", ".", "_load_state_dict_pre_hooks", "[", "k", "]", "break", "return", "module" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/nn/utils/spectral_norm.py#L284-L314
root-project/root
fcd3583bb14852bf2e8cd2415717cbaac0e75896
build/unix/makepchinput.py
python
getParams
()
return rootSrcDir, modules, legacyPyROOT == 'ON', clingetpchList
Extract parameters from the commandline, which looks like makePCHInput.py WWW XXX YYY ZZZ -- CXXFLAGS
Extract parameters from the commandline, which looks like makePCHInput.py WWW XXX YYY ZZZ -- CXXFLAGS
[ "Extract", "parameters", "from", "the", "commandline", "which", "looks", "like", "makePCHInput", ".", "py", "WWW", "XXX", "YYY", "ZZZ", "--", "CXXFLAGS" ]
def getParams(): """ Extract parameters from the commandline, which looks like makePCHInput.py WWW XXX YYY ZZZ -- CXXFLAGS """ argv = sys.argv rootSrcDir, modules, legacyPyROOT = argv[1:4] clingetpchList = argv[4:] return rootSrcDir, modules, legacyPyROOT == 'ON', clingetpchList
[ "def", "getParams", "(", ")", ":", "argv", "=", "sys", ".", "argv", "rootSrcDir", ",", "modules", ",", "legacyPyROOT", "=", "argv", "[", "1", ":", "4", "]", "clingetpchList", "=", "argv", "[", "4", ":", "]", "return", "rootSrcDir", ",", "modules", ",", "legacyPyROOT", "==", "'ON'", ",", "clingetpchList" ]
https://github.com/root-project/root/blob/fcd3583bb14852bf2e8cd2415717cbaac0e75896/build/unix/makepchinput.py#L36-L45
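A sketch of the argv layout this parser expects (values hypothetical); everything after the third argument lands in clingetpchList, and the legacy flag becomes a bool via the == 'ON' comparison:

import sys

sys.argv = ['makepchinput.py', '/src/root', 'Core;RIO', 'OFF', 'a.h', 'b.h']
rootSrcDir, modules, legacy, pch = getParams()
print(rootSrcDir, modules, legacy, pch)  # /src/root Core;RIO False ['a.h', 'b.h']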
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/apitools/apitools/gen/extended_descriptor.py
python
_ProtoRpcPrinter.__PrintAdditionalImports
(self, imports)
Print additional imports needed for protorpc.
Print additional imports needed for protorpc.
[ "Print", "additional", "imports", "needed", "for", "protorpc", "." ]
def __PrintAdditionalImports(self, imports): """Print additional imports needed for protorpc.""" google_imports = [x for x in imports if 'google' in x] other_imports = [x for x in imports if 'google' not in x] if other_imports: for import_ in sorted(other_imports): self.__printer(import_) self.__printer() # Note: If we ever were going to add imports from this package, we'd # need to sort those out and put them at the end. if google_imports: for import_ in sorted(google_imports): self.__printer(import_) self.__printer()
[ "def", "__PrintAdditionalImports", "(", "self", ",", "imports", ")", ":", "google_imports", "=", "[", "x", "for", "x", "in", "imports", "if", "'google'", "in", "x", "]", "other_imports", "=", "[", "x", "for", "x", "in", "imports", "if", "'google'", "not", "in", "x", "]", "if", "other_imports", ":", "for", "import_", "in", "sorted", "(", "other_imports", ")", ":", "self", ".", "__printer", "(", "import_", ")", "self", ".", "__printer", "(", ")", "# Note: If we ever were going to add imports from this package, we'd", "# need to sort those out and put them at the end.", "if", "google_imports", ":", "for", "import_", "in", "sorted", "(", "google_imports", ")", ":", "self", ".", "__printer", "(", "import_", ")", "self", ".", "__printer", "(", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/apitools/apitools/gen/extended_descriptor.py#L382-L395
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/mailcap.py
python
getcaps
()
return caps
Return a dictionary containing the mailcap database. The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain') to a list of dictionaries corresponding to mailcap entries. The list collects all the entries for that MIME type from all available mailcap files. Each dictionary contains key-value pairs for that MIME type, where the viewing command is stored with the key "view".
Return a dictionary containing the mailcap database.
[ "Return", "a", "dictionary", "containing", "the", "mailcap", "database", "." ]
def getcaps(): """Return a dictionary containing the mailcap database. The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain') to a list of dictionaries corresponding to mailcap entries. The list collects all the entries for that MIME type from all available mailcap files. Each dictionary contains key-value pairs for that MIME type, where the viewing command is stored with the key "view". """ caps = {} for mailcap in listmailcapfiles(): try: fp = open(mailcap, 'r') except IOError: continue with fp: morecaps = readmailcapfile(fp) for key, value in morecaps.iteritems(): if not key in caps: caps[key] = value else: caps[key] = caps[key] + value return caps
[ "def", "getcaps", "(", ")", ":", "caps", "=", "{", "}", "for", "mailcap", "in", "listmailcapfiles", "(", ")", ":", "try", ":", "fp", "=", "open", "(", "mailcap", ",", "'r'", ")", "except", "IOError", ":", "continue", "with", "fp", ":", "morecaps", "=", "readmailcapfile", "(", "fp", ")", "for", "key", ",", "value", "in", "morecaps", ".", "iteritems", "(", ")", ":", "if", "not", "key", "in", "caps", ":", "caps", "[", "key", "]", "=", "value", "else", ":", "caps", "[", "key", "]", "=", "caps", "[", "key", "]", "+", "value", "return", "caps" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/mailcap.py#L9-L32
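A usage sketch of the mailcap lookup; which MIME types appear depends on the mailcap files present on the machine:

caps = getcaps()
for entry in caps.get('text/plain', []):  # [] when no mailcap file defines the type
    print(entry.get('view'))              # the viewing command, stored under "view"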
lighttransport/nanort
74063967336311f54ede5dffdfa242123825033b
deps/cpplint.py
python
IsErrorSuppressedByNolint
(category, linenum)
return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set()))
Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment.
Returns true if the specified error category is suppressed on this line.
[ "Returns", "true", "if", "the", "specified", "error", "category", "is", "suppressed", "on", "this", "line", "." ]
def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment. """ return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set()))
[ "def", "IsErrorSuppressedByNolint", "(", "category", ",", "linenum", ")", ":", "return", "(", "linenum", "in", "_error_suppressions", ".", "get", "(", "category", ",", "set", "(", ")", ")", "or", "linenum", "in", "_error_suppressions", ".", "get", "(", "None", ",", "set", "(", ")", ")", ")" ]
https://github.com/lighttransport/nanort/blob/74063967336311f54ede5dffdfa242123825033b/deps/cpplint.py#L541-L554
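A sketch of the suppression map's shape with sample data; the None category holds lines carrying a bare NOLINT, which silences every category:

_error_suppressions = {
    'whitespace/tab': {12, 40},  # e.g. from NOLINT(whitespace/tab) comments
    None: {77},                  # a bare NOLINT on line 77
}
print(IsErrorSuppressedByNolint('whitespace/tab', 12))  # True
print(IsErrorSuppressedByNolint('build/include', 77))   # True -- bare NOLINT wins
print(IsErrorSuppressedByNolint('build/include', 12))   # False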
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/closure_linter/closure_linter/ecmalintrules.py
python
EcmaScriptLintRules._HandleStartBracket
(self, token, last_non_space_token)
Handles a token that is an open bracket. Args: token: The token to handle. last_non_space_token: The last token that was not a space.
Handles a token that is an open bracket.
[ "Handles", "a", "token", "that", "is", "an", "open", "bracket", "." ]
def _HandleStartBracket(self, token, last_non_space_token): """Handles a token that is an open bracket. Args: token: The token to handle. last_non_space_token: The last token that was not a space. """ if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and last_non_space_token and last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES): self._HandleError( errors.EXTRA_SPACE, 'Extra space before "["', token.previous, position=Position.All(token.previous.string)) # If the [ token is the first token in a line we shouldn't complain # about a missing space before [. This is because some Ecma script # languages allow syntax like: # [Annotation] # class MyClass {...} # So we don't want to blindly warn about missing spaces before [. # In the the future, when rules for computing exactly how many spaces # lines should be indented are added, then we can return errors for # [ tokens that are improperly indented. # For example: # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName = # [a,b,c]; # should trigger a proper indentation warning message as [ is not indented # by four spaces. elif (not token.IsFirstInLine() and token.previous and token.previous.type not in ( [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] + Type.EXPRESSION_ENDER_TYPES)): self._HandleError(errors.MISSING_SPACE, 'Missing space before "["', token, position=Position.AtBeginning())
[ "def", "_HandleStartBracket", "(", "self", ",", "token", ",", "last_non_space_token", ")", ":", "if", "(", "not", "token", ".", "IsFirstInLine", "(", ")", "and", "token", ".", "previous", ".", "type", "==", "Type", ".", "WHITESPACE", "and", "last_non_space_token", "and", "last_non_space_token", ".", "type", "in", "Type", ".", "EXPRESSION_ENDER_TYPES", ")", ":", "self", ".", "_HandleError", "(", "errors", ".", "EXTRA_SPACE", ",", "'Extra space before \"[\"'", ",", "token", ".", "previous", ",", "position", "=", "Position", ".", "All", "(", "token", ".", "previous", ".", "string", ")", ")", "# If the [ token is the first token in a line we shouldn't complain", "# about a missing space before [. This is because some Ecma script", "# languages allow syntax like:", "# [Annotation]", "# class MyClass {...}", "# So we don't want to blindly warn about missing spaces before [.", "# In the the future, when rules for computing exactly how many spaces", "# lines should be indented are added, then we can return errors for", "# [ tokens that are improperly indented.", "# For example:", "# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =", "# [a,b,c];", "# should trigger a proper indentation warning message as [ is not indented", "# by four spaces.", "elif", "(", "not", "token", ".", "IsFirstInLine", "(", ")", "and", "token", ".", "previous", "and", "token", ".", "previous", ".", "type", "not", "in", "(", "[", "Type", ".", "WHITESPACE", ",", "Type", ".", "START_PAREN", ",", "Type", ".", "START_BRACKET", "]", "+", "Type", ".", "EXPRESSION_ENDER_TYPES", ")", ")", ":", "self", ".", "_HandleError", "(", "errors", ".", "MISSING_SPACE", ",", "'Missing space before \"[\"'", ",", "token", ",", "position", "=", "Position", ".", "AtBeginning", "(", ")", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/closure_linter/closure_linter/ecmalintrules.py#L774-L806
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil.py
python
FakeShutilModule.rmtree
(self, path, ignore_errors=False, onerror=None)
Remove a directory and all its contents. Args: path: (str) Directory tree to remove. ignore_errors: (bool) unimplemented onerror: (func) unimplemented
Remove a directory and all its contents.
[ "Remove", "a", "directory", "and", "all", "its", "contents", "." ]
def rmtree(self, path, ignore_errors=False, onerror=None): """Remove a directory and all its contents. Args: path: (str) Directory tree to remove. ignore_errors: (bool) unimplemented onerror: (func) unimplemented """ self.filesystem.RemoveObject(path)
[ "def", "rmtree", "(", "self", ",", "path", ",", "ignore_errors", "=", "False", ",", "onerror", "=", "None", ")", ":", "self", ".", "filesystem", ".", "RemoveObject", "(", "path", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil.py#L67-L75
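A hedged usage sketch with the era-appropriate pyfakefs API; the CamelCase creator method is assumed from the same vintage of the library:

from pyfakefs import fake_filesystem, fake_filesystem_shutil

fs = fake_filesystem.FakeFilesystem()
fs.CreateDirectory('/data/cache')              # assumed pre-3.0 pyfakefs creator
shutil_mod = fake_filesystem_shutil.FakeShutilModule(fs)
shutil_mod.rmtree('/data/cache')               # delegates to filesystem.RemoveObject(path)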
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/npdatetime.py
python
is_not_nat
(builder, val)
return builder.icmp(lc.ICMP_NE, val, NAT)
Return a predicate which is true if *val* is not NaT.
Return a predicate which is true if *val* is not NaT.
[ "Return", "a", "predicate", "which", "is", "true", "if", "*", "val", "*", "is", "not", "NaT", "." ]
def is_not_nat(builder, val): """ Return a predicate which is true if *val* is not NaT. """ return builder.icmp(lc.ICMP_NE, val, NAT)
[ "def", "is_not_nat", "(", "builder", ",", "val", ")", ":", "return", "builder", ".", "icmp", "(", "lc", ".", "ICMP_NE", ",", "val", ",", "NAT", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/npdatetime.py#L92-L96
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/graphviz/py2/graphviz/files.py
python
File._view
(self, filepath, format, quiet)
Start the right viewer based on file format and platform.
Start the right viewer based on file format and platform.
[ "Start", "the", "right", "viewer", "based", "on", "file", "format", "and", "platform", "." ]
def _view(self, filepath, format, quiet): """Start the right viewer based on file format and platform.""" methodnames = [ '_view_%s_%s' % (format, backend.PLATFORM), '_view_%s' % backend.PLATFORM, ] for name in methodnames: view_method = getattr(self, name, None) if view_method is not None: break else: raise RuntimeError('%r has no built-in viewer support for %r' ' on %r platform' % (self.__class__, format, backend.PLATFORM)) view_method(filepath, quiet)
[ "def", "_view", "(", "self", ",", "filepath", ",", "format", ",", "quiet", ")", ":", "methodnames", "=", "[", "'_view_%s_%s'", "%", "(", "format", ",", "backend", ".", "PLATFORM", ")", ",", "'_view_%s'", "%", "backend", ".", "PLATFORM", ",", "]", "for", "name", "in", "methodnames", ":", "view_method", "=", "getattr", "(", "self", ",", "name", ",", "None", ")", "if", "view_method", "is", "not", "None", ":", "break", "else", ":", "raise", "RuntimeError", "(", "'%r has no built-in viewer support for %r'", "' on %r platform'", "%", "(", "self", ".", "__class__", ",", "format", ",", "backend", ".", "PLATFORM", ")", ")", "view_method", "(", "filepath", ",", "quiet", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/graphviz/py2/graphviz/files.py#L286-L300
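A toy version of the name-based dispatch pattern used above, with made-up format and platform values; it tries the most specific method name first, then the platform-only fallback:

class Viewer(object):
    def _view_png_darwin(self, filepath, quiet):
        print('would open', filepath)

    def view(self, filepath, fmt, platform, quiet=False):
        for name in ('_view_%s_%s' % (fmt, platform), '_view_%s' % platform):
            method = getattr(self, name, None)
            if method is not None:
                return method(filepath, quiet)
        raise RuntimeError('no viewer for %r on %r' % (fmt, platform))

Viewer().view('g.png', 'png', 'darwin')  # dispatches to _view_png_darwin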
weolar/miniblink49
1c4678db0594a4abde23d3ebbcc7cd13c3170777
v8_5_1/tools/stats-viewer.py
python
ChromeCounter.__init__
(self, data, name_offset, value_offset)
Create a new instance. Args: data: the shared data access object containing the counter name_offset: the byte offset of the start of this counter's name value_offset: the byte offset of the start of this counter's value
Create a new instance.
[ "Create", "a", "new", "instance", "." ]
def __init__(self, data, name_offset, value_offset): """Create a new instance. Args: data: the shared data access object containing the counter name_offset: the byte offset of the start of this counter's name value_offset: the byte offset of the start of this counter's value """ self.data = data self.name_offset = name_offset self.value_offset = value_offset
[ "def", "__init__", "(", "self", ",", "data", ",", "name_offset", ",", "value_offset", ")", ":", "self", ".", "data", "=", "data", "self", ".", "name_offset", "=", "name_offset", "self", ".", "value_offset", "=", "value_offset" ]
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/v8_5_1/tools/stats-viewer.py#L386-L396
Komnomnomnom/swigibpy
cfd307fdbfaffabc69a2dc037538d7e34a8b8daf
swigibpy.py
python
ContractDetails.__init__
(self)
__init__(ContractDetails self) -> ContractDetails
__init__(ContractDetails self) -> ContractDetails
[ "__init__", "(", "ContractDetails", "self", ")", "-", ">", "ContractDetails" ]
def __init__(self): """__init__(ContractDetails self) -> ContractDetails""" _swigibpy.ContractDetails_swiginit(self, _swigibpy.new_ContractDetails())
[ "def", "__init__", "(", "self", ")", ":", "_swigibpy", ".", "ContractDetails_swiginit", "(", "self", ",", "_swigibpy", ".", "new_ContractDetails", "(", ")", ")" ]
https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L1046-L1048
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
python
_Stream.seek
(self, pos=0)
return self.pos
Set the stream's file pointer to pos. Negative seeking is forbidden.
Set the stream's file pointer to pos. Negative seeking is forbidden.
[ "Set", "the", "stream", "s", "file", "pointer", "to", "pos", ".", "Negative", "seeking", "is", "forbidden", "." ]
def seek(self, pos=0): """Set the stream's file pointer to pos. Negative seeking is forbidden. """ if pos - self.pos >= 0: blocks, remainder = divmod(pos - self.pos, self.bufsize) for i in range(blocks): self.read(self.bufsize) self.read(remainder) else: raise StreamError("seeking backwards is not allowed") return self.pos
[ "def", "seek", "(", "self", ",", "pos", "=", "0", ")", ":", "if", "pos", "-", "self", ".", "pos", ">=", "0", ":", "blocks", ",", "remainder", "=", "divmod", "(", "pos", "-", "self", ".", "pos", ",", "self", ".", "bufsize", ")", "for", "i", "in", "range", "(", "blocks", ")", ":", "self", ".", "read", "(", "self", ".", "bufsize", ")", "self", ".", "read", "(", "remainder", ")", "else", ":", "raise", "StreamError", "(", "\"seeking backwards is not allowed\"", ")", "return", "self", ".", "pos" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L552-L563
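Worked numbers for the forward-only gap consumption: with a 16 KiB buffer, a seek from position 0 to 40000 reads two full buffers plus a 7232-byte remainder:

blocks, remainder = divmod(40000 - 0, 16384)
print(blocks, remainder)  # 2 7232 -- i.e. read(16384) twice, then read(7232)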
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/share/doc/python3.7/examples/Tools/scripts/patchcheck.py
python
n_files_str
(count)
return "{} file{}".format(count, "s" if count != 1 else "")
Return 'N file(s)' with the proper plurality on 'file'.
Return 'N file(s)' with the proper plurality on 'file'.
[ "Return", "N", "file", "(", "s", ")", "with", "the", "proper", "plurality", "on", "file", "." ]
def n_files_str(count): """Return 'N file(s)' with the proper plurality on 'file'.""" return "{} file{}".format(count, "s" if count != 1 else "")
[ "def", "n_files_str", "(", "count", ")", ":", "return", "\"{} file{}\"", ".", "format", "(", "count", ",", "\"s\"", "if", "count", "!=", "1", "else", "\"\"", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/share/doc/python3.7/examples/Tools/scripts/patchcheck.py#L24-L26
microsoft/checkedc-clang
a173fefde5d7877b7750e7ce96dd08cf18baebf2
llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
python
KScriptGenerator.updateCalledFunctionList
(self, callee)
Maintains a list of functions that will actually be called
Maintains a list of functions that will actually be called
[ "Maintains", "a", "list", "of", "functions", "that", "will", "actually", "be", "called" ]
def updateCalledFunctionList(self, callee): """Maintains a list of functions that will actually be called""" # Update the total call count self.updateTotalCallCount(callee) # If this function is already in the list, don't do anything else if callee in self.calledFunctions: return # Add this function to the list of those that will be called. self.calledFunctions.append(callee) # If this function calls other functions, add them too if callee in self.calledFunctionTable: for subCallee in self.calledFunctionTable[callee]: self.updateCalledFunctionList(subCallee)
[ "def", "updateCalledFunctionList", "(", "self", ",", "callee", ")", ":", "# Update the total call count", "self", ".", "updateTotalCallCount", "(", "callee", ")", "# If this function is already in the list, don't do anything else", "if", "callee", "in", "self", ".", "calledFunctions", ":", "return", "# Add this function to the list of those that will be called.", "self", ".", "calledFunctions", ".", "append", "(", "callee", ")", "# If this function calls other functions, add them too", "if", "callee", "in", "self", ".", "calledFunctionTable", ":", "for", "subCallee", "in", "self", ".", "calledFunctionTable", "[", "callee", "]", ":", "self", ".", "updateCalledFunctionList", "(", "subCallee", ")" ]
https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py#L68-L80
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/dataview.py
python
PyDataViewModel._setCallbackInfo
(*args, **kwargs)
return _dataview.PyDataViewModel__setCallbackInfo(*args, **kwargs)
_setCallbackInfo(self, PyObject self, PyObject _class)
_setCallbackInfo(self, PyObject self, PyObject _class)
[ "_setCallbackInfo", "(", "self", "PyObject", "self", "PyObject", "_class", ")" ]
def _setCallbackInfo(*args, **kwargs): """_setCallbackInfo(self, PyObject self, PyObject _class)""" return _dataview.PyDataViewModel__setCallbackInfo(*args, **kwargs)
[ "def", "_setCallbackInfo", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_dataview", ".", "PyDataViewModel__setCallbackInfo", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/dataview.py#L776-L778
htcondor/htcondor
4829724575176d1d6c936e4693dfd78a728569b0
src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/utils.py
python
chop
(s, n=1, d=None)
return spl
Chops initial words from a string and returns a list of them and the rest of the string. @param s: String to chop from. @type s: str or unicode @param n: Number of words to chop. @type n: int @param d: Optional delimiter. Any white-char by default. @type d: str or unicode @return: A list of n first words from the string followed by the rest of the string (C{[w1, w2, ..., wn, rest_of_string]}). @rtype: list of str or unicode
Chops initial words from a string and returns a list of them and the rest of the string.
[ "Chops", "initial", "words", "from", "a", "string", "and", "returns", "a", "list", "of", "them", "and", "the", "rest", "of", "the", "string", "." ]
def chop(s, n=1, d=None): '''Chops initial words from a string and returns a list of them and the rest of the string. @param s: String to chop from. @type s: str or unicode @param n: Number of words to chop. @type n: int @param d: Optional delimiter. Any white-char by default. @type d: str or unicode @return: A list of n first words from the string followed by the rest of the string (C{[w1, w2, ..., wn, rest_of_string]}). @rtype: list of str or unicode ''' spl = s.split(d, n) if len(spl) == n: spl.append(s[:0]) if len(spl) != n + 1: raise ValueError('chop: Could not chop %d words from \'%s\'' % (n, s)) return spl
[ "def", "chop", "(", "s", ",", "n", "=", "1", ",", "d", "=", "None", ")", ":", "spl", "=", "s", ".", "split", "(", "d", ",", "n", ")", "if", "len", "(", "spl", ")", "==", "n", ":", "spl", ".", "append", "(", "s", "[", ":", "0", "]", ")", "if", "len", "(", "spl", ")", "!=", "n", "+", "1", ":", "raise", "ValueError", "(", "'chop: Could not chop %d words from \\'%s\\''", "%", "(", "n", ",", "s", ")", ")", "return", "spl" ]
https://github.com/htcondor/htcondor/blob/4829724575176d1d6c936e4693dfd78a728569b0/src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/utils.py#L10-L29
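Usage sketch for chop(), assuming the function is imported; note that a string with exactly n words gets an empty remainder appended, and fewer than n words raises ValueError:

print(chop('alpha beta gamma delta', 2))  # ['alpha', 'beta', 'gamma delta']
print(chop('alpha beta', 2))              # ['alpha', 'beta', ''] -- empty rest
print(chop('a,b,c', 1, d=','))            # ['a', 'b,c'] with an explicit delimiter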
deepmind/open_spiel
4ca53bea32bb2875c7385d215424048ae92f78c8
open_spiel/python/algorithms/best_response.py
python
CPPBestResponsePolicy.decision_nodes
(self, parent_state)
Yields a (state, cf_prob) pair for each descendant decision node.
Yields a (state, cf_prob) pair for each descendant decision node.
[ "Yields", "a", "(", "state", "cf_prob", ")", "pair", "for", "each", "descendant", "decision", "node", "." ]
def decision_nodes(self, parent_state): """Yields a (state, cf_prob) pair for each descendant decision node.""" if not parent_state.is_terminal(): if parent_state.current_player() == self.best_responder_id: yield (parent_state, 1.0) for action, p_action in self.transitions(parent_state): for state, p_state in self.decision_nodes(parent_state.child(action)): yield (state, p_state * p_action)
[ "def", "decision_nodes", "(", "self", ",", "parent_state", ")", ":", "if", "not", "parent_state", ".", "is_terminal", "(", ")", ":", "if", "parent_state", ".", "current_player", "(", ")", "==", "self", ".", "best_responder_id", ":", "yield", "(", "parent_state", ",", "1.0", ")", "for", "action", ",", "p_action", "in", "self", ".", "transitions", "(", "parent_state", ")", ":", "for", "state", ",", "p_state", "in", "self", ".", "decision_nodes", "(", "parent_state", ".", "child", "(", "action", ")", ")", ":", "yield", "(", "state", ",", "p_state", "*", "p_action", ")" ]
https://github.com/deepmind/open_spiel/blob/4ca53bea32bb2875c7385d215424048ae92f78c8/open_spiel/python/algorithms/best_response.py#L290-L297
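The recursion is easier to see on a toy tree; this generic sketch (not open_spiel's actual state API) yields each of one player's decision nodes together with an accumulated reach probability:

def toy_decision_nodes(node, player, prob=1.0):
    # node: {'player': int, 'children': [(child, action_prob), ...]} or {'terminal': True}
    if node.get('terminal'):
        return
    if node['player'] == player:
        yield node, prob
    for child, p in node['children']:
        yield from toy_decision_nodes(child, player, prob * p)

leaf = {'terminal': True}
tree = {'player': 0, 'children': [
    ({'player': 0, 'children': [(leaf, 1.0)]}, 0.3),
    ({'player': 1, 'children': [(leaf, 1.0)]}, 0.7),
]}
print([p for _, p in toy_decision_nodes(tree, player=0)])  # [1.0, 0.3]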
acbull/Unbiased_LambdaMart
7c39abe5caa18ca07df2d23c2db392916d92956c
evaluation/scripts/click_models.py
python
ClickModel.setClickProb
(self, neg_click_prob, pos_click_prob, relevance_grading_num)
Generate noisy click probability based on the relevance grading number. Inspired by ERR. Parameters ---------- neg_click_prob : [type] [description] pos_click_prob : [type] [description] relevance_grading_num : [type] [description]
Generate noisy click probability based on the relevance grading number. Inspired by ERR.
[ "Generate", "noisy", "click", "probability", "based", "on", "the", "relevance", "grading", "number", ".", "Inspired", "by", "ERR", "." ]
def setClickProb(self, neg_click_prob, pos_click_prob, relevance_grading_num): """Generate noisy click probability based on the relevance grading number. Inspired by ERR. Parameters ---------- neg_click_prob : [type] [description] pos_click_prob : [type] [description] relevance_grading_num : [type] [description] """ b = (pos_click_prob - neg_click_prob) / \ (pow(2, relevance_grading_num) - 1) a = neg_click_prob - b self.click_prob = [ a + pow(2, i)*b for i in range(relevance_grading_num+1)]
[ "def", "setClickProb", "(", "self", ",", "neg_click_prob", ",", "pos_click_prob", ",", "relevance_grading_num", ")", ":", "b", "=", "(", "pos_click_prob", "-", "neg_click_prob", ")", "/", "(", "pow", "(", "2", ",", "relevance_grading_num", ")", "-", "1", ")", "a", "=", "neg_click_prob", "-", "b", "self", ".", "click_prob", "=", "[", "a", "+", "pow", "(", "2", ",", "i", ")", "*", "b", "for", "i", "in", "range", "(", "relevance_grading_num", "+", "1", ")", "]" ]
https://github.com/acbull/Unbiased_LambdaMart/blob/7c39abe5caa18ca07df2d23c2db392916d92956c/evaluation/scripts/click_models.py#L78-L96
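Worked numbers for the interpolation above: the endpoints are pinned to the negative and positive click probabilities, and intermediate grades grow geometrically as a + 2**i * b. For example, with neg=0.1, pos=0.9 and 4 relevance grades:

neg, pos, grades = 0.1, 0.9, 4
b = (pos - neg) / (2 ** grades - 1)   # 0.8 / 15
a = neg - b
click_prob = [a + (2 ** i) * b for i in range(grades + 1)]
print([round(p, 4) for p in click_prob])  # [0.1, 0.1533, 0.26, 0.4733, 0.9]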
cocos-creator/engine-native
984c4c9f5838253313b44ccd429bd8fac4ec8a6a
tools/bindings-generator/generator.py
python
Generator.sorted_classes
(self)
return no_dupes
sorted classes in order of inheritance
sorted classes in order of inheritance
[ "sorted", "classes", "in", "order", "of", "inheritance" ]
def sorted_classes(self): ''' sorted classes in order of inheritance ''' sorted_list = [] for class_name in iter(self.generated_classes.keys()): nclass = self.generated_classes[class_name] sorted_list += self._sorted_parents(nclass) # remove dupes from the list no_dupes = [] [no_dupes.append(i) for i in sorted_list if not no_dupes.count(i)] return no_dupes
[ "def", "sorted_classes", "(", "self", ")", ":", "sorted_list", "=", "[", "]", "for", "class_name", "in", "iter", "(", "self", ".", "generated_classes", ".", "keys", "(", ")", ")", ":", "nclass", "=", "self", ".", "generated_classes", "[", "class_name", "]", "sorted_list", "+=", "self", ".", "_sorted_parents", "(", "nclass", ")", "# remove dupes from the list", "no_dupes", "=", "[", "]", "[", "no_dupes", ".", "append", "(", "i", ")", "for", "i", "in", "sorted_list", "if", "not", "no_dupes", ".", "count", "(", "i", ")", "]", "return", "no_dupes" ]
https://github.com/cocos-creator/engine-native/blob/984c4c9f5838253313b44ccd429bd8fac4ec8a6a/tools/bindings-generator/generator.py#L1672-L1683
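The [no_dupes.append(i) ...] idiom above de-duplicates via list-comprehension side effects and rescans the list with count(), which is O(n**2); on Python 3.7+ an order-preserving alternative is dict.fromkeys:

sorted_list = ['Base', 'Mid', 'Base', 'Leaf', 'Mid']
no_dupes = list(dict.fromkeys(sorted_list))  # ['Base', 'Mid', 'Leaf'], O(n)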
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/nn/probability/distribution/_utils/utils.py
python
cast_type_for_device
(dtype)
return dtype
use the alternative dtype supported by the device. Args: dtype (mindspore.dtype): input dtype. Returns: mindspore.dtype.
use the alternative dtype supported by the device. Args: dtype (mindspore.dtype): input dtype. Returns: mindspore.dtype.
[ "use", "the", "alternative", "dtype", "supported", "by", "the", "device", ".", "Args", ":", "dtype", "(", "mindspore", ".", "dtype", ")", ":", "input", "dtype", ".", "Returns", ":", "mindspore", ".", "dtype", "." ]
def cast_type_for_device(dtype): """ use the alternative dtype supported by the device. Args: dtype (mindspore.dtype): input dtype. Returns: mindspore.dtype. """ if context.get_context("device_target") == "GPU": if dtype in mstype.uint_type or dtype == mstype.int8: return mstype.int16 if dtype == mstype.int64: return mstype.int32 if dtype == mstype.float64: return mstype.float32 return dtype
[ "def", "cast_type_for_device", "(", "dtype", ")", ":", "if", "context", ".", "get_context", "(", "\"device_target\"", ")", "==", "\"GPU\"", ":", "if", "dtype", "in", "mstype", ".", "uint_type", "or", "dtype", "==", "mstype", ".", "int8", ":", "return", "mstype", ".", "int16", "if", "dtype", "==", "mstype", ".", "int64", ":", "return", "mstype", ".", "int32", "if", "dtype", "==", "mstype", ".", "float64", ":", "return", "mstype", ".", "float32", "return", "dtype" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/probability/distribution/_utils/utils.py#L56-L71
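The substitutions, spelled out; running this requires a MindSpore install with a GPU target and the helper in scope (the dtype names are real mindspore.dtype members):

# On GPU: unsigned ints and int8 -> int16, int64 -> int32, float64 -> float32;
# every other dtype, and every other device target, passes through unchanged.
from mindspore import dtype as mstype
print(cast_type_for_device(mstype.int64))    # mstype.int32 on a GPU target
print(cast_type_for_device(mstype.float16))  # unchanged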
OSGeo/gdal
3748fc4ba4fba727492774b2b908a2130c864a83
swig/python/osgeo/ogr.py
python
FieldDefn.SetDomainName
(self, *args)
return _ogr.FieldDefn_SetDomainName(self, *args)
r""" SetDomainName(FieldDefn self, char const * name) void OGR_Fld_SetDomainName(OGRFieldDefnH hDefn, const char *pszFieldName) Set the name of the field domain for this field. Field domains ( OGRFieldDomain) are attached at the GDALDataset level. This method is the same as the C++ method OGRFieldDefn::SetDomainName(). Parameters: ----------- hDefn: handle to the field definition pszFieldName: Field domain name. GDAL 3.3
r""" SetDomainName(FieldDefn self, char const * name) void OGR_Fld_SetDomainName(OGRFieldDefnH hDefn, const char *pszFieldName)
[ "r", "SetDomainName", "(", "FieldDefn", "self", "char", "const", "*", "name", ")", "void", "OGR_Fld_SetDomainName", "(", "OGRFieldDefnH", "hDefn", "const", "char", "*", "pszFieldName", ")" ]
def SetDomainName(self, *args): r""" SetDomainName(FieldDefn self, char const * name) void OGR_Fld_SetDomainName(OGRFieldDefnH hDefn, const char *pszFieldName) Set the name of the field domain for this field. Field domains ( OGRFieldDomain) are attached at the GDALDataset level. This method is the same as the C++ method OGRFieldDefn::SetDomainName(). Parameters: ----------- hDefn: handle to the field definition pszFieldName: Field domain name. GDAL 3.3 """ return _ogr.FieldDefn_SetDomainName(self, *args)
[ "def", "SetDomainName", "(", "self", ",", "*", "args", ")", ":", "return", "_ogr", ".", "FieldDefn_SetDomainName", "(", "self", ",", "*", "args", ")" ]
https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/osgeo/ogr.py#L5546-L5568
domino-team/openwrt-cc
8b181297c34d14d3ca521cc9f31430d561dbc688
package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/tools/gyp/pylib/gyp/xcode_emulation.py
python
XcodeSettings.GetCflagsObjC
(self, configname)
return cflags_objc
Returns flags that need to be added to .m compilations.
Returns flags that need to be added to .m compilations.
[ "Returns", "flags", "that", "need", "to", "be", "added", "to", ".", "m", "compilations", "." ]
def GetCflagsObjC(self, configname): """Returns flags that need to be added to .m compilations.""" self.configname = configname cflags_objc = [] self._AddObjectiveCGarbageCollectionFlags(cflags_objc) self._AddObjectiveCARCFlags(cflags_objc) self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc) self.configname = None return cflags_objc
[ "def", "GetCflagsObjC", "(", "self", ",", "configname", ")", ":", "self", ".", "configname", "=", "configname", "cflags_objc", "=", "[", "]", "self", ".", "_AddObjectiveCGarbageCollectionFlags", "(", "cflags_objc", ")", "self", ".", "_AddObjectiveCARCFlags", "(", "cflags_objc", ")", "self", ".", "_AddObjectiveCMissingPropertySynthesisFlags", "(", "cflags_objc", ")", "self", ".", "configname", "=", "None", "return", "cflags_objc" ]
https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/tools/gyp/pylib/gyp/xcode_emulation.py#L652-L660
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py
python
SBUnixSignals.IsValid
(self)
return _lldb.SBUnixSignals_IsValid(self)
IsValid(self) -> bool
IsValid(self) -> bool
[ "IsValid", "(", "self", ")", "-", ">", "bool" ]
def IsValid(self): """IsValid(self) -> bool""" return _lldb.SBUnixSignals_IsValid(self)
[ "def", "IsValid", "(", "self", ")", ":", "return", "_lldb", ".", "SBUnixSignals_IsValid", "(", "self", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L12717-L12719
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/prompt-toolkit/py2/prompt_toolkit/contrib/telnet/protocol.py
python
TelnetProtocolParser.wont_received
(self, data)
Received telnet WONT command.
Received telnet WONT command.
[ "Received", "telnet", "WONT", "command", "." ]
def wont_received(self, data): """ Received telnet WONT command. """ logger.info('WONT %r', data)
[ "def", "wont_received", "(", "self", ",", "data", ")", ":", "logger", ".", "info", "(", "'WONT %r'", ",", "data", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py2/prompt_toolkit/contrib/telnet/protocol.py#L82-L84
zju3dv/clean-pvnet
5870c509e3cc205e1bb28910a7b1a9a3c8add9a8
lib/csrc/ransac_voting/ransac_voting_gpu.py
python
ransac_voting_layer_v3
(mask, vertex, round_hyp_num, inlier_thresh=0.999, confidence=0.99, max_iter=20, min_num=5, max_num=30000)
return batch_win_pts
:param mask: [b,h,w] :param vertex: [b,h,w,vn,2] :param round_hyp_num: :param inlier_thresh: :return: [b,vn,2]
:param mask: [b,h,w] :param vertex: [b,h,w,vn,2] :param round_hyp_num: :param inlier_thresh: :return: [b,vn,2]
[ ":", "param", "mask", ":", "[", "b", "h", "w", "]", ":", "param", "vertex", ":", "[", "b", "h", "w", "vn", "2", "]", ":", "param", "round_hyp_num", ":", ":", "param", "inlier_thresh", ":", ":", "return", ":", "[", "b", "vn", "2", "]" ]
def ransac_voting_layer_v3(mask, vertex, round_hyp_num, inlier_thresh=0.999, confidence=0.99, max_iter=20, min_num=5, max_num=30000): ''' :param mask: [b,h,w] :param vertex: [b,h,w,vn,2] :param round_hyp_num: :param inlier_thresh: :return: [b,vn,2] ''' b, h, w, vn, _ = vertex.shape batch_win_pts = [] for bi in range(b): hyp_num = 0 cur_mask = (mask[bi]).byte() foreground_num = torch.sum(cur_mask) # if too few points, just skip it if foreground_num < min_num: win_pts = torch.zeros([1, vn, 2], dtype=torch.float32, device=mask.device) batch_win_pts.append(win_pts) # [1,vn,2] continue # if too many inliers, we randomly down sample it if foreground_num > max_num: selection = torch.zeros(cur_mask.shape, dtype=torch.float32, device=mask.device).uniform_(0, 1) selected_mask = (selection < (max_num / foreground_num.float())).byte() cur_mask *= selected_mask coords = torch.nonzero(cur_mask).float() # [tn,2] coords = coords[:, [1, 0]] direct = vertex[bi].masked_select(torch.unsqueeze(torch.unsqueeze(cur_mask, 2), 3)) # [tn,vn,2] direct = direct.view([coords.shape[0], vn, 2]) tn = coords.shape[0] idxs = torch.zeros([round_hyp_num, vn, 2], dtype=torch.int32, device=mask.device).random_(0, direct.shape[0]) all_win_ratio = torch.zeros([vn], dtype=torch.float32, device=mask.device) all_win_pts = torch.zeros([vn, 2], dtype=torch.float32, device=mask.device) cur_iter = 0 while True: # generate hypothesis cur_hyp_pts = ransac_voting.generate_hypothesis(direct, coords, idxs) # [hn,vn,2] # voting for hypothesis cur_inlier = torch.zeros([round_hyp_num, vn, tn], dtype=torch.uint8, device=mask.device) ransac_voting.voting_for_hypothesis(direct, coords, cur_hyp_pts, cur_inlier, inlier_thresh) # [hn,vn,tn] # find max cur_inlier_counts = torch.sum(cur_inlier, 2) # [hn,vn] cur_win_counts, cur_win_idx = torch.max(cur_inlier_counts, 0) # [vn] cur_win_pts = cur_hyp_pts[cur_win_idx, torch.arange(vn)] cur_win_ratio = cur_win_counts.float() / tn # update best point larger_mask = all_win_ratio < cur_win_ratio all_win_pts[larger_mask, :] = cur_win_pts[larger_mask, :] all_win_ratio[larger_mask] = cur_win_ratio[larger_mask] # check confidence hyp_num += round_hyp_num cur_iter += 1 cur_min_ratio = torch.min(all_win_ratio) if (1 - (1 - cur_min_ratio ** 2) ** hyp_num) > confidence or cur_iter > max_iter: break # compute mean intersection again normal = torch.zeros_like(direct) # [tn,vn,2] normal[:, :, 0] = direct[:, :, 1] normal[:, :, 1] = -direct[:, :, 0] all_inlier = torch.zeros([1, vn, tn], dtype=torch.uint8, device=mask.device) all_win_pts = torch.unsqueeze(all_win_pts, 0) # [1,vn,2] ransac_voting.voting_for_hypothesis(direct, coords, all_win_pts, all_inlier, inlier_thresh) # [1,vn,tn] # coords [tn,2] normal [vn,tn,2] all_inlier = torch.squeeze(all_inlier.float(), 0) # [vn,tn] normal = normal.permute(1, 0, 2) # [vn,tn,2] normal = normal*torch.unsqueeze(all_inlier, 2) # [vn,tn,2] outlier is all zero b = torch.sum(normal*torch.unsqueeze(coords, 0), 2) # [vn,tn] ATA = torch.matmul(normal.permute(0, 2, 1), normal) # [vn,2,2] ATb = torch.sum(normal*torch.unsqueeze(b, 2), 1) # [vn,2] # try: all_win_pts = torch.matmul(b_inv(ATA), torch.unsqueeze(ATb, 2)) # [vn,2,1] # except: # __import__('ipdb').set_trace() batch_win_pts.append(all_win_pts[None,:,:, 0]) batch_win_pts = torch.cat(batch_win_pts) return batch_win_pts
[ "def", "ransac_voting_layer_v3", "(", "mask", ",", "vertex", ",", "round_hyp_num", ",", "inlier_thresh", "=", "0.999", ",", "confidence", "=", "0.99", ",", "max_iter", "=", "20", ",", "min_num", "=", "5", ",", "max_num", "=", "30000", ")", ":", "b", ",", "h", ",", "w", ",", "vn", ",", "_", "=", "vertex", ".", "shape", "batch_win_pts", "=", "[", "]", "for", "bi", "in", "range", "(", "b", ")", ":", "hyp_num", "=", "0", "cur_mask", "=", "(", "mask", "[", "bi", "]", ")", ".", "byte", "(", ")", "foreground_num", "=", "torch", ".", "sum", "(", "cur_mask", ")", "# if too few points, just skip it", "if", "foreground_num", "<", "min_num", ":", "win_pts", "=", "torch", ".", "zeros", "(", "[", "1", ",", "vn", ",", "2", "]", ",", "dtype", "=", "torch", ".", "float32", ",", "device", "=", "mask", ".", "device", ")", "batch_win_pts", ".", "append", "(", "win_pts", ")", "# [1,vn,2]", "continue", "# if too many inliers, we randomly down sample it", "if", "foreground_num", ">", "max_num", ":", "selection", "=", "torch", ".", "zeros", "(", "cur_mask", ".", "shape", ",", "dtype", "=", "torch", ".", "float32", ",", "device", "=", "mask", ".", "device", ")", ".", "uniform_", "(", "0", ",", "1", ")", "selected_mask", "=", "(", "selection", "<", "(", "max_num", "/", "foreground_num", ".", "float", "(", ")", ")", ")", ".", "byte", "(", ")", "cur_mask", "*=", "selected_mask", "coords", "=", "torch", ".", "nonzero", "(", "cur_mask", ")", ".", "float", "(", ")", "# [tn,2]", "coords", "=", "coords", "[", ":", ",", "[", "1", ",", "0", "]", "]", "direct", "=", "vertex", "[", "bi", "]", ".", "masked_select", "(", "torch", ".", "unsqueeze", "(", "torch", ".", "unsqueeze", "(", "cur_mask", ",", "2", ")", ",", "3", ")", ")", "# [tn,vn,2]", "direct", "=", "direct", ".", "view", "(", "[", "coords", ".", "shape", "[", "0", "]", ",", "vn", ",", "2", "]", ")", "tn", "=", "coords", ".", "shape", "[", "0", "]", "idxs", "=", "torch", ".", "zeros", "(", "[", "round_hyp_num", ",", "vn", ",", "2", "]", ",", "dtype", "=", "torch", ".", "int32", ",", "device", "=", "mask", ".", "device", ")", ".", "random_", "(", "0", ",", "direct", ".", "shape", "[", "0", "]", ")", "all_win_ratio", "=", "torch", ".", "zeros", "(", "[", "vn", "]", ",", "dtype", "=", "torch", ".", "float32", ",", "device", "=", "mask", ".", "device", ")", "all_win_pts", "=", "torch", ".", "zeros", "(", "[", "vn", ",", "2", "]", ",", "dtype", "=", "torch", ".", "float32", ",", "device", "=", "mask", ".", "device", ")", "cur_iter", "=", "0", "while", "True", ":", "# generate hypothesis", "cur_hyp_pts", "=", "ransac_voting", ".", "generate_hypothesis", "(", "direct", ",", "coords", ",", "idxs", ")", "# [hn,vn,2]", "# voting for hypothesis", "cur_inlier", "=", "torch", ".", "zeros", "(", "[", "round_hyp_num", ",", "vn", ",", "tn", "]", ",", "dtype", "=", "torch", ".", "uint8", ",", "device", "=", "mask", ".", "device", ")", "ransac_voting", ".", "voting_for_hypothesis", "(", "direct", ",", "coords", ",", "cur_hyp_pts", ",", "cur_inlier", ",", "inlier_thresh", ")", "# [hn,vn,tn]", "# find max", "cur_inlier_counts", "=", "torch", ".", "sum", "(", "cur_inlier", ",", "2", ")", "# [hn,vn]", "cur_win_counts", ",", "cur_win_idx", "=", "torch", ".", "max", "(", "cur_inlier_counts", ",", "0", ")", "# [vn]", "cur_win_pts", "=", "cur_hyp_pts", "[", "cur_win_idx", ",", "torch", ".", "arange", "(", "vn", ")", "]", "cur_win_ratio", "=", "cur_win_counts", ".", "float", "(", ")", "/", "tn", "# update best point", "larger_mask", "=", "all_win_ratio", "<", "cur_win_ratio", "all_win_pts", 
"[", "larger_mask", ",", ":", "]", "=", "cur_win_pts", "[", "larger_mask", ",", ":", "]", "all_win_ratio", "[", "larger_mask", "]", "=", "cur_win_ratio", "[", "larger_mask", "]", "# check confidence", "hyp_num", "+=", "round_hyp_num", "cur_iter", "+=", "1", "cur_min_ratio", "=", "torch", ".", "min", "(", "all_win_ratio", ")", "if", "(", "1", "-", "(", "1", "-", "cur_min_ratio", "**", "2", ")", "**", "hyp_num", ")", ">", "confidence", "or", "cur_iter", ">", "max_iter", ":", "break", "# compute mean intersection again", "normal", "=", "torch", ".", "zeros_like", "(", "direct", ")", "# [tn,vn,2]", "normal", "[", ":", ",", ":", ",", "0", "]", "=", "direct", "[", ":", ",", ":", ",", "1", "]", "normal", "[", ":", ",", ":", ",", "1", "]", "=", "-", "direct", "[", ":", ",", ":", ",", "0", "]", "all_inlier", "=", "torch", ".", "zeros", "(", "[", "1", ",", "vn", ",", "tn", "]", ",", "dtype", "=", "torch", ".", "uint8", ",", "device", "=", "mask", ".", "device", ")", "all_win_pts", "=", "torch", ".", "unsqueeze", "(", "all_win_pts", ",", "0", ")", "# [1,vn,2]", "ransac_voting", ".", "voting_for_hypothesis", "(", "direct", ",", "coords", ",", "all_win_pts", ",", "all_inlier", ",", "inlier_thresh", ")", "# [1,vn,tn]", "# coords [tn,2] normal [vn,tn,2]", "all_inlier", "=", "torch", ".", "squeeze", "(", "all_inlier", ".", "float", "(", ")", ",", "0", ")", "# [vn,tn]", "normal", "=", "normal", ".", "permute", "(", "1", ",", "0", ",", "2", ")", "# [vn,tn,2]", "normal", "=", "normal", "*", "torch", ".", "unsqueeze", "(", "all_inlier", ",", "2", ")", "# [vn,tn,2] outlier is all zero", "b", "=", "torch", ".", "sum", "(", "normal", "*", "torch", ".", "unsqueeze", "(", "coords", ",", "0", ")", ",", "2", ")", "# [vn,tn]", "ATA", "=", "torch", ".", "matmul", "(", "normal", ".", "permute", "(", "0", ",", "2", ",", "1", ")", ",", "normal", ")", "# [vn,2,2]", "ATb", "=", "torch", ".", "sum", "(", "normal", "*", "torch", ".", "unsqueeze", "(", "b", ",", "2", ")", ",", "1", ")", "# [vn,2]", "# try:", "all_win_pts", "=", "torch", ".", "matmul", "(", "b_inv", "(", "ATA", ")", ",", "torch", ".", "unsqueeze", "(", "ATb", ",", "2", ")", ")", "# [vn,2,1]", "# except:", "# __import__('ipdb').set_trace()", "batch_win_pts", ".", "append", "(", "all_win_pts", "[", "None", ",", ":", ",", ":", ",", "0", "]", ")", "batch_win_pts", "=", "torch", ".", "cat", "(", "batch_win_pts", ")", "return", "batch_win_pts" ]
https://github.com/zju3dv/clean-pvnet/blob/5870c509e3cc205e1bb28910a7b1a9a3c8add9a8/lib/csrc/ransac_voting/ransac_voting_gpu.py#L112-L199
yuxng/PoseCNN
9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04
lib/datasets/ycb_single.py
python
ycb_single.image_path_at
(self, i)
return self.image_path_from_index(self.image_index[i])
Return the absolute path to image i in the image sequence.
Return the absolute path to image i in the image sequence.
[ "Return", "the", "absolute", "path", "to", "image", "i", "in", "the", "image", "sequence", "." ]
def image_path_at(self, i): """ Return the absolute path to image i in the image sequence. """ return self.image_path_from_index(self.image_index[i])
[ "def", "image_path_at", "(", "self", ",", "i", ")", ":", "return", "self", ".", "image_path_from_index", "(", "self", ".", "image_index", "[", "i", "]", ")" ]
https://github.com/yuxng/PoseCNN/blob/9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04/lib/datasets/ycb_single.py#L73-L77
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/sparse/frame.py
python
SparseDataFrame.applymap
(self, func)
return self.apply(lambda x: lmap(func, x))
Apply a function to a DataFrame that is intended to operate elementwise, i.e. like doing map(func, series) for each series in the DataFrame Parameters ---------- func : function Python function, returns a single value from a single value Returns ------- applied : DataFrame
Apply a function to a DataFrame that is intended to operate elementwise, i.e. like doing map(func, series) for each series in the DataFrame
[ "Apply", "a", "function", "to", "a", "DataFrame", "that", "is", "intended", "to", "operate", "elementwise", "i", ".", "e", ".", "like", "doing", "map", "(", "func", "series", ")", "for", "each", "series", "in", "the", "DataFrame" ]
def applymap(self, func): """ Apply a function to a DataFrame that is intended to operate elementwise, i.e. like doing map(func, series) for each series in the DataFrame Parameters ---------- func : function Python function, returns a single value from a single value Returns ------- applied : DataFrame """ return self.apply(lambda x: lmap(func, x))
[ "def", "applymap", "(", "self", ",", "func", ")", ":", "return", "self", ".", "apply", "(", "lambda", "x", ":", "lmap", "(", "func", ",", "x", ")", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/sparse/frame.py#L932-L947
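Element-wise mapping in action; SparseDataFrame was removed in pandas 1.0, so a plain DataFrame stands in here (applymap behaves the same way):

import pandas as pd
df = pd.DataFrame({'a': [1.0, None], 'b': [None, 2.0]})
print(df.applymap(lambda x: x * 10))  # values scale, NaNs propagate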
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/scripts/doxypy.py
python
Doxypy.parseFile
(self, filename)
Parses the python file named by filename and emits the doxygen-compatible representation. @param filename the name of the python file to parse
Parses the python file named by filename and emits the doxygen-compatible representation.
[ "Parses", "the", "python", "file", "named", "by", "filename", "and", "emits", "the", "doxygen", "-", "compatible", "representation", "." ]
def parseFile(self, filename): """Parses the python file named by filename and emits the doxygen-compatible representation. @param filename the name of the python file to parse """ f = open(filename, 'r') for line in f: self.parseLine(line.rstrip('\r\n')) if self.fsm.current_state == "DEFCLASS": self.__closeComment() self.__flushBuffer() f.close()
[ "def", "parseFile", "(", "self", ",", "filename", ")", ":", "f", "=", "open", "(", "filename", ",", "'r'", ")", "for", "line", "in", "f", ":", "self", ".", "parseLine", "(", "line", ".", "rstrip", "(", "'\\r\\n'", ")", ")", "if", "self", ".", "fsm", ".", "current_state", "==", "\"DEFCLASS\"", ":", "self", ".", "__closeComment", "(", ")", "self", ".", "__flushBuffer", "(", ")", "f", ".", "close", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/scripts/doxypy.py#L387-L401
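The open()/close() pair above leaks the handle if parseLine() raises; a hypothetical rewrite of the same method with a context manager, identical behaviour otherwise:

def parseFile(self, filename):
    # 'with' guarantees the file is closed even if parseLine() raises.
    with open(filename, 'r') as f:
        for line in f:
            self.parseLine(line.rstrip('\r\n'))
    if self.fsm.current_state == "DEFCLASS":
        self.__closeComment()
        self.__flushBuffer()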
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/mailcap.py
python
findmatch
(caps, MIMEtype, key='view', filename="/dev/null", plist=[])
return None, None
Find a match for a mailcap entry. Return a tuple containing the command line, and the mailcap entry used; (None, None) if no match is found. This may invoke the 'test' command of several matching entries before deciding which entry to use.
Find a match for a mailcap entry.
[ "Find", "a", "match", "for", "a", "mailcap", "entry", "." ]
def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]): """Find a match for a mailcap entry. Return a tuple containing the command line, and the mailcap entry used; (None, None) if no match is found. This may invoke the 'test' command of several matching entries before deciding which entry to use. """ entries = lookup(caps, MIMEtype, key) # XXX This code should somehow check for the needsterminal flag. for e in entries: if 'test' in e: test = subst(e['test'], filename, plist) if test and os.system(test) != 0: continue command = subst(e[key], MIMEtype, filename, plist) return command, e return None, None
[ "def", "findmatch", "(", "caps", ",", "MIMEtype", ",", "key", "=", "'view'", ",", "filename", "=", "\"/dev/null\"", ",", "plist", "=", "[", "]", ")", ":", "entries", "=", "lookup", "(", "caps", ",", "MIMEtype", ",", "key", ")", "# XXX This code should somehow check for the needsterminal flag.", "for", "e", "in", "entries", ":", "if", "'test'", "in", "e", ":", "test", "=", "subst", "(", "e", "[", "'test'", "]", ",", "filename", ",", "plist", ")", "if", "test", "and", "os", ".", "system", "(", "test", ")", "!=", "0", ":", "continue", "command", "=", "subst", "(", "e", "[", "key", "]", ",", "MIMEtype", ",", "filename", ",", "plist", ")", "return", "command", ",", "e", "return", "None", ",", "None" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/mailcap.py#L138-L156
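Typical stdlib usage (mailcap was deprecated in Python 3.11 and removed in 3.13):

import mailcap
caps = mailcap.getcaps()                      # parse ~/.mailcap and friends
command, entry = mailcap.findmatch(caps, 'image/png', key='view',
                                   filename='/tmp/photo.png')
print(command)  # shell command with the filename substituted, or None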
cyberbotics/webots
af7fa7d68dcf7b4550f1f2e132092b41e83698fc
resources/osm_importer/webots_objects/road.py
python
Road.add_to_list
(osmid, tags, refs)
Add a new road to the list of roads.
Add a new road to the list of roads.
[ "Add", "a", "new", "road", "to", "the", "list", "of", "roads", "." ]
def add_to_list(osmid, tags, refs): """Add a new road to the list of roads.""" settingsSection = Settings.get_section('road', tags['highway']) if settingsSection is None: return road = Road() road.osmid = osmid road.id = str(osmid) road.refs = refs road.tags = tags if road.parse_tags(): # Only add the road if valid. Road.roads.append(road) for ref in road.refs: node = OSMNode.nodeDictionnary[ref] if 'highway' in node.tags and node.tags['highway'] == 'crossing': road.crossings.append(node)
[ "def", "add_to_list", "(", "osmid", ",", "tags", ",", "refs", ")", ":", "settingsSection", "=", "Settings", ".", "get_section", "(", "'road'", ",", "tags", "[", "'highway'", "]", ")", "if", "settingsSection", "is", "None", ":", "return", "road", "=", "Road", "(", ")", "road", ".", "osmid", "=", "osmid", "road", ".", "id", "=", "str", "(", "osmid", ")", "road", ".", "refs", "=", "refs", "road", ".", "tags", "=", "tags", "if", "road", ".", "parse_tags", "(", ")", ":", "# Only add the road if valid.", "Road", ".", "roads", ".", "append", "(", "road", ")", "for", "ref", "in", "road", ".", "refs", ":", "node", "=", "OSMNode", ".", "nodeDictionnary", "[", "ref", "]", "if", "'highway'", "in", "node", ".", "tags", "and", "node", ".", "tags", "[", "'highway'", "]", "==", "'crossing'", ":", "road", ".", "crossings", ".", "append", "(", "node", ")" ]
https://github.com/cyberbotics/webots/blob/af7fa7d68dcf7b4550f1f2e132092b41e83698fc/resources/osm_importer/webots_objects/road.py#L322-L338
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/cmd.py
python
Cmd.parseline
(self, line)
return cmd, arg, line
Parse the line into a command name and a string containing the arguments. Returns a tuple containing (command, args, line). 'command' and 'args' may be None if the line couldn't be parsed.
Parse the line into a command name and a string containing the arguments. Returns a tuple containing (command, args, line). 'command' and 'args' may be None if the line couldn't be parsed.
[ "Parse", "the", "line", "into", "a", "command", "name", "and", "a", "string", "containing", "the", "arguments", ".", "Returns", "a", "tuple", "containing", "(", "command", "args", "line", ")", ".", "command", "and", "args", "may", "be", "None", "if", "the", "line", "couldn", "t", "be", "parsed", "." ]
def parseline(self, line): """Parse the line into a command name and a string containing the arguments. Returns a tuple containing (command, args, line). 'command' and 'args' may be None if the line couldn't be parsed. """ line = line.strip() if not line: return None, None, line elif line[0] == '?': line = 'help ' + line[1:] elif line[0] == '!': if hasattr(self, 'do_shell'): line = 'shell ' + line[1:] else: return None, None, line i, n = 0, len(line) while i < n and line[i] in self.identchars: i = i+1 cmd, arg = line[:i], line[i:].strip() return cmd, arg, line
[ "def", "parseline", "(", "self", ",", "line", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", ":", "return", "None", ",", "None", ",", "line", "elif", "line", "[", "0", "]", "==", "'?'", ":", "line", "=", "'help '", "+", "line", "[", "1", ":", "]", "elif", "line", "[", "0", "]", "==", "'!'", ":", "if", "hasattr", "(", "self", ",", "'do_shell'", ")", ":", "line", "=", "'shell '", "+", "line", "[", "1", ":", "]", "else", ":", "return", "None", ",", "None", ",", "line", "i", ",", "n", "=", "0", ",", "len", "(", "line", ")", "while", "i", "<", "n", "and", "line", "[", "i", "]", "in", "self", ".", "identchars", ":", "i", "=", "i", "+", "1", "cmd", ",", "arg", "=", "line", "[", ":", "i", "]", ",", "line", "[", "i", ":", "]", ".", "strip", "(", ")", "return", "cmd", ",", "arg", ",", "line" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/cmd.py#L176-L194
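How the '?' and '!' prefix rewriting plays out on a bare cmd.Cmd instance:

import cmd
c = cmd.Cmd()
print(c.parseline('help topics'))  # ('help', 'topics', 'help topics')
print(c.parseline('?topics'))      # ('help', 'topics', 'help topics')
print(c.parseline(''))             # (None, None, '')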
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/keras/utils/metrics_utils.py
python
ragged_assert_compatible_and_get_flat_values
(values, mask=None)
return values, mask
If ragged, it checks the compatibility and then returns the flat_values. Note: If two tensors are dense, it does not check their compatibility. Note: Although two ragged tensors with different ragged ranks could have identical overall rank and dimension sizes and hence be compatible, we do not support those cases. Args: values: A list of potentially ragged tensor of the same ragged_rank. mask: A potentially ragged tensor of the same ragged_rank as elements in Values. Returns: A tuple in which the first element is the list of tensors and the second is the mask tensor. ([Values], mask). Mask and the element in Values are equal to the flat_values of the input arguments (if they were ragged).
If ragged, it checks the compatibility and then returns the flat_values.
[ "If", "ragged", "it", "checks", "the", "compatibility", "and", "then", "returns", "the", "flat_values", "." ]
def ragged_assert_compatible_and_get_flat_values(values, mask=None): """If ragged, it checks the compatibility and then returns the flat_values. Note: If two tensors are dense, it does not check their compatibility. Note: Although two ragged tensors with different ragged ranks could have identical overall rank and dimension sizes and hence be compatible, we do not support those cases. Args: values: A list of potentially ragged tensor of the same ragged_rank. mask: A potentially ragged tensor of the same ragged_rank as elements in Values. Returns: A tuple in which the first element is the list of tensors and the second is the mask tensor. ([Values], mask). Mask and the element in Values are equal to the flat_values of the input arguments (if they were ragged). """ if isinstance(values, list): is_all_ragged = \ all(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values) is_any_ragged = \ any(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values) else: is_all_ragged = isinstance(values, ragged_tensor.RaggedTensor) is_any_ragged = is_all_ragged if (is_all_ragged and ((mask is None) or isinstance(mask, ragged_tensor.RaggedTensor))): to_be_stripped = False if not isinstance(values, list): values = [values] to_be_stripped = True # NOTE: we leave the flat_values compatibility to # tf.TensorShape `assert_is_compatible_with` # check if both dynamic dimensions are equal and then use the flat_values. nested_row_split_list = [rt.nested_row_splits for rt in values] assertion_list = _assert_splits_match(nested_row_split_list) # if both are ragged sample_weights also should be ragged with same dims. if isinstance(mask, ragged_tensor.RaggedTensor): assertion_list_for_mask = _assert_splits_match( [nested_row_split_list[0], mask.nested_row_splits]) with ops.control_dependencies(assertion_list_for_mask): mask = array_ops.expand_dims(mask.flat_values, -1) # values has at least 1 element. flat_values = [] for value in values: with ops.control_dependencies(assertion_list): flat_values.append(array_ops.expand_dims(value.flat_values, -1)) values = flat_values[0] if to_be_stripped else flat_values elif is_any_ragged: raise TypeError('One of the inputs does not have acceptable types.') # values are empty or value are not ragged and mask is ragged. elif isinstance(mask, ragged_tensor.RaggedTensor): raise TypeError('Ragged mask is not allowed with non-ragged inputs.') return values, mask
[ "def", "ragged_assert_compatible_and_get_flat_values", "(", "values", ",", "mask", "=", "None", ")", ":", "if", "isinstance", "(", "values", ",", "list", ")", ":", "is_all_ragged", "=", "all", "(", "isinstance", "(", "rt", ",", "ragged_tensor", ".", "RaggedTensor", ")", "for", "rt", "in", "values", ")", "is_any_ragged", "=", "any", "(", "isinstance", "(", "rt", ",", "ragged_tensor", ".", "RaggedTensor", ")", "for", "rt", "in", "values", ")", "else", ":", "is_all_ragged", "=", "isinstance", "(", "values", ",", "ragged_tensor", ".", "RaggedTensor", ")", "is_any_ragged", "=", "is_all_ragged", "if", "(", "is_all_ragged", "and", "(", "(", "mask", "is", "None", ")", "or", "isinstance", "(", "mask", ",", "ragged_tensor", ".", "RaggedTensor", ")", ")", ")", ":", "to_be_stripped", "=", "False", "if", "not", "isinstance", "(", "values", ",", "list", ")", ":", "values", "=", "[", "values", "]", "to_be_stripped", "=", "True", "# NOTE: we leave the flat_values compatibility to", "# tf.TensorShape `assert_is_compatible_with`", "# check if both dynamic dimensions are equal and then use the flat_values.", "nested_row_split_list", "=", "[", "rt", ".", "nested_row_splits", "for", "rt", "in", "values", "]", "assertion_list", "=", "_assert_splits_match", "(", "nested_row_split_list", ")", "# if both are ragged sample_weights also should be ragged with same dims.", "if", "isinstance", "(", "mask", ",", "ragged_tensor", ".", "RaggedTensor", ")", ":", "assertion_list_for_mask", "=", "_assert_splits_match", "(", "[", "nested_row_split_list", "[", "0", "]", ",", "mask", ".", "nested_row_splits", "]", ")", "with", "ops", ".", "control_dependencies", "(", "assertion_list_for_mask", ")", ":", "mask", "=", "array_ops", ".", "expand_dims", "(", "mask", ".", "flat_values", ",", "-", "1", ")", "# values has at least 1 element.", "flat_values", "=", "[", "]", "for", "value", "in", "values", ":", "with", "ops", ".", "control_dependencies", "(", "assertion_list", ")", ":", "flat_values", ".", "append", "(", "array_ops", ".", "expand_dims", "(", "value", ".", "flat_values", ",", "-", "1", ")", ")", "values", "=", "flat_values", "[", "0", "]", "if", "to_be_stripped", "else", "flat_values", "elif", "is_any_ragged", ":", "raise", "TypeError", "(", "'One of the inputs does not have acceptable types.'", ")", "# values are empty or value are not ragged and mask is ragged.", "elif", "isinstance", "(", "mask", ",", "ragged_tensor", ".", "RaggedTensor", ")", ":", "raise", "TypeError", "(", "'Ragged mask is not allowed with non-ragged inputs.'", ")", "return", "values", ",", "mask" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/utils/metrics_utils.py#L772-L831
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/data/experimental/ops/resampling.py
python
rejection_resample
(class_func, target_dist, initial_dist=None, seed=None)
return _apply_fn
A transformation that resamples a dataset to achieve a target distribution. **NOTE** Resampling is performed via rejection sampling; some fraction of the input values will be dropped. Args: class_func: A function mapping an element of the input dataset to a scalar `tf.int32` tensor. Values should be in `[0, num_classes)`. target_dist: A floating point type tensor, shaped `[num_classes]`. initial_dist: (Optional.) A floating point type tensor, shaped `[num_classes]`. If not provided, the true class distribution is estimated live in a streaming fashion. seed: (Optional.) Python integer seed for the resampler. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`.
A transformation that resamples a dataset to achieve a target distribution.
[ "A", "transformation", "that", "resamples", "a", "dataset", "to", "achieve", "a", "target", "distribution", "." ]
def rejection_resample(class_func, target_dist, initial_dist=None, seed=None): """A transformation that resamples a dataset to achieve a target distribution. **NOTE** Resampling is performed via rejection sampling; some fraction of the input values will be dropped. Args: class_func: A function mapping an element of the input dataset to a scalar `tf.int32` tensor. Values should be in `[0, num_classes)`. target_dist: A floating point type tensor, shaped `[num_classes]`. initial_dist: (Optional.) A floating point type tensor, shaped `[num_classes]`. If not provided, the true class distribution is estimated live in a streaming fashion. seed: (Optional.) Python integer seed for the resampler. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. """ def _apply_fn(dataset): """Function from `Dataset` to `Dataset` that applies the transformation.""" target_dist_t = ops.convert_to_tensor(target_dist, name="target_dist") class_values_ds = dataset.map(class_func) # Get initial distribution. if initial_dist is not None: initial_dist_t = ops.convert_to_tensor(initial_dist, name="initial_dist") acceptance_dist, prob_of_original = ( _calculate_acceptance_probs_with_mixing(initial_dist_t, target_dist_t)) initial_dist_ds = dataset_ops.Dataset.from_tensors( initial_dist_t).repeat() acceptance_dist_ds = dataset_ops.Dataset.from_tensors( acceptance_dist).repeat() prob_of_original_ds = dataset_ops.Dataset.from_tensors( prob_of_original).repeat() else: initial_dist_ds = _estimate_initial_dist_ds( target_dist_t, class_values_ds) acceptance_and_original_prob_ds = initial_dist_ds.map( lambda initial: _calculate_acceptance_probs_with_mixing( # pylint: disable=g-long-lambda initial, target_dist_t)) acceptance_dist_ds = acceptance_and_original_prob_ds.map( lambda accept_prob, _: accept_prob) prob_of_original_ds = acceptance_and_original_prob_ds.map( lambda _, prob_original: prob_original) filtered_ds = _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds, class_values_ds, seed) # Prefetch filtered dataset for speed. filtered_ds = filtered_ds.prefetch(3) prob_original_static = _get_prob_original_static( initial_dist_t, target_dist_t) if initial_dist is not None else None if prob_original_static == 1: return dataset_ops.Dataset.zip((class_values_ds, dataset)) elif prob_original_static == 0: return filtered_ds else: return interleave_ops.sample_from_datasets( [dataset_ops.Dataset.zip((class_values_ds, dataset)), filtered_ds], weights=prob_of_original_ds.map(lambda prob: [(prob, 1.0 - prob)]), seed=seed) return _apply_fn
[ "def", "rejection_resample", "(", "class_func", ",", "target_dist", ",", "initial_dist", "=", "None", ",", "seed", "=", "None", ")", ":", "def", "_apply_fn", "(", "dataset", ")", ":", "\"\"\"Function from `Dataset` to `Dataset` that applies the transformation.\"\"\"", "target_dist_t", "=", "ops", ".", "convert_to_tensor", "(", "target_dist", ",", "name", "=", "\"target_dist\"", ")", "class_values_ds", "=", "dataset", ".", "map", "(", "class_func", ")", "# Get initial distribution.", "if", "initial_dist", "is", "not", "None", ":", "initial_dist_t", "=", "ops", ".", "convert_to_tensor", "(", "initial_dist", ",", "name", "=", "\"initial_dist\"", ")", "acceptance_dist", ",", "prob_of_original", "=", "(", "_calculate_acceptance_probs_with_mixing", "(", "initial_dist_t", ",", "target_dist_t", ")", ")", "initial_dist_ds", "=", "dataset_ops", ".", "Dataset", ".", "from_tensors", "(", "initial_dist_t", ")", ".", "repeat", "(", ")", "acceptance_dist_ds", "=", "dataset_ops", ".", "Dataset", ".", "from_tensors", "(", "acceptance_dist", ")", ".", "repeat", "(", ")", "prob_of_original_ds", "=", "dataset_ops", ".", "Dataset", ".", "from_tensors", "(", "prob_of_original", ")", ".", "repeat", "(", ")", "else", ":", "initial_dist_ds", "=", "_estimate_initial_dist_ds", "(", "target_dist_t", ",", "class_values_ds", ")", "acceptance_and_original_prob_ds", "=", "initial_dist_ds", ".", "map", "(", "lambda", "initial", ":", "_calculate_acceptance_probs_with_mixing", "(", "# pylint: disable=g-long-lambda", "initial", ",", "target_dist_t", ")", ")", "acceptance_dist_ds", "=", "acceptance_and_original_prob_ds", ".", "map", "(", "lambda", "accept_prob", ",", "_", ":", "accept_prob", ")", "prob_of_original_ds", "=", "acceptance_and_original_prob_ds", ".", "map", "(", "lambda", "_", ",", "prob_original", ":", "prob_original", ")", "filtered_ds", "=", "_filter_ds", "(", "dataset", ",", "acceptance_dist_ds", ",", "initial_dist_ds", ",", "class_values_ds", ",", "seed", ")", "# Prefetch filtered dataset for speed.", "filtered_ds", "=", "filtered_ds", ".", "prefetch", "(", "3", ")", "prob_original_static", "=", "_get_prob_original_static", "(", "initial_dist_t", ",", "target_dist_t", ")", "if", "initial_dist", "is", "not", "None", "else", "None", "if", "prob_original_static", "==", "1", ":", "return", "dataset_ops", ".", "Dataset", ".", "zip", "(", "(", "class_values_ds", ",", "dataset", ")", ")", "elif", "prob_original_static", "==", "0", ":", "return", "filtered_ds", "else", ":", "return", "interleave_ops", ".", "sample_from_datasets", "(", "[", "dataset_ops", ".", "Dataset", ".", "zip", "(", "(", "class_values_ds", ",", "dataset", ")", ")", ",", "filtered_ds", "]", ",", "weights", "=", "prob_of_original_ds", ".", "map", "(", "lambda", "prob", ":", "[", "(", "prob", ",", "1.0", "-", "prob", ")", "]", ")", ",", "seed", "=", "seed", ")", "return", "_apply_fn" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/data/experimental/ops/resampling.py#L38-L101
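A hedged usage sketch against the TF 1.x-era experimental API this module implements; after the transformation each element is a (class, example) pair:

import tensorflow as tf

labels = tf.data.Dataset.from_tensor_slices([0, 0, 0, 0, 1])
balanced = labels.apply(
    tf.data.experimental.rejection_resample(
        class_func=lambda label: label,  # each element is already the class id
        target_dist=[0.5, 0.5]))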
snap-stanford/snap-python
d53c51b0a26aa7e3e7400b014cdf728948fde80a
setup/snap.py
python
TStr_IsAbsFPath
(*args)
return _snap.TStr_IsAbsFPath(*args)
TStr_IsAbsFPath(TStr FPath) -> bool Parameters: FPath: TStr const &
TStr_IsAbsFPath(TStr FPath) -> bool
[ "TStr_IsAbsFPath", "(", "TStr", "FPath", ")", "-", ">", "bool" ]
def TStr_IsAbsFPath(*args): """ TStr_IsAbsFPath(TStr FPath) -> bool Parameters: FPath: TStr const & """ return _snap.TStr_IsAbsFPath(*args)
[ "def", "TStr_IsAbsFPath", "(", "*", "args", ")", ":", "return", "_snap", ".", "TStr_IsAbsFPath", "(", "*", "args", ")" ]
https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L11203-L11211
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/telemetry/internal/platform/posix_platform_backend.py
python
PosixPlatformBackend._GetTopOutput
(self, pid, columns)
return self.RunCommand(args).splitlines()
Returns output of the 'top' command as a list of lines. Args: pid: pid of process to examine. columns: A list of required columns, e.g., ['idlew', 'vsize'].
Returns output of the 'top' command as a list of lines.
[ "Returns", "output", "of", "the", "top", "command", "as", "a", "list", "of", "lines", "." ]
def _GetTopOutput(self, pid, columns): """Returns output of the 'top' command as a list of lines. Args: pid: pid of process to examine. columns: A list of required columns, e.g., ['idlew', 'vsize']. """ args = ['top'] args.extend(['-pid', str(pid), '-l', '1', '-s', '0', '-stats', ','.join(columns)]) return self.RunCommand(args).splitlines()
[ "def", "_GetTopOutput", "(", "self", ",", "pid", ",", "columns", ")", ":", "args", "=", "[", "'top'", "]", "args", ".", "extend", "(", "[", "'-pid'", ",", "str", "(", "pid", ")", ",", "'-l'", ",", "'1'", ",", "'-s'", ",", "'0'", ",", "'-stats'", ",", "','", ".", "join", "(", "columns", ")", "]", ")", "return", "self", ".", "RunCommand", "(", "args", ")", ".", "splitlines", "(", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/telemetry/internal/platform/posix_platform_backend.py#L61-L71
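The exact argv the method assembles before delegating to RunCommand() (macOS top syntax):

pid, columns = 1234, ['idlew', 'vsize']
args = ['top'] + ['-pid', str(pid), '-l', '1', '-s', '0',
                  '-stats', ','.join(columns)]
print(' '.join(args))  # top -pid 1234 -l 1 -s 0 -stats idlew,vsize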
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
tools/android/loading/dependency_graph.py
python
RequestDependencyGraph.Cost
(self, from_first_request=True, path_list=None, costs_out=None)
Returns the cost of the graph, that is the costliest path. Args: from_first_request: (boolean) If True, only considers paths that originate from the first request node. path_list: (list) See graph.Cost(). costs_out: (list) See graph.Cost().
Returns the cost of the graph, that is the costliest path.
[ "Returns", "the", "cost", "of", "the", "graph", "that", "is", "the", "costliest", "path", "." ]
def Cost(self, from_first_request=True, path_list=None, costs_out=None): """Returns the cost of the graph, that is the costliest path. Args: from_first_request: (boolean) If True, only considers paths that originate from the first request node. path_list: (list) See graph.Cost(). costs_out: (list) See graph.Cost(). """ if from_first_request: return self._deps_graph.Cost( [self._first_request_node], path_list, costs_out) else: return self._deps_graph.Cost(path_list=path_list, costs_out=costs_out)
[ "def", "Cost", "(", "self", ",", "from_first_request", "=", "True", ",", "path_list", "=", "None", ",", "costs_out", "=", "None", ")", ":", "if", "from_first_request", ":", "return", "self", ".", "_deps_graph", ".", "Cost", "(", "[", "self", ".", "_first_request_node", "]", ",", "path_list", ",", "costs_out", ")", "else", ":", "return", "self", ".", "_deps_graph", ".", "Cost", "(", "path_list", "=", "path_list", ",", "costs_out", "=", "costs_out", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/android/loading/dependency_graph.py#L117-L130
facebookresearch/faiss
eb8781557f556505ca93f6f21fff932e17f0d9e0
benchs/bench_gpu_1bn.py
python
compute_populated_index
(preproc)
return gpu_index, indexall
Add elements to a sharded index. Return the index and if available a sharded gpu_index that contains the same data.
Add elements to a sharded index. Return the index and if available a sharded gpu_index that contains the same data.
[ "Add", "elements", "to", "a", "sharded", "index", ".", "Return", "the", "index", "and", "if", "available", "a", "sharded", "gpu_index", "that", "contains", "the", "same", "data", "." ]
def compute_populated_index(preproc): """Add elements to a sharded index. Return the index and if available a sharded gpu_index that contains the same data. """ indexall = prepare_trained_index(preproc) co = faiss.GpuMultipleClonerOptions() co.useFloat16 = use_float16 co.useFloat16CoarseQuantizer = False co.usePrecomputed = use_precomputed_tables co.indicesOptions = faiss.INDICES_CPU co.verbose = True co.reserveVecs = max_add if max_add > 0 else xb.shape[0] co.shard = True assert co.shard_type in (0, 1, 2) vres, vdev = make_vres_vdev() gpu_index = faiss.index_cpu_to_gpu_multiple( vres, vdev, indexall, co) print("add...") t0 = time.time() nb = xb.shape[0] for i0, xs in dataset_iterator(xb, preproc, add_batch_size): i1 = i0 + xs.shape[0] gpu_index.add_with_ids(xs, np.arange(i0, i1)) if max_add > 0 and gpu_index.ntotal > max_add: print("Flush indexes to CPU") for i in range(ngpu): index_src_gpu = faiss.downcast_index(gpu_index.at(i)) index_src = faiss.index_gpu_to_cpu(index_src_gpu) print(" index %d size %d" % (i, index_src.ntotal)) index_src.copy_subset_to(indexall, 0, 0, nb) index_src_gpu.reset() index_src_gpu.reserveMemory(max_add) gpu_index.sync_with_shard_indexes() print('\r%d/%d (%.3f s) ' % ( i0, nb, time.time() - t0), end=' ') sys.stdout.flush() print("Add time: %.3f s" % (time.time() - t0)) print("Aggregate indexes to CPU") t0 = time.time() if hasattr(gpu_index, 'at'): # it is a sharded index for i in range(ngpu): index_src = faiss.index_gpu_to_cpu(gpu_index.at(i)) print(" index %d size %d" % (i, index_src.ntotal)) index_src.copy_subset_to(indexall, 0, 0, nb) else: # simple index index_src = faiss.index_gpu_to_cpu(gpu_index) index_src.copy_subset_to(indexall, 0, 0, nb) print(" done in %.3f s" % (time.time() - t0)) if max_add > 0: # it does not contain all the vectors gpu_index = None return gpu_index, indexall
[ "def", "compute_populated_index", "(", "preproc", ")", ":", "indexall", "=", "prepare_trained_index", "(", "preproc", ")", "co", "=", "faiss", ".", "GpuMultipleClonerOptions", "(", ")", "co", ".", "useFloat16", "=", "use_float16", "co", ".", "useFloat16CoarseQuantizer", "=", "False", "co", ".", "usePrecomputed", "=", "use_precomputed_tables", "co", ".", "indicesOptions", "=", "faiss", ".", "INDICES_CPU", "co", ".", "verbose", "=", "True", "co", ".", "reserveVecs", "=", "max_add", "if", "max_add", ">", "0", "else", "xb", ".", "shape", "[", "0", "]", "co", ".", "shard", "=", "True", "assert", "co", ".", "shard_type", "in", "(", "0", ",", "1", ",", "2", ")", "vres", ",", "vdev", "=", "make_vres_vdev", "(", ")", "gpu_index", "=", "faiss", ".", "index_cpu_to_gpu_multiple", "(", "vres", ",", "vdev", ",", "indexall", ",", "co", ")", "print", "(", "\"add...\"", ")", "t0", "=", "time", ".", "time", "(", ")", "nb", "=", "xb", ".", "shape", "[", "0", "]", "for", "i0", ",", "xs", "in", "dataset_iterator", "(", "xb", ",", "preproc", ",", "add_batch_size", ")", ":", "i1", "=", "i0", "+", "xs", ".", "shape", "[", "0", "]", "gpu_index", ".", "add_with_ids", "(", "xs", ",", "np", ".", "arange", "(", "i0", ",", "i1", ")", ")", "if", "max_add", ">", "0", "and", "gpu_index", ".", "ntotal", ">", "max_add", ":", "print", "(", "\"Flush indexes to CPU\"", ")", "for", "i", "in", "range", "(", "ngpu", ")", ":", "index_src_gpu", "=", "faiss", ".", "downcast_index", "(", "gpu_index", ".", "at", "(", "i", ")", ")", "index_src", "=", "faiss", ".", "index_gpu_to_cpu", "(", "index_src_gpu", ")", "print", "(", "\" index %d size %d\"", "%", "(", "i", ",", "index_src", ".", "ntotal", ")", ")", "index_src", ".", "copy_subset_to", "(", "indexall", ",", "0", ",", "0", ",", "nb", ")", "index_src_gpu", ".", "reset", "(", ")", "index_src_gpu", ".", "reserveMemory", "(", "max_add", ")", "gpu_index", ".", "sync_with_shard_indexes", "(", ")", "print", "(", "'\\r%d/%d (%.3f s) '", "%", "(", "i0", ",", "nb", ",", "time", ".", "time", "(", ")", "-", "t0", ")", ",", "end", "=", "' '", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "print", "(", "\"Add time: %.3f s\"", "%", "(", "time", ".", "time", "(", ")", "-", "t0", ")", ")", "print", "(", "\"Aggregate indexes to CPU\"", ")", "t0", "=", "time", ".", "time", "(", ")", "if", "hasattr", "(", "gpu_index", ",", "'at'", ")", ":", "# it is a sharded index", "for", "i", "in", "range", "(", "ngpu", ")", ":", "index_src", "=", "faiss", ".", "index_gpu_to_cpu", "(", "gpu_index", ".", "at", "(", "i", ")", ")", "print", "(", "\" index %d size %d\"", "%", "(", "i", ",", "index_src", ".", "ntotal", ")", ")", "index_src", ".", "copy_subset_to", "(", "indexall", ",", "0", ",", "0", ",", "nb", ")", "else", ":", "# simple index", "index_src", "=", "faiss", ".", "index_gpu_to_cpu", "(", "gpu_index", ")", "index_src", ".", "copy_subset_to", "(", "indexall", ",", "0", ",", "0", ",", "nb", ")", "print", "(", "\" done in %.3f s\"", "%", "(", "time", ".", "time", "(", ")", "-", "t0", ")", ")", "if", "max_add", ">", "0", ":", "# it does not contain all the vectors", "gpu_index", "=", "None", "return", "gpu_index", ",", "indexall" ]
https://github.com/facebookresearch/faiss/blob/eb8781557f556505ca93f6f21fff932e17f0d9e0/benchs/bench_gpu_1bn.py#L501-L562
Tokutek/mongo
0653eabe2c5b9d12b4814617cb7fb2d799937a0f
buildscripts/cpplint.py
python
_SetFilters
(filters)
Sets the module's error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). Each filter should start with + or -; else we die.
Sets the module's error-message filters.
[ "Sets", "the", "module", "s", "error", "-", "message", "filters", "." ]
def _SetFilters(filters): """Sets the module's error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). Each filter should start with + or -; else we die. """ _cpplint_state.SetFilters(filters)
[ "def", "_SetFilters", "(", "filters", ")", ":", "_cpplint_state", ".", "SetFilters", "(", "filters", ")" ]
https://github.com/Tokutek/mongo/blob/0653eabe2c5b9d12b4814617cb7fb2d799937a0f/buildscripts/cpplint.py#L601-L611
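Given the filter syntax the docstring describes (comma-separated category prefixes, each beginning with '+' or '-'), a hedged usage sketch; importing the script as a module named cpplint is an assumption about how it is deployed:

```python
import cpplint  # assumes buildscripts/cpplint.py is importable under this name

# Suppress whitespace/indent diagnostics while explicitly keeping build/include.
# Entries that do not start with '+' or '-' make cpplint exit with an error.
cpplint._SetFilters('-whitespace/indent,+build/include')
```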
mapnik/mapnik
f3da900c355e1d15059c4a91b00203dcc9d9f0ef
scons/scons-local-4.1.0/SCons/Variables/__init__.py
python
Variables.Update
(self, env, args=None)
Update an environment with the option variables. env - the environment to update.
Update an environment with the option variables.
[ "Update", "an", "environment", "with", "the", "option", "variables", "." ]
def Update(self, env, args=None): """ Update an environment with the option variables. env - the environment to update. """ values = {} # first set the defaults: for option in self.options: if option.default is not None: values[option.key] = option.default # next set the value specified in the options file for filename in self.files: if os.path.exists(filename): dir = os.path.split(os.path.abspath(filename))[0] if dir: sys.path.insert(0, dir) try: values['__name__'] = filename with open(filename, 'r') as f: contents = f.read() exec(contents, {}, values) finally: if dir: del sys.path[0] del values['__name__'] # set the values specified on the command line if args is None: args = self.args for arg, value in args.items(): added = False for option in self.options: if arg in list(option.aliases) + [ option.key ]: values[option.key] = value added = True if not added: self.unknown[arg] = value # put the variables in the environment: # (don't copy over variables that are not declared as options) for option in self.options: try: env[option.key] = values[option.key] except KeyError: pass # Call the convert functions: for option in self.options: if option.converter and option.key in values: value = env.subst('${%s}'%option.key) try: try: env[option.key] = option.converter(value) except TypeError: env[option.key] = option.converter(value, env) except ValueError as x: raise SCons.Errors.UserError('Error converting option: %s\n%s'%(option.key, x)) # Finally validate the values: for option in self.options: if option.validator and option.key in values: option.validator(option.key, env.subst('${%s}'%option.key), env)
[ "def", "Update", "(", "self", ",", "env", ",", "args", "=", "None", ")", ":", "values", "=", "{", "}", "# first set the defaults:", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "default", "is", "not", "None", ":", "values", "[", "option", ".", "key", "]", "=", "option", ".", "default", "# next set the value specified in the options file", "for", "filename", "in", "self", ".", "files", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "dir", "=", "os", ".", "path", ".", "split", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", "[", "0", "]", "if", "dir", ":", "sys", ".", "path", ".", "insert", "(", "0", ",", "dir", ")", "try", ":", "values", "[", "'__name__'", "]", "=", "filename", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "contents", "=", "f", ".", "read", "(", ")", "exec", "(", "contents", ",", "{", "}", ",", "values", ")", "finally", ":", "if", "dir", ":", "del", "sys", ".", "path", "[", "0", "]", "del", "values", "[", "'__name__'", "]", "# set the values specified on the command line", "if", "args", "is", "None", ":", "args", "=", "self", ".", "args", "for", "arg", ",", "value", "in", "args", ".", "items", "(", ")", ":", "added", "=", "False", "for", "option", "in", "self", ".", "options", ":", "if", "arg", "in", "list", "(", "option", ".", "aliases", ")", "+", "[", "option", ".", "key", "]", ":", "values", "[", "option", ".", "key", "]", "=", "value", "added", "=", "True", "if", "not", "added", ":", "self", ".", "unknown", "[", "arg", "]", "=", "value", "# put the variables in the environment:", "# (don't copy over variables that are not declared as options)", "for", "option", "in", "self", ".", "options", ":", "try", ":", "env", "[", "option", ".", "key", "]", "=", "values", "[", "option", ".", "key", "]", "except", "KeyError", ":", "pass", "# Call the convert functions:", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "converter", "and", "option", ".", "key", "in", "values", ":", "value", "=", "env", ".", "subst", "(", "'${%s}'", "%", "option", ".", "key", ")", "try", ":", "try", ":", "env", "[", "option", ".", "key", "]", "=", "option", ".", "converter", "(", "value", ")", "except", "TypeError", ":", "env", "[", "option", ".", "key", "]", "=", "option", ".", "converter", "(", "value", ",", "env", ")", "except", "ValueError", "as", "x", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "'Error converting option: %s\\n%s'", "%", "(", "option", ".", "key", ",", "x", ")", ")", "# Finally validate the values:", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "validator", "and", "option", ".", "key", "in", "values", ":", "option", ".", "validator", "(", "option", ".", "key", ",", "env", ".", "subst", "(", "'${%s}'", "%", "option", ".", "key", ")", ",", "env", ")" ]
https://github.com/mapnik/mapnik/blob/f3da900c355e1d15059c4a91b00203dcc9d9f0ef/scons/scons-local-4.1.0/SCons/Variables/__init__.py#L158-L225
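Update() layers values in a fixed order: declared defaults first, then any options files, then command-line arguments; converters and validators run last. A hedged SConstruct-style sketch ('custom.py' is a hypothetical options file; Variables, BoolVariable, ARGUMENTS, and Environment are provided by SCons):

```python
# SConstruct sketch; runs under `scons`, not plain Python.
vars = Variables('custom.py', ARGUMENTS)
vars.Add(BoolVariable('debug', 'build with debug symbols', False))

env = Environment(variables=vars)   # Environment() calls vars.Update(env)
unknown = vars.UnknownVariables()   # args that matched no declared option
if unknown:
    print('Unknown variables:', list(unknown.keys()))
```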
qgis/QGIS
15a77662d4bb712184f6aa60d0bd663010a76a75
python/plugins/processing/gui/HistoryDialog.py
python
TreeLogEntryItem.as_qgis_process_command
(self)
return self.entry.entry.get('process_command')
Returns the entry as a qgis_process command, if possible
Returns the entry as a qgis_process command, if possible
[ "Returns", "the", "entry", "as", "a", "qgis_process", "command", "if", "possible" ]
def as_qgis_process_command(self) -> Optional[str]: """ Returns the entry as a qgis_process command, if possible """ return self.entry.entry.get('process_command')
[ "def", "as_qgis_process_command", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "return", "self", ".", "entry", ".", "entry", ".", "get", "(", "'process_command'", ")" ]
https://github.com/qgis/QGIS/blob/15a77662d4bb712184f6aa60d0bd663010a76a75/python/plugins/processing/gui/HistoryDialog.py#L290-L294
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/timeseries/python/timeseries/state_management.py
python
ChainingStateManager._update_cached_states
(self, model, features, mode)
return loss_op, end_state, batch_predictions
Read, process, and write chunks to the cache.
Read, process, and write chunks to the cache.
[ "Read", "process", "and", "write", "chunks", "to", "the", "cache", "." ]
def _update_cached_states(self, model, features, mode): """Read, process, and write chunks to the cache.""" times = features[feature_keys.TrainEvalFeatures.TIMES] looked_up_state = self._get_cached_states(times[:, 0]) (model_loss, intermediate_states, batch_predictions) = model.per_step_batch_loss( features=features, mode=mode, state=looked_up_state) # We need to at least write to the bucket after the one we read from. min_chunk_numbers = self._get_chunk_number(times) + 1 # We write to the bucket that would have been read had the window started at # the next sample (except for the last sample in the window, which gets # written to the next bucket). This assumes fixed missing times (i.e. if we # were presented with times [10, 50] we will never see times [30, 50]). # # TODO(allenl): Retrieve the highest time less than the current time rather # than relying on fixed bucketing. write_chunk_numbers = math_ops.maximum( self._get_chunk_number(array_ops.concat( [times[:, 1:], times[:, -1:] + 1], axis=1)), min_chunk_numbers) # Write once for every computed state; this may mean that we write multiple # times to the same cell, but later writes will take precedence. save_ops = [ self._cached_states.insert( keys=write_chunk_numbers, values=intermediate_states)] end_state = nest.pack_sequence_as( intermediate_states, [state_element[:, -1] for state_element in nest.flatten(intermediate_states)]) with ops.control_dependencies(save_ops): # Make sure end states get saved at each iteration loss_op = array_ops.identity(model_loss) return loss_op, end_state, batch_predictions
[ "def", "_update_cached_states", "(", "self", ",", "model", ",", "features", ",", "mode", ")", ":", "times", "=", "features", "[", "feature_keys", ".", "TrainEvalFeatures", ".", "TIMES", "]", "looked_up_state", "=", "self", ".", "_get_cached_states", "(", "times", "[", ":", ",", "0", "]", ")", "(", "model_loss", ",", "intermediate_states", ",", "batch_predictions", ")", "=", "model", ".", "per_step_batch_loss", "(", "features", "=", "features", ",", "mode", "=", "mode", ",", "state", "=", "looked_up_state", ")", "# We need to at least write to the bucket after the one we read from.", "min_chunk_numbers", "=", "self", ".", "_get_chunk_number", "(", "times", ")", "+", "1", "# We write to the bucket that would have been read had the window started at", "# the next sample (except for the last sample in the window, which gets", "# written to the next bucket). This assumes fixed missing times (i.e. if we", "# were presented with times [10, 50] we will never see times [30, 50]).", "#", "# TODO(allenl): Retrieve the highest time less than the current time rather", "# than relying on fixed bucketing.", "write_chunk_numbers", "=", "math_ops", ".", "maximum", "(", "self", ".", "_get_chunk_number", "(", "array_ops", ".", "concat", "(", "[", "times", "[", ":", ",", "1", ":", "]", ",", "times", "[", ":", ",", "-", "1", ":", "]", "+", "1", "]", ",", "axis", "=", "1", ")", ")", ",", "min_chunk_numbers", ")", "# Write once for every computed state; this may mean that we write multiple", "# times to the same cell, but later writes will take precedence.", "save_ops", "=", "[", "self", ".", "_cached_states", ".", "insert", "(", "keys", "=", "write_chunk_numbers", ",", "values", "=", "intermediate_states", ")", "]", "end_state", "=", "nest", ".", "pack_sequence_as", "(", "intermediate_states", ",", "[", "state_element", "[", ":", ",", "-", "1", "]", "for", "state_element", "in", "nest", ".", "flatten", "(", "intermediate_states", ")", "]", ")", "with", "ops", ".", "control_dependencies", "(", "save_ops", ")", ":", "# Make sure end states get saved at each iteration", "loss_op", "=", "array_ops", ".", "identity", "(", "model_loss", ")", "return", "loss_op", ",", "end_state", ",", "batch_predictions" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/timeseries/python/timeseries/state_management.py#L230-L265
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_gdi.py
python
IconLocation.GetIndex
(*args, **kwargs)
return _gdi_.IconLocation_GetIndex(*args, **kwargs)
GetIndex(self) -> int
GetIndex(self) -> int
[ "GetIndex", "(", "self", ")", "-", ">", "int" ]
def GetIndex(*args, **kwargs): """GetIndex(self) -> int""" return _gdi_.IconLocation_GetIndex(*args, **kwargs)
[ "def", "GetIndex", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gdi_", ".", "IconLocation_GetIndex", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L1368-L1370
okex/V3-Open-API-SDK
c5abb0db7e2287718e0055e17e57672ce0ec7fd9
okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/pkg_resources/__init__.py
python
non_empty_lines
(path)
Yield non-empty lines from file at path
Yield non-empty lines from file at path
[ "Yield", "non", "-", "empty", "lines", "from", "file", "at", "path" ]
def non_empty_lines(path): """ Yield non-empty lines from file at path """ with open(path) as f: for line in f: line = line.strip() if line: yield line
[ "def", "non_empty_lines", "(", "path", ")", ":", "with", "open", "(", "path", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ":", "yield", "line" ]
https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/pkg_resources/__init__.py#L2042-L2050
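A self-contained demo of the generator above; the temp file stands in for any real path:

```python
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('first\n\n   \nsecond\n')   # includes blank and whitespace-only lines
    path = tmp.name

print(list(non_empty_lines(path)))  # ['first', 'second']
os.unlink(path)
```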
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
scripts/Python/static-binding/lldb.py
python
SBTypeFilter.__ne__
(self, rhs)
return _lldb.SBTypeFilter___ne__(self, rhs)
__ne__(SBTypeFilter self, SBTypeFilter rhs) -> bool
__ne__(SBTypeFilter self, SBTypeFilter rhs) -> bool
[ "__ne__", "(", "SBTypeFilter", "self", "SBTypeFilter", "rhs", ")", "-", ">", "bool" ]
def __ne__(self, rhs): """__ne__(SBTypeFilter self, SBTypeFilter rhs) -> bool""" return _lldb.SBTypeFilter___ne__(self, rhs)
[ "def", "__ne__", "(", "self", ",", "rhs", ")", ":", "return", "_lldb", ".", "SBTypeFilter___ne__", "(", "self", ",", "rhs", ")" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L13508-L13510
rdkit/rdkit
ede860ae316d12d8568daf5ee800921c3389c84e
rdkit/sping/WX/pidWxDc.py
python
PiddleWxDc.fontAscent
(self, font=None)
return self.dc.GetCharHeight() - self.fontDescent(font)
Find the ascent (height above base) of the given font.
Find the ascent (height above base) of the given font.
[ "Find", "the", "ascent", "(", "height", "above", "base", ")", "of", "the", "given", "font", "." ]
def fontAscent(self, font=None): '''Find the ascent (height above base) of the given font.''' wx_font = self._setWXfont(font) return self.dc.GetCharHeight() - self.fontDescent(font)
[ "def", "fontAscent", "(", "self", ",", "font", "=", "None", ")", ":", "wx_font", "=", "self", ".", "_setWXfont", "(", "font", ")", "return", "self", ".", "dc", ".", "GetCharHeight", "(", ")", "-", "self", ".", "fontDescent", "(", "font", ")" ]
https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/sping/WX/pidWxDc.py#L142-L145
SpenceKonde/megaTinyCore
1c4a70b18a149fe6bcb551dfa6db11ca50b8997b
megaavr/tools/libs/pymcuprog/nvmspi.py
python
NvmAccessProviderCmsisDapSpi.stop
(self)
Stop programming session
Stop programming session
[ "Stop", "programming", "session" ]
def stop(self): """ Stop programming session """ self.isp.leave_progmode()
[ "def", "stop", "(", "self", ")", ":", "self", ".", "isp", ".", "leave_progmode", "(", ")" ]
https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/pymcuprog/nvmspi.py#L30-L34
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scikit-learn/py2/sklearn/model_selection/_validation.py
python
permutation_test_score
(estimator, X, y, groups=None, cv=None, n_permutations=100, n_jobs=1, random_state=0, verbose=0, scoring=None)
return score, permutation_scores, pvalue
Evaluate the significance of a cross-validated score with permutations Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like The target variable to try to predict in the case of supervised learning. groups : array-like, with shape (n_samples,), optional Labels to constrain permutation within groups, i.e. ``y`` values are permuted among samples with the same group identifier. When not specified, ``y`` values are permuted among all samples. When a grouped cross-validator is used, the group labels are also passed on to the ``split`` method of the cross-validator. The cross-validator uses them for grouping the samples while splitting the dataset into train/test set. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_permutations : integer, optional Number of times to permute ``y``. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. verbose : integer, optional The verbosity level. Returns ------- score : float The true score without permuting targets. permutation_scores : array, shape (n_permutations,) The scores obtained for each permutations. pvalue : float The returned value equals p-value if `scoring` returns bigger numbers for better scores (e.g., accuracy_score). If `scoring` is rather a loss function (i.e. when lower is better such as with `mean_squared_error`) then this is actually the complement of the p-value: 1 - p-value. Notes ----- This function implements Test 1 in: Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11
Evaluate the significance of a cross-validated score with permutations
[ "Evaluate", "the", "significance", "of", "a", "cross", "-", "validated", "score", "with", "permutations" ]
def permutation_test_score(estimator, X, y, groups=None, cv=None, n_permutations=100, n_jobs=1, random_state=0, verbose=0, scoring=None): """Evaluate the significance of a cross-validated score with permutations Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like The target variable to try to predict in the case of supervised learning. groups : array-like, with shape (n_samples,), optional Labels to constrain permutation within groups, i.e. ``y`` values are permuted among samples with the same group identifier. When not specified, ``y`` values are permuted among all samples. When a grouped cross-validator is used, the group labels are also passed on to the ``split`` method of the cross-validator. The cross-validator uses them for grouping the samples while splitting the dataset into train/test set. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_permutations : integer, optional Number of times to permute ``y``. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. verbose : integer, optional The verbosity level. Returns ------- score : float The true score without permuting targets. permutation_scores : array, shape (n_permutations,) The scores obtained for each permutations. pvalue : float The returned value equals p-value if `scoring` returns bigger numbers for better scores (e.g., accuracy_score). If `scoring` is rather a loss function (i.e. when lower is better such as with `mean_squared_error`) then this is actually the complement of the p-value: 1 - p-value. Notes ----- This function implements Test 1 in: Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11 """ X, y, groups = indexable(X, y, groups) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, groups, random_state), groups, cv, scorer) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue
[ "def", "permutation_test_score", "(", "estimator", ",", "X", ",", "y", ",", "groups", "=", "None", ",", "cv", "=", "None", ",", "n_permutations", "=", "100", ",", "n_jobs", "=", "1", ",", "random_state", "=", "0", ",", "verbose", "=", "0", ",", "scoring", "=", "None", ")", ":", "X", ",", "y", ",", "groups", "=", "indexable", "(", "X", ",", "y", ",", "groups", ")", "cv", "=", "check_cv", "(", "cv", ",", "y", ",", "classifier", "=", "is_classifier", "(", "estimator", ")", ")", "scorer", "=", "check_scoring", "(", "estimator", ",", "scoring", "=", "scoring", ")", "random_state", "=", "check_random_state", "(", "random_state", ")", "# We clone the estimator to make sure that all the folds are", "# independent, and that it is pickle-able.", "score", "=", "_permutation_test_score", "(", "clone", "(", "estimator", ")", ",", "X", ",", "y", ",", "groups", ",", "cv", ",", "scorer", ")", "permutation_scores", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ",", "verbose", "=", "verbose", ")", "(", "delayed", "(", "_permutation_test_score", ")", "(", "clone", "(", "estimator", ")", ",", "X", ",", "_shuffle", "(", "y", ",", "groups", ",", "random_state", ")", ",", "groups", ",", "cv", ",", "scorer", ")", "for", "_", "in", "range", "(", "n_permutations", ")", ")", "permutation_scores", "=", "np", ".", "array", "(", "permutation_scores", ")", "pvalue", "=", "(", "np", ".", "sum", "(", "permutation_scores", ">=", "score", ")", "+", "1.0", ")", "/", "(", "n_permutations", "+", "1", ")", "return", "score", ",", "permutation_scores", ",", "pvalue" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/model_selection/_validation.py#L514-L617
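A hedged usage sketch, assuming a scikit-learn installation where this function is exported from sklearn.model_selection:

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import permutation_test_score
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
score, perm_scores, pvalue = permutation_test_score(
    SVC(kernel='linear'), X, y, cv=5, n_permutations=100, random_state=0)
# A small p-value means the unpermuted score is unlikely to arise by chance.
print('score=%.3f p=%.4f' % (score, pvalue))
```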
SequoiaDB/SequoiaDB
2894ed7e5bd6fe57330afc900cf76d0ff0df9f64
tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py
python
xmlDoc.xincludeProcessFlags
(self, flags)
return ret
Implement the XInclude substitution on the XML document @doc
Implement the XInclude substitution on the XML document
[ "Implement", "the", "XInclude", "substitution", "on", "the", "XML", "document" ]
def xincludeProcessFlags(self, flags): """Implement the XInclude substitution on the XML document @doc """ ret = libxml2mod.xmlXIncludeProcessFlags(self._o, flags) return ret
[ "def", "xincludeProcessFlags", "(", "self", ",", "flags", ")", ":", "ret", "=", "libxml2mod", ".", "xmlXIncludeProcessFlags", "(", "self", ".", "_o", ",", "flags", ")", "return", "ret" ]
https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L4766-L4769
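A hedged sketch of driving the binding above; 'doc.xml' is a hypothetical file containing xi:include elements, and flags is the usual libxml2 parser-option bitmask (0 for defaults):

```python
import libxml2

doc = libxml2.parseFile('doc.xml')   # hypothetical input document
n = doc.xincludeProcessFlags(0)      # substitutions made, or -1 on error
print('XInclude substitutions:', n)
doc.freeDoc()
```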
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/eager/python/examples/revnet/main_estimator.py
python
get_input_fn
(config, data_dir, split)
return input_fn
Get the input function that is required by the `tf.estimator` API. Args: config: Customized hyperparameters data_dir: Directory where the data is stored split: One of `train`, `validation`, `train_all`, and `test` Returns: Input function required by the `tf.estimator` API
Get the input function that is required by the `tf.estimator` API.
[ "Get", "the", "input", "function", "that", "is", "required", "by", "the", "tf", ".", "estimator", "API", "." ]
def get_input_fn(config, data_dir, split): """Get the input function that is required by the `tf.estimator` API. Args: config: Customized hyperparameters data_dir: Directory where the data is stored split: One of `train`, `validation`, `train_all`, and `test` Returns: Input function required by the `tf.estimator` API """ data_dir = os.path.join(data_dir, config.dataset) # Fix split-dependent hyperparameters if split == "train_all" or split == "train": data_aug = True batch_size = config.batch_size epochs = config.epochs shuffle = True prefetch = config.batch_size else: data_aug = False batch_size = config.eval_batch_size epochs = 1 shuffle = False prefetch = config.eval_batch_size def input_fn(): """Input function required by the `tf.estimator.Estimator` API.""" return cifar_input.get_ds_from_tfrecords( data_dir=data_dir, split=split, data_aug=data_aug, batch_size=batch_size, epochs=epochs, shuffle=shuffle, prefetch=prefetch, data_format=config.data_format) return input_fn
[ "def", "get_input_fn", "(", "config", ",", "data_dir", ",", "split", ")", ":", "data_dir", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "config", ".", "dataset", ")", "# Fix split-dependent hyperparameters", "if", "split", "==", "\"train_all\"", "or", "split", "==", "\"train\"", ":", "data_aug", "=", "True", "batch_size", "=", "config", ".", "batch_size", "epochs", "=", "config", ".", "epochs", "shuffle", "=", "True", "prefetch", "=", "config", ".", "batch_size", "else", ":", "data_aug", "=", "False", "batch_size", "=", "config", ".", "eval_batch_size", "epochs", "=", "1", "shuffle", "=", "False", "prefetch", "=", "config", ".", "eval_batch_size", "def", "input_fn", "(", ")", ":", "\"\"\"Input function required by the `tf.estimator.Estimator` API.\"\"\"", "return", "cifar_input", ".", "get_ds_from_tfrecords", "(", "data_dir", "=", "data_dir", ",", "split", "=", "split", ",", "data_aug", "=", "data_aug", ",", "batch_size", "=", "batch_size", ",", "epochs", "=", "epochs", ",", "shuffle", "=", "shuffle", ",", "prefetch", "=", "prefetch", ",", "data_format", "=", "config", ".", "data_format", ")", "return", "input_fn" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/eager/python/examples/revnet/main_estimator.py#L92-L131
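The function's real work is the closure pattern: split-dependent settings are frozen once, and a zero-argument input_fn is returned for tf.estimator to call later. A self-contained toy of that pattern (the dict stands in for the tf.data pipeline that cifar_input actually builds):

```python
def make_input_fn(split, batch_size=32, eval_batch_size=100):
    training = split in ('train', 'train_all')
    settings = {
        'data_aug': training,                                  # augment only for training
        'batch_size': batch_size if training else eval_batch_size,
        'shuffle': training,
    }
    def input_fn():
        # A real version would build and return a tf.data.Dataset here.
        return settings
    return input_fn

print(make_input_fn('train')())  # {'data_aug': True, 'batch_size': 32, 'shuffle': True}
print(make_input_fn('test')())   # {'data_aug': False, 'batch_size': 100, 'shuffle': False}
```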
bilibili/biliobs
573613dc3b2b63fe7c1506cc94717609a2c52c0c
third_party/freetype/src/tools/docmaker/content.py
python
ContentProcessor.set_section
( self, section_name )
Set current section during parsing.
Set current section during parsing.
[ "Set", "current", "section", "during", "parsing", "." ]
def set_section( self, section_name ): """Set current section during parsing.""" if not section_name in self.sections: section = DocSection( section_name ) self.sections[section_name] = section self.section = section else: self.section = self.sections[section_name]
[ "def", "set_section", "(", "self", ",", "section_name", ")", ":", "if", "not", "section_name", "in", "self", ".", "sections", ":", "section", "=", "DocSection", "(", "section_name", ")", "self", ".", "sections", "[", "section_name", "]", "=", "section", "self", ".", "section", "=", "section", "else", ":", "self", ".", "section", "=", "self", ".", "sections", "[", "section_name", "]" ]
https://github.com/bilibili/biliobs/blob/573613dc3b2b63fe7c1506cc94717609a2c52c0c/third_party/freetype/src/tools/docmaker/content.py#L414-L421
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2.py
python
xmlDoc.saveFormatFileEnc
(self, filename, encoding, format)
return ret
Dump an XML document to a file or an URL.
Dump an XML document to a file or an URL.
[ "Dump", "an", "XML", "document", "to", "a", "file", "or", "an", "URL", "." ]
def saveFormatFileEnc(self, filename, encoding, format): """Dump an XML document to a file or an URL. """ ret = libxml2mod.xmlSaveFormatFileEnc(filename, self._o, encoding, format) return ret
[ "def", "saveFormatFileEnc", "(", "self", ",", "filename", ",", "encoding", ",", "format", ")", ":", "ret", "=", "libxml2mod", ".", "xmlSaveFormatFileEnc", "(", "filename", ",", "self", ".", "_o", ",", "encoding", ",", "format", ")", "return", "ret" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L4505-L4508
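A hedged sketch of the dump call above; the output path is hypothetical, and format=1 asks for indented output:

```python
import libxml2

doc = libxml2.parseDoc('<root><child/></root>')
nbytes = doc.saveFormatFileEnc('/tmp/out.xml', 'UTF-8', 1)  # bytes written, -1 on failure
print(nbytes)
doc.freeDoc()
```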
google/syzygy
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
third_party/numpy/files/numpy/lib/function_base.py
python
gradient
(f, *varargs)
Return the gradient of an N-dimensional array. The gradient is computed using central differences in the interior and first differences at the boundaries. The returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like An N-dimensional array containing samples of a scalar function. `*varargs` : scalars 0, 1, or N scalars specifying the sample distances in each direction, that is: `dx`, `dy`, `dz`, ... The default distance is 1. Returns ------- g : ndarray N arrays of the same shape as `f` giving the derivative of `f` with respect to each dimension. Examples -------- >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) >>> np.gradient(x) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(x, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])]
Return the gradient of an N-dimensional array.
[ "Return", "the", "gradient", "of", "an", "N", "-", "dimensional", "array", "." ]
def gradient(f, *varargs): """ Return the gradient of an N-dimensional array. The gradient is computed using central differences in the interior and first differences at the boundaries. The returned gradient hence has the same shape as the input array. Parameters ---------- f : array_like An N-dimensional array containing samples of a scalar function. `*varargs` : scalars 0, 1, or N scalars specifying the sample distances in each direction, that is: `dx`, `dy`, `dz`, ... The default distance is 1. Returns ------- g : ndarray N arrays of the same shape as `f` giving the derivative of `f` with respect to each dimension. Examples -------- >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float) >>> np.gradient(x) array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) >>> np.gradient(x, 2) array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)) [array([[ 2., 2., -1.], [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], [ 1. , 1. , 1. ]])] """ N = len(f.shape) # number of dimensions n = len(varargs) if n == 0: dx = [1.0]*N elif n == 1: dx = [varargs[0]]*N elif n == N: dx = list(varargs) else: raise SyntaxError( "invalid number of arguments") # use central differences on interior and first differences on endpoints outvals = [] # create slice objects --- initially all are [:, :, ..., :] slice1 = [slice(None)]*N slice2 = [slice(None)]*N slice3 = [slice(None)]*N otype = f.dtype.char if otype not in ['f', 'd', 'F', 'D']: otype = 'd' for axis in range(N): # select out appropriate parts for this dimension out = np.zeros_like(f).astype(otype) slice1[axis] = slice(1, -1) slice2[axis] = slice(2, None) slice3[axis] = slice(None, -2) # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0 out[slice1] = (f[slice2] - f[slice3])/2.0 slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 # 1D equivalent -- out[0] = (f[1] - f[0]) out[slice1] = (f[slice2] - f[slice3]) slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 # 1D equivalent -- out[-1] = (f[-1] - f[-2]) out[slice1] = (f[slice2] - f[slice3]) # divide by step size outvals.append(out / dx[axis]) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) slice2[axis] = slice(None) slice3[axis] = slice(None) if N == 1: return outvals[0] else: return outvals
[ "def", "gradient", "(", "f", ",", "*", "varargs", ")", ":", "N", "=", "len", "(", "f", ".", "shape", ")", "# number of dimensions", "n", "=", "len", "(", "varargs", ")", "if", "n", "==", "0", ":", "dx", "=", "[", "1.0", "]", "*", "N", "elif", "n", "==", "1", ":", "dx", "=", "[", "varargs", "[", "0", "]", "]", "*", "N", "elif", "n", "==", "N", ":", "dx", "=", "list", "(", "varargs", ")", "else", ":", "raise", "SyntaxError", "(", "\"invalid number of arguments\"", ")", "# use central differences on interior and first differences on endpoints", "outvals", "=", "[", "]", "# create slice objects --- initially all are [:, :, ..., :]", "slice1", "=", "[", "slice", "(", "None", ")", "]", "*", "N", "slice2", "=", "[", "slice", "(", "None", ")", "]", "*", "N", "slice3", "=", "[", "slice", "(", "None", ")", "]", "*", "N", "otype", "=", "f", ".", "dtype", ".", "char", "if", "otype", "not", "in", "[", "'f'", ",", "'d'", ",", "'F'", ",", "'D'", "]", ":", "otype", "=", "'d'", "for", "axis", "in", "range", "(", "N", ")", ":", "# select out appropriate parts for this dimension", "out", "=", "np", ".", "zeros_like", "(", "f", ")", ".", "astype", "(", "otype", ")", "slice1", "[", "axis", "]", "=", "slice", "(", "1", ",", "-", "1", ")", "slice2", "[", "axis", "]", "=", "slice", "(", "2", ",", "None", ")", "slice3", "[", "axis", "]", "=", "slice", "(", "None", ",", "-", "2", ")", "# 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0", "out", "[", "slice1", "]", "=", "(", "f", "[", "slice2", "]", "-", "f", "[", "slice3", "]", ")", "/", "2.0", "slice1", "[", "axis", "]", "=", "0", "slice2", "[", "axis", "]", "=", "1", "slice3", "[", "axis", "]", "=", "0", "# 1D equivalent -- out[0] = (f[1] - f[0])", "out", "[", "slice1", "]", "=", "(", "f", "[", "slice2", "]", "-", "f", "[", "slice3", "]", ")", "slice1", "[", "axis", "]", "=", "-", "1", "slice2", "[", "axis", "]", "=", "-", "1", "slice3", "[", "axis", "]", "=", "-", "2", "# 1D equivalent -- out[-1] = (f[-1] - f[-2])", "out", "[", "slice1", "]", "=", "(", "f", "[", "slice2", "]", "-", "f", "[", "slice3", "]", ")", "# divide by step size", "outvals", ".", "append", "(", "out", "/", "dx", "[", "axis", "]", ")", "# reset the slice object in this dimension to \":\"", "slice1", "[", "axis", "]", "=", "slice", "(", "None", ")", "slice2", "[", "axis", "]", "=", "slice", "(", "None", ")", "slice3", "[", "axis", "]", "=", "slice", "(", "None", ")", "if", "N", "==", "1", ":", "return", "outvals", "[", "0", "]", "else", ":", "return", "outvals" ]
https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/numpy/files/numpy/lib/function_base.py#L822-L915
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/robot-return-to-origin.py
python
Solution.judgeCircle
(self, moves)
return v == 0 and h == 0
:type moves: str :rtype: bool
:type moves: str :rtype: bool
[ ":", "type", "moves", ":", "str", ":", "rtype", ":", "bool" ]
def judgeCircle(self, moves): """ :type moves: str :rtype: bool """ v, h = 0, 0 for move in moves: if move == 'U': v += 1 elif move == 'D': v -= 1 elif move == 'R': h += 1 elif move == 'L': h -= 1 return v == 0 and h == 0
[ "def", "judgeCircle", "(", "self", ",", "moves", ")", ":", "v", ",", "h", "=", "0", ",", "0", "for", "move", "in", "moves", ":", "if", "move", "==", "'U'", ":", "v", "+=", "1", "elif", "move", "==", "'D'", ":", "v", "-=", "1", "elif", "move", "==", "'R'", ":", "h", "+=", "1", "elif", "move", "==", "'L'", ":", "h", "-=", "1", "return", "v", "==", "0", "and", "h", "==", "0" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/robot-return-to-origin.py#L20-L35
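Usage of the class above; the robot returns to the origin only when vertical and horizontal moves cancel out:

```python
sol = Solution()
print(sol.judgeCircle('UD'))  # True: one step up cancels one step down
print(sol.judgeCircle('LL'))  # False: net displacement of two to the left
```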
CGRU/cgru
1881a4128530e3d31ac6c25314c18314fc50c2c7
plugins/maya/afanasy/__init__.py
python
UI.set_field_value
(self, control, value, *args, **kwargs)
sets the given field value :param control: the UI control :param value: the value, can be a callable :return:
sets the given field value
[ "sets", "the", "given", "field", "value" ]
def set_field_value(self, control, value, *args, **kwargs): """sets the given field value :param control: the UI control :param value: the value, can be a callable :return: """ try: v = value() except TypeError: v = value control.setValue(v)
[ "def", "set_field_value", "(", "self", ",", "control", ",", "value", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "v", "=", "value", "(", ")", "except", "TypeError", ":", "v", "=", "value", "control", ".", "setValue", "(", "v", ")" ]
https://github.com/CGRU/cgru/blob/1881a4128530e3d31ac6c25314c18314fc50c2c7/plugins/maya/afanasy/__init__.py#L341-L352
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/telemetry/internal/platform/tracing_agent/__init__.py
python
TracingAgent.StartAgentTracing
(self, config, timeout)
Override to add tracing agent's custom logic to start tracing. Depending on trace_options and category_filter, the tracing agent may choose to start or not start tracing. Args: config: tracing_config instance that contains trace_option and category_filter trace_options: an instance of tracing_options.TracingOptions that control which core tracing systems should be enabled. category_filter: an instance of chrome_trace_category_filter.ChromeTraceCategoryFilter timeout: number of seconds that this tracing agent should try to start tracing until time out. Returns: True if tracing agent started successfully.
Override to add tracing agent's custom logic to start tracing.
[ "Override", "to", "add", "tracing", "agent", "s", "custom", "logic", "to", "start", "tracing", "." ]
def StartAgentTracing(self, config, timeout): """ Override to add tracing agent's custom logic to start tracing. Depending on trace_options and category_filter, the tracing agent may choose to start or not start tracing. Args: config: tracing_config instance that contains trace_option and category_filter trace_options: an instance of tracing_options.TracingOptions that control which core tracing systems should be enabled. category_filter: an instance of chrome_trace_category_filter.ChromeTraceCategoryFilter timeout: number of seconds that this tracing agent should try to start tracing until time out. Returns: True if tracing agent started successfully. """ raise NotImplementedError
[ "def", "StartAgentTracing", "(", "self", ",", "config", ",", "timeout", ")", ":", "raise", "NotImplementedError" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/telemetry/internal/platform/tracing_agent/__init__.py#L27-L46
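A hedged sketch of the override contract the docstring describes; the subclass is hypothetical and deliberately records nothing:

```python
class NoopTracingAgent(TracingAgent):
    """Hypothetical agent that claims to start but records nothing."""

    def StartAgentTracing(self, config, timeout):
        # A real agent would inspect config's trace options and category
        # filter here and start its tracing backend within `timeout` seconds.
        return True  # True: the tracing agent started successfully
```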
eric612/Caffe-YOLOv3-Windows
6736ca6e16781789b828cc64218ff77cc3454e5d
scripts/cpp_lint.py
python
_NestingState.SeenOpenBrace
(self)
return (not self.stack) or self.stack[-1].seen_open_brace
Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace.
Check if we have seen the opening brace for the innermost block.
[ "Check", "if", "we", "have", "seen", "the", "opening", "brace", "for", "the", "innermost", "block", "." ]
def SeenOpenBrace(self): """Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace. """ return (not self.stack) or self.stack[-1].seen_open_brace
[ "def", "SeenOpenBrace", "(", "self", ")", ":", "return", "(", "not", "self", ".", "stack", ")", "or", "self", ".", "stack", "[", "-", "1", "]", ".", "seen_open_brace" ]
https://github.com/eric612/Caffe-YOLOv3-Windows/blob/6736ca6e16781789b828cc64218ff77cc3454e5d/scripts/cpp_lint.py#L1935-L1942
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/richtext.py
python
RichTextBuffer.LoadStream
(*args, **kwargs)
return _richtext.RichTextBuffer_LoadStream(*args, **kwargs)
LoadStream(self, InputStream stream, int type=RICHTEXT_TYPE_ANY) -> bool
LoadStream(self, InputStream stream, int type=RICHTEXT_TYPE_ANY) -> bool
[ "LoadStream", "(", "self", "InputStream", "stream", "int", "type", "=", "RICHTEXT_TYPE_ANY", ")", "-", ">", "bool" ]
def LoadStream(*args, **kwargs): """LoadStream(self, InputStream stream, int type=RICHTEXT_TYPE_ANY) -> bool""" return _richtext.RichTextBuffer_LoadStream(*args, **kwargs)
[ "def", "LoadStream", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextBuffer_LoadStream", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L2253-L2255
intel/caffe
3f494b442ee3f9d17a07b09ecbd5fa2bbda00836
examples/rfcn/lib/pycocotools/cocoeval.py
python
COCOeval._prepare
(self)
Prepare ._gts and ._dts for evaluation based on params :return: None
Prepare ._gts and ._dts for evaluation based on params :return: None
[ "Prepare", ".", "_gts", "and", ".", "_dts", "for", "evaluation", "based", "on", "params", ":", "return", ":", "None" ]
def _prepare(self): ''' Prepare ._gts and ._dts for evaluation based on params :return: None ''' # def _toMask(objs, coco): # modify segmentation by reference for obj in objs: t = coco.imgs[obj['image_id']] if type(obj['segmentation']) == list: if type(obj['segmentation'][0]) == dict: print 'debug' obj['segmentation'] = mask.frPyObjects(obj['segmentation'],t['height'],t['width']) if len(obj['segmentation']) == 1: obj['segmentation'] = obj['segmentation'][0] else: # an object can have multiple polygon regions # merge them into one RLE mask obj['segmentation'] = mask.merge(obj['segmentation']) elif type(obj['segmentation']) == dict and type(obj['segmentation']['counts']) == list: obj['segmentation'] = mask.frPyObjects([obj['segmentation']],t['height'],t['width'])[0] elif type(obj['segmentation']) == dict and \ type(obj['segmentation']['counts'] == unicode or type(obj['segmentation']['counts']) == str): pass else: raise Exception('segmentation format not supported.') p = self.params if p.useCats: gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) else: gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds)) dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds)) if p.useSegm: _toMask(gts, self.cocoGt) _toMask(dts, self.cocoDt) self._gts = defaultdict(list) # gt for evaluation self._dts = defaultdict(list) # dt for evaluation for gt in gts: self._gts[gt['image_id'], gt['category_id']].append(gt) for dt in dts: self._dts[dt['image_id'], dt['category_id']].append(dt) self.evalImgs = defaultdict(list) # per-image per-category evaluation results self.eval = {}
[ "def", "_prepare", "(", "self", ")", ":", "#", "def", "_toMask", "(", "objs", ",", "coco", ")", ":", "# modify segmentation by reference", "for", "obj", "in", "objs", ":", "t", "=", "coco", ".", "imgs", "[", "obj", "[", "'image_id'", "]", "]", "if", "type", "(", "obj", "[", "'segmentation'", "]", ")", "==", "list", ":", "if", "type", "(", "obj", "[", "'segmentation'", "]", "[", "0", "]", ")", "==", "dict", ":", "print", "'debug'", "obj", "[", "'segmentation'", "]", "=", "mask", ".", "frPyObjects", "(", "obj", "[", "'segmentation'", "]", ",", "t", "[", "'height'", "]", ",", "t", "[", "'width'", "]", ")", "if", "len", "(", "obj", "[", "'segmentation'", "]", ")", "==", "1", ":", "obj", "[", "'segmentation'", "]", "=", "obj", "[", "'segmentation'", "]", "[", "0", "]", "else", ":", "# an object can have multiple polygon regions", "# merge them into one RLE mask", "obj", "[", "'segmentation'", "]", "=", "mask", ".", "merge", "(", "obj", "[", "'segmentation'", "]", ")", "elif", "type", "(", "obj", "[", "'segmentation'", "]", ")", "==", "dict", "and", "type", "(", "obj", "[", "'segmentation'", "]", "[", "'counts'", "]", ")", "==", "list", ":", "obj", "[", "'segmentation'", "]", "=", "mask", ".", "frPyObjects", "(", "[", "obj", "[", "'segmentation'", "]", "]", ",", "t", "[", "'height'", "]", ",", "t", "[", "'width'", "]", ")", "[", "0", "]", "elif", "type", "(", "obj", "[", "'segmentation'", "]", ")", "==", "dict", "and", "type", "(", "obj", "[", "'segmentation'", "]", "[", "'counts'", "]", "==", "unicode", "or", "type", "(", "obj", "[", "'segmentation'", "]", "[", "'counts'", "]", ")", "==", "str", ")", ":", "pass", "else", ":", "raise", "Exception", "(", "'segmentation format not supported.'", ")", "p", "=", "self", ".", "params", "if", "p", ".", "useCats", ":", "gts", "=", "self", ".", "cocoGt", ".", "loadAnns", "(", "self", ".", "cocoGt", ".", "getAnnIds", "(", "imgIds", "=", "p", ".", "imgIds", ",", "catIds", "=", "p", ".", "catIds", ")", ")", "dts", "=", "self", ".", "cocoDt", ".", "loadAnns", "(", "self", ".", "cocoDt", ".", "getAnnIds", "(", "imgIds", "=", "p", ".", "imgIds", ",", "catIds", "=", "p", ".", "catIds", ")", ")", "else", ":", "gts", "=", "self", ".", "cocoGt", ".", "loadAnns", "(", "self", ".", "cocoGt", ".", "getAnnIds", "(", "imgIds", "=", "p", ".", "imgIds", ")", ")", "dts", "=", "self", ".", "cocoDt", ".", "loadAnns", "(", "self", ".", "cocoDt", ".", "getAnnIds", "(", "imgIds", "=", "p", ".", "imgIds", ")", ")", "if", "p", ".", "useSegm", ":", "_toMask", "(", "gts", ",", "self", ".", "cocoGt", ")", "_toMask", "(", "dts", ",", "self", ".", "cocoDt", ")", "self", ".", "_gts", "=", "defaultdict", "(", "list", ")", "# gt for evaluation", "self", ".", "_dts", "=", "defaultdict", "(", "list", ")", "# dt for evaluation", "for", "gt", "in", "gts", ":", "self", ".", "_gts", "[", "gt", "[", "'image_id'", "]", ",", "gt", "[", "'category_id'", "]", "]", ".", "append", "(", "gt", ")", "for", "dt", "in", "dts", ":", "self", ".", "_dts", "[", "dt", "[", "'image_id'", "]", ",", "dt", "[", "'category_id'", "]", "]", ".", "append", "(", "dt", ")", "self", ".", "evalImgs", "=", "defaultdict", "(", "list", ")", "# per-image per-category evaluation results", "self", ".", "eval", "=", "{", "}" ]
https://github.com/intel/caffe/blob/3f494b442ee3f9d17a07b09ecbd5fa2bbda00836/examples/rfcn/lib/pycocotools/cocoeval.py#L82-L127
arangodb/arangodb
0d658689c7d1b721b314fa3ca27d38303e1570c8
3rdParty/V8/gyp/easy_xml.py
python
XmlToString
(content, encoding='utf-8', pretty=False)
return ''.join(xml_parts)
Writes the XML content to disk, touching the file only if it has changed. Visual Studio files have a lot of pre-defined structures. This function makes it easy to represent these structures as Python data structures, instead of having to create a lot of function calls. Each XML element of the content is represented as a list composed of: 1. The name of the element, a string, 2. The attributes of the element, a dictionary (optional), and 3+. The content of the element, if any. Strings are simple text nodes and lists are child elements. Example 1: <test/> becomes ['test'] Example 2: <myelement a='value1' b='value2'> <childtype>This is</childtype> <childtype>it!</childtype> </myelement> becomes ['myelement', {'a':'value1', 'b':'value2'}, ['childtype', 'This is'], ['childtype', 'it!'], ] Args: content: The structured content to be converted. encoding: The encoding to report on the first XML line. pretty: True if we want pretty printing with indents and new lines. Returns: The XML content as a string.
Writes the XML content to disk, touching the file only if it has changed.
[ "Writes", "the", "XML", "content", "to", "disk", "touching", "the", "file", "only", "if", "it", "has", "changed", "." ]
def XmlToString(content, encoding='utf-8', pretty=False): """ Writes the XML content to disk, touching the file only if it has changed. Visual Studio files have a lot of pre-defined structures. This function makes it easy to represent these structures as Python data structures, instead of having to create a lot of function calls. Each XML element of the content is represented as a list composed of: 1. The name of the element, a string, 2. The attributes of the element, a dictionary (optional), and 3+. The content of the element, if any. Strings are simple text nodes and lists are child elements. Example 1: <test/> becomes ['test'] Example 2: <myelement a='value1' b='value2'> <childtype>This is</childtype> <childtype>it!</childtype> </myelement> becomes ['myelement', {'a':'value1', 'b':'value2'}, ['childtype', 'This is'], ['childtype', 'it!'], ] Args: content: The structured content to be converted. encoding: The encoding to report on the first XML line. pretty: True if we want pretty printing with indents and new lines. Returns: The XML content as a string. """ # We create a huge list of all the elements of the file. xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding] if pretty: xml_parts.append('\n') _ConstructContentList(xml_parts, content, pretty) # Convert it to a string return ''.join(xml_parts)
[ "def", "XmlToString", "(", "content", ",", "encoding", "=", "'utf-8'", ",", "pretty", "=", "False", ")", ":", "# We create a huge list of all the elements of the file.", "xml_parts", "=", "[", "'<?xml version=\"1.0\" encoding=\"%s\"?>'", "%", "encoding", "]", "if", "pretty", ":", "xml_parts", ".", "append", "(", "'\\n'", ")", "_ConstructContentList", "(", "xml_parts", ",", "content", ",", "pretty", ")", "# Convert it to a string", "return", "''", ".", "join", "(", "xml_parts", ")" ]
https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/V8/gyp/easy_xml.py#L12-L57
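Feeding the docstring's own example structure through the function (output shown approximately; attribute ordering may differ):

```python
content = ['myelement', {'a': 'value1', 'b': 'value2'},
           ['childtype', 'This is'],
           ['childtype', 'it!'],
          ]
print(XmlToString(content))
# roughly: <?xml version="1.0" encoding="utf-8"?><myelement a="value1"
#          b="value2"><childtype>This is</childtype><childtype>it!</childtype>
#          </myelement>
```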
HyeonwooNoh/caffe
d9e8494a2832d67b25dee37194c7bcb9d52d0e42
scripts/cpp_lint.py
python
_CppLintState.PrintErrorCounts
(self)
Print a summary of errors by category, and the total.
Print a summary of errors by category, and the total.
[ "Print", "a", "summary", "of", "errors", "by", "category", "and", "the", "total", "." ]
def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count)
[ "def", "PrintErrorCounts", "(", "self", ")", ":", "for", "category", ",", "count", "in", "self", ".", "errors_by_category", ".", "iteritems", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'Category \\'%s\\' errors found: %d\\n'", "%", "(", "category", ",", "count", ")", ")", "sys", ".", "stderr", ".", "write", "(", "'Total errors found: %d\\n'", "%", "self", ".", "error_count", ")" ]
https://github.com/HyeonwooNoh/caffe/blob/d9e8494a2832d67b25dee37194c7bcb9d52d0e42/scripts/cpp_lint.py#L757-L762
microsoft/ELL
a1d6bacc37a14879cc025d9be2ba40b1a0632315
tools/importers/common/converters.py
python
ConvertInput.convert_node
(self, conversion_parameters: typing.Mapping[str, typing.Any])
Derived classes override to return the appropriate ELL node
Derived classes override to return the appropriate ELL node
[ "Derived", "classes", "override", "to", "return", "the", "appropriate", "ELL", "node" ]
def convert_node(self, conversion_parameters: typing.Mapping[str, typing.Any]): """ Derived classes override to return the appropriate ELL node """ model = conversion_parameters["model"] builder = conversion_parameters["builder"] lookup_table = conversion_parameters["lookup_table"] step_interval_msec = conversion_parameters["step_interval_msec"] lag_threshold_msec = conversion_parameters["lag_threshold_msec"] function_prefix = "" # Add the InputNode to the model shape_entry = self.importer_node.output_shapes[0] ell_shape = self.get_ell_shape(shape_entry[0], shape_entry[1], 0) original_input_node = None if step_interval_msec is not None: # in the steppable case the input is a clock ticks (which is a double) input_node = builder.AddInputNode( model, ell.model.PortMemoryLayout([1]), ell.nodes.PortType.real) if lag_threshold_msec is None: lag_threshold_msec = 2 * step_interval_msec clock_node = builder.AddClockNode( model, ell.nodes.PortElements(input_node.GetOutputPort("output")), float(step_interval_msec), float(lag_threshold_msec), "{}LagNotification".format(function_prefix)) source_node = builder.AddSourceNode( model, ell.nodes.PortElements(clock_node.GetOutputPort("output")), ell.nodes.PortType.smallReal, ell.model.PortMemoryLayout(ell_shape), "{}InputCallback".format(function_prefix)) original_input_node = input_node input_node = source_node else: input_node = builder.AddInputNode( model, ell.model.PortMemoryLayout(ell_shape), ell.nodes.PortType.smallReal) original_input_node = input_node # Register the mapping lookup_table.add_imported_ell_node(self.importer_node, input_node) if step_interval_msec is not None: lookup_table.add_imported_ell_node(self.importer_node, clock_node) lookup_table.add_imported_ell_node(self.importer_node, source_node) # Special case: If output requires padding e.g. Input is connected to a # Convolutional node that requires padding, add a ReorderData node to # ensure proper memory layout. This can be skipped once Input supports # different memory layouts of the output. padding = self.importer_node.output_padding["size"] if padding > 0: # Create the reorder node port_elements = lookup_table.get_output_port_elements_for_node(input_node) input_memory_layout = memory_shapes.get_ell_port_memory_layout(shape_entry[0], shape_entry[1], 0) output_memory_layout = memory_shapes.get_ell_port_memory_layout(shape_entry[0], shape_entry[1], padding) reorder_node = builder.AddReorderDataNode(model, port_elements, input_memory_layout, output_memory_layout, [0, 1, 2]) # Register the mapping lookup_table.add_imported_ell_node(self.importer_node, reorder_node) lookup_table.add_ell_input(original_input_node)
[ "def", "convert_node", "(", "self", ",", "conversion_parameters", ":", "typing", ".", "Mapping", "[", "str", ",", "typing", ".", "Any", "]", ")", ":", "model", "=", "conversion_parameters", "[", "\"model\"", "]", "builder", "=", "conversion_parameters", "[", "\"builder\"", "]", "lookup_table", "=", "conversion_parameters", "[", "\"lookup_table\"", "]", "step_interval_msec", "=", "conversion_parameters", "[", "\"step_interval_msec\"", "]", "lag_threshold_msec", "=", "conversion_parameters", "[", "\"lag_threshold_msec\"", "]", "function_prefix", "=", "\"\"", "# Add the InputNode to the model", "shape_entry", "=", "self", ".", "importer_node", ".", "output_shapes", "[", "0", "]", "ell_shape", "=", "self", ".", "get_ell_shape", "(", "shape_entry", "[", "0", "]", ",", "shape_entry", "[", "1", "]", ",", "0", ")", "original_input_node", "=", "None", "if", "step_interval_msec", "is", "not", "None", ":", "# in the steppable case the input is a clock ticks (which is a double)", "input_node", "=", "builder", ".", "AddInputNode", "(", "model", ",", "ell", ".", "model", ".", "PortMemoryLayout", "(", "[", "1", "]", ")", ",", "ell", ".", "nodes", ".", "PortType", ".", "real", ")", "if", "lag_threshold_msec", "is", "None", ":", "lag_threshold_msec", "=", "2", "*", "step_interval_msec", "clock_node", "=", "builder", ".", "AddClockNode", "(", "model", ",", "ell", ".", "nodes", ".", "PortElements", "(", "input_node", ".", "GetOutputPort", "(", "\"output\"", ")", ")", ",", "float", "(", "step_interval_msec", ")", ",", "float", "(", "lag_threshold_msec", ")", ",", "\"{}LagNotification\"", ".", "format", "(", "function_prefix", ")", ")", "source_node", "=", "builder", ".", "AddSourceNode", "(", "model", ",", "ell", ".", "nodes", ".", "PortElements", "(", "clock_node", ".", "GetOutputPort", "(", "\"output\"", ")", ")", ",", "ell", ".", "nodes", ".", "PortType", ".", "smallReal", ",", "ell", ".", "model", ".", "PortMemoryLayout", "(", "ell_shape", ")", ",", "\"{}InputCallback\"", ".", "format", "(", "function_prefix", ")", ")", "original_input_node", "=", "input_node", "input_node", "=", "source_node", "else", ":", "input_node", "=", "builder", ".", "AddInputNode", "(", "model", ",", "ell", ".", "model", ".", "PortMemoryLayout", "(", "ell_shape", ")", ",", "ell", ".", "nodes", ".", "PortType", ".", "smallReal", ")", "original_input_node", "=", "input_node", "# Register the mapping", "lookup_table", ".", "add_imported_ell_node", "(", "self", ".", "importer_node", ",", "input_node", ")", "if", "step_interval_msec", "is", "not", "None", ":", "lookup_table", ".", "add_imported_ell_node", "(", "self", ".", "importer_node", ",", "clock_node", ")", "lookup_table", ".", "add_imported_ell_node", "(", "self", ".", "importer_node", ",", "source_node", ")", "# Special case: If output requires padding e.g. Input is connected to a", "# Convolutional node that requires padding, add a ReorderData node to", "# ensure proper memory layout. 
This can be skipped once Input supports", "# different memory layouts of the output.", "padding", "=", "self", ".", "importer_node", ".", "output_padding", "[", "\"size\"", "]", "if", "padding", ">", "0", ":", "# Create the reorder node", "port_elements", "=", "lookup_table", ".", "get_output_port_elements_for_node", "(", "input_node", ")", "input_memory_layout", "=", "memory_shapes", ".", "get_ell_port_memory_layout", "(", "shape_entry", "[", "0", "]", ",", "shape_entry", "[", "1", "]", ",", "0", ")", "output_memory_layout", "=", "memory_shapes", ".", "get_ell_port_memory_layout", "(", "shape_entry", "[", "0", "]", ",", "shape_entry", "[", "1", "]", ",", "padding", ")", "reorder_node", "=", "builder", ".", "AddReorderDataNode", "(", "model", ",", "port_elements", ",", "input_memory_layout", ",", "output_memory_layout", ",", "[", "0", ",", "1", ",", "2", "]", ")", "# Register the mapping", "lookup_table", ".", "add_imported_ell_node", "(", "self", ".", "importer_node", ",", "reorder_node", ")", "lookup_table", ".", "add_ell_input", "(", "original_input_node", ")" ]
https://github.com/microsoft/ELL/blob/a1d6bacc37a14879cc025d9be2ba40b1a0632315/tools/importers/common/converters.py#L1008-L1068
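The steppable-input wiring in the record above (InputNode → ClockNode → SourceNode) is hard to follow in tokenized form. Below is a minimal sketch of just that chain, using only the builder calls that appear in the function itself; the `ell.model.Model()`/`ell.model.ModelBuilder()` constructors and the example `ell_shape` are assumptions for illustration, not taken from this record.

```python
# Hedged sketch of convert_node's steppable-input chain, not a drop-in
# replacement. Assumes the ELL Python bindings are importable as `ell`;
# Model()/ModelBuilder() construction and ell_shape are illustrative guesses.
import ell

model = ell.model.Model()            # assumed constructor
builder = ell.model.ModelBuilder()   # assumed constructor
ell_shape = [32, 32, 3]              # hypothetical (rows, cols, channels)

step_interval_msec = 100.0
lag_threshold_msec = 2 * step_interval_msec  # default convert_node applies when None

# 1) Scalar real-valued input node: in steppable mode the model input is a clock tick.
input_node = builder.AddInputNode(
    model, ell.model.PortMemoryLayout([1]), ell.nodes.PortType.real)

# 2) Clock node converts ticks into step/lag notifications.
clock_node = builder.AddClockNode(
    model, ell.nodes.PortElements(input_node.GetOutputPort("output")),
    float(step_interval_msec), float(lag_threshold_msec), "LagNotification")

# 3) Source node fetches the actual tensor via a registered callback on each step.
source_node = builder.AddSourceNode(
    model, ell.nodes.PortElements(clock_node.GetOutputPort("output")),
    ell.nodes.PortType.smallReal, ell.model.PortMemoryLayout(ell_shape),
    "InputCallback")
```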
Atarity/Lightpack
4dee73a443cba4c4073291febe450e6c1941f3af
Software/apiexamples/liOSC/OSC.py
python
OSCMessage.__delitem__
(self, i)
Removes the indicated argument (or slice)
Removes the indicated argument (or slice)
[ "Removes", "the", "indicated", "argument", "(", "or", "slice", ")" ]
def __delitem__(self, i): """Removes the indicated argument (or slice) """ items = self.items() del items[i] self._reencode(items)
[ "def", "__delitem__", "(", "self", ",", "i", ")", ":", "items", "=", "self", ".", "items", "(", ")", "del", "items", "[", "i", "]", "self", ".", "_reencode", "(", "items", ")" ]
https://github.com/Atarity/Lightpack/blob/4dee73a443cba4c4073291febe450e6c1941f3af/Software/apiexamples/liOSC/OSC.py#L367-L373
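Since `__delitem__` simply decodes the arguments, deletes the indexed entry, and re-encodes the binary payload via `_reencode`, a short usage sketch may help. The `OSCMessage(address)` constructor, `append()`, and `values()` follow the usual pyOSC API this module derives from; treat them as assumptions rather than guarantees.

```python
# Hedged usage sketch for OSCMessage.__delitem__ (assumes OSC.py from this
# repo is on the path; OSCMessage(address), append(), and values() are
# assumed to match the standard pyOSC API this file is based on).
import OSC

msg = OSC.OSCMessage("/lightpack/brightness")
msg.append(42)       # argument 0
msg.append(3.14)     # argument 1
msg.append("full")   # argument 2

del msg[1]           # decodes items, drops 3.14, re-encodes the payload
print(msg.values())  # expected: [42, "full"]
```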
SpaceNetChallenge/BuildingDetectors
3def3c44b5847c744cd2f3356182892d92496579
qinhaifang/src/caffe-mnc/python/caffe/pycaffe.py
python
_Net_forward_backward_all
(self, blobs=None, diffs=None, **kwargs)
return all_outs, all_diffs
Run net forward + backward in batches. Parameters ---------- blobs: list of blobs to extract as in forward() diffs: list of diffs to extract as in backward() kwargs: Keys are input (for forward) and output (for backward) blob names and values are ndarrays. Refer to forward() and backward(). Prefilled variants are called for lack of input or output blobs. Returns ------- all_blobs: {blob name: blob ndarray} dict. all_diffs: {blob name: diff ndarray} dict.
Run net forward + backward in batches.
[ "Run", "net", "forward", "+", "backward", "in", "batches", "." ]
def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs): """ Run net forward + backward in batches. Parameters ---------- blobs: list of blobs to extract as in forward() diffs: list of diffs to extract as in backward() kwargs: Keys are input (for forward) and output (for backward) blob names and values are ndarrays. Refer to forward() and backward(). Prefilled variants are called for lack of input or output blobs. Returns ------- all_blobs: {blob name: blob ndarray} dict. all_diffs: {blob name: diff ndarray} dict. """ # Batch blobs and diffs. all_outs = {out: [] for out in set(self.outputs + (blobs or []))} all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))} forward_batches = self._batch({in_: kwargs[in_] for in_ in self.inputs if in_ in kwargs}) backward_batches = self._batch({out: kwargs[out] for out in self.outputs if out in kwargs}) # Collect outputs from batches (and heed lack of forward/backward batches). for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}): batch_blobs = self.forward(blobs=blobs, **fb) batch_diffs = self.backward(diffs=diffs, **bb) for out, out_blobs in batch_blobs.iteritems(): all_outs[out].extend(out_blobs.copy()) for diff, out_diffs in batch_diffs.iteritems(): all_diffs[diff].extend(out_diffs.copy()) # Package in ndarray. for out, diff in zip(all_outs, all_diffs): all_outs[out] = np.asarray(all_outs[out]) all_diffs[diff] = np.asarray(all_diffs[diff]) # Discard padding at the end and package in ndarray. pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next()) if pad: for out, diff in zip(all_outs, all_diffs): all_outs[out] = all_outs[out][:-pad] all_diffs[diff] = all_diffs[diff][:-pad] return all_outs, all_diffs
[ "def", "_Net_forward_backward_all", "(", "self", ",", "blobs", "=", "None", ",", "diffs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Batch blobs and diffs.", "all_outs", "=", "{", "out", ":", "[", "]", "for", "out", "in", "set", "(", "self", ".", "outputs", "+", "(", "blobs", "or", "[", "]", ")", ")", "}", "all_diffs", "=", "{", "diff", ":", "[", "]", "for", "diff", "in", "set", "(", "self", ".", "inputs", "+", "(", "diffs", "or", "[", "]", ")", ")", "}", "forward_batches", "=", "self", ".", "_batch", "(", "{", "in_", ":", "kwargs", "[", "in_", "]", "for", "in_", "in", "self", ".", "inputs", "if", "in_", "in", "kwargs", "}", ")", "backward_batches", "=", "self", ".", "_batch", "(", "{", "out", ":", "kwargs", "[", "out", "]", "for", "out", "in", "self", ".", "outputs", "if", "out", "in", "kwargs", "}", ")", "# Collect outputs from batches (and heed lack of forward/backward batches).", "for", "fb", ",", "bb", "in", "izip_longest", "(", "forward_batches", ",", "backward_batches", ",", "fillvalue", "=", "{", "}", ")", ":", "batch_blobs", "=", "self", ".", "forward", "(", "blobs", "=", "blobs", ",", "*", "*", "fb", ")", "batch_diffs", "=", "self", ".", "backward", "(", "diffs", "=", "diffs", ",", "*", "*", "bb", ")", "for", "out", ",", "out_blobs", "in", "batch_blobs", ".", "iteritems", "(", ")", ":", "all_outs", "[", "out", "]", ".", "extend", "(", "out_blobs", ".", "copy", "(", ")", ")", "for", "diff", ",", "out_diffs", "in", "batch_diffs", ".", "iteritems", "(", ")", ":", "all_diffs", "[", "diff", "]", ".", "extend", "(", "out_diffs", ".", "copy", "(", ")", ")", "# Package in ndarray.", "for", "out", ",", "diff", "in", "zip", "(", "all_outs", ",", "all_diffs", ")", ":", "all_outs", "[", "out", "]", "=", "np", ".", "asarray", "(", "all_outs", "[", "out", "]", ")", "all_diffs", "[", "diff", "]", "=", "np", ".", "asarray", "(", "all_diffs", "[", "diff", "]", ")", "# Discard padding at the end and package in ndarray.", "pad", "=", "len", "(", "all_outs", ".", "itervalues", "(", ")", ".", "next", "(", ")", ")", "-", "len", "(", "kwargs", ".", "itervalues", "(", ")", ".", "next", "(", ")", ")", "if", "pad", ":", "for", "out", ",", "diff", "in", "zip", "(", "all_outs", ",", "all_diffs", ")", ":", "all_outs", "[", "out", "]", "=", "all_outs", "[", "out", "]", "[", ":", "-", "pad", "]", "all_diffs", "[", "diff", "]", "=", "all_diffs", "[", "diff", "]", "[", ":", "-", "pad", "]", "return", "all_outs", ",", "all_diffs" ]
https://github.com/SpaceNetChallenge/BuildingDetectors/blob/3def3c44b5847c744cd2f3356182892d92496579/qinhaifang/src/caffe-mnc/python/caffe/pycaffe.py#L190-L232
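The function above is Python 2 code (`izip_longest`, `iteritems`, `itervalues().next()`). Setting Caffe aside, its core batching pattern (pad the inputs to a whole number of batches, process batch by batch, concatenate, then trim the padding) can be shown in a self-contained Python 3 sketch; `process_batch` is a hypothetical stand-in for `net.forward()`/`net.backward()`.

```python
import numpy as np

def process_batch(batch):
    # Hypothetical stand-in for net.forward()/net.backward(): doubles the input.
    return batch * 2.0

def run_in_batches(data, batch_size):
    """Pad to a whole number of batches, process each batch, concatenate,
    then discard the padding -- the same shape of logic as
    _Net_forward_backward_all above."""
    n = len(data)
    pad = (-n) % batch_size  # rows needed to fill the final batch
    padded = np.concatenate([data, np.zeros((pad,) + data.shape[1:])]) if pad else data
    outs = []
    for start in range(0, len(padded), batch_size):
        outs.extend(process_batch(padded[start:start + batch_size]))
    outs = np.asarray(outs)
    return outs[:-pad] if pad else outs  # trim padding, as pycaffe does

x = np.arange(10, dtype=float).reshape(10, 1)
y = run_in_batches(x, batch_size=4)
assert y.shape == (10, 1) and np.allclose(y, 2 * x)
```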