Schema: one flattened Python function record per row, fourteen columns (stringlengths = string with observed min/max length; stringclasses = categorical string; list = token list).

  nwo                 stringlengths    5 .. 86
  sha                 stringlengths   40 .. 40
  path                stringlengths    4 .. 189
  language            stringclasses    1 value
  identifier          stringlengths    1 .. 94
  parameters          stringlengths    2 .. 4.03k
  argument_list       stringclasses    1 value
  return_statement    stringlengths    0 .. 11.5k
  docstring           stringlengths    1 .. 33.2k
  docstring_summary   stringlengths    0 .. 5.15k
  docstring_tokens    list
  function            stringlengths   34 .. 151k
  function_tokens     list
  url                 stringlengths   90 .. 278
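Each row below lists these fields in order: repository (nwo), commit (sha), file path, language, function identifier, parameter list, optional return statement, docstring plus its summary and token list, full source, source tokens, and a GitHub permalink. A minimal sketch of how one record might be modeled in Python, assuming only the field names and types in the schema (the FunctionRecord class name is hypothetical, not part of the dataset):

from dataclasses import dataclass
from typing import List

@dataclass
class FunctionRecord:
    """One dataset row, mirroring the schema above."""
    nwo: str                      # "owner/repo", e.g. "Cantera/cantera"
    sha: str                      # 40-character commit hash
    path: str                     # file path within the repository
    language: str                 # single class in this dump: "python"
    identifier: str               # function or method name
    parameters: str               # parameter list as source text
    argument_list: str            # single class in this dump
    return_statement: str         # may be empty (observed min length 0)
    docstring: str
    docstring_summary: str
    docstring_tokens: List[str]
    function: str                 # full function source, flattened to one line
    function_tokens: List[str]
    url: str                      # GitHub permalink with #Lstart-Lend range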
Cantera/cantera
0119484b261967ccb55a0066c020599cacc312e4
interfaces/python_sdist/setup.py
python
lib_def
(sources, cflags, include_dirs, macros)
return dict(sources=sources, cflags=cflags, include_dirs=include_dirs, macros=macros)
Convenience factory to create the dictionary for a Setuptools library build.
Convenience factory to create the dictionary for a Setuptools library build.
[ "Convenience", "factory", "to", "create", "the", "dictionary", "for", "a", "Setuptools", "library", "build", "." ]
def lib_def(sources, cflags, include_dirs, macros): """Convenience factory to create the dictionary for a Setuptools library build.""" return dict(sources=sources, cflags=cflags, include_dirs=include_dirs, macros=macros)
[ "def", "lib_def", "(", "sources", ",", "cflags", ",", "include_dirs", ",", "macros", ")", ":", "return", "dict", "(", "sources", "=", "sources", ",", "cflags", "=", "cflags", ",", "include_dirs", "=", "include_dirs", ",", "macros", "=", "macros", ")" ]
https://github.com/Cantera/cantera/blob/0119484b261967ccb55a0066c020599cacc312e4/interfaces/python_sdist/setup.py#L129-L132
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/learn/python/learn/models.py
python
logistic_regression_zero_init
(x, y)
return logistic_regression(x, y, init_mean=0.0, init_stddev=0.0)
Logistic regression subgraph with zero-value initial weights and bias. Args: x: tensor or placeholder for input features. y: tensor or placeholder for labels. Returns: Predictions and loss tensors.
Logistic regression subgraph with zero-value initial weights and bias.
[ "Logistic", "regression", "subgraph", "with", "zero", "-", "value", "initial", "weights", "and", "bias", "." ]
def logistic_regression_zero_init(x, y): """Logistic regression subgraph with zero-value initial weights and bias. Args: x: tensor or placeholder for input features. y: tensor or placeholder for labels. Returns: Predictions and loss tensors. """ return logistic_regression(x, y, init_mean=0.0, init_stddev=0.0)
[ "def", "logistic_regression_zero_init", "(", "x", ",", "y", ")", ":", "return", "logistic_regression", "(", "x", ",", "y", ",", "init_mean", "=", "0.0", ",", "init_stddev", "=", "0.0", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/learn/python/learn/models.py#L46-L56
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_gdi.py
python
DC.LogicalToDeviceY
(*args, **kwargs)
return _gdi_.DC_LogicalToDeviceY(*args, **kwargs)
LogicalToDeviceY(self, int y) -> int Converts logical Y coordinate to device coordinate, using the current mapping mode.
LogicalToDeviceY(self, int y) -> int
[ "LogicalToDeviceY", "(", "self", "int", "y", ")", "-", ">", "int" ]
def LogicalToDeviceY(*args, **kwargs): """ LogicalToDeviceY(self, int y) -> int Converts logical Y coordinate to device coordinate, using the current mapping mode. """ return _gdi_.DC_LogicalToDeviceY(*args, **kwargs)
[ "def", "LogicalToDeviceY", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gdi_", ".", "DC_LogicalToDeviceY", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L4258-L4265
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Draft/draftguitools/gui_arcs.py
python
Arc.numericInput
(self, numx, numy, numz)
Validate the entry fields in the user interface. This function is called by the toolbar or taskpanel interface when valid x, y, and z have been entered in the input fields.
Validate the entry fields in the user interface.
[ "Validate", "the", "entry", "fields", "in", "the", "user", "interface", "." ]
def numericInput(self, numx, numy, numz): """Validate the entry fields in the user interface. This function is called by the toolbar or taskpanel interface when valid x, y, and z have been entered in the input fields. """ self.center = App.Vector(numx, numy, numz) self.node = [self.center] self.arctrack.setCenter(self.center) self.arctrack.on() self.ui.radiusUi() self.step = 1 self.ui.setNextFocus() _msg(translate("draft", "Pick radius"))
[ "def", "numericInput", "(", "self", ",", "numx", ",", "numy", ",", "numz", ")", ":", "self", ".", "center", "=", "App", ".", "Vector", "(", "numx", ",", "numy", ",", "numz", ")", "self", ".", "node", "=", "[", "self", ".", "center", "]", "self", ".", "arctrack", ".", "setCenter", "(", "self", ".", "center", ")", "self", ".", "arctrack", ".", "on", "(", ")", "self", ".", "ui", ".", "radiusUi", "(", ")", "self", ".", "step", "=", "1", "self", ".", "ui", ".", "setNextFocus", "(", ")", "_msg", "(", "translate", "(", "\"draft\"", ",", "\"Pick radius\"", ")", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftguitools/gui_arcs.py#L397-L410
lmb-freiburg/flownet2
b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc
python/caffe/pycaffe.py
python
_Net_blobs
(self)
return self._blobs_dict
An OrderedDict (bottom to top, i.e., input to output) of network blobs indexed by name
An OrderedDict (bottom to top, i.e., input to output) of network blobs indexed by name
[ "An", "OrderedDict", "(", "bottom", "to", "top", "i", ".", "e", ".", "input", "to", "output", ")", "of", "network", "blobs", "indexed", "by", "name" ]
def _Net_blobs(self): """ An OrderedDict (bottom to top, i.e., input to output) of network blobs indexed by name """ if not hasattr(self, '_blobs_dict'): self._blobs_dict = OrderedDict(zip(self._blob_names, self._blobs)) return self._blobs_dict
[ "def", "_Net_blobs", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_blobs_dict'", ")", ":", "self", ".", "_blobs_dict", "=", "OrderedDict", "(", "zip", "(", "self", ".", "_blob_names", ",", "self", ".", "_blobs", ")", ")", "return", "self", ".", "_blobs_dict" ]
https://github.com/lmb-freiburg/flownet2/blob/b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc/python/caffe/pycaffe.py#L25-L32
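The _Net_blobs record above, like the Cursor.result_type record from clang's cindex.py further down, uses the same compute-once idiom: test hasattr for a private cache attribute, populate it on first access, and serve it thereafter. A minimal sketch of the pattern with hypothetical names (Example, _cached):

class Example:
    @property
    def value(self):
        """Compute once on first access, then return the cached attribute."""
        if not hasattr(self, '_cached'):
            self._cached = sum(i * i for i in range(1000))  # stand-in for real work
        return self._cached

On Python 3.8+, functools.cached_property expresses the same idea declaratively.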
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/more-itertools/py2/more_itertools/recipes.py
python
tabulate
(function, start=0)
return map(function, count(start))
Return an iterator over the results of ``func(start)``, ``func(start + 1)``, ``func(start + 2)``... *func* should be a function that accepts one integer argument. If *start* is not specified it defaults to 0. It will be incremented each time the iterator is advanced. >>> square = lambda x: x ** 2 >>> iterator = tabulate(square, -3) >>> take(4, iterator) [9, 4, 1, 0]
Return an iterator over the results of ``func(start)``, ``func(start + 1)``, ``func(start + 2)``...
[ "Return", "an", "iterator", "over", "the", "results", "of", "func", "(", "start", ")", "func", "(", "start", "+", "1", ")", "func", "(", "start", "+", "2", ")", "..." ]
def tabulate(function, start=0): """Return an iterator over the results of ``func(start)``, ``func(start + 1)``, ``func(start + 2)``... *func* should be a function that accepts one integer argument. If *start* is not specified it defaults to 0. It will be incremented each time the iterator is advanced. >>> square = lambda x: x ** 2 >>> iterator = tabulate(square, -3) >>> take(4, iterator) [9, 4, 1, 0] """ return map(function, count(start))
[ "def", "tabulate", "(", "function", ",", "start", "=", "0", ")", ":", "return", "map", "(", "function", ",", "count", "(", "start", ")", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/more-itertools/py2/more_itertools/recipes.py#L97-L112
zju3dv/clean-pvnet
5870c509e3cc205e1bb28910a7b1a9a3c8add9a8
lib/utils/meshrenderer/pysixd/transform.py
python
random_quaternion
(rand=None)
return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1, numpy.cos(t1)*r1, numpy.sin(t2)*r2])
Return uniform random unit quaternion. rand: array like or None Three independent random variables that are uniformly distributed between 0 and 1. >>> q = random_quaternion() >>> numpy.allclose(1, vector_norm(q)) True >>> q = random_quaternion(numpy.random.random(3)) >>> len(q.shape), q.shape[0]==4 (1, True)
Return uniform random unit quaternion.
[ "Return", "uniform", "random", "unit", "quaternion", "." ]
def random_quaternion(rand=None): """Return uniform random unit quaternion. rand: array like or None Three independent random variables that are uniformly distributed between 0 and 1. >>> q = random_quaternion() >>> numpy.allclose(1, vector_norm(q)) True >>> q = random_quaternion(numpy.random.random(3)) >>> len(q.shape), q.shape[0]==4 (1, True) """ if rand is None: rand = numpy.random.rand(3) else: assert len(rand) == 3 r1 = numpy.sqrt(1.0 - rand[0]) r2 = numpy.sqrt(rand[0]) pi2 = math.pi * 2.0 t1 = pi2 * rand[1] t2 = pi2 * rand[2] return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1, numpy.cos(t1)*r1, numpy.sin(t2)*r2])
[ "def", "random_quaternion", "(", "rand", "=", "None", ")", ":", "if", "rand", "is", "None", ":", "rand", "=", "numpy", ".", "random", ".", "rand", "(", "3", ")", "else", ":", "assert", "len", "(", "rand", ")", "==", "3", "r1", "=", "numpy", ".", "sqrt", "(", "1.0", "-", "rand", "[", "0", "]", ")", "r2", "=", "numpy", ".", "sqrt", "(", "rand", "[", "0", "]", ")", "pi2", "=", "math", ".", "pi", "*", "2.0", "t1", "=", "pi2", "*", "rand", "[", "1", "]", "t2", "=", "pi2", "*", "rand", "[", "2", "]", "return", "numpy", ".", "array", "(", "[", "numpy", ".", "cos", "(", "t2", ")", "*", "r2", ",", "numpy", ".", "sin", "(", "t1", ")", "*", "r1", ",", "numpy", ".", "cos", "(", "t1", ")", "*", "r1", ",", "numpy", ".", "sin", "(", "t2", ")", "*", "r2", "]", ")" ]
https://github.com/zju3dv/clean-pvnet/blob/5870c509e3cc205e1bb28910a7b1a9a3c8add9a8/lib/utils/meshrenderer/pysixd/transform.py#L1463-L1488
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/debug/wrappers/framework.py
python
BaseDebugWrapperSession.on_run_end
(self, request)
Callback invoked on run() calls to the debug-wrapper session. This is a blocking callback. The invocation happens right before the wrapper exits its run() call. Args: request: (`OnRunEndRequest`) callback request object carrying information such as the actual action performed by the session wrapper for the run() call. Returns: An instance of `OnRunStartResponse`.
Callback invoked on run() calls to the debug-wrapper session.
[ "Callback", "invoked", "on", "run", "()", "calls", "to", "the", "debug", "-", "wrapper", "session", "." ]
def on_run_end(self, request): """Callback invoked on run() calls to the debug-wrapper session. This is a blocking callback. The invocation happens right before the wrapper exits its run() call. Args: request: (`OnRunEndRequest`) callback request object carrying information such as the actual action performed by the session wrapper for the run() call. Returns: An instance of `OnRunStartResponse`. """
[ "def", "on_run_end", "(", "self", ",", "request", ")", ":" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/debug/wrappers/framework.py#L659-L672
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/generic.py
python
NDFrame._repr_latex_
(self)
Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf).
Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf).
[ "Returns", "a", "LaTeX", "representation", "for", "a", "particular", "object", ".", "Mainly", "for", "use", "with", "nbconvert", "(", "jupyter", "notebook", "conversion", "to", "pdf", ")", "." ]
def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("display.latex.repr"): return self.to_latex() else: return None
[ "def", "_repr_latex_", "(", "self", ")", ":", "if", "config", ".", "get_option", "(", "\"display.latex.repr\"", ")", ":", "return", "self", ".", "to_latex", "(", ")", "else", ":", "return", "None" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/generic.py#L1981-L1989
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/dtypes/cast.py
python
is_nested_object
(obj)
return False
return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements This may not be necessarily be performant.
return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements
[ "return", "a", "boolean", "if", "we", "have", "a", "nested", "object", "e", ".", "g", ".", "a", "Series", "with", "1", "or", "more", "Series", "elements" ]
def is_nested_object(obj) -> bool: """ return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements This may not be necessarily be performant. """ if isinstance(obj, ABCSeries) and is_object_dtype(obj): if any(isinstance(v, ABCSeries) for v in obj.values): return True return False
[ "def", "is_nested_object", "(", "obj", ")", "->", "bool", ":", "if", "isinstance", "(", "obj", ",", "ABCSeries", ")", "and", "is_object_dtype", "(", "obj", ")", ":", "if", "any", "(", "isinstance", "(", "v", ",", "ABCSeries", ")", "for", "v", "in", "obj", ".", "values", ")", ":", "return", "True", "return", "False" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/dtypes/cast.py#L83-L97
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pkg_resources/__init__.py
python
ResourceManager.extraction_error
(self)
Give an error message for problems extracting file(s)
Give an error message for problems extracting file(s)
[ "Give", "an", "error", "message", "for", "problems", "extracting", "file", "(", "s", ")" ]
def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() tmpl = textwrap.dedent(""" Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: {old_exc} The Python egg cache directory is currently set to: {cache_path} Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. """).lstrip() err = ExtractionError(tmpl.format(**locals())) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err
[ "def", "extraction_error", "(", "self", ")", ":", "old_exc", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "cache_path", "=", "self", ".", "extraction_path", "or", "get_default_cache", "(", ")", "tmpl", "=", "textwrap", ".", "dedent", "(", "\"\"\"\n Can't extract file(s) to egg cache\n\n The following error occurred while trying to extract file(s)\n to the Python egg cache:\n\n {old_exc}\n\n The Python egg cache directory is currently set to:\n\n {cache_path}\n\n Perhaps your account does not have write access to this directory?\n You can change the cache directory by setting the PYTHON_EGG_CACHE\n environment variable to point to an accessible directory.\n \"\"\"", ")", ".", "lstrip", "(", ")", "err", "=", "ExtractionError", "(", "tmpl", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")", "err", ".", "manager", "=", "self", "err", ".", "cache_path", "=", "cache_path", "err", ".", "original_error", "=", "old_exc", "raise", "err" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pkg_resources/__init__.py#L1167-L1193
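The extraction_error record above shows a reusable error-reporting shape: grab the exception currently being handled via sys.exc_info(), build a readable message with textwrap.dedent, attach context attributes to a custom exception, and raise it. A minimal sketch of the same shape under hypothetical names (CacheError, report_cache_failure):

import sys
import textwrap

class CacheError(Exception):
    pass

def report_cache_failure(cache_path):
    old_exc = sys.exc_info()[1]  # exception being handled, or None
    tmpl = textwrap.dedent("""
        Can't write file(s) to the cache

        The following error occurred: {old_exc}
        The cache directory is currently set to: {cache_path}
        """).lstrip()
    err = CacheError(tmpl.format(old_exc=old_exc, cache_path=cache_path))
    err.cache_path = cache_path      # context for programmatic handling
    err.original_error = old_exc
    raise err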
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/multiprocessing/managers.py
python
BaseManager._create
(*args, **kwds)
return Token(typeid, self._address, id), exposed
Create a new shared object; return the token and exposed tuple
Create a new shared object; return the token and exposed tuple
[ "Create", "a", "new", "shared", "object", ";", "return", "the", "token", "and", "exposed", "tuple" ]
def _create(*args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' self, typeid, *args = args args = tuple(args) assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed
[ "def", "_create", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "self", ",", "typeid", ",", "", "*", "args", "=", "args", "args", "=", "tuple", "(", "args", ")", "assert", "self", ".", "_state", ".", "value", "==", "State", ".", "STARTED", ",", "'server not yet started'", "conn", "=", "self", ".", "_Client", "(", "self", ".", "_address", ",", "authkey", "=", "self", ".", "_authkey", ")", "try", ":", "id", ",", "exposed", "=", "dispatch", "(", "conn", ",", "None", ",", "'create'", ",", "(", "typeid", ",", ")", "+", "args", ",", "kwds", ")", "finally", ":", "conn", ".", "close", "(", ")", "return", "Token", "(", "typeid", ",", "self", ".", "_address", ",", "id", ")", ",", "exposed" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/multiprocessing/managers.py#L599-L612
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_windows.py
python
HScrolledWindow.EstimateTotalWidth
(*args, **kwargs)
return _windows_.HScrolledWindow_EstimateTotalWidth(*args, **kwargs)
EstimateTotalWidth(self) -> int
EstimateTotalWidth(self) -> int
[ "EstimateTotalWidth", "(", "self", ")", "-", ">", "int" ]
def EstimateTotalWidth(*args, **kwargs): """EstimateTotalWidth(self) -> int""" return _windows_.HScrolledWindow_EstimateTotalWidth(*args, **kwargs)
[ "def", "EstimateTotalWidth", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "HScrolledWindow_EstimateTotalWidth", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_windows.py#L2524-L2526
GJDuck/LowFat
ecf6a0f0fa1b73a27a626cf493cc39e477b6faea
llvm-4.0.0.src/tools/clang/bindings/python/clang/cindex.py
python
Cursor.result_type
(self)
return self._result_type
Retrieve the Type of the result for this Cursor.
Retrieve the Type of the result for this Cursor.
[ "Retrieve", "the", "Type", "of", "the", "result", "for", "this", "Cursor", "." ]
def result_type(self): """Retrieve the Type of the result for this Cursor.""" if not hasattr(self, '_result_type'): self._result_type = conf.lib.clang_getResultType(self.type) return self._result_type
[ "def", "result_type", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_result_type'", ")", ":", "self", ".", "_result_type", "=", "conf", ".", "lib", ".", "clang_getResultType", "(", "self", ".", "type", ")", "return", "self", ".", "_result_type" ]
https://github.com/GJDuck/LowFat/blob/ecf6a0f0fa1b73a27a626cf493cc39e477b6faea/llvm-4.0.0.src/tools/clang/bindings/python/clang/cindex.py#L1527-L1532
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/command/easy_install.py
python
easy_install.expand_dirs
(self)
Calls `os.path.expanduser` on install dirs.
Calls `os.path.expanduser` on install dirs.
[ "Calls", "os", ".", "path", ".", "expanduser", "on", "install", "dirs", "." ]
def expand_dirs(self): """Calls `os.path.expanduser` on install dirs.""" dirs = [ 'install_purelib', 'install_platlib', 'install_lib', 'install_headers', 'install_scripts', 'install_data', ] self._expand_attrs(dirs)
[ "def", "expand_dirs", "(", "self", ")", ":", "dirs", "=", "[", "'install_purelib'", ",", "'install_platlib'", ",", "'install_lib'", ",", "'install_headers'", ",", "'install_scripts'", ",", "'install_data'", ",", "]", "self", ".", "_expand_attrs", "(", "dirs", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/command/easy_install.py#L401-L411
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/cache.py
python
Cache._get_cache_path_parts
(self, link)
return parts
Get parts of part that must be os.path.joined with cache_dir
Get parts of part that must be os.path.joined with cache_dir
[ "Get", "parts", "of", "part", "that", "must", "be", "os", ".", "path", ".", "joined", "with", "cache_dir" ]
def _get_cache_path_parts(self, link): # type: (Link) -> List[str] """Get parts of part that must be os.path.joined with cache_dir """ # We want to generate an url to use as our cache key, we don't want to # just re-use the URL because it might have other items in the fragment # and we don't care about those. key_parts = {"url": link.url_without_fragment} if link.hash_name is not None and link.hash is not None: key_parts[link.hash_name] = link.hash if link.subdirectory_fragment: key_parts["subdirectory"] = link.subdirectory_fragment # Include interpreter name, major and minor version in cache key # to cope with ill-behaved sdists that build a different wheel # depending on the python version their setup.py is being run on, # and don't encode the difference in compatibility tags. # https://github.com/pypa/pip/issues/7296 key_parts["interpreter_name"] = interpreter_name() key_parts["interpreter_version"] = interpreter_version() # Encode our key url with sha224, we'll use this because it has similar # security properties to sha256, but with a shorter total output (and # thus less secure). However the differences don't make a lot of # difference for our use case here. hashed = _hash_dict(key_parts) # We want to nest the directories some to prevent having a ton of top # level directories where we might run out of sub directories on some # FS. parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]] return parts
[ "def", "_get_cache_path_parts", "(", "self", ",", "link", ")", ":", "# type: (Link) -> List[str]", "# We want to generate an url to use as our cache key, we don't want to", "# just re-use the URL because it might have other items in the fragment", "# and we don't care about those.", "key_parts", "=", "{", "\"url\"", ":", "link", ".", "url_without_fragment", "}", "if", "link", ".", "hash_name", "is", "not", "None", "and", "link", ".", "hash", "is", "not", "None", ":", "key_parts", "[", "link", ".", "hash_name", "]", "=", "link", ".", "hash", "if", "link", ".", "subdirectory_fragment", ":", "key_parts", "[", "\"subdirectory\"", "]", "=", "link", ".", "subdirectory_fragment", "# Include interpreter name, major and minor version in cache key", "# to cope with ill-behaved sdists that build a different wheel", "# depending on the python version their setup.py is being run on,", "# and don't encode the difference in compatibility tags.", "# https://github.com/pypa/pip/issues/7296", "key_parts", "[", "\"interpreter_name\"", "]", "=", "interpreter_name", "(", ")", "key_parts", "[", "\"interpreter_version\"", "]", "=", "interpreter_version", "(", ")", "# Encode our key url with sha224, we'll use this because it has similar", "# security properties to sha256, but with a shorter total output (and", "# thus less secure). However the differences don't make a lot of", "# difference for our use case here.", "hashed", "=", "_hash_dict", "(", "key_parts", ")", "# We want to nest the directories some to prevent having a ton of top", "# level directories where we might run out of sub directories on some", "# FS.", "parts", "=", "[", "hashed", "[", ":", "2", "]", ",", "hashed", "[", "2", ":", "4", "]", ",", "hashed", "[", "4", ":", "6", "]", ",", "hashed", "[", "6", ":", "]", "]", "return", "parts" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/cache.py#L115-L181
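The pip _get_cache_path_parts record above builds a cache key from a small dict, hashes it with SHA-224, and splits the digest into nested path components so no single directory accumulates too many entries. A minimal sketch of that technique; the real _hash_dict is pip-internal, so the JSON-based version below is an assumption drawn from the record's comments:

import hashlib
import json
import os

def hash_dict(key_parts):
    # Stable digest: sort keys so equal dicts always hash the same.
    encoded = json.dumps(key_parts, sort_keys=True, separators=(",", ":"))
    return hashlib.sha224(encoded.encode("utf-8")).hexdigest()

def cache_path_parts(key_parts):
    # Nest as aa/bb/cc/rest to bound per-directory fan-out.
    hashed = hash_dict(key_parts)
    return [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]

# Usage: os.path.join(cache_dir, *cache_path_parts({"url": "https://example.com/pkg"}))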
larroy/clearskies_core
3574ddf0edc8555454c7044126e786a6c29444dc
tools/gyp/pylib/gyp/msvs_emulation.py
python
MsvsSettings.GetLdflags
(self, config, gyp_to_build_path, expand_special, manifest_base_name, output_name, is_executable, build_dir)
return ldflags, intermediate_manifest, manifest_files
Returns the flags that need to be added to link commands, and the manifest files.
Returns the flags that need to be added to link commands, and the manifest files.
[ "Returns", "the", "flags", "that", "need", "to", "be", "added", "to", "link", "commands", "and", "the", "manifest", "files", "." ]
def GetLdflags(self, config, gyp_to_build_path, expand_special, manifest_base_name, output_name, is_executable, build_dir): """Returns the flags that need to be added to link commands, and the manifest files.""" config = self._TargetConfig(config) ldflags = [] ld = self._GetWrapper(self, self.msvs_settings[config], 'VCLinkerTool', append=ldflags) self._GetDefFileAsLdflags(ldflags, gyp_to_build_path) ld('GenerateDebugInformation', map={'true': '/DEBUG'}) ld('TargetMachine', map={'1': 'X86', '17': 'X64'}, prefix='/MACHINE:') ldflags.extend(self._GetAdditionalLibraryDirectories( 'VCLinkerTool', config, gyp_to_build_path)) ld('DelayLoadDLLs', prefix='/DELAYLOAD:') ld('TreatLinkerWarningAsErrors', prefix='/WX', map={'true': '', 'false': ':NO'}) out = self.GetOutputName(config, expand_special) if out: ldflags.append('/OUT:' + out) pdb = self.GetPDBName(config, expand_special, output_name + '.pdb') if pdb: ldflags.append('/PDB:' + pdb) pgd = self.GetPGDName(config, expand_special) if pgd: ldflags.append('/PGD:' + pgd) map_file = self.GetMapFileName(config, expand_special) ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file else '/MAP'}) ld('MapExports', map={'true': '/MAPINFO:EXPORTS'}) ld('AdditionalOptions', prefix='') minimum_required_version = self._Setting( ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='') if minimum_required_version: minimum_required_version = ',' + minimum_required_version ld('SubSystem', map={'1': 'CONSOLE%s' % minimum_required_version, '2': 'WINDOWS%s' % minimum_required_version}, prefix='/SUBSYSTEM:') ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE') ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL') ld('BaseAddress', prefix='/BASE:') ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED') ld('RandomizedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE') ld('DataExecutionPrevention', map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT') ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:') ld('ForceSymbolReferences', prefix='/INCLUDE:') ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:') ld('LinkTimeCodeGeneration', map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE', '4': ':PGUPDATE'}, prefix='/LTCG') ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:') ld('ResourceOnlyDLL', map={'true': '/NOENTRY'}) ld('EntryPointSymbol', prefix='/ENTRY:') ld('Profile', map={'true': '/PROFILE'}) ld('LargeAddressAware', map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE') # TODO(scottmg): This should sort of be somewhere else (not really a flag). ld('AdditionalDependencies', prefix='') # If the base address is not specifically controlled, DYNAMICBASE should # be on by default. base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED', ldflags) if not base_flags: ldflags.append('/DYNAMICBASE') # If the NXCOMPAT flag has not been specified, default to on. Despite the # documentation that says this only defaults to on when the subsystem is # Vista or greater (which applies to the linker), the IDE defaults it on # unless it's explicitly off. if not filter(lambda x: 'NXCOMPAT' in x, ldflags): ldflags.append('/NXCOMPAT') have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags) manifest_flags, intermediate_manifest, manifest_files = \ self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path, is_executable and not have_def_file, build_dir) ldflags.extend(manifest_flags) return ldflags, intermediate_manifest, manifest_files
[ "def", "GetLdflags", "(", "self", ",", "config", ",", "gyp_to_build_path", ",", "expand_special", ",", "manifest_base_name", ",", "output_name", ",", "is_executable", ",", "build_dir", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "ldflags", "=", "[", "]", "ld", "=", "self", ".", "_GetWrapper", "(", "self", ",", "self", ".", "msvs_settings", "[", "config", "]", ",", "'VCLinkerTool'", ",", "append", "=", "ldflags", ")", "self", ".", "_GetDefFileAsLdflags", "(", "ldflags", ",", "gyp_to_build_path", ")", "ld", "(", "'GenerateDebugInformation'", ",", "map", "=", "{", "'true'", ":", "'/DEBUG'", "}", ")", "ld", "(", "'TargetMachine'", ",", "map", "=", "{", "'1'", ":", "'X86'", ",", "'17'", ":", "'X64'", "}", ",", "prefix", "=", "'/MACHINE:'", ")", "ldflags", ".", "extend", "(", "self", ".", "_GetAdditionalLibraryDirectories", "(", "'VCLinkerTool'", ",", "config", ",", "gyp_to_build_path", ")", ")", "ld", "(", "'DelayLoadDLLs'", ",", "prefix", "=", "'/DELAYLOAD:'", ")", "ld", "(", "'TreatLinkerWarningAsErrors'", ",", "prefix", "=", "'/WX'", ",", "map", "=", "{", "'true'", ":", "''", ",", "'false'", ":", "':NO'", "}", ")", "out", "=", "self", ".", "GetOutputName", "(", "config", ",", "expand_special", ")", "if", "out", ":", "ldflags", ".", "append", "(", "'/OUT:'", "+", "out", ")", "pdb", "=", "self", ".", "GetPDBName", "(", "config", ",", "expand_special", ",", "output_name", "+", "'.pdb'", ")", "if", "pdb", ":", "ldflags", ".", "append", "(", "'/PDB:'", "+", "pdb", ")", "pgd", "=", "self", ".", "GetPGDName", "(", "config", ",", "expand_special", ")", "if", "pgd", ":", "ldflags", ".", "append", "(", "'/PGD:'", "+", "pgd", ")", "map_file", "=", "self", ".", "GetMapFileName", "(", "config", ",", "expand_special", ")", "ld", "(", "'GenerateMapFile'", ",", "map", "=", "{", "'true'", ":", "'/MAP:'", "+", "map_file", "if", "map_file", "else", "'/MAP'", "}", ")", "ld", "(", "'MapExports'", ",", "map", "=", "{", "'true'", ":", "'/MAPINFO:EXPORTS'", "}", ")", "ld", "(", "'AdditionalOptions'", ",", "prefix", "=", "''", ")", "minimum_required_version", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'MinimumRequiredVersion'", ")", ",", "config", ",", "default", "=", "''", ")", "if", "minimum_required_version", ":", "minimum_required_version", "=", "','", "+", "minimum_required_version", "ld", "(", "'SubSystem'", ",", "map", "=", "{", "'1'", ":", "'CONSOLE%s'", "%", "minimum_required_version", ",", "'2'", ":", "'WINDOWS%s'", "%", "minimum_required_version", "}", ",", "prefix", "=", "'/SUBSYSTEM:'", ")", "ld", "(", "'TerminalServerAware'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/TSAWARE'", ")", "ld", "(", "'LinkIncremental'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/INCREMENTAL'", ")", "ld", "(", "'BaseAddress'", ",", "prefix", "=", "'/BASE:'", ")", "ld", "(", "'FixedBaseAddress'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/FIXED'", ")", "ld", "(", "'RandomizedBaseAddress'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/DYNAMICBASE'", ")", "ld", "(", "'DataExecutionPrevention'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/NXCOMPAT'", ")", "ld", "(", "'OptimizeReferences'", ",", "map", "=", "{", "'1'", ":", "'NOREF'", ",", "'2'", ":", "'REF'", "}", ",", "prefix", "=", "'/OPT:'", ")", "ld", "(", 
"'ForceSymbolReferences'", ",", "prefix", "=", "'/INCLUDE:'", ")", "ld", "(", "'EnableCOMDATFolding'", ",", "map", "=", "{", "'1'", ":", "'NOICF'", ",", "'2'", ":", "'ICF'", "}", ",", "prefix", "=", "'/OPT:'", ")", "ld", "(", "'LinkTimeCodeGeneration'", ",", "map", "=", "{", "'1'", ":", "''", ",", "'2'", ":", "':PGINSTRUMENT'", ",", "'3'", ":", "':PGOPTIMIZE'", ",", "'4'", ":", "':PGUPDATE'", "}", ",", "prefix", "=", "'/LTCG'", ")", "ld", "(", "'IgnoreDefaultLibraryNames'", ",", "prefix", "=", "'/NODEFAULTLIB:'", ")", "ld", "(", "'ResourceOnlyDLL'", ",", "map", "=", "{", "'true'", ":", "'/NOENTRY'", "}", ")", "ld", "(", "'EntryPointSymbol'", ",", "prefix", "=", "'/ENTRY:'", ")", "ld", "(", "'Profile'", ",", "map", "=", "{", "'true'", ":", "'/PROFILE'", "}", ")", "ld", "(", "'LargeAddressAware'", ",", "map", "=", "{", "'1'", ":", "':NO'", ",", "'2'", ":", "''", "}", ",", "prefix", "=", "'/LARGEADDRESSAWARE'", ")", "# TODO(scottmg): This should sort of be somewhere else (not really a flag).", "ld", "(", "'AdditionalDependencies'", ",", "prefix", "=", "''", ")", "# If the base address is not specifically controlled, DYNAMICBASE should", "# be on by default.", "base_flags", "=", "filter", "(", "lambda", "x", ":", "'DYNAMICBASE'", "in", "x", "or", "x", "==", "'/FIXED'", ",", "ldflags", ")", "if", "not", "base_flags", ":", "ldflags", ".", "append", "(", "'/DYNAMICBASE'", ")", "# If the NXCOMPAT flag has not been specified, default to on. Despite the", "# documentation that says this only defaults to on when the subsystem is", "# Vista or greater (which applies to the linker), the IDE defaults it on", "# unless it's explicitly off.", "if", "not", "filter", "(", "lambda", "x", ":", "'NXCOMPAT'", "in", "x", ",", "ldflags", ")", ":", "ldflags", ".", "append", "(", "'/NXCOMPAT'", ")", "have_def_file", "=", "filter", "(", "lambda", "x", ":", "x", ".", "startswith", "(", "'/DEF:'", ")", ",", "ldflags", ")", "manifest_flags", ",", "intermediate_manifest", ",", "manifest_files", "=", "self", ".", "_GetLdManifestFlags", "(", "config", ",", "manifest_base_name", ",", "gyp_to_build_path", ",", "is_executable", "and", "not", "have_def_file", ",", "build_dir", ")", "ldflags", ".", "extend", "(", "manifest_flags", ")", "return", "ldflags", ",", "intermediate_manifest", ",", "manifest_files" ]
https://github.com/larroy/clearskies_core/blob/3574ddf0edc8555454c7044126e786a6c29444dc/tools/gyp/pylib/gyp/msvs_emulation.py#L474-L557
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
third_party/jinja2/runtime.py
python
Context.resolve
(self, key)
return self.environment.undefined(name=key)
Looks up a variable like `__getitem__` or `get` but returns an :class:`Undefined` object with the name of the name looked up.
Looks up a variable like `__getitem__` or `get` but returns an :class:`Undefined` object with the name of the name looked up.
[ "Looks", "up", "a", "variable", "like", "__getitem__", "or", "get", "but", "returns", "an", ":", "class", ":", "Undefined", "object", "with", "the", "name", "of", "the", "name", "looked", "up", "." ]
def resolve(self, key): """Looks up a variable like `__getitem__` or `get` but returns an :class:`Undefined` object with the name of the name looked up. """ if key in self.vars: return self.vars[key] if key in self.parent: return self.parent[key] return self.environment.undefined(name=key)
[ "def", "resolve", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "vars", ":", "return", "self", ".", "vars", "[", "key", "]", "if", "key", "in", "self", ".", "parent", ":", "return", "self", ".", "parent", "[", "key", "]", "return", "self", ".", "environment", ".", "undefined", "(", "name", "=", "key", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/jinja2/runtime.py#L146-L154
apache/arrow
af33dd1157eb8d7d9bfac25ebf61445b793b7943
cpp/build-support/cpplint.py
python
NestingState.InClassDeclaration
(self)
return self.stack and isinstance(self.stack[-1], _ClassInfo)
Check if we are currently one level inside a class or struct declaration. Returns: True if top of the stack is a class/struct, False otherwise.
Check if we are currently one level inside a class or struct declaration.
[ "Check", "if", "we", "are", "currently", "one", "level", "inside", "a", "class", "or", "struct", "declaration", "." ]
def InClassDeclaration(self): """Check if we are currently one level inside a class or struct declaration. Returns: True if top of the stack is a class/struct, False otherwise. """ return self.stack and isinstance(self.stack[-1], _ClassInfo)
[ "def", "InClassDeclaration", "(", "self", ")", ":", "return", "self", ".", "stack", "and", "isinstance", "(", "self", ".", "stack", "[", "-", "1", "]", ",", "_ClassInfo", ")" ]
https://github.com/apache/arrow/blob/af33dd1157eb8d7d9bfac25ebf61445b793b7943/cpp/build-support/cpplint.py#L2565-L2571
rdiankov/openrave
d1a23023fd4b58f077d2ca949ceaf1b91f3f13d7
python/examples/qtexampleselector.py
python
OpenRaveServer.__init__
(self,pipe)
Setup the shared memory data structure model and initialize the control parts.
Setup the shared memory data structure model and initialize the control parts.
[ "Setup", "the", "shared", "memory", "data", "structure", "model", "and", "initialize", "the", "control", "parts", "." ]
def __init__(self,pipe): ''' Setup the shared memory data structure model and initialize the control parts. ''' self.pipe = pipe self.running = True self._run()
[ "def", "__init__", "(", "self", ",", "pipe", ")", ":", "self", ".", "pipe", "=", "pipe", "self", ".", "running", "=", "True", "self", ".", "_run", "(", ")" ]
https://github.com/rdiankov/openrave/blob/d1a23023fd4b58f077d2ca949ceaf1b91f3f13d7/python/examples/qtexampleselector.py#L82-L88
stellar-deprecated/stellard
67eabb2217bdfa9a6ea317f62338fb6bca458c90
src/protobuf/python/mox.py
python
IsA.equals
(self, rhs)
Check to see if the RHS is an instance of class_name. Args: # rhs: the right hand side of the test rhs: object Returns: bool
Check to see if the RHS is an instance of class_name.
[ "Check", "to", "see", "if", "the", "RHS", "is", "an", "instance", "of", "class_name", "." ]
def equals(self, rhs): """Check to see if the RHS is an instance of class_name. Args: # rhs: the right hand side of the test rhs: object Returns: bool """ try: return isinstance(rhs, self._class_name) except TypeError: # Check raw types if there was a type error. This is helpful for # things like cStringIO.StringIO. return type(rhs) == type(self._class_name)
[ "def", "equals", "(", "self", ",", "rhs", ")", ":", "try", ":", "return", "isinstance", "(", "rhs", ",", "self", ".", "_class_name", ")", "except", "TypeError", ":", "# Check raw types if there was a type error. This is helpful for", "# things like cStringIO.StringIO.", "return", "type", "(", "rhs", ")", "==", "type", "(", "self", ".", "_class_name", ")" ]
https://github.com/stellar-deprecated/stellard/blob/67eabb2217bdfa9a6ea317f62338fb6bca458c90/src/protobuf/python/mox.py#L807-L823
snap-stanford/snap-python
d53c51b0a26aa7e3e7400b014cdf728948fde80a
snapx/snapx/convert.py
python
to_snapx_graph
(data, create_using=None, multigraph_input=False)
PORTED FROM NETWORKX Make a SnapX graph from a known data structure. The preferred way to call this is automatically from the class constructor >>> d = {0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1) >>> G = nx.Graph(d) instead of the equivalent >>> G = nx.from_dict_of_dicts(d) Parameters ---------- data : object to be converted Current known types are: any NetworkX graph dict-of-dicts dict-of-lists container (ie set, list, tuple, iterator) of edges Pandas DataFrame (row per edge) numpy matrix numpy ndarray scipy sparse matrix pygraphviz agraph create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. multigraph_input : bool (default False) If True and data is a dict_of_dicts, try to create a multigraph assuming dict_of_dict_of_lists. If data and create_using are both multigraphs then create a multigraph from a multigraph.
PORTED FROM NETWORKX Make a SnapX graph from a known data structure. The preferred way to call this is automatically from the class constructor >>> d = {0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1) >>> G = nx.Graph(d) instead of the equivalent >>> G = nx.from_dict_of_dicts(d) Parameters ---------- data : object to be converted Current known types are: any NetworkX graph dict-of-dicts dict-of-lists container (ie set, list, tuple, iterator) of edges Pandas DataFrame (row per edge) numpy matrix numpy ndarray scipy sparse matrix pygraphviz agraph create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. multigraph_input : bool (default False) If True and data is a dict_of_dicts, try to create a multigraph assuming dict_of_dict_of_lists. If data and create_using are both multigraphs then create a multigraph from a multigraph.
[ "PORTED", "FROM", "NETWORKX", "Make", "a", "SnapX", "graph", "from", "a", "known", "data", "structure", ".", "The", "preferred", "way", "to", "call", "this", "is", "automatically", "from", "the", "class", "constructor", ">>>", "d", "=", "{", "0", ":", "{", "1", ":", "{", "weight", ":", "1", "}}}", "#", "dict", "-", "of", "-", "dicts", "single", "edge", "(", "0", "1", ")", ">>>", "G", "=", "nx", ".", "Graph", "(", "d", ")", "instead", "of", "the", "equivalent", ">>>", "G", "=", "nx", ".", "from_dict_of_dicts", "(", "d", ")", "Parameters", "----------", "data", ":", "object", "to", "be", "converted", "Current", "known", "types", "are", ":", "any", "NetworkX", "graph", "dict", "-", "of", "-", "dicts", "dict", "-", "of", "-", "lists", "container", "(", "ie", "set", "list", "tuple", "iterator", ")", "of", "edges", "Pandas", "DataFrame", "(", "row", "per", "edge", ")", "numpy", "matrix", "numpy", "ndarray", "scipy", "sparse", "matrix", "pygraphviz", "agraph", "create_using", ":", "NetworkX", "graph", "constructor", "optional", "(", "default", "=", "nx", ".", "Graph", ")", "Graph", "type", "to", "create", ".", "If", "graph", "instance", "then", "cleared", "before", "populated", ".", "multigraph_input", ":", "bool", "(", "default", "False", ")", "If", "True", "and", "data", "is", "a", "dict_of_dicts", "try", "to", "create", "a", "multigraph", "assuming", "dict_of_dict_of_lists", ".", "If", "data", "and", "create_using", "are", "both", "multigraphs", "then", "create", "a", "multigraph", "from", "a", "multigraph", "." ]
def to_snapx_graph(data, create_using=None, multigraph_input=False): """PORTED FROM NETWORKX Make a SnapX graph from a known data structure. The preferred way to call this is automatically from the class constructor >>> d = {0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1) >>> G = nx.Graph(d) instead of the equivalent >>> G = nx.from_dict_of_dicts(d) Parameters ---------- data : object to be converted Current known types are: any NetworkX graph dict-of-dicts dict-of-lists container (ie set, list, tuple, iterator) of edges Pandas DataFrame (row per edge) numpy matrix numpy ndarray scipy sparse matrix pygraphviz agraph create_using : NetworkX graph constructor, optional (default=nx.Graph) Graph type to create. If graph instance, then cleared before populated. multigraph_input : bool (default False) If True and data is a dict_of_dicts, try to create a multigraph assuming dict_of_dict_of_lists. If data and create_using are both multigraphs then create a multigraph from a multigraph. """ # SX graph if hasattr(data, "adj"): try: result = from_dict_of_dicts( data.adj, create_using=create_using, multigraph_input=data.is_multigraph(), ) if hasattr(data, "graph"): # data.graph should be dict-like result.graph.update(data.graph) if hasattr(data, "nodes"): # data.nodes should be dict-like # result.add_node_from(data.nodes.items()) possible but # for custom node_attr_dict_factory which may be hashable # will be unexpected behavior for n, dd in data.nodes.items(): result._node[n].update(dd) return result except: raise sx.SnapXError("Input is not a correct SnapX graph.") # pygraphviz agraph if hasattr(data, "is_strict"): raise NotImplementedError("TODO") #try: # return nx.nx_agraph.from_agraph(data, create_using=create_using) #except: # raise nx.NetworkXError("Input is not a correct pygraphviz graph.") # dict of dicts/lists if isinstance(data, dict): raise NotImplementedError("TODO") #try: # return from_dict_of_dicts( # data, create_using=create_using, multigraph_input=multigraph_input # ) #except: # try: # return from_dict_of_lists(data, create_using=create_using) # except: # raise TypeError("Input is not known type.") # list or generator of edges if isinstance(data, (list, tuple, set)) or any( hasattr(data, attr) for attr in ["_adjdict", "next", "__next__"] ): raise NotImplementedError("TODO") #try: # return from_edgelist(data, create_using=create_using) #except: # raise nx.NetworkXError("Input is not a valid edge list") # Pandas DataFrame try: import pandas as pd if isinstance(data, pd.DataFrame): raise NotImplementedError("TODO") #if data.shape[0] == data.shape[1]: # try: # return nx.from_pandas_adjacency(data, create_using=create_using) # except: # msg = "Input is not a correct Pandas DataFrame adjacency matrix." # raise nx.NetworkXError(msg) #else: # try: # return nx.from_pandas_edgelist( # data, edge_attr=True, create_using=create_using # ) # except: # msg = "Input is not a correct Pandas DataFrame edge-list." # raise nx.NetworkXError(msg) except ImportError: msg = "pandas not found, skipping conversion test." warnings.warn(msg, ImportWarning) # numpy matrix or ndarray try: import numpy if isinstance(data, (numpy.matrix, numpy.ndarray)): raise NotImplementedError("TODO") #try: # return nx.from_numpy_matrix(data, create_using=create_using) #except: # raise nx.NetworkXError("Input is not a correct numpy matrix or array.") except ImportError: warnings.warn("numpy not found, skipping conversion test.", ImportWarning) # scipy sparse matrix - any format try: import scipy if hasattr(data, "format"): raise NotImplementedError("TODO") #try: # return nx.from_scipy_sparse_matrix(data, create_using=create_using) #except: # raise nx.NetworkXError( # "Input is not a correct scipy sparse matrix type." # ) except ImportError: warnings.warn("scipy not found, skipping conversion test.", ImportWarning) raise sx.SnapXError("Input is not a known data type for conversion.")
[ "def", "to_snapx_graph", "(", "data", ",", "create_using", "=", "None", ",", "multigraph_input", "=", "False", ")", ":", "# SX graph", "if", "hasattr", "(", "data", ",", "\"adj\"", ")", ":", "try", ":", "result", "=", "from_dict_of_dicts", "(", "data", ".", "adj", ",", "create_using", "=", "create_using", ",", "multigraph_input", "=", "data", ".", "is_multigraph", "(", ")", ",", ")", "if", "hasattr", "(", "data", ",", "\"graph\"", ")", ":", "# data.graph should be dict-like", "result", ".", "graph", ".", "update", "(", "data", ".", "graph", ")", "if", "hasattr", "(", "data", ",", "\"nodes\"", ")", ":", "# data.nodes should be dict-like", "# result.add_node_from(data.nodes.items()) possible but", "# for custom node_attr_dict_factory which may be hashable", "# will be unexpected behavior", "for", "n", ",", "dd", "in", "data", ".", "nodes", ".", "items", "(", ")", ":", "result", ".", "_node", "[", "n", "]", ".", "update", "(", "dd", ")", "return", "result", "except", ":", "raise", "sx", ".", "SnapXError", "(", "\"Input is not a correct SnapX graph.\"", ")", "# pygraphviz agraph", "if", "hasattr", "(", "data", ",", "\"is_strict\"", ")", ":", "raise", "NotImplementedError", "(", "\"TODO\"", ")", "#try:", "# return nx.nx_agraph.from_agraph(data, create_using=create_using)", "#except:", "# raise nx.NetworkXError(\"Input is not a correct pygraphviz graph.\")", "# dict of dicts/lists", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "raise", "NotImplementedError", "(", "\"TODO\"", ")", "#try:", "# return from_dict_of_dicts(", "# data, create_using=create_using, multigraph_input=multigraph_input", "# )", "#except:", "# try:", "# return from_dict_of_lists(data, create_using=create_using)", "# except:", "# raise TypeError(\"Input is not known type.\")", "# list or generator of edges", "if", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", "or", "any", "(", "hasattr", "(", "data", ",", "attr", ")", "for", "attr", "in", "[", "\"_adjdict\"", ",", "\"next\"", ",", "\"__next__\"", "]", ")", ":", "raise", "NotImplementedError", "(", "\"TODO\"", ")", "#try:", "# return from_edgelist(data, create_using=create_using)", "#except:", "# raise nx.NetworkXError(\"Input is not a valid edge list\")", "# Pandas DataFrame", "try", ":", "import", "pandas", "as", "pd", "if", "isinstance", "(", "data", ",", "pd", ".", "DataFrame", ")", ":", "raise", "NotImplementedError", "(", "\"TODO\"", ")", "#if data.shape[0] == data.shape[1]:", "# try:", "# return nx.from_pandas_adjacency(data, create_using=create_using)", "# except:", "# msg = \"Input is not a correct Pandas DataFrame adjacency matrix.\"", "# raise nx.NetworkXError(msg)", "#else:", "# try:", "# return nx.from_pandas_edgelist(", "# data, edge_attr=True, create_using=create_using", "# )", "# except:", "# msg = \"Input is not a correct Pandas DataFrame edge-list.\"", "# raise nx.NetworkXError(msg)", "except", "ImportError", ":", "msg", "=", "\"pandas not found, skipping conversion test.\"", "warnings", ".", "warn", "(", "msg", ",", "ImportWarning", ")", "# numpy matrix or ndarray", "try", ":", "import", "numpy", "if", "isinstance", "(", "data", ",", "(", "numpy", ".", "matrix", ",", "numpy", ".", "ndarray", ")", ")", ":", "raise", "NotImplementedError", "(", "\"TODO\"", ")", "#try:", "# return nx.from_numpy_matrix(data, create_using=create_using)", "#except:", "# raise nx.NetworkXError(\"Input is not a correct numpy matrix or array.\")", "except", "ImportError", ":", "warnings", ".", "warn", "(", "\"numpy not found, skipping 
conversion test.\"", ",", "ImportWarning", ")", "# scipy sparse matrix - any format", "try", ":", "import", "scipy", "if", "hasattr", "(", "data", ",", "\"format\"", ")", ":", "raise", "NotImplementedError", "(", "\"TODO\"", ")", "#try:", "# return nx.from_scipy_sparse_matrix(data, create_using=create_using)", "#except:", "# raise nx.NetworkXError(", "# \"Input is not a correct scipy sparse matrix type.\"", "# )", "except", "ImportError", ":", "warnings", ".", "warn", "(", "\"scipy not found, skipping conversion test.\"", ",", "ImportWarning", ")", "raise", "sx", ".", "SnapXError", "(", "\"Input is not a known data type for conversion.\"", ")" ]
https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/snapx/snapx/convert.py#L4-L138
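The to_snapx_graph record above dispatches on duck-typed capabilities rather than declared types: hasattr(data, "adj") identifies graph-likes, isinstance(data, dict) catches dict-of-dicts input, and optional dependencies such as pandas are probed inside try/except ImportError. A minimal sketch of that dispatch shape; the handler names are hypothetical stand-ins:

import warnings

def from_graph(data):        # hypothetical handlers standing in for real converters
    return ("graph", data)

def from_dict(data):
    return ("dict", data)

def from_dataframe(data):
    return ("dataframe", data)

def convert(data):
    """Route data by what it looks like, not by its declared type."""
    if hasattr(data, "adj"):          # networkx-style graphs expose .adj
        return from_graph(data)
    if isinstance(data, dict):        # dict-of-dicts / dict-of-lists
        return from_dict(data)
    try:
        import pandas as pd           # optional dependency, probed lazily
        if isinstance(data, pd.DataFrame):
            return from_dataframe(data)
    except ImportError:
        warnings.warn("pandas not found, skipping conversion test.", ImportWarning)
    raise TypeError("Input is not a known data type for conversion.")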
cmu-sei/pharos
af54b6ada58d50c046fa899452addce80e9ce8da
tools/ooanalyzer/ida/OOAnalyzer.py
python
PyOOAnalyzerExpForm.__load_from_json_file
(self, json_file)
return res_str, num_classes, num_vcalls, num_usages
Parse the JSON file
Parse the JSON file
[ "Parse", "the", "JSON", "file" ]
def __load_from_json_file(self, json_file): ''' Parse the JSON file ''' if json_file != None: if self.__ooanalyzer.set_json_file(json_file): print("Opened JSON file %s" % json_file) else: ida_kernwin.warning("Could not open %s" % json_file) return None, 0, 0, 0 if self.__ooanalyzer.is_parsed() == False: # not parsed yet result, msg = self.__ooanalyzer.parse() print("Parsed %s %s" % (result, msg)) if result == False: ida_kernwin.warning("Could not parse JSON: %s" % msg) return None, 0, 0, 0 parse_results = self.__ooanalyzer.get_parse_results() if parse_results == None: return None, 0, 0, 0 num_classes = 0 num_vcalls = 0 num_usages = 0 if "NumClasses" in parse_results: num_classes = parse_results["NumClasses"] if "NumVcalls" in parse_results: num_vcalls = parse_results["NumVcalls"] if "NumUsages" in parse_results: num_usages = parse_results["NumUsages"] res_str = """ Successfully parsed JSON file: "%s". The following C++ constructs are ready to apply: * %s class structures * %s object usages * %s virtual function calls Press \"Yes\" to apply these items to the IDB. Press no to apply them manually (Note that virtual function calls will be resolved automatically) """ % (self.__ooanalyzer.get_json_filename(), num_classes, num_usages, num_vcalls) return res_str, num_classes, num_vcalls, num_usages
[ "def", "__load_from_json_file", "(", "self", ",", "json_file", ")", ":", "if", "json_file", "!=", "None", ":", "if", "self", ".", "__ooanalyzer", ".", "set_json_file", "(", "json_file", ")", ":", "print", "(", "\"Opened JSON file %s\"", "%", "json_file", ")", "else", ":", "ida_kernwin", ".", "warning", "(", "\"Could not open %s\"", "%", "json_file", ")", "return", "None", ",", "0", ",", "0", ",", "0", "if", "self", ".", "__ooanalyzer", ".", "is_parsed", "(", ")", "==", "False", ":", "# not parsed yet", "result", ",", "msg", "=", "self", ".", "__ooanalyzer", ".", "parse", "(", ")", "print", "(", "\"Parsed %s %s\"", "%", "(", "result", ",", "msg", ")", ")", "if", "result", "==", "False", ":", "ida_kernwin", ".", "warning", "(", "\"Could not parse JSON: %s\"", "%", "msg", ")", "return", "None", ",", "0", ",", "0", ",", "0", "parse_results", "=", "self", ".", "__ooanalyzer", ".", "get_parse_results", "(", ")", "if", "parse_results", "==", "None", ":", "return", "None", ",", "0", ",", "0", ",", "0", "num_classes", "=", "0", "num_vcalls", "=", "0", "num_usages", "=", "0", "if", "\"NumClasses\"", "in", "parse_results", ":", "num_classes", "=", "parse_results", "[", "\"NumClasses\"", "]", "if", "\"NumVcalls\"", "in", "parse_results", ":", "num_vcalls", "=", "parse_results", "[", "\"NumVcalls\"", "]", "if", "\"NumUsages\"", "in", "parse_results", ":", "num_usages", "=", "parse_results", "[", "\"NumUsages\"", "]", "res_str", "=", "\"\"\"\nSuccessfully parsed JSON file: \"%s\".\n\nThe following C++ constructs are ready to apply:\n\n * %s class structures\n * %s object usages\n * %s virtual function calls\n\nPress \\\"Yes\\\" to apply these items to the IDB. Press no to apply them manually (Note that virtual function calls will be resolved automatically)\n\"\"\"", "%", "(", "self", ".", "__ooanalyzer", ".", "get_json_filename", "(", ")", ",", "num_classes", ",", "num_usages", ",", "num_vcalls", ")", "return", "res_str", ",", "num_classes", ",", "num_vcalls", ",", "num_usages" ]
https://github.com/cmu-sei/pharos/blob/af54b6ada58d50c046fa899452addce80e9ce8da/tools/ooanalyzer/ida/OOAnalyzer.py#L2175-L2221
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/resample.py
python
Resampler.nearest
(self, limit=None)
return self._upsample('nearest', limit=limit)
Resample by using the nearest value. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). The `nearest` method will replace ``NaN`` values that appeared in the resampled data with the value from the nearest member of the sequence, based on the index value. Missing values that existed in the original data will not be modified. If `limit` is given, fill only this many values in each direction for each of the original values. Parameters ---------- limit : int, optional Limit of how many values to fill. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame An upsampled Series or DataFrame with ``NaN`` values filled with their nearest value. See Also -------- backfill : Backward fill the new missing values in the resampled data. pad : Forward fill ``NaN`` values. Examples -------- >>> s = pd.Series([1, 2], ... index=pd.date_range('20180101', ... periods=2, ... freq='1h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 Freq: H, dtype: int64 >>> s.resample('15min').nearest() 2018-01-01 00:00:00 1 2018-01-01 00:15:00 1 2018-01-01 00:30:00 2 2018-01-01 00:45:00 2 2018-01-01 01:00:00 2 Freq: 15T, dtype: int64 Limit the number of upsampled values imputed by the nearest: >>> s.resample('15min').nearest(limit=1) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 Freq: 15T, dtype: float64
Resample by using the nearest value.
[ "Resample", "by", "using", "the", "nearest", "value", "." ]
def nearest(self, limit=None): """ Resample by using the nearest value. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). The `nearest` method will replace ``NaN`` values that appeared in the resampled data with the value from the nearest member of the sequence, based on the index value. Missing values that existed in the original data will not be modified. If `limit` is given, fill only this many values in each direction for each of the original values. Parameters ---------- limit : int, optional Limit of how many values to fill. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame An upsampled Series or DataFrame with ``NaN`` values filled with their nearest value. See Also -------- backfill : Backward fill the new missing values in the resampled data. pad : Forward fill ``NaN`` values. Examples -------- >>> s = pd.Series([1, 2], ... index=pd.date_range('20180101', ... periods=2, ... freq='1h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 Freq: H, dtype: int64 >>> s.resample('15min').nearest() 2018-01-01 00:00:00 1 2018-01-01 00:15:00 1 2018-01-01 00:30:00 2 2018-01-01 00:45:00 2 2018-01-01 01:00:00 2 Freq: 15T, dtype: int64 Limit the number of upsampled values imputed by the nearest: >>> s.resample('15min').nearest(limit=1) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 Freq: 15T, dtype: float64 """ return self._upsample('nearest', limit=limit)
[ "def", "nearest", "(", "self", ",", "limit", "=", "None", ")", ":", "return", "self", ".", "_upsample", "(", "'nearest'", ",", "limit", "=", "limit", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/resample.py#L436-L496
borglab/gtsam
a5bee157efce6a0563704bce6a5d188c29817f39
wrap/gtwrap/matlab_wrapper/mixins.py
python
FormatMixin._format_global_function
(self, function: Union[parser.GlobalFunction, Any], separator: str = '')
return method[2 * len(separator):]
Example: gtsamPoint3.staticFunction
Example:
[ "Example", ":" ]
def _format_global_function(self, function: Union[parser.GlobalFunction, Any], separator: str = ''): """Example: gtsamPoint3.staticFunction """ method = '' if isinstance(function, parser.GlobalFunction): method += "".join([separator + x for x in function.parent.full_namespaces()]) + \ separator return method[2 * len(separator):]
[ "def", "_format_global_function", "(", "self", ",", "function", ":", "Union", "[", "parser", ".", "GlobalFunction", ",", "Any", "]", ",", "separator", ":", "str", "=", "''", ")", ":", "method", "=", "''", "if", "isinstance", "(", "function", ",", "parser", ".", "GlobalFunction", ")", ":", "method", "+=", "\"\"", ".", "join", "(", "[", "separator", "+", "x", "for", "x", "in", "function", ".", "parent", ".", "full_namespaces", "(", ")", "]", ")", "+", "separator", "return", "method", "[", "2", "*", "len", "(", "separator", ")", ":", "]" ]
https://github.com/borglab/gtsam/blob/a5bee157efce6a0563704bce6a5d188c29817f39/wrap/gtwrap/matlab_wrapper/mixins.py#L204-L217
domino-team/openwrt-cc
8b181297c34d14d3ca521cc9f31430d561dbc688
package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8/tools/grokdump.py
python
InspectionShell.do_u
(self, args)
Unassemble memory in the region [address, address + size). If the size is not specified, a default value of 32 bytes is used. Synopsis: u 0x<address> 0x<size>
Unassemble memory in the region [address, address + size). If the size is not specified, a default value of 32 bytes is used. Synopsis: u 0x<address> 0x<size>
[ "Unassemble", "memory", "in", "the", "region", "[", "address", "address", "+", "size", ")", ".", "If", "the", "size", "is", "not", "specified", "a", "default", "value", "of", "32", "bytes", "is", "used", ".", "Synopsis", ":", "u", "0x<address", ">", "0x<size", ">" ]
def do_u(self, args): """ Unassemble memory in the region [address, address + size). If the size is not specified, a default value of 32 bytes is used. Synopsis: u 0x<address> 0x<size> """ args = args.split(' ') start = int(args[0], 16) size = int(args[1], 16) if len(args) > 1 else 0x20 if not self.reader.IsValidAddress(start): print "Address is not contained within the minidump!" return lines = self.reader.GetDisasmLines(start, size) for line in lines: print FormatDisasmLine(start, self.heap, line) print
[ "def", "do_u", "(", "self", ",", "args", ")", ":", "args", "=", "args", ".", "split", "(", "' '", ")", "start", "=", "int", "(", "args", "[", "0", "]", ",", "16", ")", "size", "=", "int", "(", "args", "[", "1", "]", ",", "16", ")", "if", "len", "(", "args", ")", ">", "1", "else", "0x20", "if", "not", "self", ".", "reader", ".", "IsValidAddress", "(", "start", ")", ":", "print", "\"Address is not contained within the minidump!\"", "return", "lines", "=", "self", ".", "reader", ".", "GetDisasmLines", "(", "start", ",", "size", ")", "for", "line", "in", "lines", ":", "print", "FormatDisasmLine", "(", "start", ",", "self", ".", "heap", ",", "line", ")", "print" ]
https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8/tools/grokdump.py#L3068-L3083
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/indexes/interval.py
python
interval_range
( start=None, end=None, periods=None, freq=None, name=None, closed="right" )
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
Return a fixed frequency IntervalIndex. Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals. end : numeric or datetime-like, default None Right bound for generating intervals. periods : int, default None Number of periods to generate. freq : numeric, str, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. name : str, default None Name of the resulting IntervalIndex. closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` is supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], closed='right', dtype='interval[int64]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], (2017-01-03, 2017-01-04]], closed='right', dtype='interval[datetime64[ns]]') The ``freq`` parameter specifies the frequency between the left and right. endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], (2017-03-01, 2017-04-01]], closed='right', dtype='interval[datetime64[ns]]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], closed='both', dtype='interval[int64]')
Return a fixed frequency IntervalIndex.
[ "Return", "a", "fixed", "frequency", "IntervalIndex", "." ]
def interval_range( start=None, end=None, periods=None, freq=None, name=None, closed="right" ): """ Return a fixed frequency IntervalIndex. Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals. end : numeric or datetime-like, default None Right bound for generating intervals. periods : int, default None Number of periods to generate. freq : numeric, str, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. name : str, default None Name of the resulting IntervalIndex. closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` is supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], closed='right', dtype='interval[int64]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], (2017-01-03, 2017-01-04]], closed='right', dtype='interval[datetime64[ns]]') The ``freq`` parameter specifies the frequency between the left and right. endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], (2017-03-01, 2017-04-01]], closed='right', dtype='interval[datetime64[ns]]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], closed='both', dtype='interval[int64]') """ start = com.maybe_box_datetimelike(start) end = com.maybe_box_datetimelike(end) endpoint = start if start is not None else end if freq is None and com.any_none(periods, start, end): freq = 1 if is_number(endpoint) else "D" if com.count_not_none(start, end, periods, freq) != 3: raise ValueError( "Of the four parameters: start, end, periods, and " "freq, exactly three must be specified" ) if not _is_valid_endpoint(start): raise ValueError(f"start must be numeric or datetime-like, got {start}") elif not _is_valid_endpoint(end): raise ValueError(f"end must be numeric or datetime-like, got {end}") if is_float(periods): periods = int(periods) elif not is_integer(periods) and periods is not None: raise TypeError(f"periods must be a number, got {periods}") if freq is not None and not is_number(freq): try: freq = to_offset(freq) except ValueError: raise ValueError( f"freq must be numeric or convertible to DateOffset, got {freq}" ) # verify type compatibility if not all( [ _is_type_compatible(start, end), _is_type_compatible(start, freq), _is_type_compatible(end, freq), ] ): raise TypeError("start, end, freq need to be type compatible") # +1 to convert interval count to breaks count (n breaks = n-1 intervals) if periods is not None: periods += 1 if is_number(endpoint): # force consistency between start/end/freq (lower end if freq skips it) if com.all_not_none(start, end, freq): end -= (end - start) % freq # compute the period/start/end if unspecified (at most one) if periods is None: periods = int((end - start) // freq) + 1 elif start is None: start = end - (periods - 1) * freq elif end is None: end = start + (periods - 1) * freq breaks = np.linspace(start, end, periods) if all(is_integer(x) for x in com.not_none(start, end, freq)): # np.linspace always produces float output breaks = maybe_downcast_to_dtype(breaks, "int64") else: # delegate to the appropriate range function if isinstance(endpoint, Timestamp): range_func = date_range else: range_func = timedelta_range breaks = range_func(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
[ "def", "interval_range", "(", "start", "=", "None", ",", "end", "=", "None", ",", "periods", "=", "None", ",", "freq", "=", "None", ",", "name", "=", "None", ",", "closed", "=", "\"right\"", ")", ":", "start", "=", "com", ".", "maybe_box_datetimelike", "(", "start", ")", "end", "=", "com", ".", "maybe_box_datetimelike", "(", "end", ")", "endpoint", "=", "start", "if", "start", "is", "not", "None", "else", "end", "if", "freq", "is", "None", "and", "com", ".", "any_none", "(", "periods", ",", "start", ",", "end", ")", ":", "freq", "=", "1", "if", "is_number", "(", "endpoint", ")", "else", "\"D\"", "if", "com", ".", "count_not_none", "(", "start", ",", "end", ",", "periods", ",", "freq", ")", "!=", "3", ":", "raise", "ValueError", "(", "\"Of the four parameters: start, end, periods, and \"", "\"freq, exactly three must be specified\"", ")", "if", "not", "_is_valid_endpoint", "(", "start", ")", ":", "raise", "ValueError", "(", "f\"start must be numeric or datetime-like, got {start}\"", ")", "elif", "not", "_is_valid_endpoint", "(", "end", ")", ":", "raise", "ValueError", "(", "f\"end must be numeric or datetime-like, got {end}\"", ")", "if", "is_float", "(", "periods", ")", ":", "periods", "=", "int", "(", "periods", ")", "elif", "not", "is_integer", "(", "periods", ")", "and", "periods", "is", "not", "None", ":", "raise", "TypeError", "(", "f\"periods must be a number, got {periods}\"", ")", "if", "freq", "is", "not", "None", "and", "not", "is_number", "(", "freq", ")", ":", "try", ":", "freq", "=", "to_offset", "(", "freq", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "f\"freq must be numeric or convertible to DateOffset, got {freq}\"", ")", "# verify type compatibility", "if", "not", "all", "(", "[", "_is_type_compatible", "(", "start", ",", "end", ")", ",", "_is_type_compatible", "(", "start", ",", "freq", ")", ",", "_is_type_compatible", "(", "end", ",", "freq", ")", ",", "]", ")", ":", "raise", "TypeError", "(", "\"start, end, freq need to be type compatible\"", ")", "# +1 to convert interval count to breaks count (n breaks = n-1 intervals)", "if", "periods", "is", "not", "None", ":", "periods", "+=", "1", "if", "is_number", "(", "endpoint", ")", ":", "# force consistency between start/end/freq (lower end if freq skips it)", "if", "com", ".", "all_not_none", "(", "start", ",", "end", ",", "freq", ")", ":", "end", "-=", "(", "end", "-", "start", ")", "%", "freq", "# compute the period/start/end if unspecified (at most one)", "if", "periods", "is", "None", ":", "periods", "=", "int", "(", "(", "end", "-", "start", ")", "//", "freq", ")", "+", "1", "elif", "start", "is", "None", ":", "start", "=", "end", "-", "(", "periods", "-", "1", ")", "*", "freq", "elif", "end", "is", "None", ":", "end", "=", "start", "+", "(", "periods", "-", "1", ")", "*", "freq", "breaks", "=", "np", ".", "linspace", "(", "start", ",", "end", ",", "periods", ")", "if", "all", "(", "is_integer", "(", "x", ")", "for", "x", "in", "com", ".", "not_none", "(", "start", ",", "end", ",", "freq", ")", ")", ":", "# np.linspace always produces float output", "breaks", "=", "maybe_downcast_to_dtype", "(", "breaks", ",", "\"int64\"", ")", "else", ":", "# delegate to the appropriate range function", "if", "isinstance", "(", "endpoint", ",", "Timestamp", ")", ":", "range_func", "=", "date_range", "else", ":", "range_func", "=", "timedelta_range", "breaks", "=", "range_func", "(", "start", "=", "start", ",", "end", "=", "end", ",", "periods", "=", "periods", ",", "freq", "=", "freq", ")", "return", 
"IntervalIndex", ".", "from_breaks", "(", "breaks", ",", "name", "=", "name", ",", "closed", "=", "closed", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/indexes/interval.py#L1222-L1383
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/protobuf/python/google/protobuf/internal/well_known_types.py
python
Timestamp.FromMilliseconds
(self, millis)
Converts milliseconds since epoch to Timestamp.
Converts milliseconds since epoch to Timestamp.
[ "Converts", "milliseconds", "since", "epoch", "to", "Timestamp", "." ]
def FromMilliseconds(self, millis): """Converts milliseconds since epoch to Timestamp.""" self.seconds = millis // _MILLIS_PER_SECOND self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND
[ "def", "FromMilliseconds", "(", "self", ",", "millis", ")", ":", "self", ".", "seconds", "=", "millis", "//", "_MILLIS_PER_SECOND", "self", ".", "nanos", "=", "(", "millis", "%", "_MILLIS_PER_SECOND", ")", "*", "_NANOS_PER_MILLISECOND" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/protobuf/python/google/protobuf/internal/well_known_types.py#L216-L219
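A quick worked example of the millisecond split performed by FromMilliseconds above. This is a plain-Python sketch; the two constants are assumed to match the module's usual values (1000 ms per second, 1,000,000 ns per ms):

_MILLIS_PER_SECOND = 1000          # assumed module constant
_NANOS_PER_MILLISECOND = 1000000   # assumed module constant

millis = 1234567                   # 1234.567 s since the epoch
seconds = millis // _MILLIS_PER_SECOND                          # 1234
nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND  # 567000000
assert (seconds, nanos) == (1234, 567000000)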
CaoWGG/TensorRT-YOLOv4
4d7c2edce99e8794a4cb4ea3540d51ce91158a36
tools/yolo_to_onnx.py
python
WeightLoader._load_one_param_type
(self, conv_params, param_category, suffix)
return param_name, param_data, param_shape
Deserializes the weights from a file stream in the DarkNet order. Keyword arguments: conv_params -- a ConvParams object param_category -- the category of parameters to be created ('bn' or 'conv') suffix -- a string determining the sub-type of above param_category (e.g., 'weights' or 'bias')
Deserializes the weights from a file stream in the DarkNet order.
[ "Deserializes", "the", "weights", "from", "a", "file", "stream", "in", "the", "DarkNet", "order", "." ]
def _load_one_param_type(self, conv_params, param_category, suffix): """Deserializes the weights from a file stream in the DarkNet order. Keyword arguments: conv_params -- a ConvParams object param_category -- the category of parameters to be created ('bn' or 'conv') suffix -- a string determining the sub-type of above param_category (e.g., 'weights' or 'bias') """ param_name = conv_params.generate_param_name(param_category, suffix) channels_out, channels_in, filter_h, filter_w = conv_params.conv_weight_dims if param_category == 'bn': param_shape = [channels_out] elif param_category == 'conv': if suffix == 'weights': param_shape = [channels_out, channels_in, filter_h, filter_w] elif suffix == 'bias': param_shape = [channels_out] param_size = np.product(np.array(param_shape)) param_data = np.ndarray( shape=param_shape, dtype='float32', buffer=self.weights_file.read(param_size * 4)) param_data = param_data.flatten().astype(float) return param_name, param_data, param_shape
[ "def", "_load_one_param_type", "(", "self", ",", "conv_params", ",", "param_category", ",", "suffix", ")", ":", "param_name", "=", "conv_params", ".", "generate_param_name", "(", "param_category", ",", "suffix", ")", "channels_out", ",", "channels_in", ",", "filter_h", ",", "filter_w", "=", "conv_params", ".", "conv_weight_dims", "if", "param_category", "==", "'bn'", ":", "param_shape", "=", "[", "channels_out", "]", "elif", "param_category", "==", "'conv'", ":", "if", "suffix", "==", "'weights'", ":", "param_shape", "=", "[", "channels_out", ",", "channels_in", ",", "filter_h", ",", "filter_w", "]", "elif", "suffix", "==", "'bias'", ":", "param_shape", "=", "[", "channels_out", "]", "param_size", "=", "np", ".", "product", "(", "np", ".", "array", "(", "param_shape", ")", ")", "param_data", "=", "np", ".", "ndarray", "(", "shape", "=", "param_shape", ",", "dtype", "=", "'float32'", ",", "buffer", "=", "self", ".", "weights_file", ".", "read", "(", "param_size", "*", "4", ")", ")", "param_data", "=", "param_data", ".", "flatten", "(", ")", ".", "astype", "(", "float", ")", "return", "param_name", ",", "param_data", ",", "param_shape" ]
https://github.com/CaoWGG/TensorRT-YOLOv4/blob/4d7c2edce99e8794a4cb4ea3540d51ce91158a36/tools/yolo_to_onnx.py#L256-L280
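The core move above is constructing an ndarray directly over raw bytes read from the weights file, four bytes per float32. A self-contained sketch of that read logic against a synthetic in-memory stream (the stream contents are made up):

import io
import numpy as np

# fake DarkNet-style stream: little-endian float32 values back to back
stream = io.BytesIO(np.arange(4, dtype="float32").tobytes())

param_shape = [2, 2]
param_size = int(np.prod(param_shape))
param_data = np.ndarray(shape=param_shape, dtype="float32",
                        buffer=stream.read(param_size * 4))  # 4 bytes per value
print(param_data)  # [[0. 1.] [2. 3.]]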
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/threading.py
python
Thread.start
(self)
Start the thread's activity. It must be called at most once per thread object. It arranges for the object's run() method to be invoked in a separate thread of control. This method will raise a RuntimeError if called more than once on the same thread object.
Start the thread's activity.
[ "Start", "the", "thread", "s", "activity", "." ]
def start(self): """Start the thread's activity. It must be called at most once per thread object. It arranges for the object's run() method to be invoked in a separate thread of control. This method will raise a RuntimeError if called more than once on the same thread object. """ if not self.__initialized: raise RuntimeError("thread.__init__() not called") if self.__started.is_set(): raise RuntimeError("threads can only be started once") if __debug__: self._note("%s.start(): starting thread", self) with _active_limbo_lock: _limbo[self] = self try: _start_new_thread(self.__bootstrap, ()) except Exception: with _active_limbo_lock: del _limbo[self] raise self.__started.wait()
[ "def", "start", "(", "self", ")", ":", "if", "not", "self", ".", "__initialized", ":", "raise", "RuntimeError", "(", "\"thread.__init__() not called\"", ")", "if", "self", ".", "__started", ".", "is_set", "(", ")", ":", "raise", "RuntimeError", "(", "\"threads can only be started once\"", ")", "if", "__debug__", ":", "self", ".", "_note", "(", "\"%s.start(): starting thread\"", ",", "self", ")", "with", "_active_limbo_lock", ":", "_limbo", "[", "self", "]", "=", "self", "try", ":", "_start_new_thread", "(", "self", ".", "__bootstrap", ",", "(", ")", ")", "except", "Exception", ":", "with", "_active_limbo_lock", ":", "del", "_limbo", "[", "self", "]", "raise", "self", ".", "__started", ".", "wait", "(", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/threading.py#L724-L748
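A minimal usage sketch of the single-start contract documented above; standard library only, thread name arbitrary:

import threading

def work():
    print("running in", threading.current_thread().name)

t = threading.Thread(target=work, name="worker-1")
t.start()  # arranges for work() to run in a separate thread of control
t.join()
try:
    t.start()
except RuntimeError as exc:
    print(exc)  # threads can only be started once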
NVIDIA/DALI
bf16cc86ba8f091b145f91962f21fe1b6aff243d
dali/python/nvidia/dali/math.py
python
sin
(input)
return _arithm_op("sin", input)
Computes sine of values in ``input``. :rtype: TensorList of sin(input). If input is an integer, the result will be float, otherwise the type is preserved.
Computes sine of values in ``input``.
[ "Computes", "sine", "of", "values", "in", "input", "." ]
def sin(input): """Computes sine of values in ``input``. :rtype: TensorList of sin(input). If input is an integer, the result will be float, otherwise the type is preserved. """ return _arithm_op("sin", input)
[ "def", "sin", "(", "input", ")", ":", "return", "_arithm_op", "(", "\"sin\"", ",", "input", ")" ]
https://github.com/NVIDIA/DALI/blob/bf16cc86ba8f091b145f91962f21fe1b6aff243d/dali/python/nvidia/dali/math.py#L112-L118
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py
python
Decimal.to_eng_string
(self, context=None)
return self.__str__(eng=True, context=context)
Convert to a string, using engineering notation if an exponent is needed. Engineering notation has an exponent which is a multiple of 3. This can leave up to 3 digits to the left of the decimal place and may require the addition of either one or two trailing zeros.
Convert to a string, using engineering notation if an exponent is needed.
[ "Convert", "to", "a", "string", "using", "engineering", "notation", "if", "an", "exponent", "is", "needed", "." ]
def to_eng_string(self, context=None): """Convert to a string, using engineering notation if an exponent is needed. Engineering notation has an exponent which is a multiple of 3. This can leave up to 3 digits to the left of the decimal place and may require the addition of either one or two trailing zeros. """ return self.__str__(eng=True, context=context)
[ "def", "to_eng_string", "(", "self", ",", "context", "=", "None", ")", ":", "return", "self", ".", "__str__", "(", "eng", "=", "True", ",", "context", "=", "context", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py#L1083-L1090
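A short illustration of the engineering-notation behaviour described above, using the stdlib decimal module (the literals are arbitrary):

from decimal import Decimal

print(str(Decimal("1.23E+4")))             # 1.23E+4  (plain scientific form)
print(Decimal("1.23E+4").to_eng_string())  # 12.3E+3  (exponent is a multiple of 3)
print(Decimal("123456").to_eng_string())   # 123456   (no exponent needed)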
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/calltip_w.py
python
CalltipWindow.__init__
(self, text_widget)
Create a call-tip; shown by showtip(). text_widget: a Text widget with code for which call-tips are desired
Create a call-tip; shown by showtip().
[ "Create", "a", "call", "-", "tip", ";", "shown", "by", "showtip", "()", "." ]
def __init__(self, text_widget): """Create a call-tip; shown by showtip(). text_widget: a Text widget with code for which call-tips are desired """ # Note: The Text widget will be accessible as self.anchor_widget super(CalltipWindow, self).__init__(text_widget) self.label = self.text = None self.parenline = self.parencol = self.lastline = None self.hideid = self.checkhideid = None self.checkhide_after_id = None
[ "def", "__init__", "(", "self", ",", "text_widget", ")", ":", "# Note: The Text widget will be accessible as self.anchor_widget", "super", "(", "CalltipWindow", ",", "self", ")", ".", "__init__", "(", "text_widget", ")", "self", ".", "label", "=", "self", ".", "text", "=", "None", "self", ".", "parenline", "=", "self", ".", "parencol", "=", "self", ".", "lastline", "=", "None", "self", ".", "hideid", "=", "self", ".", "checkhideid", "=", "None", "self", ".", "checkhide_after_id", "=", "None" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/calltip_w.py#L22-L33
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/calendar.py
python
GenericCalendarCtrl.Create
(*args, **kwargs)
return _calendar.GenericCalendarCtrl_Create(*args, **kwargs)
Create(self, Window parent, int id, DateTime date=DefaultDateTime, Point pos=DefaultPosition, Size size=DefaultSize, long style=wxCAL_SHOW_HOLIDAYS|wxWANTS_CHARS, String name=CalendarNameStr) -> bool Actually create the GUI portion of the CalendarCtrl for 2-phase creation.
Create(self, Window parent, int id, DateTime date=DefaultDateTime, Point pos=DefaultPosition, Size size=DefaultSize, long style=wxCAL_SHOW_HOLIDAYS|wxWANTS_CHARS, String name=CalendarNameStr) -> bool
[ "Create", "(", "self", "Window", "parent", "int", "id", "DateTime", "date", "=", "DefaultDateTime", "Point", "pos", "=", "DefaultPosition", "Size", "size", "=", "DefaultSize", "long", "style", "=", "wxCAL_SHOW_HOLIDAYS|wxWANTS_CHARS", "String", "name", "=", "CalendarNameStr", ")", "-", ">", "bool" ]
def Create(*args, **kwargs): """ Create(self, Window parent, int id, DateTime date=DefaultDateTime, Point pos=DefaultPosition, Size size=DefaultSize, long style=wxCAL_SHOW_HOLIDAYS|wxWANTS_CHARS, String name=CalendarNameStr) -> bool Actually create the GUI portion of the CalendarCtrl for 2-phase creation. """ return _calendar.GenericCalendarCtrl_Create(*args, **kwargs)
[ "def", "Create", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_calendar", ".", "GenericCalendarCtrl_Create", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/calendar.py#L503-L513
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_windows.py
python
Printout.HasPage
(*args, **kwargs)
return _windows_.Printout_HasPage(*args, **kwargs)
HasPage(self, int page) -> bool
HasPage(self, int page) -> bool
[ "HasPage", "(", "self", "int", "page", ")", "-", ">", "bool" ]
def HasPage(*args, **kwargs): """HasPage(self, int page) -> bool""" return _windows_.Printout_HasPage(*args, **kwargs)
[ "def", "HasPage", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "Printout_HasPage", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_windows.py#L5403-L5405
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/bincount_ops.py
python
validate_dense_weights
(values, weights, dtype=None)
return weights
Validates the passed weight tensor or creates an empty one.
Validates the passed weight tensor or creates an empty one.
[ "Validates", "the", "passed", "weight", "tensor", "or", "creates", "an", "empty", "one", "." ]
def validate_dense_weights(values, weights, dtype=None): """Validates the passed weight tensor or creates an empty one.""" if weights is None: if dtype: return array_ops.constant([], dtype=dtype) return array_ops.constant([], dtype=values.dtype) if not isinstance(weights, ops.Tensor): raise ValueError( "Argument `weights` must be a tf.Tensor if `values` is a tf.Tensor. " f"Received weights={weights} of type: {type(weights).__name__}") return weights
[ "def", "validate_dense_weights", "(", "values", ",", "weights", ",", "dtype", "=", "None", ")", ":", "if", "weights", "is", "None", ":", "if", "dtype", ":", "return", "array_ops", ".", "constant", "(", "[", "]", ",", "dtype", "=", "dtype", ")", "return", "array_ops", ".", "constant", "(", "[", "]", ",", "dtype", "=", "values", ".", "dtype", ")", "if", "not", "isinstance", "(", "weights", ",", "ops", ".", "Tensor", ")", ":", "raise", "ValueError", "(", "\"Argument `weights` must be a tf.Tensor if `values` is a tf.Tensor. \"", "f\"Received weights={weights} of type: {type(weights).__name__}\"", ")", "return", "weights" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/bincount_ops.py#L452-L464
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/scipy/stats/_multivariate.py
python
_pinv_1d
(v, eps=1e-5)
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
A helper function for computing the pseudoinverse. Parameters ---------- v : iterable of numbers This may be thought of as a vector of eigenvalues or singular values. eps : float Values with magnitude no greater than eps are considered negligible. Returns ------- v_pinv : 1d float ndarray A vector of pseudo-inverted numbers.
A helper function for computing the pseudoinverse.
[ "A", "helper", "function", "for", "computing", "the", "pseudoinverse", "." ]
def _pinv_1d(v, eps=1e-5): """ A helper function for computing the pseudoinverse. Parameters ---------- v : iterable of numbers This may be thought of as a vector of eigenvalues or singular values. eps : float Values with magnitude no greater than eps are considered negligible. Returns ------- v_pinv : 1d float ndarray A vector of pseudo-inverted numbers. """ return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
[ "def", "_pinv_1d", "(", "v", ",", "eps", "=", "1e-5", ")", ":", "return", "np", ".", "array", "(", "[", "0", "if", "abs", "(", "x", ")", "<=", "eps", "else", "1", "/", "x", "for", "x", "in", "v", "]", ",", "dtype", "=", "float", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/stats/_multivariate.py#L83-L100
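A standalone sketch mirroring the helper above, showing how near-zero entries are pinned to zero instead of being inverted; the sample vector is made up:

import numpy as np

def pinv_1d(v, eps=1e-5):
    # reciprocal of each entry; entries with |x| <= eps are treated as exactly 0
    return np.array([0 if abs(x) <= eps else 1 / x for x in v], dtype=float)

print(pinv_1d([4.0, 1e-12, -2.0]))  # [ 0.25  0.   -0.5 ]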
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/generic.py
python
NDFrame._get_cacher
(self)
return cacher
return my cacher or None
return my cacher or None
[ "return", "my", "cacher", "or", "None" ]
def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher
[ "def", "_get_cacher", "(", "self", ")", ":", "cacher", "=", "getattr", "(", "self", ",", "\"_cacher\"", ",", "None", ")", "if", "cacher", "is", "not", "None", ":", "cacher", "=", "cacher", "[", "1", "]", "(", ")", "return", "cacher" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/generic.py#L3248-L3253
cvxpy/cvxpy
5165b4fb750dfd237de8659383ef24b4b2e33aaf
cvxpy/reductions/solvers/conic_solvers/scs_conif.py
python
SCS.name
(self)
return s.SCS
The name of the solver.
The name of the solver.
[ "The", "name", "of", "the", "solver", "." ]
def name(self): """The name of the solver. """ return s.SCS
[ "def", "name", "(", "self", ")", ":", "return", "s", ".", "SCS" ]
https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/reductions/solvers/conic_solvers/scs_conif.py#L136-L139
NREL/EnergyPlus
fadc5973b85c70e8cc923efb69c144e808a26078
src/EnergyPlus/api/datatransfer.py
python
DataExchange.today_weather_outdoor_barometric_pressure_at_time
(self, state: c_void_p, hour: int, time_step_number: int)
return self.api.todayWeatherOutBarometricPressureAtTime(state, hour, time_step_number)
Gets the specified weather data at the specified hour and time step index within that hour :param state: An active EnergyPlus "state" that is returned from a call to `api.state_manager.new_state()`. :param hour: Integer hour of day (0 to 23) :param time_step_number: Time step index in hour, from 1 to the number of zone time steps per hour :return: Value of the weather condition at the specified time
Gets the specified weather data at the specified hour and time step index within that hour
[ "Gets", "the", "specified", "weather", "data", "at", "the", "specified", "hour", "and", "time", "step", "index", "within", "that", "hour" ]
def today_weather_outdoor_barometric_pressure_at_time(self, state: c_void_p, hour: int, time_step_number: int) -> float: """ Gets the specified weather data at the specified hour and time step index within that hour :param state: An active EnergyPlus "state" that is returned from a call to `api.state_manager.new_state()`. :param hour: Integer hour of day (0 to 23) :param time_step_number: Time step index in hour, from 1 to the number of zone time steps per hour :return: Value of the weather condition at the specified time """ return self.api.todayWeatherOutBarometricPressureAtTime(state, hour, time_step_number)
[ "def", "today_weather_outdoor_barometric_pressure_at_time", "(", "self", ",", "state", ":", "c_void_p", ",", "hour", ":", "int", ",", "time_step_number", ":", "int", ")", "->", "float", ":", "return", "self", ".", "api", ".", "todayWeatherOutBarometricPressureAtTime", "(", "state", ",", "hour", ",", "time_step_number", ")" ]
https://github.com/NREL/EnergyPlus/blob/fadc5973b85c70e8cc923efb69c144e808a26078/src/EnergyPlus/api/datatransfer.py#L1163-L1173
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/contrib/learn/python/learn/utils/checkpoints.py
python
load_variable
(checkpoint_dir, name)
return checkpoint_utils.load_variable(checkpoint_dir, name)
See `tf.contrib.framework.load_variable`.
See `tf.contrib.framework.load_variable`.
[ "See", "tf", ".", "contrib", ".", "framework", ".", "load_variable", "." ]
def load_variable(checkpoint_dir, name): """See `tf.contrib.framework.load_variable`.""" return checkpoint_utils.load_variable(checkpoint_dir, name)
[ "def", "load_variable", "(", "checkpoint_dir", ",", "name", ")", ":", "return", "checkpoint_utils", ".", "load_variable", "(", "checkpoint_dir", ",", "name", ")" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/learn/python/learn/utils/checkpoints.py#L35-L37
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/requests/utils.py
python
prepend_scheme_if_needed
(url, new_scheme)
return urlunparse((scheme, netloc, path, params, query, fragment))
Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str
Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument.
[ "Given", "a", "URL", "that", "may", "or", "may", "not", "have", "a", "scheme", "prepend", "the", "given", "scheme", ".", "Does", "not", "replace", "a", "present", "scheme", "with", "the", "one", "provided", "as", "an", "argument", "." ]
def prepend_scheme_if_needed(url, new_scheme): """Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str """ scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) # urlparse is a finicky beast, and sometimes decides that there isn't a # netloc present. Assume that it's being over-cautious, and switch netloc # and path if urlparse decided there was no netloc. if not netloc: netloc, path = path, netloc return urlunparse((scheme, netloc, path, params, query, fragment))
[ "def", "prepend_scheme_if_needed", "(", "url", ",", "new_scheme", ")", ":", "scheme", ",", "netloc", ",", "path", ",", "params", ",", "query", ",", "fragment", "=", "urlparse", "(", "url", ",", "new_scheme", ")", "# urlparse is a finicky beast, and sometimes decides that there isn't a", "# netloc present. Assume that it's being over-cautious, and switch netloc", "# and path if urlparse decided there was no netloc.", "if", "not", "netloc", ":", "netloc", ",", "path", "=", "path", ",", "netloc", "return", "urlunparse", "(", "(", "scheme", ",", "netloc", ",", "path", ",", "params", ",", "query", ",", "fragment", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/requests/utils.py#L894-L908
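The netloc/path swap above can be reproduced with the stdlib urllib.parse; a minimal sketch of the same logic (not the vendored requests code itself):

from urllib.parse import urlparse, urlunparse

def prepend_scheme_if_needed(url, new_scheme):
    scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
    if not netloc:  # urlparse parked the host in `path`
        netloc, path = path, netloc
    return urlunparse((scheme, netloc, path, params, query, fragment))

print(prepend_scheme_if_needed("example.com/pub", "http"))  # http://example.com/pub
print(prepend_scheme_if_needed("https://x.io/a", "http"))   # https://x.io/a (kept)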
MTG/gaia
0f7214dbdec6f9b651ca34211824841ffba0bc77
src/doc/doxy2swig.py
python
Doxy2SWIG.parse
(self, node)
Parse a given node. This function in turn calls the `parse_<nodeType>` functions which handle the respective nodes.
Parse a given node. This function in turn calls the `parse_<nodeType>` functions which handle the respective nodes.
[ "Parse", "a", "given", "node", ".", "This", "function", "in", "turn", "calls", "the", "parse_<nodeType", ">", "functions", "which", "handle", "the", "respective", "nodes", "." ]
def parse(self, node): """Parse a given node. This function in turn calls the `parse_<nodeType>` functions which handle the respective nodes. """ pm = getattr(self, "parse_%s" % node.__class__.__name__) pm(node)
[ "def", "parse", "(", "self", ",", "node", ")", ":", "pm", "=", "getattr", "(", "self", ",", "\"parse_%s\"", "%", "node", ".", "__class__", ".", "__name__", ")", "pm", "(", "node", ")" ]
https://github.com/MTG/gaia/blob/0f7214dbdec6f9b651ca34211824841ffba0bc77/src/doc/doxy2swig.py#L171-L178
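The parse method above is a name-based dispatch on the DOM node's class. A standalone sketch of the same pattern; the parse_default fallback is an addition here (the original simply lets getattr raise for unhandled node types):

from xml.dom import minidom

class Visitor:
    def parse(self, node):
        # dispatch to parse_Document / parse_Element / parse_Text by class name
        getattr(self, "parse_%s" % node.__class__.__name__, self.parse_default)(node)

    def parse_Document(self, node):
        self.parse(node.documentElement)

    def parse_Element(self, node):
        for child in node.childNodes:
            self.parse(child)

    def parse_Text(self, node):
        print(node.data)

    def parse_default(self, node):
        pass

Visitor().parse(minidom.parseString("<a>hi<b>there</b></a>"))  # prints hi, then there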
MirrorYuChen/ncnn_example
a42608e6e0e51ed68d3bd8ada853595980935220
ncnn-20210525-full-source/python/ncnn/model_zoo/mobilenetv3ssdlite.py
python
MobileNetV3_SSDLite.__call__
(self, img)
return objects
#method 2, use ncnn.Mat->numpy.array to get the result, no memory copy too out = np.array(mat_out) for i in range(len(out)): values = out[i] obj = Detect_Object() obj.label = values[0] obj.prob = values[1] x1 = clamp(values[2] * self.img_width, 0.0, float(self.img_width - 1)) / self.img_width * img_w y1 = clamp(values[3] * self.img_height, 0.0, float(self.img_height - 1)) / self.img_height * img_h x2 = clamp(values[4] * self.img_width, 0.0, float(self.img_width - 1)) / self.img_width * img_w y2 = clamp(values[5] * self.img_height, 0.0, float(self.img_height - 1)) / self.img_height * img_h obj.rect.x = x1 obj.rect.y = y1 obj.rect.w = x2 - x1 obj.rect.h = y2 - y1 objects.append(obj)
#method 2, use ncnn.Mat->numpy.array to get the result, no memory copy too out = np.array(mat_out) for i in range(len(out)): values = out[i] obj = Detect_Object() obj.label = values[0] obj.prob = values[1]
[ "#method", "2", "use", "ncnn", ".", "Mat", "-", ">", "numpy", ".", "array", "to", "get", "the", "result", "no", "memory", "copy", "too", "out", "=", "np", ".", "array", "(", "mat_out", ")", "for", "i", "in", "range", "(", "len", "(", "out", "))", ":", "values", "=", "out", "[", "i", "]", "obj", "=", "Detect_Object", "()", "obj", ".", "label", "=", "values", "[", "0", "]", "obj", ".", "prob", "=", "values", "[", "1", "]" ]
def __call__(self, img): img_h = img.shape[0] img_w = img.shape[1] mat_in = ncnn.Mat.from_pixels_resize( img, ncnn.Mat.PixelType.PIXEL_BGR2RGB, img.shape[1], img.shape[0], self.target_size, self.target_size, ) mat_in.substract_mean_normalize([], self.norm_vals) mat_in.substract_mean_normalize(self.mean_vals, []) ex = self.net.create_extractor() ex.set_light_mode(True) ex.set_num_threads(self.num_threads) ex.input("input", mat_in) ret, mat_out = ex.extract("detection_out") objects = [] # printf("%d %d %d\n", mat_out.w, mat_out.h, mat_out.c) # method 1, use ncnn.Mat.row to get the result, no memory copy for i in range(mat_out.h): values = mat_out.row(i) obj = Detect_Object() obj.label = values[0] obj.prob = values[1] x1 = ( clamp(values[2] * self.target_size, 0.0, float(self.target_size - 1)) / self.target_size * img_w ) y1 = ( clamp(values[3] * self.target_size, 0.0, float(self.target_size - 1)) / self.target_size * img_h ) x2 = ( clamp(values[4] * self.target_size, 0.0, float(self.target_size - 1)) / self.target_size * img_w ) y2 = ( clamp(values[5] * self.target_size, 0.0, float(self.target_size - 1)) / self.target_size * img_h ) if np.isnan(x1) or np.isnan(y1) or np.isnan(x2) or np.isnan(y2): continue obj.rect.x = x1 obj.rect.y = y1 obj.rect.w = x2 - x1 obj.rect.h = y2 - y1 objects.append(obj) """ #method 2, use ncnn.Mat->numpy.array to get the result, no memory copy too out = np.array(mat_out) for i in range(len(out)): values = out[i] obj = Detect_Object() obj.label = values[0] obj.prob = values[1] x1 = clamp(values[2] * self.img_width, 0.0, float(self.img_width - 1)) / self.img_width * img_w y1 = clamp(values[3] * self.img_height, 0.0, float(self.img_height - 1)) / self.img_height * img_h x2 = clamp(values[4] * self.img_width, 0.0, float(self.img_width - 1)) / self.img_width * img_w y2 = clamp(values[5] * self.img_height, 0.0, float(self.img_height - 1)) / self.img_height * img_h obj.rect.x = x1 obj.rect.y = y1 obj.rect.w = x2 - x1 obj.rect.h = y2 - y1 objects.append(obj) """ return objects
[ "def", "__call__", "(", "self", ",", "img", ")", ":", "img_h", "=", "img", ".", "shape", "[", "0", "]", "img_w", "=", "img", ".", "shape", "[", "1", "]", "mat_in", "=", "ncnn", ".", "Mat", ".", "from_pixels_resize", "(", "img", ",", "ncnn", ".", "Mat", ".", "PixelType", ".", "PIXEL_BGR2RGB", ",", "img", ".", "shape", "[", "1", "]", ",", "img", ".", "shape", "[", "0", "]", ",", "self", ".", "target_size", ",", "self", ".", "target_size", ",", ")", "mat_in", ".", "substract_mean_normalize", "(", "[", "]", ",", "self", ".", "norm_vals", ")", "mat_in", ".", "substract_mean_normalize", "(", "self", ".", "mean_vals", ",", "[", "]", ")", "ex", "=", "self", ".", "net", ".", "create_extractor", "(", ")", "ex", ".", "set_light_mode", "(", "True", ")", "ex", ".", "set_num_threads", "(", "self", ".", "num_threads", ")", "ex", ".", "input", "(", "\"input\"", ",", "mat_in", ")", "ret", ",", "mat_out", "=", "ex", ".", "extract", "(", "\"detection_out\"", ")", "objects", "=", "[", "]", "# printf(\"%d %d %d\\n\", mat_out.w, mat_out.h, mat_out.c)", "# method 1, use ncnn.Mat.row to get the result, no memory copy", "for", "i", "in", "range", "(", "mat_out", ".", "h", ")", ":", "values", "=", "mat_out", ".", "row", "(", "i", ")", "obj", "=", "Detect_Object", "(", ")", "obj", ".", "label", "=", "values", "[", "0", "]", "obj", ".", "prob", "=", "values", "[", "1", "]", "x1", "=", "(", "clamp", "(", "values", "[", "2", "]", "*", "self", ".", "target_size", ",", "0.0", ",", "float", "(", "self", ".", "target_size", "-", "1", ")", ")", "/", "self", ".", "target_size", "*", "img_w", ")", "y1", "=", "(", "clamp", "(", "values", "[", "3", "]", "*", "self", ".", "target_size", ",", "0.0", ",", "float", "(", "self", ".", "target_size", "-", "1", ")", ")", "/", "self", ".", "target_size", "*", "img_h", ")", "x2", "=", "(", "clamp", "(", "values", "[", "4", "]", "*", "self", ".", "target_size", ",", "0.0", ",", "float", "(", "self", ".", "target_size", "-", "1", ")", ")", "/", "self", ".", "target_size", "*", "img_w", ")", "y2", "=", "(", "clamp", "(", "values", "[", "5", "]", "*", "self", ".", "target_size", ",", "0.0", ",", "float", "(", "self", ".", "target_size", "-", "1", ")", ")", "/", "self", ".", "target_size", "*", "img_h", ")", "if", "np", ".", "isnan", "(", "x1", ")", "or", "np", ".", "isnan", "(", "y1", ")", "or", "np", ".", "isnan", "(", "x2", ")", "or", "np", ".", "isnan", "(", "y2", ")", ":", "continue", "obj", ".", "rect", ".", "x", "=", "x1", "obj", ".", "rect", ".", "y", "=", "y1", "obj", ".", "rect", ".", "w", "=", "x2", "-", "x1", "obj", ".", "rect", ".", "h", "=", "y2", "-", "y1", "objects", ".", "append", "(", "obj", ")", "return", "objects" ]
https://github.com/MirrorYuChen/ncnn_example/blob/a42608e6e0e51ed68d3bd8ada853595980935220/ncnn-20210525-full-source/python/ncnn/model_zoo/mobilenetv3ssdlite.py#L74-L162
priyankchheda/algorithms
c361aa9071573fa9966d5b02d05e524815abcf2b
red_black_tree/red_black_tree.py
python
RedBlackTree.max
(self)
return current.data
returns right-most item present in red black tree which is also the maximum element in rb-tree
returns right-most item present in red black tree which is also the maximum element in rb-tree
[ "returns", "right", "-", "most", "item", "present", "in", "red", "black", "tree", "which", "is", "also", "the", "maximum", "element", "in", "rb", "-", "tree" ]
def max(self): """ returns right-most item present in red black tree which is also the maximum element in rb-tree """ if self.root is None: raise Exception("tree is empty") current = self.root while current.right is not None: current = current.right return current.data
[ "def", "max", "(", "self", ")", ":", "if", "self", ".", "root", "is", "None", ":", "raise", "Exception", "(", "\"tree is empty\"", ")", "current", "=", "self", ".", "root", "while", "current", ".", "right", "is", "not", "None", ":", "current", "=", "current", ".", "right", "return", "current", ".", "data" ]
https://github.com/priyankchheda/algorithms/blob/c361aa9071573fa9966d5b02d05e524815abcf2b/red_black_tree/red_black_tree.py#L168-L178
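Because red-black colouring never changes the ordering invariant, the max walk above works on any binary search tree. A tiny sketch with an ad-hoc Node class (names assumed, not from the repo):

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def tree_max(root):
    # the maximum always lives at the right-most node
    if root is None:
        raise Exception("tree is empty")
    while root.right is not None:
        root = root.right
    return root.data

print(tree_max(Node(10, Node(5), Node(20, right=Node(30)))))  # 30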
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/position.py
python
Position.opening_cost
(self, opening_cost)
Sets the opening_cost of this Position. :param opening_cost: The opening_cost of this Position. # noqa: E501 :type: float
Sets the opening_cost of this Position.
[ "Sets", "the", "opening_cost", "of", "this", "Position", "." ]
def opening_cost(self, opening_cost): """Sets the opening_cost of this Position. :param opening_cost: The opening_cost of this Position. # noqa: E501 :type: float """ self._opening_cost = opening_cost
[ "def", "opening_cost", "(", "self", ",", "opening_cost", ")", ":", "self", ".", "_opening_cost", "=", "opening_cost" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/position.py#L892-L900
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/ipaddress.py
python
_BaseV6._explode_shorthand_ip_string
(self)
return ':'.join(parts)
Expand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address.
Expand a shortened IPv6 address.
[ "Expand", "a", "shortened", "IPv6", "address", "." ]
def _explode_shorthand_ip_string(self): """Expand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address. """ if isinstance(self, IPv6Network): ip_str = str(self.network_address) elif isinstance(self, IPv6Interface): ip_str = str(self.ip) else: ip_str = str(self) ip_int = self._ip_int_from_string(ip_str) hex_str = '%032x' % ip_int parts = [hex_str[x:x+4] for x in range(0, 32, 4)] if isinstance(self, (_BaseNetwork, IPv6Interface)): return '%s/%d' % (':'.join(parts), self._prefixlen) return ':'.join(parts)
[ "def", "_explode_shorthand_ip_string", "(", "self", ")", ":", "if", "isinstance", "(", "self", ",", "IPv6Network", ")", ":", "ip_str", "=", "str", "(", "self", ".", "network_address", ")", "elif", "isinstance", "(", "self", ",", "IPv6Interface", ")", ":", "ip_str", "=", "str", "(", "self", ".", "ip", ")", "else", ":", "ip_str", "=", "str", "(", "self", ")", "ip_int", "=", "self", ".", "_ip_int_from_string", "(", "ip_str", ")", "hex_str", "=", "'%032x'", "%", "ip_int", "parts", "=", "[", "hex_str", "[", "x", ":", "x", "+", "4", "]", "for", "x", "in", "range", "(", "0", ",", "32", ",", "4", ")", "]", "if", "isinstance", "(", "self", ",", "(", "_BaseNetwork", ",", "IPv6Interface", ")", ")", ":", "return", "'%s/%d'", "%", "(", "':'", ".", "join", "(", "parts", ")", ",", "self", ".", "_prefixlen", ")", "return", "':'", ".", "join", "(", "parts", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/ipaddress.py#L1812-L1834
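The public way to reach this helper is the exploded property; a quick stdlib example (the address uses the RFC 3849 documentation prefix):

import ipaddress

addr = ipaddress.ip_address("2001:db8::8a2e:370:7334")
print(addr.compressed)  # 2001:db8::8a2e:370:7334
print(addr.exploded)    # 2001:0db8:0000:0000:0000:8a2e:0370:7334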
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/buttons.py
python
GenButton._GetLabelSize
(self)
return w, h, True
used internally
used internally
[ "used", "internally" ]
def _GetLabelSize(self): """ used internally """ w, h = self.GetTextExtent(self.GetLabel()) return w, h, True
[ "def", "_GetLabelSize", "(", "self", ")", ":", "w", ",", "h", "=", "self", ".", "GetTextExtent", "(", "self", ".", "GetLabel", "(", ")", ")", "return", "w", ",", "h", ",", "True" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/buttons.py#L212-L215
bulletphysics/bullet3
f0f2a952e146f016096db6f85cf0c44ed75b0b9a
examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur.py
python
Minitaur._RecordInertiaInfoFromURDF
(self)
Record the inertia of each body from URDF file.
Record the inertia of each body from URDF file.
[ "Record", "the", "inertia", "of", "each", "body", "from", "URDF", "file", "." ]
def _RecordInertiaInfoFromURDF(self): """Record the inertia of each body from URDF file.""" self._link_urdf = [] num_bodies = self._pybullet_client.getNumJoints(self.quadruped) for body_id in range(-1, num_bodies): # -1 is for the base link. inertia = self._pybullet_client.getDynamicsInfo(self.quadruped, body_id)[2] self._link_urdf.append(inertia) # We need to use id+1 to index self._link_urdf because it has the base # (index = -1) at the first element. self._base_inertia_urdf = [ self._link_urdf[chassis_id + 1] for chassis_id in self._chassis_link_ids ] self._leg_inertia_urdf = [self._link_urdf[leg_id + 1] for leg_id in self._leg_link_ids] self._leg_inertia_urdf.extend( [self._link_urdf[motor_id + 1] for motor_id in self._motor_link_ids])
[ "def", "_RecordInertiaInfoFromURDF", "(", "self", ")", ":", "self", ".", "_link_urdf", "=", "[", "]", "num_bodies", "=", "self", ".", "_pybullet_client", ".", "getNumJoints", "(", "self", ".", "quadruped", ")", "for", "body_id", "in", "range", "(", "-", "1", ",", "num_bodies", ")", ":", "# -1 is for the base link.", "inertia", "=", "self", ".", "_pybullet_client", ".", "getDynamicsInfo", "(", "self", ".", "quadruped", ",", "body_id", ")", "[", "2", "]", "self", ".", "_link_urdf", ".", "append", "(", "inertia", ")", "# We need to use id+1 to index self._link_urdf because it has the base", "# (index = -1) at the first element.", "self", ".", "_base_inertia_urdf", "=", "[", "self", ".", "_link_urdf", "[", "chassis_id", "+", "1", "]", "for", "chassis_id", "in", "self", ".", "_chassis_link_ids", "]", "self", ".", "_leg_inertia_urdf", "=", "[", "self", ".", "_link_urdf", "[", "leg_id", "+", "1", "]", "for", "leg_id", "in", "self", ".", "_leg_link_ids", "]", "self", ".", "_leg_inertia_urdf", ".", "extend", "(", "[", "self", ".", "_link_urdf", "[", "motor_id", "+", "1", "]", "for", "motor_id", "in", "self", ".", "_motor_link_ids", "]", ")" ]
https://github.com/bulletphysics/bullet3/blob/f0f2a952e146f016096db6f85cf0c44ed75b0b9a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur.py#L184-L198
hpi-xnor/BMXNet-v2
af2b1859eafc5c721b1397cef02f946aaf2ce20d
example/image-classification/common/fit.py
python
add_fit_args
(parser)
return train
parser : argparse.ArgumentParser return a parser added with args required by fit
parser : argparse.ArgumentParser return a parser added with args required by fit
[ "parser", ":", "argparse", ".", "ArgumentParser", "return", "a", "parser", "added", "with", "args", "required", "by", "fit" ]
def add_fit_args(parser): """ parser : argparse.ArgumentParser return a parser added with args required by fit """ train = parser.add_argument_group('Training', 'model training') train.add_argument('--network', type=str, help='the neural network to use') train.add_argument('--num-layers', type=int, help='number of layers in the neural network, \ required by some networks such as resnet') train.add_argument('--gpus', type=str, help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu') train.add_argument('--kv-store', type=str, default='device', help='key-value store type') train.add_argument('--num-epochs', type=int, default=100, help='max num of epochs') train.add_argument('--lr', type=float, default=0.1, help='initial learning rate') train.add_argument('--lr-factor', type=float, default=0.1, help='the ratio to reduce lr on each step') train.add_argument('--lr-step-epochs', type=str, help='the epochs to reduce the lr, e.g. 30,60') train.add_argument('--initializer', type=str, default='default', help='the initializer type') train.add_argument('--optimizer', type=str, default='sgd', help='the optimizer type') train.add_argument('--mom', type=float, default=0.9, help='momentum for sgd') train.add_argument('--wd', type=float, default=0.0001, help='weight decay for sgd') train.add_argument('--batch-size', type=int, default=128, help='the batch size') train.add_argument('--disp-batches', type=int, default=20, help='show progress for every n batches') train.add_argument('--model-prefix', type=str, help='model prefix') train.add_argument('--save-period', type=int, default=1, help='params saving period') parser.add_argument('--monitor', dest='monitor', type=int, default=0, help='log network parameters every N iters if larger than 0') train.add_argument('--load-epoch', type=int, help='load the model on an epoch using the model-load-prefix') train.add_argument('--top-k', type=int, default=0, help='report the top-k accuracy. 0 means no report.') train.add_argument('--loss', type=str, default='', help='show the cross-entropy or nll loss. ce strands for cross-entropy, nll-loss stands for likelihood loss') train.add_argument('--test-io', type=int, default=0, help='1 means test reading speed without training') train.add_argument('--dtype', type=str, default='float32', help='precision: float32 or float16') train.add_argument('--gc-type', type=str, default='none', help='type of gradient compression to use, \ takes `2bit` or `none` for now') train.add_argument('--gc-threshold', type=float, default=0.5, help='threshold for 2bit gradient compression') # additional parameters for large batch sgd train.add_argument('--macrobatch-size', type=int, default=0, help='distributed effective batch size') train.add_argument('--warmup-epochs', type=int, default=5, help='the epochs to ramp-up lr to scaled large-batch value') train.add_argument('--warmup-strategy', type=str, default='linear', help='the ramping-up strategy for large batch sgd') train.add_argument('--profile-worker-suffix', type=str, default='', help='profile workers actions into this file. During distributed training\ filename saved will be rank1_ followed by this suffix') train.add_argument('--profile-server-suffix', type=str, default='', help='profile server actions into a file with name like rank1_ followed by this suffix \ during distributed training') return train
[ "def", "add_fit_args", "(", "parser", ")", ":", "train", "=", "parser", ".", "add_argument_group", "(", "'Training'", ",", "'model training'", ")", "train", ".", "add_argument", "(", "'--network'", ",", "type", "=", "str", ",", "help", "=", "'the neural network to use'", ")", "train", ".", "add_argument", "(", "'--num-layers'", ",", "type", "=", "int", ",", "help", "=", "'number of layers in the neural network, \\\n required by some networks such as resnet'", ")", "train", ".", "add_argument", "(", "'--gpus'", ",", "type", "=", "str", ",", "help", "=", "'list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu'", ")", "train", ".", "add_argument", "(", "'--kv-store'", ",", "type", "=", "str", ",", "default", "=", "'device'", ",", "help", "=", "'key-value store type'", ")", "train", ".", "add_argument", "(", "'--num-epochs'", ",", "type", "=", "int", ",", "default", "=", "100", ",", "help", "=", "'max num of epochs'", ")", "train", ".", "add_argument", "(", "'--lr'", ",", "type", "=", "float", ",", "default", "=", "0.1", ",", "help", "=", "'initial learning rate'", ")", "train", ".", "add_argument", "(", "'--lr-factor'", ",", "type", "=", "float", ",", "default", "=", "0.1", ",", "help", "=", "'the ratio to reduce lr on each step'", ")", "train", ".", "add_argument", "(", "'--lr-step-epochs'", ",", "type", "=", "str", ",", "help", "=", "'the epochs to reduce the lr, e.g. 30,60'", ")", "train", ".", "add_argument", "(", "'--initializer'", ",", "type", "=", "str", ",", "default", "=", "'default'", ",", "help", "=", "'the initializer type'", ")", "train", ".", "add_argument", "(", "'--optimizer'", ",", "type", "=", "str", ",", "default", "=", "'sgd'", ",", "help", "=", "'the optimizer type'", ")", "train", ".", "add_argument", "(", "'--mom'", ",", "type", "=", "float", ",", "default", "=", "0.9", ",", "help", "=", "'momentum for sgd'", ")", "train", ".", "add_argument", "(", "'--wd'", ",", "type", "=", "float", ",", "default", "=", "0.0001", ",", "help", "=", "'weight decay for sgd'", ")", "train", ".", "add_argument", "(", "'--batch-size'", ",", "type", "=", "int", ",", "default", "=", "128", ",", "help", "=", "'the batch size'", ")", "train", ".", "add_argument", "(", "'--disp-batches'", ",", "type", "=", "int", ",", "default", "=", "20", ",", "help", "=", "'show progress for every n batches'", ")", "train", ".", "add_argument", "(", "'--model-prefix'", ",", "type", "=", "str", ",", "help", "=", "'model prefix'", ")", "train", ".", "add_argument", "(", "'--save-period'", ",", "type", "=", "int", ",", "default", "=", "1", ",", "help", "=", "'params saving period'", ")", "parser", ".", "add_argument", "(", "'--monitor'", ",", "dest", "=", "'monitor'", ",", "type", "=", "int", ",", "default", "=", "0", ",", "help", "=", "'log network parameters every N iters if larger than 0'", ")", "train", ".", "add_argument", "(", "'--load-epoch'", ",", "type", "=", "int", ",", "help", "=", "'load the model on an epoch using the model-load-prefix'", ")", "train", ".", "add_argument", "(", "'--top-k'", ",", "type", "=", "int", ",", "default", "=", "0", ",", "help", "=", "'report the top-k accuracy. 0 means no report.'", ")", "train", ".", "add_argument", "(", "'--loss'", ",", "type", "=", "str", ",", "default", "=", "''", ",", "help", "=", "'show the cross-entropy or nll loss. 
ce strands for cross-entropy, nll-loss stands for likelihood loss'", ")", "train", ".", "add_argument", "(", "'--test-io'", ",", "type", "=", "int", ",", "default", "=", "0", ",", "help", "=", "'1 means test reading speed without training'", ")", "train", ".", "add_argument", "(", "'--dtype'", ",", "type", "=", "str", ",", "default", "=", "'float32'", ",", "help", "=", "'precision: float32 or float16'", ")", "train", ".", "add_argument", "(", "'--gc-type'", ",", "type", "=", "str", ",", "default", "=", "'none'", ",", "help", "=", "'type of gradient compression to use, \\\n takes `2bit` or `none` for now'", ")", "train", ".", "add_argument", "(", "'--gc-threshold'", ",", "type", "=", "float", ",", "default", "=", "0.5", ",", "help", "=", "'threshold for 2bit gradient compression'", ")", "# additional parameters for large batch sgd", "train", ".", "add_argument", "(", "'--macrobatch-size'", ",", "type", "=", "int", ",", "default", "=", "0", ",", "help", "=", "'distributed effective batch size'", ")", "train", ".", "add_argument", "(", "'--warmup-epochs'", ",", "type", "=", "int", ",", "default", "=", "5", ",", "help", "=", "'the epochs to ramp-up lr to scaled large-batch value'", ")", "train", ".", "add_argument", "(", "'--warmup-strategy'", ",", "type", "=", "str", ",", "default", "=", "'linear'", ",", "help", "=", "'the ramping-up strategy for large batch sgd'", ")", "train", ".", "add_argument", "(", "'--profile-worker-suffix'", ",", "type", "=", "str", ",", "default", "=", "''", ",", "help", "=", "'profile workers actions into this file. During distributed training\\\n filename saved will be rank1_ followed by this suffix'", ")", "train", ".", "add_argument", "(", "'--profile-server-suffix'", ",", "type", "=", "str", ",", "default", "=", "''", ",", "help", "=", "'profile server actions into a file with name like rank1_ followed by this suffix \\\n during distributed training'", ")", "return", "train" ]
https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/example/image-classification/common/fit.py#L77-L145
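A quick usage sketch for the record above, assuming add_fit_args from common/fit.py is in scope (it registers its options on the parser you pass in, and the source defaults fill anything not given on the command line):

    import argparse
    # assumes add_fit_args (shown above) has been imported or pasted into scope

    parser = argparse.ArgumentParser(description='train an image classifier')
    add_fit_args(parser)   # adds '--network', '--lr', '--batch-size', ...
    args = parser.parse_args(['--network', 'resnet', '--lr', '0.05'])
    print(args.network, args.lr, args.batch_size)   # resnet 0.05 128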
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/learn/python/learn/estimators/dnn.py
python
DNNRegressor.__init__
(self, hidden_units, feature_columns, model_dir=None, weight_column_name=None, optimizer=None, activation_fn=nn.relu, dropout=None, gradient_clip_norm=None, enable_centered_bias=False, config=None, feature_engineering_fn=None, label_dimension=1, embedding_lr_multipliers=None, input_layer_min_slice_size=None)
Initializes a `DNNRegressor` instance. Args: hidden_units: List of hidden units per layer. All layers are fully connected. Ex. `[64, 32]` means first layer has 64 nodes and second one has 32. feature_columns: An iterable containing all the feature columns used by the model. All items in the set should be instances of classes derived from `FeatureColumn`. model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. optimizer: An instance of `tf.Optimizer` used to train the model. If `None`, will use an Adagrad optimizer. activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. Note that a string containing the unqualified name of the op may also be provided, e.g., "relu", "tanh", or "sigmoid". dropout: When not `None`, the probability we will drop out a given coordinate. gradient_clip_norm: A `float` > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See `tf.clip_by_global_norm` for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. config: `RunConfig` object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. label_dimension: Number of regression targets per example. This is the size of the last dimension of the labels and logits `Tensor` objects (typically, these have shape `[batch_size, label_dimension]`). embedding_lr_multipliers: Optional. A dictionary from `EbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. input_layer_min_slice_size: Optional. The min slice size of input layer partitions. If not provided, will use the default of 64M. Returns: A `DNNRegressor` estimator.
Initializes a `DNNRegressor` instance.
[ "Initializes", "a", "DNNRegressor", "instance", "." ]
def __init__(self, hidden_units, feature_columns, model_dir=None, weight_column_name=None, optimizer=None, activation_fn=nn.relu, dropout=None, gradient_clip_norm=None, enable_centered_bias=False, config=None, feature_engineering_fn=None, label_dimension=1, embedding_lr_multipliers=None, input_layer_min_slice_size=None): """Initializes a `DNNRegressor` instance. Args: hidden_units: List of hidden units per layer. All layers are fully connected. Ex. `[64, 32]` means first layer has 64 nodes and second one has 32. feature_columns: An iterable containing all the feature columns used by the model. All items in the set should be instances of classes derived from `FeatureColumn`. model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. optimizer: An instance of `tf.Optimizer` used to train the model. If `None`, will use an Adagrad optimizer. activation_fn: Activation function applied to each layer. If `None`, will use `tf.nn.relu`. Note that a string containing the unqualified name of the op may also be provided, e.g., "relu", "tanh", or "sigmoid". dropout: When not `None`, the probability we will drop out a given coordinate. gradient_clip_norm: A `float` > 0. If provided, gradients are clipped to their global norm with this clipping ratio. See `tf.clip_by_global_norm` for more details. enable_centered_bias: A bool. If True, estimator will learn a centered bias variable for each class. Rest of the model structure learns the residual after centered bias. config: `RunConfig` object to configure the runtime settings. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into the model. label_dimension: Number of regression targets per example. This is the size of the last dimension of the labels and logits `Tensor` objects (typically, these have shape `[batch_size, label_dimension]`). embedding_lr_multipliers: Optional. A dictionary from `EbeddingColumn` to a `float` multiplier. Multiplier will be used to multiply with learning rate for the embedding variables. input_layer_min_slice_size: Optional. The min slice size of input layer partitions. If not provided, will use the default of 64M. Returns: A `DNNRegressor` estimator. """ self._feature_columns = tuple(feature_columns or []) super(DNNRegressor, self).__init__( model_fn=_dnn_model_fn, model_dir=model_dir, config=config, params={ "head": head_lib.regression_head( label_dimension=label_dimension, weight_column_name=weight_column_name, enable_centered_bias=enable_centered_bias), "hidden_units": hidden_units, "feature_columns": self._feature_columns, "optimizer": optimizer, "activation_fn": activation_fn, "dropout": dropout, "gradient_clip_norm": gradient_clip_norm, "embedding_lr_multipliers": embedding_lr_multipliers, "input_layer_min_slice_size": input_layer_min_slice_size, }, feature_engineering_fn=feature_engineering_fn)
[ "def", "__init__", "(", "self", ",", "hidden_units", ",", "feature_columns", ",", "model_dir", "=", "None", ",", "weight_column_name", "=", "None", ",", "optimizer", "=", "None", ",", "activation_fn", "=", "nn", ".", "relu", ",", "dropout", "=", "None", ",", "gradient_clip_norm", "=", "None", ",", "enable_centered_bias", "=", "False", ",", "config", "=", "None", ",", "feature_engineering_fn", "=", "None", ",", "label_dimension", "=", "1", ",", "embedding_lr_multipliers", "=", "None", ",", "input_layer_min_slice_size", "=", "None", ")", ":", "self", ".", "_feature_columns", "=", "tuple", "(", "feature_columns", "or", "[", "]", ")", "super", "(", "DNNRegressor", ",", "self", ")", ".", "__init__", "(", "model_fn", "=", "_dnn_model_fn", ",", "model_dir", "=", "model_dir", ",", "config", "=", "config", ",", "params", "=", "{", "\"head\"", ":", "head_lib", ".", "regression_head", "(", "label_dimension", "=", "label_dimension", ",", "weight_column_name", "=", "weight_column_name", ",", "enable_centered_bias", "=", "enable_centered_bias", ")", ",", "\"hidden_units\"", ":", "hidden_units", ",", "\"feature_columns\"", ":", "self", ".", "_feature_columns", ",", "\"optimizer\"", ":", "optimizer", ",", "\"activation_fn\"", ":", "activation_fn", ",", "\"dropout\"", ":", "dropout", ",", "\"gradient_clip_norm\"", ":", "gradient_clip_norm", ",", "\"embedding_lr_multipliers\"", ":", "embedding_lr_multipliers", ",", "\"input_layer_min_slice_size\"", ":", "input_layer_min_slice_size", ",", "}", ",", "feature_engineering_fn", "=", "feature_engineering_fn", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/learn/python/learn/estimators/dnn.py#L575-L655
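A minimal construction sketch under the signature documented above. It assumes a TF 1.x environment (tf.contrib.learn was removed in TF 2.x), and the feature column is illustrative:

    from tensorflow.contrib.learn import DNNRegressor
    from tensorflow.contrib.layers import real_valued_column

    feature_columns = [real_valued_column('x', dimension=1)]   # hypothetical feature
    estimator = DNNRegressor(hidden_units=[64, 32],   # 64-node then 32-node layers
                             feature_columns=feature_columns,
                             dropout=0.1)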
netket/netket
0d534e54ecbf25b677ea72af6b85947979420652
netket/optimizer/qgt/qgt_jacobian_pytree_logic.py
python
jacobian_cplx
( forward_fn: Callable, params: PyTree, samples: Array, chunk_size: int = None, _build_fn: Callable = partial(jax.tree_multimap, jax.lax.complex), )
return vmap_chunked( _jacobian_cplx, in_axes=(None, None, 0, None), chunk_size=chunk_size )(forward_fn, params, samples, _build_fn)
Calculates Jacobian entries by vmapping grad. Assumes the function is R→C, backpropagates 1 and -1j Args: forward_fn: the log wavefunction ln Ψ params : a pytree of parameters p samples : an array of n samples σ Returns: The Jacobian matrix ∂/∂pₖ ln Ψ(σⱼ) as a PyTree
Calculates Jacobian entries by vmapping grad. Assumes the function is R→C, backpropagates 1 and -1j
[ "Calculates", "Jacobian", "entries", "by", "vmapping", "grad", ".", "Assumes", "the", "function", "is", "R→C", "backpropagates", "1", "and", "-", "1j" ]
def jacobian_cplx( forward_fn: Callable, params: PyTree, samples: Array, chunk_size: int = None, _build_fn: Callable = partial(jax.tree_multimap, jax.lax.complex), ) -> PyTree: """Calculates Jacobian entries by vmapping grad. Assumes the function is R→C, backpropagates 1 and -1j Args: forward_fn: the log wavefunction ln Ψ params : a pytree of parameters p samples : an array of n samples σ Returns: The Jacobian matrix ∂/∂pₖ ln Ψ(σⱼ) as a PyTree """ def _jacobian_cplx(forward_fn, params, samples, _build_fn): y, vjp_fun = jax.vjp(single_sample(forward_fn), params, samples) gr, _ = vjp_fun(np.array(1.0, dtype=jnp.result_type(y))) gi, _ = vjp_fun(np.array(-1.0j, dtype=jnp.result_type(y))) return _build_fn(gr, gi) return vmap_chunked( _jacobian_cplx, in_axes=(None, None, 0, None), chunk_size=chunk_size )(forward_fn, params, samples, _build_fn)
[ "def", "jacobian_cplx", "(", "forward_fn", ":", "Callable", ",", "params", ":", "PyTree", ",", "samples", ":", "Array", ",", "chunk_size", ":", "int", "=", "None", ",", "_build_fn", ":", "Callable", "=", "partial", "(", "jax", ".", "tree_multimap", ",", "jax", ".", "lax", ".", "complex", ")", ",", ")", "->", "PyTree", ":", "def", "_jacobian_cplx", "(", "forward_fn", ",", "params", ",", "samples", ",", "_build_fn", ")", ":", "y", ",", "vjp_fun", "=", "jax", ".", "vjp", "(", "single_sample", "(", "forward_fn", ")", ",", "params", ",", "samples", ")", "gr", ",", "_", "=", "vjp_fun", "(", "np", ".", "array", "(", "1.0", ",", "dtype", "=", "jnp", ".", "result_type", "(", "y", ")", ")", ")", "gi", ",", "_", "=", "vjp_fun", "(", "np", ".", "array", "(", "-", "1.0j", ",", "dtype", "=", "jnp", ".", "result_type", "(", "y", ")", ")", ")", "return", "_build_fn", "(", "gr", ",", "gi", ")", "return", "vmap_chunked", "(", "_jacobian_cplx", ",", "in_axes", "=", "(", "None", ",", "None", ",", "0", ",", "None", ")", ",", "chunk_size", "=", "chunk_size", ")", "(", "forward_fn", ",", "params", ",", "samples", ",", "_build_fn", ")" ]
https://github.com/netket/netket/blob/0d534e54ecbf25b677ea72af6b85947979420652/netket/optimizer/qgt/qgt_jacobian_pytree_logic.py#L78-L105
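The pulled-back cotangents 1 and -1j yield the real and imaginary parts of the complex gradient. A single-sample sketch of the inner _jacobian_cplx, without the pytree/vmap/chunking machinery (log_psi is a toy stand-in for the log wavefunction):

    import numpy as np
    import jax
    import jax.numpy as jnp

    def log_psi(x):                      # toy R -> C "log wavefunction"
        return jnp.sum(x ** 2) + 1j * jnp.sum(jnp.sin(x))

    x = jnp.arange(3.0)
    y, vjp_fun = jax.vjp(log_psi, x)
    gr, = vjp_fun(np.array(1.0, dtype=jnp.result_type(y)))    # backpropagate 1
    gi, = vjp_fun(np.array(-1.0j, dtype=jnp.result_type(y)))  # backpropagate -1j
    grad = jax.lax.complex(gr, gi)       # complex gradient: one Jacobian row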
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/lite/python/convert_saved_model.py
python
freeze_saved_model
(saved_model_dir, input_arrays, input_shapes, output_arrays, tag_set, signature_key)
Converts a SavedModel to a frozen graph. Args: saved_model_dir: SavedModel directory to convert. input_arrays: List of input tensors to freeze graph with. Uses input arrays from SignatureDef when none are provided. input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {"foo": : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {"foo" : None}). output_arrays: List of output tensors to freeze graph with. Uses output arrays from SignatureDef when none are provided. tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. signature_key: Key identifying SignatureDef containing inputs and outputs. Returns: frozen_graph_def: Frozen GraphDef. in_tensors: List of input tensors for the graph. out_tensors: List of output tensors for the graph. graph: `Graph` object. Raises: ValueError: SavedModel doesn't contain a MetaGraphDef identified by tag_set. signature_key is not in the MetaGraphDef. assets/ directory is in the MetaGraphDef. input_shapes does not match the length of input_arrays. input_arrays or output_arrays are not valid.
Converts a SavedModel to a frozen graph.
[ "Converts", "a", "SavedModel", "to", "a", "frozen", "graph", "." ]
def freeze_saved_model(saved_model_dir, input_arrays, input_shapes, output_arrays, tag_set, signature_key): """Converts a SavedModel to a frozen graph. Args: saved_model_dir: SavedModel directory to convert. input_arrays: List of input tensors to freeze graph with. Uses input arrays from SignatureDef when none are provided. input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {"foo": : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {"foo" : None}). output_arrays: List of output tensors to freeze graph with. Uses output arrays from SignatureDef when none are provided. tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. signature_key: Key identifying SignatureDef containing inputs and outputs. Returns: frozen_graph_def: Frozen GraphDef. in_tensors: List of input tensors for the graph. out_tensors: List of output tensors for the graph. graph: `Graph` object. Raises: ValueError: SavedModel doesn't contain a MetaGraphDef identified by tag_set. signature_key is not in the MetaGraphDef. assets/ directory is in the MetaGraphDef. input_shapes does not match the length of input_arrays. input_arrays or output_arrays are not valid. """ # Read SignatureDef. meta_graph = get_meta_graph_def(saved_model_dir, tag_set) signature_def = get_signature_def(meta_graph, signature_key) inputs, outputs = get_inputs_outputs(signature_def) # Check SavedModel for assets directory. collection_def = meta_graph.collection_def if constants.ASSETS_KEY in collection_def: raise ValueError("SavedModels with assets/ directory are not supported.") graph = ops.Graph() with session.Session(graph=graph) as sess: loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir) # Gets input and output tensors. # TODO(zhixianyan): Use TFLite supported Op list to filter outputs. in_tensors = _get_tensors(graph, inputs, input_arrays) out_tensors = _get_tensors(graph, outputs, output_arrays) util.set_tensor_shapes(in_tensors, input_shapes) frozen_graph_def = util.freeze_graph(sess, in_tensors, out_tensors) return frozen_graph_def, in_tensors, out_tensors, sess.graph
[ "def", "freeze_saved_model", "(", "saved_model_dir", ",", "input_arrays", ",", "input_shapes", ",", "output_arrays", ",", "tag_set", ",", "signature_key", ")", ":", "# Read SignatureDef.", "meta_graph", "=", "get_meta_graph_def", "(", "saved_model_dir", ",", "tag_set", ")", "signature_def", "=", "get_signature_def", "(", "meta_graph", ",", "signature_key", ")", "inputs", ",", "outputs", "=", "get_inputs_outputs", "(", "signature_def", ")", "# Check SavedModel for assets directory.", "collection_def", "=", "meta_graph", ".", "collection_def", "if", "constants", ".", "ASSETS_KEY", "in", "collection_def", ":", "raise", "ValueError", "(", "\"SavedModels with assets/ directory are not supported.\"", ")", "graph", "=", "ops", ".", "Graph", "(", ")", "with", "session", ".", "Session", "(", "graph", "=", "graph", ")", "as", "sess", ":", "loader", ".", "load", "(", "sess", ",", "meta_graph", ".", "meta_info_def", ".", "tags", ",", "saved_model_dir", ")", "# Gets input and output tensors.", "# TODO(zhixianyan): Use TFLite supported Op list to filter outputs.", "in_tensors", "=", "_get_tensors", "(", "graph", ",", "inputs", ",", "input_arrays", ")", "out_tensors", "=", "_get_tensors", "(", "graph", ",", "outputs", ",", "output_arrays", ")", "util", ".", "set_tensor_shapes", "(", "in_tensors", ",", "input_shapes", ")", "frozen_graph_def", "=", "util", ".", "freeze_graph", "(", "sess", ",", "in_tensors", ",", "out_tensors", ")", "return", "frozen_graph_def", ",", "in_tensors", ",", "out_tensors", ",", "sess", ".", "graph" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/lite/python/convert_saved_model.py#L155-L207
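A hedged call sketch matching the documented signature; the directory path is hypothetical, 'serve' and 'serving_default' are the conventional TF tag and signature names, and passing None derives the input/output arrays from the SignatureDef:

    # requires a TF1-style SavedModel at the given (hypothetical) path
    frozen_graph_def, in_tensors, out_tensors, graph = freeze_saved_model(
        saved_model_dir='/tmp/my_saved_model',
        input_arrays=None, input_shapes=None, output_arrays=None,
        tag_set=set(['serve']), signature_key='serving_default')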
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/python/virtualenv/virtualenv.py
python
make_relative_path
(source, dest, dest_is_directory=True)
return os.path.sep.join(full_parts)
Make a filename relative, where the filename is dest, and it is being referred to from the filename source. >>> make_relative_path('/usr/share/something/a-file.pth', ... '/usr/share/another-place/src/Directory') '../another-place/src/Directory' >>> make_relative_path('/usr/share/something/a-file.pth', ... '/home/user/src/Directory') '../../../home/user/src/Directory' >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/') './'
Make a filename relative, where the filename is dest, and it is being referred to from the filename source.
[ "Make", "a", "filename", "relative", "where", "the", "filename", "is", "dest", "and", "it", "is", "being", "referred", "to", "from", "the", "filename", "source", "." ]
def make_relative_path(source, dest, dest_is_directory=True): """ Make a filename relative, where the filename is dest, and it is being referred to from the filename source. >>> make_relative_path('/usr/share/something/a-file.pth', ... '/usr/share/another-place/src/Directory') '../another-place/src/Directory' >>> make_relative_path('/usr/share/something/a-file.pth', ... '/home/user/src/Directory') '../../../home/user/src/Directory' >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/') './' """ source = os.path.dirname(source) if not dest_is_directory: dest_filename = os.path.basename(dest) dest = os.path.dirname(dest) dest = os.path.normpath(os.path.abspath(dest)) source = os.path.normpath(os.path.abspath(source)) dest_parts = dest.strip(os.path.sep).split(os.path.sep) source_parts = source.strip(os.path.sep).split(os.path.sep) while dest_parts and source_parts and dest_parts[0] == source_parts[0]: dest_parts.pop(0) source_parts.pop(0) full_parts = ['..']*len(source_parts) + dest_parts if not dest_is_directory: full_parts.append(dest_filename) if not full_parts: # Special case for the current directory (otherwise it'd be '') return './' return os.path.sep.join(full_parts)
[ "def", "make_relative_path", "(", "source", ",", "dest", ",", "dest_is_directory", "=", "True", ")", ":", "source", "=", "os", ".", "path", ".", "dirname", "(", "source", ")", "if", "not", "dest_is_directory", ":", "dest_filename", "=", "os", ".", "path", ".", "basename", "(", "dest", ")", "dest", "=", "os", ".", "path", ".", "dirname", "(", "dest", ")", "dest", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "abspath", "(", "dest", ")", ")", "source", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "abspath", "(", "source", ")", ")", "dest_parts", "=", "dest", ".", "strip", "(", "os", ".", "path", ".", "sep", ")", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "source_parts", "=", "source", ".", "strip", "(", "os", ".", "path", ".", "sep", ")", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "while", "dest_parts", "and", "source_parts", "and", "dest_parts", "[", "0", "]", "==", "source_parts", "[", "0", "]", ":", "dest_parts", ".", "pop", "(", "0", ")", "source_parts", ".", "pop", "(", "0", ")", "full_parts", "=", "[", "'..'", "]", "*", "len", "(", "source_parts", ")", "+", "dest_parts", "if", "not", "dest_is_directory", ":", "full_parts", ".", "append", "(", "dest_filename", ")", "if", "not", "full_parts", ":", "# Special case for the current directory (otherwise it'd be '')", "return", "'./'", "return", "os", ".", "path", ".", "sep", ".", "join", "(", "full_parts", ")" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/virtualenv/virtualenv.py#L1763-L1794
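For the common directory case the standard library gets the same answer; a sketch (note it does not reproduce the './' special case in the doctests above):

    import os.path

    rel = os.path.relpath('/usr/share/another-place/src/Directory',
                          start=os.path.dirname('/usr/share/something/a-file.pth'))
    print(rel)   # ../another-place/src/Directory, matching the first doctest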
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
gpu/command_buffer/build_gles2_cmd_buffer.py
python
CreateHandler.WriteHandlerImplementation
(self, func, file)
Overrriden from TypeHandler.
Overrriden from TypeHandler.
[ "Overrriden", "from", "TypeHandler", "." ]
def WriteHandlerImplementation (self, func, file): """Overrriden from TypeHandler.""" file.Write(" uint32 client_id = c.client_id;\n") file.Write(" if (!%sHelper(%s)) {\n" % (func.name, func.MakeCmdArgString(""))) file.Write(" return error::kInvalidArguments;\n") file.Write(" }\n")
[ "def", "WriteHandlerImplementation", "(", "self", ",", "func", ",", "file", ")", ":", "file", ".", "Write", "(", "\" uint32 client_id = c.client_id;\\n\"", ")", "file", ".", "Write", "(", "\" if (!%sHelper(%s)) {\\n\"", "%", "(", "func", ".", "name", ",", "func", ".", "MakeCmdArgString", "(", "\"\"", ")", ")", ")", "file", ".", "Write", "(", "\" return error::kInvalidArguments;\\n\"", ")", "file", ".", "Write", "(", "\" }\\n\"", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/gpu/command_buffer/build_gles2_cmd_buffer.py#L4190-L4196
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/model/geometry.py
python
fit_plane
(points : Sequence[Vector3])
return normal[0],normal[1],normal[2],-vectorops.dot(centroid,normal)
Returns a 3D plane equation that is a least squares fit through the points (len(points) >= 3).
Returns a 3D plane equation that is a least squares fit through the points (len(points) >= 3).
[ "Returns", "a", "3D", "plane", "equation", "that", "is", "a", "least", "squares", "fit", "through", "the", "points", "(", "len", "(", "points", ")", ">", "=", "3", ")", "." ]
def fit_plane(points : Sequence[Vector3]) -> Tuple[float,float,float,float]: """Returns a 3D plane equation that is a least squares fit through the points (len(points) >= 3).""" centroid,normal = fit_plane_centroid(points) return normal[0],normal[1],normal[2],-vectorops.dot(centroid,normal)
[ "def", "fit_plane", "(", "points", ":", "Sequence", "[", "Vector3", "]", ")", "->", "Tuple", "[", "float", ",", "float", ",", "float", ",", "float", "]", ":", "centroid", ",", "normal", "=", "fit_plane_centroid", "(", "points", ")", "return", "normal", "[", "0", "]", ",", "normal", "[", "1", "]", ",", "normal", "[", "2", "]", ",", "-", "vectorops", ".", "dot", "(", "centroid", ",", "normal", ")" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/model/geometry.py#L320-L324
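fit_plane_centroid is not shown in this record; a common way to implement the same least-squares fit is the centroid plus the smallest singular vector, so here is a self-contained sketch (fit_plane_sketch is a hypothetical stand-in, not the Klampt implementation):

    import numpy as np

    def fit_plane_sketch(points):
        P = np.asarray(points, dtype=float)
        centroid = P.mean(axis=0)
        _, _, vt = np.linalg.svd(P - centroid)   # rows of vt: principal directions
        a, b, c = vt[-1]                         # least-variance direction = normal
        return a, b, c, -centroid.dot(vt[-1])    # plane a*x + b*y + c*z + d = 0

    pts = [(0, 0, 1), (1, 0, 3), (0, 1, 4), (1, 1, 6)]   # all on z = 2x + 3y + 1
    print(fit_plane_sketch(pts))   # proportional to (2, 3, -1, 1), up to sign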
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/WebOb/webob/request.py
python
BaseRequest.as_bytes
(self, skip_body=False)
return b'\r\n'.join(parts)
Return HTTP bytes representing this request. If skip_body is True, exclude the body. If skip_body is an integer larger than one, skip body only if its length is bigger than that number.
Return HTTP bytes representing this request. If skip_body is True, exclude the body. If skip_body is an integer larger than one, skip body only if its length is bigger than that number.
[ "Return", "HTTP", "bytes", "representing", "this", "request", ".", "If", "skip_body", "is", "True", "exclude", "the", "body", ".", "If", "skip_body", "is", "an", "integer", "larger", "than", "one", "skip", "body", "only", "if", "its", "length", "is", "bigger", "than", "that", "number", "." ]
def as_bytes(self, skip_body=False): """ Return HTTP bytes representing this request. If skip_body is True, exclude the body. If skip_body is an integer larger than one, skip body only if its length is bigger than that number. """ url = self.url host = self.host_url assert url.startswith(host) url = url[len(host):] parts = [bytes_('%s %s %s' % (self.method, url, self.http_version))] #self.headers.setdefault('Host', self.host) # acquire body before we handle headers so that # content-length will be set body = None if http_method_probably_has_body.get(self.method): if skip_body > 1: if len(self.body) > skip_body: body = bytes_('<body skipped (len=%s)>' % len(self.body)) else: skip_body = False if not skip_body: body = self.body for k, v in sorted(self.headers.items()): header = bytes_('%s: %s' % (k, v)) parts.append(header) if body: parts.extend([b'', body]) # HTTP clearly specifies CRLF return b'\r\n'.join(parts)
[ "def", "as_bytes", "(", "self", ",", "skip_body", "=", "False", ")", ":", "url", "=", "self", ".", "url", "host", "=", "self", ".", "host_url", "assert", "url", ".", "startswith", "(", "host", ")", "url", "=", "url", "[", "len", "(", "host", ")", ":", "]", "parts", "=", "[", "bytes_", "(", "'%s %s %s'", "%", "(", "self", ".", "method", ",", "url", ",", "self", ".", "http_version", ")", ")", "]", "#self.headers.setdefault('Host', self.host)", "# acquire body before we handle headers so that", "# content-length will be set", "body", "=", "None", "if", "http_method_probably_has_body", ".", "get", "(", "self", ".", "method", ")", ":", "if", "skip_body", ">", "1", ":", "if", "len", "(", "self", ".", "body", ")", ">", "skip_body", ":", "body", "=", "bytes_", "(", "'<body skipped (len=%s)>'", "%", "len", "(", "self", ".", "body", ")", ")", "else", ":", "skip_body", "=", "False", "if", "not", "skip_body", ":", "body", "=", "self", ".", "body", "for", "k", ",", "v", "in", "sorted", "(", "self", ".", "headers", ".", "items", "(", ")", ")", ":", "header", "=", "bytes_", "(", "'%s: %s'", "%", "(", "k", ",", "v", ")", ")", "parts", ".", "append", "(", "header", ")", "if", "body", ":", "parts", ".", "extend", "(", "[", "b''", ",", "body", "]", ")", "# HTTP clearly specifies CRLF", "return", "b'\\r\\n'", ".", "join", "(", "parts", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/WebOb/webob/request.py#L1129-L1162
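A short usage sketch of the skip_body behaviour documented above (assumes WebOb is installed):

    from webob import Request

    req = Request.blank('http://example.com/hello', POST={'name': 'world'})
    print(req.as_bytes(skip_body=5).decode('ascii'))
    # the 10-byte form body exceeds 5, so it is shown as <body skipped (len=10)>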
apache/incubator-mxnet
f03fb23f1d103fec9541b5ae59ee06b1734a51d9
python/mxnet/image/image.py
python
ImageIter.augmentation_transform
(self, data)
return data
Transforms input data with specified augmentation.
Transforms input data with specified augmentation.
[ "Transforms", "input", "data", "with", "specified", "augmentation", "." ]
def augmentation_transform(self, data): """Transforms input data with specified augmentation.""" for aug in self.auglist: data = aug(data) return data
[ "def", "augmentation_transform", "(", "self", ",", "data", ")", ":", "for", "aug", "in", "self", ".", "auglist", ":", "data", "=", "aug", "(", "data", ")", "return", "data" ]
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/image/image.py#L1603-L1607
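The auglist is typically built with mx.image.CreateAugmenter; a sketch of the same loop applied by hand (assumes MXNet is installed; the augmenters expect an HWC NDArray):

    import mxnet as mx

    augs = mx.image.CreateAugmenter(data_shape=(3, 224, 224), rand_mirror=True)
    img = mx.nd.random.uniform(shape=(300, 300, 3))   # HWC float image
    for aug in augs:   # the same loop augmentation_transform runs internally
        img = aug(img)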
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/tree.py
python
wheel_event
(event, widget=None)
return 'break'
Handle scrollwheel event. For wheel up, event.delta = 120*n on Windows, -1*n on darwin, where n can be > 1 if one scrolls fast. Flicking the wheel generates up to maybe 20 events with n up to 10 or more 1. Macs use wheel down (delta = 1*n) to scroll up, so positive delta means to scroll up on both systems. X-11 sends Control-Button-4,5 events instead. The widget parameter is needed so browser label bindings can pass the underlying canvas. This function depends on widget.yview to not be overridden by a subclass.
Handle scrollwheel event.
[ "Handle", "scrollwheel", "event", "." ]
def wheel_event(event, widget=None): """Handle scrollwheel event. For wheel up, event.delta = 120*n on Windows, -1*n on darwin, where n can be > 1 if one scrolls fast. Flicking the wheel generates up to maybe 20 events with n up to 10 or more 1. Macs use wheel down (delta = 1*n) to scroll up, so positive delta means to scroll up on both systems. X-11 sends Control-Button-4,5 events instead. The widget parameter is needed so browser label bindings can pass the underlying canvas. This function depends on widget.yview to not be overridden by a subclass. """ up = {EventType.MouseWheel: event.delta > 0, EventType.ButtonPress: event.num == 4} lines = -5 if up[event.type] else 5 widget = event.widget if widget is None else widget widget.yview(SCROLL, lines, 'units') return 'break'
[ "def", "wheel_event", "(", "event", ",", "widget", "=", "None", ")", ":", "up", "=", "{", "EventType", ".", "MouseWheel", ":", "event", ".", "delta", ">", "0", ",", "EventType", ".", "ButtonPress", ":", "event", ".", "num", "==", "4", "}", "lines", "=", "-", "5", "if", "up", "[", "event", ".", "type", "]", "else", "5", "widget", "=", "event", ".", "widget", "if", "widget", "is", "None", "else", "widget", "widget", ".", "yview", "(", "SCROLL", ",", "lines", ",", "'units'", ")", "return", "'break'" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/tree.py#L59-L81
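A binding sketch for the three event sequences the docstring mentions (MouseWheel on Windows/macOS, Button-4/5 on X11); assumes a CPython build whose idlelib ships this helper:

    import tkinter as tk
    from idlelib.tree import wheel_event

    root = tk.Tk()
    canvas = tk.Canvas(root)
    canvas.pack(fill='both', expand=True)
    for seq in ('<MouseWheel>', '<Button-4>', '<Button-5>'):
        canvas.bind(seq, wheel_event)   # widget defaults to event.widget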
HackWebRTC/webrtc
7abfc990c00ab35090fff285fcf635d1d7892433
PRESUBMIT.py
python
_CalculateAddedDeps
(os_path, old_contents, new_contents)
return results
Helper method for _CheckAddedDepsHaveTargetApprovals. Returns a set of DEPS entries that we should look up. For a directory (rather than a specific filename) we fake a path to a specific filename by adding /DEPS. This is chosen as a file that will seldom or never be subject to per-file include_rules.
Helper method for _CheckAddedDepsHaveTargetApprovals. Returns a set of DEPS entries that we should look up.
[ "Helper", "method", "for", "_CheckAddedDepsHaveTargetApprovals", ".", "Returns", "a", "set", "of", "DEPS", "entries", "that", "we", "should", "look", "up", "." ]
def _CalculateAddedDeps(os_path, old_contents, new_contents): """Helper method for _CheckAddedDepsHaveTargetApprovals. Returns a set of DEPS entries that we should look up. For a directory (rather than a specific filename) we fake a path to a specific filename by adding /DEPS. This is chosen as a file that will seldom or never be subject to per-file include_rules. """ # We ignore deps entries on auto-generated directories. auto_generated_dirs = ['grit', 'jni'] old_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(old_contents)) new_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(new_contents)) added_deps = new_deps.difference(old_deps) results = set() for added_dep in added_deps: if added_dep.split('/')[0] in auto_generated_dirs: continue # Assume that a rule that ends in .h is a rule for a specific file. if added_dep.endswith('.h'): results.add(added_dep) else: results.add(os_path.join(added_dep, 'DEPS')) return results
[ "def", "_CalculateAddedDeps", "(", "os_path", ",", "old_contents", ",", "new_contents", ")", ":", "# We ignore deps entries on auto-generated directories.", "auto_generated_dirs", "=", "[", "'grit'", ",", "'jni'", "]", "old_deps", "=", "_ExtractAddRulesFromParsedDeps", "(", "_ParseDeps", "(", "old_contents", ")", ")", "new_deps", "=", "_ExtractAddRulesFromParsedDeps", "(", "_ParseDeps", "(", "new_contents", ")", ")", "added_deps", "=", "new_deps", ".", "difference", "(", "old_deps", ")", "results", "=", "set", "(", ")", "for", "added_dep", "in", "added_deps", ":", "if", "added_dep", ".", "split", "(", "'/'", ")", "[", "0", "]", "in", "auto_generated_dirs", ":", "continue", "# Assume that a rule that ends in .h is a rule for a specific file.", "if", "added_dep", ".", "endswith", "(", "'.h'", ")", ":", "results", ".", "add", "(", "added_dep", ")", "else", ":", "results", ".", "add", "(", "os_path", ".", "join", "(", "added_dep", ",", "'DEPS'", ")", ")", "return", "results" ]
https://github.com/HackWebRTC/webrtc/blob/7abfc990c00ab35090fff285fcf635d1d7892433/PRESUBMIT.py#L1116-L1141
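A worked example of just the mapping logic at the end of the function, with made-up rule names (the _ParseDeps/_ExtractAddRulesFromParsedDeps plumbing is not shown in this record):

    import os.path as os_path

    added_deps = {'base/files', 'net/http/http_client.h', 'jni/generated'}
    results = set()
    for dep in added_deps:
        if dep.split('/')[0] in ('grit', 'jni'):   # auto-generated dirs are skipped
            continue
        results.add(dep if dep.endswith('.h') else os_path.join(dep, 'DEPS'))
    print(results)   # {'base/files/DEPS', 'net/http/http_client.h'} (set order varies)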
llvm/llvm-project
ffa6262cb4e2a335d26416fad39a581b4f98c5f4
lldb/third_party/Python/module/six/six.py
python
_SixMetaPathImporter.is_package
(self, fullname)
return hasattr(self.__get_module(fullname), "__path__")
Return true, if the named module is a package. We need this method to get correct spec objects with Python 3.4 (see PEP451)
Return true, if the named module is a package.
[ "Return", "true", "if", "the", "named", "module", "is", "a", "package", "." ]
def is_package(self, fullname): """ Return true, if the named module is a package. We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__")
[ "def", "is_package", "(", "self", ",", "fullname", ")", ":", "return", "hasattr", "(", "self", ".", "__get_module", "(", "fullname", ")", ",", "\"__path__\"", ")" ]
https://github.com/llvm/llvm-project/blob/ffa6262cb4e2a335d26416fad39a581b4f98c5f4/lldb/third_party/Python/module/six/six.py#L209-L216
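A quick check of the behaviour through six's module-level importer instance (an internal attribute, so treat this as illustrative):

    import six

    print(six._importer.is_package('six.moves'))        # True: six.moves has __path__
    print(six._importer.is_package('six.moves.queue'))  # False: a plain moved module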
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/clang/tools/scan-build-py/libscanbuild/shell.py
python
encode
(command)
return " ".join([escape(arg) for arg in command])
Takes a command as list and returns a string.
Takes a command as list and returns a string.
[ "Takes", "a", "command", "as", "list", "and", "returns", "a", "string", "." ]
def encode(command): """ Takes a command as list and returns a string. """ def needs_quote(word): """ Returns true if arguments needs to be protected by quotes. Previous implementation was shlex.split method, but that's not good for this job. Currently is running through the string with a basic state checking. """ reserved = {' ', '$', '%', '&', '(', ')', '[', ']', '{', '}', '*', '|', '<', '>', '@', '?', '!'} state = 0 for current in word: if state == 0 and current in reserved: return True elif state == 0 and current == '\\': state = 1 elif state == 1 and current in reserved | {'\\'}: state = 0 elif state == 0 and current == '"': state = 2 elif state == 2 and current == '"': state = 0 elif state == 0 and current == "'": state = 3 elif state == 3 and current == "'": state = 0 return state != 0 def escape(word): """ Do protect argument if that's needed. """ table = {'\\': '\\\\', '"': '\\"'} escaped = ''.join([table.get(c, c) for c in word]) return '"' + escaped + '"' if needs_quote(word) else escaped return " ".join([escape(arg) for arg in command])
[ "def", "encode", "(", "command", ")", ":", "def", "needs_quote", "(", "word", ")", ":", "\"\"\" Returns true if arguments needs to be protected by quotes.\n\n Previous implementation was shlex.split method, but that's not good\n for this job. Currently is running through the string with a basic\n state checking. \"\"\"", "reserved", "=", "{", "' '", ",", "'$'", ",", "'%'", ",", "'&'", ",", "'('", ",", "')'", ",", "'['", ",", "']'", ",", "'{'", ",", "'}'", ",", "'*'", ",", "'|'", ",", "'<'", ",", "'>'", ",", "'@'", ",", "'?'", ",", "'!'", "}", "state", "=", "0", "for", "current", "in", "word", ":", "if", "state", "==", "0", "and", "current", "in", "reserved", ":", "return", "True", "elif", "state", "==", "0", "and", "current", "==", "'\\\\'", ":", "state", "=", "1", "elif", "state", "==", "1", "and", "current", "in", "reserved", "|", "{", "'\\\\'", "}", ":", "state", "=", "0", "elif", "state", "==", "0", "and", "current", "==", "'\"'", ":", "state", "=", "2", "elif", "state", "==", "2", "and", "current", "==", "'\"'", ":", "state", "=", "0", "elif", "state", "==", "0", "and", "current", "==", "\"'\"", ":", "state", "=", "3", "elif", "state", "==", "3", "and", "current", "==", "\"'\"", ":", "state", "=", "0", "return", "state", "!=", "0", "def", "escape", "(", "word", ")", ":", "\"\"\" Do protect argument if that's needed. \"\"\"", "table", "=", "{", "'\\\\'", ":", "'\\\\\\\\'", ",", "'\"'", ":", "'\\\\\"'", "}", "escaped", "=", "''", ".", "join", "(", "[", "table", ".", "get", "(", "c", ",", "c", ")", "for", "c", "in", "word", "]", ")", "return", "'\"'", "+", "escaped", "+", "'\"'", "if", "needs_quote", "(", "word", ")", "else", "escaped", "return", "\" \"", ".", "join", "(", "[", "escape", "(", "arg", ")", "for", "arg", "in", "command", "]", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/clang/tools/scan-build-py/libscanbuild/shell.py#L14-L52
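A usage sketch, assuming libscanbuild is importable (it ships with clang's scan-build-py); the space inside the define forces the quoting path of needs_quote:

    from libscanbuild.shell import encode

    print(encode(['clang', '-DNAME=foo bar', 'src/main.c']))
    # clang "-DNAME=foo bar" src/main.c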
zhuli19901106/leetcode-zhuli
0f8fc29ccb8c33ea91149ecb2d4e961024c11db7
explore/queue-stack/1337_design-circular-queue_1_AC.py
python
MyCircularQueue.Rear
(self)
return self.arr[(self.head + self.cap) % (self.cap + 1)]
Get the last item from the queue.
Get the last item from the queue.
[ "Get", "the", "last", "item", "from", "the", "queue", "." ]
def Rear(self) -> int: """ Get the last item from the queue. """ if self.isEmpty(): return MyCircularQueue.NULL_VAL return self.arr[(self.head + self.cap) % (self.cap + 1)]
[ "def", "Rear", "(", "self", ")", "->", "int", ":", "if", "self", ".", "isEmpty", "(", ")", ":", "return", "MyCircularQueue", ".", "NULL_VAL", "return", "self", ".", "arr", "[", "(", "self", ".", "head", "+", "self", ".", "cap", ")", "%", "(", "self", ".", "cap", "+", "1", ")", "]" ]
https://github.com/zhuli19901106/leetcode-zhuli/blob/0f8fc29ccb8c33ea91149ecb2d4e961024c11db7/explore/queue-stack/1337_design-circular-queue_1_AC.py#L46-L52
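The index arithmetic is the interesting part: with cap + 1 slots in the ring, (head + cap) % (cap + 1) is the slot immediately before head. A worked check (the rest of the class is not shown in this record):

    cap, head = 3, 1                    # ring of cap + 1 == 4 slots
    rear = (head + cap) % (cap + 1)     # (1 + 3) % 4 == 0, i.e. (head - 1) mod 4
    print(rear)                         # 0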
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/codegen.py
python
JITCodeLibrary.get_pointer_to_function
(self, name)
Generate native code for function named *name* and return a pointer to the start of the function (as an integer). This function implicitly calls .finalize(). Returns ------- pointer : int - zero (null) if no symbol of *name* is defined by this code library. - non-zero if the symbol is defined.
Generate native code for function named *name* and return a pointer to the start of the function (as an integer).
[ "Generate", "native", "code", "for", "function", "named", "*", "name", "*", "and", "return", "a", "pointer", "to", "the", "start", "of", "the", "function", "(", "as", "an", "integer", ")", "." ]
def get_pointer_to_function(self, name): """ Generate native code for function named *name* and return a pointer to the start of the function (as an integer). This function implicitly calls .finalize(). Returns ------- pointer : int - zero (null) if no symbol of *name* is defined by this code library. - non-zero if the symbol is defined. """ self._ensure_finalized() ee = self._codegen._engine if not ee.is_symbol_defined(name): return 0 else: return self._codegen._engine.get_function_address(name)
[ "def", "get_pointer_to_function", "(", "self", ",", "name", ")", ":", "self", ".", "_ensure_finalized", "(", ")", "ee", "=", "self", ".", "_codegen", ".", "_engine", "if", "not", "ee", ".", "is_symbol_defined", "(", "name", ")", ":", "return", "0", "else", ":", "return", "self", ".", "_codegen", ".", "_engine", ".", "get_function_address", "(", "name", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/codegen.py#L475-L494
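Given the returned integer address, a ctypes prototype is one way to call the jitted code; a sketch assuming a symbol with C signature double(double) (the library object itself is numba-internal and hypothetical here):

    import ctypes

    proto = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)
    ptr = library.get_pointer_to_function('my_func')   # hypothetical library/symbol
    if ptr:                  # zero means the symbol is not defined
        fn = proto(ptr)
        print(fn(2.0))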
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_controls.py
python
TreeCtrl.SetFocusedItem
(*args, **kwargs)
return _controls_.TreeCtrl_SetFocusedItem(*args, **kwargs)
SetFocusedItem(self, TreeItemId item)
SetFocusedItem(self, TreeItemId item)
[ "SetFocusedItem", "(", "self", "TreeItemId", "item", ")" ]
def SetFocusedItem(*args, **kwargs): """SetFocusedItem(self, TreeItemId item)""" return _controls_.TreeCtrl_SetFocusedItem(*args, **kwargs)
[ "def", "SetFocusedItem", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "TreeCtrl_SetFocusedItem", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_controls.py#L5379-L5381
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py3/pandas/core/dtypes/concat.py
python
cast_to_common_type
(arr: ArrayLike, dtype: DtypeObj)
return arr.astype(dtype, copy=False)
Helper function for `arr.astype(common_dtype)` but handling all special cases.
Helper function for `arr.astype(common_dtype)` but handling all special cases.
[ "Helper", "function", "for", "arr", ".", "astype", "(", "common_dtype", ")", "but", "handling", "all", "special", "cases", "." ]
def cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: """ Helper function for `arr.astype(common_dtype)` but handling all special cases. """ if is_dtype_equal(arr.dtype, dtype): return arr if ( is_categorical_dtype(arr.dtype) and isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.integer) ): # problem case: categorical of int -> gives int as result dtype, # but categorical can contain NAs -> fall back to object dtype try: return arr.astype(dtype, copy=False) except ValueError: return arr.astype(object, copy=False) if is_sparse(arr) and not is_sparse(dtype): # problem case: SparseArray.astype(dtype) doesn't follow the specified # dtype exactly, but converts this to Sparse[dtype] -> first manually # convert to dense array arr = cast(SparseArray, arr) return arr.to_dense().astype(dtype, copy=False) if ( isinstance(arr, np.ndarray) and arr.dtype.kind in ["m", "M"] and dtype is np.dtype("object") ): # wrap datetime-likes in EA to ensure astype(object) gives Timestamp/Timedelta # this can happen when concat_compat is called directly on arrays (when arrays # are not coming from Index/Series._values), eg in BlockManager.quantile arr = ensure_wrapped_if_datetimelike(arr) if isinstance(dtype, ExtensionDtype): if isinstance(arr, np.ndarray): # numpy's astype cannot handle ExtensionDtypes return pd_array(arr, dtype=dtype, copy=False) return arr.astype(dtype, copy=False) return arr.astype(dtype, copy=False)
[ "def", "cast_to_common_type", "(", "arr", ":", "ArrayLike", ",", "dtype", ":", "DtypeObj", ")", "->", "ArrayLike", ":", "if", "is_dtype_equal", "(", "arr", ".", "dtype", ",", "dtype", ")", ":", "return", "arr", "if", "(", "is_categorical_dtype", "(", "arr", ".", "dtype", ")", "and", "isinstance", "(", "dtype", ",", "np", ".", "dtype", ")", "and", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "integer", ")", ")", ":", "# problem case: categorical of int -> gives int as result dtype,", "# but categorical can contain NAs -> fall back to object dtype", "try", ":", "return", "arr", ".", "astype", "(", "dtype", ",", "copy", "=", "False", ")", "except", "ValueError", ":", "return", "arr", ".", "astype", "(", "object", ",", "copy", "=", "False", ")", "if", "is_sparse", "(", "arr", ")", "and", "not", "is_sparse", "(", "dtype", ")", ":", "# problem case: SparseArray.astype(dtype) doesn't follow the specified", "# dtype exactly, but converts this to Sparse[dtype] -> first manually", "# convert to dense array", "arr", "=", "cast", "(", "SparseArray", ",", "arr", ")", "return", "arr", ".", "to_dense", "(", ")", ".", "astype", "(", "dtype", ",", "copy", "=", "False", ")", "if", "(", "isinstance", "(", "arr", ",", "np", ".", "ndarray", ")", "and", "arr", ".", "dtype", ".", "kind", "in", "[", "\"m\"", ",", "\"M\"", "]", "and", "dtype", "is", "np", ".", "dtype", "(", "\"object\"", ")", ")", ":", "# wrap datetime-likes in EA to ensure astype(object) gives Timestamp/Timedelta", "# this can happen when concat_compat is called directly on arrays (when arrays", "# are not coming from Index/Series._values), eg in BlockManager.quantile", "arr", "=", "ensure_wrapped_if_datetimelike", "(", "arr", ")", "if", "isinstance", "(", "dtype", ",", "ExtensionDtype", ")", ":", "if", "isinstance", "(", "arr", ",", "np", ".", "ndarray", ")", ":", "# numpy's astype cannot handle ExtensionDtypes", "return", "pd_array", "(", "arr", ",", "dtype", "=", "dtype", ",", "copy", "=", "False", ")", "return", "arr", ".", "astype", "(", "dtype", ",", "copy", "=", "False", ")", "return", "arr", ".", "astype", "(", "dtype", ",", "copy", "=", "False", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/dtypes/concat.py#L33-L75
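A demonstration of the documented categorical-with-NA fallback, using the private API as vendored above (pandas 1.3-era; the import path may differ in other releases):

    import numpy as np
    import pandas as pd
    from pandas.core.dtypes.concat import cast_to_common_type

    cat = pd.Categorical([1, 2, None])
    out = cast_to_common_type(cat, np.dtype('int64'))
    print(out.dtype)   # object: the NA forces the documented fall-back branch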
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/python/requests/requests/sessions.py
python
Session.merge_environment_settings
(self, url, proxies, stream, verify, cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream, 'cert': cert}
Check the environment and merge it with some settings.
Check the environment and merge it with some settings.
[ "Check", "the", "environment", "and", "merge", "it", "with", "some", "settings", "." ]
def merge_environment_settings(self, url, proxies, stream, verify, cert): """Check the environment and merge it with some settings.""" # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. env_proxies = get_environ_proxies(url) or {} for (k, v) in env_proxies.items(): proxies.setdefault(k, v) # Look for requests environment configuration and be compatible # with cURL. if verify is True or verify is None: verify = (os.environ.get('REQUESTS_CA_BUNDLE') or os.environ.get('CURL_CA_BUNDLE')) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) return {'verify': verify, 'proxies': proxies, 'stream': stream, 'cert': cert}
[ "def", "merge_environment_settings", "(", "self", ",", "url", ",", "proxies", ",", "stream", ",", "verify", ",", "cert", ")", ":", "# Gather clues from the surrounding environment.", "if", "self", ".", "trust_env", ":", "# Set environment's proxies.", "env_proxies", "=", "get_environ_proxies", "(", "url", ")", "or", "{", "}", "for", "(", "k", ",", "v", ")", "in", "env_proxies", ".", "items", "(", ")", ":", "proxies", ".", "setdefault", "(", "k", ",", "v", ")", "# Look for requests environment configuration and be compatible", "# with cURL.", "if", "verify", "is", "True", "or", "verify", "is", "None", ":", "verify", "=", "(", "os", ".", "environ", ".", "get", "(", "'REQUESTS_CA_BUNDLE'", ")", "or", "os", ".", "environ", ".", "get", "(", "'CURL_CA_BUNDLE'", ")", ")", "# Merge all the kwargs.", "proxies", "=", "merge_setting", "(", "proxies", ",", "self", ".", "proxies", ")", "stream", "=", "merge_setting", "(", "stream", ",", "self", ".", "stream", ")", "verify", "=", "merge_setting", "(", "verify", ",", "self", ".", "verify", ")", "cert", "=", "merge_setting", "(", "cert", ",", "self", ".", "cert", ")", "return", "{", "'verify'", ":", "verify", ",", "'proxies'", ":", "proxies", ",", "'stream'", ":", "stream", ",", "'cert'", ":", "cert", "}" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/requests/requests/sessions.py#L614-L636
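A short usage sketch; the method is available on requests.Session and is invoked by Session.request before sending:

    import requests

    s = requests.Session()
    s.proxies = {'https': 'http://proxy.internal:3128'}   # hypothetical proxy
    settings = s.merge_environment_settings(
        'https://example.com', proxies={}, stream=None, verify=None, cert=None)
    print(settings['proxies'])   # session proxies merged with any environment ones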
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/traitlets/py2/traitlets/config/configurable.py
python
Configurable.section_names
(cls)
return [c.__name__ for c in reversed(cls.__mro__) if issubclass(c, Configurable) and issubclass(cls, c) ]
return section names as a list
return section names as a list
[ "return", "section", "names", "as", "a", "list" ]
def section_names(cls): """return section names as a list""" return [c.__name__ for c in reversed(cls.__mro__) if issubclass(c, Configurable) and issubclass(cls, c) ]
[ "def", "section_names", "(", "cls", ")", ":", "return", "[", "c", ".", "__name__", "for", "c", "in", "reversed", "(", "cls", ".", "__mro__", ")", "if", "issubclass", "(", "c", ",", "Configurable", ")", "and", "issubclass", "(", "cls", ",", "c", ")", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/traitlets/py2/traitlets/config/configurable.py#L100-L104
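A worked example: the reversed MRO walk yields base-to-derived order, so config sections resolve with the most-derived class last:

    from traitlets.config.configurable import Configurable

    class Base(Configurable):
        pass

    class App(Base):
        pass

    print(App.section_names())   # ['Configurable', 'Base', 'App']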
bkaradzic/bgfx
e42fe374c33138a84d5e959566be4a77546310f6
3rdparty/glslang/build_info.py
python
deduce_software_version
(directory)
Returns a software version number parsed from the CHANGES.md file in the given directory. The CHANGES.md file describes most recent versions first.
Returns a software version number parsed from the CHANGES.md file in the given directory.
[ "Returns", "a", "software", "version", "number", "parsed", "from", "the", "CHANGES", ".", "md", "file", "in", "the", "given", "directory", "." ]
def deduce_software_version(directory): """Returns a software version number parsed from the CHANGES.md file in the given directory. The CHANGES.md file describes most recent versions first. """ # Match the first well-formed version-and-date line. # Allow trailing whitespace in the checked-out source code has # unexpected carriage returns on a linefeed-only system such as # Linux. pattern = re.compile(r'^#* +(\d+)\.(\d+)\.(\d+)(-\w+)? (\d\d\d\d-\d\d-\d\d)? *$') changes_file = os.path.join(directory, 'CHANGES.md') with open(changes_file, mode='r') as f: for line in f.readlines(): match = pattern.match(line) if match: flavor = match.group(4) if flavor == None: flavor = "" return { "major": match.group(1), "minor": match.group(2), "patch": match.group(3), "flavor": flavor.lstrip("-"), "-flavor": flavor, "date": match.group(5), } raise Exception('No version number found in {}'.format(changes_file))
[ "def", "deduce_software_version", "(", "directory", ")", ":", "# Match the first well-formed version-and-date line.", "# Allow trailing whitespace in the checked-out source code has", "# unexpected carriage returns on a linefeed-only system such as", "# Linux.", "pattern", "=", "re", ".", "compile", "(", "r'^#* +(\\d+)\\.(\\d+)\\.(\\d+)(-\\w+)? (\\d\\d\\d\\d-\\d\\d-\\d\\d)? *$'", ")", "changes_file", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'CHANGES.md'", ")", "with", "open", "(", "changes_file", ",", "mode", "=", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "match", "=", "pattern", ".", "match", "(", "line", ")", "if", "match", ":", "flavor", "=", "match", ".", "group", "(", "4", ")", "if", "flavor", "==", "None", ":", "flavor", "=", "\"\"", "return", "{", "\"major\"", ":", "match", ".", "group", "(", "1", ")", ",", "\"minor\"", ":", "match", ".", "group", "(", "2", ")", ",", "\"patch\"", ":", "match", ".", "group", "(", "3", ")", ",", "\"flavor\"", ":", "flavor", ".", "lstrip", "(", "\"-\"", ")", ",", "\"-flavor\"", ":", "flavor", ",", "\"date\"", ":", "match", ".", "group", "(", "5", ")", ",", "}", "raise", "Exception", "(", "'No version number found in {}'", ".", "format", "(", "changes_file", ")", ")" ]
https://github.com/bkaradzic/bgfx/blob/e42fe374c33138a84d5e959566be4a77546310f6/3rdparty/glslang/build_info.py#L86-L114
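The regex is the heart of it; a self-contained check against a typical CHANGES.md heading (the version line is made up):

    import re

    pattern = re.compile(r'^#* +(\d+)\.(\d+)\.(\d+)(-\w+)? (\d\d\d\d-\d\d-\d\d)? *$')
    m = pattern.match('## 11.8.0 2021-11-10')
    print(m.groups())   # ('11', '8', '0', None, '2021-11-10')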
google/earthenterprise
0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9
earth_enterprise/src/scons/khEnvironment.py
python
EmitBuildDateFunc
(target, build_date)
Emits build date information to target file.
Emits build date information to target file.
[ "Emits", "build", "date", "information", "to", "target", "file", "." ]
def EmitBuildDateFunc(target, build_date): """Emits build date information to target file.""" fp = open(target, 'w') fp.writelines(['// DO NOT MODIFY - auto-generated file\n', 'extern const char *const BUILD_DATE = "' + time.strftime('%Y-%m-%d', build_date) + '";\n', 'extern const char *const BUILD_YEAR = "' + time.strftime('%Y', build_date) + '";\n', 'extern const char *const BUILD_MONTH = "' + time.strftime('%m', build_date) + '";\n', 'extern const char *const BUILD_DAY = "' + time.strftime('%d', build_date) + '";\n', ]) fp.close()
[ "def", "EmitBuildDateFunc", "(", "target", ",", "build_date", ")", ":", "fp", "=", "open", "(", "target", ",", "'w'", ")", "fp", ".", "writelines", "(", "[", "'// DO NOT MODIFY - auto-generated file\\n'", ",", "'extern const char *const BUILD_DATE = \"'", "+", "time", ".", "strftime", "(", "'%Y-%m-%d'", ",", "build_date", ")", "+", "'\";\\n'", ",", "'extern const char *const BUILD_YEAR = \"'", "+", "time", ".", "strftime", "(", "'%Y'", ",", "build_date", ")", "+", "'\";\\n'", ",", "'extern const char *const BUILD_MONTH = \"'", "+", "time", ".", "strftime", "(", "'%m'", ",", "build_date", ")", "+", "'\";\\n'", ",", "'extern const char *const BUILD_DAY = \"'", "+", "time", ".", "strftime", "(", "'%d'", ",", "build_date", ")", "+", "'\";\\n'", ",", "]", ")", "fp", ".", "close", "(", ")" ]
https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/scons/khEnvironment.py#L170-L183
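A call sketch, assuming the function above is in scope; the target filename is hypothetical:

    import time
    # assumes EmitBuildDateFunc (shown above) has been imported or pasted into scope

    EmitBuildDateFunc('builddate.cpp', time.strptime('2024-01-15', '%Y-%m-%d'))
    # builddate.cpp now begins:
    # // DO NOT MODIFY - auto-generated file
    # extern const char *const BUILD_DATE = "2024-01-15";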
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/boost_1_66_0/tools/build/src/build/scanner.py
python
get
(scanner_class, properties)
return __scanner_cache[scanner_id]
Returns an instance of previously registered scanner with the specified properties.
Returns an instance of previously registered scanner with the specified properties.
[ "Returns", "an", "instance", "of", "previously", "registered", "scanner", "with", "the", "specified", "properties", "." ]
def get(scanner_class, properties): """ Returns an instance of previously registered scanner with the specified properties. """ assert issubclass(scanner_class, Scanner) assert is_iterable_typed(properties, basestring) scanner_name = str(scanner_class) if not registered(scanner_name): raise BaseException ("attempt to get unregisted scanner: %s" % scanner_name) relevant_properties = __scanners[scanner_name] r = property.select(relevant_properties, properties) scanner_id = scanner_name + '.' + '-'.join(r) if scanner_id not in __scanner_cache: __scanner_cache[scanner_id] = scanner_class(r) return __scanner_cache[scanner_id]
[ "def", "get", "(", "scanner_class", ",", "properties", ")", ":", "assert", "issubclass", "(", "scanner_class", ",", "Scanner", ")", "assert", "is_iterable_typed", "(", "properties", ",", "basestring", ")", "scanner_name", "=", "str", "(", "scanner_class", ")", "if", "not", "registered", "(", "scanner_name", ")", ":", "raise", "BaseException", "(", "\"attempt to get unregisted scanner: %s\"", "%", "scanner_name", ")", "relevant_properties", "=", "__scanners", "[", "scanner_name", "]", "r", "=", "property", ".", "select", "(", "relevant_properties", ",", "properties", ")", "scanner_id", "=", "scanner_name", "+", "'.'", "+", "'-'", ".", "join", "(", "r", ")", "if", "scanner_id", "not", "in", "__scanner_cache", ":", "__scanner_cache", "[", "scanner_id", "]", "=", "scanner_class", "(", "r", ")", "return", "__scanner_cache", "[", "scanner_id", "]" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/boost_1_66_0/tools/build/src/build/scanner.py#L68-L87
quantOS-org/DataCore
e2ef9bd2c22ee9e2845675b6435a14fa607f3551
mdlink/deps/windows/protobuf-2.5.0/python/google/protobuf/descriptor.py
python
FieldDescriptor.__init__
(self, name, full_name, index, number, type, cpp_type, label, default_value, message_type, enum_type, containing_type, is_extension, extension_scope, options=None, has_default_value=True)
The arguments are as described in the description of FieldDescriptor attributes above. Note that containing_type may be None, and may be set later if necessary (to deal with circular references between message types, for example). Likewise for extension_scope.
The arguments are as described in the description of FieldDescriptor attributes above.
[ "The", "arguments", "are", "as", "described", "in", "the", "description", "of", "FieldDescriptor", "attributes", "above", "." ]
def __init__(self, name, full_name, index, number, type, cpp_type, label, default_value, message_type, enum_type, containing_type, is_extension, extension_scope, options=None, has_default_value=True): """The arguments are as described in the description of FieldDescriptor attributes above. Note that containing_type may be None, and may be set later if necessary (to deal with circular references between message types, for example). Likewise for extension_scope. """ super(FieldDescriptor, self).__init__(options, 'FieldOptions') self.name = name self.full_name = full_name self.index = index self.number = number self.type = type self.cpp_type = cpp_type self.label = label self.has_default_value = has_default_value self.default_value = default_value self.containing_type = containing_type self.message_type = message_type self.enum_type = enum_type self.is_extension = is_extension self.extension_scope = extension_scope if api_implementation.Type() == 'cpp': if is_extension: if api_implementation.Version() == 2: self._cdescriptor = _message.GetExtensionDescriptor(full_name) else: self._cdescriptor = cpp_message.GetExtensionDescriptor(full_name) else: if api_implementation.Version() == 2: self._cdescriptor = _message.GetFieldDescriptor(full_name) else: self._cdescriptor = cpp_message.GetFieldDescriptor(full_name) else: self._cdescriptor = None
[ "def", "__init__", "(", "self", ",", "name", ",", "full_name", ",", "index", ",", "number", ",", "type", ",", "cpp_type", ",", "label", ",", "default_value", ",", "message_type", ",", "enum_type", ",", "containing_type", ",", "is_extension", ",", "extension_scope", ",", "options", "=", "None", ",", "has_default_value", "=", "True", ")", ":", "super", "(", "FieldDescriptor", ",", "self", ")", ".", "__init__", "(", "options", ",", "'FieldOptions'", ")", "self", ".", "name", "=", "name", "self", ".", "full_name", "=", "full_name", "self", ".", "index", "=", "index", "self", ".", "number", "=", "number", "self", ".", "type", "=", "type", "self", ".", "cpp_type", "=", "cpp_type", "self", ".", "label", "=", "label", "self", ".", "has_default_value", "=", "has_default_value", "self", ".", "default_value", "=", "default_value", "self", ".", "containing_type", "=", "containing_type", "self", ".", "message_type", "=", "message_type", "self", ".", "enum_type", "=", "enum_type", "self", ".", "is_extension", "=", "is_extension", "self", ".", "extension_scope", "=", "extension_scope", "if", "api_implementation", ".", "Type", "(", ")", "==", "'cpp'", ":", "if", "is_extension", ":", "if", "api_implementation", ".", "Version", "(", ")", "==", "2", ":", "self", ".", "_cdescriptor", "=", "_message", ".", "GetExtensionDescriptor", "(", "full_name", ")", "else", ":", "self", ".", "_cdescriptor", "=", "cpp_message", ".", "GetExtensionDescriptor", "(", "full_name", ")", "else", ":", "if", "api_implementation", ".", "Version", "(", ")", "==", "2", ":", "self", ".", "_cdescriptor", "=", "_message", ".", "GetFieldDescriptor", "(", "full_name", ")", "else", ":", "self", ".", "_cdescriptor", "=", "cpp_message", ".", "GetFieldDescriptor", "(", "full_name", ")", "else", ":", "self", ".", "_cdescriptor", "=", "None" ]
https://github.com/quantOS-org/DataCore/blob/e2ef9bd2c22ee9e2845675b6435a14fa607f3551/mdlink/deps/windows/protobuf-2.5.0/python/google/protobuf/descriptor.py#L428-L466
openvinotoolkit/openvino
dedcbeafa8b84cccdc55ca64b8da516682b381c7
src/bindings/python/src/compatibility/ngraph/opset1/ops.py
python
result
(data: NodeInput, name: Optional[str] = None)
return _get_node_factory_opset1().create("Result", [data])
Return a node which represents an output of a graph (Function). :param data: The tensor containing the input data :return: Result node
Return a node which represents an output of a graph (Function).
[ "Return", "a", "node", "which", "represents", "an", "output", "of", "a", "graph", "(", "Function", ")", "." ]
def result(data: NodeInput, name: Optional[str] = None) -> Node: """Return a node which represents an output of a graph (Function). :param data: The tensor containing the input data :return: Result node """ return _get_node_factory_opset1().create("Result", [data])
[ "def", "result", "(", "data", ":", "NodeInput", ",", "name", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Node", ":", "return", "_get_node_factory_opset1", "(", ")", ".", "create", "(", "\"Result\"", ",", "[", "data", "]", ")" ]
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/src/bindings/python/src/compatibility/ngraph/opset1/ops.py#L2442-L2448
leggedrobotics/free_gait
93e6c2f385fe9ac7107153965e14f6b7a1e0d702
free_gait_python/src/free_gait/free_gait.py
python
LocalTransformListener.unregister
(self)
Unregisters all tf subscribers.
Unregisters all tf subscribers.
[ "Unregisters", "all", "tf", "subscribers", "." ]
def unregister(self): """ Unregisters all tf subscribers. """ self.tf_sub.unregister() self.tf_static_sub.unregister()
[ "def", "unregister", "(", "self", ")", ":", "self", ".", "tf_sub", ".", "unregister", "(", ")", "self", ".", "tf_static_sub", ".", "unregister", "(", ")" ]
https://github.com/leggedrobotics/free_gait/blob/93e6c2f385fe9ac7107153965e14f6b7a1e0d702/free_gait_python/src/free_gait/free_gait.py#L583-L588
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/cuda/amp/grad_scaler.py
python
GradScaler.unscale_
(self, optimizer)
Divides ("unscales") the optimizer's gradient tensors by the scale factor. :meth:`unscale_` is optional, serving cases where you need to :ref:`modify or inspect gradients<working-with-unscaled-gradients>` between the backward pass(es) and :meth:`step`. If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`. Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients:: ... scaler.scale(loss).backward() scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) scaler.step(optimizer) scaler.update() Args: optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled. .. note:: :meth:`unscale_` does not incur a CPU-GPU sync. .. warning:: :meth:`unscale_` should only be called once per optimizer per :meth:`step` call, and only after all gradients for that optimizer's assigned parameters have been accumulated. Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError. .. warning:: :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
Divides ("unscales") the optimizer's gradient tensors by the scale factor.
[ "Divides", "(", "unscales", ")", "the", "optimizer", "s", "gradient", "tensors", "by", "the", "scale", "factor", "." ]
def unscale_(self, optimizer): """ Divides ("unscales") the optimizer's gradient tensors by the scale factor. :meth:`unscale_` is optional, serving cases where you need to :ref:`modify or inspect gradients<working-with-unscaled-gradients>` between the backward pass(es) and :meth:`step`. If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`. Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients:: ... scaler.scale(loss).backward() scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) scaler.step(optimizer) scaler.update() Args: optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled. .. note:: :meth:`unscale_` does not incur a CPU-GPU sync. .. warning:: :meth:`unscale_` should only be called once per optimizer per :meth:`step` call, and only after all gradients for that optimizer's assigned parameters have been accumulated. Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError. .. warning:: :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute. """ if not self._enabled: return self._check_scale_growth_tracker("unscale_") optimizer_state = self._per_optimizer_states[id(optimizer)] if optimizer_state["stage"] is OptState.UNSCALED: raise RuntimeError("unscale_() has already been called on this optimizer since the last update().") elif optimizer_state["stage"] is OptState.STEPPED: raise RuntimeError("unscale_() is being called after step().") # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64. assert self._scale is not None inv_scale = self._scale.double().reciprocal().float() found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device) optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, False) optimizer_state["stage"] = OptState.UNSCALED
[ "def", "unscale_", "(", "self", ",", "optimizer", ")", ":", "if", "not", "self", ".", "_enabled", ":", "return", "self", ".", "_check_scale_growth_tracker", "(", "\"unscale_\"", ")", "optimizer_state", "=", "self", ".", "_per_optimizer_states", "[", "id", "(", "optimizer", ")", "]", "if", "optimizer_state", "[", "\"stage\"", "]", "is", "OptState", ".", "UNSCALED", ":", "raise", "RuntimeError", "(", "\"unscale_() has already been called on this optimizer since the last update().\"", ")", "elif", "optimizer_state", "[", "\"stage\"", "]", "is", "OptState", ".", "STEPPED", ":", "raise", "RuntimeError", "(", "\"unscale_() is being called after step().\"", ")", "# FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.", "assert", "self", ".", "_scale", "is", "not", "None", "inv_scale", "=", "self", ".", "_scale", ".", "double", "(", ")", ".", "reciprocal", "(", ")", ".", "float", "(", ")", "found_inf", "=", "torch", ".", "full", "(", "(", "1", ",", ")", ",", "0.0", ",", "dtype", "=", "torch", ".", "float32", ",", "device", "=", "self", ".", "_scale", ".", "device", ")", "optimizer_state", "[", "\"found_inf_per_device\"", "]", "=", "self", ".", "_unscale_grads_", "(", "optimizer", ",", "inv_scale", ",", "found_inf", ",", "False", ")", "optimizer_state", "[", "\"stage\"", "]", "=", "OptState", ".", "UNSCALED" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/cuda/amp/grad_scaler.py#L230-L280
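The docstring above only sketches the clipping pattern; a fuller minimal sketch of the same loop follows, assuming CUDA is available and hypothetical `model`, `optimizer`, and `loader` objects (none of which come from the record itself):

import torch

scaler = torch.cuda.amp.GradScaler()
for inputs, targets in loader:                      # `loader` is hypothetical
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)                      # gradients now hold their true values
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    scaler.step(optimizer)                          # skipped internally if infs/NaNs were found
    scaler.update()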
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/model.py
python
Info.search
(cls, name_prefix=None)
return query
Create search query based on info record name prefix. Args: name_prefix: User input name-prefix to search for. If name_prefix is empty string or None returns all records of Info sub-class. Records are sorted by their encoded name. Returns: Datastore query pointing to search results.
Create search query based on info record name prefix.
[ "Create", "search", "query", "based", "on", "info", "record", "name", "prefix", "." ]
def search(cls, name_prefix=None): """Create search query based on info record name prefix. Args: name_prefix: User input name-prefix to search for. If name_prefix is empty string or None returns all records of Info sub-class. Records are sorted by their encoded name. Returns: Datastore query pointing to search results. """ name_prefix = _normalize_name(name_prefix) query = cls.all().order('encoded_name') if name_prefix: query.filter('encoded_name >=', db.ByteString(name_prefix)) # Do not need to worry about name_prefix + '\xff\xff' because not # a unicode character. query.filter('encoded_name <=', db.ByteString(name_prefix + '\xff')) return query
[ "def", "search", "(", "cls", ",", "name_prefix", "=", "None", ")", ":", "name_prefix", "=", "_normalize_name", "(", "name_prefix", ")", "query", "=", "cls", ".", "all", "(", ")", ".", "order", "(", "'encoded_name'", ")", "if", "name_prefix", ":", "query", ".", "filter", "(", "'encoded_name >='", ",", "db", ".", "ByteString", "(", "name_prefix", ")", ")", "# Do not need to worry about name_prefix + '\\xff\\xff' because not", "# a unicode character.", "query", ".", "filter", "(", "'encoded_name <='", ",", "db", ".", "ByteString", "(", "name_prefix", "+", "'\\xff'", ")", ")", "return", "query" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/protorpc/demos/tunes_db/server/model.py#L85-L103
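The two inequality filters in the record above implement the classic prefix-range trick on a sorted index; a minimal pure-Python sketch of the same bound computation (assuming plain byte strings, as the original comment notes):

def prefix_bounds(prefix):
    # Any byte string starting with `prefix` sorts between these two bounds.
    return prefix, prefix + '\xff'

lo, hi = prefix_bounds('beat')
names = ['beach', 'beat', 'beatles', 'beats', 'bet']
print([n for n in names if lo <= n <= hi])  # ['beat', 'beatles', 'beats']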
linyouhappy/kongkongxiyou
7a69b2913eb29f4be77f9a62fb90cdd72c4160f1
cocosjs/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py
python
SourceRange.end
(self)
return conf.lib.clang_getRangeEnd(self)
Return a SourceLocation representing the last character within a source range.
Return a SourceLocation representing the last character within a source range.
[ "Return", "a", "SourceLocation", "representing", "the", "last", "character", "within", "a", "source", "range", "." ]
def end(self): """ Return a SourceLocation representing the last character within a source range. """ return conf.lib.clang_getRangeEnd(self)
[ "def", "end", "(", "self", ")", ":", "return", "conf", ".", "lib", ".", "clang_getRangeEnd", "(", "self", ")" ]
https://github.com/linyouhappy/kongkongxiyou/blob/7a69b2913eb29f4be77f9a62fb90cdd72c4160f1/cocosjs/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py#L256-L261
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/rexec.py
python
RExec.s_import
(self, *args)
return self.s_apply(self.r_import, args)
Import a module, raising an ImportError exception if the module is considered unsafe. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment. Similar to the r_import() method, but has access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout.
Import a module, raising an ImportError exception if the module is considered unsafe.
[ "Import", "a", "module", "raising", "an", "ImportError", "exception", "if", "the", "module", "is", "considered", "unsafe", "." ]
def s_import(self, *args): """Import a module, raising an ImportError exception if the module is considered unsafe. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment. Similar to the r_import() method, but has access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout. """ return self.s_apply(self.r_import, args)
[ "def", "s_import", "(", "self", ",", "*", "args", ")", ":", "return", "self", ".", "s_apply", "(", "self", ".", "r_import", ",", "args", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/rexec.py#L462-L475
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/html.py
python
HtmlHelpWindow.WriteCustomization
(*args, **kwargs)
return _html.HtmlHelpWindow_WriteCustomization(*args, **kwargs)
WriteCustomization(self, ConfigBase cfg, String path=wxEmptyString)
WriteCustomization(self, ConfigBase cfg, String path=wxEmptyString)
[ "WriteCustomization", "(", "self", "ConfigBase", "cfg", "String", "path", "=", "wxEmptyString", ")" ]
def WriteCustomization(*args, **kwargs): """WriteCustomization(self, ConfigBase cfg, String path=wxEmptyString)""" return _html.HtmlHelpWindow_WriteCustomization(*args, **kwargs)
[ "def", "WriteCustomization", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_html", ".", "HtmlHelpWindow_WriteCustomization", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/html.py#L1630-L1632
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py
python
IResourceProvider.resource_listdir
(resource_name)
List of resource names in the directory (like ``os.listdir()``)
List of resource names in the directory (like ``os.listdir()``)
[ "List", "of", "resource", "names", "in", "the", "directory", "(", "like", "os", ".", "listdir", "()", ")" ]
def resource_listdir(resource_name): """List of resource names in the directory (like ``os.listdir()``)"""
[ "def", "resource_listdir", "(", "resource_name", ")", ":" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py#L549-L550
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/ros_comm/roslaunch/src/roslaunch/core.py
python
generate_run_id
()
return str(uuid.uuid1())
Utility routine for generating run IDs (UUIDs) :returns: guid, ``str``
Utility routine for generating run IDs (UUIDs) :returns: guid, ``str``
[ "Utility", "routine", "for", "generating", "run", "IDs", "(", "UUIDs", ")", ":", "returns", ":", "guid", "str" ]
def generate_run_id(): """ Utility routine for generating run IDs (UUIDs) :returns: guid, ``str`` """ import uuid return str(uuid.uuid1())
[ "def", "generate_run_id", "(", ")", ":", "import", "uuid", "return", "str", "(", "uuid", ".", "uuid1", "(", ")", ")" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/ros_comm/roslaunch/src/roslaunch/core.py#L668-L674
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/ragged/ragged_math_ops.py
python
tensor_not_equals
(self: ragged_tensor.RaggedOrDense, other: ragged_tensor.RaggedOrDense)
Ragged version of the operation invoked by `Tensor.__ne__`.
Ragged version of the operation invoked by `Tensor.__ne__`.
[ "Ragged", "version", "of", "the", "operation", "invoked", "by", "Tensor", ".", "__ne__", "." ]
def tensor_not_equals(self: ragged_tensor.RaggedOrDense, other: ragged_tensor.RaggedOrDense): """Ragged version of the operation invoked by `Tensor.__ne__`.""" if other is None: return False elif _use_legacy_mode_for_tensor_equality(self): return self is not other else: try: return math_ops.not_equal(self, other) except (errors.InvalidArgumentError, ValueError): return True
[ "def", "tensor_not_equals", "(", "self", ":", "ragged_tensor", ".", "RaggedOrDense", ",", "other", ":", "ragged_tensor", ".", "RaggedOrDense", ")", ":", "if", "other", "is", "None", ":", "return", "False", "elif", "_use_legacy_mode_for_tensor_equality", "(", "self", ")", ":", "return", "self", "is", "not", "other", "else", ":", "try", ":", "return", "math_ops", ".", "not_equal", "(", "self", ",", "other", ")", "except", "(", "errors", ".", "InvalidArgumentError", ",", "ValueError", ")", ":", "return", "True" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/ragged/ragged_math_ops.py#L1123-L1134
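A minimal sketch, assuming a TF 2.x build with eager execution, of the overridden operator on ragged inputs; it exercises both the elementwise path and the `None` early-out branch shown above:

import tensorflow as tf

a = tf.ragged.constant([[1, 2], [3]])
b = tf.ragged.constant([[1, 0], [3]])
print(a != b)     # elementwise ragged result: [[False, True], [False]]
print(a != None)  # False, via the early-out branch above  (noqa: E711)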
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/pipeline/pipeline/pipeline.py
python
Pipeline.__init__
(self, *args, **kwargs)
Initializer. Args: *args: The positional arguments for this function-object. **kwargs: The keyword arguments for this function-object.
Initializer.
[ "Initializer", "." ]
def __init__(self, *args, **kwargs): """Initializer. Args: *args: The positional arguments for this function-object. **kwargs: The keyword arguments for this function-object. """ self.args = args self.kwargs = kwargs self.outputs = None self.backoff_seconds = _DEFAULT_BACKOFF_SECONDS self.backoff_factor = _DEFAULT_BACKOFF_FACTOR self.max_attempts = _DEFAULT_MAX_ATTEMPTS self.target = None self.task_retry = False self._current_attempt = 0 self._root_pipeline_key = None self._pipeline_key = None self._context = None self._result_status = None self._set_class_path() # Introspectively set the target so pipelines stick to the version it # started. self.target = mr_util._get_task_target() if _TEST_MODE: self._context = _PipelineContext('', 'default', '') self._root_pipeline_key = _TEST_ROOT_PIPELINE_KEY self._pipeline_key = db.Key.from_path( _PipelineRecord.kind(), uuid.uuid4().hex) self.outputs = PipelineFuture(self.output_names) self._context.evaluate_test(self)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "args", "=", "args", "self", ".", "kwargs", "=", "kwargs", "self", ".", "outputs", "=", "None", "self", ".", "backoff_seconds", "=", "_DEFAULT_BACKOFF_SECONDS", "self", ".", "backoff_factor", "=", "_DEFAULT_BACKOFF_FACTOR", "self", ".", "max_attempts", "=", "_DEFAULT_MAX_ATTEMPTS", "self", ".", "target", "=", "None", "self", ".", "task_retry", "=", "False", "self", ".", "_current_attempt", "=", "0", "self", ".", "_root_pipeline_key", "=", "None", "self", ".", "_pipeline_key", "=", "None", "self", ".", "_context", "=", "None", "self", ".", "_result_status", "=", "None", "self", ".", "_set_class_path", "(", ")", "# Introspectively set the target so pipelines stick to the version it", "# started.", "self", ".", "target", "=", "mr_util", ".", "_get_task_target", "(", ")", "if", "_TEST_MODE", ":", "self", ".", "_context", "=", "_PipelineContext", "(", "''", ",", "'default'", ",", "''", ")", "self", ".", "_root_pipeline_key", "=", "_TEST_ROOT_PIPELINE_KEY", "self", ".", "_pipeline_key", "=", "db", ".", "Key", ".", "from_path", "(", "_PipelineRecord", ".", "kind", "(", ")", ",", "uuid", ".", "uuid4", "(", ")", ".", "hex", ")", "self", ".", "outputs", "=", "PipelineFuture", "(", "self", ".", "output_names", ")", "self", ".", "_context", ".", "evaluate_test", "(", "self", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/pipeline/pipeline/pipeline.py#L448-L479
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/telemetry/third_party/pyserial/serial/serialutil.py
python
SerialBase.setInterCharTimeout
(self, interCharTimeout)
Change inter-character timeout setting.
Change inter-character timeout setting.
[ "Change", "inter", "-", "character", "timeout", "setting", "." ]
def setInterCharTimeout(self, interCharTimeout): """Change inter-character timeout setting.""" if interCharTimeout is not None: if interCharTimeout < 0: raise ValueError("Not a valid timeout: %r" % interCharTimeout) try: interCharTimeout + 1 # test if it's a number, will throw a TypeError if not... except TypeError: raise ValueError("Not a valid timeout: %r" % interCharTimeout) self._interCharTimeout = interCharTimeout if self._isOpen: self._reconfigurePort()
[ "def", "setInterCharTimeout", "(", "self", ",", "interCharTimeout", ")", ":", "if", "interCharTimeout", "is", "not", "None", ":", "if", "interCharTimeout", "<", "0", ":", "raise", "ValueError", "(", "\"Not a valid timeout: %r\"", "%", "interCharTimeout", ")", "try", ":", "interCharTimeout", "+", "1", "# test if it's a number, will throw a TypeError if not...", "except", "TypeError", ":", "raise", "ValueError", "(", "\"Not a valid timeout: %r\"", "%", "interCharTimeout", ")", "self", ".", "_interCharTimeout", "=", "interCharTimeout", "if", "self", ".", "_isOpen", ":", "self", ".", "_reconfigurePort", "(", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/telemetry/third_party/pyserial/serial/serialutil.py#L468-L478
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Draft/drafttaskpanels/task_circulararray.py
python
TaskPanelCircularArray.print_link_state
(self, use_link)
Print the link state translated.
Print the link state translated.
[ "Print", "the", "link", "state", "translated", "." ]
def print_link_state(self, use_link): """Print the link state translated.""" if use_link: state = self.tr_true else: state = self.tr_false _msg(translate("draft","Create Link array:") + " {}".format(state))
[ "def", "print_link_state", "(", "self", ",", "use_link", ")", ":", "if", "use_link", ":", "state", "=", "self", ".", "tr_true", "else", ":", "state", "=", "self", ".", "tr_false", "_msg", "(", "translate", "(", "\"draft\"", ",", "\"Create Link array:\"", ")", "+", "\" {}\"", ".", "format", "(", "state", ")", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/drafttaskpanels/task_circulararray.py#L359-L365
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/ed_vim.py
python
EditraCommander.WordEndBig
(self, repeat=1)
Move to end of this Word (words are separated by space)
Move to end of this Word (words are separated by space)
[ "Move", "to", "end", "of", "this", "Word", "(", "words", "are", "separated", "by", "space", ")" ]
def WordEndBig(self, repeat=1): """Move to end of this Word (words are separated by space)""" # TODO:CJP Test on empty document, possible error condition for i in range(repeat): self.stc.WordRightEnd() while self.GetChar() and not self.GetChar().isspace(): self.stc.WordRightEnd()
[ "def", "WordEndBig", "(", "self", ",", "repeat", "=", "1", ")", ":", "# TODO:CJP Test on empty document, possible error condition", "for", "i", "in", "range", "(", "repeat", ")", ":", "self", ".", "stc", ".", "WordRightEnd", "(", ")", "while", "self", ".", "GetChar", "(", ")", "and", "not", "self", ".", "GetChar", "(", ")", ".", "isspace", "(", ")", ":", "self", ".", "stc", ".", "WordRightEnd", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_vim.py#L386-L392
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/keras/_impl/keras/preprocessing/sequence.py
python
skipgrams
(sequence, vocabulary_size, window_size=4, negative_samples=1., shuffle=True, categorical=False, sampling_table=None, seed=None)
return couples, labels
Generates skipgram word pairs. Takes a sequence (list of indexes of words), returns couples of [word_index, other_word index] and labels (1s or 0s), where label = 1 if 'other_word' belongs to the context of 'word', and label=0 if 'other_word' is randomly sampled Arguments: sequence: a word sequence (sentence), encoded as a list of word indices (integers). If using a `sampling_table`, word indices are expected to match the rank of the words in a reference dataset (e.g. 10 would encode the 10-th most frequently occurring token). Note that index 0 is expected to be a non-word and will be skipped. vocabulary_size: int. maximum possible word index + 1 window_size: int. actually half-window. The window of a word wi will be [i-window_size, i+window_size+1] negative_samples: float >= 0. 0 for no negative (=random) samples. 1 for same number as positive samples. etc. shuffle: whether to shuffle the word couples before returning them. categorical: bool. if False, labels will be integers (eg. [0, 1, 1 .. ]), if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ] sampling_table: 1D array of size `vocabulary_size` where the entry i encodes the probability to sample a word of rank i. seed: Random seed. Returns: couples, labels: where `couples` are int pairs and `labels` are either 0 or 1. # Note By convention, index 0 in the vocabulary is a non-word and will be skipped.
Generates skipgram word pairs.
[ "Generates", "skipgram", "word", "pairs", "." ]
def skipgrams(sequence, vocabulary_size, window_size=4, negative_samples=1., shuffle=True, categorical=False, sampling_table=None, seed=None): """Generates skipgram word pairs. Takes a sequence (list of indexes of words), returns couples of [word_index, other_word index] and labels (1s or 0s), where label = 1 if 'other_word' belongs to the context of 'word', and label=0 if 'other_word' is randomly sampled Arguments: sequence: a word sequence (sentence), encoded as a list of word indices (integers). If using a `sampling_table`, word indices are expected to match the rank of the words in a reference dataset (e.g. 10 would encode the 10-th most frequently occurring token). Note that index 0 is expected to be a non-word and will be skipped. vocabulary_size: int. maximum possible word index + 1 window_size: int. actually half-window. The window of a word wi will be [i-window_size, i+window_size+1] negative_samples: float >= 0. 0 for no negative (=random) samples. 1 for same number as positive samples. etc. shuffle: whether to shuffle the word couples before returning them. categorical: bool. if False, labels will be integers (eg. [0, 1, 1 .. ]), if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ] sampling_table: 1D array of size `vocabulary_size` where the entry i encodes the probability to sample a word of rank i. seed: Random seed. Returns: couples, labels: where `couples` are int pairs and `labels` are either 0 or 1. # Note By convention, index 0 in the vocabulary is a non-word and will be skipped. """ couples = [] labels = [] for i, wi in enumerate(sequence): if not wi: continue if sampling_table is not None: if sampling_table[wi] < random.random(): continue window_start = max(0, i - window_size) window_end = min(len(sequence), i + window_size + 1) for j in range(window_start, window_end): if j != i: wj = sequence[j] if not wj: continue couples.append([wi, wj]) if categorical: labels.append([0, 1]) else: labels.append(1) if negative_samples > 0: num_negative_samples = int(len(labels) * negative_samples) words = [c[0] for c in couples] random.shuffle(words) couples += [[words[i % len(words)], random.randint(1, vocabulary_size - 1)] for i in range(num_negative_samples)] if categorical: labels += [[1, 0]] * num_negative_samples else: labels += [0] * num_negative_samples if shuffle: if seed is None: seed = random.randint(0, 10e6) random.seed(seed) random.shuffle(couples) random.seed(seed) random.shuffle(labels) return couples, labels
[ "def", "skipgrams", "(", "sequence", ",", "vocabulary_size", ",", "window_size", "=", "4", ",", "negative_samples", "=", "1.", ",", "shuffle", "=", "True", ",", "categorical", "=", "False", ",", "sampling_table", "=", "None", ",", "seed", "=", "None", ")", ":", "couples", "=", "[", "]", "labels", "=", "[", "]", "for", "i", ",", "wi", "in", "enumerate", "(", "sequence", ")", ":", "if", "not", "wi", ":", "continue", "if", "sampling_table", "is", "not", "None", ":", "if", "sampling_table", "[", "wi", "]", "<", "random", ".", "random", "(", ")", ":", "continue", "window_start", "=", "max", "(", "0", ",", "i", "-", "window_size", ")", "window_end", "=", "min", "(", "len", "(", "sequence", ")", ",", "i", "+", "window_size", "+", "1", ")", "for", "j", "in", "range", "(", "window_start", ",", "window_end", ")", ":", "if", "j", "!=", "i", ":", "wj", "=", "sequence", "[", "j", "]", "if", "not", "wj", ":", "continue", "couples", ".", "append", "(", "[", "wi", ",", "wj", "]", ")", "if", "categorical", ":", "labels", ".", "append", "(", "[", "0", ",", "1", "]", ")", "else", ":", "labels", ".", "append", "(", "1", ")", "if", "negative_samples", ">", "0", ":", "num_negative_samples", "=", "int", "(", "len", "(", "labels", ")", "*", "negative_samples", ")", "words", "=", "[", "c", "[", "0", "]", "for", "c", "in", "couples", "]", "random", ".", "shuffle", "(", "words", ")", "couples", "+=", "[", "[", "words", "[", "i", "%", "len", "(", "words", ")", "]", ",", "random", ".", "randint", "(", "1", ",", "vocabulary_size", "-", "1", ")", "]", "for", "i", "in", "range", "(", "num_negative_samples", ")", "]", "if", "categorical", ":", "labels", "+=", "[", "[", "1", ",", "0", "]", "]", "*", "num_negative_samples", "else", ":", "labels", "+=", "[", "0", "]", "*", "num_negative_samples", "if", "shuffle", ":", "if", "seed", "is", "None", ":", "seed", "=", "random", ".", "randint", "(", "0", ",", "10e6", ")", "random", ".", "seed", "(", "seed", ")", "random", ".", "shuffle", "(", "couples", ")", "random", ".", "seed", "(", "seed", ")", "random", ".", "shuffle", "(", "labels", ")", "return", "couples", ",", "labels" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/keras/_impl/keras/preprocessing/sequence.py#L140-L226
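Because the output is deterministic when shuffling and negative sampling are disabled, a small sketch of calling the `skipgrams` defined above makes the windowing concrete (index 0 is reserved as a non-word, so the toy vocabulary is {1, 2, 3, 4}):

couples, labels = skipgrams([1, 2, 3, 4], vocabulary_size=5,
                            window_size=1, negative_samples=0., shuffle=False)
print(couples)  # [[1, 2], [2, 1], [2, 3], [3, 2], [3, 4], [4, 3]]
print(labels)   # [1, 1, 1, 1, 1, 1]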
NVIDIA/MDL-SDK
aa9642b2546ad7b6236b5627385d882c2ed83c5d
src/mdl/jit/llvm/dist/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
python
TimingScriptGenerator.writeTimingCall
(self, filename, numFuncs, funcsCalled, totalCalls)
Echo some comments and invoke both versions of toy
Echo some comments and invoke both versions of toy
[ "Echo", "some", "comments", "and", "invoke", "both", "versions", "of", "toy" ]
def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls): """Echo some comments and invoke both versions of toy""" rootname = filename if '.' in filename: rootname = filename[:filename.rfind('.')] self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile)) self.shfile.write("echo \"\" >> %s\n" % self.timeFile) self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile) self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsystem time: %S s\\n\\tmax set: %M kb\"") self.shfile.write(" -o %s -a " % self.timeFile) self.shfile.write("./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname)) self.shfile.write("echo \"\" >> %s\n" % self.timeFile) self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile) self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsystem time: %S s\\n\\tmax set: %M kb\"") self.shfile.write(" -o %s -a " % self.timeFile) self.shfile.write("./toy-jit < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname)) self.shfile.write("echo \"\" >> %s\n" % self.timeFile) self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
[ "def", "writeTimingCall", "(", "self", ",", "filename", ",", "numFuncs", ",", "funcsCalled", ",", "totalCalls", ")", ":", "rootname", "=", "filename", "if", "'.'", "in", "filename", ":", "rootname", "=", "filename", "[", ":", "filename", ".", "rfind", "(", "'.'", ")", "]", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"%s: Calls %d of %d functions, %d total\\\" >> %s\\n\"", "%", "(", "filename", ",", "funcsCalled", ",", "numFuncs", ",", "totalCalls", ",", "self", ".", "timeFile", ")", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"With MCJIT\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"/usr/bin/time -f \\\"Command %C\\\\n\\\\tuser time: %U s\\\\n\\\\tsytem time: %S s\\\\n\\\\tmax set: %M kb\\\"\"", ")", "self", ".", "shfile", ".", "write", "(", "\" -o %s -a \"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\\n\"", "%", "(", "filename", ",", "rootname", ",", "rootname", ")", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"With JIT\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"/usr/bin/time -f \\\"Command %C\\\\n\\\\tuser time: %U s\\\\n\\\\tsytem time: %S s\\\\n\\\\tmax set: %M kb\\\"\"", ")", "self", ".", "shfile", ".", "write", "(", "\" -o %s -a \"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"./toy-jit < %s > %s-jit.out 2> %s-jit.err\\n\"", "%", "(", "filename", ",", "rootname", ",", "rootname", ")", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")" ]
https://github.com/NVIDIA/MDL-SDK/blob/aa9642b2546ad7b6236b5627385d882c2ed83c5d/src/mdl/jit/llvm/dist/examples/Kaleidoscope/MCJIT/cached/genk-timing.py#L13-L30
bcrusco/Forward-Plus-Renderer
1f130f1ae58882f651d94695823044f9833cfa30
Forward-Plus/Forward-Plus/external/assimp-3.1.1/port/PyAssimp/pyassimp/core.py
python
_get_properties
(properties, length)
return PropertyGetter(result)
Convenience Function to get the material properties as a dict and values in a python format.
Convenience Function to get the material properties as a dict and values in a python format.
[ "Convenience", "Function", "to", "get", "the", "material", "properties", "as", "a", "dict", "and", "values", "in", "a", "python", "format", "." ]
def _get_properties(properties, length): """ Convenience Function to get the material properties as a dict and values in a python format. """ result = {} #read all properties for p in [properties[i] for i in range(length)]: #the name p = p.contents key = (str(p.mKey.data.decode("utf-8")).split('.')[1], p.mSemantic) #the data from ctypes import POINTER, cast, c_int, c_float, sizeof if p.mType == 1: arr = cast(p.mData, POINTER(c_float * int(p.mDataLength/sizeof(c_float)) )).contents value = [x for x in arr] elif p.mType == 3: #string can't be an array value = cast(p.mData, POINTER(structs.MaterialPropertyString)).contents.data.decode("utf-8") elif p.mType == 4: arr = cast(p.mData, POINTER(c_int * int(p.mDataLength/sizeof(c_int)) )).contents value = [x for x in arr] else: value = p.mData[:p.mDataLength] if len(value) == 1: [value] = value result[key] = value return PropertyGetter(result)
[ "def", "_get_properties", "(", "properties", ",", "length", ")", ":", "result", "=", "{", "}", "#read all properties", "for", "p", "in", "[", "properties", "[", "i", "]", "for", "i", "in", "range", "(", "length", ")", "]", ":", "#the name", "p", "=", "p", ".", "contents", "key", "=", "(", "str", "(", "p", ".", "mKey", ".", "data", ".", "decode", "(", "\"utf-8\"", ")", ")", ".", "split", "(", "'.'", ")", "[", "1", "]", ",", "p", ".", "mSemantic", ")", "#the data", "from", "ctypes", "import", "POINTER", ",", "cast", ",", "c_int", ",", "c_float", ",", "sizeof", "if", "p", ".", "mType", "==", "1", ":", "arr", "=", "cast", "(", "p", ".", "mData", ",", "POINTER", "(", "c_float", "*", "int", "(", "p", ".", "mDataLength", "/", "sizeof", "(", "c_float", ")", ")", ")", ")", ".", "contents", "value", "=", "[", "x", "for", "x", "in", "arr", "]", "elif", "p", ".", "mType", "==", "3", ":", "#string can't be an array", "value", "=", "cast", "(", "p", ".", "mData", ",", "POINTER", "(", "structs", ".", "MaterialPropertyString", ")", ")", ".", "contents", ".", "data", ".", "decode", "(", "\"utf-8\"", ")", "elif", "p", ".", "mType", "==", "4", ":", "arr", "=", "cast", "(", "p", ".", "mData", ",", "POINTER", "(", "c_int", "*", "int", "(", "p", ".", "mDataLength", "/", "sizeof", "(", "c_int", ")", ")", ")", ")", ".", "contents", "value", "=", "[", "x", "for", "x", "in", "arr", "]", "else", ":", "value", "=", "p", ".", "mData", "[", ":", "p", ".", "mDataLength", "]", "if", "len", "(", "value", ")", "==", "1", ":", "[", "value", "]", "=", "value", "result", "[", "key", "]", "=", "value", "return", "PropertyGetter", "(", "result", ")" ]
https://github.com/bcrusco/Forward-Plus-Renderer/blob/1f130f1ae58882f651d94695823044f9833cfa30/Forward-Plus/Forward-Plus/external/assimp-3.1.1/port/PyAssimp/pyassimp/core.py#L371-L401
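The heart of the record above is the ctypes cast-to-array pattern; a minimal standalone sketch of that pattern follows (the 12-byte buffer is fabricated for illustration):

from ctypes import POINTER, c_float, cast, create_string_buffer, sizeof
import struct

raw = create_string_buffer(struct.pack('3f', 1.0, 2.0, 3.0), 3 * sizeof(c_float))
arr = cast(raw, POINTER(c_float * 3)).contents   # reinterpret the raw bytes as floats
print([x for x in arr])                          # [1.0, 2.0, 3.0]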
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/tools/api/lib/python_object_to_proto_visitor.py
python
PythonObjectToProtoVisitor.GetProtos
(self)
return self._protos
Return the list of protos stored.
Return the list of protos stored.
[ "Return", "the", "list", "of", "protos", "stored", "." ]
def GetProtos(self): """Return the list of protos stored.""" return self._protos
[ "def", "GetProtos", "(", "self", ")", ":", "return", "self", ".", "_protos" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/tools/api/lib/python_object_to_proto_visitor.py#L112-L114
tangzhenyu/Scene-Text-Understanding
0f7ffc7aea5971a50cdc03d33d0a41075285948b
ctpn_crnn_ocr/CTPN/caffe/tools/extra/parse_log.py
python
parse_log
(path_to_log)
return train_dict_list, test_dict_list
Parse log file Returns (train_dict_list, test_dict_list) train_dict_list and test_dict_list are lists of dicts that define the table rows
Parse log file Returns (train_dict_list, test_dict_list)
[ "Parse", "log", "file", "Returns", "(", "train_dict_list", "train_dict_names", "test_dict_list", "test_dict_names", ")" ]
def parse_log(path_to_log): """Parse log file Returns (train_dict_list, test_dict_list) train_dict_list and test_dict_list are lists of dicts that define the table rows """ regex_iteration = re.compile('Iteration (\d+)') regex_train_output = re.compile('Train net output #(\d+): (\S+) = ([\.\deE+-]+)') regex_test_output = re.compile('Test net output #(\d+): (\S+) = ([\.\deE+-]+)') regex_learning_rate = re.compile('lr = ([-+]?[0-9]*\.?[0-9]+([eE]?[-+]?[0-9]+)?)') # Pick out lines of interest iteration = -1 learning_rate = float('NaN') train_dict_list = [] test_dict_list = [] train_row = None test_row = None logfile_year = extract_seconds.get_log_created_year(path_to_log) with open(path_to_log) as f: start_time = extract_seconds.get_start_time(f, logfile_year) for line in f: iteration_match = regex_iteration.search(line) if iteration_match: iteration = float(iteration_match.group(1)) if iteration == -1: # Only start parsing for other stuff if we've found the first # iteration continue time = extract_seconds.extract_datetime_from_line(line, logfile_year) seconds = (time - start_time).total_seconds() learning_rate_match = regex_learning_rate.search(line) if learning_rate_match: learning_rate = float(learning_rate_match.group(1)) train_dict_list, train_row = parse_line_for_net_output( regex_train_output, train_row, train_dict_list, line, iteration, seconds, learning_rate ) test_dict_list, test_row = parse_line_for_net_output( regex_test_output, test_row, test_dict_list, line, iteration, seconds, learning_rate ) fix_initial_nan_learning_rate(train_dict_list) fix_initial_nan_learning_rate(test_dict_list) return train_dict_list, test_dict_list
[ "def", "parse_log", "(", "path_to_log", ")", ":", "regex_iteration", "=", "re", ".", "compile", "(", "'Iteration (\\d+)'", ")", "regex_train_output", "=", "re", ".", "compile", "(", "'Train net output #(\\d+): (\\S+) = ([\\.\\deE+-]+)'", ")", "regex_test_output", "=", "re", ".", "compile", "(", "'Test net output #(\\d+): (\\S+) = ([\\.\\deE+-]+)'", ")", "regex_learning_rate", "=", "re", ".", "compile", "(", "'lr = ([-+]?[0-9]*\\.?[0-9]+([eE]?[-+]?[0-9]+)?)'", ")", "# Pick out lines of interest", "iteration", "=", "-", "1", "learning_rate", "=", "float", "(", "'NaN'", ")", "train_dict_list", "=", "[", "]", "test_dict_list", "=", "[", "]", "train_row", "=", "None", "test_row", "=", "None", "logfile_year", "=", "extract_seconds", ".", "get_log_created_year", "(", "path_to_log", ")", "with", "open", "(", "path_to_log", ")", "as", "f", ":", "start_time", "=", "extract_seconds", ".", "get_start_time", "(", "f", ",", "logfile_year", ")", "for", "line", "in", "f", ":", "iteration_match", "=", "regex_iteration", ".", "search", "(", "line", ")", "if", "iteration_match", ":", "iteration", "=", "float", "(", "iteration_match", ".", "group", "(", "1", ")", ")", "if", "iteration", "==", "-", "1", ":", "# Only start parsing for other stuff if we've found the first", "# iteration", "continue", "time", "=", "extract_seconds", ".", "extract_datetime_from_line", "(", "line", ",", "logfile_year", ")", "seconds", "=", "(", "time", "-", "start_time", ")", ".", "total_seconds", "(", ")", "learning_rate_match", "=", "regex_learning_rate", ".", "search", "(", "line", ")", "if", "learning_rate_match", ":", "learning_rate", "=", "float", "(", "learning_rate_match", ".", "group", "(", "1", ")", ")", "train_dict_list", ",", "train_row", "=", "parse_line_for_net_output", "(", "regex_train_output", ",", "train_row", ",", "train_dict_list", ",", "line", ",", "iteration", ",", "seconds", ",", "learning_rate", ")", "test_dict_list", ",", "test_row", "=", "parse_line_for_net_output", "(", "regex_test_output", ",", "test_row", ",", "test_dict_list", ",", "line", ",", "iteration", ",", "seconds", ",", "learning_rate", ")", "fix_initial_nan_learning_rate", "(", "train_dict_list", ")", "fix_initial_nan_learning_rate", "(", "test_dict_list", ")", "return", "train_dict_list", ",", "test_dict_list" ]
https://github.com/tangzhenyu/Scene-Text-Understanding/blob/0f7ffc7aea5971a50cdc03d33d0a41075285948b/ctpn_crnn_ocr/CTPN/caffe/tools/extra/parse_log.py#L17-L74
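A minimal sketch of the iteration regex from the record above, applied to a fabricated Caffe-style log line (the line text is illustrative, not taken from a real log):

import re

regex_iteration = re.compile(r'Iteration (\d+)')
line = 'I0101 12:00:00.000000  1234 solver.cpp:218] Iteration 400, loss = 0.12'
match = regex_iteration.search(line)
if match:
    print(float(match.group(1)))  # 400.0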
microsoft/checkedc-clang
a173fefde5d7877b7750e7ce96dd08cf18baebf2
clang/bindings/python/clang/cindex.py
python
Cursor.location
(self)
return self._loc
Return the source location (the starting character) of the entity pointed at by the cursor.
Return the source location (the starting character) of the entity pointed at by the cursor.
[ "Return", "the", "source", "location", "(", "the", "starting", "character", ")", "of", "the", "entity", "pointed", "at", "by", "the", "cursor", "." ]
def location(self): """ Return the source location (the starting character) of the entity pointed at by the cursor. """ if not hasattr(self, '_loc'): self._loc = conf.lib.clang_getCursorLocation(self) return self._loc
[ "def", "location", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_loc'", ")", ":", "self", ".", "_loc", "=", "conf", ".", "lib", ".", "clang_getCursorLocation", "(", "self", ")", "return", "self", ".", "_loc" ]
https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/clang/bindings/python/clang/cindex.py#L1574-L1582
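A minimal sketch, assuming libclang is installed and importable as `clang.cindex`; `sample.c` is a hypothetical file name:

from clang.cindex import Index

tu = Index.create().parse('sample.c', args=['-std=c11'])
for node in tu.cursor.walk_preorder():
    loc = node.location            # lazily cached, per the property above
    if loc.file is not None:
        print(loc.file.name, loc.line, loc.column, node.kind)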
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/data/experimental/ops/stats_ops.py
python
latency_stats
(tag)
return _apply_fn
Records the latency of producing each element of the input dataset. To consume the statistics, associate a `StatsAggregator` with the output dataset. Args: tag: String. All statistics recorded by the returned transformation will be associated with the given `tag`. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`.
Records the latency of producing each element of the input dataset.
[ "Records", "the", "latency", "of", "producing", "each", "element", "of", "the", "input", "dataset", "." ]
def latency_stats(tag): """Records the latency of producing each element of the input dataset. To consume the statistics, associate a `StatsAggregator` with the output dataset. Args: tag: String. All statistics recorded by the returned transformation will be associated with the given `tag`. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. """ def _apply_fn(dataset): return _StatsDataset( dataset, gen_experimental_dataset_ops.latency_stats_dataset, tag) return _apply_fn
[ "def", "latency_stats", "(", "tag", ")", ":", "def", "_apply_fn", "(", "dataset", ")", ":", "return", "_StatsDataset", "(", "dataset", ",", "gen_experimental_dataset_ops", ".", "latency_stats_dataset", ",", "tag", ")", "return", "_apply_fn" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/data/experimental/ops/stats_ops.py#L75-L94
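A minimal sketch of wiring the transformation above to a StatsAggregator, assuming a TF release where these experimental stats APIs still exist (they were removed in later versions):

import tensorflow as tf

aggregator = tf.data.experimental.StatsAggregator()
dataset = tf.data.Dataset.range(100).apply(
    tf.data.experimental.latency_stats("range_latency"))
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator  # route recorded stats here
dataset = dataset.with_options(options)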
perilouswithadollarsign/cstrike15_src
f82112a2388b841d72cb62ca48ab1846dfcc11c8
thirdparty/protobuf-2.5.0/python/mox.py
python
UnknownMethodCallError.__init__
(self, unknown_method_name)
Init exception. Args: # unknown_method_name: Method call that is not part of the mocked class's # public interface. unknown_method_name: str
Init exception.
[ "Init", "exception", "." ]
def __init__(self, unknown_method_name): """Init exception. Args: # unknown_method_name: Method call that is not part of the mocked class's # public interface. unknown_method_name: str """ Error.__init__(self) self._unknown_method_name = unknown_method_name
[ "def", "__init__", "(", "self", ",", "unknown_method_name", ")", ":", "Error", ".", "__init__", "(", "self", ")", "self", ".", "_unknown_method_name", "=", "unknown_method_name" ]
https://github.com/perilouswithadollarsign/cstrike15_src/blob/f82112a2388b841d72cb62ca48ab1846dfcc11c8/thirdparty/protobuf-2.5.0/python/mox.py#L133-L143
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/keras/_impl/keras/backend.py
python
conv1d
(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1)
return x
1D convolution. Arguments: x: Tensor or variable. kernel: kernel tensor. strides: stride integer. padding: string, `"same"`, `"causal"` or `"valid"`. data_format: string, one of "channels_last", "channels_first". dilation_rate: integer dilate rate. Returns: A tensor, result of 1D convolution.
1D convolution.
[ "1D", "convolution", "." ]
def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): """1D convolution. Arguments: x: Tensor or variable. kernel: kernel tensor. strides: stride integer. padding: string, `"same"`, `"causal"` or `"valid"`. data_format: string, one of "channels_last", "channels_first". dilation_rate: integer dilate rate. Returns: A tensor, result of 1D convolution. """ kernel_shape = kernel.get_shape().as_list() if padding == 'causal': # causal (dilated) convolution: left_pad = dilation_rate * (kernel_shape[0] - 1) x = temporal_padding(x, (left_pad, 0)) padding = 'valid' padding = _preprocess_padding(padding) if data_format == 'channels_last': tf_data_format = 'NWC' else: tf_data_format = 'NCW' x = nn.convolution( input=x, filter=kernel, dilation_rate=(dilation_rate,), strides=(strides,), padding=padding, data_format=tf_data_format) return x
[ "def", "conv1d", "(", "x", ",", "kernel", ",", "strides", "=", "1", ",", "padding", "=", "'valid'", ",", "data_format", "=", "None", ",", "dilation_rate", "=", "1", ")", ":", "kernel_shape", "=", "kernel", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "if", "padding", "==", "'causal'", ":", "# causal (dilated) convolution:", "left_pad", "=", "dilation_rate", "*", "(", "kernel_shape", "[", "0", "]", "-", "1", ")", "x", "=", "temporal_padding", "(", "x", ",", "(", "left_pad", ",", "0", ")", ")", "padding", "=", "'valid'", "padding", "=", "_preprocess_padding", "(", "padding", ")", "if", "data_format", "==", "'channels_last'", ":", "tf_data_format", "=", "'NWC'", "else", ":", "tf_data_format", "=", "'NCW'", "x", "=", "nn", ".", "convolution", "(", "input", "=", "x", ",", "filter", "=", "kernel", ",", "dilation_rate", "=", "(", "dilation_rate", ",", ")", ",", "strides", "=", "(", "strides", ",", ")", ",", "padding", "=", "padding", ",", "data_format", "=", "tf_data_format", ")", "return", "x" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/keras/_impl/keras/backend.py#L3246-L3283
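The causal branch above pads only on the left, so no output position can see future timesteps; a minimal numpy sketch of the same arithmetic (kernel size 3 and dilation rate 2 are illustrative values):

import numpy as np

k, d = 3, 2
left_pad = d * (k - 1)                           # 4, matching the formula above
x = np.arange(1, 6, dtype=np.float32)            # [1. 2. 3. 4. 5.]
padded = np.concatenate([np.zeros(left_pad, np.float32), x])
print(padded)                                    # [0. 0. 0. 0. 1. 2. 3. 4. 5.]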
JumpingYang001/webrtc
c03d6e965e1f54aeadd670e491eabe5fdb8db968
tools_webrtc/version_updater/update_version.py
python
_UploadCL
(commit_queue_mode)
Upload the committed changes as a changelist to Gerrit. commit_queue_mode: - 2: Submit to commit queue. - 1: Run trybots but do not submit to CQ. - 0: Skip CQ, upload only.
Upload the committed changes as a changelist to Gerrit.
[ "Upload", "the", "committed", "changes", "as", "a", "changelist", "to", "Gerrit", "." ]
def _UploadCL(commit_queue_mode): """Upload the committed changes as a changelist to Gerrit. commit_queue_mode: - 2: Submit to commit queue. - 1: Run trybots but do not submit to CQ. - 0: Skip CQ, upload only. """ cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks', '--cc=""', '--bypass-watchlist'] if commit_queue_mode >= 2: logging.info('Sending the CL to the CQ...') cmd.extend(['--use-commit-queue']) elif commit_queue_mode >= 1: logging.info('Starting CQ dry run...') cmd.extend(['--cq-dry-run']) subprocess.check_call(cmd)
[ "def", "_UploadCL", "(", "commit_queue_mode", ")", ":", "cmd", "=", "[", "'git'", ",", "'cl'", ",", "'upload'", ",", "'--force'", ",", "'--bypass-hooks'", ",", "'--cc=\"\"'", ",", "'--bypass-watchlist'", "]", "if", "commit_queue_mode", ">=", "2", ":", "logging", ".", "info", "(", "'Sending the CL to the CQ...'", ")", "cmd", ".", "extend", "(", "[", "'--use-commit-queue'", "]", ")", "elif", "commit_queue_mode", ">=", "1", ":", "logging", ".", "info", "(", "'Starting CQ dry run...'", ")", "cmd", ".", "extend", "(", "[", "'--cq-dry-run'", "]", ")", "subprocess", ".", "check_call", "(", "cmd", ")" ]
https://github.com/JumpingYang001/webrtc/blob/c03d6e965e1f54aeadd670e491eabe5fdb8db968/tools_webrtc/version_updater/update_version.py#L122-L138
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_tempfile.py
python
FakeTempfileModule.mkstemp
(self, suffix='', prefix=None, dir=None, text=False)
return (fd, filename)
Create temp file, returning a 2-tuple: (9999, filename). Important: Returns 9999 instead of a real file descriptor! Python 2.4.1 tempfile.mkstemp.__doc__ = >mkstemp([suffix, [prefix, [dir, [text]]]]) > >User-callable function to create and return a unique temporary file. >The return value is a pair (fd, name) where fd is the file descriptor >returned by os.open, and name is the filename. > >...[snip args]... > >The file is readable and writable only by the creating user ID. >If the operating system uses permission bits to indicate whether >a file is executable, the file is executable by no one. The file >descriptor is not inherited by children of this process. > >Caller is responsible for deleting the file when done with it. NOTE: if dir is unspecified, this call creates a directory. Output: self.tempdir is initialized if unset Args: suffix: optional string, filename suffix prefix: optional string, filename prefix dir: optional string, directory for temp file; must exist before call text: optional boolean, True = open file in text mode. default False = open file in binary mode. Returns: 2-tuple containing [0] = int, file descriptor number for the file object [1] = string, absolute pathname of a file Raises: OSError: when dir= is specified but does not exist
Create temp file, returning a 2-tuple: (9999, filename).
[ "Create", "temp", "file", "returning", "a", "2", "-", "tuple", ":", "(", "9999", "filename", ")", "." ]
def mkstemp(self, suffix='', prefix=None, dir=None, text=False): """Create temp file, returning a 2-tuple: (9999, filename). Important: Returns 9999 instead of a real file descriptor! Python 2.4.1 tempfile.mkstemp.__doc__ = >mkstemp([suffix, [prefix, [dir, [text]]]]) > >User-callable function to create and return a unique temporary file. >The return value is a pair (fd, name) where fd is the file descriptor >returned by os.open, and name is the filename. > >...[snip args]... > >The file is readable and writable only by the creating user ID. >If the operating system uses permission bits to indicate whether >a file is executable, the file is executable by no one. The file >descriptor is not inherited by children of this process. > >Caller is responsible for deleting the file when done with it. NOTE: if dir is unspecified, this call creates a directory. Output: self.tempdir is initialized if unset Args: suffix: optional string, filename suffix prefix: optional string, filename prefix dir: optional string, directory for temp file; must exist before call text: optional boolean, True = open file in text mode. default False = open file in binary mode. Returns: 2-tuple containing [0] = int, file descriptor number for the file object [1] = string, absolute pathname of a file Raises: OSError: when dir= is specified but does not exist """ # pylint: disable-msg=C6002 # TODO: optional boolean text is unused? # default dir affected by "global" filename = self._TempEntryname(suffix, prefix, dir) fh = self._filesystem.CreateFile(filename, st_mode=stat.S_IFREG|0o600) fd = self._filesystem.AddOpenFile(fh) self._mktemp_retvals.append(filename) return (fd, filename)
[ "def", "mkstemp", "(", "self", ",", "suffix", "=", "''", ",", "prefix", "=", "None", ",", "dir", "=", "None", ",", "text", "=", "False", ")", ":", "# pylint: disable-msg=C6002", "# TODO: optional boolean text is unused?", "# default dir affected by \"global\"", "filename", "=", "self", ".", "_TempEntryname", "(", "suffix", ",", "prefix", ",", "dir", ")", "fh", "=", "self", ".", "_filesystem", ".", "CreateFile", "(", "filename", ",", "st_mode", "=", "stat", ".", "S_IFREG", "|", "0o600", ")", "fd", "=", "self", ".", "_filesystem", ".", "AddOpenFile", "(", "fh", ")", "self", ".", "_mktemp_retvals", ".", "append", "(", "filename", ")", "return", "(", "fd", ",", "filename", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/pyfakefs/pyfakefs/fake_tempfile.py#L152-L197
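A minimal usage sketch for the record above (not part of the dataset): the enclosing class is assumed to be pyfakefs's FakeTempfileModule, built over a FakeFilesystem; only mkstemp() itself is confirmed by the record. Note also that the extracted body returns the descriptor produced by AddOpenFile, even though the docstring still advertises the 9999 sentinel.

from pyfakefs import fake_filesystem, fake_tempfile

# Assumed wiring: FakeTempfileModule wraps a FakeFilesystem instance.
fs = fake_filesystem.FakeFilesystem()
tmp = fake_tempfile.FakeTempfileModule(fs)

fd, name = tmp.mkstemp(suffix='.txt')  # no real file touches the disk
print(fd, name)                        # e.g. a small int and '/tmp/tmp...'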
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/asyncio/events.py
python
AbstractEventLoop.start_tls
(self, transport, protocol, sslcontext, *, server_side=False, server_hostname=None, ssl_handshake_timeout=None)
Upgrade a transport to TLS.

Return a new transport that *protocol* should start using
immediately.
Upgrade a transport to TLS.
[ "Upgrade", "a", "transport", "to", "TLS", "." ]
async def start_tls(self, transport, protocol, sslcontext, *,
                    server_side=False,
                    server_hostname=None,
                    ssl_handshake_timeout=None):
    """Upgrade a transport to TLS.

    Return a new transport that *protocol* should start using
    immediately.
    """
    raise NotImplementedError
[ "async", "def", "start_tls", "(", "self", ",", "transport", ",", "protocol", ",", "sslcontext", ",", "*", ",", "server_side", "=", "False", ",", "server_hostname", "=", "None", ",", "ssl_handshake_timeout", "=", "None", ")", ":", "raise", "NotImplementedError" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/asyncio/events.py#L372-L381
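A minimal sketch of how a concrete event loop's implementation of the abstract start_tls() above is driven; the hostname and timeout are placeholders, and the ssl-context setup is standard-library usage rather than anything taken from this record.

import ssl

async def upgrade_to_tls(loop, transport, protocol):
    ctx = ssl.create_default_context()  # client-side context; verifies by default
    new_transport = await loop.start_tls(
        transport, protocol, ctx,
        server_hostname='example.com',      # placeholder hostname
        ssl_handshake_timeout=10.0,
    )
    # Per the docstring, the protocol must switch to the returned transport.
    return new_transport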
psi4/psi4
be533f7f426b6ccc263904e55122899b16663395
psi4/driver/procrouting/proc.py
python
select_mp2p5_gradient
(name, **kwargs)
Function selecting the algorithm for a MP2.5 gradient call
and directing to specified or best-performance default modules.
Function selecting the algorithm for a MP2.5 gradient call and directing to specified or best-performance default modules.
[ "Function", "selecting", "the", "algorithm", "for", "a", "MP2", ".", "5", "gradient", "call", "and", "directing", "to", "specified", "or", "best", "-", "performance", "default", "modules", "." ]
def select_mp2p5_gradient(name, **kwargs):
    """Function selecting the algorithm for a MP2.5 gradient call
    and directing to specified or best-performance default modules.

    """
    reference = core.get_option('SCF', 'REFERENCE')
    mtd_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
    module = core.get_global_option('QC_MODULE')
    all_electron = (core.get_global_option('FREEZE_CORE') == "FALSE")

    # Considering only [df]occ
    func = None
    if reference in ['RHF', 'UHF']:
        if mtd_type == 'CONV':
            if all_electron:
                if module in ['', 'OCC']:
                    func = run_occ_gradient
        elif mtd_type == 'DF':
            if module in ['', 'OCC']:
                func = run_dfocc_gradient

    if func is None:
        raise ManagedMethodError(['select_mp2p5_gradient', name, 'MP_TYPE',
                                  mtd_type, reference, module, all_electron])

    if kwargs.pop('probe', False):
        return
    else:
        return func(name, **kwargs)
[ "def", "select_mp2p5_gradient", "(", "name", ",", "*", "*", "kwargs", ")", ":", "reference", "=", "core", ".", "get_option", "(", "'SCF'", ",", "'REFERENCE'", ")", "mtd_type", "=", "core", ".", "get_global_option", "(", "'MP_TYPE'", ")", "if", "core", ".", "has_global_option_changed", "(", "\"MP_TYPE\"", ")", "else", "\"DF\"", "module", "=", "core", ".", "get_global_option", "(", "'QC_MODULE'", ")", "all_electron", "=", "(", "core", ".", "get_global_option", "(", "'FREEZE_CORE'", ")", "==", "\"FALSE\"", ")", "# Considering only [df]occ", "func", "=", "None", "if", "reference", "in", "[", "'RHF'", ",", "'UHF'", "]", ":", "if", "mtd_type", "==", "'CONV'", ":", "if", "all_electron", ":", "if", "module", "in", "[", "''", ",", "'OCC'", "]", ":", "func", "=", "run_occ_gradient", "elif", "mtd_type", "==", "'DF'", ":", "if", "module", "in", "[", "''", ",", "'OCC'", "]", ":", "func", "=", "run_dfocc_gradient", "if", "func", "is", "None", ":", "raise", "ManagedMethodError", "(", "[", "'select_mp2p5_gradient'", ",", "name", ",", "'MP_TYPE'", ",", "mtd_type", ",", "reference", ",", "module", ",", "all_electron", "]", ")", "if", "kwargs", ".", "pop", "(", "'probe'", ",", "False", ")", ":", "return", "else", ":", "return", "func", "(", "name", ",", "*", "*", "kwargs", ")" ]
https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/procrouting/proc.py#L550-L577
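A hedged sketch of the call pattern this selector supports: probe=True only validates that some module can satisfy the current option combination (returning None or raising ManagedMethodError), while a plain call dispatches to run_occ_gradient or run_dfocc_gradient. The managed_gradient wrapper below is illustrative, not psi4's actual driver code.

def managed_gradient(name, **kwargs):
    # Dry run: raises ManagedMethodError if no module handles the
    # current REFERENCE / MP_TYPE / QC_MODULE / FREEZE_CORE combination.
    select_mp2p5_gradient(name, probe=True, **kwargs)
    # Combination is viable; dispatch to the chosen module for real.
    return select_mp2p5_gradient(name, **kwargs)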
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/llvmlite/binding/ffi.py
python
ret_string
(ptr)
To wrap string return-value from C-API.
To wrap string return-value from C-API.
[ "To", "wrap", "string", "return", "-", "value", "from", "C", "-", "API", "." ]
def ret_string(ptr):
    """To wrap string return-value from C-API.
    """
    if ptr is not None:
        return str(OutputString.from_return(ptr))
[ "def", "ret_string", "(", "ptr", ")", ":", "if", "ptr", "is", "not", "None", ":", "return", "str", "(", "OutputString", ".", "from_return", "(", "ptr", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/llvmlite/binding/ffi.py#L228-L232
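A sketch of the call-site pattern ret_string() exists for: taking ownership of a char* returned by an LLVMPY C entry point and copying it into a Python str (OutputString.from_return presumably also releases the C buffer). The C symbol below is hypothetical, used only to show the shape of the call.

from llvmlite.binding import ffi

def describe_target():
    # LLVMPY_DescribeTarget is a made-up symbol standing in for any
    # C-API function whose return value is a freshly allocated string.
    ptr = ffi.lib.LLVMPY_DescribeTarget()
    return ffi.ret_string(ptr)  # returns None when ptr is NULL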