Dataset schema (column name, feature type, min/max string length):

nwo                 stringlengths   5 / 86
sha                 stringlengths   40 / 40
path                stringlengths   4 / 189
language            stringclasses   1 value
identifier          stringlengths   1 / 94
parameters          stringlengths   2 / 4.03k
argument_list       stringclasses   1 value
return_statement    stringlengths   0 / 11.5k
docstring           stringlengths   1 / 33.2k
docstring_summary   stringlengths   0 / 5.15k
docstring_tokens    sequence
function            stringlengths   34 / 151k
function_tokens     sequence
url                 stringlengths   90 / 278
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/lib-tk/Tkinter.py
python
PanedWindow.add
(self, child, **kw)
Add a child widget to the panedwindow in a new pane. The child argument is the name of the child widget followed by pairs of arguments that specify how to manage the windows. The possible options and values are the ones accepted by the paneconfigure method.
Add a child widget to the panedwindow in a new pane.
[ "Add", "a", "child", "widget", "to", "the", "panedwindow", "in", "a", "new", "pane", "." ]
def add(self, child, **kw): """Add a child widget to the panedwindow in a new pane. The child argument is the name of the child widget followed by pairs of arguments that specify how to manage the windows. The possible options and values are the ones accepted by the paneconfigure method. """ self.tk.call((self._w, 'add', child) + self._options(kw))
[ "def", "add", "(", "self", ",", "child", ",", "*", "*", "kw", ")", ":", "self", ".", "tk", ".", "call", "(", "(", "self", ".", "_w", ",", "'add'", ",", "child", ")", "+", "self", ".", "_options", "(", "kw", ")", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/lib-tk/Tkinter.py#L3647-L3655
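For orientation, a minimal usage sketch of PanedWindow.add, written for Python 3's tkinter (the row above shows the Python 2 lib-tk source; widget names and options here are illustrative):

    import tkinter as tk

    root = tk.Tk()
    pw = tk.PanedWindow(root, orient=tk.HORIZONTAL)
    pw.pack(fill=tk.BOTH, expand=True)

    # Each call to add() creates a new pane; keyword options are the
    # ones accepted by paneconfigure (e.g. minsize, padx, sticky).
    pw.add(tk.Label(pw, text="left pane"), minsize=100)
    pw.add(tk.Label(pw, text="right pane"))

    root.mainloop()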
Sigil-Ebook/Sigil
0d145d3a4874b4a26f7aabd68dbd9d18a2402e52
src/Resource_Files/plugin_launchers/python/sigil_bs4/element.py
python
Tag.__getitem__
(self, key)
return self.attrs[key]
tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.
tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.
[ "tag", "[", "key", "]", "returns", "the", "value", "of", "the", "key", "attribute", "for", "the", "tag", "and", "throws", "an", "exception", "if", "it", "s", "not", "there", "." ]
def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self.attrs[key]
[ "def", "__getitem__", "(", "self", ",", "key", ")", ":", "return", "self", ".", "attrs", "[", "key", "]" ]
https://github.com/Sigil-Ebook/Sigil/blob/0d145d3a4874b4a26f7aabd68dbd9d18a2402e52/src/Resource_Files/plugin_launchers/python/sigil_bs4/element.py#L990-L993
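A minimal sketch of the same lookup through standard BeautifulSoup (the row above shows Sigil's vendored sigil_bs4 copy; the markup is made up for illustration):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<a href="https://example.com">link</a>', "html.parser")
    tag = soup.a
    print(tag["href"])        # -> https://example.com
    # tag["missing"] raises KeyError, as the docstring describes.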
yushroom/FishEngine
a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9
Script/reflect/clang/cindex.py
python
Cursor.lexical_parent
(self)
return self._lexical_parent
Return the lexical parent for this cursor.
Return the lexical parent for this cursor.
[ "Return", "the", "lexical", "parent", "for", "this", "cursor", "." ]
def lexical_parent(self): """Return the lexical parent for this cursor.""" if not hasattr(self, '_lexical_parent'): self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self) return self._lexical_parent
[ "def", "lexical_parent", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_lexical_parent'", ")", ":", "self", ".", "_lexical_parent", "=", "conf", ".", "lib", ".", "clang_getCursorLexicalParent", "(", "self", ")", "return", "self", ".", "_lexical_parent" ]
https://github.com/yushroom/FishEngine/blob/a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9/Script/reflect/clang/cindex.py#L1612-L1617
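A hedged sketch of how the property is typically used with the clang Python bindings; it assumes libclang and the clang package are installed and that a file named sample.c exists (both assumptions, not taken from the row):

    from clang.cindex import Index, CursorKind

    index = Index.create()
    tu = index.parse("sample.c")
    for cursor in tu.cursor.walk_preorder():
        if cursor.kind == CursorKind.FUNCTION_DECL:
            # The first access calls clang_getCursorLexicalParent; later
            # accesses return the cached _lexical_parent, per the code above.
            print(cursor.spelling, "->", cursor.lexical_parent.spelling)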
intel/caffe
3f494b442ee3f9d17a07b09ecbd5fa2bbda00836
examples/rfcn/tools/train_faster_rcnn_alt_opt.py
python
parse_args
()
return args
Parse input arguments
Parse input arguments
[ "Parse", "input", "arguments" ]
def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='Train a Faster R-CNN network') parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]', default=0, type=int) parser.add_argument('--net_name', dest='net_name', help='network name (e.g., "ZF")', default=None, type=str) parser.add_argument('--weights', dest='pretrained_model', help='initialize with pretrained model weights', default=None, type=str) parser.add_argument('--cfg', dest='cfg_file', help='optional config file', default=None, type=str) parser.add_argument('--imdb', dest='imdb_name', help='dataset to train on', default='voc_2007_trainval', type=str) parser.add_argument('--set', dest='set_cfgs', help='set config keys', default=None, nargs=argparse.REMAINDER) if len(sys.argv) == 1: parser.print_help() sys.exit(1) args = parser.parse_args() return args
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Train a Faster R-CNN network'", ")", "parser", ".", "add_argument", "(", "'--gpu'", ",", "dest", "=", "'gpu_id'", ",", "help", "=", "'GPU device id to use [0]'", ",", "default", "=", "0", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--net_name'", ",", "dest", "=", "'net_name'", ",", "help", "=", "'network name (e.g., \"ZF\")'", ",", "default", "=", "None", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--weights'", ",", "dest", "=", "'pretrained_model'", ",", "help", "=", "'initialize with pretrained model weights'", ",", "default", "=", "None", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--cfg'", ",", "dest", "=", "'cfg_file'", ",", "help", "=", "'optional config file'", ",", "default", "=", "None", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--imdb'", ",", "dest", "=", "'imdb_name'", ",", "help", "=", "'dataset to train on'", ",", "default", "=", "'voc_2007_trainval'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--set'", ",", "dest", "=", "'set_cfgs'", ",", "help", "=", "'set config keys'", ",", "default", "=", "None", ",", "nargs", "=", "argparse", ".", "REMAINDER", ")", "if", "len", "(", "sys", ".", "argv", ")", "==", "1", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "1", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "args" ]
https://github.com/intel/caffe/blob/3f494b442ee3f9d17a07b09ecbd5fa2bbda00836/examples/rfcn/tools/train_faster_rcnn_alt_opt.py#L29-L58
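A small sketch of how the '--set' flag's nargs=argparse.REMAINDER behaves in isolation (the argument values are made up):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', dest='gpu_id', default=0, type=int)
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER)

    args = parser.parse_args(['--gpu', '1', '--set', 'TRAIN.SCALES', '[400]'])
    print(args.gpu_id)    # -> 1
    print(args.set_cfgs)  # -> ['TRAIN.SCALES', '[400]'] (everything after --set)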
Yelp/MOE
5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c
moe/optimal_learning/python/cpp_wrappers/log_likelihood.py
python
GaussianProcessLogLikelihood.compute_log_likelihood
(self)
return C_GP.compute_log_likelihood( cpp_utils.cppify(self._points_sampled), cpp_utils.cppify(self._points_sampled_value), self.dim, self._num_sampled, self.objective_type, cpp_utils.cppify_hyperparameters(self.hyperparameters), cpp_utils.cppify(self._points_sampled_noise_variance), )
r"""Compute the objective_type measure at the specified hyperparameters. :return: value of log_likelihood evaluated at hyperparameters (``LL(y | X, \theta)``) :rtype: float64
r"""Compute the objective_type measure at the specified hyperparameters.
[ "r", "Compute", "the", "objective_type", "measure", "at", "the", "specified", "hyperparameters", "." ]
def compute_log_likelihood(self): r"""Compute the objective_type measure at the specified hyperparameters. :return: value of log_likelihood evaluated at hyperparameters (``LL(y | X, \theta)``) :rtype: float64 """ return C_GP.compute_log_likelihood( cpp_utils.cppify(self._points_sampled), cpp_utils.cppify(self._points_sampled_value), self.dim, self._num_sampled, self.objective_type, cpp_utils.cppify_hyperparameters(self.hyperparameters), cpp_utils.cppify(self._points_sampled_noise_variance), )
[ "def", "compute_log_likelihood", "(", "self", ")", ":", "return", "C_GP", ".", "compute_log_likelihood", "(", "cpp_utils", ".", "cppify", "(", "self", ".", "_points_sampled", ")", ",", "cpp_utils", ".", "cppify", "(", "self", ".", "_points_sampled_value", ")", ",", "self", ".", "dim", ",", "self", ".", "_num_sampled", ",", "self", ".", "objective_type", ",", "cpp_utils", ".", "cppify_hyperparameters", "(", "self", ".", "hyperparameters", ")", ",", "cpp_utils", ".", "cppify", "(", "self", ".", "_points_sampled_noise_variance", ")", ",", ")" ]
https://github.com/Yelp/MOE/blob/5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c/moe/optimal_learning/python/cpp_wrappers/log_likelihood.py#L303-L318
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/scipy/io/idl.py
python
_read_uint32
(f)
return np.uint32(struct.unpack('>I', f.read(4))[0])
Read an unsigned 32-bit integer
Read an unsigned 32-bit integer
[ "Read", "an", "unsigned", "32", "-", "bit", "integer" ]
def _read_uint32(f): '''Read an unsigned 32-bit integer''' return np.uint32(struct.unpack('>I', f.read(4))[0])
[ "def", "_read_uint32", "(", "f", ")", ":", "return", "np", ".", "uint32", "(", "struct", ".", "unpack", "(", "'>I'", ",", "f", ".", "read", "(", "4", ")", ")", "[", "0", "]", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/io/idl.py#L126-L128
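A self-contained sketch of the same big-endian read against an in-memory buffer (the buffer value is illustrative):

    import io
    import struct
    import numpy as np

    def _read_uint32(f):
        '''Read an unsigned 32-bit integer (big-endian).'''
        return np.uint32(struct.unpack('>I', f.read(4))[0])

    buf = io.BytesIO(struct.pack('>I', 0x12345678))
    print(_read_uint32(buf))   # -> 305419896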
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/polynomial/polyutils.py
python
getdomain
(x)
Return a domain suitable for given abscissae. Find a domain suitable for a polynomial or Chebyshev series defined at the values supplied. Parameters ---------- x : array_like 1-d array of abscissae whose domain will be determined. Returns ------- domain : ndarray 1-d array containing two values. If the inputs are complex, then the two returned points are the lower left and upper right corners of the smallest rectangle (aligned with the axes) in the complex plane containing the points `x`. If the inputs are real, then the two points are the ends of the smallest interval containing the points `x`. See Also -------- mapparms, mapdomain Examples -------- >>> from numpy.polynomial import polyutils as pu >>> points = np.arange(4)**2 - 5; points array([-5, -4, -1, 4]) >>> pu.getdomain(points) array([-5., 4.]) >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle >>> pu.getdomain(c) array([-1.-1.j, 1.+1.j])
Return a domain suitable for given abscissae.
[ "Return", "a", "domain", "suitable", "for", "given", "abscissae", "." ]
def getdomain(x): """ Return a domain suitable for given abscissae. Find a domain suitable for a polynomial or Chebyshev series defined at the values supplied. Parameters ---------- x : array_like 1-d array of abscissae whose domain will be determined. Returns ------- domain : ndarray 1-d array containing two values. If the inputs are complex, then the two returned points are the lower left and upper right corners of the smallest rectangle (aligned with the axes) in the complex plane containing the points `x`. If the inputs are real, then the two points are the ends of the smallest interval containing the points `x`. See Also -------- mapparms, mapdomain Examples -------- >>> from numpy.polynomial import polyutils as pu >>> points = np.arange(4)**2 - 5; points array([-5, -4, -1, 4]) >>> pu.getdomain(points) array([-5., 4.]) >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle >>> pu.getdomain(c) array([-1.-1.j, 1.+1.j]) """ [x] = as_series([x], trim=False) if x.dtype.char in np.typecodes['Complex']: rmin, rmax = x.real.min(), x.real.max() imin, imax = x.imag.min(), x.imag.max() return np.array((complex(rmin, imin), complex(rmax, imax))) else: return np.array((x.min(), x.max()))
[ "def", "getdomain", "(", "x", ")", ":", "[", "x", "]", "=", "as_series", "(", "[", "x", "]", ",", "trim", "=", "False", ")", "if", "x", ".", "dtype", ".", "char", "in", "np", ".", "typecodes", "[", "'Complex'", "]", ":", "rmin", ",", "rmax", "=", "x", ".", "real", ".", "min", "(", ")", ",", "x", ".", "real", ".", "max", "(", ")", "imin", ",", "imax", "=", "x", ".", "imag", ".", "min", "(", ")", ",", "x", ".", "imag", ".", "max", "(", ")", "return", "np", ".", "array", "(", "(", "complex", "(", "rmin", ",", "imin", ")", ",", "complex", "(", "rmax", ",", "imax", ")", ")", ")", "else", ":", "return", "np", ".", "array", "(", "(", "x", ".", "min", "(", ")", ",", "x", ".", "max", "(", ")", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/polynomial/polyutils.py#L258-L302
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/masked/maskededit.py
python
MaskedEditMixin._setFont
(self)
Set the control's font typeface -- pass the font name as str.
Set the control's font typeface -- pass the font name as str.
[ "Set", "the", "control", "s", "font", "typeface", "--", "pass", "the", "font", "name", "as", "str", "." ]
def _setFont(self): """ Set the control's font typeface -- pass the font name as str.""" #### dbg('MaskedEditMixin::_setFont', indent=1) if not self._useFixedWidthFont: self._font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT) else: font = self.GetFont() # get size, weight, etc from current font points = font.GetPointSize() if 'wxMac' in wx.PlatformInfo \ and self.GetWindowVariant() == wx.WINDOW_VARIANT_MINI: points -= 1 # Set to teletype font (guaranteed to be mappable to all wxWindows # platforms: self._font = wx.Font( points, wx.TELETYPE, font.GetStyle(), font.GetWeight(), font.GetUnderlined()) #### dbg('font string: "%s"' % font.GetNativeFontInfo().ToString()) self.SetFont(self._font)
[ "def", "_setFont", "(", "self", ")", ":", "#### dbg('MaskedEditMixin::_setFont', indent=1)", "if", "not", "self", ".", "_useFixedWidthFont", ":", "self", ".", "_font", "=", "wx", ".", "SystemSettings_GetFont", "(", "wx", ".", "SYS_DEFAULT_GUI_FONT", ")", "else", ":", "font", "=", "self", ".", "GetFont", "(", ")", "# get size, weight, etc from current font", "points", "=", "font", ".", "GetPointSize", "(", ")", "if", "'wxMac'", "in", "wx", ".", "PlatformInfo", "and", "self", ".", "GetWindowVariant", "(", ")", "==", "wx", ".", "WINDOW_VARIANT_MINI", ":", "points", "-=", "1", "# Set to teletype font (guaranteed to be mappable to all wxWindows", "# platforms:", "self", ".", "_font", "=", "wx", ".", "Font", "(", "points", ",", "wx", ".", "TELETYPE", ",", "font", ".", "GetStyle", "(", ")", ",", "font", ".", "GetWeight", "(", ")", ",", "font", ".", "GetUnderlined", "(", ")", ")", "#### dbg('font string: \"%s\"' % font.GetNativeFontInfo().ToString())", "self", ".", "SetFont", "(", "self", ".", "_font", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/masked/maskededit.py#L2906-L2924
casadi/casadi
8d0f80a4d0fe2054384bfb9748f7a0f6bae540ff
misc/cpplint.py
python
CheckForMultilineCommentsAndStrings
(filename, clean_lines, linenum, error)
Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. Otherwise, we prefer // comments, so it's ok to warn about the other. Likewise, it's ok for strings to extend across multiple lines, as long as a line continuation character (backslash) terminates each line. Although not currently prohibited by the C++ style guide, it's ugly and unnecessary. We don't do well with either in this lint program, so we warn about both. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Logs an error if we see /* ... */ or "..." that extend past one line.
[ "Logs", "an", "error", "if", "we", "see", "/", "*", "...", "*", "/", "or", "...", "that", "extend", "past", "one", "line", "." ]
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): """Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. Otherwise, we prefer // comments, so it's ok to warn about the other. Likewise, it's ok for strings to extend across multiple lines, as long as a line continuation character (backslash) terminates each line. Although not currently prohibited by the C++ style guide, it's ugly and unnecessary. We don't do well with either in this lint program, so we warn about both. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remove all \\ (escaped backslashes) from the line. They are OK, and the # second (escaped) slash may trigger later \" detection erroneously. line = line.replace('\\\\', '') if line.count('/*') > line.count('*/'): error(filename, linenum, 'readability/multiline_comment', 5, 'Complex multi-line /*...*/-style comment found. ' 'Lint may give bogus warnings. ' 'Consider replacing these with //-style comments, ' 'with #if 0...#endif, ' 'or with more clearly structured multi-line comments.') if (line.count('"') - line.count('\\"')) % 2: error(filename, linenum, 'readability/multiline_string', 5, 'Multi-line string ("...") found. This lint script doesn\'t ' 'do well with such strings, and may give bogus warnings. ' 'Use C++11 raw strings or concatenation instead.')
[ "def", "CheckForMultilineCommentsAndStrings", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Remove all \\\\ (escaped backslashes) from the line. They are OK, and the", "# second (escaped) slash may trigger later \\\" detection erroneously.", "line", "=", "line", ".", "replace", "(", "'\\\\\\\\'", ",", "''", ")", "if", "line", ".", "count", "(", "'/*'", ")", ">", "line", ".", "count", "(", "'*/'", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/multiline_comment'", ",", "5", ",", "'Complex multi-line /*...*/-style comment found. '", "'Lint may give bogus warnings. '", "'Consider replacing these with //-style comments, '", "'with #if 0...#endif, '", "'or with more clearly structured multi-line comments.'", ")", "if", "(", "line", ".", "count", "(", "'\"'", ")", "-", "line", ".", "count", "(", "'\\\\\"'", ")", ")", "%", "2", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/multiline_string'", ",", "5", ",", "'Multi-line string (\"...\") found. This lint script doesn\\'t '", "'do well with such strings, and may give bogus warnings. '", "'Use C++11 raw strings or concatenation instead.'", ")" ]
https://github.com/casadi/casadi/blob/8d0f80a4d0fe2054384bfb9748f7a0f6bae540ff/misc/cpplint.py#L1520-L1555
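A stripped-down sketch of the two detections outside the cpplint framework (the helper name and sample lines are made up):

    def multiline_candidates(line):
        """Return (open_comment, odd_quotes) flags for a single elided line."""
        line = line.replace('\\\\', '')                       # drop escaped backslashes first
        open_comment = line.count('/*') > line.count('*/')    # /* without a closing */
        odd_quotes = (line.count('"') - line.count('\\"')) % 2 == 1
        return open_comment, odd_quotes

    print(multiline_candidates('int x; /* starts a multi-line comment'))   # (True, False)
    print(multiline_candidates('const char* s = "unterminated'))           # (False, True)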
facebookincubator/BOLT
88c70afe9d388ad430cc150cc158641701397f70
lldb/examples/python/gdbremote.py
python
RegisterInfo.bit_size
(self)
return 0
Get the size in bits of the register.
Get the size in bits of the register.
[ "Get", "the", "size", "in", "bits", "of", "the", "register", "." ]
def bit_size(self): '''Get the size in bits of the register.''' if self.info and 'bitsize' in self.info: return int(self.info['bitsize']) return 0
[ "def", "bit_size", "(", "self", ")", ":", "if", "self", ".", "info", "and", "'bitsize'", "in", "self", ".", "info", ":", "return", "int", "(", "self", ".", "info", "[", "'bitsize'", "]", ")", "return", "0" ]
https://github.com/facebookincubator/BOLT/blob/88c70afe9d388ad430cc150cc158641701397f70/lldb/examples/python/gdbremote.py#L356-L360
okex/V3-Open-API-SDK
c5abb0db7e2287718e0055e17e57672ce0ec7fd9
okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/pkg_resources/__init__.py
python
register_finder
(importer_type, distribution_finder)
Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.
Register `distribution_finder` to find distributions in sys.path items
[ "Register", "distribution_finder", "to", "find", "distributions", "in", "sys", ".", "path", "items" ]
def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder
[ "def", "register_finder", "(", "importer_type", ",", "distribution_finder", ")", ":", "_distribution_finders", "[", "importer_type", "]", "=", "distribution_finder" ]
https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/pkg_resources/__init__.py#L1860-L1867
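A hedged sketch of registering a finder for a hypothetical importer type; MyPathHandler and find_nothing are made-up names, and the finder signature mirrors pkg_resources.find_on_path:

    import pkg_resources

    class MyPathHandler:
        """Hypothetical PEP 302 sys.path item handler."""

    def find_nothing(importer, path_item, only=False):
        return iter(())   # yields no Distribution instances

    pkg_resources.register_finder(MyPathHandler, find_nothing)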
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/importlib_metadata/__init__.py
python
Distribution.version
(self)
return self.metadata['Version']
Return the 'Version' metadata for the distribution package.
Return the 'Version' metadata for the distribution package.
[ "Return", "the", "Version", "metadata", "for", "the", "distribution", "package", "." ]
def version(self): """Return the 'Version' metadata for the distribution package.""" return self.metadata['Version']
[ "def", "version", "(", "self", ")", ":", "return", "self", ".", "metadata", "[", "'Version'", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/importlib_metadata/__init__.py#L262-L264
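For comparison, the stdlib equivalent of the vendored backport shown above (assumes pip is installed in the environment):

    from importlib.metadata import version

    print(version("pip"))   # e.g. '23.0', read from the 'Version' metadata field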
gimli-org/gimli
17aa2160de9b15ababd9ef99e89b1bc3277bbb23
pygimli/solver/solver.py
python
crankNicolson
(times, S, I, f=None, u0=None, theta=1.0, dirichlet=None, solver=None, progress=None)
return u
Generic Crank Nicolson solver for time dependend problems. Limitations so far: S = Needs to be constant over time (i.e. no change in model coefficients) f = constant over time (would need assembling in every step) Args ---- times: iterable(float) Timeteps to solve for. Give at least 2. S: Matrix Systemmatrix holds your discrete equations and boundary conditions I: Matrix Identity matrix (FD, FV) or Masselementmatrix (FE) to handle solution vector u0: iterable [None] Starting condition. zero if not given f: iterable (float) [None] External forces. Note f might also contain compensation values due to algebraic Dirichlet correction of S theta: float [1.0] * 0: Backward difference scheme (implicit) * 1: Forward difference scheme (explicit) strong time steps dependency .. will be unstable for to small values * 0.5: probably best tradeoff but can also be unstable dirichlet: dirichlet generator Genertor object to applay dirichlet boundary conditions solver: LinSolver [None] Provide a pre configured solver if you want some special. progress: Progress [None] Provide progress object if you want to see some. Returns ------- np.ndarray: Solution for each time steps
Generic Crank Nicolson solver for time dependend problems.
[ "Generic", "Crank", "Nicolson", "solver", "for", "time", "dependend", "problems", "." ]
def crankNicolson(times, S, I, f=None, u0=None, theta=1.0, dirichlet=None, solver=None, progress=None): """Generic Crank Nicolson solver for time dependend problems. Limitations so far: S = Needs to be constant over time (i.e. no change in model coefficients) f = constant over time (would need assembling in every step) Args ---- times: iterable(float) Timeteps to solve for. Give at least 2. S: Matrix Systemmatrix holds your discrete equations and boundary conditions I: Matrix Identity matrix (FD, FV) or Masselementmatrix (FE) to handle solution vector u0: iterable [None] Starting condition. zero if not given f: iterable (float) [None] External forces. Note f might also contain compensation values due to algebraic Dirichlet correction of S theta: float [1.0] * 0: Backward difference scheme (implicit) * 1: Forward difference scheme (explicit) strong time steps dependency .. will be unstable for to small values * 0.5: probably best tradeoff but can also be unstable dirichlet: dirichlet generator Genertor object to applay dirichlet boundary conditions solver: LinSolver [None] Provide a pre configured solver if you want some special. progress: Progress [None] Provide progress object if you want to see some. Returns ------- np.ndarray: Solution for each time steps """ if len(times) < 2: raise BaseException("We need at least 2 times for " "Crank-Nicolsen time discretization." + str(len(times))) # sw = pg.core.Stopwatch(True) timeAssemble = [] timeSolve = [] timeMeasure = False if progress: timeMeasure = True dof = S.rows() rhs = np.zeros((len(times), dof)) if f is not None: rhs[:] = f u = np.zeros((len(times), dof)) if u0 is not None: u[0, :] = u0 if theta == 0: A = I.copy() if solver is None: solver = pg.solver.LinSolver(solver='scipy') dt = 0.0 for n in range(1, len(times)): newDt = times[n] - times[n-1] if abs(newDt - dt) > 1e-8: ## new dt, so we need to factorize the matrix again dt = newDt #pg.info('dt', dt) A = I + S * (dt * theta) if dirichlet is not None: dirichlet.apply(A) solver.factorize(A) St = None if timeMeasure: pg.tic(key='CrankNicolsonLoop') if theta == 0: if St is None: St = I - S * dt # cache what's possible b = St * u[n-1] + dt * rhs[n-1] elif theta == 1: b = I * u[n-1] + dt * rhs[n] else: if St is None: St = I - S *(dt*(1.-theta)) # cache what's possible b = St * u[n-1] + dt * ((1.0 - theta) * rhs[n-1] + theta * rhs[n]) if dirichlet is not None: dirichlet.apply(b) if timeMeasure: timeAssemble.append(pg.dur(key='CrankNicolsonLoop', reset=True)) u[n, :] = solver(b) if timeMeasure: timeSolve.append(pg.dur(key='CrankNicolsonLoop')) if progress: progress.update(n, 't_prep: ' + pg.pf(timeAssemble[-1]*1000) + 'ms ' + \ 't_step: ' + pg.pf(timeSolve[-1]*1000) + 'ms') #if verbose and (n % verbose == 0): ## print(min(u[n]), max(u[n])) #print("timesteps:", n, "/", len(times), #'runtime:', sw.duration(), "s", #'assemble:', np.mean(timeAssemble), #'solve:', np.mean(timeSolve)) return u
[ "def", "crankNicolson", "(", "times", ",", "S", ",", "I", ",", "f", "=", "None", ",", "u0", "=", "None", ",", "theta", "=", "1.0", ",", "dirichlet", "=", "None", ",", "solver", "=", "None", ",", "progress", "=", "None", ")", ":", "if", "len", "(", "times", ")", "<", "2", ":", "raise", "BaseException", "(", "\"We need at least 2 times for \"", "\"Crank-Nicolsen time discretization.\"", "+", "str", "(", "len", "(", "times", ")", ")", ")", "# sw = pg.core.Stopwatch(True)", "timeAssemble", "=", "[", "]", "timeSolve", "=", "[", "]", "timeMeasure", "=", "False", "if", "progress", ":", "timeMeasure", "=", "True", "dof", "=", "S", ".", "rows", "(", ")", "rhs", "=", "np", ".", "zeros", "(", "(", "len", "(", "times", ")", ",", "dof", ")", ")", "if", "f", "is", "not", "None", ":", "rhs", "[", ":", "]", "=", "f", "u", "=", "np", ".", "zeros", "(", "(", "len", "(", "times", ")", ",", "dof", ")", ")", "if", "u0", "is", "not", "None", ":", "u", "[", "0", ",", ":", "]", "=", "u0", "if", "theta", "==", "0", ":", "A", "=", "I", ".", "copy", "(", ")", "if", "solver", "is", "None", ":", "solver", "=", "pg", ".", "solver", ".", "LinSolver", "(", "solver", "=", "'scipy'", ")", "dt", "=", "0.0", "for", "n", "in", "range", "(", "1", ",", "len", "(", "times", ")", ")", ":", "newDt", "=", "times", "[", "n", "]", "-", "times", "[", "n", "-", "1", "]", "if", "abs", "(", "newDt", "-", "dt", ")", ">", "1e-8", ":", "## new dt, so we need to factorize the matrix again", "dt", "=", "newDt", "#pg.info('dt', dt)", "A", "=", "I", "+", "S", "*", "(", "dt", "*", "theta", ")", "if", "dirichlet", "is", "not", "None", ":", "dirichlet", ".", "apply", "(", "A", ")", "solver", ".", "factorize", "(", "A", ")", "St", "=", "None", "if", "timeMeasure", ":", "pg", ".", "tic", "(", "key", "=", "'CrankNicolsonLoop'", ")", "if", "theta", "==", "0", ":", "if", "St", "is", "None", ":", "St", "=", "I", "-", "S", "*", "dt", "# cache what's possible", "b", "=", "St", "*", "u", "[", "n", "-", "1", "]", "+", "dt", "*", "rhs", "[", "n", "-", "1", "]", "elif", "theta", "==", "1", ":", "b", "=", "I", "*", "u", "[", "n", "-", "1", "]", "+", "dt", "*", "rhs", "[", "n", "]", "else", ":", "if", "St", "is", "None", ":", "St", "=", "I", "-", "S", "*", "(", "dt", "*", "(", "1.", "-", "theta", ")", ")", "# cache what's possible", "b", "=", "St", "*", "u", "[", "n", "-", "1", "]", "+", "dt", "*", "(", "(", "1.0", "-", "theta", ")", "*", "rhs", "[", "n", "-", "1", "]", "+", "theta", "*", "rhs", "[", "n", "]", ")", "if", "dirichlet", "is", "not", "None", ":", "dirichlet", ".", "apply", "(", "b", ")", "if", "timeMeasure", ":", "timeAssemble", ".", "append", "(", "pg", ".", "dur", "(", "key", "=", "'CrankNicolsonLoop'", ",", "reset", "=", "True", ")", ")", "u", "[", "n", ",", ":", "]", "=", "solver", "(", "b", ")", "if", "timeMeasure", ":", "timeSolve", ".", "append", "(", "pg", ".", "dur", "(", "key", "=", "'CrankNicolsonLoop'", ")", ")", "if", "progress", ":", "progress", ".", "update", "(", "n", ",", "'t_prep: '", "+", "pg", ".", "pf", "(", "timeAssemble", "[", "-", "1", "]", "*", "1000", ")", "+", "'ms '", "+", "'t_step: '", "+", "pg", ".", "pf", "(", "timeSolve", "[", "-", "1", "]", "*", "1000", ")", "+", "'ms'", ")", "#if verbose and (n % verbose == 0):", "## print(min(u[n]), max(u[n]))", "#print(\"timesteps:\", n, \"/\", len(times),", "#'runtime:', sw.duration(), \"s\",", "#'assemble:', np.mean(timeAssemble),", "#'solve:', np.mean(timeSolve))", "return", "u" ]
https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/solver/solver.py#L2547-L2667
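Restating what each pass of the loop above assembles and solves (a worked summary of the code, not text from the docstring), with dt = times[n] - times[n-1] and f = rhs:

    (I + \theta \, \Delta t \, S) \, u_n = (I - (1 - \theta) \, \Delta t \, S) \, u_{n-1} + \Delta t \left( (1 - \theta) f_{n-1} + \theta f_n \right)

The factorized matrix A = I + theta * dt * S is reused as long as dt stays constant (the abs(newDt - dt) > 1e-8 check), and theta = 0.5 averages the two endpoints, which is the classical Crank-Nicolson choice.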
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/boringssl/src/util/generate_build_files.py
python
ArchForAsmFilename
(filename)
Returns the architectures that a given asm file should be compiled for based on substrings in the filename.
Returns the architectures that a given asm file should be compiled for based on substrings in the filename.
[ "Returns", "the", "architectures", "that", "a", "given", "asm", "file", "should", "be", "compiled", "for", "based", "on", "substrings", "in", "the", "filename", "." ]
def ArchForAsmFilename(filename): """Returns the architectures that a given asm file should be compiled for based on substrings in the filename.""" if 'x86_64' in filename or 'avx2' in filename: return ['x86_64'] elif ('x86' in filename and 'x86_64' not in filename) or '586' in filename: return ['x86'] elif 'armx' in filename: return ['arm', 'aarch64'] elif 'armv8' in filename: return ['aarch64'] elif 'arm' in filename: return ['arm'] else: raise ValueError('Unknown arch for asm filename: ' + filename)
[ "def", "ArchForAsmFilename", "(", "filename", ")", ":", "if", "'x86_64'", "in", "filename", "or", "'avx2'", "in", "filename", ":", "return", "[", "'x86_64'", "]", "elif", "(", "'x86'", "in", "filename", "and", "'x86_64'", "not", "in", "filename", ")", "or", "'586'", "in", "filename", ":", "return", "[", "'x86'", "]", "elif", "'armx'", "in", "filename", ":", "return", "[", "'arm'", ",", "'aarch64'", "]", "elif", "'armv8'", "in", "filename", ":", "return", "[", "'aarch64'", "]", "elif", "'arm'", "in", "filename", ":", "return", "[", "'arm'", "]", "else", ":", "raise", "ValueError", "(", "'Unknown arch for asm filename: '", "+", "filename", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/boringssl/src/util/generate_build_files.py#L566-L581
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py
python
SBTarget.FindWatchpointByID
(self, *args)
return _lldb.SBTarget_FindWatchpointByID(self, *args)
FindWatchpointByID(self, watch_id_t watch_id) -> SBWatchpoint
FindWatchpointByID(self, watch_id_t watch_id) -> SBWatchpoint
[ "FindWatchpointByID", "(", "self", "watch_id_t", "watch_id", ")", "-", ">", "SBWatchpoint" ]
def FindWatchpointByID(self, *args): """FindWatchpointByID(self, watch_id_t watch_id) -> SBWatchpoint""" return _lldb.SBTarget_FindWatchpointByID(self, *args)
[ "def", "FindWatchpointByID", "(", "self", ",", "*", "args", ")", ":", "return", "_lldb", ".", "SBTarget_FindWatchpointByID", "(", "self", ",", "*", "args", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L9209-L9211
p4lang/behavioral-model
81ce0163f0770c6b9d6056a28ce2e0cc035bb6e9
tools/cpplint.py
python
IsDecltype
(clean_lines, linenum, column)
return False
Check if the token ending on (linenum, column) is decltype(). Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is decltype() expression, False otherwise.
Check if the token ending on (linenum, column) is decltype().
[ "Check", "if", "the", "token", "ending", "on", "(", "linenum", "column", ")", "is", "decltype", "()", "." ]
def IsDecltype(clean_lines, linenum, column): """Check if the token ending on (linenum, column) is decltype(). Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is decltype() expression, False otherwise. """ (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) if start_col < 0: return False if Search(r'\bdecltype\s*$', text[0:start_col]): return True return False
[ "def", "IsDecltype", "(", "clean_lines", ",", "linenum", ",", "column", ")", ":", "(", "text", ",", "_", ",", "start_col", ")", "=", "ReverseCloseExpression", "(", "clean_lines", ",", "linenum", ",", "column", ")", "if", "start_col", "<", "0", ":", "return", "False", "if", "Search", "(", "r'\\bdecltype\\s*$'", ",", "text", "[", "0", ":", "start_col", "]", ")", ":", "return", "True", "return", "False" ]
https://github.com/p4lang/behavioral-model/blob/81ce0163f0770c6b9d6056a28ce2e0cc035bb6e9/tools/cpplint.py#L4137-L4152
shogun-toolbox/shogun
9b8d856971af5a295dd6ad70623ae45647a6334c
examples/meta/generator/parse.py
python
FastParser.p_enum
(self, p)
enum : ENUMKEYWORD identifier DOT identifier
enum : ENUMKEYWORD identifier DOT identifier
[ "enum", ":", "ENUMKEYWORD", "identifier", "DOT", "identifier" ]
def p_enum(self, p): "enum : ENUMKEYWORD identifier DOT identifier" p[0] = {"Enum": [p[2], p[4]]}
[ "def", "p_enum", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "{", "\"Enum\"", ":", "[", "p", "[", "2", "]", ",", "p", "[", "4", "]", "]", "}" ]
https://github.com/shogun-toolbox/shogun/blob/9b8d856971af5a295dd6ad70623ae45647a6334c/examples/meta/generator/parse.py#L250-L252
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_gdi.py
python
Locale_AddLanguage
(*args, **kwargs)
return _gdi_.Locale_AddLanguage(*args, **kwargs)
Locale_AddLanguage(LanguageInfo info)
Locale_AddLanguage(LanguageInfo info)
[ "Locale_AddLanguage", "(", "LanguageInfo", "info", ")" ]
def Locale_AddLanguage(*args, **kwargs): """Locale_AddLanguage(LanguageInfo info)""" return _gdi_.Locale_AddLanguage(*args, **kwargs)
[ "def", "Locale_AddLanguage", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gdi_", ".", "Locale_AddLanguage", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_gdi.py#L3144-L3146
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py
python
SBData.GetSignedInt32
(self, *args)
return _lldb.SBData_GetSignedInt32(self, *args)
GetSignedInt32(self, SBError error, offset_t offset) -> int32_t
GetSignedInt32(self, SBError error, offset_t offset) -> int32_t
[ "GetSignedInt32", "(", "self", "SBError", "error", "offset_t", "offset", ")", "-", ">", "int32_t" ]
def GetSignedInt32(self, *args): """GetSignedInt32(self, SBError error, offset_t offset) -> int32_t""" return _lldb.SBData_GetSignedInt32(self, *args)
[ "def", "GetSignedInt32", "(", "self", ",", "*", "args", ")", ":", "return", "_lldb", ".", "SBData_GetSignedInt32", "(", "self", ",", "*", "args", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L2732-L2734
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/ed_vim.py
python
EditraCommander.ParaDown
(self, repeat=1)
Move the caret one paragraph down
Move the caret one paragraph down
[ "Move", "the", "caret", "one", "paragraph", "down" ]
def ParaDown(self, repeat=1): """Move the caret one paragraph down""" for i in range(repeat): self.stc.ParaDown()
[ "def", "ParaDown", "(", "self", ",", "repeat", "=", "1", ")", ":", "for", "i", "in", "range", "(", "repeat", ")", ":", "self", ".", "stc", ".", "ParaDown", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_vim.py#L422-L425
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/stc.py
python
StyledTextCtrl.SetWrapIndentMode
(*args, **kwargs)
return _stc.StyledTextCtrl_SetWrapIndentMode(*args, **kwargs)
SetWrapIndentMode(self, int mode) Sets how wrapped sublines are placed. Default is fixed.
SetWrapIndentMode(self, int mode)
[ "SetWrapIndentMode", "(", "self", "int", "mode", ")" ]
def SetWrapIndentMode(*args, **kwargs): """ SetWrapIndentMode(self, int mode) Sets how wrapped sublines are placed. Default is fixed. """ return _stc.StyledTextCtrl_SetWrapIndentMode(*args, **kwargs)
[ "def", "SetWrapIndentMode", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_SetWrapIndentMode", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/stc.py#L4135-L4141
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/nn/probability/distribution/normal.py
python
Normal.__init__
(self, mean=None, sd=None, seed=None, dtype=mstype.float32, name="Normal")
Constructor of Normal.
Constructor of Normal.
[ "Constructor", "of", "Normal", "." ]
def __init__(self, mean=None, sd=None, seed=None, dtype=mstype.float32, name="Normal"): """ Constructor of Normal. """ param = dict(locals()) param['param_dict'] = {'mean': mean, 'sd': sd} valid_dtype = mstype.float_type Validator.check_type_name( "dtype", dtype, valid_dtype, type(self).__name__) super(Normal, self).__init__(seed, dtype, name, param) self._mean_value = self._add_parameter(mean, 'mean') self._sd_value = self._add_parameter(sd, 'sd') if self._sd_value is not None: check_greater_zero(self._sd_value, "Standard deviation") # ops needed for the class self.exp = exp_generic self.expm1 = P.Expm1() # when the graph kernel mode is enable # use Log directly as akg will handle the corner cases self.log = P.Log() if context.get_context( "enable_graph_kernel") else log_generic self.erf = P.Erf() self.squeeze = P.Squeeze(0) self.cast = P.Cast() self.const = P.ScalarToArray() self.shape = P.Shape() self.sq = P.Square() self.sqrt = P.Sqrt()
[ "def", "__init__", "(", "self", ",", "mean", "=", "None", ",", "sd", "=", "None", ",", "seed", "=", "None", ",", "dtype", "=", "mstype", ".", "float32", ",", "name", "=", "\"Normal\"", ")", ":", "param", "=", "dict", "(", "locals", "(", ")", ")", "param", "[", "'param_dict'", "]", "=", "{", "'mean'", ":", "mean", ",", "'sd'", ":", "sd", "}", "valid_dtype", "=", "mstype", ".", "float_type", "Validator", ".", "check_type_name", "(", "\"dtype\"", ",", "dtype", ",", "valid_dtype", ",", "type", "(", "self", ")", ".", "__name__", ")", "super", "(", "Normal", ",", "self", ")", ".", "__init__", "(", "seed", ",", "dtype", ",", "name", ",", "param", ")", "self", ".", "_mean_value", "=", "self", ".", "_add_parameter", "(", "mean", ",", "'mean'", ")", "self", ".", "_sd_value", "=", "self", ".", "_add_parameter", "(", "sd", ",", "'sd'", ")", "if", "self", ".", "_sd_value", "is", "not", "None", ":", "check_greater_zero", "(", "self", ".", "_sd_value", ",", "\"Standard deviation\"", ")", "# ops needed for the class", "self", ".", "exp", "=", "exp_generic", "self", ".", "expm1", "=", "P", ".", "Expm1", "(", ")", "# when the graph kernel mode is enable", "# use Log directly as akg will handle the corner cases", "self", ".", "log", "=", "P", ".", "Log", "(", ")", "if", "context", ".", "get_context", "(", "\"enable_graph_kernel\"", ")", "else", "log_generic", "self", ".", "erf", "=", "P", ".", "Erf", "(", ")", "self", ".", "squeeze", "=", "P", ".", "Squeeze", "(", "0", ")", "self", ".", "cast", "=", "P", ".", "Cast", "(", ")", "self", ".", "const", "=", "P", ".", "ScalarToArray", "(", ")", "self", ".", "shape", "=", "P", ".", "Shape", "(", ")", "self", ".", "sq", "=", "P", ".", "Square", "(", ")", "self", ".", "sqrt", "=", "P", ".", "Sqrt", "(", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/probability/distribution/normal.py#L159-L193
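A hedged construction sketch following the MindSpore probability API (the package aliases and parameter values are illustrative; assumes MindSpore is installed):

    import mindspore as ms
    import mindspore.nn.probability.distribution as msd

    # A scalar Normal with mean 3.0 and standard deviation 4.0; sd must be > 0,
    # matching the check_greater_zero validation in __init__ above.
    n = msd.Normal(3.0, 4.0, seed=0, dtype=ms.float32)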
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/ao/quantization/_dbr/quantization_state.py
python
AutoQuantizationState.op_convert_after_hook
( self, op: Callable, output, global_op_idx: List[int], )
return output
This function is called aftern an op call in a converted model. TODO: add dequant, if needed
This function is called aftern an op call in a converted model.
[ "This", "function", "is", "called", "aftern", "an", "op", "call", "in", "a", "converted", "model", "." ]
def op_convert_after_hook( self, op: Callable, output, global_op_idx: List[int], ) -> Any: """ This function is called aftern an op call in a converted model. TODO: add dequant, if needed """ if self.log_op_outputs: output_clone = clone_detach_tensor_without_dispatch(output) seen_q_op_info = self._get_cur_seen_q_op_info() self.op_outputs[-1].append( (global_op_idx[0], seen_q_op_info.fqn, seen_q_op_info.type, output_clone)) global_op_idx[0] += 1 return output
[ "def", "op_convert_after_hook", "(", "self", ",", "op", ":", "Callable", ",", "output", ",", "global_op_idx", ":", "List", "[", "int", "]", ",", ")", "->", "Any", ":", "if", "self", ".", "log_op_outputs", ":", "output_clone", "=", "clone_detach_tensor_without_dispatch", "(", "output", ")", "seen_q_op_info", "=", "self", ".", "_get_cur_seen_q_op_info", "(", ")", "self", ".", "op_outputs", "[", "-", "1", "]", ".", "append", "(", "(", "global_op_idx", "[", "0", "]", ",", "seen_q_op_info", ".", "fqn", ",", "seen_q_op_info", ".", "type", ",", "output_clone", ")", ")", "global_op_idx", "[", "0", "]", "+=", "1", "return", "output" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/ao/quantization/_dbr/quantization_state.py#L514-L532
DanielSWolf/rhubarb-lip-sync
5cface0af3b6e4e58c0b829c51561d784fb9f52f
rhubarb/lib/webrtc-8d2248ff/webrtc/tools/barcode_tools/barcode_encoder.py
python
convert_png_to_yuv_barcodes
(input_directory='.', output_directory='.')
return helper_functions.perform_action_on_all_files( input_directory, 'barcode_', 'png', 0, _convert_to_yuv_and_delete, output_directory=output_directory, pattern='barcode_')
Converts PNG barcodes to YUV barcode images. This function reads all the PNG files from the input directory which are in the format frame_xxxx.png, where xxxx is the number of the frame, starting from 0000. The frames should be consecutive numbers. The output YUV file is named frame_xxxx.yuv. The function uses ffmpeg to do the conversion. Args: input_directory(string): The input direcotry to read the PNG barcodes from. output_directory(string): The putput directory to write the YUV files to. Return: (bool): True if the conversion was without errors.
Converts PNG barcodes to YUV barcode images.
[ "Converts", "PNG", "barcodes", "to", "YUV", "barcode", "images", "." ]
def convert_png_to_yuv_barcodes(input_directory='.', output_directory='.'): """Converts PNG barcodes to YUV barcode images. This function reads all the PNG files from the input directory which are in the format frame_xxxx.png, where xxxx is the number of the frame, starting from 0000. The frames should be consecutive numbers. The output YUV file is named frame_xxxx.yuv. The function uses ffmpeg to do the conversion. Args: input_directory(string): The input direcotry to read the PNG barcodes from. output_directory(string): The putput directory to write the YUV files to. Return: (bool): True if the conversion was without errors. """ return helper_functions.perform_action_on_all_files( input_directory, 'barcode_', 'png', 0, _convert_to_yuv_and_delete, output_directory=output_directory, pattern='barcode_')
[ "def", "convert_png_to_yuv_barcodes", "(", "input_directory", "=", "'.'", ",", "output_directory", "=", "'.'", ")", ":", "return", "helper_functions", ".", "perform_action_on_all_files", "(", "input_directory", ",", "'barcode_'", ",", "'png'", ",", "0", ",", "_convert_to_yuv_and_delete", ",", "output_directory", "=", "output_directory", ",", "pattern", "=", "'barcode_'", ")" ]
https://github.com/DanielSWolf/rhubarb-lip-sync/blob/5cface0af3b6e4e58c0b829c51561d784fb9f52f/rhubarb/lib/webrtc-8d2248ff/webrtc/tools/barcode_tools/barcode_encoder.py#L67-L83
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
Window.GetPositionTuple
(*args, **kwargs)
return _core_.Window_GetPositionTuple(*args, **kwargs)
GetPositionTuple() -> (x,y) Get the window's position. Notice that the position is in client coordinates for child windows and screen coordinates for the top level ones, use `GetScreenPosition` if you need screen coordinates for all kinds of windows.
GetPositionTuple() -> (x,y)
[ "GetPositionTuple", "()", "-", ">", "(", "x", "y", ")" ]
def GetPositionTuple(*args, **kwargs): """ GetPositionTuple() -> (x,y) Get the window's position. Notice that the position is in client coordinates for child windows and screen coordinates for the top level ones, use `GetScreenPosition` if you need screen coordinates for all kinds of windows. """ return _core_.Window_GetPositionTuple(*args, **kwargs)
[ "def", "GetPositionTuple", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Window_GetPositionTuple", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L9465-L9474
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/computation/pytables.py
python
ConditionBinOp.format
(self)
return self.condition
return the actual ne format
return the actual ne format
[ "return", "the", "actual", "ne", "format" ]
def format(self): """ return the actual ne format """ return self.condition
[ "def", "format", "(", "self", ")", ":", "return", "self", ".", "condition" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/computation/pytables.py#L322-L324
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/valgrind/drmemory/PRESUBMIT.py
python
CheckChange
(input_api, output_api)
Checks the DrMemory suppression files for bad suppressions.
Checks the DrMemory suppression files for bad suppressions.
[ "Checks", "the", "DrMemory", "suppression", "files", "for", "bad", "suppressions", "." ]
def CheckChange(input_api, output_api): """Checks the DrMemory suppression files for bad suppressions.""" # TODO(timurrrr): find out how to do relative imports # and remove this ugly hack. Also, the CheckChange function won't be needed. tools_vg_path = input_api.os_path.join(input_api.PresubmitLocalPath(), '..') import sys old_path = sys.path try: sys.path = sys.path + [tools_vg_path] import suppressions return suppressions.PresubmitCheck(input_api, output_api) finally: sys.path = old_path
[ "def", "CheckChange", "(", "input_api", ",", "output_api", ")", ":", "# TODO(timurrrr): find out how to do relative imports", "# and remove this ugly hack. Also, the CheckChange function won't be needed.", "tools_vg_path", "=", "input_api", ".", "os_path", ".", "join", "(", "input_api", ".", "PresubmitLocalPath", "(", ")", ",", "'..'", ")", "import", "sys", "old_path", "=", "sys", ".", "path", "try", ":", "sys", ".", "path", "=", "sys", ".", "path", "+", "[", "tools_vg_path", "]", "import", "suppressions", "return", "suppressions", ".", "PresubmitCheck", "(", "input_api", ",", "output_api", ")", "finally", ":", "sys", ".", "path", "=", "old_path" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/valgrind/drmemory/PRESUBMIT.py#L10-L23
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Editor/Python/windows/Lib/site-packages/pip/_vendor/requests/utils.py
python
dict_to_sequence
(d)
return d
Returns an internal sequence dictionary update.
Returns an internal sequence dictionary update.
[ "Returns", "an", "internal", "sequence", "dictionary", "update", "." ]
def dict_to_sequence(d): """Returns an internal sequence dictionary update.""" if hasattr(d, 'items'): d = d.items() return d
[ "def", "dict_to_sequence", "(", "d", ")", ":", "if", "hasattr", "(", "d", ",", "'items'", ")", ":", "d", "=", "d", ".", "items", "(", ")", "return", "d" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pip/_vendor/requests/utils.py#L41-L47
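The same normalization step in isolation, with made-up inputs:

    def dict_to_sequence(d):
        """Return d.items() for mapping-like objects, otherwise d unchanged."""
        if hasattr(d, 'items'):
            d = d.items()
        return d

    print(list(dict_to_sequence({'a': 1})))   # -> [('a', 1)]
    print(dict_to_sequence([('a', 1)]))       # -> [('a', 1)]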
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/arrays/sparse.py
python
_wrap_result
(name, data, sparse_index, fill_value, dtype=None)
return SparseArray(data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype)
wrap op result to have correct dtype
wrap op result to have correct dtype
[ "wrap", "op", "result", "to", "have", "correct", "dtype" ]
def _wrap_result(name, data, sparse_index, fill_value, dtype=None): """ wrap op result to have correct dtype """ if name.startswith('__'): # e.g. __eq__ --> eq name = name[2:-2] if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'): dtype = np.bool fill_value = lib.item_from_zerodim(fill_value) if is_bool_dtype(dtype): # fill_value may be np.bool_ fill_value = bool(fill_value) return SparseArray(data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype)
[ "def", "_wrap_result", "(", "name", ",", "data", ",", "sparse_index", ",", "fill_value", ",", "dtype", "=", "None", ")", ":", "if", "name", ".", "startswith", "(", "'__'", ")", ":", "# e.g. __eq__ --> eq", "name", "=", "name", "[", "2", ":", "-", "2", "]", "if", "name", "in", "(", "'eq'", ",", "'ne'", ",", "'lt'", ",", "'gt'", ",", "'le'", ",", "'ge'", ")", ":", "dtype", "=", "np", ".", "bool", "fill_value", "=", "lib", ".", "item_from_zerodim", "(", "fill_value", ")", "if", "is_bool_dtype", "(", "dtype", ")", ":", "# fill_value may be np.bool_", "fill_value", "=", "bool", "(", "fill_value", ")", "return", "SparseArray", "(", "data", ",", "sparse_index", "=", "sparse_index", ",", "fill_value", "=", "fill_value", ",", "dtype", "=", "dtype", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/arrays/sparse.py#L483-L502
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/lib-tk/turtle.py
python
TNavigator.ycor
(self)
return self._position[1]
Return the turtle's y coordinate --- No arguments. Example (for a Turtle instance named turtle): >>> reset() >>> turtle.left(60) >>> turtle.forward(100) >>> print turtle.ycor() 86.6025403784
Return the turtle's y coordinate --- No arguments.
[ "Return", "the", "turtle", "s", "y", "coordinate", "---", "No", "arguments", "." ]
def ycor(self): """ Return the turtle's y coordinate --- No arguments. Example (for a Turtle instance named turtle): >>> reset() >>> turtle.left(60) >>> turtle.forward(100) >>> print turtle.ycor() 86.6025403784 """ return self._position[1]
[ "def", "ycor", "(", "self", ")", ":", "return", "self", ".", "_position", "[", "1", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/lib-tk/turtle.py#L1644-L1656
SmileiPIC/Smilei
07dcb51200029e10f626e1546558c1ae7599c8b1
happi/_Diagnostics/TrackParticles.py
python
TrackParticles.toVTK
(self, rendering="trajectory", data_format="xml")
Export the data to Vtk
Export the data to Vtk
[ "Export", "the", "data", "to", "Vtk" ]
def toVTK(self, rendering="trajectory", data_format="xml"): """ Export the data to Vtk """ if not self._validate(): return if not self._sort: print("Cannot export non-sorted data") return if self._ndim_particles != 3: print ("Cannot export tracked particles of a "+str(self._ndim_particles)+"D simulation to VTK") return # The specified rendering option is checked if rendering not in ["trajectory","cloud"]: print ("Rendering of type {} is not valid. It should be `trajectory` or `cloud`.".format(rendering)) return # The specified data format is checked if data_format not in ["xml","vtk"]: print ("Format of type {} is not valid. Should be `xml` or `vtk` ".format(data_format)) return self._mkdir(self._exportDir) fileprefix = self._exportDir + self._exportPrefix + "_" + rendering ntimes = len(self._timesteps) # Determine the correct file extension according to the given data format if data_format == "xml": extension = "vtp" else: extension = "vtk" # Creation of a customed vtk object vtk = VTKfile() # Require x, y and z xaxis = "x" if "x" not in self.axes: xaxis = "moving_x" if xaxis not in self.axes or "y" not in self.axes or "z" not in self.axes: print("Error exporting tracked particles to VTK: axes 'x', 'y' and 'z' are required") return # Cloud mode: each time step is a separated cloud of particles # If there is only one timestep, the trajectory mode becomes a cloud if (ntimes == 1)or(rendering == "cloud"): data = self.getData() for istep,step in enumerate(self._timesteps): data_clean_step = {} # Clean data at istep: remove NaN mask = self._np.ones(len(data[self.axes[0]][istep]), dtype=bool) for ax in self.axes: mask = self._np.logical_and(mask,self._np.logical_not(self._np.isnan(self._np.asarray(data[ax][istep])))) for ax in self.axes: #print(ax,data[ax][istep]) data_clean_step[ax] = self._np.asarray(data[ax][istep])[mask] pcoords_step = self._np.stack((data_clean_step[xaxis],data_clean_step["y"],data_clean_step["z"])).transpose() pcoords_step = self._np.ascontiguousarray(pcoords_step, dtype='float32') # Convert pcoords that is a numpy array into vtkFloatArray pcoords_step = vtk.Array(pcoords_step, "") # List of scalar arrays attributes = [] for ax in self.axes: if ax not in ["x", "y", "z", "moving_x", "Id"]: attributes += [vtk.Array(self._np.ascontiguousarray(data_clean_step[ax].flatten(),'float32'),ax)] # Integer arrays elif ax == "Id": attributes += [vtk.Array(self._np.ascontiguousarray(data_clean_step[ax].flatten(),'int32'),ax)] vtk.WriteCloud(pcoords_step, attributes, data_format, fileprefix+"_{:06d}.{}".format(step,extension)) print("Exportation of {}_{:06d}.{}".format(fileprefix,step,extension)) print("Successfully exported tracked particles to VTK, folder='"+self._exportDir) # Trajectory mode elif (rendering == "trajectory"): data = self.getData() pcoords = self._np.stack((data[xaxis],data["y"],data["z"])).transpose() npoints, nt, nd = pcoords.shape pcoords = self._np.reshape(pcoords, (npoints*nt, nd)) pcoords = self._np.ascontiguousarray(pcoords, dtype='float32') # Convert pcoords that is a numpy array into vtkFloatArray pcoords = vtk.Array(pcoords, "") # Segments between points to describe the trajectories connectivity = self._np.ascontiguousarray([[nt]+[nt*i+j for j in range(nt)] for i in range(npoints)]) # List of scalar arrays attributes = [] for ax in self.axes: if ax not in ["x", "y", "z", "moving_x", "Id"]: attributes += [vtk.Array(self._np.ascontiguousarray(data[ax].flatten(),'float32'),ax)] # Integer arrays elif ax == "Id": attributes += 
[vtk.Array(self._np.ascontiguousarray(data[ax].flatten(),'int32'),ax)] vtk.WriteLines(pcoords, connectivity, attributes, data_format, fileprefix+".{}".format(extension)) print("Successfully exported tracked particles to VTK, folder='"+self._exportDir)
[ "def", "toVTK", "(", "self", ",", "rendering", "=", "\"trajectory\"", ",", "data_format", "=", "\"xml\"", ")", ":", "if", "not", "self", ".", "_validate", "(", ")", ":", "return", "if", "not", "self", ".", "_sort", ":", "print", "(", "\"Cannot export non-sorted data\"", ")", "return", "if", "self", ".", "_ndim_particles", "!=", "3", ":", "print", "(", "\"Cannot export tracked particles of a \"", "+", "str", "(", "self", ".", "_ndim_particles", ")", "+", "\"D simulation to VTK\"", ")", "return", "# The specified rendering option is checked", "if", "rendering", "not", "in", "[", "\"trajectory\"", ",", "\"cloud\"", "]", ":", "print", "(", "\"Rendering of type {} is not valid. It should be `trajectory` or `cloud`.\"", ".", "format", "(", "rendering", ")", ")", "return", "# The specified data format is checked", "if", "data_format", "not", "in", "[", "\"xml\"", ",", "\"vtk\"", "]", ":", "print", "(", "\"Format of type {} is not valid. Should be `xml` or `vtk` \"", ".", "format", "(", "data_format", ")", ")", "return", "self", ".", "_mkdir", "(", "self", ".", "_exportDir", ")", "fileprefix", "=", "self", ".", "_exportDir", "+", "self", ".", "_exportPrefix", "+", "\"_\"", "+", "rendering", "ntimes", "=", "len", "(", "self", ".", "_timesteps", ")", "# Determine the correct file extension according to the given data format", "if", "data_format", "==", "\"xml\"", ":", "extension", "=", "\"vtp\"", "else", ":", "extension", "=", "\"vtk\"", "# Creation of a customed vtk object", "vtk", "=", "VTKfile", "(", ")", "# Require x, y and z", "xaxis", "=", "\"x\"", "if", "\"x\"", "not", "in", "self", ".", "axes", ":", "xaxis", "=", "\"moving_x\"", "if", "xaxis", "not", "in", "self", ".", "axes", "or", "\"y\"", "not", "in", "self", ".", "axes", "or", "\"z\"", "not", "in", "self", ".", "axes", ":", "print", "(", "\"Error exporting tracked particles to VTK: axes 'x', 'y' and 'z' are required\"", ")", "return", "# Cloud mode: each time step is a separated cloud of particles", "# If there is only one timestep, the trajectory mode becomes a cloud", "if", "(", "ntimes", "==", "1", ")", "or", "(", "rendering", "==", "\"cloud\"", ")", ":", "data", "=", "self", ".", "getData", "(", ")", "for", "istep", ",", "step", "in", "enumerate", "(", "self", ".", "_timesteps", ")", ":", "data_clean_step", "=", "{", "}", "# Clean data at istep: remove NaN", "mask", "=", "self", ".", "_np", ".", "ones", "(", "len", "(", "data", "[", "self", ".", "axes", "[", "0", "]", "]", "[", "istep", "]", ")", ",", "dtype", "=", "bool", ")", "for", "ax", "in", "self", ".", "axes", ":", "mask", "=", "self", ".", "_np", ".", "logical_and", "(", "mask", ",", "self", ".", "_np", ".", "logical_not", "(", "self", ".", "_np", ".", "isnan", "(", "self", ".", "_np", ".", "asarray", "(", "data", "[", "ax", "]", "[", "istep", "]", ")", ")", ")", ")", "for", "ax", "in", "self", ".", "axes", ":", "#print(ax,data[ax][istep])", "data_clean_step", "[", "ax", "]", "=", "self", ".", "_np", ".", "asarray", "(", "data", "[", "ax", "]", "[", "istep", "]", ")", "[", "mask", "]", "pcoords_step", "=", "self", ".", "_np", ".", "stack", "(", "(", "data_clean_step", "[", "xaxis", "]", ",", "data_clean_step", "[", "\"y\"", "]", ",", "data_clean_step", "[", "\"z\"", "]", ")", ")", ".", "transpose", "(", ")", "pcoords_step", "=", "self", ".", "_np", ".", "ascontiguousarray", "(", "pcoords_step", ",", "dtype", "=", "'float32'", ")", "# Convert pcoords that is a numpy array into vtkFloatArray", "pcoords_step", "=", "vtk", ".", "Array", "(", "pcoords_step", ",", "\"\"", ")", "# 
List of scalar arrays", "attributes", "=", "[", "]", "for", "ax", "in", "self", ".", "axes", ":", "if", "ax", "not", "in", "[", "\"x\"", ",", "\"y\"", ",", "\"z\"", ",", "\"moving_x\"", ",", "\"Id\"", "]", ":", "attributes", "+=", "[", "vtk", ".", "Array", "(", "self", ".", "_np", ".", "ascontiguousarray", "(", "data_clean_step", "[", "ax", "]", ".", "flatten", "(", ")", ",", "'float32'", ")", ",", "ax", ")", "]", "# Integer arrays", "elif", "ax", "==", "\"Id\"", ":", "attributes", "+=", "[", "vtk", ".", "Array", "(", "self", ".", "_np", ".", "ascontiguousarray", "(", "data_clean_step", "[", "ax", "]", ".", "flatten", "(", ")", ",", "'int32'", ")", ",", "ax", ")", "]", "vtk", ".", "WriteCloud", "(", "pcoords_step", ",", "attributes", ",", "data_format", ",", "fileprefix", "+", "\"_{:06d}.{}\"", ".", "format", "(", "step", ",", "extension", ")", ")", "print", "(", "\"Exportation of {}_{:06d}.{}\"", ".", "format", "(", "fileprefix", ",", "step", ",", "extension", ")", ")", "print", "(", "\"Successfully exported tracked particles to VTK, folder='\"", "+", "self", ".", "_exportDir", ")", "# Trajectory mode", "elif", "(", "rendering", "==", "\"trajectory\"", ")", ":", "data", "=", "self", ".", "getData", "(", ")", "pcoords", "=", "self", ".", "_np", ".", "stack", "(", "(", "data", "[", "xaxis", "]", ",", "data", "[", "\"y\"", "]", ",", "data", "[", "\"z\"", "]", ")", ")", ".", "transpose", "(", ")", "npoints", ",", "nt", ",", "nd", "=", "pcoords", ".", "shape", "pcoords", "=", "self", ".", "_np", ".", "reshape", "(", "pcoords", ",", "(", "npoints", "*", "nt", ",", "nd", ")", ")", "pcoords", "=", "self", ".", "_np", ".", "ascontiguousarray", "(", "pcoords", ",", "dtype", "=", "'float32'", ")", "# Convert pcoords that is a numpy array into vtkFloatArray", "pcoords", "=", "vtk", ".", "Array", "(", "pcoords", ",", "\"\"", ")", "# Segments between points to describe the trajectories", "connectivity", "=", "self", ".", "_np", ".", "ascontiguousarray", "(", "[", "[", "nt", "]", "+", "[", "nt", "*", "i", "+", "j", "for", "j", "in", "range", "(", "nt", ")", "]", "for", "i", "in", "range", "(", "npoints", ")", "]", ")", "# List of scalar arrays", "attributes", "=", "[", "]", "for", "ax", "in", "self", ".", "axes", ":", "if", "ax", "not", "in", "[", "\"x\"", ",", "\"y\"", ",", "\"z\"", ",", "\"moving_x\"", ",", "\"Id\"", "]", ":", "attributes", "+=", "[", "vtk", ".", "Array", "(", "self", ".", "_np", ".", "ascontiguousarray", "(", "data", "[", "ax", "]", ".", "flatten", "(", ")", ",", "'float32'", ")", ",", "ax", ")", "]", "# Integer arrays", "elif", "ax", "==", "\"Id\"", ":", "attributes", "+=", "[", "vtk", ".", "Array", "(", "self", ".", "_np", ".", "ascontiguousarray", "(", "data", "[", "ax", "]", ".", "flatten", "(", ")", ",", "'int32'", ")", ",", "ax", ")", "]", "vtk", ".", "WriteLines", "(", "pcoords", ",", "connectivity", ",", "attributes", ",", "data_format", ",", "fileprefix", "+", "\".{}\"", ".", "format", "(", "extension", ")", ")", "print", "(", "\"Successfully exported tracked particles to VTK, folder='\"", "+", "self", ".", "_exportDir", ")" ]
https://github.com/SmileiPIC/Smilei/blob/07dcb51200029e10f626e1546558c1ae7599c8b1/happi/_Diagnostics/TrackParticles.py#L831-L941
ros-perception/image_pipeline
cd4aa7ab38726d88e8e0144aa0d45ad2f236535a
camera_calibration/nodes/cameracalibrator.py
python
optionsValidCharuco
(options, parser)
return True
Validates the provided options when the pattern type is 'charuco'
Validates the provided options when the pattern type is 'charuco'
[ "Validates", "the", "provided", "options", "when", "the", "pattern", "type", "is", "charuco" ]
def optionsValidCharuco(options, parser): """ Validates the provided options when the pattern type is 'charuco' """ if options.pattern != 'charuco': return False n_boards = len(options.size) if (n_boards != len(options.square) or n_boards != len(options.charuco_marker_size) or n_boards != len(options.aruco_dict)): parser.error("When using ChArUco boards, --size, --square, --charuco_marker_size, and --aruco_dict " + "must be specified for each board") return False # TODO: check for fisheye and stereo (not implemented with ChArUco) return True
[ "def", "optionsValidCharuco", "(", "options", ",", "parser", ")", ":", "if", "options", ".", "pattern", "!=", "'charuco'", ":", "return", "False", "n_boards", "=", "len", "(", "options", ".", "size", ")", "if", "(", "n_boards", "!=", "len", "(", "options", ".", "square", ")", "or", "n_boards", "!=", "len", "(", "options", ".", "charuco_marker_size", ")", "or", "n_boards", "!=", "len", "(", "options", ".", "aruco_dict", ")", ")", ":", "parser", ".", "error", "(", "\"When using ChArUco boards, --size, --square, --charuco_marker_size, and --aruco_dict \"", "+", "\"must be specified for each board\"", ")", "return", "False", "# TODO: check for fisheye and stereo (not implemented with ChArUco)", "return", "True" ]
https://github.com/ros-perception/image_pipeline/blob/cd4aa7ab38726d88e8e0144aa0d45ad2f236535a/camera_calibration/nodes/cameracalibrator.py#L43-L57
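A minimal sketch of exercising optionsValidCharuco() from the record above, assuming the function is in scope (e.g. copied from cameracalibrator.py); the board values are invented and only need matching list lengths:

from argparse import ArgumentParser
from types import SimpleNamespace

parser = ArgumentParser()
# One ChArUco board, so every per-board option list has exactly one entry.
options = SimpleNamespace(pattern='charuco',
                          size=['11x8'],
                          square=[0.022],
                          charuco_marker_size=[0.017],
                          aruco_dict=['DICT_4X4_250'])
print(optionsValidCharuco(options, parser))  # True; mismatched lengths would trigger parser.error()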
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/database.py
python
DependencyGraph.add_distribution
(self, distribution)
Add the *distribution* to the graph. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution`
Add the *distribution* to the graph.
[ "Add", "the", "*", "distribution", "*", "to", "the", "graph", "." ]
def add_distribution(self, distribution): """Add the *distribution* to the graph. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` """ self.adjacency_list[distribution] = [] self.reverse_list[distribution] = []
[ "def", "add_distribution", "(", "self", ",", "distribution", ")", ":", "self", ".", "adjacency_list", "[", "distribution", "]", "=", "[", "]", "self", ".", "reverse_list", "[", "distribution", "]", "=", "[", "]" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/database.py#L1054-L1061
perilouswithadollarsign/cstrike15_src
f82112a2388b841d72cb62ca48ab1846dfcc11c8
thirdparty/protobuf-2.5.0/python/google/protobuf/message.py
python
Message.IsInitialized
(self)
Checks if the message is initialized. Returns: The method returns True if the message is initialized (i.e. all of its required fields are set).
Checks if the message is initialized.
[ "Checks", "if", "the", "message", "is", "initialized", "." ]
def IsInitialized(self): """Checks if the message is initialized. Returns: The method returns True if the message is initialized (i.e. all of its required fields are set). """ raise NotImplementedError
[ "def", "IsInitialized", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/perilouswithadollarsign/cstrike15_src/blob/f82112a2388b841d72cb62ca48ab1846dfcc11c8/thirdparty/protobuf-2.5.0/python/google/protobuf/message.py#L134-L141
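IsInitialized() is abstract in the record above and raises NotImplementedError; generated message classes supply the real check. A hedged sketch, assuming a hypothetical proto2 module addressbook_pb2 with required fields (not part of this record):

from addressbook_pb2 import Person  # hypothetical generated module

person = Person()
print(person.IsInitialized())  # False while required fields are unset
person.name = "Alice"          # assumed required field
person.id = 1                  # assumed required field
print(person.IsInitialized())  # True once every required field is set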
ceph/ceph
959663007321a369c83218414a29bd9dbc8bda3a
src/pybind/mgr/cephadm/migrations.py
python
queue_migrate_nfs_spec
(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any])
After 16.2.5 we dropped the NFSServiceSpec pool and namespace properties. Queue up a migration to process later, once we are sure that RADOS is available and so on.
After 16.2.5 we dropped the NFSServiceSpec pool and namespace properties. Queue up a migration to process later, once we are sure that RADOS is available and so on.
[ "After", "16", ".", "2", ".", "5", "we", "dropped", "the", "NFSServiceSpec", "pool", "and", "namespace", "properties", ".", "Queue", "up", "a", "migration", "to", "process", "later", "once", "we", "are", "sure", "that", "RADOS", "is", "available", "and", "so", "on", "." ]
def queue_migrate_nfs_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None: """ After 16.2.5 we dropped the NFSServiceSpec pool and namespace properties. Queue up a migration to process later, once we are sure that RADOS is available and so on. """ service_id = spec_dict['spec']['service_id'] args = spec_dict['spec'].get('spec', {}) pool = args.pop('pool', 'nfs-ganesha') ns = args.pop('namespace', service_id) queued = mgr.get_store('nfs_migration_queue') or '[]' ls = json.loads(queued) ls.append([service_id, pool, ns]) mgr.set_store('nfs_migration_queue', json.dumps(ls)) mgr.log.info(f'Queued nfs.{service_id} for migration')
[ "def", "queue_migrate_nfs_spec", "(", "mgr", ":", "\"CephadmOrchestrator\"", ",", "spec_dict", ":", "Dict", "[", "Any", ",", "Any", "]", ")", "->", "None", ":", "service_id", "=", "spec_dict", "[", "'spec'", "]", "[", "'service_id'", "]", "args", "=", "spec_dict", "[", "'spec'", "]", ".", "get", "(", "'spec'", ",", "{", "}", ")", "pool", "=", "args", ".", "pop", "(", "'pool'", ",", "'nfs-ganesha'", ")", "ns", "=", "args", ".", "pop", "(", "'namespace'", ",", "service_id", ")", "queued", "=", "mgr", ".", "get_store", "(", "'nfs_migration_queue'", ")", "or", "'[]'", "ls", "=", "json", ".", "loads", "(", "queued", ")", "ls", ".", "append", "(", "[", "service_id", ",", "pool", ",", "ns", "]", ")", "mgr", ".", "set_store", "(", "'nfs_migration_queue'", ",", "json", ".", "dumps", "(", "ls", ")", ")", "mgr", ".", "log", ".", "info", "(", "f'Queued nfs.{service_id} for migration'", ")" ]
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/pybind/mgr/cephadm/migrations.py#L319-L333
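The spec_dict layout that queue_migrate_nfs_spec() reads follows directly from the code above; the values below are illustrative and no real CephadmOrchestrator is constructed:

spec_dict = {
    'spec': {
        'service_id': 'mynfs',
        'spec': {'pool': 'nfs-ganesha', 'namespace': 'mynfs'},
    },
}
# queue_migrate_nfs_spec(mgr, spec_dict) would append ['mynfs', 'nfs-ganesha', 'mynfs']
# to the JSON list stored under the 'nfs_migration_queue' key.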
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/debug/lib/debug_data.py
python
DebugDumpDir.__init__
(self, dump_root, partition_graphs=None, validate=True)
`DebugDumpDir` constructor. Args: dump_root: (`str`) path to the dump root directory. partition_graphs: A repeated field of GraphDefs representing the partition graphs executed by the TensorFlow runtime. validate: (`bool`) whether the dump files are to be validated against the partition graphs. Raises: IOError: If dump_root does not exist as a directory. ValueError: If more than one core metadata file is found under the dump root directory.
`DebugDumpDir` constructor.
[ "DebugDumpDir", "constructor", "." ]
def __init__(self, dump_root, partition_graphs=None, validate=True): """`DebugDumpDir` constructor. Args: dump_root: (`str`) path to the dump root directory. partition_graphs: A repeated field of GraphDefs representing the partition graphs executed by the TensorFlow runtime. validate: (`bool`) whether the dump files are to be validated against the partition graphs. Raises: IOError: If dump_root does not exist as a directory. ValueError: If more than one core metadata file is found under the dump root directory. """ if not gfile.IsDirectory(dump_root): raise IOError("Dump root directory %s does not exist" % dump_root) self._core_metadata = [] # Find the list of devices. self._dump_root = dump_root self._load_core_metadata() self._load_fetches_info() self._load_feeds_info() self._load_all_device_dumps(partition_graphs, validate) self._python_graph = None
[ "def", "__init__", "(", "self", ",", "dump_root", ",", "partition_graphs", "=", "None", ",", "validate", "=", "True", ")", ":", "if", "not", "gfile", ".", "IsDirectory", "(", "dump_root", ")", ":", "raise", "IOError", "(", "\"Dump root directory %s does not exist\"", "%", "dump_root", ")", "self", ".", "_core_metadata", "=", "[", "]", "# Find the list of devices.", "self", ".", "_dump_root", "=", "dump_root", "self", ".", "_load_core_metadata", "(", ")", "self", ".", "_load_fetches_info", "(", ")", "self", ".", "_load_feeds_info", "(", ")", "self", ".", "_load_all_device_dumps", "(", "partition_graphs", ",", "validate", ")", "self", ".", "_python_graph", "=", "None" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/debug/lib/debug_data.py#L663-L692
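A construction sketch for DebugDumpDir, assuming a tfdbg dump already exists on disk (the path is a placeholder) and that this TF-1.x-era module is importable:

from tensorflow.python.debug.lib import debug_data

dump = debug_data.DebugDumpDir("/tmp/tfdbg_dumps/run-1")  # placeholder dump root
# Validation against partition graphs can be turned off when they are unavailable:
dump_no_validate = debug_data.DebugDumpDir("/tmp/tfdbg_dumps/run-1", validate=False)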
NVIDIA/DALI
bf16cc86ba8f091b145f91962f21fe1b6aff243d
third_party/cpplint.py
python
FileInfo.FullName
(self)
return os.path.abspath(self._filename).replace('\\', '/')
Make Windows paths like Unix.
Make Windows paths like Unix.
[ "Make", "Windows", "paths", "like", "Unix", "." ]
def FullName(self): """Make Windows paths like Unix.""" return os.path.abspath(self._filename).replace('\\', '/')
[ "def", "FullName", "(", "self", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "self", ".", "_filename", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")" ]
https://github.com/NVIDIA/DALI/blob/bf16cc86ba8f091b145f91962f21fe1b6aff243d/third_party/cpplint.py#L1105-L1107
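A small usage sketch for FileInfo.FullName(), assuming cpplint.py is importable as a module; the path is illustrative:

import cpplint

info = cpplint.FileInfo(r'C:\src\project\widget.cc')
print(info.FullName())  # 'C:/src/project/widget.cc' when run on Windows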
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/telemetry/internal/platform/android_platform_backend.py
python
_FixPossibleAdbInstability
()
Host side workaround for crbug.com/268450 (adb instability). The adb server has a race which is mitigated by binding to a single core.
Host side workaround for crbug.com/268450 (adb instability).
[ "Host", "side", "workaround", "for", "crbug", ".", "com", "/", "268450", "(", "adb", "instability", ")", "." ]
def _FixPossibleAdbInstability(): """Host side workaround for crbug.com/268450 (adb instability). The adb server has a race which is mitigated by binding to a single core. """ if not psutil: return for process in psutil.process_iter(): try: if psutil.version_info >= (2, 0): if 'adb' in process.name(): process.cpu_affinity([0]) else: if 'adb' in process.name: process.set_cpu_affinity([0]) except (psutil.NoSuchProcess, psutil.AccessDenied): logging.warn('Failed to set adb process CPU affinity')
[ "def", "_FixPossibleAdbInstability", "(", ")", ":", "if", "not", "psutil", ":", "return", "for", "process", "in", "psutil", ".", "process_iter", "(", ")", ":", "try", ":", "if", "psutil", ".", "version_info", ">=", "(", "2", ",", "0", ")", ":", "if", "'adb'", "in", "process", ".", "name", "(", ")", ":", "process", ".", "cpu_affinity", "(", "[", "0", "]", ")", "else", ":", "if", "'adb'", "in", "process", ".", "name", ":", "process", ".", "set_cpu_affinity", "(", "[", "0", "]", ")", "except", "(", "psutil", ".", "NoSuchProcess", ",", "psutil", ".", "AccessDenied", ")", ":", "logging", ".", "warn", "(", "'Failed to set adb process CPU affinity'", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/telemetry/internal/platform/android_platform_backend.py#L749-L765
maidsafe-archive/MaidSafe
defd65e1c8cfb6a1cbdeaaa0eee31d065421792d
tools/cpplint.py
python
_IncludeState.CanonicalizeAlphabeticalOrder
(self, header_path)
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. - removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path.
Returns a path canonicalized for alphabetical comparison.
[ "Returns", "a", "path", "canonicalized", "for", "alphabetical", "comparison", "." ]
def CanonicalizeAlphabeticalOrder(self, header_path): """Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. - removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path. """ return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
[ "def", "CanonicalizeAlphabeticalOrder", "(", "self", ",", "header_path", ")", ":", "return", "header_path", ".", "replace", "(", "'-inl.h'", ",", "'.h'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "lower", "(", ")" ]
https://github.com/maidsafe-archive/MaidSafe/blob/defd65e1c8cfb6a1cbdeaaa0eee31d065421792d/tools/cpplint.py#L470-L483
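The canonicalization is a pure string transform, so its effect can be checked with a standalone copy of the same one-liner:

def canonicalize(header_path):
    # identical replace chain to _IncludeState.CanonicalizeAlphabeticalOrder above
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()

assert canonicalize('Foo-Bar-inl.h') == 'foo_bar.h'
assert canonicalize('base/string-util.h') == 'base/string_util.h'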
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/extern/pygments/formatters/img.py
python
ImageFormatter._get_text_color
(self, style)
return fill
Get the correct color for the token from the style.
Get the correct color for the token from the style.
[ "Get", "the", "correct", "color", "for", "the", "token", "from", "the", "style", "." ]
def _get_text_color(self, style): """ Get the correct color for the token from the style. """ if style['color'] is not None: fill = '#' + style['color'] else: fill = '#000' return fill
[ "def", "_get_text_color", "(", "self", ",", "style", ")", ":", "if", "style", "[", "'color'", "]", "is", "not", "None", ":", "fill", "=", "'#'", "+", "style", "[", "'color'", "]", "else", ":", "fill", "=", "'#000'", "return", "fill" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/pygments/formatters/img.py#L375-L383
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/keras/_impl/keras/backend.py
python
maximum
(x, y)
return math_ops.maximum(x, y)
Element-wise maximum of two tensors. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A tensor.
Element-wise maximum of two tensors.
[ "Element", "-", "wise", "maximum", "of", "two", "tensors", "." ]
def maximum(x, y): """Element-wise maximum of two tensors. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A tensor. """ return math_ops.maximum(x, y)
[ "def", "maximum", "(", "x", ",", "y", ")", ":", "return", "math_ops", ".", "maximum", "(", "x", ",", "y", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/keras/_impl/keras/backend.py#L1786-L1796
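A short usage sketch via the public Keras backend (the import path differs slightly from the internal _impl module quoted above and varies by TF version):

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.array([1.0, 5.0, 3.0]))
y = K.constant(np.array([4.0, 2.0, 3.0]))
print(K.eval(K.maximum(x, y)))  # [4. 5. 3.]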
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/catkin/python/catkin/terminal_color.py
python
sanitize
(msg)
return msg
Sanitizes the existing msg, use before adding color annotations
Sanitizes the existing msg, use before adding color annotations
[ "Sanitizes", "the", "existing", "msg", "use", "before", "adding", "color", "annotations" ]
def sanitize(msg): """Sanitizes the existing msg, use before adding color annotations""" msg = msg.replace('@', '@@') msg = msg.replace('{', '{{') msg = msg.replace('}', '}}') msg = msg.replace('@@!', '@{atexclimation}') msg = msg.replace('@@/', '@{atfwdslash}') msg = msg.replace('@@_', '@{atunderscore}') msg = msg.replace('@@|', '@{atbar}') return msg
[ "def", "sanitize", "(", "msg", ")", ":", "msg", "=", "msg", ".", "replace", "(", "'@'", ",", "'@@'", ")", "msg", "=", "msg", ".", "replace", "(", "'{'", ",", "'{{'", ")", "msg", "=", "msg", ".", "replace", "(", "'}'", ",", "'}}'", ")", "msg", "=", "msg", ".", "replace", "(", "'@@!'", ",", "'@{atexclimation}'", ")", "msg", "=", "msg", ".", "replace", "(", "'@@/'", ",", "'@{atfwdslash}'", ")", "msg", "=", "msg", ".", "replace", "(", "'@@_'", ",", "'@{atunderscore}'", ")", "msg", "=", "msg", ".", "replace", "(", "'@@|'", ",", "'@{atbar}'", ")", "return", "msg" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/catkin/python/catkin/terminal_color.py#L112-L121
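A before/after example for sanitize(), assuming the catkin Python package is importable; the input string is made up:

from catkin.terminal_color import sanitize

print(sanitize("user@host {branch}"))  # -> user@@host {{branch}}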
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/summary/event_accumulator.py
python
_CompressHistogram
(histo, bps)
return values
Creates fixed size histogram by adding compression to accumulated state. This routine transforms a histogram at a particular step by linearly interpolating its variable number of buckets to represent their cumulative weight at a constant number of compression points. This significantly reduces the size of the histogram and makes it suitable for a two-dimensional area plot where the output of this routine constitutes the ranges for a single x coordinate. Args: histo: A HistogramValue namedtuple. bps: Compression points represented in basis points, 1/100ths of a percent. Returns: List of CompressedHistogramValue namedtuples.
Creates fixed size histogram by adding compression to accumulated state.
[ "Creates", "fixed", "size", "histogram", "by", "adding", "compression", "to", "accumulated", "state", "." ]
def _CompressHistogram(histo, bps): """Creates fixed size histogram by adding compression to accumulated state. This routine transforms a histogram at a particular step by linearly interpolating its variable number of buckets to represent their cumulative weight at a constant number of compression points. This significantly reduces the size of the histogram and makes it suitable for a two-dimensional area plot where the output of this routine constitutes the ranges for a single x coordinate. Args: histo: A HistogramValue namedtuple. bps: Compression points represented in basis points, 1/100ths of a percent. Returns: List of CompressedHistogramValue namedtuples. """ # See also: Histogram::Percentile() in core/lib/histogram/histogram.cc if not histo.num: return [CompressedHistogramValue(b, 0.0) for b in bps] bucket = np.array(histo.bucket) weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum() values = [] j = 0 while j < len(bps): i = np.searchsorted(weights, bps[j], side='right') while i < len(weights): cumsum = weights[i] cumsum_prev = weights[i - 1] if i > 0 else 0.0 if cumsum == cumsum_prev: # prevent remap divide by zero i += 1 continue if not i or not cumsum_prev: lhs = histo.min else: lhs = max(histo.bucket_limit[i - 1], histo.min) rhs = min(histo.bucket_limit[i], histo.max) weight = _Remap(bps[j], cumsum_prev, cumsum, lhs, rhs) values.append(CompressedHistogramValue(bps[j], weight)) j += 1 break else: break while j < len(bps): values.append(CompressedHistogramValue(bps[j], histo.max)) j += 1 return values
[ "def", "_CompressHistogram", "(", "histo", ",", "bps", ")", ":", "# See also: Histogram::Percentile() in core/lib/histogram/histogram.cc", "if", "not", "histo", ".", "num", ":", "return", "[", "CompressedHistogramValue", "(", "b", ",", "0.0", ")", "for", "b", "in", "bps", "]", "bucket", "=", "np", ".", "array", "(", "histo", ".", "bucket", ")", "weights", "=", "(", "bucket", "*", "bps", "[", "-", "1", "]", "/", "(", "bucket", ".", "sum", "(", ")", "or", "1.0", ")", ")", ".", "cumsum", "(", ")", "values", "=", "[", "]", "j", "=", "0", "while", "j", "<", "len", "(", "bps", ")", ":", "i", "=", "np", ".", "searchsorted", "(", "weights", ",", "bps", "[", "j", "]", ",", "side", "=", "'right'", ")", "while", "i", "<", "len", "(", "weights", ")", ":", "cumsum", "=", "weights", "[", "i", "]", "cumsum_prev", "=", "weights", "[", "i", "-", "1", "]", "if", "i", ">", "0", "else", "0.0", "if", "cumsum", "==", "cumsum_prev", ":", "# prevent remap divide by zero", "i", "+=", "1", "continue", "if", "not", "i", "or", "not", "cumsum_prev", ":", "lhs", "=", "histo", ".", "min", "else", ":", "lhs", "=", "max", "(", "histo", ".", "bucket_limit", "[", "i", "-", "1", "]", ",", "histo", ".", "min", ")", "rhs", "=", "min", "(", "histo", ".", "bucket_limit", "[", "i", "]", ",", "histo", ".", "max", ")", "weight", "=", "_Remap", "(", "bps", "[", "j", "]", ",", "cumsum_prev", ",", "cumsum", ",", "lhs", ",", "rhs", ")", "values", ".", "append", "(", "CompressedHistogramValue", "(", "bps", "[", "j", "]", ",", "weight", ")", ")", "j", "+=", "1", "break", "else", ":", "break", "while", "j", "<", "len", "(", "bps", ")", ":", "values", ".", "append", "(", "CompressedHistogramValue", "(", "bps", "[", "j", "]", ",", "histo", ".", "max", ")", ")", "j", "+=", "1", "return", "values" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/summary/event_accumulator.py#L577-L623
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/s3transfer/processpool.py
python
ProcessPoolDownloader.shutdown
(self)
Shutdown the downloader It will wait till all downloads are complete before returning.
Shutdown the downloader
[ "Shutdown", "the", "downloader" ]
def shutdown(self): """Shutdown the downloader It will wait till all downloads are complete before returning. """ self._shutdown_if_needed()
[ "def", "shutdown", "(", "self", ")", ":", "self", ".", "_shutdown_if_needed", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/s3transfer/processpool.py#L369-L374
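A lifecycle sketch around shutdown(); bucket, key and filename are placeholders, and real AWS credentials would be needed to actually run it:

from s3transfer.processpool import ProcessPoolDownloader

downloader = ProcessPoolDownloader()
downloader.download_file('my-bucket', 'data/archive.bin', 'archive.bin')
downloader.shutdown()  # blocks until the queued download completes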
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/third_party/lib_aarch64/python2.7/dist-packages/rosdep2/platforms/gem.py
python
gem_detect
(pkgs, exec_fn=None)
return ret_list
Given a list of packages, return the list of installed packages. :param exec_fn: function to execute Popen and read stdout (for testing)
Given a list of packages, return the list of installed packages.
[ "Given", "a", "list", "of", "packages", "return", "the", "list", "of", "installed", "packages", "." ]
def gem_detect(pkgs, exec_fn=None): """ Given a list of package, return the list of installed packages. :param exec_fn: function to execute Popen and read stdout (for testing) """ if exec_fn is None: exec_fn = read_stdout pkg_list = exec_fn(['gem', 'list']).split('\n') ret_list = [] for pkg in pkg_list: pkg_row = pkg.split(" ") if pkg_row[0] in pkgs: ret_list.append( pkg_row[0]) return ret_list
[ "def", "gem_detect", "(", "pkgs", ",", "exec_fn", "=", "None", ")", ":", "if", "exec_fn", "is", "None", ":", "exec_fn", "=", "read_stdout", "pkg_list", "=", "exec_fn", "(", "[", "'gem'", ",", "'list'", "]", ")", ".", "split", "(", "'\\n'", ")", "ret_list", "=", "[", "]", "for", "pkg", "in", "pkg_list", ":", "pkg_row", "=", "pkg", ".", "split", "(", "\" \"", ")", "if", "pkg_row", "[", "0", "]", "in", "pkgs", ":", "ret_list", ".", "append", "(", "pkg_row", "[", "0", "]", ")", "return", "ret_list" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_aarch64/python2.7/dist-packages/rosdep2/platforms/gem.py#L53-L68
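The exec_fn hook lets gem_detect() be exercised without a Ruby toolchain; the fake `gem list` output below is invented:

from rosdep2.platforms.gem import gem_detect  # assumes rosdep2 is importable

fake_gem_list = lambda cmd: "bundler (2.4.10)\nrake (13.0.6)\nrails (7.0.4)"
print(gem_detect(['rake', 'nokogiri'], exec_fn=fake_gem_list))  # ['rake']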
weolar/miniblink49
1c4678db0594a4abde23d3ebbcc7cd13c3170777
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/data.py
python
CoverageData.write
(self, suffix=None)
Write the collected coverage data to a file. `suffix` is a suffix to append to the base file name. This can be used for multiple or parallel execution, so that many coverage data files can exist simultaneously. A dot will be used to join the base name and the suffix.
Write the collected coverage data to a file.
[ "Write", "the", "collected", "coverage", "data", "to", "a", "file", "." ]
def write(self, suffix=None): """Write the collected coverage data to a file. `suffix` is a suffix to append to the base file name. This can be used for multiple or parallel execution, so that many coverage data files can exist simultaneously. A dot will be used to join the base name and the suffix. """ if self.use_file: filename = self.filename if suffix: filename += "." + suffix self.write_file(filename)
[ "def", "write", "(", "self", ",", "suffix", "=", "None", ")", ":", "if", "self", ".", "use_file", ":", "filename", "=", "self", ".", "filename", "if", "suffix", ":", "filename", "+=", "\".\"", "+", "suffix", "self", ".", "write_file", "(", "filename", ")" ]
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/data.py#L78-L91
facebookresearch/ELF
1f790173095cd910976d9f651b80beb872ec5d12
rlpytorch/methods/q_learning.py
python
Q_learning.update
(self, mi, batch, stats)
Actor critic model update. Feed stats for later summarization. Args: mi(`ModelInterface`): mode interface used batch(dict): batch of data. Keys in a batch: ``s``: state, ``r``: immediate reward, ``terminal``: if game is terminated stats(`Stats`): Feed stats for later summarization.
Actor critic model update. Feed stats for later summarization.
[ "Actor", "critic", "model", "update", ".", "Feed", "stats", "for", "later", "summarization", "." ]
def update(self, mi, batch, stats): ''' Actor critic model update. Feed stats for later summarization. Args: mi(`ModelInterface`): mode interface used batch(dict): batch of data. Keys in a batch: ``s``: state, ``r``: immediate reward, ``terminal``: if game is terminated stats(`Stats`): Feed stats for later summarization. ''' m = mi["model"] args = self.args Q_node = args.Q_node a_node = args.a_node T = batch["s"].size(0) state_curr = m(batch.hist(T - 1)) Q = state_curr[Q_node].squeeze().data V = Q.max(1) self.discounted_reward.setR(V, stats) err = None for t in range(T - 2, -1, -1): bht = batch.hist(t) state_curr = m.forward(bht) # go through the sample and get the rewards. Q = state_curr[Q_node].squeeze() a = state_curr[a_node].squeeze() R = self.discounted_reward.feed( dict(r=batch["r"][t], terminal=batch["terminal"][t]), stats=stats) # Then you want to match Q value here. # Q: batchsize * #action. Q_sel = Q.gather(1, a.view(-1, 1)).squeeze() err = add_err(err, nn.L2Loss(Q_sel, Variable(R))) stats["cost"].feed(err.data[0] / (T - 1)) err.backward()
[ "def", "update", "(", "self", ",", "mi", ",", "batch", ",", "stats", ")", ":", "m", "=", "mi", "[", "\"model\"", "]", "args", "=", "self", ".", "args", "Q_node", "=", "args", ".", "Q_node", "a_node", "=", "args", ".", "a_node", "T", "=", "batch", "[", "\"s\"", "]", ".", "size", "(", "0", ")", "state_curr", "=", "m", "(", "batch", ".", "hist", "(", "T", "-", "1", ")", ")", "Q", "=", "state_curr", "[", "Q_node", "]", ".", "squeeze", "(", ")", ".", "data", "V", "=", "Q", ".", "max", "(", "1", ")", "self", ".", "discounted_reward", ".", "setR", "(", "V", ",", "stats", ")", "err", "=", "None", "for", "t", "in", "range", "(", "T", "-", "2", ",", "-", "1", ",", "-", "1", ")", ":", "bht", "=", "batch", ".", "hist", "(", "t", ")", "state_curr", "=", "m", ".", "forward", "(", "bht", ")", "# go through the sample and get the rewards.", "Q", "=", "state_curr", "[", "Q_node", "]", ".", "squeeze", "(", ")", "a", "=", "state_curr", "[", "a_node", "]", ".", "squeeze", "(", ")", "R", "=", "self", ".", "discounted_reward", ".", "feed", "(", "dict", "(", "r", "=", "batch", "[", "\"r\"", "]", "[", "t", "]", ",", "terminal", "=", "batch", "[", "\"terminal\"", "]", "[", "t", "]", ")", ",", "stats", "=", "stats", ")", "# Then you want to match Q value here.", "# Q: batchsize * #action.", "Q_sel", "=", "Q", ".", "gather", "(", "1", ",", "a", ".", "view", "(", "-", "1", ",", "1", ")", ")", ".", "squeeze", "(", ")", "err", "=", "add_err", "(", "err", ",", "nn", ".", "L2Loss", "(", "Q_sel", ",", "Variable", "(", "R", ")", ")", ")", "stats", "[", "\"cost\"", "]", ".", "feed", "(", "err", ".", "data", "[", "0", "]", "/", "(", "T", "-", "1", ")", ")", "err", ".", "backward", "(", ")" ]
https://github.com/facebookresearch/ELF/blob/1f790173095cd910976d9f651b80beb872ec5d12/rlpytorch/methods/q_learning.py#L37-L81
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_controls.py
python
ListItem.GetAttributes
(*args, **kwargs)
return _controls_.ListItem_GetAttributes(*args, **kwargs)
GetAttributes(self) -> ListItemAttr
GetAttributes(self) -> ListItemAttr
[ "GetAttributes", "(", "self", ")", "-", ">", "ListItemAttr" ]
def GetAttributes(*args, **kwargs): """GetAttributes(self) -> ListItemAttr""" return _controls_.ListItem_GetAttributes(*args, **kwargs)
[ "def", "GetAttributes", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "ListItem_GetAttributes", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L4248-L4250
NVIDIA/TensorRT
42805f078052daad1a98bc5965974fcffaad0960
samples/python/tensorflow_object_detection_api/build_engine.py
python
EngineBuilder.__init__
(self, verbose=False, workspace=8)
:param verbose: If enabled, a higher verbosity level will be set on the TensorRT logger. :param workspace: Max memory workspace to allow, in Gb.
:param verbose: If enabled, a higher verbosity level will be set on the TensorRT logger. :param workspace: Max memory workspace to allow, in Gb.
[ ":", "param", "verbose", ":", "If", "enabled", "a", "higher", "verbosity", "level", "will", "be", "set", "on", "the", "TensorRT", "logger", ".", ":", "param", "workspace", ":", "Max", "memory", "workspace", "to", "allow", "in", "Gb", "." ]
def __init__(self, verbose=False, workspace=8): """ :param verbose: If enabled, a higher verbosity level will be set on the TensorRT logger. :param workspace: Max memory workspace to allow, in Gb. """ self.trt_logger = trt.Logger(trt.Logger.INFO) if verbose: self.trt_logger.min_severity = trt.Logger.Severity.VERBOSE trt.init_libnvinfer_plugins(self.trt_logger, namespace="") self.builder = trt.Builder(self.trt_logger) self.config = self.builder.create_builder_config() self.config.max_workspace_size = workspace * (2 ** 30) self.batch_size = None self.network = None self.parser = None
[ "def", "__init__", "(", "self", ",", "verbose", "=", "False", ",", "workspace", "=", "8", ")", ":", "self", ".", "trt_logger", "=", "trt", ".", "Logger", "(", "trt", ".", "Logger", ".", "INFO", ")", "if", "verbose", ":", "self", ".", "trt_logger", ".", "min_severity", "=", "trt", ".", "Logger", ".", "Severity", ".", "VERBOSE", "trt", ".", "init_libnvinfer_plugins", "(", "self", ".", "trt_logger", ",", "namespace", "=", "\"\"", ")", "self", ".", "builder", "=", "trt", ".", "Builder", "(", "self", ".", "trt_logger", ")", "self", ".", "config", "=", "self", ".", "builder", ".", "create_builder_config", "(", ")", "self", ".", "config", ".", "max_workspace_size", "=", "workspace", "*", "(", "2", "**", "30", ")", "self", ".", "batch_size", "=", "None", "self", ".", "network", "=", "None", "self", ".", "parser", "=", "None" ]
https://github.com/NVIDIA/TensorRT/blob/42805f078052daad1a98bc5965974fcffaad0960/samples/python/tensorflow_object_detection_api/build_engine.py#L115-L132
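A construction-only sketch for EngineBuilder; it assumes a working TensorRT installation and that the sample's build_engine.py is on the path:

builder = EngineBuilder(verbose=True, workspace=4)  # 4 GiB max workspace
# The sample script then feeds an ONNX graph to this builder; those later calls are
# specific to build_engine.py and are not reproduced here.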
cyberbotics/webots
af7fa7d68dcf7b4550f1f2e132092b41e83698fc
projects/robots/universal_robots/resources/ros_package/ur_e_webots/src/ur_e_webots/trajectory_follower.py
python
TrajectoryFollower.on_goal
(self, goal_handle)
Handle a new goal trajectory command.
Handle a new goal trajectory command.
[ "Handle", "a", "new", "goal", "trajectory", "command", "." ]
def on_goal(self, goal_handle): """Handle a new goal trajectory command.""" # Checks if the joints are just incorrect if set(goal_handle.get_goal().trajectory.joint_names) != set(self.prefixedJointNames): rospy.logerr("Received a goal with incorrect joint names: (%s)" % ', '.join(goal_handle.get_goal().trajectory.joint_names)) goal_handle.set_rejected() return if not trajectory_is_finite(goal_handle.get_goal().trajectory): rospy.logerr("Received a goal with infinites or NaNs") goal_handle.set_rejected(text="Received a goal with infinites or NaNs") return # Checks that the trajectory has velocities if not has_velocities(goal_handle.get_goal().trajectory): rospy.logerr("Received a goal without velocities") goal_handle.set_rejected(text="Received a goal without velocities") return # Orders the joints of the trajectory according to joint_names reorder_trajectory_joints(goal_handle.get_goal().trajectory, self.prefixedJointNames) # Inserts the current setpoint at the head of the trajectory now = self.robot.getTime() point0 = sample_trajectory(self.trajectory, now - self.trajectory_t0) point0.time_from_start = rospy.Duration(0.0) goal_handle.get_goal().trajectory.points.insert(0, point0) self.trajectory_t0 = now # Replaces the goal self.goal_handle = goal_handle self.trajectory = goal_handle.get_goal().trajectory goal_handle.set_accepted()
[ "def", "on_goal", "(", "self", ",", "goal_handle", ")", ":", "# Checks if the joints are just incorrect", "if", "set", "(", "goal_handle", ".", "get_goal", "(", ")", ".", "trajectory", ".", "joint_names", ")", "!=", "set", "(", "self", ".", "prefixedJointNames", ")", ":", "rospy", ".", "logerr", "(", "\"Received a goal with incorrect joint names: (%s)\"", "%", "', '", ".", "join", "(", "goal_handle", ".", "get_goal", "(", ")", ".", "trajectory", ".", "joint_names", ")", ")", "goal_handle", ".", "set_rejected", "(", ")", "return", "if", "not", "trajectory_is_finite", "(", "goal_handle", ".", "get_goal", "(", ")", ".", "trajectory", ")", ":", "rospy", ".", "logerr", "(", "\"Received a goal with infinites or NaNs\"", ")", "goal_handle", ".", "set_rejected", "(", "text", "=", "\"Received a goal with infinites or NaNs\"", ")", "return", "# Checks that the trajectory has velocities", "if", "not", "has_velocities", "(", "goal_handle", ".", "get_goal", "(", ")", ".", "trajectory", ")", ":", "rospy", ".", "logerr", "(", "\"Received a goal without velocities\"", ")", "goal_handle", ".", "set_rejected", "(", "text", "=", "\"Received a goal without velocities\"", ")", "return", "# Orders the joints of the trajectory according to joint_names", "reorder_trajectory_joints", "(", "goal_handle", ".", "get_goal", "(", ")", ".", "trajectory", ",", "self", ".", "prefixedJointNames", ")", "# Inserts the current setpoint at the head of the trajectory", "now", "=", "self", ".", "robot", ".", "getTime", "(", ")", "point0", "=", "sample_trajectory", "(", "self", ".", "trajectory", ",", "now", "-", "self", ".", "trajectory_t0", ")", "point0", ".", "time_from_start", "=", "rospy", ".", "Duration", "(", "0.0", ")", "goal_handle", ".", "get_goal", "(", ")", ".", "trajectory", ".", "points", ".", "insert", "(", "0", ",", "point0", ")", "self", ".", "trajectory_t0", "=", "now", "# Replaces the goal", "self", ".", "goal_handle", "=", "goal_handle", "self", ".", "trajectory", "=", "goal_handle", ".", "get_goal", "(", ")", ".", "trajectory", "goal_handle", ".", "set_accepted", "(", ")" ]
https://github.com/cyberbotics/webots/blob/af7fa7d68dcf7b4550f1f2e132092b41e83698fc/projects/robots/universal_robots/resources/ros_package/ur_e_webots/src/ur_e_webots/trajectory_follower.py#L153-L186
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/telnetlib.py
python
Telnet.get_socket
(self)
return self.sock
Return the socket object used internally.
Return the socket object used internally.
[ "Return", "the", "socket", "object", "used", "internally", "." ]
def get_socket(self): """Return the socket object used internally.""" return self.sock
[ "def", "get_socket", "(", "self", ")", ":", "return", "self", ".", "sock" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/telnetlib.py#L265-L267
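A usage sketch for get_socket(): tune the underlying socket directly. The host is a placeholder, a reachable telnet server is assumed, and telnetlib itself was removed from the standard library in Python 3.13:

import socket
from telnetlib import Telnet

tn = Telnet('192.0.2.10', 23, timeout=5)  # placeholder host
tn.get_socket().setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
tn.close()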
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/dateutil/rrule.py
python
_rrulestr._handle_BYWEEKDAY
(self, rrkwargs, name, value, **kwargs)
Two ways to specify this: +1MO or MO(+1)
Two ways to specify this: +1MO or MO(+1)
[ "Two", "ways", "to", "specify", "this", ":", "+", "1MO", "or", "MO", "(", "+", "1", ")" ]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): """ Two ways to specify this: +1MO or MO(+1) """ l = [] for wday in value.split(','): if '(' in wday: # If it's of the form TH(+1), etc. splt = wday.split('(') w = splt[0] n = int(splt[1][:-1]) elif len(wday): # If it's of the form +1MO for i in range(len(wday)): if wday[i] not in '+-0123456789': break n = wday[:i] or None w = wday[i:] if n: n = int(n) else: raise ValueError("Invalid (empty) BYDAY specification.") l.append(weekdays[self._weekday_map[w]](n)) rrkwargs["byweekday"] = l
[ "def", "_handle_BYWEEKDAY", "(", "self", ",", "rrkwargs", ",", "name", ",", "value", ",", "*", "*", "kwargs", ")", ":", "l", "=", "[", "]", "for", "wday", "in", "value", ".", "split", "(", "','", ")", ":", "if", "'('", "in", "wday", ":", "# If it's of the form TH(+1), etc.", "splt", "=", "wday", ".", "split", "(", "'('", ")", "w", "=", "splt", "[", "0", "]", "n", "=", "int", "(", "splt", "[", "1", "]", "[", ":", "-", "1", "]", ")", "elif", "len", "(", "wday", ")", ":", "# If it's of the form +1MO", "for", "i", "in", "range", "(", "len", "(", "wday", ")", ")", ":", "if", "wday", "[", "i", "]", "not", "in", "'+-0123456789'", ":", "break", "n", "=", "wday", "[", ":", "i", "]", "or", "None", "w", "=", "wday", "[", "i", ":", "]", "if", "n", ":", "n", "=", "int", "(", "n", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid (empty) BYDAY specification.\"", ")", "l", ".", "append", "(", "weekdays", "[", "self", ".", "_weekday_map", "[", "w", "]", "]", "(", "n", ")", ")", "rrkwargs", "[", "\"byweekday\"", "]", "=", "l" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/dateutil/rrule.py#L1507-L1531
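Both accepted BYDAY spellings can be seen through the public rrulestr() entry point; the start date is arbitrary:

from datetime import datetime
from dateutil.rrule import rrulestr

first_mondays = rrulestr("FREQ=MONTHLY;COUNT=3;BYDAY=+1MO", dtstart=datetime(2024, 1, 1))
also_first_mondays = rrulestr("FREQ=MONTHLY;COUNT=3;BYDAY=MO(+1)", dtstart=datetime(2024, 1, 1))
print(list(first_mondays))  # the first Monday of Jan, Feb and Mar 2024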
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/cuda/cudadrv/devicearray.py
python
DeviceNDArrayBase.__init__
(self, shape, strides, dtype, stream=0, writeback=None, gpu_data=None)
Args ---- shape array shape. strides array strides. dtype data type as np.dtype. stream cuda stream. writeback Deprecated. gpu_data user provided device memory for the ndarray data buffer
Args ----
[ "Args", "----" ]
def __init__(self, shape, strides, dtype, stream=0, writeback=None, gpu_data=None): """ Args ---- shape array shape. strides array strides. dtype data type as np.dtype. stream cuda stream. writeback Deprecated. gpu_data user provided device memory for the ndarray data buffer """ if isinstance(shape, six.integer_types): shape = (shape,) if isinstance(strides, six.integer_types): strides = (strides,) self.ndim = len(shape) if len(strides) != self.ndim: raise ValueError('strides not match ndim') self._dummy = dummyarray.Array.from_desc(0, shape, strides, dtype.itemsize) self.shape = tuple(shape) self.strides = tuple(strides) self.dtype = np.dtype(dtype) self.size = int(functools.reduce(operator.mul, self.shape, 1)) # prepare gpu memory if self.size > 0: if gpu_data is None: self.alloc_size = _driver.memory_size_from_info(self.shape, self.strides, self.dtype.itemsize) gpu_data = devices.get_context().memalloc(self.alloc_size) else: self.alloc_size = _driver.device_memory_size(gpu_data) else: # Make NULL pointer for empty allocation gpu_data = _driver.MemoryPointer(context=devices.get_context(), pointer=c_void_p(0), size=0) self.alloc_size = 0 self.gpu_data = gpu_data self.__writeback = writeback # should deprecate the use of this self.stream = stream
[ "def", "__init__", "(", "self", ",", "shape", ",", "strides", ",", "dtype", ",", "stream", "=", "0", ",", "writeback", "=", "None", ",", "gpu_data", "=", "None", ")", ":", "if", "isinstance", "(", "shape", ",", "six", ".", "integer_types", ")", ":", "shape", "=", "(", "shape", ",", ")", "if", "isinstance", "(", "strides", ",", "six", ".", "integer_types", ")", ":", "strides", "=", "(", "strides", ",", ")", "self", ".", "ndim", "=", "len", "(", "shape", ")", "if", "len", "(", "strides", ")", "!=", "self", ".", "ndim", ":", "raise", "ValueError", "(", "'strides not match ndim'", ")", "self", ".", "_dummy", "=", "dummyarray", ".", "Array", ".", "from_desc", "(", "0", ",", "shape", ",", "strides", ",", "dtype", ".", "itemsize", ")", "self", ".", "shape", "=", "tuple", "(", "shape", ")", "self", ".", "strides", "=", "tuple", "(", "strides", ")", "self", ".", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "self", ".", "size", "=", "int", "(", "functools", ".", "reduce", "(", "operator", ".", "mul", ",", "self", ".", "shape", ",", "1", ")", ")", "# prepare gpu memory", "if", "self", ".", "size", ">", "0", ":", "if", "gpu_data", "is", "None", ":", "self", ".", "alloc_size", "=", "_driver", ".", "memory_size_from_info", "(", "self", ".", "shape", ",", "self", ".", "strides", ",", "self", ".", "dtype", ".", "itemsize", ")", "gpu_data", "=", "devices", ".", "get_context", "(", ")", ".", "memalloc", "(", "self", ".", "alloc_size", ")", "else", ":", "self", ".", "alloc_size", "=", "_driver", ".", "device_memory_size", "(", "gpu_data", ")", "else", ":", "# Make NULL pointer for empty allocation", "gpu_data", "=", "_driver", ".", "MemoryPointer", "(", "context", "=", "devices", ".", "get_context", "(", ")", ",", "pointer", "=", "c_void_p", "(", "0", ")", ",", "size", "=", "0", ")", "self", ".", "alloc_size", "=", "0", "self", ".", "gpu_data", "=", "gpu_data", "self", ".", "__writeback", "=", "writeback", "# should deprecate the use of this", "self", ".", "stream", "=", "stream" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/cuda/cudadrv/devicearray.py#L65-L115
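The constructor above is normally reached through Numba's public allocation helpers rather than called directly; a CUDA-capable GPU is assumed:

import numpy as np
from numba import cuda

d_arr = cuda.device_array(shape=(4, 4), dtype=np.float32)   # allocates a DeviceNDArray
d_arr.copy_to_device(np.zeros((4, 4), dtype=np.float32))
host_copy = d_arr.copy_to_host()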
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_windows.py
python
VarScrollHelperBase.CalcUnscrolledPosition
(*args, **kwargs)
return _windows_.VarScrollHelperBase_CalcUnscrolledPosition(*args, **kwargs)
CalcUnscrolledPosition(self, int coord) -> int
CalcUnscrolledPosition(self, int coord) -> int
[ "CalcUnscrolledPosition", "(", "self", "int", "coord", ")", "-", ">", "int" ]
def CalcUnscrolledPosition(*args, **kwargs): """CalcUnscrolledPosition(self, int coord) -> int""" return _windows_.VarScrollHelperBase_CalcUnscrolledPosition(*args, **kwargs)
[ "def", "CalcUnscrolledPosition", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "VarScrollHelperBase_CalcUnscrolledPosition", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_windows.py#L2234-L2236
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/learn/python/learn/estimators/head.py
python
_check_no_sparse_tensor
(x)
Raises ValueError if the given tensor is `SparseTensor`.
Raises ValueError if the given tensor is `SparseTensor`.
[ "Raises", "ValueError", "if", "the", "given", "tensor", "is", "SparseTensor", "." ]
def _check_no_sparse_tensor(x): """Raises ValueError if the given tensor is `SparseTensor`.""" if isinstance(x, sparse_tensor.SparseTensor): raise ValueError("SparseTensor is not supported.")
[ "def", "_check_no_sparse_tensor", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "sparse_tensor", ".", "SparseTensor", ")", ":", "raise", "ValueError", "(", "\"SparseTensor is not supported.\"", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/learn/python/learn/estimators/head.py#L1171-L1174
brave/brave-core
ceaa3de4735789d355b6fa80c21d4709e2c1d0e8
script/lib/github.py
python
parse_user_logins
(token, login_csv, verbose=False)
return parsed_logins
given a list of logins in csv format, parse into a list and validate logins
given a list of logins in csv format, parse into a list and validate logins
[ "given", "a", "list", "of", "logins", "in", "csv", "format", "parse", "into", "a", "list", "and", "validate", "logins" ]
def parse_user_logins(token, login_csv, verbose=False): """given a list of logins in csv format, parse into a list and validate logins""" if login_csv is None: return [] login_csv = login_csv.replace(" ", "") parsed_logins = login_csv.split(',') users = GitHub(token).users() invalid_logins = [] # check login/username against GitHub # for more info see: https://developer.github.com/v3/users/#get-a-single-user for login in parsed_logins: try: response = users(login).get() if verbose: print('[INFO] Login "' + login + '" found: ' + str(response)) except Exception as e: if verbose: print('[INFO] Login "' + login + '" does not appear to be valid. ' + str(e)) invalid_logins.append(login) if len(invalid_logins) > 0: raise Exception( 'Invalid logins found. Are they misspelled? ' + ','.join(invalid_logins)) return parsed_logins
[ "def", "parse_user_logins", "(", "token", ",", "login_csv", ",", "verbose", "=", "False", ")", ":", "if", "login_csv", "is", "None", ":", "return", "[", "]", "login_csv", "=", "login_csv", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "parsed_logins", "=", "login_csv", ".", "split", "(", "','", ")", "users", "=", "GitHub", "(", "token", ")", ".", "users", "(", ")", "invalid_logins", "=", "[", "]", "# check login/username against GitHub", "# for more info see: https://developer.github.com/v3/users/#get-a-single-user", "for", "login", "in", "parsed_logins", ":", "try", ":", "response", "=", "users", "(", "login", ")", ".", "get", "(", ")", "if", "verbose", ":", "print", "(", "'[INFO] Login \"'", "+", "login", "+", "'\" found: '", "+", "str", "(", "response", ")", ")", "except", "Exception", "as", "e", ":", "if", "verbose", ":", "print", "(", "'[INFO] Login \"'", "+", "login", "+", "'\" does not appear to be valid. '", "+", "str", "(", "e", ")", ")", "invalid_logins", ".", "append", "(", "login", ")", "if", "len", "(", "invalid_logins", ")", ">", "0", ":", "raise", "Exception", "(", "'Invalid logins found. Are they misspelled? '", "+", "','", ".", "join", "(", "invalid_logins", ")", ")", "return", "parsed_logins" ]
https://github.com/brave/brave-core/blob/ceaa3de4735789d355b6fa80c21d4709e2c1d0e8/script/lib/github.py#L101-L129
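A usage sketch with parse_user_logins() in scope; the token comes from an environment variable here purely as a placeholder and the logins are invented:

import os

token = os.environ.get('GITHUB_TOKEN')  # placeholder token source
logins = parse_user_logins(token, 'alice, bob', verbose=True)  # raises if any login is unknown on GitHub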
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2class.py
python
parserCtxt.ctxtReset
(self)
Reset a parser context
Reset a parser context
[ "Reset", "a", "parser", "context" ]
def ctxtReset(self): """Reset a parser context """ libxml2mod.xmlCtxtReset(self._o)
[ "def", "ctxtReset", "(", "self", ")", ":", "libxml2mod", ".", "xmlCtxtReset", "(", "self", ".", "_o", ")" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L4290-L4292
microsoft/ELL
a1d6bacc37a14879cc025d9be2ba40b1a0632315
tools/importers/CNTK/lib/cntk_utilities.py
python
get_adjusted_shape
(inputShape, paddingParameters)
return ell.math.TensorShape(rows, columns, channels)
Returns the ell.math.TensorShape corresponding to the input shape adjusted with padding
Returns the ell.math.TensorShape corresponding to the input shape adjusted with padding
[ "Returns", "the", "ell", ".", "math", ".", "TensorShape", "corresponding", "to", "the", "input", "shape", "adjusted", "with", "padding" ]
def get_adjusted_shape(inputShape, paddingParameters): """"Returns the ell.math.TensorShape corresponding to the input shape adjusted with padding""" if (len(inputShape) == 3): # Adjust the input shape to account for padding in the row and column dimensions # CNTK's shape tensor is in channels, rows, columns order channels = inputShape[0] rows = inputShape[1] columns = inputShape[2] rows += 2 * paddingParameters.paddingSize columns += 2 * paddingParameters.paddingSize elif (len(inputShape) == 1): # If the input shape is a vector, make it a tensor with 1 row, 1 column and number of channels equal to the # length of the vector channels = inputShape[0] rows = 1 columns = 1 else: raise NotImplementedError( "Unsupported input shape length: " + str(len(inputShape))) return ell.math.TensorShape(rows, columns, channels)
[ "def", "get_adjusted_shape", "(", "inputShape", ",", "paddingParameters", ")", ":", "if", "(", "len", "(", "inputShape", ")", "==", "3", ")", ":", "# Adjust the input shape to account for padding in the row and column dimensions", "# CNTK's shape tensor is in channels, rows, columns order", "channels", "=", "inputShape", "[", "0", "]", "rows", "=", "inputShape", "[", "1", "]", "columns", "=", "inputShape", "[", "2", "]", "rows", "+=", "2", "*", "paddingParameters", ".", "paddingSize", "columns", "+=", "2", "*", "paddingParameters", ".", "paddingSize", "elif", "(", "len", "(", "inputShape", ")", "==", "1", ")", ":", "# If the input shape is a vector, make it a tensor with 1 row, 1 column and number of channels equal to the", "# length of the vector", "channels", "=", "inputShape", "[", "0", "]", "rows", "=", "1", "columns", "=", "1", "else", ":", "raise", "NotImplementedError", "(", "\"Unsupported input shape length: \"", "+", "str", "(", "len", "(", "inputShape", ")", ")", ")", "return", "ell", ".", "math", ".", "TensorShape", "(", "rows", ",", "columns", ",", "channels", ")" ]
https://github.com/microsoft/ELL/blob/a1d6bacc37a14879cc025d9be2ba40b1a0632315/tools/importers/CNTK/lib/cntk_utilities.py#L131-L153
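The padding arithmetic can be illustrated with a stand-in for the CNTK padding-parameters object, since only its paddingSize attribute is read; running the call itself needs the ELL Python package:

from types import SimpleNamespace

padding = SimpleNamespace(paddingSize=1)  # stand-in exposing just .paddingSize
# inputShape = (3, 224, 224) is (channels, rows, columns); rows and columns each grow by
# 2 * paddingSize, so get_adjusted_shape((3, 224, 224), padding) yields TensorShape(226, 226, 3).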
tensorflow/io
92b44e180674a8af0e12e405530f7343e3e693e4
tensorflow_io/python/ops/mnist_dataset_ops.py
python
MNISTImageIODataset.__init__
(self, filename)
Create a MNISTImageDataset. Args: filename: A `tf.string` tensor containing filename.
Create a MNISTImageDataset.
[ "Create", "a", "MNISTImageDataset", "." ]
def __init__(self, filename): """Create a MNISTImageDataset. Args: filename: A `tf.string` tensor containing filename. """ _, compression = core_ops.io_file_info(filename) rows = tf.io.decode_raw( core_ops.io_file_read(filename, 8, 4, compression=compression), tf.int32, little_endian=False, ) cols = tf.io.decode_raw( core_ops.io_file_read(filename, 12, 4, compression=compression), tf.int32, little_endian=False, ) lens = rows[0] * cols[0] dataset = tf.data.FixedLengthRecordDataset( filename, tf.cast(lens, tf.int64), header_bytes=16, compression_type=compression, ) dataset = dataset.map(lambda e: tf.io.decode_raw(e, tf.uint8)) dataset = dataset.map(lambda e: tf.reshape(e, tf.concat([rows, cols], axis=0))) self._dataset = dataset super().__init__( self._dataset._variant_tensor )
[ "def", "__init__", "(", "self", ",", "filename", ")", ":", "_", ",", "compression", "=", "core_ops", ".", "io_file_info", "(", "filename", ")", "rows", "=", "tf", ".", "io", ".", "decode_raw", "(", "core_ops", ".", "io_file_read", "(", "filename", ",", "8", ",", "4", ",", "compression", "=", "compression", ")", ",", "tf", ".", "int32", ",", "little_endian", "=", "False", ",", ")", "cols", "=", "tf", ".", "io", ".", "decode_raw", "(", "core_ops", ".", "io_file_read", "(", "filename", ",", "12", ",", "4", ",", "compression", "=", "compression", ")", ",", "tf", ".", "int32", ",", "little_endian", "=", "False", ",", ")", "lens", "=", "rows", "[", "0", "]", "*", "cols", "[", "0", "]", "dataset", "=", "tf", ".", "data", ".", "FixedLengthRecordDataset", "(", "filename", ",", "tf", ".", "cast", "(", "lens", ",", "tf", ".", "int64", ")", ",", "header_bytes", "=", "16", ",", "compression_type", "=", "compression", ",", ")", "dataset", "=", "dataset", ".", "map", "(", "lambda", "e", ":", "tf", ".", "io", ".", "decode_raw", "(", "e", ",", "tf", ".", "uint8", ")", ")", "dataset", "=", "dataset", ".", "map", "(", "lambda", "e", ":", "tf", ".", "reshape", "(", "e", ",", "tf", ".", "concat", "(", "[", "rows", ",", "cols", "]", ",", "axis", "=", "0", ")", ")", ")", "self", ".", "_dataset", "=", "dataset", "super", "(", ")", ".", "__init__", "(", "self", ".", "_dataset", ".", "_variant_tensor", ")" ]
https://github.com/tensorflow/io/blob/92b44e180674a8af0e12e405530f7343e3e693e4/tensorflow_io/python/ops/mnist_dataset_ops.py#L53-L84
cornell-zhang/heterocl
6d9e4b4acc2ee2707b2d25b27298c0335bccedfd
python/heterocl/tvm/contrib/graph_runtime.py
python
GraphModule.load_params
(self, params_bytes)
Load parameters from serialized byte array of parameter dict. Parameters ---------- params_bytes : bytearray The serialized parameter dict.
Load parameters from serialized byte array of parameter dict.
[ "Load", "parameters", "from", "serialized", "byte", "array", "of", "parameter", "dict", "." ]
def load_params(self, params_bytes): """Load parameters from serialized byte array of parameter dict. Parameters ---------- params_bytes : bytearray The serialized parameter dict. """ self._load_params(bytearray(params_bytes))
[ "def", "load_params", "(", "self", ",", "params_bytes", ")", ":", "self", ".", "_load_params", "(", "bytearray", "(", "params_bytes", ")", ")" ]
https://github.com/cornell-zhang/heterocl/blob/6d9e4b4acc2ee2707b2d25b27298c0335bccedfd/python/heterocl/tvm/contrib/graph_runtime.py#L145-L153
calamares/calamares
9f6f82405b3074af7c99dc26487d2e46e4ece3e5
src/modules/plymouthcfg/main.py
python
detect_plymouth
()
return target_env_call(["sh", "-c", "which plymouth"]) == 0
Checks existence (runnability) of plymouth in the target system. @return True if plymouth exists in the target, False otherwise
Checks existence (runnability) of plymouth in the target system.
[ "Checks", "existence", "(", "runnability", ")", "of", "plymouth", "in", "the", "target", "system", "." ]
def detect_plymouth(): """ Checks existence (runnability) of plymouth in the target system. @return True if plymouth exists in the target, False otherwise """ # Used to only check existence of path /usr/bin/plymouth in target return target_env_call(["sh", "-c", "which plymouth"]) == 0
[ "def", "detect_plymouth", "(", ")", ":", "# Used to only check existence of path /usr/bin/plymouth in target", "return", "target_env_call", "(", "[", "\"sh\"", ",", "\"-c\"", ",", "\"which plymouth\"", "]", ")", "==", "0" ]
https://github.com/calamares/calamares/blob/9f6f82405b3074af7c99dc26487d2e46e4ece3e5/src/modules/plymouthcfg/main.py#L30-L37
pristineio/webrtc-mirror
7a5bcdffaab90a05bc1146b2b1ea71c004e54d71
PRESUBMIT.py
python
_RunCommand
(command, cwd)
return p.returncode, stdout, stderr
Runs a command and returns the output from that command.
Runs a command and returns the output from that command.
[ "Runs", "a", "command", "and", "returns", "the", "output", "from", "that", "command", "." ]
def _RunCommand(command, cwd): """Runs a command and returns the output from that command.""" p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd) stdout = p.stdout.read() stderr = p.stderr.read() p.wait() p.stdout.close() p.stderr.close() return p.returncode, stdout, stderr
[ "def", "_RunCommand", "(", "command", ",", "cwd", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "cwd", "=", "cwd", ")", "stdout", "=", "p", ".", "stdout", ".", "read", "(", ")", "stderr", "=", "p", ".", "stderr", ".", "read", "(", ")", "p", ".", "wait", "(", ")", "p", ".", "stdout", ".", "close", "(", ")", "p", ".", "stderr", ".", "close", "(", ")", "return", "p", ".", "returncode", ",", "stdout", ",", "stderr" ]
https://github.com/pristineio/webrtc-mirror/blob/7a5bcdffaab90a05bc1146b2b1ea71c004e54d71/PRESUBMIT.py#L97-L106
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/variables.py
python
RefVariable.initializer
(self)
return self._initializer_op
The initializer operation for this variable.
The initializer operation for this variable.
[ "The", "initializer", "operation", "for", "this", "variable", "." ]
def initializer(self): """The initializer operation for this variable.""" return self._initializer_op
[ "def", "initializer", "(", "self", ")", ":", "return", "self", ".", "_initializer_op" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/variables.py#L2602-L2604
OpenGenus/cosmos
1a94e8880068e51d571543be179c323936bd0936
code/data_structures/src/list/singly_linked_list/operations/insertion/insertion_at_end.py
python
Node.__init__
(self, data=None, next=None)
Initializes node structure
Initializes node structure
[ "Initializes", "node", "structure" ]
def __init__(self, data=None, next=None): """ Initializes node structure""" self.data = data self.next = next
[ "def", "__init__", "(", "self", ",", "data", "=", "None", ",", "next", "=", "None", ")", ":", "self", ".", "data", "=", "data", "self", ".", "next", "=", "next" ]
https://github.com/OpenGenus/cosmos/blob/1a94e8880068e51d571543be179c323936bd0936/code/data_structures/src/list/singly_linked_list/operations/insertion/insertion_at_end.py#L19-L22
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py2/scipy/special/basic.py
python
jn_zeros
(n, nt)
return jnyn_zeros(n, nt)[0]
Compute zeros of integer-order Bessel function Jn(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
Compute zeros of integer-order Bessel function Jn(x).
[ "Compute", "zeros", "of", "integer", "-", "order", "Bessel", "function", "Jn", "(", "x", ")", "." ]
def jn_zeros(n, nt): """Compute zeros of integer-order Bessel function Jn(x). Parameters ---------- n : int Order of Bessel function nt : int Number of zeros to return References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ return jnyn_zeros(n, nt)[0]
[ "def", "jn_zeros", "(", "n", ",", "nt", ")", ":", "return", "jnyn_zeros", "(", "n", ",", "nt", ")", "[", "0", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/special/basic.py#L230-L247
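A minimal usage sketch for the jn_zeros record above, assuming SciPy is installed; the printed values are the well-known first zeros of J0 (illustrative, not part of the record data):
from scipy.special import jn_zeros
# First three positive zeros of the order-0 Bessel function J0(x).
print(jn_zeros(0, 3))  # approximately [2.40482556  5.52007811  8.65372791]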
godlikepanos/anki-3d-engine
e2f65e5045624492571ea8527a4dbf3fad8d2c0a
ThirdParty/Glslang/External/spirv-tools/utils/generate_language_headers.py
python
make_path_to_file
(f)
Makes all ancestor directories to the given file, if they don't yet exist. Arguments: f: The file whose ancestor directories are to be created.
Makes all ancestor directories to the given file, if they don't yet exist.
[ "Makes", "all", "ancestor", "directories", "to", "the", "given", "file", "if", "they", "don", "t", "yet", "exist", "." ]
def make_path_to_file(f): """Makes all ancestor directories to the given file, if they don't yet exist. Arguments: f: The file whose ancestor directories are to be created. """ dir = os.path.dirname(os.path.abspath(f)) try: os.makedirs(dir) except OSError as e: if e.errno == errno.EEXIST and os.path.isdir(dir): pass else: raise
[ "def", "make_path_to_file", "(", "f", ")", ":", "dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "f", ")", ")", "try", ":", "os", ".", "makedirs", "(", "dir", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EEXIST", "and", "os", ".", "path", ".", "isdir", "(", "dir", ")", ":", "pass", "else", ":", "raise" ]
https://github.com/godlikepanos/anki-3d-engine/blob/e2f65e5045624492571ea8527a4dbf3fad8d2c0a/ThirdParty/Glslang/External/spirv-tools/utils/generate_language_headers.py#L23-L37
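A short sketch of the same behaviour as make_path_to_file using only the standard library, assuming Python 3 and a hypothetical target path (os.makedirs with exist_ok=True covers the EEXIST case handled above):
import os
# Hypothetical output file; only its ancestor directories are created.
target = "build/include/spirv/unified1/spirv.h"
os.makedirs(os.path.dirname(os.path.abspath(target)), exist_ok=True)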
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/build/pymake/pymake/data.py
python
BaseExpansion.variable_references
(self, descend=False)
Obtain all variable references in this expansion. This is a generator for pymake.functionsVariableRef instances. To retrieve the names of variables, simply query the `vname` field on the returned instances. Most of the time these will be StringExpansion instances.
Obtain all variable references in this expansion.
[ "Obtain", "all", "variable", "references", "in", "this", "expansion", "." ]
def variable_references(self, descend=False): """Obtain all variable references in this expansion. This is a generator for pymake.functionsVariableRef instances. To retrieve the names of variables, simply query the `vname` field on the returned instances. Most of the time these will be StringExpansion instances. """ for f in self.functions(descend=descend): if not isinstance(f, functions.VariableRef): continue yield f
[ "def", "variable_references", "(", "self", ",", "descend", "=", "False", ")", ":", "for", "f", "in", "self", ".", "functions", "(", "descend", "=", "descend", ")", ":", "if", "not", "isinstance", "(", "f", ",", "functions", ".", "VariableRef", ")", ":", "continue", "yield", "f" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/build/pymake/pymake/data.py#L101-L114
NVIDIA/TensorRT
42805f078052daad1a98bc5965974fcffaad0960
samples/python/yolov3_onnx/yolov3_to_onnx.py
python
WeightLoader.__init__
(self, weights_file_path)
Initialized with a path to the YOLOv3 .weights file. Keyword argument: weights_file_path -- path to the weights file.
Initialized with a path to the YOLOv3 .weights file.
[ "Initialized", "with", "a", "path", "to", "the", "YOLOv3", ".", "weights", "file", "." ]
def __init__(self, weights_file_path): """Initialized with a path to the YOLOv3 .weights file. Keyword argument: weights_file_path -- path to the weights file. """ self.weights_file = self._open_weights_file(weights_file_path)
[ "def", "__init__", "(", "self", ",", "weights_file_path", ")", ":", "self", ".", "weights_file", "=", "self", ".", "_open_weights_file", "(", "weights_file_path", ")" ]
https://github.com/NVIDIA/TensorRT/blob/42805f078052daad1a98bc5965974fcffaad0960/samples/python/yolov3_onnx/yolov3_to_onnx.py#L231-L237
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py
python
PartialProgramLayer._check_params_all_inited
(self, main_program)
Check all params from main program are already initialized, see details as follows: 1. all parameters in self._params should be type `framework.ParamBase` which are created in dygraph. 2. all parameters from transformed program can be found in self._params. Because they share same data with ParamBase of original dygraph.
Check all params from main program are already initialized, see details as follows: 1. all parameters in self._params should be type `framework.ParamBase` which are created in dygraph. 2. all parameters from transformed program can be found in self._params. Because they share same data with ParamBase of original dygraph.
[ "Check", "all", "params", "from", "main", "program", "are", "already", "initialized", "see", "details", "as", "follows", ":", "1", ".", "all", "parameters", "in", "self", ".", "_params", "should", "be", "type", "framework", ".", "ParamBase", "which", "are", "created", "in", "dygraph", ".", "2", ".", "all", "parameters", "from", "transformed", "program", "can", "be", "found", "in", "self", ".", "_params", ".", "Because", "they", "share", "same", "data", "with", "ParamBase", "of", "original", "dygraph", "." ]
def _check_params_all_inited(self, main_program): """ Check all params from main program are already initialized, see details as follows: 1. all parameters in self._params should be type `framework.ParamBase` which are created in dygraph. 2. all parameters from transformed program can be found in self._params. Because they share same data with ParamBase of original dygraph. """ if not isinstance(self._params, (list, tuple)): raise TypeError( "Type of self._params in PartialProgramLayer should be list or tuple, but received %s." % type(self._params)) param_and_buffer_names_set = set() for i, var in enumerate(self._params): # self._params constains parameters and buffers with persistable=True. if not isinstance(var, core.VarBase): raise TypeError( 'Type of self._params[{}] in PartialProgramLayer should be Parameter or Variable, but received {}.'. format(i, type(var))) param_and_buffer_names_set.add(var.name) for block in main_program.blocks: for name, var in six.iteritems(block.vars): if isinstance(var, framework.Parameter): if name not in param_and_buffer_names_set: raise ValueError( "\n\tWe don't support to define layer with parameters in the function decorated by `@to_static`." "\n\tBut we found parameter(%s) was created in the decorated function." "\n" "\n\tRevise suggestion: " "\n\t\t1. Please ensure all your sublayers are inheritted from nn.Layer." "\n\t\t2. Please use nn.ParameterList and nn.LayerList as container instead of using a native Python container such as List" % name)
[ "def", "_check_params_all_inited", "(", "self", ",", "main_program", ")", ":", "if", "not", "isinstance", "(", "self", ".", "_params", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "\"Type of self._params in PartialProgramLayer should be list or tuple, but received %s.\"", "%", "type", "(", "self", ".", "_params", ")", ")", "param_and_buffer_names_set", "=", "set", "(", ")", "for", "i", ",", "var", "in", "enumerate", "(", "self", ".", "_params", ")", ":", "# self._params constains parameters and buffers with persistable=True.", "if", "not", "isinstance", "(", "var", ",", "core", ".", "VarBase", ")", ":", "raise", "TypeError", "(", "'Type of self._params[{}] in PartialProgramLayer should be Parameter or Variable, but received {}.'", ".", "format", "(", "i", ",", "type", "(", "var", ")", ")", ")", "param_and_buffer_names_set", ".", "add", "(", "var", ".", "name", ")", "for", "block", "in", "main_program", ".", "blocks", ":", "for", "name", ",", "var", "in", "six", ".", "iteritems", "(", "block", ".", "vars", ")", ":", "if", "isinstance", "(", "var", ",", "framework", ".", "Parameter", ")", ":", "if", "name", "not", "in", "param_and_buffer_names_set", ":", "raise", "ValueError", "(", "\"\\n\\tWe don't support to define layer with parameters in the function decorated by `@to_static`.\"", "\"\\n\\tBut we found parameter(%s) was created in the decorated function.\"", "\"\\n\"", "\"\\n\\tRevise suggestion: \"", "\"\\n\\t\\t1. Please ensure all your sublayers are inheritted from nn.Layer.\"", "\"\\n\\t\\t2. Please use nn.ParameterList and nn.LayerList as container instead of using a native Python container such as List\"", "%", "name", ")" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py#L515-L547
giuspen/cherrytree
84712f206478fcf9acf30174009ad28c648c6344
pygtk2/modules/findreplace.py
python
FindReplace.find_back
(self)
Continue the previous search (a_node/in_selected_node/in_all_nodes) but in Opposite Direction
Continue the previous search (a_node/in_selected_node/in_all_nodes) but in Opposite Direction
[ "Continue", "the", "previous", "search", "(", "a_node", "/", "in_selected_node", "/", "in_all_nodes", ")", "but", "in", "Opposite", "Direction" ]
def find_back(self): """Continue the previous search (a_node/in_selected_node/in_all_nodes) but in Opposite Direction""" self.from_find_back = True self.replace_active = False self.find_again()
[ "def", "find_back", "(", "self", ")", ":", "self", ".", "from_find_back", "=", "True", "self", ".", "replace_active", "=", "False", "self", ".", "find_again", "(", ")" ]
https://github.com/giuspen/cherrytree/blob/84712f206478fcf9acf30174009ad28c648c6344/pygtk2/modules/findreplace.py#L843-L847
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/rfc822.py
python
parsedate
(data)
return t[:9]
Convert a time string to a time tuple.
Convert a time string to a time tuple.
[ "Convert", "a", "time", "string", "to", "a", "time", "tuple", "." ]
def parsedate(data): """Convert a time string to a time tuple.""" t = parsedate_tz(data) if t is None: return t return t[:9]
[ "def", "parsedate", "(", "data", ")", ":", "t", "=", "parsedate_tz", "(", "data", ")", "if", "t", "is", "None", ":", "return", "t", "return", "t", "[", ":", "9", "]" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/rfc822.py#L935-L940
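A usage sketch for the parsedate record above; on Python 3 the equivalent helper lives in email.utils and is assumed here to behave the same way (illustrative only):
from email.utils import parsedate
# Returns a 9-item time tuple suitable for time.mktime(), or None on failure.
print(parsedate("Mon, 20 Nov 1995 19:12:08 -0500"))
# (1995, 11, 20, 19, 12, 8, 0, 1, -1)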
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Tools/Scripts/patchcheck.py
python
normalize_c_whitespace
(file_paths)
return fixed
Report if any C files
Report if any C files
[ "Report", "if", "any", "C", "files" ]
def normalize_c_whitespace(file_paths): """Report if any C files """ fixed = [] for path in file_paths: abspath = os.path.join(SRCDIR, path) with open(abspath, 'r') as f: if '\t' not in f.read(): continue untabify.process(abspath, 8, verbose=False) fixed.append(path) return fixed
[ "def", "normalize_c_whitespace", "(", "file_paths", ")", ":", "fixed", "=", "[", "]", "for", "path", "in", "file_paths", ":", "abspath", "=", "os", ".", "path", ".", "join", "(", "SRCDIR", ",", "path", ")", "with", "open", "(", "abspath", ",", "'r'", ")", "as", "f", ":", "if", "'\\t'", "not", "in", "f", ".", "read", "(", ")", ":", "continue", "untabify", ".", "process", "(", "abspath", ",", "8", ",", "verbose", "=", "False", ")", "fixed", ".", "append", "(", "path", ")", "return", "fixed" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Tools/Scripts/patchcheck.py#L104-L114
psi4/psi4
be533f7f426b6ccc263904e55122899b16663395
psi4/driver/ipi_broker.py
python
ipi_broker
(LOT, molecule=None, serverdata=False, options=None)
Run IPIBroker to connect to i-pi Arguments: molecule: Initial molecule serverdata: Configuration where to connect to ipi options: any additional Psi4 options
Run IPIBroker to connect to i-pi
[ "Run", "IPIBroker", "to", "connect", "to", "i", "-", "pi" ]
def ipi_broker(LOT, molecule=None, serverdata=False, options=None): """ Run IPIBroker to connect to i-pi Arguments: molecule: Initial molecule serverdata: Configuration where to connect to ipi options: any additional Psi4 options """ b = IPIBroker(LOT, molecule=molecule, serverdata=serverdata, options=options) try: if b.serverdata: b.run() else: return b except KeyboardInterrupt: psi4.core.print_out("Killing IPIBroker\n") b.__del__() # lgtm [py/explicit-call-to-delete] sys.exit(1)
[ "def", "ipi_broker", "(", "LOT", ",", "molecule", "=", "None", ",", "serverdata", "=", "False", ",", "options", "=", "None", ")", ":", "b", "=", "IPIBroker", "(", "LOT", ",", "molecule", "=", "molecule", ",", "serverdata", "=", "serverdata", ",", "options", "=", "options", ")", "try", ":", "if", "b", ".", "serverdata", ":", "b", ".", "run", "(", ")", "else", ":", "return", "b", "except", "KeyboardInterrupt", ":", "psi4", ".", "core", ".", "print_out", "(", "\"Killing IPIBroker\\n\"", ")", "b", ".", "__del__", "(", ")", "# lgtm [py/explicit-call-to-delete]", "sys", ".", "exit", "(", "1", ")" ]
https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/ipi_broker.py#L129-L148
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/ragged/ragged_shape.py
python
_to_row_partitions_and_nvals_from_lengths
( lengths: Sequence[Union[int, Sequence[int]]], dtype=None)
return (result, size_so_far)
Allow ragged and uniform shapes to be specified. For example, [2, [2,1], 2] represents a shape like: [[[0, 0], [0, 0]], [[0, 0]]] Args: lengths: a list of integers and lists of integers. dtype: dtype of the shape (tf.int32 or tf.int64) Returns: a sequence of RowPartitions, and the number of values of the last partition.
Allow ragged and uniform shapes to be specified.
[ "Allow", "ragged", "and", "uniform", "shapes", "to", "be", "specified", "." ]
def _to_row_partitions_and_nvals_from_lengths( lengths: Sequence[Union[int, Sequence[int]]], dtype=None) -> Tuple[Sequence[RowPartition], int]: """Allow ragged and uniform shapes to be specified. For example, [2, [2,1], 2] represents a shape like: [[[0, 0], [0, 0]], [[0, 0]]] Args: lengths: a list of integers and lists of integers. dtype: dtype of the shape (tf.int32 or tf.int64) Returns: a sequence of RowPartitions, and the number of values of the last partition. """ size_so_far = lengths[0] result = [] for current_lengths in lengths[1:]: if isinstance(current_lengths, int): nrows = size_so_far nvals = current_lengths * nrows size_so_far = nvals result.append( RowPartition.from_uniform_row_length( current_lengths, nvals, nrows=nrows, dtype_hint=dtype)) else: if size_so_far != len(current_lengths): raise ValueError("Shape not consistent.") result.append( RowPartition.from_row_lengths(current_lengths, dtype_hint=dtype)) size_so_far = sum(current_lengths) return (result, size_so_far)
[ "def", "_to_row_partitions_and_nvals_from_lengths", "(", "lengths", ":", "Sequence", "[", "Union", "[", "int", ",", "Sequence", "[", "int", "]", "]", "]", ",", "dtype", "=", "None", ")", "->", "Tuple", "[", "Sequence", "[", "RowPartition", "]", ",", "int", "]", ":", "size_so_far", "=", "lengths", "[", "0", "]", "result", "=", "[", "]", "for", "current_lengths", "in", "lengths", "[", "1", ":", "]", ":", "if", "isinstance", "(", "current_lengths", ",", "int", ")", ":", "nrows", "=", "size_so_far", "nvals", "=", "current_lengths", "*", "nrows", "size_so_far", "=", "nvals", "result", ".", "append", "(", "RowPartition", ".", "from_uniform_row_length", "(", "current_lengths", ",", "nvals", ",", "nrows", "=", "nrows", ",", "dtype_hint", "=", "dtype", ")", ")", "else", ":", "if", "size_so_far", "!=", "len", "(", "current_lengths", ")", ":", "raise", "ValueError", "(", "\"Shape not consistent.\"", ")", "result", ".", "append", "(", "RowPartition", ".", "from_row_lengths", "(", "current_lengths", ",", "dtype_hint", "=", "dtype", ")", ")", "size_so_far", "=", "sum", "(", "current_lengths", ")", "return", "(", "result", ",", "size_so_far", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/ragged/ragged_shape.py#L2213-L2244
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/build/waf-1.7.13/waflib/Tools/python.py
python
process_py
(self, node)
Add a callback using :py:func:`waflib.Tools.python.install_pyfile` to install a python file
Add a callback using :py:func:`waflib.Tools.python.install_pyfile` to install a python file
[ "Add", "a", "callback", "using", ":", "py", ":", "func", ":", "waflib", ".", "Tools", ".", "python", ".", "install_pyfile", "to", "install", "a", "python", "file" ]
def process_py(self, node): """ Add a callback using :py:func:`waflib.Tools.python.install_pyfile` to install a python file """ try: if not self.bld.is_install: return except AttributeError: return try: if not self.install_path: return except AttributeError: self.install_path = '${PYTHONDIR}' # i wonder now why we wanted to do this after the build is over # issue #901: people want to preserve the structure of installed files def inst_py(ctx): install_from = getattr(self, 'install_from', None) if install_from: install_from = self.path.find_dir(install_from) install_pyfile(self, node, install_from) self.bld.add_post_fun(inst_py)
[ "def", "process_py", "(", "self", ",", "node", ")", ":", "try", ":", "if", "not", "self", ".", "bld", ".", "is_install", ":", "return", "except", "AttributeError", ":", "return", "try", ":", "if", "not", "self", ".", "install_path", ":", "return", "except", "AttributeError", ":", "self", ".", "install_path", "=", "'${PYTHONDIR}'", "# i wonder now why we wanted to do this after the build is over", "# issue #901: people want to preserve the structure of installed files", "def", "inst_py", "(", "ctx", ")", ":", "install_from", "=", "getattr", "(", "self", ",", "'install_from'", ",", "None", ")", "if", "install_from", ":", "install_from", "=", "self", ".", "path", ".", "find_dir", "(", "install_from", ")", "install_pyfile", "(", "self", ",", "node", ",", "install_from", ")", "self", ".", "bld", ".", "add_post_fun", "(", "inst_py", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/waflib/Tools/python.py#L59-L82
google/mysql-protobuf
467cda676afaa49e762c5c9164a43f6ad31a1fbf
protobuf/python/mox.py
python
MockAnything.__nonzero__
(self)
return 1
Return 1 for nonzero so the mock can be used as a conditional.
Return 1 for nonzero so the mock can be used as a conditional.
[ "Return", "1", "for", "nonzero", "so", "the", "mock", "can", "be", "used", "as", "a", "conditional", "." ]
def __nonzero__(self): """Return 1 for nonzero so the mock can be used as a conditional.""" return 1
[ "def", "__nonzero__", "(", "self", ")", ":", "return", "1" ]
https://github.com/google/mysql-protobuf/blob/467cda676afaa49e762c5c9164a43f6ad31a1fbf/protobuf/python/mox.py#L309-L312
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/python/ops/seq2seq.py
python
attention_decoder
(decoder_inputs, initial_state, attention_states, cell, output_size=None, num_heads=1, loop_function=None, dtype=dtypes.float32, scope=None, initial_state_attention=False)
return outputs, state
RNN decoder with attention for the sequence-to-sequence model. In this context "attention" means that, during decoding, the RNN can look up information in the additional tensor attention_states, and it does this by focusing on a few entries from the tensor. This model has proven to yield especially good results in a number of sequence-to-sequence tasks. This implementation is based on http://arxiv.org/abs/1412.7449 (see below for details). It is recommended for complex sequence-to-sequence tasks. Args: decoder_inputs: A list of 2D Tensors [batch_size x input_size]. initial_state: 2D Tensor [batch_size x cell.state_size]. attention_states: 3D Tensor [batch_size x attn_length x attn_size]. cell: rnn_cell.RNNCell defining the cell function and size. output_size: Size of the output vectors; if None, we use cell.output_size. num_heads: Number of attention heads that read from attention_states. loop_function: If not None, this function will be applied to i-th output in order to generate i+1-th input, and decoder_inputs will be ignored, except for the first element ("GO" symbol). This can be used for decoding, but also for training to emulate http://arxiv.org/abs/1506.03099. Signature -- loop_function(prev, i) = next * prev is a 2D Tensor of shape [batch_size x output_size], * i is an integer, the step number (when advanced control is needed), * next is a 2D Tensor of shape [batch_size x input_size]. dtype: The dtype to use for the RNN initial state (default: tf.float32). scope: VariableScope for the created subgraph; default: "attention_decoder". initial_state_attention: If False (default), initial attentions are zero. If True, initialize the attentions from the initial state and attention states -- useful when we wish to resume decoding from a previously stored decoder state and attention states. Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors of shape [batch_size x output_size]. These represent the generated outputs. Output i is computed from input i (which is either the i-th element of decoder_inputs or loop_function(output {i-1}, i)) as follows. First, we run the cell on a combination of the input and previous attention masks: cell_output, new_state = cell(linear(input, prev_attn), prev_state). Then, we calculate new attention masks: new_attn = softmax(V^T * tanh(W * attention_states + U * new_state)) and then we calculate the output: output = linear(cell_output, new_attn). state: The state of each decoder cell the final time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. Raises: ValueError: when num_heads is not positive, there are no inputs, shapes of attention_states are not set, or input size cannot be inferred from the input.
RNN decoder with attention for the sequence-to-sequence model.
[ "RNN", "decoder", "with", "attention", "for", "the", "sequence", "-", "to", "-", "sequence", "model", "." ]
def attention_decoder(decoder_inputs, initial_state, attention_states, cell, output_size=None, num_heads=1, loop_function=None, dtype=dtypes.float32, scope=None, initial_state_attention=False): """RNN decoder with attention for the sequence-to-sequence model. In this context "attention" means that, during decoding, the RNN can look up information in the additional tensor attention_states, and it does this by focusing on a few entries from the tensor. This model has proven to yield especially good results in a number of sequence-to-sequence tasks. This implementation is based on http://arxiv.org/abs/1412.7449 (see below for details). It is recommended for complex sequence-to-sequence tasks. Args: decoder_inputs: A list of 2D Tensors [batch_size x input_size]. initial_state: 2D Tensor [batch_size x cell.state_size]. attention_states: 3D Tensor [batch_size x attn_length x attn_size]. cell: rnn_cell.RNNCell defining the cell function and size. output_size: Size of the output vectors; if None, we use cell.output_size. num_heads: Number of attention heads that read from attention_states. loop_function: If not None, this function will be applied to i-th output in order to generate i+1-th input, and decoder_inputs will be ignored, except for the first element ("GO" symbol). This can be used for decoding, but also for training to emulate http://arxiv.org/abs/1506.03099. Signature -- loop_function(prev, i) = next * prev is a 2D Tensor of shape [batch_size x output_size], * i is an integer, the step number (when advanced control is needed), * next is a 2D Tensor of shape [batch_size x input_size]. dtype: The dtype to use for the RNN initial state (default: tf.float32). scope: VariableScope for the created subgraph; default: "attention_decoder". initial_state_attention: If False (default), initial attentions are zero. If True, initialize the attentions from the initial state and attention states -- useful when we wish to resume decoding from a previously stored decoder state and attention states. Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors of shape [batch_size x output_size]. These represent the generated outputs. Output i is computed from input i (which is either the i-th element of decoder_inputs or loop_function(output {i-1}, i)) as follows. First, we run the cell on a combination of the input and previous attention masks: cell_output, new_state = cell(linear(input, prev_attn), prev_state). Then, we calculate new attention masks: new_attn = softmax(V^T * tanh(W * attention_states + U * new_state)) and then we calculate the output: output = linear(cell_output, new_attn). state: The state of each decoder cell the final time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. Raises: ValueError: when num_heads is not positive, there are no inputs, shapes of attention_states are not set, or input size cannot be inferred from the input. """ if not decoder_inputs: raise ValueError("Must provide at least 1 input to attention decoder.") if num_heads < 1: raise ValueError("With less than 1 heads, use a non-attention decoder.") if not attention_states.get_shape()[1:2].is_fully_defined(): raise ValueError("Shape[1] and [2] of attention_states must be known: %s" % attention_states.get_shape()) if output_size is None: output_size = cell.output_size with variable_scope.variable_scope(scope or "attention_decoder"): batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping. 
attn_length = attention_states.get_shape()[1].value attn_size = attention_states.get_shape()[2].value # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before. hidden = array_ops.reshape( attention_states, [-1, attn_length, 1, attn_size]) hidden_features = [] v = [] attention_vec_size = attn_size # Size of query vectors for attention. for a in xrange(num_heads): k = variable_scope.get_variable("AttnW_%d" % a, [1, 1, attn_size, attention_vec_size]) hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME")) v.append(variable_scope.get_variable("AttnV_%d" % a, [attention_vec_size])) state = initial_state def attention(query): """Put attention masks on hidden using hidden_features and query.""" ds = [] # Results of attention reads will be stored here. if nest.is_sequence(query): # If the query is a tuple, flatten it. query_list = nest.flatten(query) for q in query_list: # Check that ndims == 2 if specified. ndims = q.get_shape().ndims if ndims: assert ndims == 2 query = array_ops.concat(1, query_list) for a in xrange(num_heads): with variable_scope.variable_scope("Attention_%d" % a): y = linear(query, attention_vec_size, True) y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size]) # Attention mask is a softmax of v^T * tanh(...). s = math_ops.reduce_sum( v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3]) a = nn_ops.softmax(s) # Now calculate the attention-weighted vector d. d = math_ops.reduce_sum( array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2]) ds.append(array_ops.reshape(d, [-1, attn_size])) return ds outputs = [] prev = None batch_attn_size = array_ops.pack([batch_size, attn_size]) attns = [array_ops.zeros(batch_attn_size, dtype=dtype) for _ in xrange(num_heads)] for a in attns: # Ensure the second shape of attention vectors is set. a.set_shape([None, attn_size]) if initial_state_attention: attns = attention(initial_state) for i, inp in enumerate(decoder_inputs): if i > 0: variable_scope.get_variable_scope().reuse_variables() # If loop_function is set, we use it instead of decoder_inputs. if loop_function is not None and prev is not None: with variable_scope.variable_scope("loop_function", reuse=True): inp = loop_function(prev, i) # Merge input and previous attentions into one vector of the right size. input_size = inp.get_shape().with_rank(2)[1] if input_size.value is None: raise ValueError("Could not infer input size from input: %s" % inp.name) x = linear([inp] + attns, input_size, True) # Run the RNN. cell_output, state = cell(x, state) # Run the attention mechanism. if i == 0 and initial_state_attention: with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=True): attns = attention(state) else: attns = attention(state) with variable_scope.variable_scope("AttnOutputProjection"): output = linear([cell_output] + attns, output_size, True) if loop_function is not None: prev = output outputs.append(output) return outputs, state
[ "def", "attention_decoder", "(", "decoder_inputs", ",", "initial_state", ",", "attention_states", ",", "cell", ",", "output_size", "=", "None", ",", "num_heads", "=", "1", ",", "loop_function", "=", "None", ",", "dtype", "=", "dtypes", ".", "float32", ",", "scope", "=", "None", ",", "initial_state_attention", "=", "False", ")", ":", "if", "not", "decoder_inputs", ":", "raise", "ValueError", "(", "\"Must provide at least 1 input to attention decoder.\"", ")", "if", "num_heads", "<", "1", ":", "raise", "ValueError", "(", "\"With less than 1 heads, use a non-attention decoder.\"", ")", "if", "not", "attention_states", ".", "get_shape", "(", ")", "[", "1", ":", "2", "]", ".", "is_fully_defined", "(", ")", ":", "raise", "ValueError", "(", "\"Shape[1] and [2] of attention_states must be known: %s\"", "%", "attention_states", ".", "get_shape", "(", ")", ")", "if", "output_size", "is", "None", ":", "output_size", "=", "cell", ".", "output_size", "with", "variable_scope", ".", "variable_scope", "(", "scope", "or", "\"attention_decoder\"", ")", ":", "batch_size", "=", "array_ops", ".", "shape", "(", "decoder_inputs", "[", "0", "]", ")", "[", "0", "]", "# Needed for reshaping.", "attn_length", "=", "attention_states", ".", "get_shape", "(", ")", "[", "1", "]", ".", "value", "attn_size", "=", "attention_states", ".", "get_shape", "(", ")", "[", "2", "]", ".", "value", "# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.", "hidden", "=", "array_ops", ".", "reshape", "(", "attention_states", ",", "[", "-", "1", ",", "attn_length", ",", "1", ",", "attn_size", "]", ")", "hidden_features", "=", "[", "]", "v", "=", "[", "]", "attention_vec_size", "=", "attn_size", "# Size of query vectors for attention.", "for", "a", "in", "xrange", "(", "num_heads", ")", ":", "k", "=", "variable_scope", ".", "get_variable", "(", "\"AttnW_%d\"", "%", "a", ",", "[", "1", ",", "1", ",", "attn_size", ",", "attention_vec_size", "]", ")", "hidden_features", ".", "append", "(", "nn_ops", ".", "conv2d", "(", "hidden", ",", "k", ",", "[", "1", ",", "1", ",", "1", ",", "1", "]", ",", "\"SAME\"", ")", ")", "v", ".", "append", "(", "variable_scope", ".", "get_variable", "(", "\"AttnV_%d\"", "%", "a", ",", "[", "attention_vec_size", "]", ")", ")", "state", "=", "initial_state", "def", "attention", "(", "query", ")", ":", "\"\"\"Put attention masks on hidden using hidden_features and query.\"\"\"", "ds", "=", "[", "]", "# Results of attention reads will be stored here.", "if", "nest", ".", "is_sequence", "(", "query", ")", ":", "# If the query is a tuple, flatten it.", "query_list", "=", "nest", ".", "flatten", "(", "query", ")", "for", "q", "in", "query_list", ":", "# Check that ndims == 2 if specified.", "ndims", "=", "q", ".", "get_shape", "(", ")", ".", "ndims", "if", "ndims", ":", "assert", "ndims", "==", "2", "query", "=", "array_ops", ".", "concat", "(", "1", ",", "query_list", ")", "for", "a", "in", "xrange", "(", "num_heads", ")", ":", "with", "variable_scope", ".", "variable_scope", "(", "\"Attention_%d\"", "%", "a", ")", ":", "y", "=", "linear", "(", "query", ",", "attention_vec_size", ",", "True", ")", "y", "=", "array_ops", ".", "reshape", "(", "y", ",", "[", "-", "1", ",", "1", ",", "1", ",", "attention_vec_size", "]", ")", "# Attention mask is a softmax of v^T * tanh(...).", "s", "=", "math_ops", ".", "reduce_sum", "(", "v", "[", "a", "]", "*", "math_ops", ".", "tanh", "(", "hidden_features", "[", "a", "]", "+", "y", ")", ",", "[", "2", ",", "3", "]", ")", "a", "=", "nn_ops", ".", "softmax", 
"(", "s", ")", "# Now calculate the attention-weighted vector d.", "d", "=", "math_ops", ".", "reduce_sum", "(", "array_ops", ".", "reshape", "(", "a", ",", "[", "-", "1", ",", "attn_length", ",", "1", ",", "1", "]", ")", "*", "hidden", ",", "[", "1", ",", "2", "]", ")", "ds", ".", "append", "(", "array_ops", ".", "reshape", "(", "d", ",", "[", "-", "1", ",", "attn_size", "]", ")", ")", "return", "ds", "outputs", "=", "[", "]", "prev", "=", "None", "batch_attn_size", "=", "array_ops", ".", "pack", "(", "[", "batch_size", ",", "attn_size", "]", ")", "attns", "=", "[", "array_ops", ".", "zeros", "(", "batch_attn_size", ",", "dtype", "=", "dtype", ")", "for", "_", "in", "xrange", "(", "num_heads", ")", "]", "for", "a", "in", "attns", ":", "# Ensure the second shape of attention vectors is set.", "a", ".", "set_shape", "(", "[", "None", ",", "attn_size", "]", ")", "if", "initial_state_attention", ":", "attns", "=", "attention", "(", "initial_state", ")", "for", "i", ",", "inp", "in", "enumerate", "(", "decoder_inputs", ")", ":", "if", "i", ">", "0", ":", "variable_scope", ".", "get_variable_scope", "(", ")", ".", "reuse_variables", "(", ")", "# If loop_function is set, we use it instead of decoder_inputs.", "if", "loop_function", "is", "not", "None", "and", "prev", "is", "not", "None", ":", "with", "variable_scope", ".", "variable_scope", "(", "\"loop_function\"", ",", "reuse", "=", "True", ")", ":", "inp", "=", "loop_function", "(", "prev", ",", "i", ")", "# Merge input and previous attentions into one vector of the right size.", "input_size", "=", "inp", ".", "get_shape", "(", ")", ".", "with_rank", "(", "2", ")", "[", "1", "]", "if", "input_size", ".", "value", "is", "None", ":", "raise", "ValueError", "(", "\"Could not infer input size from input: %s\"", "%", "inp", ".", "name", ")", "x", "=", "linear", "(", "[", "inp", "]", "+", "attns", ",", "input_size", ",", "True", ")", "# Run the RNN.", "cell_output", ",", "state", "=", "cell", "(", "x", ",", "state", ")", "# Run the attention mechanism.", "if", "i", "==", "0", "and", "initial_state_attention", ":", "with", "variable_scope", ".", "variable_scope", "(", "variable_scope", ".", "get_variable_scope", "(", ")", ",", "reuse", "=", "True", ")", ":", "attns", "=", "attention", "(", "state", ")", "else", ":", "attns", "=", "attention", "(", "state", ")", "with", "variable_scope", ".", "variable_scope", "(", "\"AttnOutputProjection\"", ")", ":", "output", "=", "linear", "(", "[", "cell_output", "]", "+", "attns", ",", "output_size", ",", "True", ")", "if", "loop_function", "is", "not", "None", ":", "prev", "=", "output", "outputs", ".", "append", "(", "output", ")", "return", "outputs", ",", "state" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/ops/seq2seq.py#L474-L622
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/third_party/web-page-replay/third_party/dns/resolver.py
python
Resolver.read_registry
(self)
Extract resolver configuration from the Windows registry.
Extract resolver configuration from the Windows registry.
[ "Extract", "resolver", "configuration", "from", "the", "Windows", "registry", "." ]
def read_registry(self): """Extract resolver configuration from the Windows registry.""" lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) want_scan = False try: try: # XP, 2000 tcp_params = _winreg.OpenKey(lm, r'SYSTEM\CurrentControlSet' r'\Services\Tcpip\Parameters') want_scan = True except EnvironmentError: # ME tcp_params = _winreg.OpenKey(lm, r'SYSTEM\CurrentControlSet' r'\Services\VxD\MSTCP') try: self._config_win32_fromkey(tcp_params) finally: tcp_params.Close() if want_scan: interfaces = _winreg.OpenKey(lm, r'SYSTEM\CurrentControlSet' r'\Services\Tcpip\Parameters' r'\Interfaces') try: i = 0 while True: try: guid = _winreg.EnumKey(interfaces, i) i += 1 key = _winreg.OpenKey(interfaces, guid) if not self._win32_is_nic_enabled(lm, guid, key): continue try: self._config_win32_fromkey(key) finally: key.Close() except EnvironmentError: break finally: interfaces.Close() finally: lm.Close()
[ "def", "read_registry", "(", "self", ")", ":", "lm", "=", "_winreg", ".", "ConnectRegistry", "(", "None", ",", "_winreg", ".", "HKEY_LOCAL_MACHINE", ")", "want_scan", "=", "False", "try", ":", "try", ":", "# XP, 2000", "tcp_params", "=", "_winreg", ".", "OpenKey", "(", "lm", ",", "r'SYSTEM\\CurrentControlSet'", "r'\\Services\\Tcpip\\Parameters'", ")", "want_scan", "=", "True", "except", "EnvironmentError", ":", "# ME", "tcp_params", "=", "_winreg", ".", "OpenKey", "(", "lm", ",", "r'SYSTEM\\CurrentControlSet'", "r'\\Services\\VxD\\MSTCP'", ")", "try", ":", "self", ".", "_config_win32_fromkey", "(", "tcp_params", ")", "finally", ":", "tcp_params", ".", "Close", "(", ")", "if", "want_scan", ":", "interfaces", "=", "_winreg", ".", "OpenKey", "(", "lm", ",", "r'SYSTEM\\CurrentControlSet'", "r'\\Services\\Tcpip\\Parameters'", "r'\\Interfaces'", ")", "try", ":", "i", "=", "0", "while", "True", ":", "try", ":", "guid", "=", "_winreg", ".", "EnumKey", "(", "interfaces", ",", "i", ")", "i", "+=", "1", "key", "=", "_winreg", ".", "OpenKey", "(", "interfaces", ",", "guid", ")", "if", "not", "self", ".", "_win32_is_nic_enabled", "(", "lm", ",", "guid", ",", "key", ")", ":", "continue", "try", ":", "self", ".", "_config_win32_fromkey", "(", "key", ")", "finally", ":", "key", ".", "Close", "(", ")", "except", "EnvironmentError", ":", "break", "finally", ":", "interfaces", ".", "Close", "(", ")", "finally", ":", "lm", ".", "Close", "(", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/web-page-replay/third_party/dns/resolver.py#L428-L471
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py
python
SBValue.GetTypeFormat
(self)
return _lldb.SBValue_GetTypeFormat(self)
GetTypeFormat(self) -> SBTypeFormat
GetTypeFormat(self) -> SBTypeFormat
[ "GetTypeFormat", "(", "self", ")", "-", ">", "SBTypeFormat" ]
def GetTypeFormat(self): """GetTypeFormat(self) -> SBTypeFormat""" return _lldb.SBValue_GetTypeFormat(self)
[ "def", "GetTypeFormat", "(", "self", ")", ":", "return", "_lldb", ".", "SBValue_GetTypeFormat", "(", "self", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L11935-L11937
htcondor/htcondor
4829724575176d1d6c936e4693dfd78a728569b0
nmi_tools/glue/build/email_on_failure.py
python
create_email
(this_sha1, prev_sha1, description, runid, authors)
return msg
Create the email to send out
Create the email to send out
[ "Create", "the", "email", "to", "send", "out" ]
def create_email(this_sha1, prev_sha1, description, runid, authors): """ Create the email to send out """ # Form a detailed message msg = "A build of type '%s' failed. The previous build of this type succeeded.\n\n" % description msg += "Build SHA1 - %s\n" % this_sha1 msg += "Previous SHA1 - %s\n\n\n" % prev_sha1 msg += "Committers in between:\n" for author in authors.keys(): msg += "%s <%s>\n" % (author, authors[author]) msg += "\n" msg += "Link to dashboard:\n" msg += "http://nmi-s006.cs.wisc.edu/results/Run-condor-details.php?runid=%s&type=build&user=cndrauto\n\n" % runid msg += "Log of commits between these two builds:\n" msg += "http://condor-git.cs.wisc.edu/?p=condor.git;a=log;h=%s;hp=%s\n\n" % (this_sha1, prev_sha1) msg += message_footer() return msg
[ "def", "create_email", "(", "this_sha1", ",", "prev_sha1", ",", "description", ",", "runid", ",", "authors", ")", ":", "# Form a detailed message", "msg", "=", "\"A build of type '%s' failed. The previous build of this type succeeded.\\n\\n\"", "%", "description", "msg", "+=", "\"Build SHA1 - %s\\n\"", "%", "this_sha1", "msg", "+=", "\"Previous SHA1 - %s\\n\\n\\n\"", "%", "prev_sha1", "msg", "+=", "\"Committers in between:\\n\"", "for", "author", "in", "authors", ".", "keys", "(", ")", ":", "msg", "+=", "\"%s <%s>\\n\"", "%", "(", "author", ",", "authors", "[", "author", "]", ")", "msg", "+=", "\"\\n\"", "msg", "+=", "\"Link to dashboard:\\n\"", "msg", "+=", "\"http://nmi-s006.cs.wisc.edu/results/Run-condor-details.php?runid=%s&type=build&user=cndrauto\\n\\n\"", "%", "runid", "msg", "+=", "\"Log of commits between these two builds:\\n\"", "msg", "+=", "\"http://condor-git.cs.wisc.edu/?p=condor.git;a=log;h=%s;hp=%s\\n\\n\"", "%", "(", "this_sha1", ",", "prev_sha1", ")", "msg", "+=", "message_footer", "(", ")", "return", "msg" ]
https://github.com/htcondor/htcondor/blob/4829724575176d1d6c936e4693dfd78a728569b0/nmi_tools/glue/build/email_on_failure.py#L126-L146
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/timeseries/examples/known_anomaly.py
python
train_and_evaluate_exogenous
( estimator_fn, csv_file_name=_DATA_FILE, train_steps=300)
return (times, observed, all_times, mean, upper_limit, lower_limit, anomaly_locations)
Training, evaluating, and predicting on a series with changepoints.
Training, evaluating, and predicting on a series with changepoints.
[ "Training", "evaluating", "and", "predicting", "on", "a", "series", "with", "changepoints", "." ]
def train_and_evaluate_exogenous( estimator_fn, csv_file_name=_DATA_FILE, train_steps=300): """Training, evaluating, and predicting on a series with changepoints.""" # Indicate the format of our exogenous feature, in this case a string # representing a boolean value. string_feature = tf.feature_column.categorical_column_with_vocabulary_list( key="is_changepoint", vocabulary_list=["no", "yes"]) # Specify the way this feature is presented to the model, here using a one-hot # encoding. one_hot_feature = tf.feature_column.indicator_column( categorical_column=string_feature) estimator, batch_size, window_size = estimator_fn( exogenous_feature_columns=[one_hot_feature]) reader = tf.contrib.timeseries.CSVReader( csv_file_name, # Indicate the format of our CSV file. First we have two standard columns, # one for times and one for values. The third column is a custom exogenous # feature indicating whether each timestep is a changepoint. The # changepoint feature name must match the string_feature column name # above. column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES, tf.contrib.timeseries.TrainEvalFeatures.VALUES, "is_changepoint"), # Indicate dtypes for our features. column_dtypes=(tf.int64, tf.float32, tf.string), # This CSV has a header line; here we just ignore it. skip_header_lines=1) train_input_fn = tf.contrib.timeseries.RandomWindowInputFn( reader, batch_size=batch_size, window_size=window_size) estimator.train(input_fn=train_input_fn, steps=train_steps) evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader) evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1) # Create an input_fn for prediction, with a simulated changepoint. Since all # of the anomalies in the training data are explained by the exogenous # feature, we should get relatively confident predictions before the indicated # changepoint (since we are telling the model that no changepoint exists at # those times) and relatively uncertain predictions after. (predictions,) = tuple(estimator.predict( input_fn=tf.contrib.timeseries.predict_continuation_input_fn( evaluation, steps=100, exogenous_features={ "is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]}))) times = evaluation["times"][0] observed = evaluation["observed"][0, :, 0] mean = np.squeeze(np.concatenate( [evaluation["mean"][0], predictions["mean"]], axis=0)) variance = np.squeeze(np.concatenate( [evaluation["covariance"][0], predictions["covariance"]], axis=0)) all_times = np.concatenate([times, predictions["times"]], axis=0) upper_limit = mean + np.sqrt(variance) lower_limit = mean - np.sqrt(variance) # Indicate the locations of the changepoints for plotting vertical lines. anomaly_locations = [] with open(csv_file_name, "r") as csv_file: csv_reader = csv.DictReader(csv_file) for row in csv_reader: if row["is_changepoint"] == "yes": anomaly_locations.append(int(row["time"])) anomaly_locations.append(predictions["times"][49]) return (times, observed, all_times, mean, upper_limit, lower_limit, anomaly_locations)
[ "def", "train_and_evaluate_exogenous", "(", "estimator_fn", ",", "csv_file_name", "=", "_DATA_FILE", ",", "train_steps", "=", "300", ")", ":", "# Indicate the format of our exogenous feature, in this case a string", "# representing a boolean value.", "string_feature", "=", "tf", ".", "feature_column", ".", "categorical_column_with_vocabulary_list", "(", "key", "=", "\"is_changepoint\"", ",", "vocabulary_list", "=", "[", "\"no\"", ",", "\"yes\"", "]", ")", "# Specify the way this feature is presented to the model, here using a one-hot", "# encoding.", "one_hot_feature", "=", "tf", ".", "feature_column", ".", "indicator_column", "(", "categorical_column", "=", "string_feature", ")", "estimator", ",", "batch_size", ",", "window_size", "=", "estimator_fn", "(", "exogenous_feature_columns", "=", "[", "one_hot_feature", "]", ")", "reader", "=", "tf", ".", "contrib", ".", "timeseries", ".", "CSVReader", "(", "csv_file_name", ",", "# Indicate the format of our CSV file. First we have two standard columns,", "# one for times and one for values. The third column is a custom exogenous", "# feature indicating whether each timestep is a changepoint. The", "# changepoint feature name must match the string_feature column name", "# above.", "column_names", "=", "(", "tf", ".", "contrib", ".", "timeseries", ".", "TrainEvalFeatures", ".", "TIMES", ",", "tf", ".", "contrib", ".", "timeseries", ".", "TrainEvalFeatures", ".", "VALUES", ",", "\"is_changepoint\"", ")", ",", "# Indicate dtypes for our features.", "column_dtypes", "=", "(", "tf", ".", "int64", ",", "tf", ".", "float32", ",", "tf", ".", "string", ")", ",", "# This CSV has a header line; here we just ignore it.", "skip_header_lines", "=", "1", ")", "train_input_fn", "=", "tf", ".", "contrib", ".", "timeseries", ".", "RandomWindowInputFn", "(", "reader", ",", "batch_size", "=", "batch_size", ",", "window_size", "=", "window_size", ")", "estimator", ".", "train", "(", "input_fn", "=", "train_input_fn", ",", "steps", "=", "train_steps", ")", "evaluation_input_fn", "=", "tf", ".", "contrib", ".", "timeseries", ".", "WholeDatasetInputFn", "(", "reader", ")", "evaluation", "=", "estimator", ".", "evaluate", "(", "input_fn", "=", "evaluation_input_fn", ",", "steps", "=", "1", ")", "# Create an input_fn for prediction, with a simulated changepoint. 
Since all", "# of the anomalies in the training data are explained by the exogenous", "# feature, we should get relatively confident predictions before the indicated", "# changepoint (since we are telling the model that no changepoint exists at", "# those times) and relatively uncertain predictions after.", "(", "predictions", ",", ")", "=", "tuple", "(", "estimator", ".", "predict", "(", "input_fn", "=", "tf", ".", "contrib", ".", "timeseries", ".", "predict_continuation_input_fn", "(", "evaluation", ",", "steps", "=", "100", ",", "exogenous_features", "=", "{", "\"is_changepoint\"", ":", "[", "[", "\"no\"", "]", "*", "49", "+", "[", "\"yes\"", "]", "+", "[", "\"no\"", "]", "*", "50", "]", "}", ")", ")", ")", "times", "=", "evaluation", "[", "\"times\"", "]", "[", "0", "]", "observed", "=", "evaluation", "[", "\"observed\"", "]", "[", "0", ",", ":", ",", "0", "]", "mean", "=", "np", ".", "squeeze", "(", "np", ".", "concatenate", "(", "[", "evaluation", "[", "\"mean\"", "]", "[", "0", "]", ",", "predictions", "[", "\"mean\"", "]", "]", ",", "axis", "=", "0", ")", ")", "variance", "=", "np", ".", "squeeze", "(", "np", ".", "concatenate", "(", "[", "evaluation", "[", "\"covariance\"", "]", "[", "0", "]", ",", "predictions", "[", "\"covariance\"", "]", "]", ",", "axis", "=", "0", ")", ")", "all_times", "=", "np", ".", "concatenate", "(", "[", "times", ",", "predictions", "[", "\"times\"", "]", "]", ",", "axis", "=", "0", ")", "upper_limit", "=", "mean", "+", "np", ".", "sqrt", "(", "variance", ")", "lower_limit", "=", "mean", "-", "np", ".", "sqrt", "(", "variance", ")", "# Indicate the locations of the changepoints for plotting vertical lines.", "anomaly_locations", "=", "[", "]", "with", "open", "(", "csv_file_name", ",", "\"r\"", ")", "as", "csv_file", ":", "csv_reader", "=", "csv", ".", "DictReader", "(", "csv_file", ")", "for", "row", "in", "csv_reader", ":", "if", "row", "[", "\"is_changepoint\"", "]", "==", "\"yes\"", ":", "anomaly_locations", ".", "append", "(", "int", "(", "row", "[", "\"time\"", "]", ")", ")", "anomaly_locations", ".", "append", "(", "predictions", "[", "\"times\"", "]", "[", "49", "]", ")", "return", "(", "times", ",", "observed", ",", "all_times", ",", "mean", ",", "upper_limit", ",", "lower_limit", ",", "anomaly_locations", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/timeseries/examples/known_anomaly.py#L84-L145
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/deps/v8/third_party/jinja2/ext.py
python
Extension.attr
(self, name, lineno=None)
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
Return an attribute node for the current extension. This is useful to pass constants on extensions to generated template code. :: self.attr('_my_attribute', lineno=lineno)
Return an attribute node for the current extension. This is useful to pass constants on extensions to generated template code.
[ "Return", "an", "attribute", "node", "for", "the", "current", "extension", ".", "This", "is", "useful", "to", "pass", "constants", "on", "extensions", "to", "generated", "template", "code", "." ]
def attr(self, name, lineno=None): """Return an attribute node for the current extension. This is useful to pass constants on extensions to generated template code. :: self.attr('_my_attribute', lineno=lineno) """ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
[ "def", "attr", "(", "self", ",", "name", ",", "lineno", "=", "None", ")", ":", "return", "nodes", ".", "ExtensionAttribute", "(", "self", ".", "identifier", ",", "name", ",", "lineno", "=", "lineno", ")" ]
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/deps/v8/third_party/jinja2/ext.py#L109-L117
GeometryCollective/boundary-first-flattening
8250e5a0e85980ec50b5e8aa8f49dd6519f915cd
deps/nanogui/ext/pybind11/tools/clang/cindex.py
python
Token.extent
(self)
return conf.lib.clang_getTokenExtent(self._tu, self)
The SourceRange this Token occupies.
The SourceRange this Token occupies.
[ "The", "SourceRange", "this", "Token", "occupies", "." ]
def extent(self): """The SourceRange this Token occupies.""" return conf.lib.clang_getTokenExtent(self._tu, self)
[ "def", "extent", "(", "self", ")", ":", "return", "conf", ".", "lib", ".", "clang_getTokenExtent", "(", "self", ".", "_tu", ",", "self", ")" ]
https://github.com/GeometryCollective/boundary-first-flattening/blob/8250e5a0e85980ec50b5e8aa8f49dd6519f915cd/deps/nanogui/ext/pybind11/tools/clang/cindex.py#L2885-L2887
eclipse/sumo
7132a9b8b6eea734bdec38479026b4d8c4336d03
tools/traci/_vehicletype.py
python
VehicleTypeDomain.setDecel
(self, typeID, decel)
setDecel(string, double) -> None Sets the maximal comfortable deceleration in m/s^2 of vehicles of this type.
setDecel(string, double) -> None
[ "setDecel", "(", "string", "double", ")", "-", ">", "None" ]
def setDecel(self, typeID, decel): """setDecel(string, double) -> None Sets the maximal comfortable deceleration in m/s^2 of vehicles of this type. """ self._setCmd(tc.VAR_DECEL, typeID, "d", decel)
[ "def", "setDecel", "(", "self", ",", "typeID", ",", "decel", ")", ":", "self", ".", "_setCmd", "(", "tc", ".", "VAR_DECEL", ",", "typeID", ",", "\"d\"", ",", "decel", ")" ]
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/traci/_vehicletype.py#L284-L289
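A usage sketch for the setDecel record above, assuming a SUMO simulation has already been started through TraCI and that the default vehicle type exists in it (both are assumptions, not part of the record):
import traci
# Requires a prior traci.start([...]) connection to a running simulation.
# Lower the comfortable deceleration of an existing vehicle type to 3.0 m/s^2.
traci.vehicletype.setDecel("DEFAULT_VEHTYPE", 3.0)
print(traci.vehicletype.getDecel("DEFAULT_VEHTYPE"))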
liulei01/DRBox
b5c76e033c555c9009590ab384e1f7bd3c66c237
python/caffe/io.py
python
array_to_datum
(arr, label=None)
return datum
Converts a 3-dimensional array to datum. If the array has dtype uint8, the output data will be encoded as a string. Otherwise, the output data will be stored in float format.
Converts a 3-dimensional array to datum. If the array has dtype uint8, the output data will be encoded as a string. Otherwise, the output data will be stored in float format.
[ "Converts", "a", "3", "-", "dimensional", "array", "to", "datum", ".", "If", "the", "array", "has", "dtype", "uint8", "the", "output", "data", "will", "be", "encoded", "as", "a", "string", ".", "Otherwise", "the", "output", "data", "will", "be", "stored", "in", "float", "format", "." ]
def array_to_datum(arr, label=None): """Converts a 3-dimensional array to datum. If the array has dtype uint8, the output data will be encoded as a string. Otherwise, the output data will be stored in float format. """ if arr.ndim != 3: raise ValueError('Incorrect array shape.') datum = caffe_pb2.Datum() datum.channels, datum.height, datum.width = arr.shape if arr.dtype == np.uint8: datum.data = arr.tostring() else: datum.float_data.extend(arr.flat) if label is not None: datum.label = label return datum
[ "def", "array_to_datum", "(", "arr", ",", "label", "=", "None", ")", ":", "if", "arr", ".", "ndim", "!=", "3", ":", "raise", "ValueError", "(", "'Incorrect array shape.'", ")", "datum", "=", "caffe_pb2", ".", "Datum", "(", ")", "datum", ".", "channels", ",", "datum", ".", "height", ",", "datum", ".", "width", "=", "arr", ".", "shape", "if", "arr", ".", "dtype", "==", "np", ".", "uint8", ":", "datum", ".", "data", "=", "arr", ".", "tostring", "(", ")", "else", ":", "datum", ".", "float_data", ".", "extend", "(", "arr", ".", "flat", ")", "if", "label", "is", "not", "None", ":", "datum", ".", "label", "=", "label", "return", "datum" ]
https://github.com/liulei01/DRBox/blob/b5c76e033c555c9009590ab384e1f7bd3c66c237/python/caffe/io.py#L66-L81
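A minimal sketch, assuming the standard pycaffe layout, of feeding a `(channels, height, width)` uint8 array through `array_to_datum` and serializing the resulting protobuf; the shape and label are arbitrary.

```python
import numpy as np
import caffe

arr = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)  # C, H, W
datum = caffe.io.array_to_datum(arr, label=1)

# uint8 input lands in datum.data as raw bytes; float input would go to float_data
serialized = datum.SerializeToString()
```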
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/internals/managers.py
python
BlockManager.get_bool_data
(self, copy=False)
return self.combine([b for b in self.blocks if b.is_bool], copy)
Parameters ---------- copy : boolean, default False Whether to copy the blocks
Parameters ---------- copy : boolean, default False Whether to copy the blocks
[ "Parameters", "----------", "copy", ":", "boolean", "default", "False", "Whether", "to", "copy", "the", "blocks" ]
def get_bool_data(self, copy=False): """ Parameters ---------- copy : boolean, default False Whether to copy the blocks """ self._consolidate_inplace() return self.combine([b for b in self.blocks if b.is_bool], copy)
[ "def", "get_bool_data", "(", "self", ",", "copy", "=", "False", ")", ":", "self", ".", "_consolidate_inplace", "(", ")", "return", "self", ".", "combine", "(", "[", "b", "for", "b", "in", "self", ".", "blocks", "if", "b", ".", "is_bool", "]", ",", "copy", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/internals/managers.py#L642-L650
Jittor/jittor
e9aca0444c2bdc8e2389d99122954cd0903eec46
python/jittor/init.py
python
xavier_uniform_
(var, gain=1.0)
return var.assign(xavier_uniform(tuple(var.shape), var.dtype, gain))
Inplace initialize Jittor Var by xavier_uniform. The resulting var will have values sampled from :math:`uniform(-a, a)` where .. math:: a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}} Args: var (Jittor Var): Var to be initialized by random xavier_uniform gain (float): an optional scaling factor. Example:: from jittor import init from jittor import nn linear = nn.Linear(2,2) init.xavier_uniform_(linear.weight, init.calculate_gain('relu')) print(linear.weight) linear.weight.xavier_uniform_() # This is ok too
Inplace initialize Jittor Var by xavier_uniform. The resulting var will have values sampled from :math:`uniform(-a, a)` where
[ "Inplace", "initialize", "Jittor", "Var", "by", "xavier_uniform", ".", "The", "resulting", "var", "will", "have", "values", "sampled", "from", ":", "math", ":", "uniform", "(", "-", "a", "a", ")", "where" ]
def xavier_uniform_(var, gain=1.0): ''' Inplace initialize Jittor Var by xavier_uniform. The resulting var will have values sampled from :math:`uniform(-a, a)` where .. math:: a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}} Args: var (Jittor Var): Var to be initialized by random xavier_uniform gain (float): an optional scaling factor. Example:: from jittor import init from jittor import nn linear = nn.Linear(2,2) init.xavier_uniform_(linear.weight, init.calculate_gain('relu')) print(linear.weight) linear.weight.xavier_uniform_() # This is ok too ''' return var.assign(xavier_uniform(tuple(var.shape), var.dtype, gain))
[ "def", "xavier_uniform_", "(", "var", ",", "gain", "=", "1.0", ")", ":", "return", "var", ".", "assign", "(", "xavier_uniform", "(", "tuple", "(", "var", ".", "shape", ")", ",", "var", ".", "dtype", ",", "gain", ")", ")" ]
https://github.com/Jittor/jittor/blob/e9aca0444c2bdc8e2389d99122954cd0903eec46/python/jittor/init.py#L535-L559
intel/llvm
e6d0547e9d99b5a56430c4749f6c7e328bf221ab
lldb/third_party/Python/module/pexpect-4.6/pexpect/popen_spawn.py
python
PopenSpawn.kill
(self, sig)
Sends a Unix signal to the subprocess. Use constants from the :mod:`signal` module to specify which signal.
Sends a Unix signal to the subprocess.
[ "Sends", "a", "Unix", "signal", "to", "the", "subprocess", "." ]
def kill(self, sig): '''Sends a Unix signal to the subprocess. Use constants from the :mod:`signal` module to specify which signal. ''' if sys.platform == 'win32': if sig in [signal.SIGINT, signal.CTRL_C_EVENT]: sig = signal.CTRL_C_EVENT elif sig in [signal.SIGBREAK, signal.CTRL_BREAK_EVENT]: sig = signal.CTRL_BREAK_EVENT else: sig = signal.SIGTERM os.kill(self.proc.pid, sig)
[ "def", "kill", "(", "self", ",", "sig", ")", ":", "if", "sys", ".", "platform", "==", "'win32'", ":", "if", "sig", "in", "[", "signal", ".", "SIGINT", ",", "signal", ".", "CTRL_C_EVENT", "]", ":", "sig", "=", "signal", ".", "CTRL_C_EVENT", "elif", "sig", "in", "[", "signal", ".", "SIGBREAK", ",", "signal", ".", "CTRL_BREAK_EVENT", "]", ":", "sig", "=", "signal", ".", "CTRL_BREAK_EVENT", "else", ":", "sig", "=", "signal", ".", "SIGTERM", "os", ".", "kill", "(", "self", ".", "proc", ".", "pid", ",", "sig", ")" ]
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/lldb/third_party/Python/module/pexpect-4.6/pexpect/popen_spawn.py#L171-L184
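A hedged sketch of `PopenSpawn.kill` in use: spawn a child, send it SIGTERM, and wait for it to exit. The command line is an arbitrary example; on Windows the method maps the signal to a console control event as shown in the record.

```python
import signal
from pexpect.popen_spawn import PopenSpawn

child = PopenSpawn("ping -c 10 127.0.0.1")  # placeholder long-running command
child.kill(signal.SIGTERM)
child.wait()
```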
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py
python
ResourceManager.resource_stream
(self, package_or_requirement, resource_name)
return get_provider(package_or_requirement).get_resource_stream( self, resource_name )
Return a readable file-like object for specified resource
Return a readable file-like object for specified resource
[ "Return", "a", "readable", "file", "-", "like", "object", "for", "specified", "resource" ]
def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name )
[ "def", "resource_stream", "(", "self", ",", "package_or_requirement", ",", "resource_name", ")", ":", "return", "get_provider", "(", "package_or_requirement", ")", ".", "get_resource_stream", "(", "self", ",", "resource_name", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py#L1149-L1153
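A short sketch of the equivalent module-level call in `pkg_resources`; the package and resource names are placeholders.

```python
import pkg_resources

stream = pkg_resources.resource_stream("mypackage", "data/config.json")
try:
    payload = stream.read()  # bytes of the packaged resource
finally:
    stream.close()
```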
bryanyzhu/Hidden-Two-Stream
f7f684adbdacb6df6b1cf196c3a476cd23484a0f
python/caffe/draw.py
python
get_pydot_graph
(caffe_net, rankdir, label_edges=True)
return pydot_graph
Create a data structure which represents the `caffe_net`. Parameters ---------- caffe_net : object rankdir : {'LR', 'TB', 'BT'} Direction of graph layout. label_edges : boolean, optional Label the edges (default is True). Returns ------- pydot graph object
Create a data structure which represents the `caffe_net`.
[ "Create", "a", "data", "structure", "which", "represents", "the", "caffe_net", "." ]
def get_pydot_graph(caffe_net, rankdir, label_edges=True): """Create a data structure which represents the `caffe_net`. Parameters ---------- caffe_net : object rankdir : {'LR', 'TB', 'BT'} Direction of graph layout. label_edges : boolean, optional Label the edges (default is True). Returns ------- pydot graph object """ pydot_graph = pydot.Dot(caffe_net.name if caffe_net.name else 'Net', graph_type='digraph', rankdir=rankdir) pydot_nodes = {} pydot_edges = [] for layer in caffe_net.layer: node_label = get_layer_label(layer, rankdir) node_name = "%s_%s" % (layer.name, layer.type) if (len(layer.bottom) == 1 and len(layer.top) == 1 and layer.bottom[0] == layer.top[0]): # We have an in-place neuron layer. pydot_nodes[node_name] = pydot.Node(node_label, **NEURON_LAYER_STYLE) else: layer_style = LAYER_STYLE_DEFAULT layer_style['fillcolor'] = choose_color_by_layertype(layer.type) pydot_nodes[node_name] = pydot.Node(node_label, **layer_style) for bottom_blob in layer.bottom: pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob, **BLOB_STYLE) edge_label = '""' pydot_edges.append({'src': bottom_blob + '_blob', 'dst': node_name, 'label': edge_label}) for top_blob in layer.top: pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob)) if label_edges: edge_label = get_edge_label(layer) else: edge_label = '""' pydot_edges.append({'src': node_name, 'dst': top_blob + '_blob', 'label': edge_label}) # Now, add the nodes and edges to the graph. for node in pydot_nodes.values(): pydot_graph.add_node(node) for edge in pydot_edges: pydot_graph.add_edge( pydot.Edge(pydot_nodes[edge['src']], pydot_nodes[edge['dst']], label=edge['label'])) return pydot_graph
[ "def", "get_pydot_graph", "(", "caffe_net", ",", "rankdir", ",", "label_edges", "=", "True", ")", ":", "pydot_graph", "=", "pydot", ".", "Dot", "(", "caffe_net", ".", "name", "if", "caffe_net", ".", "name", "else", "'Net'", ",", "graph_type", "=", "'digraph'", ",", "rankdir", "=", "rankdir", ")", "pydot_nodes", "=", "{", "}", "pydot_edges", "=", "[", "]", "for", "layer", "in", "caffe_net", ".", "layer", ":", "node_label", "=", "get_layer_label", "(", "layer", ",", "rankdir", ")", "node_name", "=", "\"%s_%s\"", "%", "(", "layer", ".", "name", ",", "layer", ".", "type", ")", "if", "(", "len", "(", "layer", ".", "bottom", ")", "==", "1", "and", "len", "(", "layer", ".", "top", ")", "==", "1", "and", "layer", ".", "bottom", "[", "0", "]", "==", "layer", ".", "top", "[", "0", "]", ")", ":", "# We have an in-place neuron layer.", "pydot_nodes", "[", "node_name", "]", "=", "pydot", ".", "Node", "(", "node_label", ",", "*", "*", "NEURON_LAYER_STYLE", ")", "else", ":", "layer_style", "=", "LAYER_STYLE_DEFAULT", "layer_style", "[", "'fillcolor'", "]", "=", "choose_color_by_layertype", "(", "layer", ".", "type", ")", "pydot_nodes", "[", "node_name", "]", "=", "pydot", ".", "Node", "(", "node_label", ",", "*", "*", "layer_style", ")", "for", "bottom_blob", "in", "layer", ".", "bottom", ":", "pydot_nodes", "[", "bottom_blob", "+", "'_blob'", "]", "=", "pydot", ".", "Node", "(", "'%s'", "%", "bottom_blob", ",", "*", "*", "BLOB_STYLE", ")", "edge_label", "=", "'\"\"'", "pydot_edges", ".", "append", "(", "{", "'src'", ":", "bottom_blob", "+", "'_blob'", ",", "'dst'", ":", "node_name", ",", "'label'", ":", "edge_label", "}", ")", "for", "top_blob", "in", "layer", ".", "top", ":", "pydot_nodes", "[", "top_blob", "+", "'_blob'", "]", "=", "pydot", ".", "Node", "(", "'%s'", "%", "(", "top_blob", ")", ")", "if", "label_edges", ":", "edge_label", "=", "get_edge_label", "(", "layer", ")", "else", ":", "edge_label", "=", "'\"\"'", "pydot_edges", ".", "append", "(", "{", "'src'", ":", "node_name", ",", "'dst'", ":", "top_blob", "+", "'_blob'", ",", "'label'", ":", "edge_label", "}", ")", "# Now, add the nodes and edges to the graph.", "for", "node", "in", "pydot_nodes", ".", "values", "(", ")", ":", "pydot_graph", ".", "add_node", "(", "node", ")", "for", "edge", "in", "pydot_edges", ":", "pydot_graph", ".", "add_edge", "(", "pydot", ".", "Edge", "(", "pydot_nodes", "[", "edge", "[", "'src'", "]", "]", ",", "pydot_nodes", "[", "edge", "[", "'dst'", "]", "]", ",", "label", "=", "edge", "[", "'label'", "]", ")", ")", "return", "pydot_graph" ]
https://github.com/bryanyzhu/Hidden-Two-Stream/blob/f7f684adbdacb6df6b1cf196c3a476cd23484a0f/python/caffe/draw.py#L130-L186
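A hedged sketch of how `get_pydot_graph` is usually driven: parse a net definition from a prototxt and hand the resulting `NetParameter` to the drawing helper. The prototxt path and output file are placeholders, and rendering assumes Graphviz is installed.

```python
from google.protobuf import text_format
from caffe.proto import caffe_pb2
from caffe.draw import get_pydot_graph

net = caffe_pb2.NetParameter()
with open("deploy.prototxt") as f:          # hypothetical model definition
    text_format.Merge(f.read(), net)

graph = get_pydot_graph(net, rankdir="LR", label_edges=True)
graph.write_png("net.png")                  # pydot delegates rendering to graphviz
```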
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/_msvccompiler.py
python
_find_exe
(exe, paths=None)
return exe
Return path to an MSVC executable program. Tries to find the program in several places: first, one of the MSVC program search paths from the registry; next, the directories in the PATH environment variable. If any of those work, return an absolute path that is known to exist. If none of them work, just return the original program name, 'exe'.
Return path to an MSVC executable program.
[ "Return", "path", "to", "an", "MSVC", "executable", "program", "." ]
def _find_exe(exe, paths=None): """Return path to an MSVC executable program. Tries to find the program in several places: first, one of the MSVC program search paths from the registry; next, the directories in the PATH environment variable. If any of those work, return an absolute path that is known to exist. If none of them work, just return the original program name, 'exe'. """ if not paths: paths = os.getenv('path').split(os.pathsep) for p in paths: fn = os.path.join(os.path.abspath(p), exe) if os.path.isfile(fn): return fn return exe
[ "def", "_find_exe", "(", "exe", ",", "paths", "=", "None", ")", ":", "if", "not", "paths", ":", "paths", "=", "os", ".", "getenv", "(", "'path'", ")", ".", "split", "(", "os", ".", "pathsep", ")", "for", "p", "in", "paths", ":", "fn", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "p", ")", ",", "exe", ")", "if", "os", ".", "path", ".", "isfile", "(", "fn", ")", ":", "return", "fn", "return", "exe" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/_msvccompiler.py#L140-L155
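The helper above is private to distutils; the following standalone sketch reproduces the same search order (explicit directories first, then PATH) without importing it. The queried executable name is a placeholder.

```python
import os

def find_exe(exe, paths=None):
    """Look for exe in the given directories, falling back to PATH."""
    if not paths:
        paths = os.environ.get("PATH", "").split(os.pathsep)
    for p in paths:
        candidate = os.path.join(os.path.abspath(p), exe)
        if os.path.isfile(candidate):
            return candidate
    return exe  # fall back to the bare name if nothing matched

print(find_exe("cl.exe"))
```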
apache/impala
8ddac48f3428c86f2cbd037ced89cfb903298b12
bin/start-impala-cluster.py
python
kill_matching_processes
(binary_names, force=False)
Kills all processes with the given binary name, waiting for them to exit
Kills all processes with the given binary name, waiting for them to exit
[ "Kills", "all", "processes", "with", "the", "given", "binary", "name", "waiting", "for", "them", "to", "exit" ]
def kill_matching_processes(binary_names, force=False): """Kills all processes with the given binary name, waiting for them to exit""" # Send all the signals before waiting so that processes can clean up in parallel. processes = [proc for _, proc in find_user_processes(binary_names)] for process in processes: try: if force: process.kill() else: process.terminate() except psutil.NoSuchProcess: pass for process in processes: try: process.wait(KILL_TIMEOUT_IN_SECONDS) except psutil.TimeoutExpired: raise RuntimeError(("Unable to kill {process_name} (pid {process_pid}) " "after {num_seconds} seconds.").format( process_name=process.name, process_pid=process.pid, num_seconds=KILL_TIMEOUT_IN_SECONDS))
[ "def", "kill_matching_processes", "(", "binary_names", ",", "force", "=", "False", ")", ":", "# Send all the signals before waiting so that processes can clean up in parallel.", "processes", "=", "[", "proc", "for", "_", ",", "proc", "in", "find_user_processes", "(", "binary_names", ")", "]", "for", "process", "in", "processes", ":", "try", ":", "if", "force", ":", "process", ".", "kill", "(", ")", "else", ":", "process", ".", "terminate", "(", ")", "except", "psutil", ".", "NoSuchProcess", ":", "pass", "for", "process", "in", "processes", ":", "try", ":", "process", ".", "wait", "(", "KILL_TIMEOUT_IN_SECONDS", ")", "except", "psutil", ".", "TimeoutExpired", ":", "raise", "RuntimeError", "(", "(", "\"Unable to kill {process_name} (pid {process_pid}) \"", "\"after {num_seconds} seconds.\"", ")", ".", "format", "(", "process_name", "=", "process", ".", "name", ",", "process_pid", "=", "process", ".", "pid", ",", "num_seconds", "=", "KILL_TIMEOUT_IN_SECONDS", ")", ")" ]
https://github.com/apache/impala/blob/8ddac48f3428c86f2cbd037ced89cfb903298b12/bin/start-impala-cluster.py#L207-L228
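A simplified, hedged sketch of the same terminate-then-wait pattern written directly against psutil; the timeout value is an assumption and error reporting is reduced to a single RuntimeError.

```python
import psutil

KILL_TIMEOUT_IN_SECONDS = 240  # assumed value

def kill_by_name(name, force=False):
    procs = [p for p in psutil.process_iter(["name"]) if p.info["name"] == name]
    for p in procs:                       # signal everything first ...
        try:
            p.kill() if force else p.terminate()
        except psutil.NoSuchProcess:
            pass
    _, alive = psutil.wait_procs(procs, timeout=KILL_TIMEOUT_IN_SECONDS)
    if alive:                             # ... then wait for all of them together
        raise RuntimeError("processes still alive: %s" % alive)
```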
MythTV/mythtv
d282a209cb8be85d036f85a62a8ec971b67d45f4
mythtv/programs/scripts/internetcontent/nv_python_libs/mtv/mtv_api.py
python
Videos.ampReplace
(self, text)
return text.replace('&amp;','~~~~~').replace('&','&amp;').replace('~~~~~', '&amp;')
Replace all "&" characters with "&amp;"
Replace all "&" characters with "&amp;"
[ "Replace", "all", "&", "characters", "with", "&amp", ";" ]
def ampReplace(self, text): '''Replace all "&" characters with "&amp;" ''' text = self.textUtf8(text) return text.replace('&amp;','~~~~~').replace('&','&amp;').replace('~~~~~', '&amp;')
[ "def", "ampReplace", "(", "self", ",", "text", ")", ":", "text", "=", "self", ".", "textUtf8", "(", "text", ")", "return", "text", ".", "replace", "(", "'&amp;'", ",", "'~~~~~'", ")", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", ".", "replace", "(", "'~~~~~'", ",", "'&amp;'", ")" ]
https://github.com/MythTV/mythtv/blob/d282a209cb8be85d036f85a62a8ec971b67d45f4/mythtv/programs/scripts/internetcontent/nv_python_libs/mtv/mtv_api.py#L317-L321
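A standalone sketch of the escaping trick in `ampReplace`: the temporary `~~~~~` marker protects already-escaped `&amp;` sequences from being escaped twice. The helper below drops the class's UTF-8 normalization step.

```python
def amp_replace(text):
    return text.replace('&amp;', '~~~~~').replace('&', '&amp;').replace('~~~~~', '&amp;')

print(amp_replace('Tom & Jerry &amp; friends'))
# -> 'Tom &amp; Jerry &amp; friends'  (the pre-escaped ampersand is left alone)
```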
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/Paste/paste/util/mimeparse.py
python
quality
(mime_type, ranges)
return quality_parsed(mime_type, parsed_ranges)
Returns the quality 'q' of a mime-type when compared against the media-ranges in ranges. For example: >>> quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5') 0.7
Returns the quality 'q' of a mime-type when compared against the media-ranges in ranges. For example:
[ "Returns", "the", "quality", "q", "of", "a", "mime", "-", "type", "when", "compared", "against", "the", "media", "-", "ranges", "in", "ranges", ".", "For", "example", ":" ]
def quality(mime_type, ranges): """Returns the quality 'q' of a mime-type when compared against the media-ranges in ranges. For example: >>> quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5') 0.7 """ parsed_ranges = map(parse_media_range, ranges.split(',')) return quality_parsed(mime_type, parsed_ranges)
[ "def", "quality", "(", "mime_type", ",", "ranges", ")", ":", "parsed_ranges", "=", "map", "(", "parse_media_range", ",", "ranges", ".", "split", "(", "','", ")", ")", "return", "quality_parsed", "(", "mime_type", ",", "parsed_ranges", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/Paste/paste/util/mimeparse.py#L111-L120
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py
python
PostParse._visit_assignment_node
(self, node, expr_list)
return assign_node
Flatten parallel assignments into separate single assignments or cascaded assignments.
Flatten parallel assignments into separate single assignments or cascaded assignments.
[ "Flatten", "parallel", "assignments", "into", "separate", "single", "assignments", "or", "cascaded", "assignments", "." ]
def _visit_assignment_node(self, node, expr_list): """Flatten parallel assignments into separate single assignments or cascaded assignments. """ if sum([ 1 for expr in expr_list if expr.is_sequence_constructor or expr.is_string_literal ]) < 2: # no parallel assignments => nothing to do return node expr_list_list = [] flatten_parallel_assignments(expr_list, expr_list_list) temp_refs = [] eliminate_rhs_duplicates(expr_list_list, temp_refs) nodes = [] for expr_list in expr_list_list: lhs_list = expr_list[:-1] rhs = expr_list[-1] if len(lhs_list) == 1: node = Nodes.SingleAssignmentNode(rhs.pos, lhs = lhs_list[0], rhs = rhs) else: node = Nodes.CascadedAssignmentNode(rhs.pos, lhs_list = lhs_list, rhs = rhs) nodes.append(node) if len(nodes) == 1: assign_node = nodes[0] else: assign_node = Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes) if temp_refs: duplicates_and_temps = [ (temp.expression, temp) for temp in temp_refs ] sort_common_subsequences(duplicates_and_temps) for _, temp_ref in duplicates_and_temps[::-1]: assign_node = LetNode(temp_ref, assign_node) return assign_node
[ "def", "_visit_assignment_node", "(", "self", ",", "node", ",", "expr_list", ")", ":", "if", "sum", "(", "[", "1", "for", "expr", "in", "expr_list", "if", "expr", ".", "is_sequence_constructor", "or", "expr", ".", "is_string_literal", "]", ")", "<", "2", ":", "# no parallel assignments => nothing to do", "return", "node", "expr_list_list", "=", "[", "]", "flatten_parallel_assignments", "(", "expr_list", ",", "expr_list_list", ")", "temp_refs", "=", "[", "]", "eliminate_rhs_duplicates", "(", "expr_list_list", ",", "temp_refs", ")", "nodes", "=", "[", "]", "for", "expr_list", "in", "expr_list_list", ":", "lhs_list", "=", "expr_list", "[", ":", "-", "1", "]", "rhs", "=", "expr_list", "[", "-", "1", "]", "if", "len", "(", "lhs_list", ")", "==", "1", ":", "node", "=", "Nodes", ".", "SingleAssignmentNode", "(", "rhs", ".", "pos", ",", "lhs", "=", "lhs_list", "[", "0", "]", ",", "rhs", "=", "rhs", ")", "else", ":", "node", "=", "Nodes", ".", "CascadedAssignmentNode", "(", "rhs", ".", "pos", ",", "lhs_list", "=", "lhs_list", ",", "rhs", "=", "rhs", ")", "nodes", ".", "append", "(", "node", ")", "if", "len", "(", "nodes", ")", "==", "1", ":", "assign_node", "=", "nodes", "[", "0", "]", "else", ":", "assign_node", "=", "Nodes", ".", "ParallelAssignmentNode", "(", "nodes", "[", "0", "]", ".", "pos", ",", "stats", "=", "nodes", ")", "if", "temp_refs", ":", "duplicates_and_temps", "=", "[", "(", "temp", ".", "expression", ",", "temp", ")", "for", "temp", "in", "temp_refs", "]", "sort_common_subsequences", "(", "duplicates_and_temps", ")", "for", "_", ",", "temp_ref", "in", "duplicates_and_temps", "[", ":", ":", "-", "1", "]", ":", "assign_node", "=", "LetNode", "(", "temp_ref", ",", "assign_node", ")", "return", "assign_node" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/cython/Cython/Compiler/ParseTreeTransforms.py#L278-L316
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/feature_column/feature_column_v2.py
python
categorical_column_with_vocabulary_list
(key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0)
return VocabularyListCategoricalColumn( key=key, vocabulary_list=tuple(vocabulary_list), dtype=dtype, default_value=default_value, num_oov_buckets=num_oov_buckets)
A `CategoricalColumn` with in-memory vocabulary. Use this when your inputs are in string or integer format, and you have an in-memory vocabulary mapping each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: In the following example, each input in `vocabulary_list` is assigned an ID 0-3 corresponding to its index (e.g., input 'B' produces output 2). All other inputs are hashed and assigned an ID 4-5. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2) columns = [colors, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` Example with `default_value`: In the following example, each input in `vocabulary_list` is assigned an ID 0-4 corresponding to its index (e.g., input 'B' produces output 3). All other inputs are assigned `default_value` 0. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0) columns = [colors, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(colors, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_list: An ordered iterable defining the vocabulary. Each feature is mapped to the index of its value (if present) in `vocabulary_list`. Must be castable to `dtype`. dtype: The type of features. Only string and integer types are supported. If `None`, it will be inferred from `vocabulary_list`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. Returns: A `CategoricalColumn` with in-memory vocabulary. Raises: ValueError: if `vocabulary_list` is empty, or contains duplicate keys. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: if `dtype` is not integer or string.
A `CategoricalColumn` with in-memory vocabulary.
[ "A", "CategoricalColumn", "with", "in", "-", "memory", "vocabulary", "." ]
def categorical_column_with_vocabulary_list(key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0): """A `CategoricalColumn` with in-memory vocabulary. Use this when your inputs are in string or integer format, and you have an in-memory vocabulary mapping each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: In the following example, each input in `vocabulary_list` is assigned an ID 0-3 corresponding to its index (e.g., input 'B' produces output 2). All other inputs are hashed and assigned an ID 4-5. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2) columns = [colors, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` Example with `default_value`: In the following example, each input in `vocabulary_list` is assigned an ID 0-4 corresponding to its index (e.g., input 'B' produces output 3). All other inputs are assigned `default_value` 0. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0) columns = [colors, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(colors, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_list: An ordered iterable defining the vocabulary. Each feature is mapped to the index of its value (if present) in `vocabulary_list`. Must be castable to `dtype`. dtype: The type of features. Only string and integer types are supported. If `None`, it will be inferred from `vocabulary_list`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. Returns: A `CategoricalColumn` with in-memory vocabulary. Raises: ValueError: if `vocabulary_list` is empty, or contains duplicate keys. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: if `dtype` is not integer or string. """ if (vocabulary_list is None) or (len(vocabulary_list) < 1): raise ValueError( 'vocabulary_list {} must be non-empty, column_name: {}'.format( vocabulary_list, key)) if len(set(vocabulary_list)) != len(vocabulary_list): raise ValueError( 'Duplicate keys in vocabulary_list {}, column_name: {}'.format( vocabulary_list, key)) vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype) if num_oov_buckets: if default_value != -1: raise ValueError( 'Can\'t specify both num_oov_buckets and default_value in {}.'.format( key)) if num_oov_buckets < 0: raise ValueError('Invalid num_oov_buckets {} in {}.'.format( num_oov_buckets, key)) fc_utils.assert_string_or_int( vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key)) if dtype is None: dtype = vocabulary_dtype elif dtype.is_integer != vocabulary_dtype.is_integer: raise ValueError( 'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format( dtype, vocabulary_dtype, key)) fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key)) fc_utils.assert_key_is_string(key) return VocabularyListCategoricalColumn( key=key, vocabulary_list=tuple(vocabulary_list), dtype=dtype, default_value=default_value, num_oov_buckets=num_oov_buckets)
[ "def", "categorical_column_with_vocabulary_list", "(", "key", ",", "vocabulary_list", ",", "dtype", "=", "None", ",", "default_value", "=", "-", "1", ",", "num_oov_buckets", "=", "0", ")", ":", "if", "(", "vocabulary_list", "is", "None", ")", "or", "(", "len", "(", "vocabulary_list", ")", "<", "1", ")", ":", "raise", "ValueError", "(", "'vocabulary_list {} must be non-empty, column_name: {}'", ".", "format", "(", "vocabulary_list", ",", "key", ")", ")", "if", "len", "(", "set", "(", "vocabulary_list", ")", ")", "!=", "len", "(", "vocabulary_list", ")", ":", "raise", "ValueError", "(", "'Duplicate keys in vocabulary_list {}, column_name: {}'", ".", "format", "(", "vocabulary_list", ",", "key", ")", ")", "vocabulary_dtype", "=", "dtypes", ".", "as_dtype", "(", "np", ".", "array", "(", "vocabulary_list", ")", ".", "dtype", ")", "if", "num_oov_buckets", ":", "if", "default_value", "!=", "-", "1", ":", "raise", "ValueError", "(", "'Can\\'t specify both num_oov_buckets and default_value in {}.'", ".", "format", "(", "key", ")", ")", "if", "num_oov_buckets", "<", "0", ":", "raise", "ValueError", "(", "'Invalid num_oov_buckets {} in {}.'", ".", "format", "(", "num_oov_buckets", ",", "key", ")", ")", "fc_utils", ".", "assert_string_or_int", "(", "vocabulary_dtype", ",", "prefix", "=", "'column_name: {} vocabulary'", ".", "format", "(", "key", ")", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "vocabulary_dtype", "elif", "dtype", ".", "is_integer", "!=", "vocabulary_dtype", ".", "is_integer", ":", "raise", "ValueError", "(", "'dtype {} and vocabulary dtype {} do not match, column_name: {}'", ".", "format", "(", "dtype", ",", "vocabulary_dtype", ",", "key", ")", ")", "fc_utils", ".", "assert_string_or_int", "(", "dtype", ",", "prefix", "=", "'column_name: {}'", ".", "format", "(", "key", ")", ")", "fc_utils", ".", "assert_key_is_string", "(", "key", ")", "return", "VocabularyListCategoricalColumn", "(", "key", "=", "key", ",", "vocabulary_list", "=", "tuple", "(", "vocabulary_list", ")", ",", "dtype", "=", "dtype", ",", "default_value", "=", "default_value", ",", "num_oov_buckets", "=", "num_oov_buckets", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/feature_column/feature_column_v2.py#L1483-L1596
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/dataset/vision/validators.py
python
check_auto_contrast
(method)
return new_method
Wrapper method to check the parameters of AutoContrast ops (Python and C++).
Wrapper method to check the parameters of AutoContrast ops (Python and C++).
[ "Wrapper", "method", "to", "check", "the", "parameters", "of", "AutoContrast", "ops", "(", "Python", "and", "C", "++", ")", "." ]
def check_auto_contrast(method): """Wrapper method to check the parameters of AutoContrast ops (Python and C++).""" @wraps(method) def new_method(self, *args, **kwargs): [cutoff, ignore], _ = parse_user_args(method, *args, **kwargs) type_check(cutoff, (int, float), "cutoff") check_value_cutoff(cutoff, [0, 50], "cutoff") if ignore is not None: type_check(ignore, (list, tuple, int), "ignore") if isinstance(ignore, int): check_value(ignore, [0, 255], "ignore") if isinstance(ignore, (list, tuple)): for item in ignore: type_check(item, (int,), "item") check_value(item, [0, 255], "ignore") return method(self, *args, **kwargs) return new_method
[ "def", "check_auto_contrast", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "new_method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "[", "cutoff", ",", "ignore", "]", ",", "_", "=", "parse_user_args", "(", "method", ",", "*", "args", ",", "*", "*", "kwargs", ")", "type_check", "(", "cutoff", ",", "(", "int", ",", "float", ")", ",", "\"cutoff\"", ")", "check_value_cutoff", "(", "cutoff", ",", "[", "0", ",", "50", "]", ",", "\"cutoff\"", ")", "if", "ignore", "is", "not", "None", ":", "type_check", "(", "ignore", ",", "(", "list", ",", "tuple", ",", "int", ")", ",", "\"ignore\"", ")", "if", "isinstance", "(", "ignore", ",", "int", ")", ":", "check_value", "(", "ignore", ",", "[", "0", ",", "255", "]", ",", "\"ignore\"", ")", "if", "isinstance", "(", "ignore", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "item", "in", "ignore", ":", "type_check", "(", "item", ",", "(", "int", ",", ")", ",", "\"item\"", ")", "check_value", "(", "item", ",", "[", "0", ",", "255", "]", ",", "\"ignore\"", ")", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "new_method" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/dataset/vision/validators.py#L861-L879
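For context, a hedged sketch of the call the validator guards: constructing an `AutoContrast` op with a cutoff percentage and a list of pixel values to ignore. The exact import path and argument ranges may vary between MindSpore releases.

```python
import mindspore.dataset.vision as vision

# cutoff is checked against [0, 50); ignore values must be ints in [0, 255]
op = vision.AutoContrast(cutoff=10.0, ignore=[0, 255])
# vision.AutoContrast(cutoff=80.0) would be rejected by the decorated check
```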
etternagame/etterna
8775f74ac9c353320128609d4b4150672e9a6d04
extern/SQLiteCpp/cpplint.py
python
FindStartOfExpressionInLine
(line, endpos, depth, startchar, endchar)
return (-1, depth)
Find position at the matching startchar. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differs by 1. Args: line: a CleansedLines line. endpos: start searching at this position. depth: nesting level at endpos. startchar: expression opening character. endchar: expression closing character. Returns: On finding matching startchar: (index at matching startchar, 0) Otherwise: (-1, new depth at beginning of this line)
Find position at the matching startchar.
[ "Find", "position", "at", "the", "matching", "startchar", "." ]
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar): """Find position at the matching startchar. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differs by 1. Args: line: a CleansedLines line. endpos: start searching at this position. depth: nesting level at endpos. startchar: expression opening character. endchar: expression closing character. Returns: On finding matching startchar: (index at matching startchar, 0) Otherwise: (-1, new depth at beginning of this line) """ for i in xrange(endpos, -1, -1): if line[i] == endchar: depth += 1 elif line[i] == startchar: depth -= 1 if depth == 0: return (i, 0) return (-1, depth)
[ "def", "FindStartOfExpressionInLine", "(", "line", ",", "endpos", ",", "depth", ",", "startchar", ",", "endchar", ")", ":", "for", "i", "in", "xrange", "(", "endpos", ",", "-", "1", ",", "-", "1", ")", ":", "if", "line", "[", "i", "]", "==", "endchar", ":", "depth", "+=", "1", "elif", "line", "[", "i", "]", "==", "startchar", ":", "depth", "-=", "1", "if", "depth", "==", "0", ":", "return", "(", "i", ",", "0", ")", "return", "(", "-", "1", ",", "depth", ")" ]
https://github.com/etternagame/etterna/blob/8775f74ac9c353320128609d4b4150672e9a6d04/extern/SQLiteCpp/cpplint.py#L1304-L1328
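A self-contained sketch of the reverse bracket scan, ported to Python 3 (`range` instead of `xrange`) with a worked call showing how the matching opener is located.

```python
def find_start(line, endpos, depth, startchar, endchar):
    for i in range(endpos, -1, -1):
        if line[i] == endchar:
            depth += 1
        elif line[i] == startchar:
            depth -= 1
            if depth == 0:
                return (i, 0)
    return (-1, depth)

# Scanning backwards from the outer ')' at index 13 finds its '(' at index 3.
print(find_start("foo(bar(1), 2)", 13, 0, "(", ")"))  # -> (3, 0)
```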
metashell/metashell
f4177e4854ea00c8dbc722cadab26ef413d798ea
3rd/templight/clang/tools/scan-build-py/libscanbuild/report.py
python
chop
(prefix, filename)
return filename if not len(prefix) else os.path.relpath(filename, prefix)
Create 'filename' from '/prefix/filename'
Create 'filename' from '/prefix/filename'
[ "Create", "filename", "from", "/", "prefix", "/", "filename" ]
def chop(prefix, filename): """ Create 'filename' from '/prefix/filename' """ return filename if not len(prefix) else os.path.relpath(filename, prefix)
[ "def", "chop", "(", "prefix", ",", "filename", ")", ":", "return", "filename", "if", "not", "len", "(", "prefix", ")", "else", "os", ".", "path", ".", "relpath", "(", "filename", ",", "prefix", ")" ]
https://github.com/metashell/metashell/blob/f4177e4854ea00c8dbc722cadab26ef413d798ea/3rd/templight/clang/tools/scan-build-py/libscanbuild/report.py#L442-L445
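A quick sketch of `chop` on POSIX-style paths; the directories are placeholders.

```python
import os

def chop(prefix, filename):
    """Create 'filename' from '/prefix/filename'."""
    return filename if not len(prefix) else os.path.relpath(filename, prefix)

print(chop('/home/user/project', '/home/user/project/src/main.c'))  # -> 'src/main.c'
print(chop('', '/absolute/path.c'))                                 # returned unchanged
```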
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
src/python/turicreate/toolkits/regression/decision_tree_regression.py
python
DecisionTreeRegression.__init__
(self, proxy)
__init__(self)
__init__(self)
[ "__init__", "(", "self", ")" ]
def __init__(self, proxy): """__init__(self)""" self.__proxy__ = proxy self.__name__ = self.__class__._native_name()
[ "def", "__init__", "(", "self", ",", "proxy", ")", ":", "self", ".", "__proxy__", "=", "proxy", "self", ".", "__name__", "=", "self", ".", "__class__", ".", "_native_name", "(", ")" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/toolkits/regression/decision_tree_regression.py#L69-L72