Column schema (name, dtype, value stats):

    nwo                 stringlengths   5 to 86
    sha                 stringlengths   40 to 40
    path                stringlengths   4 to 189
    language            stringclasses   1 value
    identifier          stringlengths   1 to 94
    parameters          stringlengths   2 to 4.03k
    argument_list       stringclasses   1 value
    return_statement    stringlengths   0 to 11.5k
    docstring           stringlengths   1 to 33.2k
    docstring_summary   stringlengths   0 to 5.15k
    docstring_tokens    list
    function            stringlengths   34 to 151k
    function_tokens     list
    url                 stringlengths   90 to 278
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/ops/image_ops_impl.py
python
flip_left_right
(image)
return fix_image_flip_shape(image, array_ops.reverse(image, [1]))
Flip an image horizontally (left to right). Outputs the contents of `image` flipped along the second dimension, which is `width`. See also `reverse()`. Args: image: A 3-D tensor of shape `[height, width, channels].` Returns: A 3-D tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported.
Flip an image horizontally (left to right).
[ "Flip", "an", "image", "horizontally", "(", "left", "to", "right", ")", "." ]
def flip_left_right(image): """Flip an image horizontally (left to right). Outputs the contents of `image` flipped along the second dimension, which is `width`. See also `reverse()`. Args: image: A 3-D tensor of shape `[height, width, channels].` Returns: A 3-D tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ image = ops.convert_to_tensor(image, name='image') image = control_flow_ops.with_dependencies( _Check3DImage(image, require_static=False), image) return fix_image_flip_shape(image, array_ops.reverse(image, [1]))
[ "def", "flip_left_right", "(", "image", ")", ":", "image", "=", "ops", ".", "convert_to_tensor", "(", "image", ",", "name", "=", "'image'", ")", "image", "=", "control_flow_ops", ".", "with_dependencies", "(", "_Check3DImage", "(", "image", ",", "require_static", "=", "False", ")", ",", "image", ")", "return", "fix_image_flip_shape", "(", "image", ",", "array_ops", ".", "reverse", "(", "image", ",", "[", "1", "]", ")", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/image_ops_impl.py#L262-L282
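A minimal usage sketch for the record above, assuming a TensorFlow 1.x graph-mode build (matching this snapshot), where the function is exposed publicly as `tf.image.flip_left_right`:

```python
# Sketch, assuming TF 1.x graph mode as in this snapshot.
import numpy as np
import tensorflow as tf

image = np.arange(12, dtype=np.float32).reshape(2, 2, 3)  # [height, width, channels]
flipped = tf.image.flip_left_right(tf.constant(image))    # reverses dim 1 (width)
with tf.Session() as sess:
    print(sess.run(flipped))  # columns swapped; rows and channels untouched
```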
InteractiveComputerGraphics/PositionBasedDynamics
136469f03f7869666d907ea8d27872b098715f4a
extern/pybind/pybind11/setup_helpers.py
python
tmp_chdir
()
Prepare and enter a temporary directory, cleanup when done
Prepare and enter a temporary directory, cleanup when done
[ "Prepare", "and", "enter", "a", "temporary", "directory", "cleanup", "when", "done" ]
def tmp_chdir(): "Prepare and enter a temporary directory, cleanup when done" # Threadsafe with tmp_chdir_lock: olddir = os.getcwd() try: tmpdir = tempfile.mkdtemp() os.chdir(tmpdir) yield tmpdir finally: os.chdir(olddir) shutil.rmtree(tmpdir)
[ "def", "tmp_chdir", "(", ")", ":", "# Threadsafe", "with", "tmp_chdir_lock", ":", "olddir", "=", "os", ".", "getcwd", "(", ")", "try", ":", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "os", ".", "chdir", "(", "tmpdir", ")", "yield", "tmpdir", "finally", ":", "os", ".", "chdir", "(", "olddir", ")", "shutil", ".", "rmtree", "(", "tmpdir", ")" ]
https://github.com/InteractiveComputerGraphics/PositionBasedDynamics/blob/136469f03f7869666d907ea8d27872b098715f4a/extern/pybind/pybind11/setup_helpers.py#L209-L221
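Usage sketch for the record above: in the pybind11 sources this generator is wrapped with `@contextlib.contextmanager`, so it is driven by a `with` block (here `tmp_chdir` is assumed to be in scope, e.g. imported from `pybind11.setup_helpers`):

```python
import os

with tmp_chdir() as tmpdir:           # enter a fresh temporary directory
    print(os.getcwd())                # somewhere under the system temp dir
    open("scratch.txt", "w").close()  # safe: the directory is removed on exit
# back in the original working directory; tmpdir has been deleted
```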
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
scripts/Python/static-binding/lldb.py
python
SBStream.write
(self, str)
return _lldb.SBStream_write(self, str)
write(SBStream self, char const * str)
write(SBStream self, char const * str)
[ "write", "(", "SBStream", "self", "char", "const", "*", "str", ")" ]
def write(self, str): """write(SBStream self, char const * str)""" return _lldb.SBStream_write(self, str)
[ "def", "write", "(", "self", ",", "str", ")", ":", "return", "_lldb", ".", "SBStream_write", "(", "self", ",", "str", ")" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L9570-L9572
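A short sketch for the SWIG-generated binding above, assuming the `lldb` Python module that ships with LLDB is importable:

```python
import lldb

stream = lldb.SBStream()
stream.write("hello from SBStream\n")  # append a C string to the stream
print(stream.GetData())                # read back the accumulated text
```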
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/requests/sessions.py
python
Session.close
(self)
Closes all adapters and as such the session
Closes all adapters and as such the session
[ "Closes", "all", "adapters", "and", "as", "such", "the", "session" ]
def close(self): """Closes all adapters and as such the session""" for v in self.adapters.values(): v.close()
[ "def", "close", "(", "self", ")", ":", "for", "v", "in", "self", ".", "adapters", ".", "values", "(", ")", ":", "v", ".", "close", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/requests/sessions.py#L705-L708
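Usage sketch for the record above: `Session` is also a context manager whose `__exit__` calls this `close()`, so the idiomatic form is:

```python
import requests

with requests.Session() as session:
    session.get("https://example.com")
# every mounted HTTPAdapter (and its connection pool) is now closed
```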
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/packaging/_compat.py
python
with_metaclass
(meta, *bases)
return type.__new__(metaclass, "temporary_class", (), {})
Create a base class with a metaclass.
Create a base class with a metaclass.
[ "Create", "a", "base", "class", "with", "a", "metaclass", "." ]
def with_metaclass(meta, *bases): # type: (Type[Any], Tuple[Type[Any], ...]) -> Any """ Create a base class with a metaclass. """ # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): # type: ignore def __new__(cls, name, this_bases, d): # type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any return meta(name, bases, d) return type.__new__(metaclass, "temporary_class", (), {})
[ "def", "with_metaclass", "(", "meta", ",", "*", "bases", ")", ":", "# type: (Type[Any], Tuple[Type[Any], ...]) -> Any", "# This requires a bit of explanation: the basic idea is to make a dummy", "# metaclass for one level of class instantiation that replaces itself with", "# the actual metaclass.", "class", "metaclass", "(", "meta", ")", ":", "# type: ignore", "def", "__new__", "(", "cls", ",", "name", ",", "this_bases", ",", "d", ")", ":", "# type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any", "return", "meta", "(", "name", ",", "bases", ",", "d", ")", "return", "type", ".", "__new__", "(", "metaclass", ",", "\"temporary_class\"", ",", "(", ")", ",", "{", "}", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/packaging/_compat.py#L25-L38
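A self-contained sketch of what the shim above enables: one class statement that receives a custom metaclass under both Python 2 and Python 3 syntax rules:

```python
class Meta(type):
    def __new__(mcls, name, bases, namespace):
        namespace.setdefault("tag", name.lower())  # stamp every class we create
        return super(Meta, mcls).__new__(mcls, name, bases, namespace)

class Base(with_metaclass(Meta, object)):  # temporary_class replaces itself
    pass

print(Base.tag)            # "base": Meta ran when Base was created
print(type(Base) is Meta)  # True: the dummy metaclass swapped itself out
```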
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Editor/Python/windows/Lib/site-packages/setuptools/package_index.py
python
find_external_links
(url, page)
Find rel="homepage" and rel="download" links in `page`, yielding URLs
Find rel="homepage" and rel="download" links in `page`, yielding URLs
[ "Find", "rel", "=", "homepage", "and", "rel", "=", "download", "links", "in", "page", "yielding", "URLs" ]
def find_external_links(url, page): """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" for match in REL.finditer(page): tag, rel = match.groups() rels = set(map(str.strip, rel.lower().split(','))) if 'homepage' in rels or 'download' in rels: for match in HREF.finditer(tag): yield urllib.parse.urljoin(url, htmldecode(match.group(1))) for tag in ("<th>Home Page", "<th>Download URL"): pos = page.find(tag) if pos != -1: match = HREF.search(page, pos) if match: yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
[ "def", "find_external_links", "(", "url", ",", "page", ")", ":", "for", "match", "in", "REL", ".", "finditer", "(", "page", ")", ":", "tag", ",", "rel", "=", "match", ".", "groups", "(", ")", "rels", "=", "set", "(", "map", "(", "str", ".", "strip", ",", "rel", ".", "lower", "(", ")", ".", "split", "(", "','", ")", ")", ")", "if", "'homepage'", "in", "rels", "or", "'download'", "in", "rels", ":", "for", "match", "in", "HREF", ".", "finditer", "(", "tag", ")", ":", "yield", "urllib", ".", "parse", ".", "urljoin", "(", "url", ",", "htmldecode", "(", "match", ".", "group", "(", "1", ")", ")", ")", "for", "tag", "in", "(", "\"<th>Home Page\"", ",", "\"<th>Download URL\"", ")", ":", "pos", "=", "page", ".", "find", "(", "tag", ")", "if", "pos", "!=", "-", "1", ":", "match", "=", "HREF", ".", "search", "(", "page", ",", "pos", ")", "if", "match", ":", "yield", "urllib", ".", "parse", ".", "urljoin", "(", "url", ",", "htmldecode", "(", "match", ".", "group", "(", "1", ")", ")", ")" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/setuptools/package_index.py#L222-L237
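A sketch for the record above; `REL` and `HREF` are module-level regexes in `setuptools.package_index`, so the function is exercised through that module (the input HTML here is illustrative):

```python
from setuptools.package_index import find_external_links

page = '<a href="files/pkg-1.0.tar.gz" rel="download">source</a>'
base = "https://example.org/simple/pkg/"
print(list(find_external_links(base, page)))
# expected under these assumptions:
# ['https://example.org/simple/pkg/files/pkg-1.0.tar.gz']
```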
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/scipy/interpolate/interpolate.py
python
_PPolyBase.construct_fast
(cls, c, x, extrapolate=None, axis=0)
return self
Construct the piecewise polynomial without making checks. Takes the same parameters as the constructor. Input arguments `c` and `x` must be arrays of the correct shape and type. The `c` array can only be of dtypes float and complex, and `x` array must have dtype float.
Construct the piecewise polynomial without making checks.
[ "Construct", "the", "piecewise", "polynomial", "without", "making", "checks", "." ]
def construct_fast(cls, c, x, extrapolate=None, axis=0): """ Construct the piecewise polynomial without making checks. Takes the same parameters as the constructor. Input arguments `c` and `x` must be arrays of the correct shape and type. The `c` array can only be of dtypes float and complex, and `x` array must have dtype float. """ self = object.__new__(cls) self.c = c self.x = x self.axis = axis if extrapolate is None: extrapolate = True self.extrapolate = extrapolate return self
[ "def", "construct_fast", "(", "cls", ",", "c", ",", "x", ",", "extrapolate", "=", "None", ",", "axis", "=", "0", ")", ":", "self", "=", "object", ".", "__new__", "(", "cls", ")", "self", ".", "c", "=", "c", "self", ".", "x", "=", "x", "self", ".", "axis", "=", "axis", "if", "extrapolate", "is", "None", ":", "extrapolate", "=", "True", "self", ".", "extrapolate", "=", "extrapolate", "return", "self" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/interpolate/interpolate.py#L686-L702
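Sketch for the classmethod above: `construct_fast` skips the validation in `PPoly.__init__`, so `c` and `x` must already be float arrays of the documented shapes:

```python
import numpy as np
from scipy.interpolate import PPoly

x = np.array([0.0, 1.0, 2.0])  # breakpoints, dtype float
c = np.array([[1.0, -2.0],     # shape (order+1, len(x)-1),
              [0.0,  1.0]])    # highest-order coefficients first
p = PPoly.construct_fast(c, x)
print(p(0.5), p(1.5))          # 0.5, 0.0 for this piecewise-linear case
```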
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/quopri.py
python
unhex
(s)
return bits
Get the integer value of a hexadecimal number.
Get the integer value of a hexadecimal number.
[ "Get", "the", "integer", "value", "of", "a", "hexadecimal", "number", "." ]
def unhex(s): """Get the integer value of a hexadecimal number.""" bits = 0 for c in s: c = bytes((c,)) if b'0' <= c <= b'9': i = ord('0') elif b'a' <= c <= b'f': i = ord('a')-10 elif b'A' <= c <= b'F': i = ord(b'A')-10 else: assert False, "non-hex digit "+repr(c) bits = bits*16 + (ord(c) - i) return bits
[ "def", "unhex", "(", "s", ")", ":", "bits", "=", "0", "for", "c", "in", "s", ":", "c", "=", "bytes", "(", "(", "c", ",", ")", ")", "if", "b'0'", "<=", "c", "<=", "b'9'", ":", "i", "=", "ord", "(", "'0'", ")", "elif", "b'a'", "<=", "c", "<=", "b'f'", ":", "i", "=", "ord", "(", "'a'", ")", "-", "10", "elif", "b'A'", "<=", "c", "<=", "b'F'", ":", "i", "=", "ord", "(", "b'A'", ")", "-", "10", "else", ":", "assert", "False", ",", "\"non-hex digit \"", "+", "repr", "(", "c", ")", "bits", "=", "bits", "*", "16", "+", "(", "ord", "(", "c", ")", "-", "i", ")", "return", "bits" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/quopri.py#L177-L191
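A quick sketch for the helper above; `unhex` lives in the standard-library `quopri` module and expects a bytes object of hex digits (hence the `bytes((c,))` dance in the loop):

```python
from quopri import unhex

print(unhex(b"0A"))    # 10
print(unhex(b"ff"))    # 255
print(unhex(b"1234"))  # 4660
```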
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
gpu/command_buffer/build_gles2_cmd_buffer.py
python
HandWrittenHandler.WriteServiceImplementation
(self, func, file)
Overrriden from TypeHandler.
Overrriden from TypeHandler.
[ "Overrriden", "from", "TypeHandler", "." ]
def WriteServiceImplementation(self, func, file): """Overrriden from TypeHandler.""" pass
[ "def", "WriteServiceImplementation", "(", "self", ",", "func", ",", "file", ")", ":", "pass" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/gpu/command_buffer/build_gles2_cmd_buffer.py#L2453-L2455
apache/thrift
0b29261a4f3c6882ef3b09aae47914f0012b0472
lib/py/src/transport/TSocket.py
python
TSocket.__init__
(self, host='localhost', port=9090, unix_socket=None, socket_family=socket.AF_UNSPEC, socket_keepalive=False)
Initialize a TSocket @param host(str) The host to connect to. @param port(int) The (TCP) port to connect to. @param unix_socket(str) The filename of a unix socket to connect to. (host and port will be ignored.) @param socket_family(int) The socket family to use with this socket. @param socket_keepalive(bool) enable TCP keepalive, default off.
Initialize a TSocket
[ "Initialize", "a", "TSocket" ]
def __init__(self, host='localhost', port=9090, unix_socket=None, socket_family=socket.AF_UNSPEC, socket_keepalive=False): """Initialize a TSocket @param host(str) The host to connect to. @param port(int) The (TCP) port to connect to. @param unix_socket(str) The filename of a unix socket to connect to. (host and port will be ignored.) @param socket_family(int) The socket family to use with this socket. @param socket_keepalive(bool) enable TCP keepalive, default off. """ self.host = host self.port = port self.handle = None self._unix_socket = unix_socket self._timeout = None self._socket_family = socket_family self._socket_keepalive = socket_keepalive
[ "def", "__init__", "(", "self", ",", "host", "=", "'localhost'", ",", "port", "=", "9090", ",", "unix_socket", "=", "None", ",", "socket_family", "=", "socket", ".", "AF_UNSPEC", ",", "socket_keepalive", "=", "False", ")", ":", "self", ".", "host", "=", "host", "self", ".", "port", "=", "port", "self", ".", "handle", "=", "None", "self", ".", "_unix_socket", "=", "unix_socket", "self", ".", "_timeout", "=", "None", "self", ".", "_socket_family", "=", "socket_family", "self", ".", "_socket_keepalive", "=", "socket_keepalive" ]
https://github.com/apache/thrift/blob/0b29261a4f3c6882ef3b09aae47914f0012b0472/lib/py/src/transport/TSocket.py#L53-L71
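Construction sketch matching the signature above (older Thrift releases lack the `socket_family`/`socket_keepalive` parameters); the buffering layer shown is the usual next step before a protocol and generated client:

```python
import socket
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport

sock = TSocket(host="127.0.0.1", port=9090,
               socket_family=socket.AF_INET, socket_keepalive=True)
sock.setTimeout(5000)                 # milliseconds
transport = TBufferedTransport(sock)
# transport.open() would connect; a protocol + generated client follows
```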
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/distutils/cmd.py
python
Command.announce
(self, msg, level=1)
If the current verbosity level is of greater than or equal to 'level' print 'msg' to stdout.
If the current verbosity level is of greater than or equal to 'level' print 'msg' to stdout.
[ "If", "the", "current", "verbosity", "level", "is", "of", "greater", "than", "or", "equal", "to", "level", "print", "msg", "to", "stdout", "." ]
def announce(self, msg, level=1): """If the current verbosity level is of greater than or equal to 'level' print 'msg' to stdout. """ log.log(level, msg)
[ "def", "announce", "(", "self", ",", "msg", ",", "level", "=", "1", ")", ":", "log", ".", "log", "(", "level", ",", "msg", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/distutils/cmd.py#L180-L184
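A sketch of `announce` in context: a toy distutils command (under a normal `setup.py` run, messages at `log.INFO` and above are printed):

```python
from distutils.cmd import Command
from distutils import log

class ShoutCommand(Command):
    """Toy command that just announces a message."""
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        self.announce("running shout", level=log.INFO)
```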
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/boost_1_66_0/libs/python/config/tools/sphinx4scons.py
python
_get_sphinxoptions
(env, target, source)
return " ".join(options)
Concatenates all the options for the sphinx command line.
Concatenates all the options for the sphinx command line.
[ "Concatenates", "all", "the", "options", "for", "the", "sphinx", "command", "line", "." ]
def _get_sphinxoptions(env, target, source): """Concatenates all the options for the sphinx command line.""" options = [] builder = _get_sphinxbuilder(env) options.append("-b %s" % env.subst(builder, target=target, source=source)) flags = env.get('options', env.get('SPHINXFLAGS', '')) options.append(env.subst(flags, target=target, source=source)) tags = env.get('tags', env.get('SPHINXTAGS', None)) if tags is not None: if not SCons.SCons.Util.is_List(tags): tags = [tags] for tag in tags: if tag != '': tag = env.subst(tag, target=target, source=source) options.append("-t %s" % tag) settings = env.get('settings', env.get('SPHINXSETTINGS', None)) if settings is not None: if not SCons.SCons.Util.is_Dict(settings): raise TypeError('SPHINXSETTINGS and/or settings argument must be a dictionary') for key, value in settings.iteritems(): if value != '': value = env.subst(value, target=target, source=source) options.append('-D "%s=%s"' % (key, value)) doctree = env.get('doctree', env.get("SPHINXDOCTREE", None)) if isinstance(doctree, SCons.Node.FS.Dir): options.append("-d %s" % doctree.get_abspath()) elif doctree is not None and doctree != '': doctree = env.subst(doctree, target=target, source=source) options.append("-d %s" % env.Dir(doctree).get_abspath()) config = _get_sphinxconfig_path(env, None) if config is not None and config != '': config = env.subst(config, target=target, source=source) options.append("-c %s" % env.Dir(config).File('conf.py').rfile().dir.get_abspath()) return " ".join(options)
[ "def", "_get_sphinxoptions", "(", "env", ",", "target", ",", "source", ")", ":", "options", "=", "[", "]", "builder", "=", "_get_sphinxbuilder", "(", "env", ")", "options", ".", "append", "(", "\"-b %s\"", "%", "env", ".", "subst", "(", "builder", ",", "target", "=", "target", ",", "source", "=", "source", ")", ")", "flags", "=", "env", ".", "get", "(", "'options'", ",", "env", ".", "get", "(", "'SPHINXFLAGS'", ",", "''", ")", ")", "options", ".", "append", "(", "env", ".", "subst", "(", "flags", ",", "target", "=", "target", ",", "source", "=", "source", ")", ")", "tags", "=", "env", ".", "get", "(", "'tags'", ",", "env", ".", "get", "(", "'SPHINXTAGS'", ",", "None", ")", ")", "if", "tags", "is", "not", "None", ":", "if", "not", "SCons", ".", "SCons", ".", "Util", ".", "is_List", "(", "tags", ")", ":", "tags", "=", "[", "tags", "]", "for", "tag", "in", "tags", ":", "if", "tag", "!=", "''", ":", "tag", "=", "env", ".", "subst", "(", "tag", ",", "target", "=", "target", ",", "source", "=", "source", ")", "options", ".", "append", "(", "\"-t %s\"", "%", "tag", ")", "settings", "=", "env", ".", "get", "(", "'settings'", ",", "env", ".", "get", "(", "'SPHINXSETTINGS'", ",", "None", ")", ")", "if", "settings", "is", "not", "None", ":", "if", "not", "SCons", ".", "SCons", ".", "Util", ".", "is_Dict", "(", "settings", ")", ":", "raise", "TypeError", "(", "'SPHINXSETTINGS and/or settings argument must be a dictionary'", ")", "for", "key", ",", "value", "in", "settings", ".", "iteritems", "(", ")", ":", "if", "value", "!=", "''", ":", "value", "=", "env", ".", "subst", "(", "value", ",", "target", "=", "target", ",", "source", "=", "source", ")", "options", ".", "append", "(", "'-D \"%s=%s\"'", "%", "(", "key", ",", "value", ")", ")", "doctree", "=", "env", ".", "get", "(", "'doctree'", ",", "env", ".", "get", "(", "\"SPHINXDOCTREE\"", ",", "None", ")", ")", "if", "isinstance", "(", "doctree", ",", "SCons", ".", "Node", ".", "FS", ".", "Dir", ")", ":", "options", ".", "append", "(", "\"-d %s\"", "%", "doctree", ".", "get_abspath", "(", ")", ")", "elif", "doctree", "is", "not", "None", "and", "doctree", "!=", "''", ":", "doctree", "=", "env", ".", "subst", "(", "doctree", ",", "target", "=", "target", ",", "source", "=", "source", ")", "options", ".", "append", "(", "\"-d %s\"", "%", "env", ".", "Dir", "(", "doctree", ")", ".", "get_abspath", "(", ")", ")", "config", "=", "_get_sphinxconfig_path", "(", "env", ",", "None", ")", "if", "config", "is", "not", "None", "and", "config", "!=", "''", ":", "config", "=", "env", ".", "subst", "(", "config", ",", "target", "=", "target", ",", "source", "=", "source", ")", "options", ".", "append", "(", "\"-c %s\"", "%", "env", ".", "Dir", "(", "config", ")", ".", "File", "(", "'conf.py'", ")", ".", "rfile", "(", ")", ".", "dir", ".", "get_abspath", "(", ")", ")", "return", "\" \"", ".", "join", "(", "options", ")" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/boost_1_66_0/libs/python/config/tools/sphinx4scons.py#L142-L181
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py2/scipy/integrate/quadpack.py
python
dblquad
(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8)
return nquad(func, [temp_ranges, [a, b]], args=args, opts={"epsabs": epsabs, "epsrel": epsrel})
Compute a double integral. Return the double (definite) integral of ``func(y, x)`` from ``x = a..b`` and ``y = gfun(x)..hfun(x)``. Parameters ---------- func : callable A Python function or method of at least two variables: y must be the first argument and x the second argument. a, b : float The limits of integration in x: `a` < `b` gfun : callable or float The lower boundary curve in y which is a function taking a single floating point argument (x) and returning a floating point result or a float indicating a constant boundary curve. hfun : callable or float The upper boundary curve in y (same requirements as `gfun`). args : sequence, optional Extra arguments to pass to `func`. epsabs : float, optional Absolute tolerance passed directly to the inner 1-D quadrature integration. Default is 1.49e-8. epsrel : float, optional Relative tolerance of the inner 1-D integrals. Default is 1.49e-8. Returns ------- y : float The resultant integral. abserr : float An estimate of the error. See also -------- quad : single integral tplquad : triple integral nquad : N-dimensional integrals fixed_quad : fixed-order Gaussian quadrature quadrature : adaptive Gaussian quadrature odeint : ODE integrator ode : ODE integrator simps : integrator for sampled data romb : integrator for sampled data scipy.special : for coefficients and roots of orthogonal polynomials Examples -------- Compute the double integral of ``x * y**2`` over the box ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1. >>> from scipy import integrate >>> f = lambda y, x: x*y**2 >>> integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1) (0.6666666666666667, 7.401486830834377e-15)
Compute a double integral.
[ "Compute", "a", "double", "integral", "." ]
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8): """ Compute a double integral. Return the double (definite) integral of ``func(y, x)`` from ``x = a..b`` and ``y = gfun(x)..hfun(x)``. Parameters ---------- func : callable A Python function or method of at least two variables: y must be the first argument and x the second argument. a, b : float The limits of integration in x: `a` < `b` gfun : callable or float The lower boundary curve in y which is a function taking a single floating point argument (x) and returning a floating point result or a float indicating a constant boundary curve. hfun : callable or float The upper boundary curve in y (same requirements as `gfun`). args : sequence, optional Extra arguments to pass to `func`. epsabs : float, optional Absolute tolerance passed directly to the inner 1-D quadrature integration. Default is 1.49e-8. epsrel : float, optional Relative tolerance of the inner 1-D integrals. Default is 1.49e-8. Returns ------- y : float The resultant integral. abserr : float An estimate of the error. See also -------- quad : single integral tplquad : triple integral nquad : N-dimensional integrals fixed_quad : fixed-order Gaussian quadrature quadrature : adaptive Gaussian quadrature odeint : ODE integrator ode : ODE integrator simps : integrator for sampled data romb : integrator for sampled data scipy.special : for coefficients and roots of orthogonal polynomials Examples -------- Compute the double integral of ``x * y**2`` over the box ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1. >>> from scipy import integrate >>> f = lambda y, x: x*y**2 >>> integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1) (0.6666666666666667, 7.401486830834377e-15) """ def temp_ranges(*args): return [gfun(args[0]) if callable(gfun) else gfun, hfun(args[0]) if callable(hfun) else hfun] return nquad(func, [temp_ranges, [a, b]], args=args, opts={"epsabs": epsabs, "epsrel": epsrel})
[ "def", "dblquad", "(", "func", ",", "a", ",", "b", ",", "gfun", ",", "hfun", ",", "args", "=", "(", ")", ",", "epsabs", "=", "1.49e-8", ",", "epsrel", "=", "1.49e-8", ")", ":", "def", "temp_ranges", "(", "*", "args", ")", ":", "return", "[", "gfun", "(", "args", "[", "0", "]", ")", "if", "callable", "(", "gfun", ")", "else", "gfun", ",", "hfun", "(", "args", "[", "0", "]", ")", "if", "callable", "(", "hfun", ")", "else", "hfun", "]", "return", "nquad", "(", "func", ",", "[", "temp_ranges", ",", "[", "a", ",", "b", "]", "]", ",", "args", "=", "args", ",", "opts", "=", "{", "\"epsabs\"", ":", "epsabs", ",", "\"epsrel\"", ":", "epsrel", "}", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/integrate/quadpack.py#L515-L581
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/propgrid.py
python
PropertyGridIteratorBase.Prev
(*args, **kwargs)
return _propgrid.PropertyGridIteratorBase_Prev(*args, **kwargs)
Prev(self)
Prev(self)
[ "Prev", "(", "self", ")" ]
def Prev(*args, **kwargs): """Prev(self)""" return _propgrid.PropertyGridIteratorBase_Prev(*args, **kwargs)
[ "def", "Prev", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_propgrid", ".", "PropertyGridIteratorBase_Prev", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/propgrid.py#L954-L956
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
demo/PropertyGrid.py
python
TrivialPropertyEditor.CreateControls
(self, propgrid, property, pos, sz)
Create the actual wxPython controls here for editing the property value. You must use propgrid.GetPanel() as parent for created controls. Return value is either single editor control or tuple of two editor controls, of which first is the primary one and second is usually a button.
Create the actual wxPython controls here for editing the property value.
[ "Create", "the", "actual", "wxPython", "controls", "here", "for", "editing", "the", "property", "value", "." ]
def CreateControls(self, propgrid, property, pos, sz): """ Create the actual wxPython controls here for editing the property value. You must use propgrid.GetPanel() as parent for created controls. Return value is either single editor control or tuple of two editor controls, of which first is the primary one and second is usually a button. """ try: x, y = pos w, h = sz h = 64 + 6 # Make room for button bw = propgrid.GetRowHeight() w -= bw s = property.GetDisplayedString(); tc = wx.TextCtrl(propgrid.GetPanel(), wxpg.PG_SUBID1, s, (x,y), (w,h), wx.TE_PROCESS_ENTER) btn = wx.Button(propgrid.GetPanel(), wxpg.PG_SUBID2, '...', (x+w, y), (bw, h), wx.WANTS_CHARS) return (tc, btn) except: import traceback print(traceback.print_exc())
[ "def", "CreateControls", "(", "self", ",", "propgrid", ",", "property", ",", "pos", ",", "sz", ")", ":", "try", ":", "x", ",", "y", "=", "pos", "w", ",", "h", "=", "sz", "h", "=", "64", "+", "6", "# Make room for button", "bw", "=", "propgrid", ".", "GetRowHeight", "(", ")", "w", "-=", "bw", "s", "=", "property", ".", "GetDisplayedString", "(", ")", "tc", "=", "wx", ".", "TextCtrl", "(", "propgrid", ".", "GetPanel", "(", ")", ",", "wxpg", ".", "PG_SUBID1", ",", "s", ",", "(", "x", ",", "y", ")", ",", "(", "w", ",", "h", ")", ",", "wx", ".", "TE_PROCESS_ENTER", ")", "btn", "=", "wx", ".", "Button", "(", "propgrid", ".", "GetPanel", "(", ")", ",", "wxpg", ".", "PG_SUBID2", ",", "'...'", ",", "(", "x", "+", "w", ",", "y", ")", ",", "(", "bw", ",", "h", ")", ",", "wx", ".", "WANTS_CHARS", ")", "return", "(", "tc", ",", "btn", ")", "except", ":", "import", "traceback", "print", "(", "traceback", ".", "print_exc", "(", ")", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/demo/PropertyGrid.py#L399-L429
MrMC/mrmc
5a8e460b2aec44f03eb9604cbd7681d4277dbb81
tools/EventClients/lib/python/xbmcclient.py
python
Packet.get_udp_message
(self, packetnum=1)
return header + payload
Construct the UDP message for the specified packetnum and return as string Keyword arguments: packetnum -- the packet no. for which to construct the message (default 1)
Construct the UDP message for the specified packetnum and return as string
[ "Construct", "the", "UDP", "message", "for", "the", "specified", "packetnum", "and", "return", "as", "string" ]
def get_udp_message(self, packetnum=1): """Construct the UDP message for the specified packetnum and return as string Keyword arguments: packetnum -- the packet no. for which to construct the message (default 1) """ if packetnum > self.num_packets() or packetnum < 1: return "" header = "" if packetnum==1: header = self.get_header(self.packettype, packetnum, self.maxseq, self.get_payload_size(packetnum)) else: header = self.get_header(PT_BLOB, packetnum, self.maxseq, self.get_payload_size(packetnum)) payload = self.payload[ (packetnum-1) * MAX_PAYLOAD_SIZE : (packetnum-1) * MAX_PAYLOAD_SIZE+ self.get_payload_size(packetnum) ] return header + payload
[ "def", "get_udp_message", "(", "self", ",", "packetnum", "=", "1", ")", ":", "if", "packetnum", ">", "self", ".", "num_packets", "(", ")", "or", "packetnum", "<", "1", ":", "return", "\"\"", "header", "=", "\"\"", "if", "packetnum", "==", "1", ":", "header", "=", "self", ".", "get_header", "(", "self", ".", "packettype", ",", "packetnum", ",", "self", ".", "maxseq", ",", "self", ".", "get_payload_size", "(", "packetnum", ")", ")", "else", ":", "header", "=", "self", ".", "get_header", "(", "PT_BLOB", ",", "packetnum", ",", "self", ".", "maxseq", ",", "self", ".", "get_payload_size", "(", "packetnum", ")", ")", "payload", "=", "self", ".", "payload", "[", "(", "packetnum", "-", "1", ")", "*", "MAX_PAYLOAD_SIZE", ":", "(", "packetnum", "-", "1", ")", "*", "MAX_PAYLOAD_SIZE", "+", "self", ".", "get_payload_size", "(", "packetnum", ")", "]", "return", "header", "+", "payload" ]
https://github.com/MrMC/mrmc/blob/5a8e460b2aec44f03eb9604cbd7681d4277dbb81/tools/EventClients/lib/python/xbmcclient.py#L219-L240
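A hedged sketch of how `get_udp_message` is typically driven; `PacketHELO` and `ICON_NONE` are other names from `xbmcclient` (assumed here), and 9777 is the conventional EventServer port. `Packet.send()` performs the same loop internally:

```python
import socket

packet = PacketHELO("example client", ICON_NONE, "")  # assumed xbmcclient names
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for n in range(1, packet.num_packets() + 1):
    sock.sendto(packet.get_udp_message(n), ("127.0.0.1", 9777))
```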
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/contrib/graph_editor/subgraph.py
python
SubGraphView.remap_outputs
(self, new_output_indices)
return res
Remap the output of the subgraph. If the output of the original subgraph are [t0, t1, t2], remapping to [1,1,0] will create a new instance whose outputs is [t1, t1, t0]. Note that this is only modifying the view: the underlying tf.Graph is not affected. Args: new_output_indices: an iterable of integers representing a mapping between the old outputs and the new ones. This mapping can be under-complete and can have repetitions. Returns: A new modified instance of the original subgraph view with remapped outputs.
Remap the output of the subgraph.
[ "Remap", "the", "output", "of", "the", "subgraph", "." ]
def remap_outputs(self, new_output_indices): """Remap the output of the subgraph. If the output of the original subgraph are [t0, t1, t2], remapping to [1,1,0] will create a new instance whose outputs is [t1, t1, t0]. Note that this is only modifying the view: the underlying tf.Graph is not affected. Args: new_output_indices: an iterable of integers representing a mapping between the old outputs and the new ones. This mapping can be under-complete and can have repetitions. Returns: A new modified instance of the original subgraph view with remapped outputs. """ res = copy.copy(self) res._remap_outputs(new_output_indices) # pylint: disable=protected-access return res
[ "def", "remap_outputs", "(", "self", ",", "new_output_indices", ")", ":", "res", "=", "copy", ".", "copy", "(", "self", ")", "res", ".", "_remap_outputs", "(", "new_output_indices", ")", "# pylint: disable=protected-access", "return", "res" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/graph_editor/subgraph.py#L352-L371
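A hedged sketch of the view remapping above, using the `tf.contrib.graph_editor` front end from this snapshot; `op_a` and `op_b` stand for ops you already have in a graph:

```python
from tensorflow.contrib import graph_editor as ge

sgv = ge.sgv(op_a, op_b)             # a SubGraphView over existing ops (assumed)
sgv2 = sgv.remap_outputs([1, 1, 0])  # new outputs: [t1, t1, t0]
# sgv itself is unchanged: remapping returns a modified copy of the view
```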
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/ops/rnn.py
python
dynamic_rnn
(cell, inputs, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None)
Creates a recurrent neural network specified by RNNCell `cell`. Performs fully dynamic unrolling of `inputs`. `Inputs` may be a single `Tensor` where the maximum time is either the first or second dimension (see the parameter `time_major`). Alternatively, it may be a (possibly nested) tuple of Tensors, each of them having matching batch and time dimensions. The corresponding output is either a single `Tensor` having the same number of time steps and batch size, or a (possibly nested) tuple of such tensors, matching the nested structure of `cell.output_size`. The parameter `sequence_length` is optional and is used to copy-through state and zero-out outputs when past a batch element's sequence length. So it's more for correctness than performance. Args: cell: An instance of RNNCell. inputs: The RNN inputs. If `time_major == False` (default), this must be a `Tensor` of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If `time_major == True`, this must be a `Tensor` of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. This may also be a (possibly nested) tuple of Tensors satisfying this property. The first two dimensions must match across all the inputs, but otherwise the ranks and other shape components may differ. In this case, input to `cell` at each time-step will replicate the structure of these tuples, except for the time dimension (from which the time is taken). The input to `cell` at each time step will be a `Tensor` or (possibly nested) tuple of Tensors each with dimensions `[batch_size, ...]`. sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. Required if initial_state is not provided or RNN state has a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to "rnn". Returns: A pair (outputs, state) where: outputs: The RNN output `Tensor`. If time_major == False (default), this will be a `Tensor` shaped: `[batch_size, max_time, cell.output_size]`. If time_major == True, this will be a `Tensor` shaped: `[max_time, batch_size, cell.output_size]`. Note, if `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `outputs` will be a tuple having the same structure as `cell.output_size`, containing Tensors having shapes corresponding to the shape data in `cell.output_size`. state: The final state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If inputs is None or an empty list.
Creates a recurrent neural network specified by RNNCell `cell`.
[ "Creates", "a", "recurrent", "neural", "network", "specified", "by", "RNNCell", "cell", "." ]
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None): """Creates a recurrent neural network specified by RNNCell `cell`. Performs fully dynamic unrolling of `inputs`. `Inputs` may be a single `Tensor` where the maximum time is either the first or second dimension (see the parameter `time_major`). Alternatively, it may be a (possibly nested) tuple of Tensors, each of them having matching batch and time dimensions. The corresponding output is either a single `Tensor` having the same number of time steps and batch size, or a (possibly nested) tuple of such tensors, matching the nested structure of `cell.output_size`. The parameter `sequence_length` is optional and is used to copy-through state and zero-out outputs when past a batch element's sequence length. So it's more for correctness than performance. Args: cell: An instance of RNNCell. inputs: The RNN inputs. If `time_major == False` (default), this must be a `Tensor` of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If `time_major == True`, this must be a `Tensor` of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. This may also be a (possibly nested) tuple of Tensors satisfying this property. The first two dimensions must match across all the inputs, but otherwise the ranks and other shape components may differ. In this case, input to `cell` at each time-step will replicate the structure of these tuples, except for the time dimension (from which the time is taken). The input to `cell` at each time step will be a `Tensor` or (possibly nested) tuple of Tensors each with dimensions `[batch_size, ...]`. sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. Required if initial_state is not provided or RNN state has a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to "rnn". Returns: A pair (outputs, state) where: outputs: The RNN output `Tensor`. If time_major == False (default), this will be a `Tensor` shaped: `[batch_size, max_time, cell.output_size]`. If time_major == True, this will be a `Tensor` shaped: `[max_time, batch_size, cell.output_size]`. Note, if `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `outputs` will be a tuple having the same structure as `cell.output_size`, containing Tensors having shapes corresponding to the shape data in `cell.output_size`. state: The final state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If inputs is None or an empty list. """ if not _like_rnncell(cell): raise TypeError("cell must be an instance of RNNCell") # By default, time_major==False and inputs are batch-major: shaped # [batch, time, depth] # For internal calculations, we transpose to [time, batch, depth] flat_input = nest.flatten(inputs) if not time_major: # (B,T,D) => (T,B,D) flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input] flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input) parallel_iterations = parallel_iterations or 32 if sequence_length is not None: sequence_length = math_ops.to_int32(sequence_length) if sequence_length.get_shape().ndims not in (None, 1): raise ValueError( "sequence_length must be a vector of length batch_size, " "but saw shape: %s" % sequence_length.get_shape()) sequence_length = array_ops.identity( # Just to find it in the graph. sequence_length, name="sequence_length") # Create a new scope in which the caching device is either # determined by the parent scope, or is set to place the cached # Variable using the same placement as for the rest of the RNN. with vs.variable_scope(scope or "rnn") as varscope: if varscope.caching_device is None: varscope.set_caching_device(lambda op: op.device) batch_size = _best_effort_input_batch_size(flat_input) if initial_state is not None: state = initial_state else: if not dtype: raise ValueError("If there is no initial_state, you must give a dtype.") state = cell.zero_state(batch_size, dtype) def _assert_has_shape(x, shape): x_shape = array_ops.shape(x) packed_shape = array_ops.stack(shape) return control_flow_ops.Assert( math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), ["Expected shape for Tensor %s is " % x.name, packed_shape, " but saw shape: ", x_shape]) if sequence_length is not None: # Perform some shape validation with ops.control_dependencies( [_assert_has_shape(sequence_length, [batch_size])]): sequence_length = array_ops.identity( sequence_length, name="CheckSeqLen") inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input) (outputs, final_state) = _dynamic_rnn_loop( cell, inputs, state, parallel_iterations=parallel_iterations, swap_memory=swap_memory, sequence_length=sequence_length, dtype=dtype) # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth]. # If we are performing batch-major calculations, transpose output back # to shape [batch, time, depth] if not time_major: # (T,B,D) => (B,T,D) outputs = nest.map_structure(_transpose_batch_time, outputs) return (outputs, final_state)
[ "def", "dynamic_rnn", "(", "cell", ",", "inputs", ",", "sequence_length", "=", "None", ",", "initial_state", "=", "None", ",", "dtype", "=", "None", ",", "parallel_iterations", "=", "None", ",", "swap_memory", "=", "False", ",", "time_major", "=", "False", ",", "scope", "=", "None", ")", ":", "if", "not", "_like_rnncell", "(", "cell", ")", ":", "raise", "TypeError", "(", "\"cell must be an instance of RNNCell\"", ")", "# By default, time_major==False and inputs are batch-major: shaped", "# [batch, time, depth]", "# For internal calculations, we transpose to [time, batch, depth]", "flat_input", "=", "nest", ".", "flatten", "(", "inputs", ")", "if", "not", "time_major", ":", "# (B,T,D) => (T,B,D)", "flat_input", "=", "[", "ops", ".", "convert_to_tensor", "(", "input_", ")", "for", "input_", "in", "flat_input", "]", "flat_input", "=", "tuple", "(", "_transpose_batch_time", "(", "input_", ")", "for", "input_", "in", "flat_input", ")", "parallel_iterations", "=", "parallel_iterations", "or", "32", "if", "sequence_length", "is", "not", "None", ":", "sequence_length", "=", "math_ops", ".", "to_int32", "(", "sequence_length", ")", "if", "sequence_length", ".", "get_shape", "(", ")", ".", "ndims", "not", "in", "(", "None", ",", "1", ")", ":", "raise", "ValueError", "(", "\"sequence_length must be a vector of length batch_size, \"", "\"but saw shape: %s\"", "%", "sequence_length", ".", "get_shape", "(", ")", ")", "sequence_length", "=", "array_ops", ".", "identity", "(", "# Just to find it in the graph.", "sequence_length", ",", "name", "=", "\"sequence_length\"", ")", "# Create a new scope in which the caching device is either", "# determined by the parent scope, or is set to place the cached", "# Variable using the same placement as for the rest of the RNN.", "with", "vs", ".", "variable_scope", "(", "scope", "or", "\"rnn\"", ")", "as", "varscope", ":", "if", "varscope", ".", "caching_device", "is", "None", ":", "varscope", ".", "set_caching_device", "(", "lambda", "op", ":", "op", ".", "device", ")", "batch_size", "=", "_best_effort_input_batch_size", "(", "flat_input", ")", "if", "initial_state", "is", "not", "None", ":", "state", "=", "initial_state", "else", ":", "if", "not", "dtype", ":", "raise", "ValueError", "(", "\"If there is no initial_state, you must give a dtype.\"", ")", "state", "=", "cell", ".", "zero_state", "(", "batch_size", ",", "dtype", ")", "def", "_assert_has_shape", "(", "x", ",", "shape", ")", ":", "x_shape", "=", "array_ops", ".", "shape", "(", "x", ")", "packed_shape", "=", "array_ops", ".", "stack", "(", "shape", ")", "return", "control_flow_ops", ".", "Assert", "(", "math_ops", ".", "reduce_all", "(", "math_ops", ".", "equal", "(", "x_shape", ",", "packed_shape", ")", ")", ",", "[", "\"Expected shape for Tensor %s is \"", "%", "x", ".", "name", ",", "packed_shape", ",", "\" but saw shape: \"", ",", "x_shape", "]", ")", "if", "sequence_length", "is", "not", "None", ":", "# Perform some shape validation", "with", "ops", ".", "control_dependencies", "(", "[", "_assert_has_shape", "(", "sequence_length", ",", "[", "batch_size", "]", ")", "]", ")", ":", "sequence_length", "=", "array_ops", ".", "identity", "(", "sequence_length", ",", "name", "=", "\"CheckSeqLen\"", ")", "inputs", "=", "nest", ".", "pack_sequence_as", "(", "structure", "=", "inputs", ",", "flat_sequence", "=", "flat_input", ")", "(", "outputs", ",", "final_state", ")", "=", "_dynamic_rnn_loop", "(", "cell", ",", "inputs", ",", "state", ",", "parallel_iterations", "=", "parallel_iterations", ",", "swap_memory", "=", "swap_memory", ",", "sequence_length", "=", "sequence_length", ",", "dtype", "=", "dtype", ")", "# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].", "# If we are performing batch-major calculations, transpose output back", "# to shape [batch, time, depth]", "if", "not", "time_major", ":", "# (T,B,D) => (B,T,D)", "outputs", "=", "nest", ".", "map_structure", "(", "_transpose_batch_time", ",", "outputs", ")", "return", "(", "outputs", ",", "final_state", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/rnn.py#L443-L607
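Usage sketch for this TF 1.x snapshot (cell classes assumed under `tf.nn.rnn_cell`, as in contemporary releases):

```python
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 20, 8])  # [batch, time, depth]
cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=16)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: [batch, 20, 16]; state: the final LSTMStateTuple (c, h)
```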
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/engine/network.py
python
Network._insert_layers
(self, layers, relevant_nodes=None)
Inserts Layers into the Network after Network creation. This is only valid for Keras Graph Networks. Layers added via this function will be included in the `call` computation and `get_config` of this Network. They will not be added to the Network's outputs. Arguments: layers: Arbitrary nested structure of Layers. Layers must be reachable from one or more of the `keras.Input` Tensors that correspond to this Network's inputs. relevant_nodes: Nodes from the Layers that should be considered part of this Network. If `None`, all Nodes will be considered part of this Network. Raises: ValueError: If the layers depend on `Input`s not found in this Model.
Inserts Layers into the Network after Network creation.
[ "Inserts", "Layers", "into", "the", "Network", "after", "Network", "creation", "." ]
def _insert_layers(self, layers, relevant_nodes=None): """Inserts Layers into the Network after Network creation. This is only valid for Keras Graph Networks. Layers added via this function will be included in the `call` computation and `get_config` of this Network. They will not be added to the Network's outputs. Arguments: layers: Arbitrary nested structure of Layers. Layers must be reachable from one or more of the `keras.Input` Tensors that correspond to this Network's inputs. relevant_nodes: Nodes from the Layers that should be considered part of this Network. If `None`, all Nodes will be considered part of this Network. Raises: ValueError: If the layers depend on `Input`s not found in this Model. """ layers = nest.flatten(layers) tf_utils.assert_no_legacy_layers(layers) node_to_depth = {} for depth, nodes in self._nodes_by_depth.items(): node_to_depth.update({node: depth for node in nodes}) # The nodes of these Layers that are relevant to this Network. If not # provided, assume all Nodes are relevant if not relevant_nodes: relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers]) network_nodes = set(relevant_nodes + list(node_to_depth.keys())) def _get_min_depth(node): """Gets the minimum depth at which node can be computed.""" min_depth = 0 for layer, node_id, _, _ in node.iterate_inbound(include_arguments=True): inbound_node = layer._inbound_nodes[node_id] if inbound_node in node_to_depth: min_depth = min(min_depth, node_to_depth[inbound_node]) elif inbound_node not in network_nodes: continue else: # Previous relevant nodes haven't been processed yet. return None # New node is one shallower than its shallowest input. return min_depth - 1 # Insert nodes into `_nodes_by_depth` and other node attrs. unprocessed_nodes = copy.copy(relevant_nodes) i = 0 while unprocessed_nodes: i += 1 # Do a sanity check. This can occur if `Input`s from outside this Model # are being relied on. if i > 10000: raise ValueError('Layers could not be added due to missing ' 'dependencies.') node = unprocessed_nodes.pop(0) depth = _get_min_depth(node) if depth is None: # Defer until inbound nodes are processed. unprocessed_nodes.append(node) continue node_key = _make_node_key(node.outbound_layer.name, node.outbound_layer._inbound_nodes.index(node)) if node_key not in self._network_nodes: node_to_depth[node] = depth self._network_nodes.add(node_key) self._nodes_by_depth[depth].append(node) # Insert layers and update other layer attrs. layer_set = set(self._layers) for layer in layers: if layer not in layer_set: self._layers.append(layer) self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call) layer_set.add(layer)
[ "def", "_insert_layers", "(", "self", ",", "layers", ",", "relevant_nodes", "=", "None", ")", ":", "layers", "=", "nest", ".", "flatten", "(", "layers", ")", "tf_utils", ".", "assert_no_legacy_layers", "(", "layers", ")", "node_to_depth", "=", "{", "}", "for", "depth", ",", "nodes", "in", "self", ".", "_nodes_by_depth", ".", "items", "(", ")", ":", "node_to_depth", ".", "update", "(", "{", "node", ":", "depth", "for", "node", "in", "nodes", "}", ")", "# The nodes of these Layers that are relevant to this Network. If not", "# provided, assume all Nodes are relevant", "if", "not", "relevant_nodes", ":", "relevant_nodes", "=", "nest", ".", "flatten", "(", "[", "layer", ".", "_inbound_nodes", "for", "layer", "in", "layers", "]", ")", "network_nodes", "=", "set", "(", "relevant_nodes", "+", "list", "(", "node_to_depth", ".", "keys", "(", ")", ")", ")", "def", "_get_min_depth", "(", "node", ")", ":", "\"\"\"Gets the minimum depth at which node can be computed.\"\"\"", "min_depth", "=", "0", "for", "layer", ",", "node_id", ",", "_", ",", "_", "in", "node", ".", "iterate_inbound", "(", "include_arguments", "=", "True", ")", ":", "inbound_node", "=", "layer", ".", "_inbound_nodes", "[", "node_id", "]", "if", "inbound_node", "in", "node_to_depth", ":", "min_depth", "=", "min", "(", "min_depth", ",", "node_to_depth", "[", "inbound_node", "]", ")", "elif", "inbound_node", "not", "in", "network_nodes", ":", "continue", "else", ":", "# Previous relevant nodes haven't been processed yet.", "return", "None", "# New node is one shallower than its shallowest input.", "return", "min_depth", "-", "1", "# Insert nodes into `_nodes_by_depth` and other node attrs.", "unprocessed_nodes", "=", "copy", ".", "copy", "(", "relevant_nodes", ")", "i", "=", "0", "while", "unprocessed_nodes", ":", "i", "+=", "1", "# Do a sanity check. This can occur if `Input`s from outside this Model", "# are being relied on.", "if", "i", ">", "10000", ":", "raise", "ValueError", "(", "'Layers could not be added due to missing '", "'dependencies.'", ")", "node", "=", "unprocessed_nodes", ".", "pop", "(", "0", ")", "depth", "=", "_get_min_depth", "(", "node", ")", "if", "depth", "is", "None", ":", "# Defer until inbound nodes are processed.", "unprocessed_nodes", ".", "append", "(", "node", ")", "continue", "node_key", "=", "_make_node_key", "(", "node", ".", "outbound_layer", ".", "name", ",", "node", ".", "outbound_layer", ".", "_inbound_nodes", ".", "index", "(", "node", ")", ")", "if", "node_key", "not", "in", "self", ".", "_network_nodes", ":", "node_to_depth", "[", "node", "]", "=", "depth", "self", ".", "_network_nodes", ".", "add", "(", "node_key", ")", "self", ".", "_nodes_by_depth", "[", "depth", "]", ".", "append", "(", "node", ")", "# Insert layers and update other layer attrs.", "layer_set", "=", "set", "(", "self", ".", "_layers", ")", "for", "layer", "in", "layers", ":", "if", "layer", "not", "in", "layer_set", ":", "self", ".", "_layers", ".", "append", "(", "layer", ")", "self", ".", "_layer_call_argspecs", "[", "layer", "]", "=", "tf_inspect", ".", "getfullargspec", "(", "layer", ".", "call", ")", "layer_set", ".", "add", "(", "layer", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/engine/network.py#L1519-L1593
vtraag/louvain-igraph
124ea1be49ee74eec2eaca8006599d7fc5560db6
src/louvain/VertexPartition.py
python
MutableVertexPartition.__init__
(self, graph, initial_membership=None)
Parameters ---------- graph The `ig.Graph` on which this partition is defined. membership The membership vector of this partition. Membership[i] = c implies that node i is in community c. If None, it is initialised with a singleton partition community, i.e. membership[i] = i.
Parameters ---------- graph The `ig.Graph` on which this partition is defined.
[ "Parameters", "----------", "graph", "The", "ig", ".", "Graph", "on", "which", "this", "partition", "is", "defined", "." ]
def __init__(self, graph, initial_membership=None): """ Parameters ---------- graph The `ig.Graph` on which this partition is defined. membership The membership vector of this partition. Membership[i] = c implies that node i is in community c. If None, it is initialised with a singleton partition community, i.e. membership[i] = i. """ if initial_membership is not None: initial_membership = list(initial_membership) super(MutableVertexPartition, self).__init__(graph, initial_membership)
[ "def", "__init__", "(", "self", ",", "graph", ",", "initial_membership", "=", "None", ")", ":", "if", "initial_membership", "is", "not", "None", ":", "initial_membership", "=", "list", "(", "initial_membership", ")", "super", "(", "MutableVertexPartition", ",", "self", ")", ".", "__init__", "(", "graph", ",", "initial_membership", ")" ]
https://github.com/vtraag/louvain-igraph/blob/124ea1be49ee74eec2eaca8006599d7fc5560db6/src/louvain/VertexPartition.py#L37-L52
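Sketch via the package's public API; `ModularityVertexPartition` is a concrete subclass of `MutableVertexPartition`:

```python
import igraph as ig
import louvain

g = ig.Graph.Famous("Zachary")
part = louvain.find_partition(g, louvain.ModularityVertexPartition)
print(part.membership)  # community id per vertex
```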
panda3d/panda3d
833ad89ebad58395d0af0b7ec08538e5e4308265
direct/src/showbase/ShowBase.py
python
ShowBase.setSceneGraphAnalyzerMeter
(self, flag)
Turns on or off (according to flag) a standard scene graph analyzer meter in the upper-right corner of the main window.
Turns on or off (according to flag) a standard scene graph analyzer meter in the upper-right corner of the main window.
[ "Turns", "on", "or", "off", "(", "according", "to", "flag", ")", "a", "standard", "scene", "graph", "analyzer", "meter", "in", "the", "upper", "-", "right", "corner", "of", "the", "main", "window", "." ]
def setSceneGraphAnalyzerMeter(self, flag): """ Turns on or off (according to flag) a standard scene graph analyzer meter in the upper-right corner of the main window. """ if flag: if not self.sceneGraphAnalyzerMeter: self.sceneGraphAnalyzerMeter = SceneGraphAnalyzerMeter('sceneGraphAnalyzerMeter', self.render.node()) self.sceneGraphAnalyzerMeter.setupWindow(self.win) else: if self.sceneGraphAnalyzerMeter: self.sceneGraphAnalyzerMeter.clearWindow() self.sceneGraphAnalyzerMeter = None
[ "def", "setSceneGraphAnalyzerMeter", "(", "self", ",", "flag", ")", ":", "if", "flag", ":", "if", "not", "self", ".", "sceneGraphAnalyzerMeter", ":", "self", ".", "sceneGraphAnalyzerMeter", "=", "SceneGraphAnalyzerMeter", "(", "'sceneGraphAnalyzerMeter'", ",", "self", ".", "render", ".", "node", "(", ")", ")", "self", ".", "sceneGraphAnalyzerMeter", ".", "setupWindow", "(", "self", ".", "win", ")", "else", ":", "if", "self", ".", "sceneGraphAnalyzerMeter", ":", "self", ".", "sceneGraphAnalyzerMeter", ".", "clearWindow", "(", ")", "self", ".", "sceneGraphAnalyzerMeter", "=", "None" ]
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/showbase/ShowBase.py#L1126-L1138
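A short sketch of how this toggle is typically driven, assuming a standard windowed Panda3D application.

```python
# A minimal sketch, assuming a standard windowed Panda3D application.
from direct.showbase.ShowBase import ShowBase

base = ShowBase()
base.setSceneGraphAnalyzerMeter(True)   # create the overlay on the main window
# Passing False later clears the window and drops the meter, as shown above.
base.run()
```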
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/multiprocessing/connection.py
python
SocketClient
(address)
return conn
Return a connection object connected to the socket given by `address`
Return a connection object connected to the socket given by `address`
[ "Return", "a", "connection", "object", "connected", "to", "the", "socket", "given", "by", "address" ]
def SocketClient(address): ''' Return a connection object connected to the socket given by `address` ''' family = address_type(address) s = socket.socket( getattr(socket, family) ) s.setblocking(True) t = _init_timeout() while 1: try: s.connect(address) except socket.error, e: if e.args[0] != errno.ECONNREFUSED or _check_timeout(t): debug('failed to connect to address %s', address) raise time.sleep(0.01) else: break else: raise fd = duplicate(s.fileno()) conn = _multiprocessing.Connection(fd) s.close() return conn
[ "def", "SocketClient", "(", "address", ")", ":", "family", "=", "address_type", "(", "address", ")", "s", "=", "socket", ".", "socket", "(", "getattr", "(", "socket", ",", "family", ")", ")", "s", ".", "setblocking", "(", "True", ")", "t", "=", "_init_timeout", "(", ")", "while", "1", ":", "try", ":", "s", ".", "connect", "(", "address", ")", "except", "socket", ".", "error", ",", "e", ":", "if", "e", ".", "args", "[", "0", "]", "!=", "errno", ".", "ECONNREFUSED", "or", "_check_timeout", "(", "t", ")", ":", "debug", "(", "'failed to connect to address %s'", ",", "address", ")", "raise", "time", ".", "sleep", "(", "0.01", ")", "else", ":", "break", "else", ":", "raise", "fd", "=", "duplicate", "(", "s", ".", "fileno", "(", ")", ")", "conn", "=", "_multiprocessing", ".", "Connection", "(", "fd", ")", "s", ".", "close", "(", ")", "return", "conn" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/multiprocessing/connection.py#L286-L311
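`SocketClient` is the internal helper behind the public `multiprocessing.connection.Client`; the following self-contained sketch (modern Python spelling, while the record above is Python 2) shows the round trip it enables.

```python
# A minimal echo round trip through the public wrappers over SocketClient.
import threading
from multiprocessing.connection import Client, Listener

listener = Listener(('localhost', 0))   # bind first; port 0 picks a free port

def serve():
    with listener.accept() as conn:
        conn.send(conn.recv())          # echo one message back

server = threading.Thread(target=serve)
server.start()

# Client() resolves the address family and, for socket addresses, delegates
# to SocketClient(), whose retry loop is shown in the record above.
with Client(listener.address) as conn:
    conn.send('ping')
    print(conn.recv())                  # -> 'ping'

server.join()
listener.close()
```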
mamedev/mame
02cd26d37ee11191f3e311e19e805d872cb1e3a4
3rdparty/portmidi/pm_python/pyportmidi/midi.py
python
Input.close
(self)
closes a midi stream, flushing any pending buffers. Input.close(): return None PortMidi attempts to close open streams when the application exits -- this is particularly difficult under Windows.
closes a midi stream, flushing any pending buffers. Input.close(): return None
[ "closes", "a", "midi", "stream", "flushing", "any", "pending", "buffers", ".", "Input", ".", "close", "()", ":", "return", "None" ]
def close(self): """ closes a midi stream, flushing any pending buffers. Input.close(): return None PortMidi attempts to close open streams when the application exits -- this is particularly difficult under Windows. """ _check_init() if not (self._input is None): self._input.Close() self._input = None
[ "def", "close", "(", "self", ")", ":", "_check_init", "(", ")", "if", "not", "(", "self", ".", "_input", "is", "None", ")", ":", "self", ".", "_input", ".", "Close", "(", ")", "self", ".", "_input", "=", "None" ]
https://github.com/mamedev/mame/blob/02cd26d37ee11191f3e311e19e805d872cb1e3a4/3rdparty/portmidi/pm_python/pyportmidi/midi.py#L257-L267
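A sketch of the open/read/close life cycle around `Input.close`; the `import pyportmidi as midi` path and device id 0 are assumptions, and the same calls exist under `pygame.midi`.

```python
# A minimal sketch; the import path and device id 0 are assumptions.
import pyportmidi as midi

midi.init()
try:
    inp = midi.Input(0)          # open the first input device
    if inp.poll():               # any events waiting?
        print(inp.read(10))      # read up to 10 events
    inp.close()                  # flush pending buffers, as documented above
finally:
    midi.quit()
```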
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/protobuf/py3/google/protobuf/descriptor.py
python
Descriptor.EnumValueName
(self, enum, value)
return self.enum_types_by_name[enum].values_by_number[value].name
Returns the string name of an enum value. This is just a small helper method to simplify a common operation. Args: enum: string name of the Enum. value: int, value of the enum. Returns: string name of the enum value. Raises: KeyError if either the Enum doesn't exist or the value is not a valid value for the enum.
Returns the string name of an enum value.
[ "Returns", "the", "string", "name", "of", "an", "enum", "value", "." ]
def EnumValueName(self, enum, value): """Returns the string name of an enum value. This is just a small helper method to simplify a common operation. Args: enum: string name of the Enum. value: int, value of the enum. Returns: string name of the enum value. Raises: KeyError if either the Enum doesn't exist or the value is not a valid value for the enum. """ return self.enum_types_by_name[enum].values_by_number[value].name
[ "def", "EnumValueName", "(", "self", ",", "enum", ",", "value", ")", ":", "return", "self", ".", "enum_types_by_name", "[", "enum", "]", ".", "values_by_number", "[", "value", "]", ".", "name" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/protobuf/py3/google/protobuf/descriptor.py#L382-L398
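A sketch of the documented lookup and `KeyError` behaviour; `search_pb2` is a hypothetical generated module standing in for any message that nests an enum.

```python
# `search_pb2` is a hypothetical generated module whose Request message
# nests `enum Corpus { UNIVERSAL = 0; WEB = 1; }`.
import search_pb2

desc = search_pb2.Request.DESCRIPTOR
print(desc.EnumValueName('Corpus', 1))       # -> 'WEB'

try:
    desc.EnumValueName('Corpus', 42)         # unknown value
except KeyError as err:
    print('no such enum value:', err)        # raises, as documented above
```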
sdhash/sdhash
b9eff63e4e5867e910f41fd69032bbb1c94a2a5e
external/tools/build/v2/tools/common.py
python
reset
()
Clear the module state. This is mainly for testing purposes. Note that this must be called _after_ resetting the module 'feature'.
Clear the module state. This is mainly for testing purposes. Note that this must be called _after_ resetting the module 'feature'.
[ "Clear", "the", "module", "state", ".", "This", "is", "mainly", "for", "testing", "purposes", ".", "Note", "that", "this", "must", "be", "called", "_after_", "resetting", "the", "module", "feature", "." ]
def reset (): """ Clear the module state. This is mainly for testing purposes. Note that this must be called _after_ resetting the module 'feature'. """ global __had_unspecified_value, __had_value, __declared_subfeature global __init_loc global __all_signatures, __debug_configuration, __show_configuration # Stores toolsets without specified initialization values. __had_unspecified_value = {} # Stores toolsets with specified initialization values. __had_value = {} # Stores toolsets with declared subfeatures. __declared_subfeature = {} # Stores all signatures of the toolsets. __all_signatures = {} # Stores the initialization locations of each toolset __init_loc = {} __debug_configuration = '--debug-configuration' in bjam.variable('ARGV') __show_configuration = '--show-configuration' in bjam.variable('ARGV') global __executable_path_variable OS = bjam.call("peek", [], "OS")[0] if OS == "NT": # On Windows the case and capitalization of PATH is not always predictable, so # let's find out what variable name was really set. for n in sys.environ: if n.lower() == "path": __executable_path_variable = n break else: __executable_path_variable = "PATH" m = {"NT": __executable_path_variable, "CYGWIN": "PATH", "MACOSX": "DYLD_LIBRARY_PATH", "AIX": "LIBPATH"} global __shared_library_path_variable __shared_library_path_variable = m.get(OS, "LD_LIBRARY_PATH")
[ "def", "reset", "(", ")", ":", "global", "__had_unspecified_value", ",", "__had_value", ",", "__declared_subfeature", "global", "__init_loc", "global", "__all_signatures", ",", "__debug_configuration", ",", "__show_configuration", "# Stores toolsets without specified initialization values.", "__had_unspecified_value", "=", "{", "}", "# Stores toolsets with specified initialization values.", "__had_value", "=", "{", "}", "# Stores toolsets with declared subfeatures.", "__declared_subfeature", "=", "{", "}", "# Stores all signatures of the toolsets.", "__all_signatures", "=", "{", "}", "# Stores the initialization locations of each toolset", "__init_loc", "=", "{", "}", "__debug_configuration", "=", "'--debug-configuration'", "in", "bjam", ".", "variable", "(", "'ARGV'", ")", "__show_configuration", "=", "'--show-configuration'", "in", "bjam", ".", "variable", "(", "'ARGV'", ")", "global", "__executable_path_variable", "OS", "=", "bjam", ".", "call", "(", "\"peek\"", ",", "[", "]", ",", "\"OS\"", ")", "[", "0", "]", "if", "OS", "==", "\"NT\"", ":", "# On Windows the case and capitalization of PATH is not always predictable, so", "# let's find out what variable name was really set.", "for", "n", "in", "sys", ".", "environ", ":", "if", "n", ".", "lower", "(", ")", "==", "\"path\"", ":", "__executable_path_variable", "=", "n", "break", "else", ":", "__executable_path_variable", "=", "\"PATH\"", "m", "=", "{", "\"NT\"", ":", "__executable_path_variable", ",", "\"CYGWIN\"", ":", "\"PATH\"", ",", "\"MACOSX\"", ":", "\"DYLD_LIBRARY_PATH\"", ",", "\"AIX\"", ":", "\"LIBPATH\"", "}", "global", "__shared_library_path_variable", "__shared_library_path_variable", "=", "m", ".", "get", "(", "OS", ",", "\"LD_LIBRARY_PATH\"", ")" ]
https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/external/tools/build/v2/tools/common.py#L25-L68
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/summary/writer/writer.py
python
SummaryToEventTransformer.add_summary
(self, summary, global_step=None)
Adds a `Summary` protocol buffer to the event file. This method wraps the provided summary in an `Event` protocol buffer and adds it to the event file. You can pass the result of evaluating any summary op, using @{tf.Session.run} or @{tf.Tensor.eval}, to this function. Alternatively, you can pass a `tf.Summary` protocol buffer that you populate with your own data. The latter is commonly done to report evaluation results in event files. Args: summary: A `Summary` protocol buffer, optionally serialized as a string. global_step: Number. Optional global step value to record with the summary.
Adds a `Summary` protocol buffer to the event file.
[ "Adds", "a", "Summary", "protocol", "buffer", "to", "the", "event", "file", "." ]
def add_summary(self, summary, global_step=None): """Adds a `Summary` protocol buffer to the event file. This method wraps the provided summary in an `Event` protocol buffer and adds it to the event file. You can pass the result of evaluating any summary op, using @{tf.Session.run} or @{tf.Tensor.eval}, to this function. Alternatively, you can pass a `tf.Summary` protocol buffer that you populate with your own data. The latter is commonly done to report evaluation results in event files. Args: summary: A `Summary` protocol buffer, optionally serialized as a string. global_step: Number. Optional global step value to record with the summary. """ if isinstance(summary, bytes): summ = summary_pb2.Summary() summ.ParseFromString(summary) summary = summ # We strip metadata from values with tags that we have seen before in order # to save space - we just store the metadata on the first value with a # specific tag. for value in summary.value: if not value.metadata: continue if value.tag in self._seen_summary_tags: # This tag has been encountered before. Strip the metadata. value.ClearField("metadata") continue # We encounter a value with a tag we have not encountered previously. And # it has metadata. Remember to strip metadata from future values with this # tag string. self._seen_summary_tags.add(value.tag) event = event_pb2.Event(summary=summary) self._add_event(event, global_step)
[ "def", "add_summary", "(", "self", ",", "summary", ",", "global_step", "=", "None", ")", ":", "if", "isinstance", "(", "summary", ",", "bytes", ")", ":", "summ", "=", "summary_pb2", ".", "Summary", "(", ")", "summ", ".", "ParseFromString", "(", "summary", ")", "summary", "=", "summ", "# We strip metadata from values with tags that we have seen before in order", "# to save space - we just store the metadata on the first value with a", "# specific tag.", "for", "value", "in", "summary", ".", "value", ":", "if", "not", "value", ".", "metadata", ":", "continue", "if", "value", ".", "tag", "in", "self", ".", "_seen_summary_tags", ":", "# This tag has been encountered before. Strip the metadata.", "value", ".", "ClearField", "(", "\"metadata\"", ")", "continue", "# We encounter a value with a tag we have not encountered previously. And", "# it has metadata. Remember to strip metadata from future values with this", "# tag string.", "self", ".", "_seen_summary_tags", ".", "add", "(", "value", ".", "tag", ")", "event", "=", "event_pb2", ".", "Event", "(", "summary", "=", "summary", ")", "self", ".", "_add_event", "(", "event", ",", "global_step", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/summary/writer/writer.py#L97-L138
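A TF 1.x-style sketch that feeds `add_summary` the serialized bytes produced by `Session.run`; the log directory `/tmp/logs` is a placeholder.

```python
# A minimal TF 1.x sketch; '/tmp/logs' is a placeholder log directory.
import tensorflow as tf

x = tf.placeholder(tf.float32, name='x')
tf.summary.scalar('x_value', x)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
    for step in range(3):
        summ = sess.run(merged, feed_dict={x: float(step)})
        # `summ` is a serialized Summary proto (bytes); add_summary()
        # parses it and wraps it in an Event, as shown above.
        writer.add_summary(summ, global_step=step)
    writer.close()
```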
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
python
_Stream.tell
(self)
return self.pos
Return the stream's file pointer position.
Return the stream's file pointer position.
[ "Return", "the", "stream", "s", "file", "pointer", "position", "." ]
def tell(self): """Return the stream's file pointer position. """ return self.pos
[ "def", "tell", "(", "self", ")", ":", "return", "self", ".", "pos" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L547-L550
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/array_ops.py
python
expand_dims
(input, axis=None, name=None, dim=None)
return expand_dims_v2(input, axis, name)
Inserts a dimension of 1 into a tensor's shape. Given a tensor `input`, this operation inserts a dimension of 1 at the dimension index `axis` of `input`'s shape. The dimension index `axis` starts at zero; if you specify a negative number for `axis` it is counted backward from the end. This operation is useful if you want to add a batch dimension to a single element. For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, which will make the shape `[1, height, width, channels]`. Other examples: ```python # 't' is a tensor of shape [2] tf.shape(tf.expand_dims(t, 0)) # [1, 2] tf.shape(tf.expand_dims(t, 1)) # [2, 1] tf.shape(tf.expand_dims(t, -1)) # [2, 1] # 't2' is a tensor of shape [2, 3, 5] tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5] tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5] tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1] ``` This operation requires that: `-1-input.dims() <= dim <= input.dims()` This operation is related to `squeeze()`, which removes dimensions of size 1. Args: input: A `Tensor`. axis: 0-D (scalar). Specifies the dimension index at which to expand the shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`. name: The name of the output `Tensor` (optional). dim: 0-D (scalar). Equivalent to `axis`, to be deprecated. Returns: A `Tensor` with the same data as `input`, but its shape has an additional dimension of size 1 added. Raises: ValueError: if either both or neither of `dim` and `axis` are specified.
Inserts a dimension of 1 into a tensor's shape.
[ "Inserts", "a", "dimension", "of", "1", "into", "a", "tensor", "s", "shape", "." ]
def expand_dims(input, axis=None, name=None, dim=None): """Inserts a dimension of 1 into a tensor's shape. Given a tensor `input`, this operation inserts a dimension of 1 at the dimension index `axis` of `input`'s shape. The dimension index `axis` starts at zero; if you specify a negative number for `axis` it is counted backward from the end. This operation is useful if you want to add a batch dimension to a single element. For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, which will make the shape `[1, height, width, channels]`. Other examples: ```python # 't' is a tensor of shape [2] tf.shape(tf.expand_dims(t, 0)) # [1, 2] tf.shape(tf.expand_dims(t, 1)) # [2, 1] tf.shape(tf.expand_dims(t, -1)) # [2, 1] # 't2' is a tensor of shape [2, 3, 5] tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5] tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5] tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1] ``` This operation requires that: `-1-input.dims() <= dim <= input.dims()` This operation is related to `squeeze()`, which removes dimensions of size 1. Args: input: A `Tensor`. axis: 0-D (scalar). Specifies the dimension index at which to expand the shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`. name: The name of the output `Tensor` (optional). dim: 0-D (scalar). Equivalent to `axis`, to be deprecated. Returns: A `Tensor` with the same data as `input`, but its shape has an additional dimension of size 1 added. Raises: ValueError: if either both or neither of `dim` and `axis` are specified. """ axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim) if axis is None: raise ValueError("Must specify an axis argument to tf.expand_dims()") return expand_dims_v2(input, axis, name)
[ "def", "expand_dims", "(", "input", ",", "axis", "=", "None", ",", "name", "=", "None", ",", "dim", "=", "None", ")", ":", "axis", "=", "deprecation", ".", "deprecated_argument_lookup", "(", "\"axis\"", ",", "axis", ",", "\"dim\"", ",", "dim", ")", "if", "axis", "is", "None", ":", "raise", "ValueError", "(", "\"Must specify an axis argument to tf.expand_dims()\"", ")", "return", "expand_dims_v2", "(", "input", ",", "axis", ",", "name", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/array_ops.py#L214-L265
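A short sketch of the batch-dimension idiom the docstring highlights.

```python
# A minimal sketch of the batch-dimension idiom from the docstring.
import tensorflow as tf

image = tf.zeros([28, 28, 3])        # one image: [height, width, channels]
batch = tf.expand_dims(image, 0)     # -> [1, 28, 28, 3]
last = tf.expand_dims(image, -1)     # -> [28, 28, 3, 1]
print(batch.shape, last.shape)
```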
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/linalg/linear_operator_circulant.py
python
_BaseLinearOperatorCirculant.block_depth
(self)
return self._block_depth
Depth of recursively defined circulant blocks defining this `Operator`. With `A` the dense representation of this `Operator`, `block_depth = 1` means `A` is symmetric circulant. For example, ``` A = |w z y x| |x w z y| |y x w z| |z y x w| ``` `block_depth = 2` means `A` is block symmetric circulant with symmetric circulant blocks. For example, with `W`, `X`, `Y`, `Z` symmetric circulant, ``` A = |W Z Y X| |X W Z Y| |Y X W Z| |Z Y X W| ``` `block_depth = 3` means `A` is block symmetric circulant with block symmetric circulant blocks. Returns: Python `integer`.
Depth of recursively defined circulant blocks defining this `Operator`.
[ "Depth", "of", "recursively", "defined", "circulant", "blocks", "defining", "this", "Operator", "." ]
def block_depth(self): """Depth of recursively defined circulant blocks defining this `Operator`. With `A` the dense representation of this `Operator`, `block_depth = 1` means `A` is symmetric circulant. For example, ``` A = |w z y x| |x w z y| |y x w z| |z y x w| ``` `block_depth = 2` means `A` is block symmetric circulant with symmetric circulant blocks. For example, with `W`, `X`, `Y`, `Z` symmetric circulant, ``` A = |W Z Y X| |X W Z Y| |Y X W Z| |Z Y X W| ``` `block_depth = 3` means `A` is block symmetric circulant with block symmetric circulant blocks. Returns: Python `integer`. """ return self._block_depth
[ "def", "block_depth", "(", "self", ")", ":", "return", "self", ".", "_block_depth" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/linalg/linear_operator_circulant.py#L145-L175
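A sketch of how `block_depth` tracks the operator variant; the spectra are arbitrary example values.

```python
# A minimal sketch; the spectra are arbitrary example values.
import tensorflow as tf

op1 = tf.linalg.LinearOperatorCirculant(
    tf.constant([6., 4., 2.], dtype=tf.complex64))
print(op1.block_depth)   # 1: plain circulant

op2 = tf.linalg.LinearOperatorCirculant2D(
    tf.cast(tf.ones([3, 3]), tf.complex64))
print(op2.block_depth)   # 2: block circulant with circulant blocks
```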
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/build/waf-1.7.13/waflib/Tools/gnu_dirs.py
python
configure
(conf)
Read the command-line options to set lots of variables in *conf.env*. The variables BINDIR and LIBDIR will be overwritten.
Read the command-line options to set lots of variables in *conf.env*. The variables BINDIR and LIBDIR will be overwritten.
[ "Read", "the", "command", "-", "line", "options", "to", "set", "lots", "of", "variables", "in", "*", "conf", ".", "env", "*", ".", "The", "variables", "BINDIR", "and", "LIBDIR", "will", "be", "overwritten", "." ]
def configure(conf): """ Read the command-line options to set lots of variables in *conf.env*. The variables BINDIR and LIBDIR will be overwritten. """ def get_param(varname, default): return getattr(Options.options, varname, '') or default env = conf.env env.LIBDIR = env.BINDIR = [] env.EXEC_PREFIX = get_param('EXEC_PREFIX', env.PREFIX) env.PACKAGE = getattr(Context.g_module, 'APPNAME', None) or env.PACKAGE complete = False iter = 0 while not complete and iter < len(_options) + 1: iter += 1 complete = True for name, help, default in _options: name = name.upper() if not env[name]: try: env[name] = Utils.subst_vars(get_param(name, default).replace('/', os.sep), env) except TypeError: complete = False if not complete: lst = [name for name, _, _ in _options if not env[name.upper()]] raise conf.errors.WafError('Variable substitution failure %r' % lst)
[ "def", "configure", "(", "conf", ")", ":", "def", "get_param", "(", "varname", ",", "default", ")", ":", "return", "getattr", "(", "Options", ".", "options", ",", "varname", ",", "''", ")", "or", "default", "env", "=", "conf", ".", "env", "env", ".", "LIBDIR", "=", "env", ".", "BINDIR", "=", "[", "]", "env", ".", "EXEC_PREFIX", "=", "get_param", "(", "'EXEC_PREFIX'", ",", "env", ".", "PREFIX", ")", "env", ".", "PACKAGE", "=", "getattr", "(", "Context", ".", "g_module", ",", "'APPNAME'", ",", "None", ")", "or", "env", ".", "PACKAGE", "complete", "=", "False", "iter", "=", "0", "while", "not", "complete", "and", "iter", "<", "len", "(", "_options", ")", "+", "1", ":", "iter", "+=", "1", "complete", "=", "True", "for", "name", ",", "help", ",", "default", "in", "_options", ":", "name", "=", "name", ".", "upper", "(", ")", "if", "not", "env", "[", "name", "]", ":", "try", ":", "env", "[", "name", "]", "=", "Utils", ".", "subst_vars", "(", "get_param", "(", "name", ",", "default", ")", ".", "replace", "(", "'/'", ",", "os", ".", "sep", ")", ",", "env", ")", "except", "TypeError", ":", "complete", "=", "False", "if", "not", "complete", ":", "lst", "=", "[", "name", "for", "name", ",", "_", ",", "_", "in", "_options", "if", "not", "env", "[", "name", ".", "upper", "(", ")", "]", "]", "raise", "conf", ".", "errors", ".", "WafError", "(", "'Variable substitution failure %r'", "%", "lst", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/waflib/Tools/gnu_dirs.py#L70-L98
apple/swift-clang
d7403439fc6641751840b723e7165fb02f52db95
bindings/python/clang/cindex.py
python
Cursor.semantic_parent
(self)
return self._semantic_parent
Return the semantic parent for this cursor.
Return the semantic parent for this cursor.
[ "Return", "the", "semantic", "parent", "for", "this", "cursor", "." ]
def semantic_parent(self): """Return the semantic parent for this cursor.""" if not hasattr(self, '_semantic_parent'): self._semantic_parent = conf.lib.clang_getCursorSemanticParent(self) return self._semantic_parent
[ "def", "semantic_parent", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_semantic_parent'", ")", ":", "self", ".", "_semantic_parent", "=", "conf", ".", "lib", ".", "clang_getCursorSemanticParent", "(", "self", ")", "return", "self", ".", "_semantic_parent" ]
https://github.com/apple/swift-clang/blob/d7403439fc6641751840b723e7165fb02f52db95/bindings/python/clang/cindex.py#L1757-L1762
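A sketch of reading `semantic_parent` while walking a translation unit; it assumes libclang is discoverable by `clang.cindex`, and `demo.cpp` is an in-memory placeholder.

```python
# A minimal sketch; assumes libclang can be found by clang.cindex.
import clang.cindex

index = clang.cindex.Index.create()
tu = index.parse('demo.cpp', unsaved_files=[
    ('demo.cpp', 'namespace ns { struct S { void m(); }; }')])

for cur in tu.cursor.walk_preorder():
    if cur.kind == clang.cindex.CursorKind.CXX_METHOD:
        # For the method m(), the semantic parent is the struct S.
        print(cur.spelling, '<-', cur.semantic_parent.spelling)
```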
google/llvm-propeller
45c226984fe8377ebfb2ad7713c680d652ba678d
llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
python
generateKScript
(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
Generate a random Kaleidoscope script based on the given parameters
Generate a random Kaleidoscope script based on the given parameters
[ "Generate", "a", "random", "Kaleidoscope", "script", "based", "on", "the", "given", "parameters" ]
def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript): """ Generate a random Kaleidoscope script based on the given parameters """ print("Generating " + filename) print(" %d functions, %d elements per function, %d functions between execution" % (numFuncs, elementsPerFunc, funcsBetweenExec)) print(" Call weighting = %f" % callWeighting) script = KScriptGenerator(filename) script.setCallWeighting(callWeighting) script.writeComment("===========================================================================") script.writeComment("Auto-generated script") script.writeComment(" %d functions, %d elements per function, %d functions between execution" % (numFuncs, elementsPerFunc, funcsBetweenExec)) script.writeComment(" call weighting = %f" % callWeighting) script.writeComment("===========================================================================") script.writeEmptyLine() script.writePredefinedFunctions() funcsSinceLastExec = 0 for i in range(numFuncs): script.writeFunction(elementsPerFunc) funcsSinceLastExec += 1 if funcsSinceLastExec == funcsBetweenExec: script.writeFunctionCall() funcsSinceLastExec = 0 # Always end with a function call if funcsSinceLastExec > 0: script.writeFunctionCall() script.writeEmptyLine() script.writeFinalFunctionCounts() funcsCalled = len(script.calledFunctions) print(" Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted)) timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
[ "def", "generateKScript", "(", "filename", ",", "numFuncs", ",", "elementsPerFunc", ",", "funcsBetweenExec", ",", "callWeighting", ",", "timingScript", ")", ":", "print", "(", "\"Generating \"", "+", "filename", ")", "print", "(", "\" %d functions, %d elements per function, %d functions between execution\"", "%", "(", "numFuncs", ",", "elementsPerFunc", ",", "funcsBetweenExec", ")", ")", "print", "(", "\" Call weighting = %f\"", "%", "callWeighting", ")", "script", "=", "KScriptGenerator", "(", "filename", ")", "script", ".", "setCallWeighting", "(", "callWeighting", ")", "script", ".", "writeComment", "(", "\"===========================================================================\"", ")", "script", ".", "writeComment", "(", "\"Auto-generated script\"", ")", "script", ".", "writeComment", "(", "\" %d functions, %d elements per function, %d functions between execution\"", "%", "(", "numFuncs", ",", "elementsPerFunc", ",", "funcsBetweenExec", ")", ")", "script", ".", "writeComment", "(", "\" call weighting = %f\"", "%", "callWeighting", ")", "script", ".", "writeComment", "(", "\"===========================================================================\"", ")", "script", ".", "writeEmptyLine", "(", ")", "script", ".", "writePredefinedFunctions", "(", ")", "funcsSinceLastExec", "=", "0", "for", "i", "in", "range", "(", "numFuncs", ")", ":", "script", ".", "writeFunction", "(", "elementsPerFunc", ")", "funcsSinceLastExec", "+=", "1", "if", "funcsSinceLastExec", "==", "funcsBetweenExec", ":", "script", ".", "writeFunctionCall", "(", ")", "funcsSinceLastExec", "=", "0", "# Always end with a function call", "if", "funcsSinceLastExec", ">", "0", ":", "script", ".", "writeFunctionCall", "(", ")", "script", ".", "writeEmptyLine", "(", ")", "script", ".", "writeFinalFunctionCounts", "(", ")", "funcsCalled", "=", "len", "(", "script", ".", "calledFunctions", ")", "print", "(", "\" Called %d of %d functions, %d total\"", "%", "(", "funcsCalled", ",", "numFuncs", ",", "script", ".", "totalCallsExecuted", ")", ")", "timingScript", ".", "writeTimingCall", "(", "filename", ",", "numFuncs", ",", "funcsCalled", ",", "script", ".", "totalCallsExecuted", ")" ]
https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py#L176-L206
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
scripts/Python/static-binding/lldb.py
python
SBProcess.GetInterruptedFromEvent
(event)
return _lldb.SBProcess_GetInterruptedFromEvent(event)
GetInterruptedFromEvent(SBEvent event) -> bool
GetInterruptedFromEvent(SBEvent event) -> bool
[ "GetInterruptedFromEvent", "(", "SBEvent", "event", ")", "-", ">", "bool" ]
def GetInterruptedFromEvent(event): """GetInterruptedFromEvent(SBEvent event) -> bool""" return _lldb.SBProcess_GetInterruptedFromEvent(event)
[ "def", "GetInterruptedFromEvent", "(", "event", ")", ":", "return", "_lldb", ".", "SBProcess_GetInterruptedFromEvent", "(", "event", ")" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L8677-L8679
clementine-player/Clementine
111379dfd027802b59125829fcf87e3e1d0ad73b
dist/cpplint.py
python
NestingState.CheckCompletedBlocks
(self, filename, error)
Checks that all classes and namespaces have been completely parsed. Call this when all lines in a file have been processed. Args: filename: The name of the current file. error: The function to call with any errors found.
Checks that all classes and namespaces have been completely parsed.
[ "Checks", "that", "all", "classes", "and", "namespaces", "have", "been", "completely", "parsed", "." ]
def CheckCompletedBlocks(self, filename, error): """Checks that all classes and namespaces have been completely parsed. Call this when all lines in a file have been processed. Args: filename: The name of the current file. error: The function to call with any errors found. """ # Note: This test can result in false positives if #ifdef constructs # get in the way of brace matching. See the testBuildClass test in # cpplint_unittest.py for an example of this. for obj in self.stack: if isinstance(obj, _ClassInfo): error(filename, obj.starting_linenum, 'build/class', 5, 'Failed to find complete declaration of class %s' % obj.name) elif isinstance(obj, _NamespaceInfo): error(filename, obj.starting_linenum, 'build/namespaces', 5, 'Failed to find complete declaration of namespace %s' % obj.name)
[ "def", "CheckCompletedBlocks", "(", "self", ",", "filename", ",", "error", ")", ":", "# Note: This test can result in false positives if #ifdef constructs", "# get in the way of brace matching. See the testBuildClass test in", "# cpplint_unittest.py for an example of this.", "for", "obj", "in", "self", ".", "stack", ":", "if", "isinstance", "(", "obj", ",", "_ClassInfo", ")", ":", "error", "(", "filename", ",", "obj", ".", "starting_linenum", ",", "'build/class'", ",", "5", ",", "'Failed to find complete declaration of class %s'", "%", "obj", ".", "name", ")", "elif", "isinstance", "(", "obj", ",", "_NamespaceInfo", ")", ":", "error", "(", "filename", ",", "obj", ".", "starting_linenum", ",", "'build/namespaces'", ",", "5", ",", "'Failed to find complete declaration of namespace %s'", "%", "obj", ".", "name", ")" ]
https://github.com/clementine-player/Clementine/blob/111379dfd027802b59125829fcf87e3e1d0ad73b/dist/cpplint.py#L2486-L2505
MVIG-SJTU/RMPE
5188c230ec800c12be7369c3619615bc9b020aa4
python/caffe/io.py
python
arraylist_to_blobprotovector_str
(arraylist)
return vec.SerializeToString()
Converts a list of arrays to a serialized blobprotovec, which could then be passed to a network for processing.
Converts a list of arrays to a serialized blobprotovec, which could then be passed to a network for processing.
[ "Converts", "a", "list", "of", "arrays", "to", "a", "serialized", "blobprotovec", "which", "could", "then", "be", "passed", "to", "a", "network", "for", "processing", "." ]
def arraylist_to_blobprotovector_str(arraylist): """Converts a list of arrays to a serialized blobprotovec, which could then be passed to a network for processing. """ vec = caffe_pb2.BlobProtoVector() vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist]) return vec.SerializeToString()
[ "def", "arraylist_to_blobprotovector_str", "(", "arraylist", ")", ":", "vec", "=", "caffe_pb2", ".", "BlobProtoVector", "(", ")", "vec", ".", "blobs", ".", "extend", "(", "[", "array_to_blobproto", "(", "arr", ")", "for", "arr", "in", "arraylist", "]", ")", "return", "vec", ".", "SerializeToString", "(", ")" ]
https://github.com/MVIG-SJTU/RMPE/blob/5188c230ec800c12be7369c3619615bc9b020aa4/python/caffe/io.py#L49-L55
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_misc.py
python
Log.ClearTraceMasks
(*args, **kwargs)
return _misc_.Log_ClearTraceMasks(*args, **kwargs)
ClearTraceMasks()
ClearTraceMasks()
[ "ClearTraceMasks", "()" ]
def ClearTraceMasks(*args, **kwargs): """ClearTraceMasks()""" return _misc_.Log_ClearTraceMasks(*args, **kwargs)
[ "def", "ClearTraceMasks", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "Log_ClearTraceMasks", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_misc.py#L1570-L1572
intel/llvm
e6d0547e9d99b5a56430c4749f6c7e328bf221ab
lldb/examples/python/mach_o.py
python
TerminalColors.magenta
(self, fg=True)
return ''
Set the foreground or background color to magenta. The foreground color will be set if "fg" tests True. The background color will be set if "fg" tests False.
Set the foreground or background color to magenta. The foreground color will be set if "fg" tests True. The background color will be set if "fg" tests False.
[ "Set", "the", "foreground", "or", "background", "color", "to", "magenta", ".", "The", "foreground", "color", "will", "be", "set", "if", "fg", "tests", "True", ".", "The", "background", "color", "will", "be", "set", "if", "fg", "tests", "False", "." ]
def magenta(self, fg=True): '''Set the foreground or background color to magenta. The foreground color will be set if "fg" tests True. The background color will be set if "fg" tests False.''' if self.enabled: if fg: return "\x1b[35m" else: return "\x1b[45m" return ''
[ "def", "magenta", "(", "self", ",", "fg", "=", "True", ")", ":", "if", "self", ".", "enabled", ":", "if", "fg", ":", "return", "\"\\x1b[35m\"", "else", ":", "return", "\"\\x1b[45m\"", "return", "''" ]
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/lldb/examples/python/mach_o.py#L321-L329
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/font.py
python
families
(root=None, displayof=None)
return root.tk.splitlist(root.tk.call("font", "families", *args))
Get font families (as a tuple)
Get font families (as a tuple)
[ "Get", "font", "families", "(", "as", "a", "tuple", ")" ]
def families(root=None, displayof=None): "Get font families (as a tuple)" if not root: root = tkinter._default_root args = () if displayof: args = ('-displayof', displayof) return root.tk.splitlist(root.tk.call("font", "families", *args))
[ "def", "families", "(", "root", "=", "None", ",", "displayof", "=", "None", ")", ":", "if", "not", "root", ":", "root", "=", "tkinter", ".", "_default_root", "args", "=", "(", ")", "if", "displayof", ":", "args", "=", "(", "'-displayof'", ",", "displayof", ")", "return", "root", ".", "tk", ".", "splitlist", "(", "root", ".", "tk", ".", "call", "(", "\"font\"", ",", "\"families\"", ",", "*", "args", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/font.py#L177-L184
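A short sketch; a Tk root must exist before `families()` is called, which is why the helper falls back to `tkinter._default_root` when no root is passed.

```python
# A minimal sketch; requires a display so that a Tk root can be created.
import tkinter
import tkinter.font

root = tkinter.Tk()
fams = tkinter.font.families(root)   # tuple of family-name strings
print(len(fams), fams[:5])
root.destroy()
```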
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Draft/draftviewproviders/view_dimension.py
python
ViewProviderAngularDimension.attach
(self, vobj)
Set up the scene sub-graph of the viewprovider.
Set up the scene sub-graph of the viewprovider.
[ "Set", "up", "the", "scene", "sub", "-", "graph", "of", "the", "viewprovider", "." ]
def attach(self, vobj): """Set up the scene sub-graph of the viewprovider.""" self.Object = vobj.Object self.color = coin.SoBaseColor() if hasattr(vobj, "LineColor"): self.color.rgb.setValue(vobj.LineColor[0], vobj.LineColor[1], vobj.LineColor[2]) self.font = coin.SoFont() self.font3d = coin.SoFont() self.text = coin.SoAsciiText() # Can be oriented in 3D space self.text3d = coin.SoText2() # Faces the camera always # The text string needs to be initialized to something, # otherwise it may cause a crash of the system self.text.string = "d" self.text3d.string = "d" self.text.justification = coin.SoAsciiText.CENTER self.text3d.justification = coin.SoAsciiText.CENTER self.textpos = coin.SoTransform() label = coin.SoSeparator() label.addChild(self.textpos) label.addChild(self.color) label.addChild(self.font) label.addChild(self.text) label3d = coin.SoSeparator() label3d.addChild(self.textpos) label3d.addChild(self.color) label3d.addChild(self.font3d) label3d.addChild(self.text3d) self.coord1 = coin.SoCoordinate3() self.trans1 = coin.SoTransform() self.coord2 = coin.SoCoordinate3() self.trans2 = coin.SoTransform() self.marks = coin.SoSeparator() self.drawstyle = coin.SoDrawStyle() self.coords = coin.SoCoordinate3() self.arc = coin.SoType.fromName("SoBrepEdgeSet").createInstance() self.node = coin.SoGroup() self.node.addChild(self.color) self.node.addChild(self.drawstyle) self.node.addChild(self.coords) self.node.addChild(self.arc) self.node.addChild(self.marks) self.node.addChild(label) self.node3d = coin.SoGroup() self.node3d.addChild(self.color) self.node3d.addChild(self.drawstyle) self.node3d.addChild(self.coords) self.node3d.addChild(self.arc) self.node3d.addChild(self.marks) self.node3d.addChild(label3d) vobj.addDisplayMode(self.node, "2D") vobj.addDisplayMode(self.node3d, "3D") self.updateData(vobj.Object, None) self.onChanged(vobj, "FontSize") self.onChanged(vobj, "FontName") self.onChanged(vobj, "ArrowType") self.onChanged(vobj, "LineColor")
[ "def", "attach", "(", "self", ",", "vobj", ")", ":", "self", ".", "Object", "=", "vobj", ".", "Object", "self", ".", "color", "=", "coin", ".", "SoBaseColor", "(", ")", "if", "hasattr", "(", "vobj", ",", "\"LineColor\"", ")", ":", "self", ".", "color", ".", "rgb", ".", "setValue", "(", "vobj", ".", "LineColor", "[", "0", "]", ",", "vobj", ".", "LineColor", "[", "1", "]", ",", "vobj", ".", "LineColor", "[", "2", "]", ")", "self", ".", "font", "=", "coin", ".", "SoFont", "(", ")", "self", ".", "font3d", "=", "coin", ".", "SoFont", "(", ")", "self", ".", "text", "=", "coin", ".", "SoAsciiText", "(", ")", "# Can be oriented in 3D space", "self", ".", "text3d", "=", "coin", ".", "SoText2", "(", ")", "# Faces the camera always", "# The text string needs to be initialized to something,", "# otherwise it may cause a crash of the system", "self", ".", "text", ".", "string", "=", "\"d\"", "self", ".", "text3d", ".", "string", "=", "\"d\"", "self", ".", "text", ".", "justification", "=", "coin", ".", "SoAsciiText", ".", "CENTER", "self", ".", "text3d", ".", "justification", "=", "coin", ".", "SoAsciiText", ".", "CENTER", "self", ".", "textpos", "=", "coin", ".", "SoTransform", "(", ")", "label", "=", "coin", ".", "SoSeparator", "(", ")", "label", ".", "addChild", "(", "self", ".", "textpos", ")", "label", ".", "addChild", "(", "self", ".", "color", ")", "label", ".", "addChild", "(", "self", ".", "font", ")", "label", ".", "addChild", "(", "self", ".", "text", ")", "label3d", "=", "coin", ".", "SoSeparator", "(", ")", "label3d", ".", "addChild", "(", "self", ".", "textpos", ")", "label3d", ".", "addChild", "(", "self", ".", "color", ")", "label3d", ".", "addChild", "(", "self", ".", "font3d", ")", "label3d", ".", "addChild", "(", "self", ".", "text3d", ")", "self", ".", "coord1", "=", "coin", ".", "SoCoordinate3", "(", ")", "self", ".", "trans1", "=", "coin", ".", "SoTransform", "(", ")", "self", ".", "coord2", "=", "coin", ".", "SoCoordinate3", "(", ")", "self", ".", "trans2", "=", "coin", ".", "SoTransform", "(", ")", "self", ".", "marks", "=", "coin", ".", "SoSeparator", "(", ")", "self", ".", "drawstyle", "=", "coin", ".", "SoDrawStyle", "(", ")", "self", ".", "coords", "=", "coin", ".", "SoCoordinate3", "(", ")", "self", ".", "arc", "=", "coin", ".", "SoType", ".", "fromName", "(", "\"SoBrepEdgeSet\"", ")", ".", "createInstance", "(", ")", "self", ".", "node", "=", "coin", ".", "SoGroup", "(", ")", "self", ".", "node", ".", "addChild", "(", "self", ".", "color", ")", "self", ".", "node", ".", "addChild", "(", "self", ".", "drawstyle", ")", "self", ".", "node", ".", "addChild", "(", "self", ".", "coords", ")", "self", ".", "node", ".", "addChild", "(", "self", ".", "arc", ")", "self", ".", "node", ".", "addChild", "(", "self", ".", "marks", ")", "self", ".", "node", ".", "addChild", "(", "label", ")", "self", ".", "node3d", "=", "coin", ".", "SoGroup", "(", ")", "self", ".", "node3d", ".", "addChild", "(", "self", ".", "color", ")", "self", ".", "node3d", ".", "addChild", "(", "self", ".", "drawstyle", ")", "self", ".", "node3d", ".", "addChild", "(", "self", ".", "coords", ")", "self", ".", "node3d", ".", "addChild", "(", "self", ".", "arc", ")", "self", ".", "node3d", ".", "addChild", "(", "self", ".", "marks", ")", "self", ".", "node3d", ".", "addChild", "(", "label3d", ")", "vobj", ".", "addDisplayMode", "(", "self", ".", "node", ",", "\"2D\"", ")", "vobj", ".", "addDisplayMode", "(", "self", ".", "node3d", ",", "\"3D\"", ")", "self", ".", "updateData", "(", "vobj", ".", "Object", ",", 
"None", ")", "self", ".", "onChanged", "(", "vobj", ",", "\"FontSize\"", ")", "self", ".", "onChanged", "(", "vobj", ",", "\"FontName\"", ")", "self", ".", "onChanged", "(", "vobj", ",", "\"ArrowType\"", ")", "self", ".", "onChanged", "(", "vobj", ",", "\"LineColor\"", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftviewproviders/view_dimension.py#L919-L985
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/third_party/pyserial/serial/rfc2217.py
python
PortManager.escape
(self, data)
\ this generator function is for the user. all outgoing data has to be properly escaped, so that no IAC character in the data stream messes up the Telnet state machine in the server. socket.sendall(escape(data))
\ this generator function is for the user. all outgoing data has to be properly escaped, so that no IAC character in the data stream messes up the Telnet state machine in the server.
[ "\\", "this", "generator", "function", "is", "for", "the", "user", ".", "all", "outgoing", "data", "has", "to", "be", "properly", "escaped", "so", "that", "no", "IAC", "character", "in", "the", "data", "stream", "messes", "up", "the", "Telnet", "state", "machine", "in", "the", "server", "." ]
def escape(self, data): """\ this generator function is for the user. all outgoing data has to be properly escaped, so that no IAC character in the data stream messes up the Telnet state machine in the server. socket.sendall(escape(data)) """ for byte in data: if byte == IAC: yield IAC yield IAC else: yield byte
[ "def", "escape", "(", "self", ",", "data", ")", ":", "for", "byte", "in", "data", ":", "if", "byte", "==", "IAC", ":", "yield", "IAC", "yield", "IAC", "else", ":", "yield", "byte" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/pyserial/serial/rfc2217.py#L1010-L1023
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/ipaddress.py
python
IPv4Address.is_unspecified
(self)
return self == self._constants._unspecified_address
Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 5735 3.
Test if the address is unspecified.
[ "Test", "if", "the", "address", "is", "unspecified", "." ]
def is_unspecified(self): """Test if the address is unspecified. Returns: A boolean, True if this is the unspecified address as defined in RFC 5735 3. """ return self == self._constants._unspecified_address
[ "def", "is_unspecified", "(", "self", ")", ":", "return", "self", "==", "self", ".", "_constants", ".", "_unspecified_address" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/ipaddress.py#L1374-L1382
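The property is easiest to see through the stdlib factory function.

```python
# A minimal sketch using the stdlib ipaddress module.
import ipaddress

print(ipaddress.ip_address('0.0.0.0').is_unspecified)    # True, per RFC 5735
print(ipaddress.ip_address('192.0.2.1').is_unspecified)  # False
print(ipaddress.ip_address('::').is_unspecified)         # True (IPv6 analogue)
```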
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/keras/utils/tf_utils.py
python
assert_no_legacy_layers
(layers)
Prevent tf.layers.Layers from being used with Keras. Certain legacy layers inherit from their keras analogs; however they are not supported with keras and can lead to subtle and hard to diagnose bugs. Args: layers: A list of layers to check Raises: TypeError: If any elements of layers are tf.layers.Layers
Prevent tf.layers.Layers from being used with Keras.
[ "Prevent", "tf", ".", "layers", ".", "Layers", "from", "being", "used", "with", "Keras", "." ]
def assert_no_legacy_layers(layers): """Prevent tf.layers.Layers from being used with Keras. Certain legacy layers inherit from their keras analogs; however they are not supported with keras and can lead to subtle and hard to diagnose bugs. Args: layers: A list of layers to check Raises: TypeError: If any elements of layers are tf.layers.Layers """ # isinstance check for tf.layers.Layer introduces a circular dependency. legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)] if legacy_layers: layer_str = '\n'.join(' ' + str(l) for l in legacy_layers) raise TypeError( 'The following are legacy tf.layers.Layers:\n{}\nTo use keras as a ' 'framework (for instance using the Network, Model, or Sequential ' 'classes), please use the tf.keras.layers implementation instead. ' '(Or, if writing custom layers, subclass from tf.keras.layers rather ' 'than tf.layers)'.format(layer_str))
[ "def", "assert_no_legacy_layers", "(", "layers", ")", ":", "# isinstance check for tf.layers.Layer introduces a circular dependency.", "legacy_layers", "=", "[", "l", "for", "l", "in", "layers", "if", "getattr", "(", "l", ",", "'_is_legacy_layer'", ",", "None", ")", "]", "if", "legacy_layers", ":", "layer_str", "=", "'\\n'", ".", "join", "(", "' '", "+", "str", "(", "l", ")", "for", "l", "in", "legacy_layers", ")", "raise", "TypeError", "(", "'The following are legacy tf.layers.Layers:\\n{}\\nTo use keras as a '", "'framework (for instance using the Network, Model, or Sequential '", "'classes), please use the tf.keras.layers implementation instead. '", "'(Or, if writing custom layers, subclass from tf.keras.layers rather '", "'than tf.layers)'", ".", "format", "(", "layer_str", ")", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/utils/tf_utils.py#L404-L426
google/fhir
d77f57706c1a168529b0b87ca7ccb1c0113e83c2
py/google/fhir/fhir_errors.py
python
ListErrorReporter.report_validation_warning
(self, element_path: str, msg: str)
Logs to the `warning` context and stores `msg` in `warnings`.
Logs to the `warning` context and stores `msg` in `warnings`.
[ "Logs", "to", "the", "warning", "context", "and", "stores", "msg", "in", "warnings", "." ]
def report_validation_warning(self, element_path: str, msg: str) -> None: """Logs to the `warning` context and stores `msg` in `warnings`.""" logging.warning('%s; %s', element_path, msg) self.warnings.append(msg)
[ "def", "report_validation_warning", "(", "self", ",", "element_path", ":", "str", ",", "msg", ":", "str", ")", "->", "None", ":", "logging", ".", "warning", "(", "'%s; %s'", ",", "element_path", ",", "msg", ")", "self", ".", "warnings", ".", "append", "(", "msg", ")" ]
https://github.com/google/fhir/blob/d77f57706c1a168529b0b87ca7ccb1c0113e83c2/py/google/fhir/fhir_errors.py#L132-L135
Z3Prover/z3
d745d03afdfdf638d66093e2bfbacaf87187f35b
src/api/python/z3/z3.py
python
Solver.consequences
(self, assumptions, variables)
return CheckSatResult(r), consequences
Determine fixed values for the variables based on the solver state and assumptions. >>> s = Solver() >>> a, b, c, d = Bools('a b c d') >>> s.add(Implies(a,b), Implies(b, c)) >>> s.consequences([a],[b,c,d]) (sat, [Implies(a, b), Implies(a, c)]) >>> s.consequences([Not(c),d],[a,b,c,d]) (sat, [Implies(d, d), Implies(Not(c), Not(c)), Implies(Not(c), Not(b)), Implies(Not(c), Not(a))])
Determine fixed values for the variables based on the solver state and assumptions. >>> s = Solver() >>> a, b, c, d = Bools('a b c d') >>> s.add(Implies(a,b), Implies(b, c)) >>> s.consequences([a],[b,c,d]) (sat, [Implies(a, b), Implies(a, c)]) >>> s.consequences([Not(c),d],[a,b,c,d]) (sat, [Implies(d, d), Implies(Not(c), Not(c)), Implies(Not(c), Not(b)), Implies(Not(c), Not(a))])
[ "Determine", "fixed", "values", "for", "the", "variables", "based", "on", "the", "solver", "state", "and", "assumptions", ".", ">>>", "s", "=", "Solver", "()", ">>>", "a", "b", "c", "d", "=", "Bools", "(", "a", "b", "c", "d", ")", ">>>", "s", ".", "add", "(", "Implies", "(", "a", "b", ")", "Implies", "(", "b", "c", "))", ">>>", "s", ".", "consequences", "(", "[", "a", "]", "[", "b", "c", "d", "]", ")", "(", "sat", "[", "Implies", "(", "a", "b", ")", "Implies", "(", "a", "c", ")", "]", ")", ">>>", "s", ".", "consequences", "(", "[", "Not", "(", "c", ")", "d", "]", "[", "a", "b", "c", "d", "]", ")", "(", "sat", "[", "Implies", "(", "d", "d", ")", "Implies", "(", "Not", "(", "c", ")", "Not", "(", "c", "))", "Implies", "(", "Not", "(", "c", ")", "Not", "(", "b", "))", "Implies", "(", "Not", "(", "c", ")", "Not", "(", "a", "))", "]", ")" ]
def consequences(self, assumptions, variables): """Determine fixed values for the variables based on the solver state and assumptions. >>> s = Solver() >>> a, b, c, d = Bools('a b c d') >>> s.add(Implies(a,b), Implies(b, c)) >>> s.consequences([a],[b,c,d]) (sat, [Implies(a, b), Implies(a, c)]) >>> s.consequences([Not(c),d],[a,b,c,d]) (sat, [Implies(d, d), Implies(Not(c), Not(c)), Implies(Not(c), Not(b)), Implies(Not(c), Not(a))]) """ if isinstance(assumptions, list): _asms = AstVector(None, self.ctx) for a in assumptions: _asms.push(a) assumptions = _asms if isinstance(variables, list): _vars = AstVector(None, self.ctx) for a in variables: _vars.push(a) variables = _vars _z3_assert(isinstance(assumptions, AstVector), "ast vector expected") _z3_assert(isinstance(variables, AstVector), "ast vector expected") consequences = AstVector(None, self.ctx) r = Z3_solver_get_consequences(self.ctx.ref(), self.solver, assumptions.vector, variables.vector, consequences.vector) sz = len(consequences) consequences = [consequences[i] for i in range(sz)] return CheckSatResult(r), consequences
[ "def", "consequences", "(", "self", ",", "assumptions", ",", "variables", ")", ":", "if", "isinstance", "(", "assumptions", ",", "list", ")", ":", "_asms", "=", "AstVector", "(", "None", ",", "self", ".", "ctx", ")", "for", "a", "in", "assumptions", ":", "_asms", ".", "push", "(", "a", ")", "assumptions", "=", "_asms", "if", "isinstance", "(", "variables", ",", "list", ")", ":", "_vars", "=", "AstVector", "(", "None", ",", "self", ".", "ctx", ")", "for", "a", "in", "variables", ":", "_vars", ".", "push", "(", "a", ")", "variables", "=", "_vars", "_z3_assert", "(", "isinstance", "(", "assumptions", ",", "AstVector", ")", ",", "\"ast vector expected\"", ")", "_z3_assert", "(", "isinstance", "(", "variables", ",", "AstVector", ")", ",", "\"ast vector expected\"", ")", "consequences", "=", "AstVector", "(", "None", ",", "self", ".", "ctx", ")", "r", "=", "Z3_solver_get_consequences", "(", "self", ".", "ctx", ".", "ref", "(", ")", ",", "self", ".", "solver", ",", "assumptions", ".", "vector", ",", "variables", ".", "vector", ",", "consequences", ".", "vector", ")", "sz", "=", "len", "(", "consequences", ")", "consequences", "=", "[", "consequences", "[", "i", "]", "for", "i", "in", "range", "(", "sz", ")", "]", "return", "CheckSatResult", "(", "r", ")", ",", "consequences" ]
https://github.com/Z3Prover/z3/blob/d745d03afdfdf638d66093e2bfbacaf87187f35b/src/api/python/z3/z3.py#L7109-L7136
google/iree
1224bbdbe65b0d1fdf40e7324f60f68beeaf7c76
integrations/tensorflow/python_projects/iree_tf/iree/tf/support/tf_utils.py
python
save_input_values
(inputs: Sequence[np.ndarray], artifacts_dir: str = None)
return result
Saves input values with IREE tools format if 'artifacts_dir' is set.
Saves input values with IREE tools format if 'artifacts_dir' is set.
[ "Saves", "input", "values", "with", "IREE", "tools", "format", "if", "artifacts_dir", "is", "set", "." ]
def save_input_values(inputs: Sequence[np.ndarray], artifacts_dir: str = None) -> str: """Saves input values with IREE tools format if 'artifacts_dir' is set.""" result = [] for array in inputs: shape_dtype = get_shape_and_dtype(array) values = " ".join([str(x) for x in array.flatten()]) result.append(f"{shape_dtype}={values}") result = "\n".join(result) if artifacts_dir is not None: inputs_path = os.path.join(artifacts_dir, "inputs.txt") logging.info("Saving IREE input values to: %s", inputs_path) with open(inputs_path, "w") as f: f.write(result) f.write("\n") return result
[ "def", "save_input_values", "(", "inputs", ":", "Sequence", "[", "np", ".", "ndarray", "]", ",", "artifacts_dir", ":", "str", "=", "None", ")", "->", "str", ":", "result", "=", "[", "]", "for", "array", "in", "inputs", ":", "shape_dtype", "=", "get_shape_and_dtype", "(", "array", ")", "values", "=", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "array", ".", "flatten", "(", ")", "]", ")", "result", ".", "append", "(", "f\"{shape_dtype}={values}\"", ")", "result", "=", "\"\\n\"", ".", "join", "(", "result", ")", "if", "artifacts_dir", "is", "not", "None", ":", "inputs_path", "=", "os", ".", "path", ".", "join", "(", "artifacts_dir", ",", "\"inputs.txt\"", ")", "logging", ".", "info", "(", "\"Saving IREE input values to: %s\"", ",", "inputs_path", ")", "with", "open", "(", "inputs_path", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "result", ")", "f", ".", "write", "(", "\"\\n\"", ")", "return", "result" ]
https://github.com/google/iree/blob/1224bbdbe65b0d1fdf40e7324f60f68beeaf7c76/integrations/tensorflow/python_projects/iree_tf/iree/tf/support/tf_utils.py#L114-L129
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
scripts/LargeScaleStructures/data_stitching.py
python
DataSet.is_loaded
(self)
return mtd.doesExist(self._ws_name)
Return True if this data set has been loaded
Return True if this data set has been loaded
[ "Return", "True", "if", "this", "data", "set", "has", "been", "loaded" ]
def is_loaded(self): """ Return True if this data set has been loaded """ return mtd.doesExist(self._ws_name)
[ "def", "is_loaded", "(", "self", ")", ":", "return", "mtd", ".", "doesExist", "(", "self", ".", "_ws_name", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/LargeScaleStructures/data_stitching.py#L235-L239
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/docview.py
python
Document.NotifyClosing
(self)
Notifies the views that the document is going to close.
Notifies the views that the document is going to close.
[ "Notifies", "the", "views", "that", "the", "document", "is", "going", "to", "close", "." ]
def NotifyClosing(self): """ Notifies the views that the document is going to close. """ for view in self._documentViews: view.OnClosingDocument()
[ "def", "NotifyClosing", "(", "self", ")", ":", "for", "view", "in", "self", ".", "_documentViews", ":", "view", ".", "OnClosingDocument", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/docview.py#L727-L732
kismetwireless/kismet
a7c0dc270c960fb1f58bd9cec4601c201885fd4e
capture_proxy_adsb/KismetCaptureProxyAdsb/__init__.py
python
KismetProxyAdsb.adsb_len_by_type
(self, type)
return 56
Get expected length of message in bits based on the type
Get expected length of message in bits based on the type
[ "Get", "expected", "length", "of", "message", "in", "bits", "based", "on", "the", "type" ]
def adsb_len_by_type(self, type): """ Get expected length of message in bits based on the type """ if type == 16 or type == 17 or type == 19 or type == 20 or type == 21: return 112 return 56
[ "def", "adsb_len_by_type", "(", "self", ",", "type", ")", ":", "if", "type", "==", "16", "or", "type", "==", "17", "or", "type", "==", "19", "or", "type", "==", "20", "or", "type", "==", "21", ":", "return", "112", "return", "56" ]
https://github.com/kismetwireless/kismet/blob/a7c0dc270c960fb1f58bd9cec4601c201885fd4e/capture_proxy_adsb/KismetCaptureProxyAdsb/__init__.py#L420-L428
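Tracing the branch above for two message types (comment-only, since construction of the proxy is outside this record):

# adsb_len_by_type(17) -> 112   (extended squitter family: types 16, 17, 19, 20, 21)
# adsb_len_by_type(4)  -> 56    (all other types)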
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/ops/spectral_ops.py
python
_irfft_wrapper
(ifft_fn, fft_rank, default_name)
return _irfft
Wrapper around gen_spectral_ops.irfft* that infers fft_length argument.
Wrapper around gen_spectral_ops.irfft* that infers fft_length argument.
[ "Wrapper", "around", "gen_spectral_ops", ".", "irfft", "*", "that", "infers", "fft_length", "argument", "." ]
def _irfft_wrapper(ifft_fn, fft_rank, default_name): """Wrapper around gen_spectral_ops.irfft* that infers fft_length argument.""" def _irfft(input_tensor, fft_length=None, name=None): with _ops.name_scope(name, default_name, [input_tensor, fft_length]) as name: input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.complex64) input_tensor.shape.with_rank_at_least(fft_rank) if fft_length is None: fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank) else: fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32) input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=True) return ifft_fn(input_tensor, fft_length, name) _irfft.__doc__ = ifft_fn.__doc__ return _irfft
[ "def", "_irfft_wrapper", "(", "ifft_fn", ",", "fft_rank", ",", "default_name", ")", ":", "def", "_irfft", "(", "input_tensor", ",", "fft_length", "=", "None", ",", "name", "=", "None", ")", ":", "with", "_ops", ".", "name_scope", "(", "name", ",", "default_name", ",", "[", "input_tensor", ",", "fft_length", "]", ")", "as", "name", ":", "input_tensor", "=", "_ops", ".", "convert_to_tensor", "(", "input_tensor", ",", "_dtypes", ".", "complex64", ")", "input_tensor", ".", "shape", ".", "with_rank_at_least", "(", "fft_rank", ")", "if", "fft_length", "is", "None", ":", "fft_length", "=", "_infer_fft_length_for_irfft", "(", "input_tensor", ",", "fft_rank", ")", "else", ":", "fft_length", "=", "_ops", ".", "convert_to_tensor", "(", "fft_length", ",", "_dtypes", ".", "int32", ")", "input_tensor", "=", "_maybe_pad_for_rfft", "(", "input_tensor", ",", "fft_rank", ",", "fft_length", ",", "is_reverse", "=", "True", ")", "return", "ifft_fn", "(", "input_tensor", ",", "fft_length", ",", "name", ")", "_irfft", ".", "__doc__", "=", "ifft_fn", ".", "__doc__", "return", "_irfft" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/spectral_ops.py#L138-L154
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/mailbox.py
python
MH.get_sequences
(self)
return results
Return a name-to-key-list dictionary to define each sequence.
Return a name-to-key-list dictionary to define each sequence.
[ "Return", "a", "name", "-", "to", "-", "key", "-", "list", "dictionary", "to", "define", "each", "sequence", "." ]
def get_sequences(self): """Return a name-to-key-list dictionary to define each sequence.""" results = {} f = open(os.path.join(self._path, '.mh_sequences'), 'r') try: all_keys = set(self.keys()) for line in f: try: name, contents = line.split(':') keys = set() for spec in contents.split(): if spec.isdigit(): keys.add(int(spec)) else: start, stop = (int(x) for x in spec.split('-')) keys.update(range(start, stop + 1)) results[name] = [key for key in sorted(keys) \ if key in all_keys] if len(results[name]) == 0: del results[name] except ValueError: raise FormatError('Invalid sequence specification: %s' % line.rstrip()) finally: f.close() return results
[ "def", "get_sequences", "(", "self", ")", ":", "results", "=", "{", "}", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_path", ",", "'.mh_sequences'", ")", ",", "'r'", ")", "try", ":", "all_keys", "=", "set", "(", "self", ".", "keys", "(", ")", ")", "for", "line", "in", "f", ":", "try", ":", "name", ",", "contents", "=", "line", ".", "split", "(", "':'", ")", "keys", "=", "set", "(", ")", "for", "spec", "in", "contents", ".", "split", "(", ")", ":", "if", "spec", ".", "isdigit", "(", ")", ":", "keys", ".", "add", "(", "int", "(", "spec", ")", ")", "else", ":", "start", ",", "stop", "=", "(", "int", "(", "x", ")", "for", "x", "in", "spec", ".", "split", "(", "'-'", ")", ")", "keys", ".", "update", "(", "range", "(", "start", ",", "stop", "+", "1", ")", ")", "results", "[", "name", "]", "=", "[", "key", "for", "key", "in", "sorted", "(", "keys", ")", "if", "key", "in", "all_keys", "]", "if", "len", "(", "results", "[", "name", "]", ")", "==", "0", ":", "del", "results", "[", "name", "]", "except", "ValueError", ":", "raise", "FormatError", "(", "'Invalid sequence specification: %s'", "%", "line", ".", "rstrip", "(", ")", ")", "finally", ":", "f", ".", "close", "(", ")", "return", "results" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/mailbox.py#L1122-L1147
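A comment-only sketch of the sequence file format the loop above parses:

# Given a .mh_sequences file line such as:
#   flagged: 1 3 5-7
# get_sequences() returns {'flagged': [1, 3, 5, 6, 7]}, with the list
# restricted to message keys that actually exist in the mailbox.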
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/stc.py
python
StyledTextCtrl.CallTipPosAtStart
(*args, **kwargs)
return _stc.StyledTextCtrl_CallTipPosAtStart(*args, **kwargs)
CallTipPosAtStart(self) -> int Retrieve the position where the caret was before displaying the call tip.
CallTipPosAtStart(self) -> int
[ "CallTipPosAtStart", "(", "self", ")", "-", ">", "int" ]
def CallTipPosAtStart(*args, **kwargs): """ CallTipPosAtStart(self) -> int Retrieve the position where the caret was before displaying the call tip. """ return _stc.StyledTextCtrl_CallTipPosAtStart(*args, **kwargs)
[ "def", "CallTipPosAtStart", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_CallTipPosAtStart", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/stc.py#L3820-L3826
goldeneye-source/ges-code
2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d
thirdparty/protobuf-2.3.0/python/google/protobuf/internal/decoder.py
python
_SkipVarint
(buffer, pos, end)
return pos
Skip a varint value. Returns the new position.
Skip a varint value. Returns the new position.
[ "Skip", "a", "varint", "value", ".", "Returns", "the", "new", "position", "." ]
def _SkipVarint(buffer, pos, end): """Skip a varint value. Returns the new position.""" while ord(buffer[pos]) & 0x80: pos += 1 pos += 1 if pos > end: raise _DecodeError('Truncated message.') return pos
[ "def", "_SkipVarint", "(", "buffer", ",", "pos", ",", "end", ")", ":", "while", "ord", "(", "buffer", "[", "pos", "]", ")", "&", "0x80", ":", "pos", "+=", "1", "pos", "+=", "1", "if", "pos", ">", "end", ":", "raise", "_DecodeError", "(", "'Truncated message.'", ")", "return", "pos" ]
https://github.com/goldeneye-source/ges-code/blob/2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d/thirdparty/protobuf-2.3.0/python/google/protobuf/internal/decoder.py#L553-L561
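A small worked example for _SkipVarint, assuming Python 2 byte-string semantics (ord() is applied to one-character strings):

buf = '\xac\x02\x05'                 # 0xac has the continuation bit (0x80) set
pos = _SkipVarint(buf, 0, len(buf))  # pos == 2: the varint spans bytes 0..1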
NVIDIA/DALI
bf16cc86ba8f091b145f91962f21fe1b6aff243d
third_party/cpplint.py
python
NestingState.SeenOpenBrace
(self)
return (not self.stack) or self.stack[-1].seen_open_brace
Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace.
Check if we have seen the opening brace for the innermost block.
[ "Check", "if", "we", "have", "seen", "the", "opening", "brace", "for", "the", "innermost", "block", "." ]
def SeenOpenBrace(self): """Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace. """ return (not self.stack) or self.stack[-1].seen_open_brace
[ "def", "SeenOpenBrace", "(", "self", ")", ":", "return", "(", "not", "self", ".", "stack", ")", "or", "self", ".", "stack", "[", "-", "1", "]", ".", "seen_open_brace" ]
https://github.com/NVIDIA/DALI/blob/bf16cc86ba8f091b145f91962f21fe1b6aff243d/third_party/cpplint.py#L2429-L2436
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
samples/ide/activegrid/tool/FindInDirService.py
python
FindInDirService.SaveFindInDirConfig
(self, dirString, searchSubfolders)
Save search dir patterns and flags to registry. dirString = search directory searchSubfolders = Search subfolders
Save search dir patterns and flags to registry.
[ "Save", "search", "dir", "patterns", "and", "flags", "to", "registry", "." ]
def SaveFindInDirConfig(self, dirString, searchSubfolders): """ Save search dir patterns and flags to registry. dirString = search directory searchSubfolders = Search subfolders """ config = wx.ConfigBase_Get() config.Write(FIND_MATCHDIR, dirString) config.WriteInt(FIND_MATCHDIRSUBFOLDERS, searchSubfolders)
[ "def", "SaveFindInDirConfig", "(", "self", ",", "dirString", ",", "searchSubfolders", ")", ":", "config", "=", "wx", ".", "ConfigBase_Get", "(", ")", "config", ".", "Write", "(", "FIND_MATCHDIR", ",", "dirString", ")", "config", ".", "WriteInt", "(", "FIND_MATCHDIRSUBFOLDERS", ",", "searchSubfolders", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/samples/ide/activegrid/tool/FindInDirService.py#L302-L310
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/scipy/signal/signaltools.py
python
fftconvolve
(in1, in2, mode="full")
Convolve two N-dimensional arrays using FFT. Convolve `in1` and `in2` using the fast Fourier transform method, with the output size determined by the `mode` argument. This is generally much faster than `convolve` for large arrays (n > ~500), but can be slower when only a few output values are needed, and can only output float arrays (int or object array inputs will be cast to float). As of v0.19, `convolve` automatically chooses this method or the direct method based on an estimation of which is faster. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. If operating in 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. Returns ------- out : array An N-dimensional array containing a subset of the discrete linear convolution of `in1` with `in2`. Examples -------- Autocorrelation of white noise is an impulse. >>> from scipy import signal >>> sig = np.random.randn(1000) >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full') >>> import matplotlib.pyplot as plt >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) >>> ax_orig.plot(sig) >>> ax_orig.set_title('White noise') >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr) >>> ax_mag.set_title('Autocorrelation') >>> fig.tight_layout() >>> fig.show() Gaussian blur implemented using FFT convolution. Notice the dark borders around the image, due to the zero-padding beyond its boundaries. The `convolve2d` function allows for other types of image boundaries, but is far slower. >>> from scipy import misc >>> face = misc.face(gray=True) >>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8)) >>> blurred = signal.fftconvolve(face, kernel, mode='same') >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1, ... figsize=(6, 15)) >>> ax_orig.imshow(face, cmap='gray') >>> ax_orig.set_title('Original') >>> ax_orig.set_axis_off() >>> ax_kernel.imshow(kernel, cmap='gray') >>> ax_kernel.set_title('Gaussian kernel') >>> ax_kernel.set_axis_off() >>> ax_blurred.imshow(blurred, cmap='gray') >>> ax_blurred.set_title('Blurred') >>> ax_blurred.set_axis_off() >>> fig.show()
Convolve two N-dimensional arrays using FFT.
[ "Convolve", "two", "N", "-", "dimensional", "arrays", "using", "FFT", "." ]
def fftconvolve(in1, in2, mode="full"): """Convolve two N-dimensional arrays using FFT. Convolve `in1` and `in2` using the fast Fourier transform method, with the output size determined by the `mode` argument. This is generally much faster than `convolve` for large arrays (n > ~500), but can be slower when only a few output values are needed, and can only output float arrays (int or object array inputs will be cast to float). As of v0.19, `convolve` automatically chooses this method or the direct method based on an estimation of which is faster. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. If operating in 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. Returns ------- out : array An N-dimensional array containing a subset of the discrete linear convolution of `in1` with `in2`. Examples -------- Autocorrelation of white noise is an impulse. >>> from scipy import signal >>> sig = np.random.randn(1000) >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full') >>> import matplotlib.pyplot as plt >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) >>> ax_orig.plot(sig) >>> ax_orig.set_title('White noise') >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr) >>> ax_mag.set_title('Autocorrelation') >>> fig.tight_layout() >>> fig.show() Gaussian blur implemented using FFT convolution. Notice the dark borders around the image, due to the zero-padding beyond its boundaries. The `convolve2d` function allows for other types of image boundaries, but is far slower. >>> from scipy import misc >>> face = misc.face(gray=True) >>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8)) >>> blurred = signal.fftconvolve(face, kernel, mode='same') >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1, ... figsize=(6, 15)) >>> ax_orig.imshow(face, cmap='gray') >>> ax_orig.set_title('Original') >>> ax_orig.set_axis_off() >>> ax_kernel.imshow(kernel, cmap='gray') >>> ax_kernel.set_title('Gaussian kernel') >>> ax_kernel.set_axis_off() >>> ax_blurred.imshow(blurred, cmap='gray') >>> ax_blurred.set_title('Blurred') >>> ax_blurred.set_axis_off() >>> fig.show() """ in1 = asarray(in1) in2 = asarray(in2) if in1.ndim == in2.ndim == 0: # scalar inputs return in1 * in2 elif not in1.ndim == in2.ndim: raise ValueError("in1 and in2 should have the same dimensionality") elif in1.size == 0 or in2.size == 0: # empty arrays return array([]) s1 = array(in1.shape) s2 = array(in2.shape) complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or np.issubdtype(in2.dtype, np.complexfloating)) shape = s1 + s2 - 1 # Check that input sizes are compatible with 'valid' mode if _inputs_swap_needed(mode, s1, s2): # Convolution is commutative; order doesn't have any effect on output in1, s1, in2, s2 = in2, s2, in1, s1 # Speed up FFT by padding to optimal size for FFTPACK fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape] fslice = tuple([slice(0, int(sz)) for sz in shape]) # Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make # sure we only call rfftn/irfftn from one thread at a time. if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)): try: sp1 = np.fft.rfftn(in1, fshape) sp2 = np.fft.rfftn(in2, fshape) ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy()) finally: if not _rfft_mt_safe: _rfft_lock.release() else: # If we're here, it's either because we need a complex result, or we # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and # is already in use by another thread). In either case, use the # (threadsafe but slower) SciPy complex-FFT routines instead. sp1 = fftpack.fftn(in1, fshape) sp2 = fftpack.fftn(in2, fshape) ret = fftpack.ifftn(sp1 * sp2)[fslice].copy() if not complex_result: ret = ret.real if mode == "full": return ret elif mode == "same": return _centered(ret, s1) elif mode == "valid": return _centered(ret, s1 - s2 + 1) else: raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.")
[ "def", "fftconvolve", "(", "in1", ",", "in2", ",", "mode", "=", "\"full\"", ")", ":", "in1", "=", "asarray", "(", "in1", ")", "in2", "=", "asarray", "(", "in2", ")", "if", "in1", ".", "ndim", "==", "in2", ".", "ndim", "==", "0", ":", "# scalar inputs", "return", "in1", "*", "in2", "elif", "not", "in1", ".", "ndim", "==", "in2", ".", "ndim", ":", "raise", "ValueError", "(", "\"in1 and in2 should have the same dimensionality\"", ")", "elif", "in1", ".", "size", "==", "0", "or", "in2", ".", "size", "==", "0", ":", "# empty arrays", "return", "array", "(", "[", "]", ")", "s1", "=", "array", "(", "in1", ".", "shape", ")", "s2", "=", "array", "(", "in2", ".", "shape", ")", "complex_result", "=", "(", "np", ".", "issubdtype", "(", "in1", ".", "dtype", ",", "np", ".", "complexfloating", ")", "or", "np", ".", "issubdtype", "(", "in2", ".", "dtype", ",", "np", ".", "complexfloating", ")", ")", "shape", "=", "s1", "+", "s2", "-", "1", "# Check that input sizes are compatible with 'valid' mode", "if", "_inputs_swap_needed", "(", "mode", ",", "s1", ",", "s2", ")", ":", "# Convolution is commutative; order doesn't have any effect on output", "in1", ",", "s1", ",", "in2", ",", "s2", "=", "in2", ",", "s2", ",", "in1", ",", "s1", "# Speed up FFT by padding to optimal size for FFTPACK", "fshape", "=", "[", "fftpack", ".", "helper", ".", "next_fast_len", "(", "int", "(", "d", ")", ")", "for", "d", "in", "shape", "]", "fslice", "=", "tuple", "(", "[", "slice", "(", "0", ",", "int", "(", "sz", ")", ")", "for", "sz", "in", "shape", "]", ")", "# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make", "# sure we only call rfftn/irfftn from one thread at a time.", "if", "not", "complex_result", "and", "(", "_rfft_mt_safe", "or", "_rfft_lock", ".", "acquire", "(", "False", ")", ")", ":", "try", ":", "sp1", "=", "np", ".", "fft", ".", "rfftn", "(", "in1", ",", "fshape", ")", "sp2", "=", "np", ".", "fft", ".", "rfftn", "(", "in2", ",", "fshape", ")", "ret", "=", "(", "np", ".", "fft", ".", "irfftn", "(", "sp1", "*", "sp2", ",", "fshape", ")", "[", "fslice", "]", ".", "copy", "(", ")", ")", "finally", ":", "if", "not", "_rfft_mt_safe", ":", "_rfft_lock", ".", "release", "(", ")", "else", ":", "# If we're here, it's either because we need a complex result, or we", "# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and", "# is already in use by another thread). In either case, use the", "# (threadsafe but slower) SciPy complex-FFT routines instead.", "sp1", "=", "fftpack", ".", "fftn", "(", "in1", ",", "fshape", ")", "sp2", "=", "fftpack", ".", "fftn", "(", "in2", ",", "fshape", ")", "ret", "=", "fftpack", ".", "ifftn", "(", "sp1", "*", "sp2", ")", "[", "fslice", "]", ".", "copy", "(", ")", "if", "not", "complex_result", ":", "ret", "=", "ret", ".", "real", "if", "mode", "==", "\"full\"", ":", "return", "ret", "elif", "mode", "==", "\"same\"", ":", "return", "_centered", "(", "ret", ",", "s1", ")", "elif", "mode", "==", "\"valid\"", ":", "return", "_centered", "(", "ret", ",", "s1", "-", "s2", "+", "1", ")", "else", ":", "raise", "ValueError", "(", "\"Acceptable mode flags are 'valid',\"", "\" 'same', or 'full'.\"", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/signal/signaltools.py#L271-L405
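A short smoothing example built on the record above; the pulse signal and window length are arbitrary:

import numpy as np
from scipy import signal
sig = np.repeat([0., 1., 0.], 100)            # rectangular pulse
win = np.hanning(50)
smoothed = signal.fftconvolve(sig, win, mode='same') / win.sum()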
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/keras/utils/kernelized_utils.py
python
_to_matrix
(u)
return u
If input tensor is a vector (i.e., has rank 1), converts it to matrix.
If input tensor is a vector (i.e., has rank 1), converts it to matrix.
[ "If", "input", "tensor", "is", "a", "vector", "(", "i", ".", "e", ".", "has", "rank", "1", ")", "converts", "it", "to", "matrix", "." ]
def _to_matrix(u): """If input tensor is a vector (i.e., has rank 1), converts it to matrix.""" u_rank = len(u.shape) if u_rank not in [1, 2]: raise ValueError('The input tensor should have rank 1 or 2. Given rank: {}' .format(u_rank)) if u_rank == 1: return array_ops.expand_dims(u, 0) return u
[ "def", "_to_matrix", "(", "u", ")", ":", "u_rank", "=", "len", "(", "u", ".", "shape", ")", "if", "u_rank", "not", "in", "[", "1", ",", "2", "]", ":", "raise", "ValueError", "(", "'The input tensor should have rank 1 or 2. Given rank: {}'", ".", "format", "(", "u_rank", ")", ")", "if", "u_rank", "==", "1", ":", "return", "array_ops", ".", "expand_dims", "(", "u", ",", "0", ")", "return", "u" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/utils/kernelized_utils.py#L21-L29
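The rank handling above, sketched as comments (TensorFlow internal helper; tensors illustrative):

# _to_matrix(tf.constant([1., 2., 3.]))         -> shape (1, 3): leading axis added
# _to_matrix(tf.constant([[1., 2.], [3., 4.]])) -> returned unchanged, shape (2, 2)
# _to_matrix(tf.zeros([2, 2, 2]))               -> raises ValueError (rank 3)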
idaholab/moose
9eeebc65e098b4c30f8205fb41591fd5b61eb6ff
python/peacock/Input/BlockInfo.py
python
BlockInfo.paramValue
(self, param)
Gets the value of a parameter. Input: param[str]: Name of the parameter Return: str: Value of the parameter or None
Gets the value of a parameter. Input: param[str]: Name of the parameter Return: str: Value of the parameter or None
[ "Gets", "the", "value", "of", "a", "parameter", ".", "Input", ":", "param", "[", "str", "]", ":", "Name", "of", "the", "parameter", "Return", ":", "str", ":", "Value", "of", "the", "parameter", "or", "None" ]
def paramValue(self, param): """ Gets the value of a parameter. Input: param[str]: Name of the parameter Return: str: Value of the parameter or None """ param_info = self.getParamInfo(param) if param_info: return param_info.value
[ "def", "paramValue", "(", "self", ",", "param", ")", ":", "param_info", "=", "self", ".", "getParamInfo", "(", "param", ")", "if", "param_info", ":", "return", "param_info", ".", "value" ]
https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/peacock/Input/BlockInfo.py#L103-L113
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/grid.py
python
Grid.EnableEditing
(*args, **kwargs)
return _grid.Grid_EnableEditing(*args, **kwargs)
EnableEditing(self, bool edit)
EnableEditing(self, bool edit)
[ "EnableEditing", "(", "self", "bool", "edit", ")" ]
def EnableEditing(*args, **kwargs): """EnableEditing(self, bool edit)""" return _grid.Grid_EnableEditing(*args, **kwargs)
[ "def", "EnableEditing", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_grid", ".", "Grid_EnableEditing", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/grid.py#L1342-L1344
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/training/rmsprop.py
python
RMSPropOptimizer.__init__
(self, learning_rate, decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, centered=False, name="RMSProp")
Construct a new RMSProp optimizer. Note that in the dense implementation of this algorithm, m_t and v_t will update even if g is zero, but in the sparse implementation, m_t and v_t will not update in iterations where g is zero. Args: learning_rate: A Tensor or a floating point value. The learning rate. decay: Discounting factor for the history/coming gradient. momentum: A scalar tensor. epsilon: Small value to avoid zero denominator. use_locking: If True use locks for update operation. centered: If True, gradients are normalized by the estimated variance of the gradient; if False, by the uncentered second moment. Setting this to True may help with training, but is slightly more expensive in terms of computation and memory. Defaults to False. name: Optional name prefix for the operations created when applying gradients. Defaults to "RMSProp".
Construct a new RMSProp optimizer.
[ "Construct", "a", "new", "RMSProp", "optimizer", "." ]
def __init__(self, learning_rate, decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, centered=False, name="RMSProp"): """Construct a new RMSProp optimizer. Note that in the dense implementation of this algorithm, m_t and v_t will update even if g is zero, but in the sparse implementation, m_t and v_t will not update in iterations where g is zero. Args: learning_rate: A Tensor or a floating point value. The learning rate. decay: Discounting factor for the history/coming gradient. momentum: A scalar tensor. epsilon: Small value to avoid zero denominator. use_locking: If True use locks for update operation. centered: If True, gradients are normalized by the estimated variance of the gradient; if False, by the uncentered second moment. Setting this to True may help with training, but is slightly more expensive in terms of computation and memory. Defaults to False. name: Optional name prefix for the operations created when applying gradients. Defaults to "RMSProp". """ super(RMSPropOptimizer, self).__init__(use_locking, name) self._learning_rate = learning_rate self._decay = decay self._momentum = momentum self._epsilon = epsilon self._centered = centered # Tensors for learning rate and momentum. Created in _prepare. self._learning_rate_tensor = None self._decay_tensor = None self._momentum_tensor = None self._epsilon_tensor = None
[ "def", "__init__", "(", "self", ",", "learning_rate", ",", "decay", "=", "0.9", ",", "momentum", "=", "0.0", ",", "epsilon", "=", "1e-10", ",", "use_locking", "=", "False", ",", "centered", "=", "False", ",", "name", "=", "\"RMSProp\"", ")", ":", "super", "(", "RMSPropOptimizer", ",", "self", ")", ".", "__init__", "(", "use_locking", ",", "name", ")", "self", ".", "_learning_rate", "=", "learning_rate", "self", ".", "_decay", "=", "decay", "self", ".", "_momentum", "=", "momentum", "self", ".", "_epsilon", "=", "epsilon", "self", ".", "_centered", "=", "centered", "# Tensors for learning rate and momentum. Created in _prepare.", "self", ".", "_learning_rate_tensor", "=", "None", "self", ".", "_decay_tensor", "=", "None", "self", ".", "_momentum_tensor", "=", "None", "self", ".", "_epsilon_tensor", "=", "None" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/training/rmsprop.py#L59-L97
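A minimal TF1-style usage sketch for the constructor above; the toy loss exists only to make the snippet self-contained:

import tensorflow as tf
x = tf.Variable(5.0)
loss = tf.square(x - 3.0)                     # toy scalar loss
opt = tf.train.RMSPropOptimizer(learning_rate=0.01, decay=0.9, momentum=0.5)
train_op = opt.minimize(loss)                 # one RMSProp step per session run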
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/frame.py
python
DataFrame.__len__
(self)
return len(self.index)
Returns length of info axis, but here we use the index.
Returns length of info axis, but here we use the index.
[ "Returns", "length", "of", "info", "axis", "but", "here", "we", "use", "the", "index", "." ]
def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index)
[ "def", "__len__", "(", "self", ")", "->", "int", ":", "return", "len", "(", "self", ".", "index", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/frame.py#L1037-L1041
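For the pandas record above, len() counts rows, not columns:

import pandas as pd
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
len(df)  # 3 -- delegates to len(df.index), the number of rows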
microsoft/checkedc-clang
a173fefde5d7877b7750e7ce96dd08cf18baebf2
llvm/utils/lit/lit/util.py
python
listdir_files
(dirname, suffixes=None, exclude_filenames=None)
Yields files in a directory. Filenames that are not excluded by rules below are yielded one at a time, as basenames (i.e., without dirname). Files starting with '.' are always skipped. If 'suffixes' is not None, then only filenames ending with one of its members will be yielded. These can be extensions, like '.exe', or strings, like 'Test'. (It is a lexicographic check; so an empty sequence will yield nothing, but a single empty string will yield all filenames.) If 'exclude_filenames' is not None, then none of the file basenames in it will be yielded. If specified, the containers for 'suffixes' and 'exclude_filenames' must support membership checking for strs. Args: dirname: a directory path. suffixes: (optional) a sequence of strings (set, list, etc.). exclude_filenames: (optional) a sequence of strings. Yields: Filenames as returned by os.listdir (generally, str).
Yields files in a directory.
[ "Yields", "files", "in", "a", "directory", "." ]
def listdir_files(dirname, suffixes=None, exclude_filenames=None): """Yields files in a directory. Filenames that are not excluded by rules below are yielded one at a time, as basenames (i.e., without dirname). Files starting with '.' are always skipped. If 'suffixes' is not None, then only filenames ending with one of its members will be yielded. These can be extensions, like '.exe', or strings, like 'Test'. (It is a lexicographic check; so an empty sequence will yield nothing, but a single empty string will yield all filenames.) If 'exclude_filenames' is not None, then none of the file basenames in it will be yielded. If specified, the containers for 'suffixes' and 'exclude_filenames' must support membership checking for strs. Args: dirname: a directory path. suffixes: (optional) a sequence of strings (set, list, etc.). exclude_filenames: (optional) a sequence of strings. Yields: Filenames as returned by os.listdir (generally, str). """ if exclude_filenames is None: exclude_filenames = set() if suffixes is None: suffixes = {''} for filename in os.listdir(dirname): if (os.path.isdir(os.path.join(dirname, filename)) or filename.startswith('.') or filename in exclude_filenames or not any(filename.endswith(sfx) for sfx in suffixes)): continue yield filename
[ "def", "listdir_files", "(", "dirname", ",", "suffixes", "=", "None", ",", "exclude_filenames", "=", "None", ")", ":", "if", "exclude_filenames", "is", "None", ":", "exclude_filenames", "=", "set", "(", ")", "if", "suffixes", "is", "None", ":", "suffixes", "=", "{", "''", "}", "for", "filename", "in", "os", ".", "listdir", "(", "dirname", ")", ":", "if", "(", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", ")", "or", "filename", ".", "startswith", "(", "'.'", ")", "or", "filename", "in", "exclude_filenames", "or", "not", "any", "(", "filename", ".", "endswith", "(", "sfx", ")", "for", "sfx", "in", "suffixes", ")", ")", ":", "continue", "yield", "filename" ]
https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/llvm/utils/lit/lit/util.py#L167-L205
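One way to call listdir_files from the record above; the directory and suffix set are illustrative:

# Yield *.py basenames in the current directory, skipping dotfiles,
# subdirectories, and an excluded filename:
for name in listdir_files('.', suffixes={'.py'}, exclude_filenames={'setup.py'}):
    print(name)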
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/collections/__init__.py
python
OrderedDict.__setitem__
(self, key, value, dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link)
od.__setitem__(i, y) <==> od[i]=y
od.__setitem__(i, y) <==> od[i]=y
[ "od", ".", "__setitem__", "(", "i", "y", ")", "<", "==", ">", "od", "[", "i", "]", "=", "y" ]
def __setitem__(self, key, value, dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link at the end of the linked list, # and the inherited dictionary is updated with the new key/value pair. if key not in self: self.__map[key] = link = Link() root = self.__root last = root.prev link.prev, link.next, link.key = last, root, key last.next = link root.prev = proxy(link) dict_setitem(self, key, value)
[ "def", "__setitem__", "(", "self", ",", "key", ",", "value", ",", "dict_setitem", "=", "dict", ".", "__setitem__", ",", "proxy", "=", "_proxy", ",", "Link", "=", "_Link", ")", ":", "# Setting a new item creates a new link at the end of the linked list,", "# and the inherited dictionary is updated with the new key/value pair.", "if", "key", "not", "in", "self", ":", "self", ".", "__map", "[", "key", "]", "=", "link", "=", "Link", "(", ")", "root", "=", "self", ".", "__root", "last", "=", "root", ".", "prev", "link", ".", "prev", ",", "link", ".", "next", ",", "link", ".", "key", "=", "last", ",", "root", ",", "key", "last", ".", "next", "=", "link", "root", ".", "prev", "=", "proxy", "(", "link", ")", "dict_setitem", "(", "self", ",", "key", ",", "value", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/collections/__init__.py#L115-L127
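What the linked-list bookkeeping above provides, as a tiny sketch:

from collections import OrderedDict
od = OrderedDict()
od['b'] = 1        # new key: a link is appended at the end of the list
od['a'] = 2
od['b'] = 3        # existing key: value updated, position unchanged
list(od)           # ['b', 'a']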
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/prompt-toolkit/py2/prompt_toolkit/terminal/win32_output.py
python
ColorLookupTable.lookup_bg_color
(self, bg_color)
Return the color for use in the `windll.kernel32.SetConsoleTextAttribute` API call. :param bg_color: Background as text. E.g. 'ffffff' or 'red'
Return the color for use in the `windll.kernel32.SetConsoleTextAttribute` API call.
[ "Return", "the", "color", "for", "use", "in", "the", "windll", ".", "kernel32", ".", "SetConsoleTextAttribute", "API", "call", "." ]
def lookup_bg_color(self, bg_color): """ Return the color for use in the `windll.kernel32.SetConsoleTextAttribute` API call. :param bg_color: Background as text. E.g. 'ffffff' or 'red' """ # Background. if bg_color in BG_ANSI_COLORS: return BG_ANSI_COLORS[bg_color] else: return self._color_indexes(bg_color)[1]
[ "def", "lookup_bg_color", "(", "self", ",", "bg_color", ")", ":", "# Background.", "if", "bg_color", "in", "BG_ANSI_COLORS", ":", "return", "BG_ANSI_COLORS", "[", "bg_color", "]", "else", ":", "return", "self", ".", "_color_indexes", "(", "bg_color", ")", "[", "1", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py2/prompt_toolkit/terminal/win32_output.py#L545-L556
vgteam/vg
cf4d516a5e9ee5163c783e4437ddf16b18a4b561
scripts/giraffe-facts.py
python
Table.compute_merges
(self, merges=None)
return widths
Given a list of cell counts to merge horizontally, compute new widths from self.widths. If merges is None, use self.widths.
Given a list of cell counts to merge horizontally, compute new widths from self.widths. If merges is None, use self.widths.
[ "Given", "a", "list", "of", "cell", "counts", "to", "merge", "horizontally", "compute", "new", "widths", "from", "self", ".", "widths", ".", "If", "merges", "is", "None", "use", "self", ".", "widths", "." ]
def compute_merges(self, merges=None): """ Given a list of cell counts to merge horizontally, compute new widths from self.widths. If merges is None, use self.widths. """ widths = self.widths if merges is not None: new_widths = [] width_cursor = 0 for merge in merges: # Compute a new column by merging the given number of old columns. merged_width = 0 for i in range(merge): # Take the widths of all cells merged_width += widths[width_cursor] width_cursor += 1 # Take the separating columns between cells merged_width += merge - 1 new_widths.append(merged_width) while width_cursor < len(widths): # Copy any unmerged columns new_widths.append(widths[width_cursor]) width_cursor += 1 widths = new_widths return widths
[ "def", "compute_merges", "(", "self", ",", "merges", "=", "None", ")", ":", "widths", "=", "self", ".", "widths", "if", "merges", "is", "not", "None", ":", "new_widths", "=", "[", "]", "width_cursor", "=", "0", "for", "merge", "in", "merges", ":", "# Compute a new column by merging the given number of old columns.", "merged_width", "=", "0", "for", "i", "in", "range", "(", "merge", ")", ":", "# Take the widths of all cells", "merged_width", "+=", "widths", "[", "width_cursor", "]", "width_cursor", "+=", "1", "# Take the separating columns between cells", "merged_width", "+=", "merge", "-", "1", "new_widths", ".", "append", "(", "merged_width", ")", "while", "width_cursor", "<", "len", "(", "widths", ")", ":", "# Copy any unmerged columns", "new_widths", ".", "append", "(", "widths", "[", "width_cursor", "]", ")", "width_cursor", "+=", "1", "widths", "=", "new_widths", "return", "widths" ]
https://github.com/vgteam/vg/blob/cf4d516a5e9ee5163c783e4437ddf16b18a4b561/scripts/giraffe-facts.py#L504-L532
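A worked example of the width arithmetic above (numbers made up); merging n columns sums their n widths plus n-1 one-character separator columns:

# widths == [3, 5, 2, 4], merges == [2, 2]
#   first merge:  3 + 5 + 1 -> 9
#   second merge: 2 + 4 + 1 -> 7
# compute_merges([2, 2]) -> [9, 7]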
google/flatbuffers
b3006913369e0a7550795e477011ac5bebb93497
python/flatbuffers/table.py
python
Table.Indirect
(self, off)
return off + encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
Indirect retrieves the relative offset stored at `offset`.
Indirect retrieves the relative offset stored at `offset`.
[ "Indirect", "retrieves", "the", "relative", "offset", "stored", "at", "offset", "." ]
def Indirect(self, off): """Indirect retrieves the relative offset stored at `offset`.""" N.enforce_number(off, N.UOffsetTFlags) return off + encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
[ "def", "Indirect", "(", "self", ",", "off", ")", ":", "N", ".", "enforce_number", "(", "off", ",", "N", ".", "UOffsetTFlags", ")", "return", "off", "+", "encode", ".", "Get", "(", "N", ".", "UOffsetTFlags", ".", "packer_type", ",", "self", ".", "Bytes", ",", "off", ")" ]
https://github.com/google/flatbuffers/blob/b3006913369e0a7550795e477011ac5bebb93497/python/flatbuffers/table.py#L43-L46
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/protobuf/python/google/protobuf/text_format.py
python
_Parser.ParseLines
(self, lines, message)
return message
Parses a text representation of a protocol message into a message.
Parses a text representation of a protocol message into a message.
[ "Parses", "a", "text", "representation", "of", "a", "protocol", "message", "into", "a", "message", "." ]
def ParseLines(self, lines, message): """Parses a text representation of a protocol message into a message.""" self._allow_multiple_scalars = False self._ParseOrMerge(lines, message) return message
[ "def", "ParseLines", "(", "self", ",", "lines", ",", "message", ")", ":", "self", ".", "_allow_multiple_scalars", "=", "False", "self", ".", "_ParseOrMerge", "(", "lines", ",", "message", ")", "return", "message" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/protobuf/python/google/protobuf/text_format.py#L427-L431
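A hedged sketch of how this internal entry point is normally reached; my_pb2.MyMessage is a hypothetical generated message class, and text_format.Parse is the public wrapper:

from google.protobuf import text_format
msg = my_pb2.MyMessage()                  # hypothetical generated message class
text_format.Parse('some_field: 1', msg)   # builds a _Parser and feeds it the lines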
ricardoquesada/Spidermonkey
4a75ea2543408bd1b2c515aa95901523eeef7858
dom/bindings/parser/WebIDL.py
python
Parser.p_Inheritance
(self, p)
Inheritance : COLON ScopedName
Inheritance : COLON ScopedName
[ "Inheritance", ":", "COLON", "ScopedName" ]
def p_Inheritance(self, p): """ Inheritance : COLON ScopedName """ p[0] = IDLIdentifierPlaceholder(self.getLocation(p, 2), p[2])
[ "def", "p_Inheritance", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "IDLIdentifierPlaceholder", "(", "self", ".", "getLocation", "(", "p", ",", "2", ")", ",", "p", "[", "2", "]", ")" ]
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/dom/bindings/parser/WebIDL.py#L4372-L4376
gnuradio/gnuradio
09c3c4fa4bfb1a02caac74cb5334dfe065391e3b
docs/doxygen/other/doxypy.py
python
Doxypy.makeCommentBlock
(self)
return l
Indents the current comment block with respect to the current indentation level. @returns a list of indented comment lines
Indents the current comment block with respect to the current indentation level.
[ "Indents", "the", "current", "comment", "block", "with", "respect", "to", "the", "current", "indentation", "level", "." ]
def makeCommentBlock(self): """Indents the current comment block with respect to the current indentation level. @returns a list of indented comment lines """ doxyStart = "##" commentLines = self.comment commentLines = ["%s# %s" % (self.indent, x) for x in commentLines] l = [self.indent + doxyStart] l.extend(commentLines) return l
[ "def", "makeCommentBlock", "(", "self", ")", ":", "doxyStart", "=", "\"##\"", "commentLines", "=", "self", ".", "comment", "commentLines", "=", "[", "\"%s# %s\"", "%", "(", "self", ".", "indent", ",", "x", ")", "for", "x", "in", "commentLines", "]", "l", "=", "[", "self", ".", "indent", "+", "doxyStart", "]", "l", ".", "extend", "(", "commentLines", ")", "return", "l" ]
https://github.com/gnuradio/gnuradio/blob/09c3c4fa4bfb1a02caac74cb5334dfe065391e3b/docs/doxygen/other/doxypy.py#L359-L372
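A quick worked example of the indentation logic above (values illustrative):

# With self.indent == '    ' and self.comment == ['Adds a and b.']:
# makeCommentBlock() -> ['    ##', '    # Adds a and b.']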
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/models/embedding/word2vec_optimized.py
python
Word2Vec.train
(self)
Train the model.
Train the model.
[ "Train", "the", "model", "." ]
def train(self): """Train the model.""" opts = self._options initial_epoch, initial_words = self._session.run([self._epoch, self._words]) workers = [] for _ in xrange(opts.concurrent_steps): t = threading.Thread(target=self._train_thread_body) t.start() workers.append(t) last_words, last_time = initial_words, time.time() while True: time.sleep(5) # Reports our progress once a while. (epoch, step, words, lr) = self._session.run([self._epoch, self.step, self._words, self._lr]) now = time.time() last_words, last_time, rate = words, now, (words - last_words) / ( now - last_time) print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step, lr, rate), end="") sys.stdout.flush() if epoch != initial_epoch: break for t in workers: t.join()
[ "def", "train", "(", "self", ")", ":", "opts", "=", "self", ".", "_options", "initial_epoch", ",", "initial_words", "=", "self", ".", "_session", ".", "run", "(", "[", "self", ".", "_epoch", ",", "self", ".", "_words", "]", ")", "workers", "=", "[", "]", "for", "_", "in", "xrange", "(", "opts", ".", "concurrent_steps", ")", ":", "t", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_train_thread_body", ")", "t", ".", "start", "(", ")", "workers", ".", "append", "(", "t", ")", "last_words", ",", "last_time", "=", "initial_words", ",", "time", ".", "time", "(", ")", "while", "True", ":", "time", ".", "sleep", "(", "5", ")", "# Reports our progress once a while.", "(", "epoch", ",", "step", ",", "words", ",", "lr", ")", "=", "self", ".", "_session", ".", "run", "(", "[", "self", ".", "_epoch", ",", "self", ".", "step", ",", "self", ".", "_words", ",", "self", ".", "_lr", "]", ")", "now", "=", "time", ".", "time", "(", ")", "last_words", ",", "last_time", ",", "rate", "=", "words", ",", "now", ",", "(", "words", "-", "last_words", ")", "/", "(", "now", "-", "last_time", ")", "print", "(", "\"Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\\r\"", "%", "(", "epoch", ",", "step", ",", "lr", ",", "rate", ")", ",", "end", "=", "\"\"", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "if", "epoch", "!=", "initial_epoch", ":", "break", "for", "t", "in", "workers", ":", "t", ".", "join", "(", ")" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/models/embedding/word2vec_optimized.py#L310-L338
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/idlelib/configHandler.py
python
IdleUserConfParser.RemoveFile
(self)
Removes the user config file from disk if it exists.
Removes the user config file from disk if it exists.
[ "Removes", "the", "user", "config", "file", "from", "disk", "if", "it", "exists", "." ]
def RemoveFile(self): """ Removes the user config file from disk if it exists. """ if os.path.exists(self.file): os.remove(self.file)
[ "def", "RemoveFile", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "file", ")", ":", "os", ".", "remove", "(", "self", ".", "file", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/idlelib/configHandler.py#L127-L132
SequoiaDB/SequoiaDB
2894ed7e5bd6fe57330afc900cf76d0ff0df9f64
tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py
python
xmlNs.xpathNodeSetFreeNs
(self)
Namespace nodes in libxml don't match the XPath semantic. In a node set the namespace nodes are duplicated and the next pointer is set to the parent node in the XPath semantic. Check if such a node needs to be freed
Namespace nodes in libxml don't match the XPath semantic. In a node set the namespace nodes are duplicated and the next pointer is set to the parent node in the XPath semantic. Check if such a node needs to be freed
[ "Namespace", "nodes", "in", "libxml", "don", "t", "match", "the", "XPath", "semantic", ".", "In", "a", "node", "set", "the", "namespace", "nodes", "are", "duplicated", "and", "the", "next", "pointer", "is", "set", "to", "the", "parent", "node", "in", "the", "XPath", "semantic", ".", "Check", "if", "such", "a", "node", "needs", "to", "be", "freed" ]
def xpathNodeSetFreeNs(self): """Namespace nodes in libxml don't match the XPath semantic. In a node set the namespace nodes are duplicated and the next pointer is set to the parent node in the XPath semantic. Check if such a node needs to be freed """ libxml2mod.xmlXPathNodeSetFreeNs(self._o)
[ "def", "xpathNodeSetFreeNs", "(", "self", ")", ":", "libxml2mod", ".", "xmlXPathNodeSetFreeNs", "(", "self", ".", "_o", ")" ]
https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L5969-L5974
rsummers11/CADLab
976ed959a0b5208bb4173127a7ef732ac73a9b6f
lesion_detector_3DCE/rcnn/pycocotools/coco.py
python
COCO.loadCats
(self, ids=[])
Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects
Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects
[ "Load", "cats", "with", "the", "specified", "ids", ".", ":", "param", "ids", "(", "int", "array", ")", ":", "integer", "ids", "specifying", "cats", ":", "return", ":", "cats", "(", "object", "array", ")", ":", "loaded", "cat", "objects" ]
def loadCats(self, ids=[]): """ Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if type(ids) == list: return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]]
[ "def", "loadCats", "(", "self", ",", "ids", "=", "[", "]", ")", ":", "if", "type", "(", "ids", ")", "==", "list", ":", "return", "[", "self", ".", "cats", "[", "id", "]", "for", "id", "in", "ids", "]", "elif", "type", "(", "ids", ")", "==", "int", ":", "return", "[", "self", ".", "cats", "[", "ids", "]", "]" ]
https://github.com/rsummers11/CADLab/blob/976ed959a0b5208bb4173127a7ef732ac73a9b6f/lesion_detector_3DCE/rcnn/pycocotools/coco.py#L206-L215
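Typical use of loadCats together with getCatIds; the annotation path is hypothetical:

coco = COCO('annotations/instances_val2017.json')   # hypothetical path
cats = coco.loadCats(coco.getCatIds())
names = [c['name'] for c in cats]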
facebook/openr
ed38bdfd6bf290084bfab4821b59f83e7b59315d
build/fbcode_builder/getdeps/subcmd.py
python
cmd
(name, help=None, cmd_table=CmdTable)
return wrapper
@cmd() is a decorator that can be used to help define Subcmd instances Example usage: @subcmd('list', 'Show the result list') class ListCmd(Subcmd): def run(self, args): # Perform the command actions here... pass
@cmd() is a decorator that can be used to help define Subcmd instances
[ "@cmd", "()", "is", "a", "decorator", "that", "can", "be", "used", "to", "help", "define", "Subcmd", "instances" ]
def cmd(name, help=None, cmd_table=CmdTable): """ @cmd() is a decorator that can be used to help define Subcmd instances Example usage: @subcmd('list', 'Show the result list') class ListCmd(Subcmd): def run(self, args): # Perform the command actions here... pass """ def wrapper(cls): class SubclassedCmd(cls): NAME = name HELP = help cmd_table.append(SubclassedCmd) return SubclassedCmd return wrapper
[ "def", "cmd", "(", "name", ",", "help", "=", "None", ",", "cmd_table", "=", "CmdTable", ")", ":", "def", "wrapper", "(", "cls", ")", ":", "class", "SubclassedCmd", "(", "cls", ")", ":", "NAME", "=", "name", "HELP", "=", "help", "cmd_table", ".", "append", "(", "SubclassedCmd", ")", "return", "SubclassedCmd", "return", "wrapper" ]
https://github.com/facebook/openr/blob/ed38bdfd6bf290084bfab4821b59f83e7b59315d/build/fbcode_builder/getdeps/subcmd.py#L35-L56
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/walking-robot-simulation-ii.py
python
Robot2.getPos
(self)
return self.__getPosDir()[0]
:rtype: List[int]
:rtype: List[int]
[ ":", "rtype", ":", "List", "[", "int", "]" ]
def getPos(self): """ :rtype: List[int] """ return self.__getPosDir()[0]
[ "def", "getPos", "(", "self", ")", ":", "return", "self", ".", "__getPosDir", "(", ")", "[", "0", "]" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/walking-robot-simulation-ii.py#L75-L79
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/prompt-toolkit/py3/prompt_toolkit/shortcuts/progress_bar/formatters.py
python
_hue_to_rgb
(hue: float)
return [ (255, t, 0), (q, 255, 0), (0, 255, t), (0, q, 255), (t, 0, 255), (255, 0, q), ][i]
Take hue between 0 and 1, return (r, g, b).
Take hue between 0 and 1, return (r, g, b).
[ "Take", "hue", "between", "0", "and", "1", "return", "(", "r", "g", "b", ")", "." ]
def _hue_to_rgb(hue: float) -> Tuple[int, int, int]: """ Take hue between 0 and 1, return (r, g, b). """ i = int(hue * 6.0) f = (hue * 6.0) - i q = int(255 * (1.0 - f)) t = int(255 * (1.0 - (1.0 - f))) i %= 6 return [ (255, t, 0), (q, 255, 0), (0, 255, t), (0, q, 255), (t, 0, 255), (255, 0, q), ][i]
[ "def", "_hue_to_rgb", "(", "hue", ":", "float", ")", "->", "Tuple", "[", "int", ",", "int", ",", "int", "]", ":", "i", "=", "int", "(", "hue", "*", "6.0", ")", "f", "=", "(", "hue", "*", "6.0", ")", "-", "i", "q", "=", "int", "(", "255", "*", "(", "1.0", "-", "f", ")", ")", "t", "=", "int", "(", "255", "*", "(", "1.0", "-", "(", "1.0", "-", "f", ")", ")", ")", "i", "%=", "6", "return", "[", "(", "255", ",", "t", ",", "0", ")", ",", "(", "q", ",", "255", ",", "0", ")", ",", "(", "0", ",", "255", ",", "t", ")", ",", "(", "0", ",", "q", ",", "255", ")", ",", "(", "t", ",", "0", ",", "255", ")", ",", "(", "255", ",", "0", ",", "q", ")", ",", "]", "[", "i", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py3/prompt_toolkit/shortcuts/progress_bar/formatters.py#L361-L380
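Spot-checking the hue wheel above at the three primaries (hue in [0, 1]):

# _hue_to_rgb(0.0)       -> (255, 0, 0)  red
# _hue_to_rgb(1.0 / 3.0) -> (0, 255, 0)  green
# _hue_to_rgb(2.0 / 3.0) -> (0, 0, 255)  blue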
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/recipes.py
python
random_permutation
(iterable, r=None)
return tuple(sample(pool, r))
Return a random *r* length permutation of the elements in *iterable*. If *r* is not specified or is ``None``, then *r* defaults to the length of *iterable*. >>> random_permutation(range(5)) # doctest:+SKIP (3, 4, 0, 1, 2) This is equivalent to taking a random selection from ``itertools.permutations(iterable, r)``.
Return a random *r* length permutation of the elements in *iterable*.
[ "Return", "a", "random", "*", "r", "*", "length", "permutation", "of", "the", "elements", "in", "*", "iterable", "*", "." ]
def random_permutation(iterable, r=None): """Return a random *r* length permutation of the elements in *iterable*. If *r* is not specified or is ``None``, then *r* defaults to the length of *iterable*. >>> random_permutation(range(5)) # doctest:+SKIP (3, 4, 0, 1, 2) This is equivalent to taking a random selection from ``itertools.permutations(iterable, r)``. """ pool = tuple(iterable) r = len(pool) if r is None else r return tuple(sample(pool, r))
[ "def", "random_permutation", "(", "iterable", ",", "r", "=", "None", ")", ":", "pool", "=", "tuple", "(", "iterable", ")", "r", "=", "len", "(", "pool", ")", "if", "r", "is", "None", "else", "r", "return", "tuple", "(", "sample", "(", "pool", ",", "r", ")", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/recipes.py#L495-L510
llvm-mirror/lldb
d01083a850f577b85501a0902b52fd0930de72c7
utils/vim-lldb/python-vim-lldb/vim_panes.py
python
VimPane.get_highlights
(self)
return {}
Subclasses implement this to provide pane highlights. This function is expected to return a map of: { highlight_name ==> [line_number, ...], ... }
Subclasses implement this to provide pane highlights. This function is expected to return a map of: { highlight_name ==> [line_number, ...], ... }
[ "Subclasses", "implement", "this", "to", "provide", "pane", "highlights", ".", "This", "function", "is", "expected", "to", "return", "a", "map", "of", ":", "{", "highlight_name", "==", ">", "[", "line_number", "...", "]", "...", "}" ]
def get_highlights(self): """ Subclasses implement this to provide pane highlights. This function is expected to return a map of: { highlight_name ==> [line_number, ...], ... } """ return {}
[ "def", "get_highlights", "(", "self", ")", ":", "return", "{", "}" ]
https://github.com/llvm-mirror/lldb/blob/d01083a850f577b85501a0902b52fd0930de72c7/utils/vim-lldb/python-vim-lldb/vim_panes.py#L393-L398
intel/llvm
e6d0547e9d99b5a56430c4749f6c7e328bf221ab
lldb/utils/lui/lldbutil.py
python
get_parent_frame
(frame)
return None
Returns the parent frame of the input frame object; None if not available.
Returns the parent frame of the input frame object; None if not available.
[ "Returns", "the", "parent", "frame", "of", "the", "input", "frame", "object", ";", "None", "if", "not", "available", "." ]
def get_parent_frame(frame): """ Returns the parent frame of the input frame object; None if not available. """ thread = frame.GetThread() parent_found = False for f in thread: if parent_found: return f if f.GetFrameID() == frame.GetFrameID(): parent_found = True # If we reach here, no parent has been found, return None. return None
[ "def", "get_parent_frame", "(", "frame", ")", ":", "thread", "=", "frame", ".", "GetThread", "(", ")", "parent_found", "=", "False", "for", "f", "in", "thread", ":", "if", "parent_found", ":", "return", "f", "if", "f", ".", "GetFrameID", "(", ")", "==", "frame", ".", "GetFrameID", "(", ")", ":", "parent_found", "=", "True", "# If we reach here, no parent has been found, return None.", "return", "None" ]
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/lldb/utils/lui/lldbutil.py#L837-L850
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_controls.py
python
Notebook.SendPageChangedEvent
(*args, **kwargs)
return _controls_.Notebook_SendPageChangedEvent(*args, **kwargs)
SendPageChangedEvent(self, int nPageOld, int nPageNew=-1)
SendPageChangedEvent(self, int nPageOld, int nPageNew=-1)
[ "SendPageChangedEvent", "(", "self", "int", "nPageOld", "int", "nPageNew", "=", "-", "1", ")" ]
def SendPageChangedEvent(*args, **kwargs): """SendPageChangedEvent(self, int nPageOld, int nPageNew=-1)""" return _controls_.Notebook_SendPageChangedEvent(*args, **kwargs)
[ "def", "SendPageChangedEvent", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "Notebook_SendPageChangedEvent", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L3138-L3140
mapnik/mapnik
f3da900c355e1d15059c4a91b00203dcc9d9f0ef
scons/scons-local-4.1.0/SCons/Node/FS.py
python
File.Dir
(self, name, create=True)
return self.dir.Dir(name, create=create)
Create a directory node named 'name' relative to the directory of this file.
Create a directory node named 'name' relative to the directory of this file.
[ "Create", "a", "directory", "node", "named", "name", "relative", "to", "the", "directory", "of", "this", "file", "." ]
def Dir(self, name, create=True): """Create a directory node named 'name' relative to the directory of this file.""" return self.dir.Dir(name, create=create)
[ "def", "Dir", "(", "self", ",", "name", ",", "create", "=", "True", ")", ":", "return", "self", ".", "dir", ".", "Dir", "(", "name", ",", "create", "=", "create", ")" ]
https://github.com/mapnik/mapnik/blob/f3da900c355e1d15059c4a91b00203dcc9d9f0ef/scons/scons-local-4.1.0/SCons/Node/FS.py#L2649-L2652
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_controls.py
python
PreStaticText
(*args, **kwargs)
return val
PreStaticText() -> StaticText
PreStaticText() -> StaticText
[ "PreStaticText", "()", "-", ">", "StaticText" ]
def PreStaticText(*args, **kwargs): """PreStaticText() -> StaticText""" val = _controls_.new_PreStaticText(*args, **kwargs) return val
[ "def", "PreStaticText", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "val", "=", "_controls_", ".", "new_PreStaticText", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "val" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L1043-L1046
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/math_ops.py
python
reduce_std
(input_tensor, axis=None, keepdims=False, name=None)
Computes the standard deviation of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1., 2.], [3., 4.]]) tf.reduce_std(x) # 1.1180339887498949 tf.reduce_std(x, 0) # [1., 1.] tf.reduce_std(x, 1) # [0.5, 0.5] ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name scope for the associated operations (optional). Returns: The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.std Please note that `np.std` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`. On the other hand, `tf.reduce_std` has an aggressive type inference from `input_tensor`, @end_compatibility
Computes the standard deviation of elements across dimensions of a tensor.
[ "Computes", "the", "standard", "deviation", "of", "elements", "across", "dimensions", "of", "a", "tensor", "." ]
def reduce_std(input_tensor, axis=None, keepdims=False, name=None): """Computes the standard deviation of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1., 2.], [3., 4.]]) tf.reduce_std(x) # 1.1180339887498949 tf.reduce_std(x, 0) # [1., 1.] tf.reduce_std(x, 1) # [0.5, 0.5] ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name scope for the associated operations (optional). Returns: The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.std Please note that `np.std` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`. On the other hand, `tf.reduce_std` has an aggressive type inference from `input_tensor`, @end_compatibility """ name = name if name else "reduce_std" with ops.name_scope(name): variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims) return gen_math_ops.sqrt(variance)
[ "def", "reduce_std", "(", "input_tensor", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ",", "name", "=", "None", ")", ":", "name", "=", "name", "if", "name", "else", "\"reduce_std\"", "with", "ops", ".", "name_scope", "(", "name", ")", ":", "variance", "=", "reduce_variance", "(", "input_tensor", ",", "axis", "=", "axis", ",", "keepdims", "=", "keepdims", ")", "return", "gen_math_ops", ".", "sqrt", "(", "variance", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/math_ops.py#L1922-L1964
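A hedged usage sketch for the record above (not part of the dataset): it assumes a TensorFlow install where this op is exposed publicly as `tf.math.reduce_std` and eager execution is enabled, rather than the vai_q fork the record was extracted from.

```python
# Minimal reduce_std sketch; assumes eager TensorFlow exposing tf.math.reduce_std.
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])

# Full reduction over all elements: sqrt of the population variance.
print(tf.math.reduce_std(x).numpy())                          # ~1.1180339
# keepdims=True retains each reduced axis with length 1.
print(tf.math.reduce_std(x, axis=1, keepdims=True).numpy())   # [[0.5] [0.5]]
```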
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/ma/core.py
python
_check_fill_value
(fill_value, ndtype)
return np.array(fill_value)
Private function validating the given `fill_value` for the given dtype. If fill_value is None, it is set to the default corresponding to the dtype. If fill_value is not None, its value is forced to the given dtype. The result is always a 0d array.
Private function validating the given `fill_value` for the given dtype.
[ "Private", "function", "validating", "the", "given", "fill_value", "for", "the", "given", "dtype", "." ]
def _check_fill_value(fill_value, ndtype): """ Private function validating the given `fill_value` for the given dtype. If fill_value is None, it is set to the default corresponding to the dtype. If fill_value is not None, its value is forced to the given dtype. The result is always a 0d array. """ ndtype = np.dtype(ndtype) if fill_value is None: fill_value = default_fill_value(ndtype) elif ndtype.names is not None: if isinstance(fill_value, (ndarray, np.void)): try: fill_value = np.array(fill_value, copy=False, dtype=ndtype) except ValueError as e: err_msg = "Unable to transform %s to dtype %s" raise ValueError(err_msg % (fill_value, ndtype)) from e else: fill_value = np.asarray(fill_value, dtype=object) fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype) else: if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): # Note this check doesn't work if fill_value is not a scalar err_msg = "Cannot set fill value of string with array of dtype %s" raise TypeError(err_msg % ndtype) else: # In case we want to convert 1e20 to int. # Also in case of converting string arrays. try: fill_value = np.array(fill_value, copy=False, dtype=ndtype) except (OverflowError, ValueError) as e: # Raise TypeError instead of OverflowError or ValueError. # OverflowError is seldom used, and the real problem here is # that the passed fill_value is not compatible with the ndtype. err_msg = "Cannot convert fill_value %s to dtype %s" raise TypeError(err_msg % (fill_value, ndtype)) from e return np.array(fill_value)
[ "def", "_check_fill_value", "(", "fill_value", ",", "ndtype", ")", ":", "ndtype", "=", "np", ".", "dtype", "(", "ndtype", ")", "if", "fill_value", "is", "None", ":", "fill_value", "=", "default_fill_value", "(", "ndtype", ")", "elif", "ndtype", ".", "names", "is", "not", "None", ":", "if", "isinstance", "(", "fill_value", ",", "(", "ndarray", ",", "np", ".", "void", ")", ")", ":", "try", ":", "fill_value", "=", "np", ".", "array", "(", "fill_value", ",", "copy", "=", "False", ",", "dtype", "=", "ndtype", ")", "except", "ValueError", "as", "e", ":", "err_msg", "=", "\"Unable to transform %s to dtype %s\"", "raise", "ValueError", "(", "err_msg", "%", "(", "fill_value", ",", "ndtype", ")", ")", "from", "e", "else", ":", "fill_value", "=", "np", ".", "asarray", "(", "fill_value", ",", "dtype", "=", "object", ")", "fill_value", "=", "np", ".", "array", "(", "_recursive_set_fill_value", "(", "fill_value", ",", "ndtype", ")", ",", "dtype", "=", "ndtype", ")", "else", ":", "if", "isinstance", "(", "fill_value", ",", "str", ")", "and", "(", "ndtype", ".", "char", "not", "in", "'OSVU'", ")", ":", "# Note this check doesn't work if fill_value is not a scalar", "err_msg", "=", "\"Cannot set fill value of string with array of dtype %s\"", "raise", "TypeError", "(", "err_msg", "%", "ndtype", ")", "else", ":", "# In case we want to convert 1e20 to int.", "# Also in case of converting string arrays.", "try", ":", "fill_value", "=", "np", ".", "array", "(", "fill_value", ",", "copy", "=", "False", ",", "dtype", "=", "ndtype", ")", "except", "(", "OverflowError", ",", "ValueError", ")", "as", "e", ":", "# Raise TypeError instead of OverflowError or ValueError.", "# OverflowError is seldom used, and the real problem here is", "# that the passed fill_value is not compatible with the ndtype.", "err_msg", "=", "\"Cannot convert fill_value %s to dtype %s\"", "raise", "TypeError", "(", "err_msg", "%", "(", "fill_value", ",", "ndtype", ")", ")", "from", "e", "return", "np", ".", "array", "(", "fill_value", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/ma/core.py#L428-L469
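The helper above is private to `numpy.ma.core`; a small sketch (my own, not from the record) of the validation behaviour it enforces, exercised through the public masked-array API:

```python
# Sketch of the fill-value coercion performed by numpy.ma internals.
import numpy as np

a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
print(a.fill_value)        # dtype default (999999 for the default int dtype)

a.fill_value = 7           # coerced to the array's dtype (stored as a 0d array)
print(a.filled())          # [1 7 3]

try:
    a.fill_value = "oops"  # string fill for a non-string dtype -> TypeError
except TypeError as e:
    print(e)
```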
OpenVR-Advanced-Settings/OpenVR-AdvancedSettings
522ba46ac6bcff8e82907715f7f0af7abbbe1e7b
build_scripts/win/build.py
python
build
()
Runs: 'vcvarsall.bat', a Visual Studio file to set up the environment for QMAKE. QMAKE, creates the makefiles. jom/nmake, builds the makefiles.
Runs: 'vcvarsall.bat', a Visual Studio file to set up the environment for QMAKE. QMAKE, creates the makefiles. jom/nmake, builds the makefiles.
[ "Runs", ":", "vcvarsall", ".", "bat", "a", "Visual", "Studio", "file", "to", "set", "up", "the", "environment", "for", "QMAKE", ".", "QMAKE", "creates", "the", "makefiles", ".", "jom", "/", "nmake", "builds", "the", "makefiles", "." ]
def build(): """ Runs: 'vcvarsall.bat', a Visual Studio file to set up the environment for QMAKE. QMAKE, creates the makefiles. jom/nmake, builds the makefiles. """ set_current_activity("BUILD") set_dirs() COMPILE_MODE = "" COMPILER = "" say("Attempting to build version: " + VERSION_STRING) say("Testing if all required build environment variables are set:") QT_LOC = find_qt_path() VS_LOC = get_required_env_var_path(VS_LOC_VAR_NAME, VS_LOC_DEFAULT) if is_env_var_set(BUILD_CLANG_VAR_NAME): say(f"{BUILD_CLANG_VAR_NAME} defined. Building for win32-clang-msvc.") get_required_env_var_path(LLVM_LOC_VAR_NAME, LLVM_LOC_DEFAULT) COMPILER = "win32-clang-msvc" else: say(f"{BUILD_CLANG_VAR_NAME} not defined. Building for msvc.") COMPILER = "win32-msvc" say("All required build environment values are set.") #Otherwise qmake gets confused add_line_to_run_bat_file("cd " + get_project_dir()) say("Adding 'vcvarsall.bat' to batch file.") add_line_to_run_bat_file("@ECHO Setting up environment with Visual Studio 'vcvarsall.bat':") add_line_to_run_bat_file("call " + '"' + VS_LOC + '"' + " " + PLATFORM_TARGET) add_error_handling_line_to_bat_file() add_line_to_run_bat_file("@ECHO Environment set up.") say("'vcvarsall.bat' added to batch file.") QMAKE_LOC = QT_LOC + r"\qmake.exe" if is_env_var_set(BUILD_VSPROJ_VAR_NAME): say(f"{BUILD_VSPROJ_VAR_NAME} defined. Building Visual Studio project files.") add_line_to_run_bat_file("@ECHO Running Visual Studio project file building:") add_line_to_run_bat_file('"' + QMAKE_LOC + '"' + " -tp vc") add_error_handling_line_to_bat_file() add_line_to_run_bat_file("@ECHO Done building Visual Studio project files.") else: say(f"{BUILD_VSPROJ_VAR_NAME} not defined. Not building Visual Studio project files.") if is_env_var_set(BUILD_DEBUG_VAR_NAME): COMPILE_MODE = "debug" say(f"{BUILD_DEBUG_VAR_NAME} defined. Building '{COMPILE_MODE}' version.") else: COMPILE_MODE = "release" say(f"{BUILD_DEBUG_VAR_NAME} not defined. Building '{COMPILE_MODE}' version.") add_line_to_run_bat_file("@ECHO Running qmake:") if is_env_var_set("OVRAS_WARNINGS_AS_ERRORS"): add_line_to_run_bat_file('"' + QMAKE_LOC + '"' + " -spec " + COMPILER + " CONFIG+=X86_64 " + "CONFIG+=" + COMPILE_MODE + " CONFIG+=warnings_as_errors") else: add_line_to_run_bat_file('"' + QMAKE_LOC + '"' + " -spec " + COMPILER + " CONFIG+=X86_64 " + "CONFIG+=" + COMPILE_MODE) add_error_handling_line_to_bat_file() add_line_to_run_bat_file("@ECHO qmake done.") if is_env_var_set(JOM_LOC_VAR_NAME): JOM_LOC = os.getenv(JOM_LOC_VAR_NAME) else: say(f"{JOM_LOC_VAR_NAME} not defined. Using default value") JOM_LOC = JOM_LOC_DEFAULT say(f"{JOM_LOC_VAR_NAME} set to '{JOM_LOC}") if os.path.exists(JOM_LOC): say(f"{JOM_LOC_VAR_NAME} exists. Using jom.") add_line_to_run_bat_file("@ECHO Running jom:") add_line_to_run_bat_file(JOM_LOC) add_error_handling_line_to_bat_file() add_line_to_run_bat_file("@ECHO jom done.") say(f"{JOM_LOC_VAR_NAME} added to batch file.") else: say(f"{JOM_LOC_VAR_NAME} does not exists. Using nmake.") add_line_to_run_bat_file("@ECHO Running nmake:") #nmake is in the path because of 'vcvarsall.bat' from VS_LOC add_line_to_run_bat_file("nmake") add_error_handling_line_to_bat_file() add_line_to_run_bat_file("@ECHO nmake done.") say("nmake added to batch file.") add_line_to_run_bat_file("cd " + get_original_dir()) create_batch_file()
[ "def", "build", "(", ")", ":", "set_current_activity", "(", "\"BUILD\"", ")", "set_dirs", "(", ")", "COMPILE_MODE", "=", "\"\"", "COMPILER", "=", "\"\"", "say", "(", "\"Attempting to build version: \"", "+", "VERSION_STRING", ")", "say", "(", "\"Testing if all required build environment variables are set:\"", ")", "QT_LOC", "=", "find_qt_path", "(", ")", "VS_LOC", "=", "get_required_env_var_path", "(", "VS_LOC_VAR_NAME", ",", "VS_LOC_DEFAULT", ")", "if", "is_env_var_set", "(", "BUILD_CLANG_VAR_NAME", ")", ":", "say", "(", "f\"{BUILD_CLANG_VAR_NAME} defined. Building for win32-clang-msvc.\"", ")", "get_required_env_var_path", "(", "LLVM_LOC_VAR_NAME", ",", "LLVM_LOC_DEFAULT", ")", "COMPILER", "=", "\"win32-clang-msvc\"", "else", ":", "say", "(", "f\"{BUILD_CLANG_VAR_NAME} not defined. Building for msvc.\"", ")", "COMPILER", "=", "\"win32-msvc\"", "say", "(", "\"All required build environment values are set.\"", ")", "#Otherwise qmake gets confused", "add_line_to_run_bat_file", "(", "\"cd \"", "+", "get_project_dir", "(", ")", ")", "say", "(", "\"Adding 'vcvarsall.bat' to batch file.\"", ")", "add_line_to_run_bat_file", "(", "\"@ECHO Setting up environment with Visual Studio 'vcvarsall.bat':\"", ")", "add_line_to_run_bat_file", "(", "\"call \"", "+", "'\"'", "+", "VS_LOC", "+", "'\"'", "+", "\" \"", "+", "PLATFORM_TARGET", ")", "add_error_handling_line_to_bat_file", "(", ")", "add_line_to_run_bat_file", "(", "\"@ECHO Environment set up.\"", ")", "say", "(", "\"'vcvarsall.bat' added to batch file.\"", ")", "QMAKE_LOC", "=", "QT_LOC", "+", "r\"\\qmake.exe\"", "if", "is_env_var_set", "(", "BUILD_VSPROJ_VAR_NAME", ")", ":", "say", "(", "f\"{BUILD_VSPROJ_VAR_NAME} defined. Building Visual Studio project files.\"", ")", "add_line_to_run_bat_file", "(", "\"@ECHO Running Visual Studio project file building:\"", ")", "add_line_to_run_bat_file", "(", "'\"'", "+", "QMAKE_LOC", "+", "'\"'", "+", "\" -tp vc\"", ")", "add_error_handling_line_to_bat_file", "(", ")", "add_line_to_run_bat_file", "(", "\"@ECHO Done building Visual Studio project files.\"", ")", "else", ":", "say", "(", "f\"{BUILD_VSPROJ_VAR_NAME} not defined. Not building Visual Studio project files.\"", ")", "if", "is_env_var_set", "(", "BUILD_DEBUG_VAR_NAME", ")", ":", "COMPILE_MODE", "=", "\"debug\"", "say", "(", "f\"{BUILD_DEBUG_VAR_NAME} defined. Building '{COMPILE_MODE}' version.\"", ")", "else", ":", "COMPILE_MODE", "=", "\"release\"", "say", "(", "f\"{BUILD_DEBUG_VAR_NAME} not defined. Building '{COMPILE_MODE}' version.\"", ")", "add_line_to_run_bat_file", "(", "\"@ECHO Running qmake:\"", ")", "if", "is_env_var_set", "(", "\"OVRAS_WARNINGS_AS_ERRORS\"", ")", ":", "add_line_to_run_bat_file", "(", "'\"'", "+", "QMAKE_LOC", "+", "'\"'", "+", "\" -spec \"", "+", "COMPILER", "+", "\" CONFIG+=X86_64 \"", "+", "\"CONFIG+=\"", "+", "COMPILE_MODE", "+", "\" CONFIG+=warnings_as_errors\"", ")", "else", ":", "add_line_to_run_bat_file", "(", "'\"'", "+", "QMAKE_LOC", "+", "'\"'", "+", "\" -spec \"", "+", "COMPILER", "+", "\" CONFIG+=X86_64 \"", "+", "\"CONFIG+=\"", "+", "COMPILE_MODE", ")", "add_error_handling_line_to_bat_file", "(", ")", "add_line_to_run_bat_file", "(", "\"@ECHO qmake done.\"", ")", "if", "is_env_var_set", "(", "JOM_LOC_VAR_NAME", ")", ":", "JOM_LOC", "=", "os", ".", "getenv", "(", "JOM_LOC_VAR_NAME", ")", "else", ":", "say", "(", "f\"{JOM_LOC_VAR_NAME} not defined. 
Using default value\"", ")", "JOM_LOC", "=", "JOM_LOC_DEFAULT", "say", "(", "f\"{JOM_LOC_VAR_NAME} set to '{JOM_LOC}\"", ")", "if", "os", ".", "path", ".", "exists", "(", "JOM_LOC", ")", ":", "say", "(", "f\"{JOM_LOC_VAR_NAME} exists. Using jom.\"", ")", "add_line_to_run_bat_file", "(", "\"@ECHO Running jom:\"", ")", "add_line_to_run_bat_file", "(", "JOM_LOC", ")", "add_error_handling_line_to_bat_file", "(", ")", "add_line_to_run_bat_file", "(", "\"@ECHO jom done.\"", ")", "say", "(", "f\"{JOM_LOC_VAR_NAME} added to batch file.\"", ")", "else", ":", "say", "(", "f\"{JOM_LOC_VAR_NAME} does not exists. Using nmake.\"", ")", "add_line_to_run_bat_file", "(", "\"@ECHO Running nmake:\"", ")", "#nmake is in the path because of 'vcvarsall.bat' from VS_LOC", "add_line_to_run_bat_file", "(", "\"nmake\"", ")", "add_error_handling_line_to_bat_file", "(", ")", "add_line_to_run_bat_file", "(", "\"@ECHO nmake done.\"", ")", "say", "(", "\"nmake added to batch file.\"", ")", "add_line_to_run_bat_file", "(", "\"cd \"", "+", "get_original_dir", "(", ")", ")", "create_batch_file", "(", ")" ]
https://github.com/OpenVR-Advanced-Settings/OpenVR-AdvancedSettings/blob/522ba46ac6bcff8e82907715f7f0af7abbbe1e7b/build_scripts/win/build.py#L84-L174
BVLC/caffe
9b891540183ddc834a02b2bd81b31afae71b2153
python/caffe/draw.py
python
draw_net_to_file
(caffe_net, filename, rankdir='LR', phase=None, display_lrm=False)
Draws a caffe net, and saves it to file using the format given as the file extension. Use '.raw' to output raw text that you can manually feed to graphviz to draw graphs. Parameters ---------- caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer. filename : string The path to a file where the networks visualization will be stored. rankdir : {'LR', 'TB', 'BT'} Direction of graph layout. phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional Include layers from this network phase. If None, include all layers. (the default is None) display_lrm : boolean, optional If True display the learning rate multipliers for the learning layers (default is False).
Draws a caffe net, and saves it to file using the format given as the file extension. Use '.raw' to output raw text that you can manually feed to graphviz to draw graphs.
[ "Draws", "a", "caffe", "net", "and", "saves", "it", "to", "file", "using", "the", "format", "given", "as", "the", "file", "extension", ".", "Use", ".", "raw", "to", "output", "raw", "text", "that", "you", "can", "manually", "feed", "to", "graphviz", "to", "draw", "graphs", "." ]
def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None, display_lrm=False): """Draws a caffe net, and saves it to file using the format given as the file extension. Use '.raw' to output raw text that you can manually feed to graphviz to draw graphs. Parameters ---------- caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer. filename : string The path to a file where the networks visualization will be stored. rankdir : {'LR', 'TB', 'BT'} Direction of graph layout. phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional Include layers from this network phase. If None, include all layers. (the default is None) display_lrm : boolean, optional If True display the learning rate multipliers for the learning layers (default is False). """ ext = filename[filename.rfind('.')+1:] with open(filename, 'wb') as fid: fid.write(draw_net(caffe_net, rankdir, ext, phase, display_lrm))
[ "def", "draw_net_to_file", "(", "caffe_net", ",", "filename", ",", "rankdir", "=", "'LR'", ",", "phase", "=", "None", ",", "display_lrm", "=", "False", ")", ":", "ext", "=", "filename", "[", "filename", ".", "rfind", "(", "'.'", ")", "+", "1", ":", "]", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "fid", ":", "fid", ".", "write", "(", "draw_net", "(", "caffe_net", ",", "rankdir", ",", "ext", ",", "phase", ",", "display_lrm", ")", ")" ]
https://github.com/BVLC/caffe/blob/9b891540183ddc834a02b2bd81b31afae71b2153/python/caffe/draw.py#L293-L314
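A hedged usage sketch for the record above; it assumes pycaffe (with pydot/graphviz) is importable, and `deploy.prototxt` / `net.png` are hypothetical paths.

```python
# Usage sketch; 'deploy.prototxt' and 'net.png' are hypothetical paths.
import caffe.draw
from caffe.proto import caffe_pb2
from google.protobuf import text_format

net = caffe_pb2.NetParameter()
with open('deploy.prototxt') as f:
    text_format.Merge(f.read(), net)

# The output format is taken from the file extension; '.raw' would emit
# graphviz source instead of a rendered image.
caffe.draw.draw_net_to_file(net, 'net.png', rankdir='TB', display_lrm=True)
```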
TheLegendAli/DeepLab-Context
fb04e9e2fc2682490ad9f60533b9d6c4c0e0479c
scripts/cpp_lint.py
python
_CppLintState.PrintErrorCounts
(self)
Print a summary of errors by category, and the total.
Print a summary of errors by category, and the total.
[ "Print", "a", "summary", "of", "errors", "by", "category", "and", "the", "total", "." ]
def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count)
[ "def", "PrintErrorCounts", "(", "self", ")", ":", "for", "category", ",", "count", "in", "self", ".", "errors_by_category", ".", "iteritems", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "'Category \\'%s\\' errors found: %d\\n'", "%", "(", "category", ",", "count", ")", ")", "sys", ".", "stderr", ".", "write", "(", "'Total errors found: %d\\n'", "%", "self", ".", "error_count", ")" ]
https://github.com/TheLegendAli/DeepLab-Context/blob/fb04e9e2fc2682490ad9f60533b9d6c4c0e0479c/scripts/cpp_lint.py#L757-L762
cvxpy/cvxpy
5165b4fb750dfd237de8659383ef24b4b2e33aaf
cvxpy/reductions/complex2real/atom_canonicalizers/matrix_canon.py
python
hermitian_canon
(expr, real_args, imag_args, real2imag)
return expr.copy([matrix]), None
Canonicalize functions that take a Hermitian matrix.
Canonicalize functions that take a Hermitian matrix.
[ "Canonicalize", "functions", "that", "take", "a", "Hermitian", "matrix", "." ]
def hermitian_canon(expr, real_args, imag_args, real2imag): """Canonicalize functions that take a Hermitian matrix. """ if imag_args[0] is None: matrix = real_args[0] else: if real_args[0] is None: real_args[0] = np.zeros(imag_args[0].shape) matrix = bmat([[real_args[0], -imag_args[0]], [imag_args[0], real_args[0]]]) return expr.copy([matrix]), None
[ "def", "hermitian_canon", "(", "expr", ",", "real_args", ",", "imag_args", ",", "real2imag", ")", ":", "if", "imag_args", "[", "0", "]", "is", "None", ":", "matrix", "=", "real_args", "[", "0", "]", "else", ":", "if", "real_args", "[", "0", "]", "is", "None", ":", "real_args", "[", "0", "]", "=", "np", ".", "zeros", "(", "imag_args", "[", "0", "]", ".", "shape", ")", "matrix", "=", "bmat", "(", "[", "[", "real_args", "[", "0", "]", ",", "-", "imag_args", "[", "0", "]", "]", ",", "[", "imag_args", "[", "0", "]", ",", "real_args", "[", "0", "]", "]", "]", ")", "return", "expr", ".", "copy", "(", "[", "matrix", "]", ")", ",", "None" ]
https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/reductions/complex2real/atom_canonicalizers/matrix_canon.py#L29-L39
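The block matrix built above is the standard real embedding of a complex Hermitian matrix: H = R + iI maps to the real symmetric matrix [[R, -I], [I, R]], which has the same eigenvalues as H, each with doubled multiplicity. A pure-numpy check of that property (my own sketch, independent of cvxpy):

```python
# Numerical check of the real embedding used by hermitian_canon.
import numpy as np

R = np.array([[2., 1.], [1., 3.]])    # real part: symmetric
I = np.array([[0., -1.], [1., 0.]])   # imaginary part: antisymmetric
H = R + 1j * I                        # Hermitian by construction

M = np.block([[R, -I], [I, R]])       # the real symmetric embedding

print(np.linalg.eigvalsh(H))          # [1. 4.]
print(np.linalg.eigvalsh(M))          # [1. 1. 4. 4.] -- each eigenvalue twice
```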
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/ops/mask_ops.py
python
kleene_or
( left: Union[bool, np.ndarray], right: Union[bool, np.ndarray], left_mask: Optional[np.ndarray], right_mask: Optional[np.ndarray], )
return result, mask
Boolean ``or`` using Kleene logic. Values are NA where we have ``NA | NA`` or ``NA | False``. ``NA | True`` is considered True. Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical or, and the new mask.
Boolean ``or`` using Kleene logic.
[ "Boolean", "or", "using", "Kleene", "logic", "." ]
def kleene_or( left: Union[bool, np.ndarray], right: Union[bool, np.ndarray], left_mask: Optional[np.ndarray], right_mask: Optional[np.ndarray], ): """ Boolean ``or`` using Kleene logic. Values are NA where we have ``NA | NA`` or ``NA | False``. ``NA | True`` is considered True. Parameters ---------- left, right : ndarray, NA, or bool The values of the array. left_mask, right_mask : ndarray, optional The masks. Only one of these may be None, which implies that the associated `left` or `right` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical or, and the new mask. """ # To reduce the number of cases, we ensure that `left` & `left_mask` # always come from an array, not a scalar. This is safe, since because # A | B == B | A if left_mask is None: return kleene_or(right, left, right_mask, left_mask) assert isinstance(left, np.ndarray) raise_for_nan(right, method="or") if right is libmissing.NA: result = left.copy() else: result = left | right if right_mask is not None: # output is unknown where (False & NA), (NA & False), (NA & NA) left_false = ~(left | left_mask) right_false = ~(right | right_mask) mask = ( (left_false & right_mask) | (right_false & left_mask) | (left_mask & right_mask) ) else: if right is True: mask = np.zeros_like(left_mask) elif right is libmissing.NA: mask = (~left & ~left_mask) | left_mask else: # False mask = left_mask.copy() return result, mask
[ "def", "kleene_or", "(", "left", ":", "Union", "[", "bool", ",", "np", ".", "ndarray", "]", ",", "right", ":", "Union", "[", "bool", ",", "np", ".", "ndarray", "]", ",", "left_mask", ":", "Optional", "[", "np", ".", "ndarray", "]", ",", "right_mask", ":", "Optional", "[", "np", ".", "ndarray", "]", ",", ")", ":", "# To reduce the number of cases, we ensure that `left` & `left_mask`", "# always come from an array, not a scalar. This is safe, since because", "# A | B == B | A", "if", "left_mask", "is", "None", ":", "return", "kleene_or", "(", "right", ",", "left", ",", "right_mask", ",", "left_mask", ")", "assert", "isinstance", "(", "left", ",", "np", ".", "ndarray", ")", "raise_for_nan", "(", "right", ",", "method", "=", "\"or\"", ")", "if", "right", "is", "libmissing", ".", "NA", ":", "result", "=", "left", ".", "copy", "(", ")", "else", ":", "result", "=", "left", "|", "right", "if", "right_mask", "is", "not", "None", ":", "# output is unknown where (False & NA), (NA & False), (NA & NA)", "left_false", "=", "~", "(", "left", "|", "left_mask", ")", "right_false", "=", "~", "(", "right", "|", "right_mask", ")", "mask", "=", "(", "(", "left_false", "&", "right_mask", ")", "|", "(", "right_false", "&", "left_mask", ")", "|", "(", "left_mask", "&", "right_mask", ")", ")", "else", ":", "if", "right", "is", "True", ":", "mask", "=", "np", ".", "zeros_like", "(", "left_mask", ")", "elif", "right", "is", "libmissing", ".", "NA", ":", "mask", "=", "(", "~", "left", "&", "~", "left_mask", ")", "|", "left_mask", "else", ":", "# False", "mask", "=", "left_mask", ".", "copy", "(", ")", "return", "result", ",", "mask" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/ops/mask_ops.py#L11-L69
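This helper backs pandas' nullable boolean dtype; a short sketch (not from the record) of the Kleene truth table through the public API — `NA | True` is True, while `NA | False` and `NA | NA` stay NA:

```python
# Kleene-logic `or` via pandas' masked boolean arrays.
import pandas as pd

left = pd.array([True, False, pd.NA], dtype="boolean")
print(left | True)    # -> True, True, True
print(left | False)   # -> True, False, <NA>
print(left | pd.NA)   # -> True, <NA>, <NA>
```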
msitt/blpapi-python
bebcf43668c9e5f5467b1f685f9baebbfc45bc87
src/blpapi/versionhelper.py
python
_swig_add_metaclass
(metaclass)
return wrapper
Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass
Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass
[ "Class", "decorator", "for", "adding", "a", "metaclass", "to", "a", "SWIG", "wrapped", "class", "-", "a", "slimmed", "down", "version", "of", "six", ".", "add_metaclass" ]
def _swig_add_metaclass(metaclass): """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass""" def wrapper(cls): return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) return wrapper
[ "def", "_swig_add_metaclass", "(", "metaclass", ")", ":", "def", "wrapper", "(", "cls", ")", ":", "return", "metaclass", "(", "cls", ".", "__name__", ",", "cls", ".", "__bases__", ",", "cls", ".", "__dict__", ".", "copy", "(", ")", ")", "return", "wrapper" ]
https://github.com/msitt/blpapi-python/blob/bebcf43668c9e5f5467b1f685f9baebbfc45bc87/src/blpapi/versionhelper.py#L54-L58
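A toy sketch of the decorator pattern this implements; the metaclass here is a hypothetical stand-in for SWIG's generated one, and the decorated class subclasses a base so its `__dict__`/`__weakref__` descriptors are not copied (a detail the full six.add_metaclass handles by popping them):

```python
class Meta(type):
    def __new__(mcs, name, bases, namespace):
        namespace['tagged'] = True        # the metaclass injects an attribute
        return super().__new__(mcs, name, bases, namespace)

def add_metaclass(metaclass):             # same shape as the helper above
    def wrapper(cls):
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper

class Base:
    pass

@add_metaclass(Meta)
class Widget(Base):
    pass

print(type(Widget).__name__, Widget.tagged)   # Meta True
```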
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/contrib/rnn/python/ops/lstm_ops.py
python
_FusedLSTMGradShape
(op)
return [x_shape] * max_len + [cs_prev_shape, h_prev_shape, w_shape, wci_shape, wco_shape, wcf_shape, b_shape]
Shape for FusedLSTM.
Shape for FusedLSTM.
[ "Shape", "for", "FusedLSTM", "." ]
def _FusedLSTMGradShape(op): """Shape for FusedLSTM.""" max_len = op.get_attr("max_len") x = op.inputs[1] cs_prev = op.inputs[1 + max_len] h_prev = op.inputs[2 + max_len] w = op.inputs[3 + max_len] wci = op.inputs[4 + max_len] wco = op.inputs[5 + max_len] wcf = op.inputs[6 + max_len] b = op.inputs[7 + max_len] x_shape = x.get_shape().with_rank(2) cs_prev_shape = cs_prev.get_shape().with_rank(2) h_prev_shape = h_prev.get_shape().with_rank(2) w_shape = w.get_shape().with_rank(2) wci_shape = wci.get_shape().with_rank(1) wco_shape = wco.get_shape().with_rank(1) wcf_shape = wcf.get_shape().with_rank(1) b_shape = b.get_shape().with_rank(1) return [x_shape] * max_len + [cs_prev_shape, h_prev_shape, w_shape, wci_shape, wco_shape, wcf_shape, b_shape]
[ "def", "_FusedLSTMGradShape", "(", "op", ")", ":", "max_len", "=", "op", ".", "get_attr", "(", "\"max_len\"", ")", "x", "=", "op", ".", "inputs", "[", "1", "]", "cs_prev", "=", "op", ".", "inputs", "[", "1", "+", "max_len", "]", "h_prev", "=", "op", ".", "inputs", "[", "2", "+", "max_len", "]", "w", "=", "op", ".", "inputs", "[", "3", "+", "max_len", "]", "wci", "=", "op", ".", "inputs", "[", "4", "+", "max_len", "]", "wco", "=", "op", ".", "inputs", "[", "5", "+", "max_len", "]", "wcf", "=", "op", ".", "inputs", "[", "6", "+", "max_len", "]", "b", "=", "op", ".", "inputs", "[", "7", "+", "max_len", "]", "x_shape", "=", "x", ".", "get_shape", "(", ")", ".", "with_rank", "(", "2", ")", "cs_prev_shape", "=", "cs_prev", ".", "get_shape", "(", ")", ".", "with_rank", "(", "2", ")", "h_prev_shape", "=", "h_prev", ".", "get_shape", "(", ")", ".", "with_rank", "(", "2", ")", "w_shape", "=", "w", ".", "get_shape", "(", ")", ".", "with_rank", "(", "2", ")", "wci_shape", "=", "wci", ".", "get_shape", "(", ")", ".", "with_rank", "(", "1", ")", "wco_shape", "=", "wco", ".", "get_shape", "(", ")", ".", "with_rank", "(", "1", ")", "wcf_shape", "=", "wcf", ".", "get_shape", "(", ")", ".", "with_rank", "(", "1", ")", "b_shape", "=", "b", ".", "get_shape", "(", ")", ".", "with_rank", "(", "1", ")", "return", "[", "x_shape", "]", "*", "max_len", "+", "[", "cs_prev_shape", ",", "h_prev_shape", ",", "w_shape", ",", "wci_shape", ",", "wco_shape", ",", "wcf_shape", ",", "b_shape", "]" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/rnn/python/ops/lstm_ops.py#L369-L392
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/array_ops.py
python
_TileGradShape
(op)
Shape function for the TileGrad op.
Shape function for the TileGrad op.
[ "Shape", "function", "for", "the", "TileGrad", "op", "." ]
def _TileGradShape(op): """Shape function for the TileGrad op.""" multiples_shape = op.inputs[1].get_shape().with_rank(1) input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0]) # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i) # it is a vector of non-negative integers, and (ii) doing so allows # us to handle partially-known multiples. multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank( input_shape.ndims) if multiples.ndims is None: return [tensor_shape.unknown_shape()] else: output_dims = [] for dim, multiple in zip(input_shape.dims, multiples.dims): output_dims.append(dim // multiple) return [tensor_shape.TensorShape(output_dims)]
[ "def", "_TileGradShape", "(", "op", ")", ":", "multiples_shape", "=", "op", ".", "inputs", "[", "1", "]", ".", "get_shape", "(", ")", ".", "with_rank", "(", "1", ")", "input_shape", "=", "op", ".", "inputs", "[", "0", "]", ".", "get_shape", "(", ")", ".", "with_rank", "(", "multiples_shape", "[", "0", "]", ")", "# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)", "# it is a vector of non-negative integers, and (ii) doing so allows", "# us to handle partially-known multiples.", "multiples", "=", "tensor_util", ".", "constant_value_as_shape", "(", "op", ".", "inputs", "[", "1", "]", ")", ".", "with_rank", "(", "input_shape", ".", "ndims", ")", "if", "multiples", ".", "ndims", "is", "None", ":", "return", "[", "tensor_shape", ".", "unknown_shape", "(", ")", "]", "else", ":", "output_dims", "=", "[", "]", "for", "dim", ",", "multiple", "in", "zip", "(", "input_shape", ".", "dims", ",", "multiples", ".", "dims", ")", ":", "output_dims", ".", "append", "(", "dim", "//", "multiple", ")", "return", "[", "tensor_shape", ".", "TensorShape", "(", "output_dims", ")", "]" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/array_ops.py#L2993-L3008
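The core of the shape inference above is one integer division per axis: Tile's gradient has the pre-tiling shape. A plain-Python sketch of that arithmetic (illustrative values):

```python
# Recover the pre-tiling shape from a tiled shape and the multiples.
tiled_shape = [4, 6]   # shape of Tile's output (TileGrad's input)
multiples = [2, 3]     # multiples Tile was called with

original_shape = [dim // m for dim, m in zip(tiled_shape, multiples)]
print(original_shape)  # [2, 2]
```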
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/affiliate.py
python
Affiliate.total_comm
(self)
return self._total_comm
Gets the total_comm of this Affiliate. # noqa: E501 :return: The total_comm of this Affiliate. # noqa: E501 :rtype: float
Gets the total_comm of this Affiliate. # noqa: E501
[ "Gets", "the", "total_comm", "of", "this", "Affiliate", ".", "#", "noqa", ":", "E501" ]
def total_comm(self): """Gets the total_comm of this Affiliate. # noqa: E501 :return: The total_comm of this Affiliate. # noqa: E501 :rtype: float """ return self._total_comm
[ "def", "total_comm", "(", "self", ")", ":", "return", "self", ".", "_total_comm" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/affiliate.py#L343-L350
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_windows.py
python
PyScrolledWindow.DoGetBestSize
(*args, **kwargs)
return _windows_.PyScrolledWindow_DoGetBestSize(*args, **kwargs)
DoGetBestSize(self) -> Size
DoGetBestSize(self) -> Size
[ "DoGetBestSize", "(", "self", ")", "-", ">", "Size" ]
def DoGetBestSize(*args, **kwargs): """DoGetBestSize(self) -> Size""" return _windows_.PyScrolledWindow_DoGetBestSize(*args, **kwargs)
[ "def", "DoGetBestSize", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "PyScrolledWindow_DoGetBestSize", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_windows.py#L4544-L4546
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scikit-learn/py2/sklearn/multiclass.py
python
OneVsOneClassifier.predict
(self, X)
return self.classes_[Y.argmax(axis=1)]
Estimate the best class label for each sample in X. This is implemented as ``argmax(decision_function(X), axis=1)`` which will return the label of the class with most votes by estimators predicting the outcome of a decision for each possible class pair. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. Returns ------- y : numpy array of shape [n_samples] Predicted multi-class targets.
Estimate the best class label for each sample in X.
[ "Estimate", "the", "best", "class", "label", "for", "each", "sample", "in", "X", "." ]
def predict(self, X): """Estimate the best class label for each sample in X. This is implemented as ``argmax(decision_function(X), axis=1)`` which will return the label of the class with most votes by estimators predicting the outcome of a decision for each possible class pair. Parameters ---------- X : (sparse) array-like, shape = [n_samples, n_features] Data. Returns ------- y : numpy array of shape [n_samples] Predicted multi-class targets. """ Y = self.decision_function(X) return self.classes_[Y.argmax(axis=1)]
[ "def", "predict", "(", "self", ",", "X", ")", ":", "Y", "=", "self", ".", "decision_function", "(", "X", ")", "return", "self", ".", "classes_", "[", "Y", ".", "argmax", "(", "axis", "=", "1", ")", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/multiclass.py#L551-L569
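A hedged end-to-end sketch with scikit-learn's public API (not from the record): with 3 classes, one-vs-one fits 3 * 2 / 2 = 3 pairwise estimators, and `predict` takes the argmax over the vote-based decision function.

```python
from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsOneClassifier
from sklearn.svm import LinearSVC

X, y = load_iris(return_X_y=True)
clf = OneVsOneClassifier(LinearSVC()).fit(X, y)

print(len(clf.estimators_))   # 3 pairwise estimators for 3 classes
print(clf.predict(X[:5]))     # argmax over decision_function columns
```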
infinit/memo
3a8394d0f647efe03ccb8bfe885a7279cb8be8a6
elle/drake/src/drake/__init__.py
python
Dictionary.hash
(self)
return hashlib.sha1(str(items).encode('utf-8')).hexdigest()
Hash value.
Hash value.
[ "Hash", "value", "." ]
def hash(self): """Hash value.""" # FIXME: sha1 of the string repr ain't optimal items = list(self) items.sort() return hashlib.sha1(str(items).encode('utf-8')).hexdigest()
[ "def", "hash", "(", "self", ")", ":", "# FIXME: sha1 of the string repr ain't optimal", "items", "=", "list", "(", "self", ")", "items", ".", "sort", "(", ")", "return", "hashlib", ".", "sha1", "(", "str", "(", "items", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")" ]
https://github.com/infinit/memo/blob/3a8394d0f647efe03ccb8bfe885a7279cb8be8a6/elle/drake/src/drake/__init__.py#L2550-L2555
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/strobogrammatic-number-ii.py
python
Solution.findStrobogrammatic
(self, n)
return result
:type n: int :rtype: List[str]
:type n: int :rtype: List[str]
[ ":", "type", "n", ":", "int", ":", "rtype", ":", "List", "[", "str", "]" ]
def findStrobogrammatic(self, n): """ :type n: int :rtype: List[str] """ lookup = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'} result = ['0', '1', '8'] if n%2 else [''] for i in xrange(n%2, n, 2): result = [a + num + b for a, b in lookup.iteritems() if i != n-2 or a != '0' for num in result] return result
[ "def", "findStrobogrammatic", "(", "self", ",", "n", ")", ":", "lookup", "=", "{", "'0'", ":", "'0'", ",", "'1'", ":", "'1'", ",", "'6'", ":", "'9'", ",", "'8'", ":", "'8'", ",", "'9'", ":", "'6'", "}", "result", "=", "[", "'0'", ",", "'1'", ",", "'8'", "]", "if", "n", "%", "2", "else", "[", "''", "]", "for", "i", "in", "xrange", "(", "n", "%", "2", ",", "n", ",", "2", ")", ":", "result", "=", "[", "a", "+", "num", "+", "b", "for", "a", ",", "b", "in", "lookup", ".", "iteritems", "(", ")", "if", "i", "!=", "n", "-", "2", "or", "a", "!=", "'0'", "for", "num", "in", "result", "]", "return", "result" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/strobogrammatic-number-ii.py#L5-L14
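The record above is Python 2 (`xrange`, `iteritems`). A hypothetical Python 3 port of the same inside-out construction, which wraps every current string in a strobogrammatic pair and skips a leading '0' only at the outermost layer:

```python
def find_strobogrammatic(n):
    pairs = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}
    result = ['0', '1', '8'] if n % 2 else ['']
    for i in range(n % 2, n, 2):
        result = [a + num + pairs[a]
                  for a in pairs
                  if i != n - 2 or a != '0'   # no leading zeros outermost
                  for num in result]
    return result

print(sorted(find_strobogrammatic(2)))   # ['11', '69', '88', '96']
```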
clasp-developers/clasp
5287e5eb9bbd5e8da1e3a629a03d78bd71d01969
debugger-tools/extend_lldb/print_function.py
python
get_line_numbers
(thread)
return map(GetLineNumber, range(thread.GetNumFrames()))
Returns a sequence of line numbers from the stack frames of this thread.
Returns a sequence of line numbers from the stack frames of this thread.
[ "Returns", "a", "sequence", "of", "line", "numbers", "from", "the", "stack", "frames", "of", "this", "thread", "." ]
def get_line_numbers(thread): """ Returns a sequence of line numbers from the stack frames of this thread. """ def GetLineNumber(i): return thread.GetFrameAtIndex(i).GetLineEntry().GetLine() return map(GetLineNumber, range(thread.GetNumFrames()))
[ "def", "get_line_numbers", "(", "thread", ")", ":", "def", "GetLineNumber", "(", "i", ")", ":", "return", "thread", ".", "GetFrameAtIndex", "(", "i", ")", ".", "GetLineEntry", "(", ")", ".", "GetLine", "(", ")", "return", "map", "(", "GetLineNumber", ",", "range", "(", "thread", ".", "GetNumFrames", "(", ")", ")", ")" ]
https://github.com/clasp-developers/clasp/blob/5287e5eb9bbd5e8da1e3a629a03d78bd71d01969/debugger-tools/extend_lldb/print_function.py#L250-L257
lmb-freiburg/ogn
974f72ef4bf840d6f6693d22d1843a79223e77ce
scripts/cpp_lint.py
python
_NestingState.Update
(self, filename, clean_lines, linenum, error)
Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Update nesting state with current line.
[ "Update", "nesting", "state", "with", "current", "line", "." ]
def Update(self, filename, clean_lines, linenum, error): """Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Update pp_stack first self.UpdatePreprocessor(line) # Count parentheses. This is to avoid adding struct arguments to # the nesting stack. if self.stack: inner_block = self.stack[-1] depth_change = line.count('(') - line.count(')') inner_block.open_parentheses += depth_change # Also check if we are starting or ending an inline assembly block. if inner_block.inline_asm in (_NO_ASM, _END_ASM): if (depth_change != 0 and inner_block.open_parentheses == 1 and _MATCH_ASM.match(line)): # Enter assembly block inner_block.inline_asm = _INSIDE_ASM else: # Not entering assembly block. If previous line was _END_ASM, # we will now shift to _NO_ASM state. inner_block.inline_asm = _NO_ASM elif (inner_block.inline_asm == _INSIDE_ASM and inner_block.open_parentheses == 0): # Exit assembly block inner_block.inline_asm = _END_ASM # Consume namespace declaration at the beginning of the line. Do # this in a loop so that we catch same line declarations like this: # namespace proto2 { namespace bridge { class MessageSet; } } while True: # Match start of namespace. The "\b\s*" below catches namespace # declarations even if it weren't followed by a whitespace, this # is so that we don't confuse our namespace checker. The # missing spaces will be flagged by CheckSpacing. namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) self.stack.append(new_namespace) line = namespace_decl_match.group(2) if line.find('{') != -1: new_namespace.seen_open_brace = True line = line[line.find('{') + 1:] # Look for a class declaration in whatever is left of the line # after parsing namespaces. The regexp accounts for decorated classes # such as in: # class LOCKABLE API Object { # }; # # Templates with class arguments may confuse the parser, for example: # template <class T # class Comparator = less<T>, # class Vector = vector<T> > # class HeapQueue { # # Because this parser has no nesting state about templates, by the # time it saw "class Comparator", it may think that it's a new class. # Nested templates have a similar problem: # template < # typename ExportedType, # typename TupleType, # template <typename, typename> class ImplTemplate> # # To avoid these cases, we ignore classes that are followed by '=' or '>' class_decl_match = Match( r'\s*(template\s*<[\w\s<>,:]*>\s*)?' r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)' r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): self.stack.append(_ClassInfo( class_decl_match.group(4), class_decl_match.group(2), clean_lines, linenum)) line = class_decl_match.group(5) # If we have not yet seen the opening brace for the innermost block, # run checks here. if not self.SeenOpenBrace(): self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) # Update access control if we are inside a class/struct if self.stack and isinstance(self.stack[-1], _ClassInfo): classinfo = self.stack[-1] access_match = Match( r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' r':(?:[^:]|$)', line) if access_match: classinfo.access = access_match.group(2) # Check that access keywords are indented +1 space. Skip this # check if the keywords are not preceded by whitespaces. indent = access_match.group(1) if (len(indent) != classinfo.class_indent + 1 and Match(r'^\s*$', indent)): if classinfo.is_struct: parent = 'struct ' + classinfo.name else: parent = 'class ' + classinfo.name slots = '' if access_match.group(3): slots = access_match.group(3) error(filename, linenum, 'whitespace/indent', 3, '%s%s: should be indented +1 space inside %s' % ( access_match.group(2), slots, parent)) # Consume braces or semicolons from what's left of the line while True: # Match first brace, semicolon, or closed parenthesis. matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) if not matched: break token = matched.group(1) if token == '{': # If namespace or class hasn't seen a opening brace yet, mark # namespace/class head as complete. Push a new block onto the # stack otherwise. if not self.SeenOpenBrace(): self.stack[-1].seen_open_brace = True else: self.stack.append(_BlockInfo(True)) if _MATCH_ASM.match(line): self.stack[-1].inline_asm = _BLOCK_ASM elif token == ';' or token == ')': # If we haven't seen an opening brace yet, but we already saw # a semicolon, this is probably a forward declaration. Pop # the stack for these. # # Similarly, if we haven't seen an opening brace yet, but we # already saw a closing parenthesis, then these are probably # function arguments with extra "class" or "struct" keywords. # Also pop these stack for these. if not self.SeenOpenBrace(): self.stack.pop() else: # token == '}' # Perform end of block checks and pop the stack. if self.stack: self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) self.stack.pop() line = matched.group(2)
[ "def", "Update", "(", "self", ",", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Update pp_stack first", "self", ".", "UpdatePreprocessor", "(", "line", ")", "# Count parentheses.  This is to avoid adding struct arguments to", "# the nesting stack.", "if", "self", ".", "stack", ":", "inner_block", "=", "self", ".", "stack", "[", "-", "1", "]", "depth_change", "=", "line", ".", "count", "(", "'('", ")", "-", "line", ".", "count", "(", "')'", ")", "inner_block", ".", "open_parentheses", "+=", "depth_change", "# Also check if we are starting or ending an inline assembly block.", "if", "inner_block", ".", "inline_asm", "in", "(", "_NO_ASM", ",", "_END_ASM", ")", ":", "if", "(", "depth_change", "!=", "0", "and", "inner_block", ".", "open_parentheses", "==", "1", "and", "_MATCH_ASM", ".", "match", "(", "line", ")", ")", ":", "# Enter assembly block", "inner_block", ".", "inline_asm", "=", "_INSIDE_ASM", "else", ":", "# Not entering assembly block.  If previous line was _END_ASM,", "# we will now shift to _NO_ASM state.", "inner_block", ".", "inline_asm", "=", "_NO_ASM", "elif", "(", "inner_block", ".", "inline_asm", "==", "_INSIDE_ASM", "and", "inner_block", ".", "open_parentheses", "==", "0", ")", ":", "# Exit assembly block", "inner_block", ".", "inline_asm", "=", "_END_ASM", "# Consume namespace declaration at the beginning of the line.  Do", "# this in a loop so that we catch same line declarations like this:", "#   namespace proto2 { namespace bridge { class MessageSet; } }", "while", "True", ":", "# Match start of namespace.  The \"\\b\\s*\" below catches namespace", "# declarations even if it weren't followed by a whitespace, this", "# is so that we don't confuse our namespace checker.  The", "# missing spaces will be flagged by CheckSpacing.", "namespace_decl_match", "=", "Match", "(", "r'^\\s*namespace\\b\\s*([:\\w]+)?(.*)$'", ",", "line", ")", "if", "not", "namespace_decl_match", ":", "break", "new_namespace", "=", "_NamespaceInfo", "(", "namespace_decl_match", ".", "group", "(", "1", ")", ",", "linenum", ")", "self", ".", "stack", ".", "append", "(", "new_namespace", ")", "line", "=", "namespace_decl_match", ".", "group", "(", "2", ")", "if", "line", ".", "find", "(", "'{'", ")", "!=", "-", "1", ":", "new_namespace", ".", "seen_open_brace", "=", "True", "line", "=", "line", "[", "line", ".", "find", "(", "'{'", ")", "+", "1", ":", "]", "# Look for a class declaration in whatever is left of the line", "# after parsing namespaces. The regexp accounts for decorated classes", "# such as in:", "#   class LOCKABLE API Object {", "#   };", "#", "# Templates with class arguments may confuse the parser, for example:", "#   template <class T", "#             class Comparator = less<T>,", "#             class Vector = vector<T> >", "#   class HeapQueue {", "#", "# Because this parser has no nesting state about templates, by the", "# time it saw \"class Comparator\", it may think that it's a new class.", "# Nested templates have a similar problem:", "#   template <", "#       typename ExportedType,", "#       typename TupleType,", "#       template <typename, typename> class ImplTemplate>", "#", "# To avoid these cases, we ignore classes that are followed by '=' or '>'", "class_decl_match", "=", "Match", "(", "r'\\s*(template\\s*<[\\w\\s<>,:]*>\\s*)?'", "r'(class|struct)\\s+([A-Z_]+\\s+)*(\\w+(?:::\\w+)*)'", "r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\\s*>)*)$'", ",", "line", ")", "if", "(", "class_decl_match", "and", "(", "not", "self", ".", "stack", "or", "self", ".", "stack", "[", "-", "1", "]", ".", "open_parentheses", "==", "0", ")", ")", ":", "self", ".", "stack", ".", "append", "(", "_ClassInfo", "(", "class_decl_match", ".", "group", "(", "4", ")", ",", "class_decl_match", ".", "group", "(", "2", ")", ",", "clean_lines", ",", "linenum", ")", ")", "line", "=", "class_decl_match", ".", "group", "(", "5", ")", "# If we have not yet seen the opening brace for the innermost block,", "# run checks here.", "if", "not", "self", ".", "SeenOpenBrace", "(", ")", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "CheckBegin", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "# Update access control if we are inside a class/struct", "if", "self", ".", "stack", "and", "isinstance", "(", "self", ".", "stack", "[", "-", "1", "]", ",", "_ClassInfo", ")", ":", "classinfo", "=", "self", ".", "stack", "[", "-", "1", "]", "access_match", "=", "Match", "(", "r'^(.*)\\b(public|private|protected|signals)(\\s+(?:slots\\s*)?)?'", "r':(?:[^:]|$)'", ",", "line", ")", "if", "access_match", ":", "classinfo", ".", "access", "=", "access_match", ".", "group", "(", "2", ")", "# Check that access keywords are indented +1 space.  Skip this", "# check if the keywords are not preceded by whitespaces.", "indent", "=", "access_match", ".", "group", "(", "1", ")", "if", "(", "len", "(", "indent", ")", "!=", "classinfo", ".", "class_indent", "+", "1", "and", "Match", "(", "r'^\\s*$'", ",", "indent", ")", ")", ":", "if", "classinfo", ".", "is_struct", ":", "parent", "=", "'struct '", "+", "classinfo", ".", "name", "else", ":", "parent", "=", "'class '", "+", "classinfo", ".", "name", "slots", "=", "''", "if", "access_match", ".", "group", "(", "3", ")", ":", "slots", "=", "access_match", ".", "group", "(", "3", ")", "error", "(", "filename", ",", "linenum", ",", "'whitespace/indent'", ",", "3", ",", "'%s%s: should be indented +1 space inside %s'", "%", "(", "access_match", ".", "group", "(", "2", ")", ",", "slots", ",", "parent", ")", ")", "# Consume braces or semicolons from what's left of the line", "while", "True", ":", "# Match first brace, semicolon, or closed parenthesis.", "matched", "=", "Match", "(", "r'^[^{;)}]*([{;)}])(.*)$'", ",", "line", ")", "if", "not", "matched", ":", "break", "token", "=", "matched", ".", "group", "(", "1", ")", "if", "token", "==", "'{'", ":", "# If namespace or class hasn't seen a opening brace yet, mark", "# namespace/class head as complete. Push a new block onto the", "# stack otherwise.", "if", "not", "self", ".", "SeenOpenBrace", "(", ")", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "seen_open_brace", "=", "True", "else", ":", "self", ".", "stack", ".", "append", "(", "_BlockInfo", "(", "True", ")", ")", "if", "_MATCH_ASM", ".", "match", "(", "line", ")", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "inline_asm", "=", "_BLOCK_ASM", "elif", "token", "==", "';'", "or", "token", "==", "')'", ":", "# If we haven't seen an opening brace yet, but we already saw", "# a semicolon, this is probably a forward declaration.  Pop", "# the stack for these.", "#", "# Similarly, if we haven't seen an opening brace yet, but we", "# already saw a closing parenthesis, then these are probably", "# function arguments with extra \"class\" or \"struct\" keywords.", "# Also pop these stack for these.", "if", "not", "self", ".", "SeenOpenBrace", "(", ")", ":", "self", ".", "stack", ".", "pop", "(", ")", "else", ":", "# token == '}'", "# Perform end of block checks and pop the stack.", "if", "self", ".", "stack", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "CheckEnd", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "self", ".", "stack", ".", "pop", "(", ")", "line", "=", "matched", ".", "group", "(", "2", ")" ]
https://github.com/lmb-freiburg/ogn/blob/974f72ef4bf840d6f6693d22d1843a79223e77ce/scripts/cpp_lint.py#L2004-L2158
klzgrad/naiveproxy
ed2c513637c77b18721fe428d7ed395b4d284c83
src/build/android/gyp/util/resource_utils.py
python
_ParseTextSymbolsFile
(path, fix_package_ids=False)
return ret
Given an R.txt file, returns a list of _TextSymbolEntry. Args: path: Input file path. fix_package_ids: if True, 0x00 and 0x02 package IDs read from the file will be fixed to 0x7f. Returns: A list of _TextSymbolEntry instances. Raises: Exception: An unexpected line was detected in the input.
Given an R.txt file, returns a list of _TextSymbolEntry.
[ "Given", "an", "R", ".", "txt", "file", "returns", "a", "list", "of", "_TextSymbolEntry", "." ]
def _ParseTextSymbolsFile(path, fix_package_ids=False): """Given an R.txt file, returns a list of _TextSymbolEntry. Args: path: Input file path. fix_package_ids: if True, 0x00 and 0x02 package IDs read from the file will be fixed to 0x7f. Returns: A list of _TextSymbolEntry instances. Raises: Exception: An unexpected line was detected in the input. """ ret = [] with open(path) as f: for line in f: m = re.match(r'(int(?:\[\])?) (\w+) (\w+) (.+)$', line) if not m: raise Exception('Unexpected line in R.txt: %s' % line) java_type, resource_type, name, value = m.groups() if fix_package_ids: value = _FixPackageIds(value) ret.append(_TextSymbolEntry(java_type, resource_type, name, value)) return ret
[ "def", "_ParseTextSymbolsFile", "(", "path", ",", "fix_package_ids", "=", "False", ")", ":", "ret", "=", "[", "]", "with", "open", "(", "path", ")", "as", "f", ":", "for", "line", "in", "f", ":", "m", "=", "re", ".", "match", "(", "r'(int(?:\\[\\])?) (\\w+) (\\w+) (.+)$'", ",", "line", ")", "if", "not", "m", ":", "raise", "Exception", "(", "'Unexpected line in R.txt: %s'", "%", "line", ")", "java_type", ",", "resource_type", ",", "name", ",", "value", "=", "m", ".", "groups", "(", ")", "if", "fix_package_ids", ":", "value", "=", "_FixPackageIds", "(", "value", ")", "ret", ".", "append", "(", "_TextSymbolEntry", "(", "java_type", ",", "resource_type", ",", "name", ",", "value", ")", ")", "return", "ret" ]
https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/android/gyp/util/resource_utils.py#L314-L336
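A short sketch of the R.txt line format the parser expects, running its regular expression on a sample line (the resource values are illustrative):

```python
import re

line = 'int drawable icon 0x7f020000'
m = re.match(r'(int(?:\[\])?) (\w+) (\w+) (.+)$', line)
print(m.groups())   # ('int', 'drawable', 'icon', '0x7f020000')
```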
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/ops/array_ops.py
python
_FakeQuantWithMinMaxArgsGradient
(op, grad)
return fake_quant_with_min_max_args_gradient( grad, op.inputs[0], min=op.get_attr("min"), max=op.get_attr("max"), num_bits=op.get_attr("num_bits"), narrow_range=op.get_attr("narrow_range"))
Gradient for FakeQuantWithMinMaxArgs op.
Gradient for FakeQuantWithMinMaxArgs op.
[ "Gradient", "for", "FakeQuantWithMinMaxArgs", "op", "." ]
def _FakeQuantWithMinMaxArgsGradient(op, grad): """Gradient for FakeQuantWithMinMaxArgs op.""" return fake_quant_with_min_max_args_gradient( grad, op.inputs[0], min=op.get_attr("min"), max=op.get_attr("max"), num_bits=op.get_attr("num_bits"), narrow_range=op.get_attr("narrow_range"))
[ "def", "_FakeQuantWithMinMaxArgsGradient", "(", "op", ",", "grad", ")", ":", "return", "fake_quant_with_min_max_args_gradient", "(", "grad", ",", "op", ".", "inputs", "[", "0", "]", ",", "min", "=", "op", ".", "get_attr", "(", "\"min\"", ")", ",", "max", "=", "op", ".", "get_attr", "(", "\"max\"", ")", ",", "num_bits", "=", "op", ".", "get_attr", "(", "\"num_bits\"", ")", ",", "narrow_range", "=", "op", ".", "get_attr", "(", "\"narrow_range\"", ")", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/array_ops.py#L2004-L2012