Dataset schema (per column: type, then observed string-length range or number of distinct classes):

column              type               min length   max length
nwo                 string             5            86
sha                 string             40           40
path                string             4            189
language            string (1 class)
identifier          string             1            94
parameters          string             2            4.03k
argument_list       string (1 class)
return_statement    string             0            11.5k
docstring           string             1            33.2k
docstring_summary   string             0            5.15k
docstring_tokens    list of strings
function            string             34           151k
function_tokens     list of strings
url                 string             90           278
nwo: catboost/catboost
sha: 167f64f237114a4d10b2b4ee42adb4569137debe
path: contrib/python/numpy/py2/numpy/polynomial/laguerre.py
language: python
identifier: lagfromroots
parameters: (roots)
docstring_summary: Generate a Laguerre series with given roots.
function:

def lagfromroots(roots):
    """
    Generate a Laguerre series with given roots.

    The function returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    in Laguerre form, where the `r_n` are the roots specified in `roots`.
    If a zero has multiplicity n, then it must appear in `roots` n times.
    For instance, if 2 is a root of multiplicity three and 3 is a root of
    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
    roots can appear in any order.

    If the returned coefficients are `c`, then

    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)

    The coefficient of the last term is not generally 1 for monic
    polynomials in Laguerre form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients. If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    polyfromroots, legfromroots, chebfromroots, hermfromroots,
    hermefromroots.

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagfromroots, lagval
    >>> coef = lagfromroots((-1, 0, 1))
    >>> lagval((-1, 0, 1), coef)
    array([ 0., 0., 0.])
    >>> coef = lagfromroots((-1j, 1j))
    >>> lagval((-1j, 1j), coef)
    array([ 0.+0.j, 0.+0.j])
    """
    if len(roots) == 0:
        return np.ones(1)
    else:
        [roots] = pu.as_series([roots], trim=False)
        roots.sort()
        p = [lagline(-r, 1) for r in roots]
        n = len(p)
        while n > 1:
            m, r = divmod(n, 2)
            tmp = [lagmul(p[i], p[i+m]) for i in range(m)]
            if r:
                tmp[0] = lagmul(tmp[0], p[-1])
            p = tmp
            n = m
        return p[0]
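Usage sketch: `lagfromroots` is importable from NumPy's public `numpy.polynomial.laguerre` module, so the behavior above can be checked directly, assuming NumPy is installed:

import numpy as np
from numpy.polynomial.laguerre import lagfromroots, lagval

# Coefficients of p(x) = (x + 1) * x * (x - 1) in the Laguerre basis.
coef = lagfromroots((-1, 0, 1))

# Evaluating the series at each root should give numerical zeros.
print(lagval((-1, 0, 1), coef))        # array close to [0., 0., 0.]

# Complex roots force a complex coefficient array.
print(lagfromroots((-1j, 1j)).dtype)   # complex128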
[ "def", "lagfromroots", "(", "roots", ")", ":", "if", "len", "(", "roots", ")", "==", "0", ":", "return", "np", ".", "ones", "(", "1", ")", "else", ":", "[", "roots", "]", "=", "pu", ".", "as_series", "(", "[", "roots", "]", ",", "trim", "=", "False", ")", "roots", ".", "sort", "(", ")", "p", "=", "[", "lagline", "(", "-", "r", ",", "1", ")", "for", "r", "in", "roots", "]", "n", "=", "len", "(", "p", ")", "while", "n", ">", "1", ":", "m", ",", "r", "=", "divmod", "(", "n", ",", "2", ")", "tmp", "=", "[", "lagmul", "(", "p", "[", "i", "]", ",", "p", "[", "i", "+", "m", "]", ")", "for", "i", "in", "range", "(", "m", ")", "]", "if", "r", ":", "tmp", "[", "0", "]", "=", "lagmul", "(", "tmp", "[", "0", "]", ",", "p", "[", "-", "1", "]", ")", "p", "=", "tmp", "n", "=", "m", "return", "p", "[", "0", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/polynomial/laguerre.py#L236-L300
nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Tools/build/waf-1.7.13/lmbrwaflib/android.py
language: python
identifier: apply_android
parameters: (self)
docstring_summary: Generates the code generation task (produces R.java) and setups the task chaining for AIDL, Java and the code gen task
function:

def apply_android(self):
    """
    Generates the code generation task (produces R.java) and setups the task chaining
    for AIDL, Java and the code gen task
    """
    Utils.def_attrs(
        self,
        classpath = [],
        aapt_resources = [],
        aapt_includes = [],
        aapt_extra_packages = [],
        aapt_package_flags = [],
    )

    main_package = getattr(self, 'android_package', None)
    if not main_package:
        raise Errors.WafError('[ERROR] No "android_package" specified in Android package task.')

    javac_task = getattr(self, 'javac_task', None)
    if not javac_task:
        raise Errors.WafError('[ERROR] It seems the "javac" task failed to be generated, unable to complete the Android build process.')

    self.code_gen_task = code_gen_task = self.create_task('android_code_gen')

    r_java_outdir = getattr(self, 'r_java_outdir', None)
    if r_java_outdir:
        if not isinstance(r_java_outdir, Node.Node):
            r_java_outdir = self.path.get_bld().make_node(r_java_outdir)
    else:
        r_java_outdir = self.path.get_bld().make_node('r')
    r_java_outdir.mkdir()

    code_gen_task.env['OUTDIR'] = r_java_outdir.abspath()

    android_manifest = self.main_android_manifest
    code_gen_task.env['ANDROID_MANIFEST'] = android_manifest.abspath()

    # resources
    aapt_resources = []
    for resource in self.aapt_resources:
        if isinstance(resource, Node.Node):
            aapt_resources.append(resource.abspath())
        else:
            aapt_resources.append(resource)

    self.aapt_resource_paths = aapt_resources
    code_gen_task.env.append_value('AAPT_RESOURCES', aapt_resources)

    # included jars
    aapt_includes = self.aapt_includes + self.classpath
    aapt_include_paths = []
    for include_path in self.aapt_includes:
        if isinstance(include_path, Node.Node):
            aapt_include_paths.append(include_path.abspath())
        else:
            aapt_include_paths.append(include_path)

    self.aapt_include_paths = aapt_include_paths
    code_gen_task.env.append_value('AAPT_INCLUDES', aapt_include_paths)

    # additional flags
    aapt_package_flags = self.aapt_package_flags
    extra_packages = self.aapt_extra_packages
    if extra_packages:
        aapt_package_flags.extend([ '--extra-packages', ':'.join(extra_packages) ])
    code_gen_task.env.append_value('AAPT_PACKAGE_FLAGS', aapt_package_flags)

    # outputs (R.java files)
    included_packages = [ main_package ] + extra_packages
    output_nodes = []
    for package in included_packages:
        sub_dirs = package.split('.')
        dir_path = os.path.join(*sub_dirs)
        r_java_path = os.path.join(dir_path, 'R.java')
        r_java_node = r_java_outdir.make_node(r_java_path)
        output_nodes.append(r_java_node)

    code_gen_task.set_outputs(output_nodes)

    # task chaining
    manifest_preproc_task = getattr(self, 'manifest_preproc_task', None)
    manifest_merger_task = getattr(self, 'manifest_merger_task', None)

    if manifest_preproc_task and manifest_merger_task:
        code_gen_task.set_run_after(manifest_merger_task)
        manifest_merger_task.set_run_after(manifest_preproc_task)
    elif manifest_preproc_task:
        code_gen_task.set_run_after(manifest_preproc_task)
    elif manifest_merger_task:
        code_gen_task.set_run_after(manifest_merger_task)

    aidl_tasks = getattr(self, 'aidl_tasks', [])
    for aidl_task in aidl_tasks:
        code_gen_task.set_run_after(aidl_task)

    javac_task.set_run_after(self.code_gen_task)
[ "def", "apply_android", "(", "self", ")", ":", "Utils", ".", "def_attrs", "(", "self", ",", "classpath", "=", "[", "]", ",", "aapt_resources", "=", "[", "]", ",", "aapt_includes", "=", "[", "]", ",", "aapt_extra_packages", "=", "[", "]", ",", "aapt_package_flags", "=", "[", "]", ",", ")", "main_package", "=", "getattr", "(", "self", ",", "'android_package'", ",", "None", ")", "if", "not", "main_package", ":", "raise", "Errors", ".", "WafError", "(", "'[ERROR] No \"android_package\" specified in Android package task.'", ")", "javac_task", "=", "getattr", "(", "self", ",", "'javac_task'", ",", "None", ")", "if", "not", "javac_task", ":", "raise", "Errors", ".", "WafError", "(", "'[ERROR] It seems the \"javac\" task failed to be generated, unable to complete the Android build process.'", ")", "self", ".", "code_gen_task", "=", "code_gen_task", "=", "self", ".", "create_task", "(", "'android_code_gen'", ")", "r_java_outdir", "=", "getattr", "(", "self", ",", "'r_java_outdir'", ",", "None", ")", "if", "r_java_outdir", ":", "if", "not", "isinstance", "(", "r_java_outdir", ",", "Node", ".", "Node", ")", ":", "r_java_outdir", "=", "self", ".", "path", ".", "get_bld", "(", ")", ".", "make_node", "(", "r_java_outdir", ")", "else", ":", "r_java_outdir", "=", "self", ".", "path", ".", "get_bld", "(", ")", ".", "make_node", "(", "'r'", ")", "r_java_outdir", ".", "mkdir", "(", ")", "code_gen_task", ".", "env", "[", "'OUTDIR'", "]", "=", "r_java_outdir", ".", "abspath", "(", ")", "android_manifest", "=", "self", ".", "main_android_manifest", "code_gen_task", ".", "env", "[", "'ANDROID_MANIFEST'", "]", "=", "android_manifest", ".", "abspath", "(", ")", "# resources", "aapt_resources", "=", "[", "]", "for", "resource", "in", "self", ".", "aapt_resources", ":", "if", "isinstance", "(", "resource", ",", "Node", ".", "Node", ")", ":", "aapt_resources", ".", "append", "(", "resource", ".", "abspath", "(", ")", ")", "else", ":", "aapt_resources", ".", "append", "(", "resource", ")", "self", ".", "aapt_resource_paths", "=", "aapt_resources", "code_gen_task", ".", "env", ".", "append_value", "(", "'AAPT_RESOURCES'", ",", "aapt_resources", ")", "# included jars", "aapt_includes", "=", "self", ".", "aapt_includes", "+", "self", ".", "classpath", "aapt_include_paths", "=", "[", "]", "for", "include_path", "in", "self", ".", "aapt_includes", ":", "if", "isinstance", "(", "include_path", ",", "Node", ".", "Node", ")", ":", "aapt_include_paths", ".", "append", "(", "include_path", ".", "abspath", "(", ")", ")", "else", ":", "aapt_include_paths", ".", "append", "(", "include_path", ")", "self", ".", "aapt_include_paths", "=", "aapt_include_paths", "code_gen_task", ".", "env", ".", "append_value", "(", "'AAPT_INCLUDES'", ",", "aapt_include_paths", ")", "# additional flags", "aapt_package_flags", "=", "self", ".", "aapt_package_flags", "extra_packages", "=", "self", ".", "aapt_extra_packages", "if", "extra_packages", ":", "aapt_package_flags", ".", "extend", "(", "[", "'--extra-packages'", ",", "':'", ".", "join", "(", "extra_packages", ")", "]", ")", "code_gen_task", ".", "env", ".", "append_value", "(", "'AAPT_PACKAGE_FLAGS'", ",", "aapt_package_flags", ")", "# outputs (R.java files)", "included_packages", "=", "[", "main_package", "]", "+", "extra_packages", "output_nodes", "=", "[", "]", "for", "package", "in", "included_packages", ":", "sub_dirs", "=", "package", ".", "split", "(", "'.'", ")", "dir_path", "=", "os", ".", "path", ".", "join", "(", "*", "sub_dirs", ")", "r_java_path", "=", "os", ".", "path", ".", 
"join", "(", "dir_path", ",", "'R.java'", ")", "r_java_node", "=", "r_java_outdir", ".", "make_node", "(", "r_java_path", ")", "output_nodes", ".", "append", "(", "r_java_node", ")", "code_gen_task", ".", "set_outputs", "(", "output_nodes", ")", "# task chaining", "manifest_preproc_task", "=", "getattr", "(", "self", ",", "'manifest_preproc_task'", ",", "None", ")", "manifest_merger_task", "=", "getattr", "(", "self", ",", "'manifest_merger_task'", ",", "None", ")", "if", "manifest_preproc_task", "and", "manifest_merger_task", ":", "code_gen_task", ".", "set_run_after", "(", "manifest_merger_task", ")", "manifest_merger_task", ".", "set_run_after", "(", "manifest_preproc_task", ")", "elif", "manifest_preproc_task", ":", "code_gen_task", ".", "set_run_after", "(", "manifest_preproc_task", ")", "elif", "manifest_merger_task", ":", "code_gen_task", ".", "set_run_after", "(", "manifest_merger_task", ")", "aidl_tasks", "=", "getattr", "(", "self", ",", "'aidl_tasks'", ",", "[", "]", ")", "for", "aidl_task", "in", "aidl_tasks", ":", "code_gen_task", ".", "set_run_after", "(", "aidl_task", ")", "javac_task", ".", "set_run_after", "(", "self", ".", "code_gen_task", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/lmbrwaflib/android.py#L1986-L2098
nwo: bigartm/bigartm
sha: 47e37f982de87aa67bfd475ff1f39da696b181b3
path: 3rdparty/protobuf-3.0.0/python/google/protobuf/internal/decoder.py
language: python
identifier: _SkipVarint
parameters: (buffer, pos, end)
return_statement: return pos
docstring_summary: Skip a varint value. Returns the new position.
function:

def _SkipVarint(buffer, pos, end):
  """Skip a varint value. Returns the new position."""
  # Previously ord(buffer[pos]) raised IndexError when pos is out of range.
  # With this code, ord(b'') raises TypeError. Both are handled in
  # python_message.py to generate a 'Truncated message' error.
  while ord(buffer[pos:pos+1]) & 0x80:
    pos += 1
  pos += 1
  if pos > end:
    raise _DecodeError('Truncated message.')
  return pos
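Usage sketch: the helper depends only on `_DecodeError` from the same module, so a self-contained re-creation (the exception class below is a stand-in, not the real protobuf type) shows how a two-byte varint is skipped:

class _DecodeError(Exception):
    """Stand-in for the decode error type used by the real module."""

def _SkipVarint(buffer, pos, end):
    # Advance past continuation bytes (high bit set), then the final byte.
    while ord(buffer[pos:pos+1]) & 0x80:
        pos += 1
    pos += 1
    if pos > end:
        raise _DecodeError('Truncated message.')
    return pos

buf = b'\x96\x01\x08'                 # varint 150 (0x96 0x01) then more data
print(_SkipVarint(buf, 0, len(buf)))  # 2: the offset just past the varint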
[ "def", "_SkipVarint", "(", "buffer", ",", "pos", ",", "end", ")", ":", "# Previously ord(buffer[pos]) raised IndexError when pos is out of range.", "# With this code, ord(b'') raises TypeError. Both are handled in", "# python_message.py to generate a 'Truncated message' error.", "while", "ord", "(", "buffer", "[", "pos", ":", "pos", "+", "1", "]", ")", "&", "0x80", ":", "pos", "+=", "1", "pos", "+=", "1", "if", "pos", ">", "end", ":", "raise", "_DecodeError", "(", "'Truncated message.'", ")", "return", "pos" ]
https://github.com/bigartm/bigartm/blob/47e37f982de87aa67bfd475ff1f39da696b181b3/3rdparty/protobuf-3.0.0/python/google/protobuf/internal/decoder.py#L765-L775
nwo: facebook/watchman
sha: 0917460c71b000b96be9b9575d77f06f2f6053bb
path: build/fbcode_builder/getdeps/subcmd.py
language: python
identifier: SubCmd.run
parameters: (self, args)
return_statement: return 0
docstring_summary: perform the command
function:

def run(self, args):
    """perform the command"""
    return 0
[ "def", "run", "(", "self", ",", "args", ")", ":", "return", "0" ]
https://github.com/facebook/watchman/blob/0917460c71b000b96be9b9575d77f06f2f6053bb/build/fbcode_builder/getdeps/subcmd.py#L11-L13
nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Tools/Python/3.7.10/windows/Lib/configparser.py
language: python
identifier: RawConfigParser.read
parameters: (self, filenames, encoding=None)
return_statement: return read_ok
docstring_summary: Read and parse a filename or an iterable of filenames.
function:

def read(self, filenames, encoding=None):
    """Read and parse a filename or an iterable of filenames.

    Files that cannot be opened are silently ignored; this is
    designed so that you can specify an iterable of potential
    configuration file locations (e.g. current directory, user's
    home directory, systemwide directory), and all existing
    configuration files in the iterable will be read. A single
    filename may also be given.

    Return list of successfully read files.
    """
    if isinstance(filenames, (str, bytes, os.PathLike)):
        filenames = [filenames]
    read_ok = []
    for filename in filenames:
        try:
            with open(filename, encoding=encoding) as fp:
                self._read(fp, filename)
        except OSError:
            continue
        if isinstance(filename, os.PathLike):
            filename = os.fspath(filename)
        read_ok.append(filename)
    return read_ok
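Usage sketch: this is the standard-library configparser (vendored here by Lumberyard), so the silently-skip behavior is easy to demonstrate; the file names below are placeholders:

import configparser

parser = configparser.ConfigParser()

# Candidate locations; files that cannot be opened are skipped silently.
read_ok = parser.read(['/etc/myapp.ini', 'myapp.ini'])  # hypothetical paths
print(read_ok)  # only the files that were actually parsed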
[ "def", "read", "(", "self", ",", "filenames", ",", "encoding", "=", "None", ")", ":", "if", "isinstance", "(", "filenames", ",", "(", "str", ",", "bytes", ",", "os", ".", "PathLike", ")", ")", ":", "filenames", "=", "[", "filenames", "]", "read_ok", "=", "[", "]", "for", "filename", "in", "filenames", ":", "try", ":", "with", "open", "(", "filename", ",", "encoding", "=", "encoding", ")", "as", "fp", ":", "self", ".", "_read", "(", "fp", ",", "filename", ")", "except", "OSError", ":", "continue", "if", "isinstance", "(", "filename", ",", "os", ".", "PathLike", ")", ":", "filename", "=", "os", ".", "fspath", "(", "filename", ")", "read_ok", ".", "append", "(", "filename", ")", "return", "read_ok" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/configparser.py#L678-L702
nwo: ceph/ceph
sha: 959663007321a369c83218414a29bd9dbc8bda3a
path: qa/tasks/cephfs/filesystem.py
language: python
identifier: Filesystem.data_scan
parameters: (self, args, quiet=False, worker_count=1)
docstring_summary: Invoke cephfs-data-scan with the passed arguments, and return its stdout
function:

def data_scan(self, args, quiet=False, worker_count=1):
    """
    Invoke cephfs-data-scan with the passed arguments, and return its stdout

    :param worker_count: if greater than 1, multiple workers will be run
                         in parallel and the return value will be None
    """
    workers = []

    for n in range(0, worker_count):
        if worker_count > 1:
            # data-scan args first token is a command, followed by args to it.
            # insert worker arguments after the command.
            cmd = args[0]
            worker_args = [cmd] + ["--worker_n", n.__str__(), "--worker_m", worker_count.__str__()] + args[1:]
        else:
            worker_args = args

        workers.append(Greenlet.spawn(lambda wargs=worker_args:
                       self._run_tool("cephfs-data-scan", wargs, None, quiet)))

    for w in workers:
        w.get()

    if worker_count == 1:
        return workers[0].value
    else:
        return None
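The subtle part is how the per-worker flags are spliced in after the subcommand token; a standalone sketch of just that splicing (the argument values are illustrative, no Ceph cluster needed):

def worker_args_for(args, n, worker_count):
    # Keep the subcommand first, then the worker flags, then its arguments.
    return [args[0]] + ["--worker_n", str(n), "--worker_m", str(worker_count)] + args[1:]

args = ["scan_extents", "--force-pool", "data"]   # hypothetical invocation
for n in range(3):
    print(worker_args_for(args, n, 3))
# ['scan_extents', '--worker_n', '0', '--worker_m', '3', '--force-pool', 'data'] ...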
[ "def", "data_scan", "(", "self", ",", "args", ",", "quiet", "=", "False", ",", "worker_count", "=", "1", ")", ":", "workers", "=", "[", "]", "for", "n", "in", "range", "(", "0", ",", "worker_count", ")", ":", "if", "worker_count", ">", "1", ":", "# data-scan args first token is a command, followed by args to it.", "# insert worker arguments after the command.", "cmd", "=", "args", "[", "0", "]", "worker_args", "=", "[", "cmd", "]", "+", "[", "\"--worker_n\"", ",", "n", ".", "__str__", "(", ")", ",", "\"--worker_m\"", ",", "worker_count", ".", "__str__", "(", ")", "]", "+", "args", "[", "1", ":", "]", "else", ":", "worker_args", "=", "args", "workers", ".", "append", "(", "Greenlet", ".", "spawn", "(", "lambda", "wargs", "=", "worker_args", ":", "self", ".", "_run_tool", "(", "\"cephfs-data-scan\"", ",", "wargs", ",", "None", ",", "quiet", ")", ")", ")", "for", "w", "in", "workers", ":", "w", ".", "get", "(", ")", "if", "worker_count", "==", "1", ":", "return", "workers", "[", "0", "]", ".", "value", "else", ":", "return", "None" ]
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/qa/tasks/cephfs/filesystem.py#L1518-L1546
nwo: root-project/root
sha: fcd3583bb14852bf2e8cd2415717cbaac0e75896
path: interpreter/llvm/src/examples/Kaleidoscope/MCJIT/cached/split-lib.py
language: python
identifier: TimingScriptGenerator.writeTimingCall
parameters: (self, irname, callname)
docstring_summary: Echo some comments and invoke both versions of toy
function:

def writeTimingCall(self, irname, callname):
    """Echo some comments and invoke both versions of toy"""
    rootname = irname
    if '.' in irname:
        rootname = irname[:irname.rfind('.')]
    self.shfile.write("echo \"%s: Calls %s\" >> %s\n" % (callname, irname, self.timeFile))
    self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
    self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
    self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
    self.shfile.write(" -o %s -a " % self.timeFile)
    self.shfile.write("./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
    self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
    self.shfile.write("echo \"With MCJIT again\" >> %s\n" % self.timeFile)
    self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
    self.shfile.write(" -o %s -a " % self.timeFile)
    self.shfile.write("./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
    self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
    self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
    self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
    self.shfile.write(" -o %s -a " % self.timeFile)
    self.shfile.write("./toy-jit -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
    self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
    self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
[ "def", "writeTimingCall", "(", "self", ",", "irname", ",", "callname", ")", ":", "rootname", "=", "irname", "if", "'.'", "in", "irname", ":", "rootname", "=", "irname", "[", ":", "irname", ".", "rfind", "(", "'.'", ")", "]", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"%s: Calls %s\\\" >> %s\\n\"", "%", "(", "callname", ",", "irname", ",", "self", ".", "timeFile", ")", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"With MCJIT\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"/usr/bin/time -f \\\"Command %C\\\\n\\\\tuser time: %U s\\\\n\\\\tsytem time: %S s\\\\n\\\\tmax set: %M kb\\\"\"", ")", "self", ".", "shfile", ".", "write", "(", "\" -o %s -a \"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\\n\"", "%", "(", "irname", ",", "callname", ",", "rootname", ",", "rootname", ")", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"With MCJIT again\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"/usr/bin/time -f \\\"Command %C\\\\n\\\\tuser time: %U s\\\\n\\\\tsytem time: %S s\\\\n\\\\tmax set: %M kb\\\"\"", ")", "self", ".", "shfile", ".", "write", "(", "\" -o %s -a \"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\\n\"", "%", "(", "irname", ",", "callname", ",", "rootname", ",", "rootname", ")", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"With JIT\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"/usr/bin/time -f \\\"Command %C\\\\n\\\\tuser time: %U s\\\\n\\\\tsytem time: %S s\\\\n\\\\tmax set: %M kb\\\"\"", ")", "self", ".", "shfile", ".", "write", "(", "\" -o %s -a \"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"./toy-jit -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\\n\"", "%", "(", "irname", ",", "callname", ",", "rootname", ",", "rootname", ")", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")", "self", ".", "shfile", ".", "write", "(", "\"echo \\\"\\\" >> %s\\n\"", "%", "self", ".", "timeFile", ")" ]
https://github.com/root-project/root/blob/fcd3583bb14852bf2e8cd2415717cbaac0e75896/interpreter/llvm/src/examples/Kaleidoscope/MCJIT/cached/split-lib.py#L12-L34
nwo: google/llvm-propeller
sha: 45c226984fe8377ebfb2ad7713c680d652ba678d
path: lldb/third_party/Python/module/pexpect-4.6/pexpect/replwrap.py
language: python
identifier: python
parameters: (command="python")
return_statement: return REPLWrapper(command, u">>> ", u"import sys; sys.ps1={0!r}; sys.ps2={1!r}")
docstring_summary: Start a Python shell and return a :class:`REPLWrapper` object.
function:

def python(command="python"):
    """Start a Python shell and return a :class:`REPLWrapper` object."""
    return REPLWrapper(command, u">>> ", u"import sys; sys.ps1={0!r}; sys.ps2={1!r}")
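Usage sketch: pexpect ships this helper as `pexpect.replwrap.python`, so it can be tried directly on a POSIX system with pexpect installed:

from pexpect import replwrap

repl = replwrap.python()           # spawns `python` behind a pseudo-terminal
print(repl.run_command("6 * 7"))   # "42" plus a trailing newline
print(repl.run_command("import sys; print(sys.platform)"))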
[ "def", "python", "(", "command", "=", "\"python\"", ")", ":", "return", "REPLWrapper", "(", "command", ",", "u\">>> \"", ",", "u\"import sys; sys.ps1={0!r}; sys.ps2={1!r}\"", ")" ]
https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/lldb/third_party/Python/module/pexpect-4.6/pexpect/replwrap.py#L103-L105
nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: src/osx_carbon/_core.py
language: python
identifier: Object.IsSameAs
parameters: (*args, **kwargs)
return_statement: return _core_.Object_IsSameAs(*args, **kwargs)
docstring_summary: IsSameAs(self, Object p) -> bool
function:

def IsSameAs(*args, **kwargs):
    """
    IsSameAs(self, Object p) -> bool

    For wx.Objects that use C++ reference counting internally, this method
    can be used to determine if two objects are referencing the same data
    object.
    """
    return _core_.Object_IsSameAs(*args, **kwargs)
[ "def", "IsSameAs", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Object_IsSameAs", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L821-L829
nwo: catboost/catboost
sha: 167f64f237114a4d10b2b4ee42adb4569137debe
path: contrib/tools/python3/src/Lib/concurrent/futures/_base.py
language: python
identifier: Future.add_done_callback
parameters: (self, fn)
docstring_summary: Attaches a callable that will be called when the future finishes.
function:

def add_done_callback(self, fn):
    """Attaches a callable that will be called when the future finishes.

    Args:
        fn: A callable that will be called with this future as its only
            argument when the future completes or is cancelled. The callable
            will always be called by a thread in the same process in which
            it was added. If the future has already completed or been
            cancelled then the callable will be called immediately. These
            callables are called in the order that they were added.
    """
    with self._condition:
        if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
            self._done_callbacks.append(fn)
            return
    try:
        fn(self)
    except Exception:
        LOGGER.exception('exception calling callback for %r', self)
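Usage sketch: this is the standard-library concurrent.futures implementation (vendored by CatBoost), so the callback semantics can be shown with the real module:

from concurrent.futures import ThreadPoolExecutor

def announce(fut):
    # Runs as soon as the future finishes (or immediately if already done).
    print("done:", fut.result())

with ThreadPoolExecutor(max_workers=1) as pool:
    f = pool.submit(pow, 2, 10)
    f.add_done_callback(announce)  # prints "done: 1024"

# Adding a callback to an already-finished future invokes it immediately.
f.add_done_callback(lambda fut: print("again:", fut.result()))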
[ "def", "add_done_callback", "(", "self", ",", "fn", ")", ":", "with", "self", ".", "_condition", ":", "if", "self", ".", "_state", "not", "in", "[", "CANCELLED", ",", "CANCELLED_AND_NOTIFIED", ",", "FINISHED", "]", ":", "self", ".", "_done_callbacks", ".", "append", "(", "fn", ")", "return", "try", ":", "fn", "(", "self", ")", "except", "Exception", ":", "LOGGER", ".", "exception", "(", "'exception calling callback for %r'", ",", "self", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/concurrent/futures/_base.py#L398-L416
nwo: catboost/catboost
sha: 167f64f237114a4d10b2b4ee42adb4569137debe
path: contrib/python/pandas/py2/pandas/core/groupby/groupby.py
language: python
identifier: GroupBy.expanding
parameters: (self, *args, **kwargs)
return_statement: return ExpandingGroupby(self, *args, **kwargs)
docstring_summary: Return an expanding grouper, providing expanding functionality per group.
function:

def expanding(self, *args, **kwargs):
    """
    Return an expanding grouper, providing expanding
    functionality per group.
    """
    from pandas.core.window import ExpandingGroupby
    return ExpandingGroupby(self, *args, **kwargs)
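Usage sketch: the method backs the public `GroupBy.expanding` API in pandas; a small example of what an expanding (cumulative) window per group computes:

import pandas as pd

df = pd.DataFrame({"g": ["a", "a", "b", "b"], "x": [1, 2, 10, 20]})

# The expanding sum restarts for each group: a -> 1, 3 and b -> 10, 30.
print(df.groupby("g")["x"].expanding().sum())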
[ "def", "expanding", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "core", ".", "window", "import", "ExpandingGroupby", "return", "ExpandingGroupby", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/groupby/groupby.py#L1449-L1455
nwo: google/earthenterprise
sha: 0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9
path: earth_enterprise/src/google/protobuf-py/mox.py
language: python
identifier: MultipleTimesGroup.IsSatisfied
parameters: (self)
return_statement: return False
docstring_summary: Return True if all methods in this group are called at least once.
function:

def IsSatisfied(self):
  """Return True if all methods in this group are called at least once."""
  # NOTE(psycho): We can't use the simple set difference here because we want
  # to match different parameters which are considered the same e.g. IsA(str)
  # and some string. This solution is O(n^2) but n should be small.
  tmp = self._methods.copy()
  for called in self._methods_called:
    for expected in tmp:
      if called == expected:
        tmp.remove(expected)
        if not tmp:
          return True
        break
  return False
[ "def", "IsSatisfied", "(", "self", ")", ":", "# NOTE(psycho): We can't use the simple set difference here because we want", "# to match different parameters which are considered the same e.g. IsA(str)", "# and some string. This solution is O(n^2) but n should be small.", "tmp", "=", "self", ".", "_methods", ".", "copy", "(", ")", "for", "called", "in", "self", ".", "_methods_called", ":", "for", "expected", "in", "tmp", ":", "if", "called", "==", "expected", ":", "tmp", ".", "remove", "(", "expected", ")", "if", "not", "tmp", ":", "return", "True", "break", "return", "False" ]
https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/google/protobuf-py/mox.py#L1318-L1331
nwo: natanielruiz/android-yolo
sha: 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
path: jni-build/jni/include/external/bazel_tools/third_party/py/concurrent/futures/_base.py
language: python
identifier: Future.set_running_or_notify_cancel
parameters: (self)
docstring_summary: Mark the future as running or process any cancel notifications.
function:

def set_running_or_notify_cancel(self):
    """Mark the future as running or process any cancel notifications.

    Should only be used by Executor implementations and unit tests.

    If the future has been cancelled (cancel() was called and returned
    True) then any threads waiting on the future completing (though calls
    to as_completed() or wait()) are notified and False is returned.

    If the future was not cancelled then it is put in the running state
    (future calls to running() will return True) and True is returned.

    This method should be called by Executor implementations before
    executing the work associated with this future. If this method returns
    False then the work should not be executed.

    Returns:
        False if the Future was cancelled, True otherwise.

    Raises:
        RuntimeError: if this method was already called or if set_result()
            or set_exception() was called.
    """
    with self._condition:
        if self._state == CANCELLED:
            self._state = CANCELLED_AND_NOTIFIED
            for waiter in self._waiters:
                waiter.add_cancelled(self)
            # self._condition.notify_all() is not necessary because
            # self.cancel() triggers a notification.
            return False
        elif self._state == PENDING:
            self._state = RUNNING
            return True
        else:
            LOGGER.critical('Future %s in unexpected state: %s',
                            id(self.future),
                            self.future._state)
            raise RuntimeError('Future in unexpected state')
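Usage sketch: the same method exists on `concurrent.futures.Future` in the standard library, so the cancel-before-run handshake can be demonstrated without an executor:

from concurrent.futures import Future

f = Future()
f.cancel()                                # PENDING -> CANCELLED
print(f.set_running_or_notify_cancel())   # False: the work must not run

g = Future()
print(g.set_running_or_notify_cancel())   # True: PENDING -> RUNNING
g.set_result(42)
print(g.result())                         # 42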
[ "def", "set_running_or_notify_cancel", "(", "self", ")", ":", "with", "self", ".", "_condition", ":", "if", "self", ".", "_state", "==", "CANCELLED", ":", "self", ".", "_state", "=", "CANCELLED_AND_NOTIFIED", "for", "waiter", "in", "self", ".", "_waiters", ":", "waiter", ".", "add_cancelled", "(", "self", ")", "# self._condition.notify_all() is not necessary because", "# self.cancel() triggers a notification.", "return", "False", "elif", "self", ".", "_state", "==", "PENDING", ":", "self", ".", "_state", "=", "RUNNING", "return", "True", "else", ":", "LOGGER", ".", "critical", "(", "'Future %s in unexpected state: %s'", ",", "id", "(", "self", ".", "future", ")", ",", "self", ".", "future", ".", "_state", ")", "raise", "RuntimeError", "(", "'Future in unexpected state'", ")" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/external/bazel_tools/third_party/py/concurrent/futures/_base.py#L443-L481
nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: src/gtk/html.py
language: python
identifier: HtmlCell.SetNext
parameters: (*args, **kwargs)
return_statement: return _html.HtmlCell_SetNext(*args, **kwargs)
docstring_summary: SetNext(self, HtmlCell cell)
function:

def SetNext(*args, **kwargs):
    """SetNext(self, HtmlCell cell)"""
    return _html.HtmlCell_SetNext(*args, **kwargs)
[ "def", "SetNext", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_html", ".", "HtmlCell_SetNext", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/html.py#L670-L672
nwo: apple/swift-lldb
sha: d74be846ef3e62de946df343e8c234bde93a8912
path: scripts/Python/static-binding/lldb.py
language: python
identifier: SBDeclaration.GetDescription
parameters: (self, description)
return_statement: return _lldb.SBDeclaration_GetDescription(self, description)
docstring_summary: GetDescription(SBDeclaration self, SBStream description) -> bool
function:

def GetDescription(self, description):
    """GetDescription(SBDeclaration self, SBStream description) -> bool"""
    return _lldb.SBDeclaration_GetDescription(self, description)
[ "def", "GetDescription", "(", "self", ",", "description", ")", ":", "return", "_lldb", ".", "SBDeclaration_GetDescription", "(", "self", ",", "description", ")" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L4454-L4456
nwo: mantidproject/mantid
sha: 03deeb89254ec4289edb8771e0188c2090a02f32
path: Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PolDiffILLReduction.py
language: python
identifier: PolDiffILLReduction._load_and_prepare_data
parameters: (self, measurement_technique, process, progress)
return_statement: return ws, progress
docstring_summary: Loads the data, sets the instrument, and runs function to check the measurement method. In the case of a single crystal measurement, it also merges the omega scan data into one workspace per polarisation orientation.
function:

def _load_and_prepare_data(self, measurement_technique, process, progress):
    """Loads the data, sets the instrument, and runs function to check the
    measurement method. In the case of a single crystal measurement, it also
    merges the omega scan data into one workspace per polarisation orientation."""
    ws = '__' + self.getPropertyValue('OutputWorkspace')
    calibration_setting = 'YIGFile'
    if self.getProperty('InstrumentCalibration').isDefault:
        calibration_setting = 'None'
    progress.report(0, 'Loading data')
    LoadAndMerge(Filename=self.getPropertyValue('Run'),
                 LoaderName='LoadILLPolarizedDiffraction',
                 LoaderOptions={'PositionCalibration': calibration_setting,
                                'YIGFileName': self.getPropertyValue('InstrumentCalibration')},
                 OutputWorkspace=ws, startProgress=0.0, endProgress=0.6)
    if self.getPropertyValue("OutputTreatment") not in ['AverageTwoTheta', 'IndividualXY'] \
            and measurement_technique != 'SingleCrystal' and process != 'Transmission':
        self._merge_twoTheta_scans(ws)
    masked_detectors = self.getProperty('MaskDetectors').value
    if len(masked_detectors) > 0:
        MaskDetectors(Workspace=ws, SpectraList=masked_detectors)
    self._instrument = mtd[ws][0].getInstrument().getName()
    self._figure_out_measurement_method(ws)
    if measurement_technique == 'SingleCrystal':
        progress.report(7, 'Merging omega scan')
        input_ws = self._merge_omega_scan(ws, self._data_structure_helper(), ws+'_conjoined')
        DeleteWorkspace(Workspace=ws)
        RenameWorkspace(InputWorkspace=input_ws, OutputWorkspace=ws)
    elif measurement_technique == 'TOF':
        if not self.getProperty('MaxTOFChannel').isDefault:
            max_TOF_channel = self.getProperty('MaxTOFChannel').value
            dataX = mtd[ws][0].readX(0)
            if len(dataX) > max_TOF_channel:
                lowerLimit = dataX[max_TOF_channel]
                upperLimit = dataX[-1]
                RemoveBins(InputWorkspace=ws, OutputWorkspace=ws, XMin=lowerLimit, XMax=upperLimit)
    if process in ['Vanadium', 'Sample']:
        self._read_experiment_properties(ws)
    return ws, progress
[ "def", "_load_and_prepare_data", "(", "self", ",", "measurement_technique", ",", "process", ",", "progress", ")", ":", "ws", "=", "'__'", "+", "self", ".", "getPropertyValue", "(", "'OutputWorkspace'", ")", "calibration_setting", "=", "'YIGFile'", "if", "self", ".", "getProperty", "(", "'InstrumentCalibration'", ")", ".", "isDefault", ":", "calibration_setting", "=", "'None'", "progress", ".", "report", "(", "0", ",", "'Loading data'", ")", "LoadAndMerge", "(", "Filename", "=", "self", ".", "getPropertyValue", "(", "'Run'", ")", ",", "LoaderName", "=", "'LoadILLPolarizedDiffraction'", ",", "LoaderOptions", "=", "{", "'PositionCalibration'", ":", "calibration_setting", ",", "'YIGFileName'", ":", "self", ".", "getPropertyValue", "(", "'InstrumentCalibration'", ")", "}", ",", "OutputWorkspace", "=", "ws", ",", "startProgress", "=", "0.0", ",", "endProgress", "=", "0.6", ")", "if", "self", ".", "getPropertyValue", "(", "\"OutputTreatment\"", ")", "not", "in", "[", "'AverageTwoTheta'", ",", "'IndividualXY'", "]", "and", "measurement_technique", "!=", "'SingleCrystal'", "and", "process", "!=", "'Transmission'", ":", "self", ".", "_merge_twoTheta_scans", "(", "ws", ")", "masked_detectors", "=", "self", ".", "getProperty", "(", "'MaskDetectors'", ")", ".", "value", "if", "len", "(", "masked_detectors", ")", ">", "0", ":", "MaskDetectors", "(", "Workspace", "=", "ws", ",", "SpectraList", "=", "masked_detectors", ")", "self", ".", "_instrument", "=", "mtd", "[", "ws", "]", "[", "0", "]", ".", "getInstrument", "(", ")", ".", "getName", "(", ")", "self", ".", "_figure_out_measurement_method", "(", "ws", ")", "if", "measurement_technique", "==", "'SingleCrystal'", ":", "progress", ".", "report", "(", "7", ",", "'Merging omega scan'", ")", "input_ws", "=", "self", ".", "_merge_omega_scan", "(", "ws", ",", "self", ".", "_data_structure_helper", "(", ")", ",", "ws", "+", "'_conjoined'", ")", "DeleteWorkspace", "(", "Workspace", "=", "ws", ")", "RenameWorkspace", "(", "InputWorkspace", "=", "input_ws", ",", "OutputWorkspace", "=", "ws", ")", "elif", "measurement_technique", "==", "'TOF'", ":", "if", "not", "self", ".", "getProperty", "(", "'MaxTOFChannel'", ")", ".", "isDefault", ":", "max_TOF_channel", "=", "self", ".", "getProperty", "(", "'MaxTOFChannel'", ")", ".", "value", "dataX", "=", "mtd", "[", "ws", "]", "[", "0", "]", ".", "readX", "(", "0", ")", "if", "len", "(", "dataX", ")", ">", "max_TOF_channel", ":", "lowerLimit", "=", "dataX", "[", "max_TOF_channel", "]", "upperLimit", "=", "dataX", "[", "-", "1", "]", "RemoveBins", "(", "InputWorkspace", "=", "ws", ",", "OutputWorkspace", "=", "ws", ",", "XMin", "=", "lowerLimit", ",", "XMax", "=", "upperLimit", ")", "if", "process", "in", "[", "'Vanadium'", ",", "'Sample'", "]", ":", "self", ".", "_read_experiment_properties", "(", "ws", ")", "return", "ws", ",", "progress" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PolDiffILLReduction.py#L526-L564
nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/fastparquet/api.py
language: python
identifier: statistics
parameters: (obj)
docstring_summary: Return per-column statistics for a ParquerFile
function:

def statistics(obj):
    """
    Return per-column statistics for a ParquerFile

    Parameters
    ----------
    obj: ParquetFile

    Returns
    -------
    dictionary mapping stats (min, max, distinct_count, null_count) to column
    names to lists of values. ``None``s used if no statistics found.

    Examples
    --------
    >>> statistics(my_parquet_file)
    {'min': {'x': [1, 4], 'y': [5, 3]},
     'max': {'x': [2, 6], 'y': [8, 6]},
     'distinct_count': {'x': [None, None], 'y': [None, None]},
     'null_count': {'x': [0, 3], 'y': [0, 0]}}
    """
    if isinstance(obj, parquet_thrift.ColumnChunk):
        md = obj.meta_data
        s = obj.meta_data.statistics
        rv = {}
        if not s:
            return rv
        if s.max is not None:
            try:
                if md.type == parquet_thrift.Type.BYTE_ARRAY:
                    rv['max'] = ensure_bytes(s.max)
                else:
                    rv['max'] = encoding.read_plain(ensure_bytes(s.max), md.type, 1)[0]
            except:
                rv['max'] = None
        if s.min is not None:
            try:
                if md.type == parquet_thrift.Type.BYTE_ARRAY:
                    rv['min'] = ensure_bytes(s.min)
                else:
                    rv['min'] = encoding.read_plain(ensure_bytes(s.min), md.type, 1)[0]
            except:
                rv['min'] = None
        if s.null_count is not None:
            rv['null_count'] = s.null_count
        if s.distinct_count is not None:
            rv['distinct_count'] = s.distinct_count
        return rv

    if isinstance(obj, parquet_thrift.RowGroup):
        return {'.'.join(c.meta_data.path_in_schema): statistics(c)
                for c in obj.columns}

    if isinstance(obj, ParquetFile):
        L = list(map(statistics, obj.row_groups))
        d = {n: {col: [item.get(col, {}).get(n, None) for item in L]
                 for col in obj.columns}
             for n in ['min', 'max', 'null_count', 'distinct_count']}
        if not L:
            return d
        schema = obj.schema
        for col in obj.row_groups[0].columns:
            column = '.'.join(col.meta_data.path_in_schema)
            se = schema.schema_element(col.meta_data.path_in_schema)
            if (se.converted_type is not None
                    or se.type == parquet_thrift.Type.INT96):
                dtype = 'S12' if se.type == parquet_thrift.Type.INT96 else None
                for name in ['min', 'max']:
                    try:
                        d[name][column] = (
                            [None] if d[name][column] is None
                            or None in d[name][column]
                            else list(converted_types.convert(
                                np.array(d[name][column], dtype), se))
                        )
                    except (KeyError, ValueError):  # catch no stat and bad conversions
                        d[name][column] = [None]
        return d
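Usage sketch against the fastparquet API shown here; `example.parquet` is a placeholder file name:

from fastparquet import ParquetFile
from fastparquet.api import statistics

pf = ParquetFile("example.parquet")  # hypothetical input file
stats = statistics(pf)

# One list entry per row group; None where the writer stored no statistics.
print(stats["min"])
print(stats["null_count"])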
[ "def", "statistics", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "parquet_thrift", ".", "ColumnChunk", ")", ":", "md", "=", "obj", ".", "meta_data", "s", "=", "obj", ".", "meta_data", ".", "statistics", "rv", "=", "{", "}", "if", "not", "s", ":", "return", "rv", "if", "s", ".", "max", "is", "not", "None", ":", "try", ":", "if", "md", ".", "type", "==", "parquet_thrift", ".", "Type", ".", "BYTE_ARRAY", ":", "rv", "[", "'max'", "]", "=", "ensure_bytes", "(", "s", ".", "max", ")", "else", ":", "rv", "[", "'max'", "]", "=", "encoding", ".", "read_plain", "(", "ensure_bytes", "(", "s", ".", "max", ")", ",", "md", ".", "type", ",", "1", ")", "[", "0", "]", "except", ":", "rv", "[", "'max'", "]", "=", "None", "if", "s", ".", "min", "is", "not", "None", ":", "try", ":", "if", "md", ".", "type", "==", "parquet_thrift", ".", "Type", ".", "BYTE_ARRAY", ":", "rv", "[", "'min'", "]", "=", "ensure_bytes", "(", "s", ".", "min", ")", "else", ":", "rv", "[", "'min'", "]", "=", "encoding", ".", "read_plain", "(", "ensure_bytes", "(", "s", ".", "min", ")", ",", "md", ".", "type", ",", "1", ")", "[", "0", "]", "except", ":", "rv", "[", "'min'", "]", "=", "None", "if", "s", ".", "null_count", "is", "not", "None", ":", "rv", "[", "'null_count'", "]", "=", "s", ".", "null_count", "if", "s", ".", "distinct_count", "is", "not", "None", ":", "rv", "[", "'distinct_count'", "]", "=", "s", ".", "distinct_count", "return", "rv", "if", "isinstance", "(", "obj", ",", "parquet_thrift", ".", "RowGroup", ")", ":", "return", "{", "'.'", ".", "join", "(", "c", ".", "meta_data", ".", "path_in_schema", ")", ":", "statistics", "(", "c", ")", "for", "c", "in", "obj", ".", "columns", "}", "if", "isinstance", "(", "obj", ",", "ParquetFile", ")", ":", "L", "=", "list", "(", "map", "(", "statistics", ",", "obj", ".", "row_groups", ")", ")", "d", "=", "{", "n", ":", "{", "col", ":", "[", "item", ".", "get", "(", "col", ",", "{", "}", ")", ".", "get", "(", "n", ",", "None", ")", "for", "item", "in", "L", "]", "for", "col", "in", "obj", ".", "columns", "}", "for", "n", "in", "[", "'min'", ",", "'max'", ",", "'null_count'", ",", "'distinct_count'", "]", "}", "if", "not", "L", ":", "return", "d", "schema", "=", "obj", ".", "schema", "for", "col", "in", "obj", ".", "row_groups", "[", "0", "]", ".", "columns", ":", "column", "=", "'.'", ".", "join", "(", "col", ".", "meta_data", ".", "path_in_schema", ")", "se", "=", "schema", ".", "schema_element", "(", "col", ".", "meta_data", ".", "path_in_schema", ")", "if", "(", "se", ".", "converted_type", "is", "not", "None", "or", "se", ".", "type", "==", "parquet_thrift", ".", "Type", ".", "INT96", ")", ":", "dtype", "=", "'S12'", "if", "se", ".", "type", "==", "parquet_thrift", ".", "Type", ".", "INT96", "else", "None", "for", "name", "in", "[", "'min'", ",", "'max'", "]", ":", "try", ":", "d", "[", "name", "]", "[", "column", "]", "=", "(", "[", "None", "]", "if", "d", "[", "name", "]", "[", "column", "]", "is", "None", "or", "None", "in", "d", "[", "name", "]", "[", "column", "]", "else", "list", "(", "converted_types", ".", "convert", "(", "np", ".", "array", "(", "d", "[", "name", "]", "[", "column", "]", ",", "dtype", ")", ",", "se", ")", ")", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "# catch no stat and bad conversions", "d", "[", "name", "]", "[", "column", "]", "=", "[", "None", "]", "return", "d" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/fastparquet/api.py#L675-L755
nwo: mindspore-ai/mindspore
sha: fb8fd3338605bb34fa5cea054e535a8b1d753fab
path: mindspore/python/mindspore/nn/probability/distribution/gumbel.py
language: python
identifier: Gumbel._log_prob
parameters: (self, value)
return_statement: return -(z + self.exp(-z)) - self.log(self.scale)
r""" .. math:: log_pdf(X) = -(z + \exp(-z)) - \log(scale) where z = \frac{x - loc}{scale}
r""" .. math:: log_pdf(X) = -(z + \exp(-z)) - \log(scale) where z = \frac{x - loc}{scale}
[ "r", "..", "math", "::", "log_pdf", "(", "X", ")", "=", "-", "(", "z", "+", "\\", "exp", "(", "-", "z", "))", "-", "\\", "log", "(", "scale", ")", "where", "z", "=", "\\", "frac", "{", "x", "-", "loc", "}", "{", "scale", "}" ]
function:

def _log_prob(self, value):
    r"""
    .. math::
        log_pdf(X) = -(z + \exp(-z)) - \log(scale)
        where z = \frac{x - loc}{scale}
    """
    value = self._check_value(value, 'value')
    value = self.cast(value, self.dtype)
    z = (value - self.loc) / self.scale
    return -(z + self.exp(-z)) - self.log(self.scale)
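The expression is the standard log-density of the right-skewed Gumbel distribution, which can be sanity-checked against SciPy (assuming NumPy and SciPy are available):

import numpy as np
from scipy.stats import gumbel_r

loc, scale = 2.0, 1.5
x = np.array([0.0, 1.0, 3.0])

# The same expression the MindSpore method computes, in plain NumPy.
z = (x - loc) / scale
log_pdf = -(z + np.exp(-z)) - np.log(scale)

assert np.allclose(log_pdf, gumbel_r.logpdf(x, loc=loc, scale=scale))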
[ "def", "_log_prob", "(", "self", ",", "value", ")", ":", "value", "=", "self", ".", "_check_value", "(", "value", ",", "'value'", ")", "value", "=", "self", ".", "cast", "(", "value", ",", "self", ".", "dtype", ")", "z", "=", "(", "value", "-", "self", ".", "loc", ")", "/", "self", ".", "scale", "return", "-", "(", "z", "+", "self", ".", "exp", "(", "-", "z", ")", ")", "-", "self", ".", "log", "(", "self", ".", "scale", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/probability/distribution/gumbel.py#L199-L208
nwo: psi4/psi4
sha: be533f7f426b6ccc263904e55122899b16663395
path: psi4/driver/qcdb/dbwrap.py
language: python
identifier: oxcom
parameters: (lst)
docstring_summary: Returns gramatical comma separated string of *lst*.
function:

def oxcom(lst):
    """Returns gramatical comma separated string of *lst*."""
    lst = [str(l) for l in lst]

    if not lst:
        return ''
    elif len(lst) == 1:
        return lst[0]
    elif len(lst) == 2:
        return ' and '.join(lst)
    else:
        return ', and '.join([', '.join(lst[:-1]), lst[-1]])
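The helper is self-contained, so each branch is easy to exercise given the definition above:

print(oxcom([]))                  # ''
print(oxcom(['He']))              # 'He'
print(oxcom(['He', 'Ne']))        # 'He and Ne'
print(oxcom(['He', 'Ne', 'Ar']))  # 'He, Ne, and Ar' (Oxford comma)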
[ "def", "oxcom", "(", "lst", ")", ":", "lst", "=", "[", "str", "(", "l", ")", "for", "l", "in", "lst", "]", "if", "not", "lst", ":", "return", "''", "elif", "len", "(", "lst", ")", "==", "1", ":", "return", "lst", "[", "0", "]", "elif", "len", "(", "lst", ")", "==", "2", ":", "return", "' and '", ".", "join", "(", "lst", ")", "else", ":", "return", "', and '", ".", "join", "(", "[", "', '", ".", "join", "(", "lst", "[", ":", "-", "1", "]", ")", ",", "lst", "[", "-", "1", "]", "]", ")" ]
https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/qcdb/dbwrap.py#L218-L229
nwo: apache/parquet-cpp
sha: 642da055adf009652689b20e68a198cffb857651
path: build-support/cpplint.py
language: python
identifier: NestingState.UpdatePreprocessor
parameters: (self, line)
docstring_summary: Update preprocessor stack.
function:

def UpdatePreprocessor(self, line):
  """Update preprocessor stack.

  We need to handle preprocessors due to classes like this:
    #ifdef SWIG
    struct ResultDetailsPageElementExtensionPoint {
    #else
    struct ResultDetailsPageElementExtensionPoint : public Extension {
    #endif

  We make the following assumptions (good enough for most files):
  - Preprocessor condition evaluates to true from #if up to first
    #else/#elif/#endif.
  - Preprocessor condition evaluates to false from #else/#elif up
    to #endif. We still perform lint checks on these lines, but these
    do not affect nesting stack.

  Args:
    line: current line to check.
  """
  if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
    # Beginning of #if block, save the nesting stack here. The saved
    # stack will allow us to restore the parsing state in the #else case.
    self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
  elif Match(r'^\s*#\s*(else|elif)\b', line):
    # Beginning of #else block
    if self.pp_stack:
      if not self.pp_stack[-1].seen_else:
        # This is the first #else or #elif block. Remember the
        # whole nesting stack up to this point. This is what we
        # keep after the #endif.
        self.pp_stack[-1].seen_else = True
        self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)

      # Restore the stack to how it was before the #if
      self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
    else:
      # TODO(unknown): unexpected #else, issue warning?
      pass
  elif Match(r'^\s*#\s*endif\b', line):
    # End of #if or #else blocks.
    if self.pp_stack:
      # If we saw an #else, we will need to restore the nesting
      # stack to its former state before the #else, otherwise we
      # will just continue from where we left off.
      if self.pp_stack[-1].seen_else:
        # Here we can just use a shallow copy since we are the last
        # reference to it.
        self.stack = self.pp_stack[-1].stack_before_else
      # Drop the corresponding #if
      self.pp_stack.pop()
    else:
      # TODO(unknown): unexpected #endif, issue warning?
      pass
[ "def", "UpdatePreprocessor", "(", "self", ",", "line", ")", ":", "if", "Match", "(", "r'^\\s*#\\s*(if|ifdef|ifndef)\\b'", ",", "line", ")", ":", "# Beginning of #if block, save the nesting stack here. The saved", "# stack will allow us to restore the parsing state in the #else case.", "self", ".", "pp_stack", ".", "append", "(", "_PreprocessorInfo", "(", "copy", ".", "deepcopy", "(", "self", ".", "stack", ")", ")", ")", "elif", "Match", "(", "r'^\\s*#\\s*(else|elif)\\b'", ",", "line", ")", ":", "# Beginning of #else block", "if", "self", ".", "pp_stack", ":", "if", "not", "self", ".", "pp_stack", "[", "-", "1", "]", ".", "seen_else", ":", "# This is the first #else or #elif block. Remember the", "# whole nesting stack up to this point. This is what we", "# keep after the #endif.", "self", ".", "pp_stack", "[", "-", "1", "]", ".", "seen_else", "=", "True", "self", ".", "pp_stack", "[", "-", "1", "]", ".", "stack_before_else", "=", "copy", ".", "deepcopy", "(", "self", ".", "stack", ")", "# Restore the stack to how it was before the #if", "self", ".", "stack", "=", "copy", ".", "deepcopy", "(", "self", ".", "pp_stack", "[", "-", "1", "]", ".", "stack_before_if", ")", "else", ":", "# TODO(unknown): unexpected #else, issue warning?", "pass", "elif", "Match", "(", "r'^\\s*#\\s*endif\\b'", ",", "line", ")", ":", "# End of #if or #else blocks.", "if", "self", ".", "pp_stack", ":", "# If we saw an #else, we will need to restore the nesting", "# stack to its former state before the #else, otherwise we", "# will just continue from where we left off.", "if", "self", ".", "pp_stack", "[", "-", "1", "]", ".", "seen_else", ":", "# Here we can just use a shallow copy since we are the last", "# reference to it.", "self", ".", "stack", "=", "self", ".", "pp_stack", "[", "-", "1", "]", ".", "stack_before_else", "# Drop the corresponding #if", "self", ".", "pp_stack", ".", "pop", "(", ")", "else", ":", "# TODO(unknown): unexpected #endif, issue warning?", "pass" ]
https://github.com/apache/parquet-cpp/blob/642da055adf009652689b20e68a198cffb857651/build-support/cpplint.py#L2318-L2372
nwo: hughperkins/tf-coriander
sha: 970d3df6c11400ad68405f22b0c42a52374e94ca
path: tensorflow/contrib/tensor_forest/data/data_ops.py
language: python
identifier: ParseLabelTensorOrDict
parameters: (labels)
docstring_summary: Return a tensor to use for input labels to tensor_forest.
def ParseLabelTensorOrDict(labels):
  """Return a tensor to use for input labels to tensor_forest.

  The incoming targets can be a dict where keys are the string names of the
  columns, which we turn into a single 1-D tensor for classification or
  2-D tensor for regression.

  Converts sparse tensors to dense ones.

  Args:
    labels: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    A 2-D tensor for labels/outputs.
  """
  if isinstance(labels, dict):
    return math_ops.to_float(array_ops.concat(
        1, [sparse_ops.sparse_tensor_to_dense(labels[k], default_value=-1)
            if isinstance(labels[k], ops.SparseTensor)
            else labels[k] for k in sorted(labels.keys())]))
  else:
    if isinstance(labels, ops.SparseTensor):
      return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(
          labels, default_value=-1))
    else:
      return math_ops.to_float(labels)
[ "def", "ParseLabelTensorOrDict", "(", "labels", ")", ":", "if", "isinstance", "(", "labels", ",", "dict", ")", ":", "return", "math_ops", ".", "to_float", "(", "array_ops", ".", "concat", "(", "1", ",", "[", "sparse_ops", ".", "sparse_tensor_to_dense", "(", "labels", "[", "k", "]", ",", "default_value", "=", "-", "1", ")", "if", "isinstance", "(", "labels", ",", "ops", ".", "SparseTensor", ")", "else", "labels", "[", "k", "]", "for", "k", "in", "sorted", "(", "labels", ".", "keys", "(", ")", ")", "]", ")", ")", "else", ":", "if", "isinstance", "(", "labels", ",", "ops", ".", "SparseTensor", ")", ":", "return", "math_ops", ".", "to_float", "(", "sparse_ops", ".", "sparse_tensor_to_dense", "(", "labels", ",", "default_value", "=", "-", "1", ")", ")", "else", ":", "return", "math_ops", ".", "to_float", "(", "labels", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/tensor_forest/data/data_ops.py#L194-L219
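A framework-free way to see what the dict branch computes: columns are sorted by key, densified, and concatenated along axis 1. The NumPy analogue below is an illustration of that behavior, not the TensorFlow code path itself.

import numpy as np

def parse_label_dict(labels):
    # Mirror the dict branch: sort keys, make each column 2-D, concat on axis 1.
    cols = [np.asarray(labels[k], dtype=np.float32).reshape(len(labels[k]), -1)
            for k in sorted(labels)]
    return np.concatenate(cols, axis=1)

print(parse_label_dict({'b': [1, 2], 'a': [3, 4]}))
# [[3. 1.]
#  [4. 2.]]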
fatih/subvim
241b6d170597857105da219c9b7d36059e9f11fb
vim/base/YouCompleteMe/third_party/requests/requests/cookies.py
python
RequestsCookieJar.values
(self)
return values
Dict-like values() that returns a list of values of cookies from the jar. See keys() and items().
Dict-like values() that returns a list of values of cookies from the jar. See keys() and items().
[ "Dict", "-", "like", "values", "()", "that", "returns", "a", "list", "of", "values", "of", "cookies", "from", "the", "jar", ".", "See", "keys", "()", "and", "items", "()", "." ]
def values(self): """Dict-like values() that returns a list of values of cookies from the jar. See keys() and items().""" values = [] for cookie in iter(self): values.append(cookie.value) return values
[ "def", "values", "(", "self", ")", ":", "values", "=", "[", "]", "for", "cookie", "in", "iter", "(", "self", ")", ":", "values", ".", "append", "(", "cookie", ".", "value", ")", "return", "values" ]
https://github.com/fatih/subvim/blob/241b6d170597857105da219c9b7d36059e9f11fb/vim/base/YouCompleteMe/third_party/requests/requests/cookies.py#L198-L204
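Usage is the ordinary requests API; a short sketch (cookie names and domain are illustrative):

import requests

jar = requests.cookies.RequestsCookieJar()
jar.set('session', 'abc123', domain='example.com', path='/')
jar.set('theme', 'dark', domain='example.com', path='/')
print(jar.values())  # e.g. ['abc123', 'dark']; order follows jar iteration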
cybermaggedon/cyberprobe
f826dbc35ad3a79019cb871c0bc3fb1236130b3e
indicators/cyberprobe/fsm.py
python
FsmState.is_fail
(self)
return self.state == 'fail'
True if FSM state has reached 'fail'
True if FSM state has reached 'fail'
[ "True", "if", "FSM", "state", "has", "reached", "fail" ]
def is_fail(self): """ True if FSM state has reached 'fail' """ return self.state == 'fail'
[ "def", "is_fail", "(", "self", ")", ":", "return", "self", ".", "state", "==", "'fail'" ]
https://github.com/cybermaggedon/cyberprobe/blob/f826dbc35ad3a79019cb871c0bc3fb1236130b3e/indicators/cyberprobe/fsm.py#L138-L142
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/generator/ninja.py
python
NinjaWriter.WriteWinIdlFiles
(self, spec, prebuild)
return outputs
Writes rules to match MSVS's implicit idl handling.
Writes rules to match MSVS's implicit idl handling.
[ "Writes", "rules", "to", "match", "MSVS", "s", "implicit", "idl", "handling", "." ]
def WriteWinIdlFiles(self, spec, prebuild): """Writes rules to match MSVS's implicit idl handling.""" assert self.flavor == 'win' if self.msvs_settings.HasExplicitIdlRules(spec): return [] outputs = [] for source in filter(lambda x: x.endswith('.idl'), spec['sources']): self._WinIdlRule(source, prebuild, outputs) return outputs
[ "def", "WriteWinIdlFiles", "(", "self", ",", "spec", ",", "prebuild", ")", ":", "assert", "self", ".", "flavor", "==", "'win'", "if", "self", ".", "msvs_settings", ".", "HasExplicitIdlRules", "(", "spec", ")", ":", "return", "[", "]", "outputs", "=", "[", "]", "for", "source", "in", "filter", "(", "lambda", "x", ":", "x", ".", "endswith", "(", "'.idl'", ")", ",", "spec", "[", "'sources'", "]", ")", ":", "self", ".", "_WinIdlRule", "(", "source", ",", "prebuild", ",", "outputs", ")", "return", "outputs" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/generator/ninja.py#L486-L494
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/docutils/parsers/rst/states.py
python
Body.line_block
(self, match, context, next_state)
return [], next_state, []
First line of a line block.
First line of a line block.
[ "First", "line", "of", "a", "line", "block", "." ]
def line_block(self, match, context, next_state): """First line of a line block.""" block = nodes.line_block() self.parent += block lineno = self.state_machine.abs_line_number() line, messages, blank_finish = self.line_block_line(match, lineno) block += line self.parent += messages if not blank_finish: offset = self.state_machine.line_offset + 1 # next line new_line_offset, blank_finish = self.nested_list_parse( self.state_machine.input_lines[offset:], input_offset=self.state_machine.abs_line_offset() + 1, node=block, initial_state='LineBlock', blank_finish=0) self.goto_line(new_line_offset) if not blank_finish: self.parent += self.reporter.warning( 'Line block ends without a blank line.', line=lineno+1) if len(block): if block[0].indent is None: block[0].indent = 0 self.nest_line_block_lines(block) return [], next_state, []
[ "def", "line_block", "(", "self", ",", "match", ",", "context", ",", "next_state", ")", ":", "block", "=", "nodes", ".", "line_block", "(", ")", "self", ".", "parent", "+=", "block", "lineno", "=", "self", ".", "state_machine", ".", "abs_line_number", "(", ")", "line", ",", "messages", ",", "blank_finish", "=", "self", ".", "line_block_line", "(", "match", ",", "lineno", ")", "block", "+=", "line", "self", ".", "parent", "+=", "messages", "if", "not", "blank_finish", ":", "offset", "=", "self", ".", "state_machine", ".", "line_offset", "+", "1", "# next line", "new_line_offset", ",", "blank_finish", "=", "self", ".", "nested_list_parse", "(", "self", ".", "state_machine", ".", "input_lines", "[", "offset", ":", "]", ",", "input_offset", "=", "self", ".", "state_machine", ".", "abs_line_offset", "(", ")", "+", "1", ",", "node", "=", "block", ",", "initial_state", "=", "'LineBlock'", ",", "blank_finish", "=", "0", ")", "self", ".", "goto_line", "(", "new_line_offset", ")", "if", "not", "blank_finish", ":", "self", ".", "parent", "+=", "self", ".", "reporter", ".", "warning", "(", "'Line block ends without a blank line.'", ",", "line", "=", "lineno", "+", "1", ")", "if", "len", "(", "block", ")", ":", "if", "block", "[", "0", "]", ".", "indent", "is", "None", ":", "block", "[", "0", "]", ".", "indent", "=", "0", "self", ".", "nest_line_block_lines", "(", "block", ")", "return", "[", "]", ",", "next_state", ",", "[", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/docutils/parsers/rst/states.py#L1574-L1598
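This transition fires when reStructuredText line-block syntax ("| ...") is parsed. A hedged end-to-end check through the public docutils API:

from docutils.core import publish_doctree

source = """\
| A one,
| a two,
|    a one two three four.
"""
tree = publish_doctree(source)
# Expect a <line_block> with <line> children; the indented third line should
# land in a nested line_block, per nest_line_block_lines.
print(tree.pformat())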
gnuradio/gnuradio
09c3c4fa4bfb1a02caac74cb5334dfe065391e3b
gr-digital/python/digital/qa_ofdm_chanest_vcvc.py
python
qa_ofdm_chanest_vcvc.test_999_all_at_once
(self)
docstring for test_999_all_at_once
docstring for test_999_all_at_once
[ "docstring", "for", "test_999_all_at_once" ]
def test_999_all_at_once(self): """docstring for test_999_all_at_once""" fft_len = 32 # 6 carriers empty, 10 carriers full, 1 DC carrier, 10 carriers full, 5 # carriers empty syncsym_mask = ( 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0) carrier_mask = ( 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0) max_offset = 4 wgn_amplitude = 0.05 min_chan_ampl = 0.1 max_chan_ampl = 5 n_iter = 20 # The more the accurater def run_flow_graph(sync_sym1, sync_sym2, data_sym): top_block = gr.top_block() carr_offset = random.randint(-max_offset / 2, max_offset / 2) * 2 tx_data = shift_tuple(sync_sym1, carr_offset) + \ shift_tuple(sync_sym2, carr_offset) + \ shift_tuple(data_sym, carr_offset) channel = [ rand_range(min_chan_ampl, max_chan_ampl) * numpy.exp(1j * rand_range(0, 2 * numpy.pi)) for x in range(fft_len)] src = blocks.vector_source_c(tx_data, False, fft_len) chan = blocks.multiply_const_vcc(channel) noise = blocks.vector_source_c(numpy.random.normal( 0, wgn_amplitude, (len(tx_data),)), False, fft_len) add = blocks.add_cc(fft_len) chanest = digital.ofdm_chanest_vcvc(sync_sym1, sync_sym2, 1) sink = blocks.vector_sink_c(fft_len) top_block.connect(src, chan, (add, 0), chanest, sink) top_block.connect(noise, (add, 1)) top_block.run() channel_est = None carr_offset_hat = 0 rx_sym_est = [0, ] * fft_len tags = sink.tags() for tag in tags: if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset': carr_offset_hat = pmt.to_long(tag.value) self.assertEqual(carr_offset, carr_offset_hat) if pmt.symbol_to_string(tag.key) == 'ofdm_sync_chan_taps': channel_est = shift_tuple( pmt.c32vector_elements( tag.value), carr_offset) shifted_carrier_mask = shift_tuple(carrier_mask, carr_offset) for i in range(fft_len): if shifted_carrier_mask[i] and channel_est[i]: self.assertAlmostEqual( channel[i], channel_est[i], places=0) rx_sym_est[i] = (sink.data()[i] / channel_est[i]).real return carr_offset, list(shift_tuple(rx_sym_est, -carr_offset_hat)) bit_errors = 0 for _ in range(n_iter): sync_sym = [(random.randint(0, 1) * 2 - 1) * syncsym_mask[i] for i in range(fft_len)] ref_sym = [(random.randint(0, 1) * 2 - 1) * carrier_mask[i] for i in range(fft_len)] data_sym = [(random.randint(0, 1) * 2 - 1) * carrier_mask[i] for i in range(fft_len)] data_sym[26] = 1 (_, rx_sym) = run_flow_graph(sync_sym, ref_sym, data_sym) rx_sym_est = [0, ] * fft_len for i in range(fft_len): if carrier_mask[i] == 0: continue rx_sym_est[i] = {True: 1, False: -1}[rx_sym[i] > 0] if rx_sym_est[i] != data_sym[i]: bit_errors += 1 # This is much more than we could allow self.assertTrue(bit_errors < n_iter)
[ "def", "test_999_all_at_once", "(", "self", ")", ":", "fft_len", "=", "32", "# 6 carriers empty, 10 carriers full, 1 DC carrier, 10 carriers full, 5", "# carriers empty", "syncsym_mask", "=", "(", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ")", "carrier_mask", "=", "(", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "0", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ")", "max_offset", "=", "4", "wgn_amplitude", "=", "0.05", "min_chan_ampl", "=", "0.1", "max_chan_ampl", "=", "5", "n_iter", "=", "20", "# The more the accurater", "def", "run_flow_graph", "(", "sync_sym1", ",", "sync_sym2", ",", "data_sym", ")", ":", "top_block", "=", "gr", ".", "top_block", "(", ")", "carr_offset", "=", "random", ".", "randint", "(", "-", "max_offset", "/", "2", ",", "max_offset", "/", "2", ")", "*", "2", "tx_data", "=", "shift_tuple", "(", "sync_sym1", ",", "carr_offset", ")", "+", "shift_tuple", "(", "sync_sym2", ",", "carr_offset", ")", "+", "shift_tuple", "(", "data_sym", ",", "carr_offset", ")", "channel", "=", "[", "rand_range", "(", "min_chan_ampl", ",", "max_chan_ampl", ")", "*", "numpy", ".", "exp", "(", "1j", "*", "rand_range", "(", "0", ",", "2", "*", "numpy", ".", "pi", ")", ")", "for", "x", "in", "range", "(", "fft_len", ")", "]", "src", "=", "blocks", ".", "vector_source_c", "(", "tx_data", ",", "False", ",", "fft_len", ")", "chan", "=", "blocks", ".", "multiply_const_vcc", "(", "channel", ")", "noise", "=", "blocks", ".", "vector_source_c", "(", "numpy", ".", "random", ".", "normal", "(", "0", ",", "wgn_amplitude", ",", "(", "len", "(", "tx_data", ")", ",", ")", ")", ",", "False", ",", "fft_len", ")", "add", "=", "blocks", ".", "add_cc", "(", "fft_len", ")", "chanest", "=", "digital", ".", "ofdm_chanest_vcvc", "(", "sync_sym1", ",", "sync_sym2", ",", "1", ")", "sink", "=", "blocks", ".", "vector_sink_c", "(", "fft_len", ")", "top_block", ".", "connect", "(", "src", ",", "chan", ",", "(", "add", ",", "0", ")", ",", "chanest", ",", "sink", ")", "top_block", ".", "connect", "(", "noise", ",", "(", "add", ",", "1", ")", ")", "top_block", ".", "run", "(", ")", "channel_est", "=", "None", "carr_offset_hat", "=", "0", "rx_sym_est", "=", "[", "0", ",", "]", "*", "fft_len", "tags", "=", "sink", ".", "tags", "(", ")", "for", "tag", "in", "tags", ":", "if", "pmt", ".", "symbol_to_string", "(", "tag", ".", "key", ")", "==", "'ofdm_sync_carr_offset'", ":", "carr_offset_hat", "=", "pmt", ".", "to_long", "(", "tag", ".", "value", ")", "self", ".", "assertEqual", "(", "carr_offset", ",", "carr_offset_hat", ")", "if", "pmt", ".", "symbol_to_string", "(", "tag", ".", "key", ")", "==", "'ofdm_sync_chan_taps'", ":", "channel_est", "=", "shift_tuple", "(", "pmt", ".", "c32vector_elements", "(", "tag", ".", "value", ")", ",", "carr_offset", ")", "shifted_carrier_mask", "=", "shift_tuple", "(", "carrier_mask", ",", "carr_offset", ")", "for", "i", "in", "range", "(", "fft_len", ")", ":", "if", "shifted_carrier_mask", "[", "i", "]", "and", "channel_est", "[", "i", "]", ":", "self", ".", "assertAlmostEqual", "(", "channel", "[", "i", "]", 
",", "channel_est", "[", "i", "]", ",", "places", "=", "0", ")", "rx_sym_est", "[", "i", "]", "=", "(", "sink", ".", "data", "(", ")", "[", "i", "]", "/", "channel_est", "[", "i", "]", ")", ".", "real", "return", "carr_offset", ",", "list", "(", "shift_tuple", "(", "rx_sym_est", ",", "-", "carr_offset_hat", ")", ")", "bit_errors", "=", "0", "for", "_", "in", "range", "(", "n_iter", ")", ":", "sync_sym", "=", "[", "(", "random", ".", "randint", "(", "0", ",", "1", ")", "*", "2", "-", "1", ")", "*", "syncsym_mask", "[", "i", "]", "for", "i", "in", "range", "(", "fft_len", ")", "]", "ref_sym", "=", "[", "(", "random", ".", "randint", "(", "0", ",", "1", ")", "*", "2", "-", "1", ")", "*", "carrier_mask", "[", "i", "]", "for", "i", "in", "range", "(", "fft_len", ")", "]", "data_sym", "=", "[", "(", "random", ".", "randint", "(", "0", ",", "1", ")", "*", "2", "-", "1", ")", "*", "carrier_mask", "[", "i", "]", "for", "i", "in", "range", "(", "fft_len", ")", "]", "data_sym", "[", "26", "]", "=", "1", "(", "_", ",", "rx_sym", ")", "=", "run_flow_graph", "(", "sync_sym", ",", "ref_sym", ",", "data_sym", ")", "rx_sym_est", "=", "[", "0", ",", "]", "*", "fft_len", "for", "i", "in", "range", "(", "fft_len", ")", ":", "if", "carrier_mask", "[", "i", "]", "==", "0", ":", "continue", "rx_sym_est", "[", "i", "]", "=", "{", "True", ":", "1", ",", "False", ":", "-", "1", "}", "[", "rx_sym", "[", "i", "]", ">", "0", "]", "if", "rx_sym_est", "[", "i", "]", "!=", "data_sym", "[", "i", "]", ":", "bit_errors", "+=", "1", "# This is much more than we could allow", "self", ".", "assertTrue", "(", "bit_errors", "<", "n_iter", ")" ]
https://github.com/gnuradio/gnuradio/blob/09c3c4fa4bfb1a02caac74cb5334dfe065391e3b/gr-digital/python/digital/qa_ofdm_chanest_vcvc.py#L217-L351
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/ttk.py
python
Treeview.column
(self, column, option=None, **kw)
return _val_or_dict(self.tk, kw, self._w, "column", column)
Query or modify the options for the specified column. If kw is not given, returns a dict of the column option values. If option is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values.
Query or modify the options for the specified column.
[ "Query", "or", "modify", "the", "options", "for", "the", "specified", "column", "." ]
def column(self, column, option=None, **kw): """Query or modify the options for the specified column. If kw is not given, returns a dict of the column option values. If option is specified then the value for that option is returned. Otherwise, sets the options to the corresponding values.""" if option is not None: kw[option] = None return _val_or_dict(self.tk, kw, self._w, "column", column)
[ "def", "column", "(", "self", ",", "column", ",", "option", "=", "None", ",", "*", "*", "kw", ")", ":", "if", "option", "is", "not", "None", ":", "kw", "[", "option", "]", "=", "None", "return", "_val_or_dict", "(", "self", ".", "tk", ",", "kw", ",", "self", ".", "_w", ",", "\"column\"", ",", "column", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/ttk.py#L1239-L1247
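A short usage sketch (assuming a display is available): with keyword options the call configures the column, with `option` it queries a single value, and with neither it returns the whole option dict.

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tree = ttk.Treeview(root, columns=('size',))
tree.column('size', width=80, anchor='e')  # set options
print(tree.column('size', 'width'))        # query one option -> 80
print(tree.column('size'))                 # full dict of column options
root.destroy()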
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/packaging/tags.py
python
interpreter_name
()
return INTERPRETER_SHORT_NAMES.get(name) or name
Returns the name of the running interpreter.
Returns the name of the running interpreter.
[ "Returns", "the", "name", "of", "the", "running", "interpreter", "." ]
def interpreter_name(): # type: () -> str """ Returns the name of the running interpreter. """ try: name = sys.implementation.name # type: ignore except AttributeError: # pragma: no cover # Python 2.7 compatibility. name = platform.python_implementation().lower() return INTERPRETER_SHORT_NAMES.get(name) or name
[ "def", "interpreter_name", "(", ")", ":", "# type: () -> str", "try", ":", "name", "=", "sys", ".", "implementation", ".", "name", "# type: ignore", "except", "AttributeError", ":", "# pragma: no cover", "# Python 2.7 compatibility.", "name", "=", "platform", ".", "python_implementation", "(", ")", ".", "lower", "(", ")", "return", "INTERPRETER_SHORT_NAMES", ".", "get", "(", "name", ")", "or", "name" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/packaging/tags.py#L815-L825
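For reference, `INTERPRETER_SHORT_NAMES` is a small mapping; the sketch below re-creates the lookup inline (the dict contents are quoted from memory of the packaging source, so treat them as an assumption):

import sys

SHORT_NAMES = {"python": "py", "cpython": "cp", "pypy": "pp",
               "ironpython": "ip", "jython": "jy"}
name = sys.implementation.name        # Python 3 path; e.g. 'cpython'
print(SHORT_NAMES.get(name) or name)  # -> 'cp' on CPython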
openvinotoolkit/openvino
dedcbeafa8b84cccdc55ca64b8da516682b381c7
src/bindings/python/src/compatibility/ngraph/opset1/ops.py
python
clamp
( data: NodeInput, min_value: ScalarData, max_value: ScalarData, name: Optional[str] = None )
return _get_node_factory_opset1().create( "Clamp", [as_node(data)], {"min": min_value, "max": max_value} )
Perform clamp element-wise on data from input node. :param data: Input tensor. One of: input node, array or scalar. :param min_value: The lower bound of the <min_value;max_value> range. Scalar value. :param max_value: The upper bound of the <min_value;max_value> range. Scalar value. :param name: Optional output node name. :return: The new node performing a clamp operation on its input data element-wise. Performs a clipping operation on an input value between a pair of boundary values. For each element in `data`, if the element's value is lower than `min_value`, it will be replaced with `min_value`. If the value is higher than `max_value`, it will be replaced by `max_value`. Intermediate values of `data` are returned without change. Clamp uses the following logic: .. code-block:: python if data < min_value: data=min_value elif data > max_value: data=max_value
Perform clamp element-wise on data from input node.
[ "Perform", "clamp", "element", "-", "wise", "on", "data", "from", "input", "node", "." ]
def clamp( data: NodeInput, min_value: ScalarData, max_value: ScalarData, name: Optional[str] = None ) -> Node: """Perform clamp element-wise on data from input node. :param data: Input tensor. One of: input node, array or scalar. :param min_value: The lower bound of the <min_value;max_value> range. Scalar value. :param max_value: The upper bound of the <min_value;max_value> range. Scalar value. :param name: Optional output node name. :return: The new node performing a clamp operation on its input data element-wise. Performs a clipping operation on an input value between a pair of boundary values. For each element in `data`, if the element's value is lower than `min_value`, it will be replaced with `min_value`. If the value is higher than `max_value`, it will be replaced by `max_value`. Intermediate values of `data` are returned without change. Clamp uses the following logic: .. code-block:: python if data < min_value: data=min_value elif data > max_value: data=max_value """ return _get_node_factory_opset1().create( "Clamp", [as_node(data)], {"min": min_value, "max": max_value} )
[ "def", "clamp", "(", "data", ":", "NodeInput", ",", "min_value", ":", "ScalarData", ",", "max_value", ":", "ScalarData", ",", "name", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Node", ":", "return", "_get_node_factory_opset1", "(", ")", ".", "create", "(", "\"Clamp\"", ",", "[", "as_node", "(", "data", ")", "]", ",", "{", "\"min\"", ":", "min_value", ",", "\"max\"", ":", "max_value", "}", ")" ]
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/src/bindings/python/src/compatibility/ngraph/opset1/ops.py#L278-L307
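The element-wise semantics in the docstring match NumPy's `clip`; a quick reference check (an analogue, not the nGraph node itself):

import numpy as np

data = np.array([-2.0, 0.5, 7.0])
print(np.clip(data, 0.0, 5.0))  # [0.  0.5 5. ] -- same logic as Clamp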
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/socketserver.py
python
BaseServer.server_close
(self)
Called to clean-up the server. May be overridden.
Called to clean-up the server.
[ "Called", "to", "clean", "-", "up", "the", "server", "." ]
def server_close(self): """Called to clean-up the server. May be overridden. """ pass
[ "def", "server_close", "(", "self", ")", ":", "pass" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/socketserver.py#L350-L356
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
python
TarInfo.__init__
(self, name="")
Construct a TarInfo object. name is the optional name of the member.
Construct a TarInfo object. name is the optional name
[ "Construct", "a", "TarInfo", "object", ".", "name", "is", "the", "optional", "name" ]
def __init__(self, name=""): """Construct a TarInfo object. name is the optional name of the member. """ self.name = name # member name self.mode = 0o644 # file permissions self.uid = 0 # user id self.gid = 0 # group id self.size = 0 # file size self.mtime = 0 # modification time self.chksum = 0 # header checksum self.type = REGTYPE # member type self.linkname = "" # link name self.uname = "" # user name self.gname = "" # group name self.devmajor = 0 # device major number self.devminor = 0 # device minor number self.offset = 0 # the tar header starts here self.offset_data = 0 # the file's data starts here self.sparse = None # sparse member information self.pax_headers = {}
[ "def", "__init__", "(", "self", ",", "name", "=", "\"\"", ")", ":", "self", ".", "name", "=", "name", "# member name", "self", ".", "mode", "=", "0o644", "# file permissions", "self", ".", "uid", "=", "0", "# user id", "self", ".", "gid", "=", "0", "# group id", "self", ".", "size", "=", "0", "# file size", "self", ".", "mtime", "=", "0", "# modification time", "self", ".", "chksum", "=", "0", "# header checksum", "self", ".", "type", "=", "REGTYPE", "# member type", "self", ".", "linkname", "=", "\"\"", "# link name", "self", ".", "uname", "=", "\"\"", "# user name", "self", ".", "gname", "=", "\"\"", "# group name", "self", ".", "devmajor", "=", "0", "# device major number", "self", ".", "devminor", "=", "0", "# device minor number", "self", ".", "offset", "=", "0", "# the tar header starts here", "self", ".", "offset_data", "=", "0", "# the file's data starts here", "self", ".", "sparse", "=", "None", "# sparse member information", "self", ".", "pax_headers", "=", "{", "}" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L1873-L1917
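The standard-library `tarfile` (which this distlib backport mirrors) exposes the same constructor; a minimal in-memory usage sketch:

import io
import tarfile

payload = b"hello"
info = tarfile.TarInfo(name="greeting.txt")  # every other field keeps its default
info.size = len(payload)                     # size must match the data written
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
    tar.addfile(info, io.BytesIO(payload))
print(len(buf.getvalue()))  # a valid tar stream, padded to block size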
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/ops/nn_ops.py
python
log_softmax
(logits, dim=-1, name=None)
return _softmax(logits, gen_nn_ops._log_softmax, dim, name)
Computes log softmax activations. For each batch `i` and class `j` we have logsoftmax = logits - log(reduce_sum(exp(logits), dim)) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. dim: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `dim` is beyond the last dimension of `logits`.
Computes log softmax activations.
[ "Computes", "log", "softmax", "activations", "." ]
def log_softmax(logits, dim=-1, name=None): """Computes log softmax activations. For each batch `i` and class `j` we have logsoftmax = logits - log(reduce_sum(exp(logits), dim)) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. dim: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `dim` is beyond the last dimension of `logits`. """ return _softmax(logits, gen_nn_ops._log_softmax, dim, name)
[ "def", "log_softmax", "(", "logits", ",", "dim", "=", "-", "1", ",", "name", "=", "None", ")", ":", "return", "_softmax", "(", "logits", ",", "gen_nn_ops", ".", "_log_softmax", ",", "dim", ",", "name", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/nn_ops.py#L1484-L1505
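The formula in the docstring can be checked directly in NumPy; the sketch below subtracts the row max first so `exp` cannot overflow (a standard numerical trick, not part of the docstring):

import numpy as np

def log_softmax_np(logits, dim=-1):
    shifted = logits - logits.max(axis=dim, keepdims=True)
    return shifted - np.log(np.exp(shifted).sum(axis=dim, keepdims=True))

x = np.array([[1.0, 2.0, 3.0]])
print(log_softmax_np(x))                       # logits - log(sum(exp(logits), dim))
print(np.exp(log_softmax_np(x)).sum(axis=-1))  # rows exponentiate to sum 1 -> [1.]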
jackaudio/jack2
21b293dbc37d42446141a08922cdec0d2550c6a0
waflib/Scripting.py
python
run_command
(cmd_name)
return ctx
Executes a single Waf command. Called by :py:func:`waflib.Scripting.run_commands`. :param cmd_name: command to execute, like ``build`` :type cmd_name: string
Executes a single Waf command. Called by :py:func:`waflib.Scripting.run_commands`.
[ "Executes", "a", "single", "Waf", "command", ".", "Called", "by", ":", "py", ":", "func", ":", "waflib", ".", "Scripting", ".", "run_commands", "." ]
def run_command(cmd_name): """ Executes a single Waf command. Called by :py:func:`waflib.Scripting.run_commands`. :param cmd_name: command to execute, like ``build`` :type cmd_name: string """ ctx = Context.create_context(cmd_name) ctx.log_timer = Utils.Timer() ctx.options = Options.options # provided for convenience ctx.cmd = cmd_name try: ctx.execute() finally: # Issue 1374 ctx.finalize() return ctx
[ "def", "run_command", "(", "cmd_name", ")", ":", "ctx", "=", "Context", ".", "create_context", "(", "cmd_name", ")", "ctx", ".", "log_timer", "=", "Utils", ".", "Timer", "(", ")", "ctx", ".", "options", "=", "Options", ".", "options", "# provided for convenience", "ctx", ".", "cmd", "=", "cmd_name", "try", ":", "ctx", ".", "execute", "(", ")", "finally", ":", "# Issue 1374", "ctx", ".", "finalize", "(", ")", "return", "ctx" ]
https://github.com/jackaudio/jack2/blob/21b293dbc37d42446141a08922cdec0d2550c6a0/waflib/Scripting.py#L224-L240
KDE/krita
10ea63984e00366865769c193ab298de73a59c5c
plugins/python/plugin_importer/plugin_downloader.py
python
is_zip
(url)
return response.getheader('Content-Type') == MTYPE
Check if the given URL is a direct link to a zip file
Check if the given URL is a direct link to a zip file
[ "Check", "if", "the", "given", "URL", "is", "a", "direct", "link", "to", "a", "zip", "file" ]
def is_zip(url): """Check if the given URL is a direct link to a zip file""" MTYPE = 'application/zip' # This just goes by the ending of the url string: if mimetypes.guess_type(url)[0] == MTYPE: return True # Could still be a zip, so check the HTTP headers: try: request = urllib.request.Request(url, method='HEAD') response = urllib.request.urlopen(request) except Exception as e: raise PluginDownloadError(str(e)) return response.getheader('Content-Type') == MTYPE
[ "def", "is_zip", "(", "url", ")", ":", "MTYPE", "=", "'application/zip'", "# This just goes by the ending of the url string:", "if", "mimetypes", ".", "guess_type", "(", "url", ")", "[", "0", "]", "==", "MTYPE", ":", "return", "True", "# Could still be a zip, so check the HTTP headers:", "try", ":", "request", "=", "urllib", ".", "request", ".", "Request", "(", "url", ",", "method", "=", "'HEAD'", ")", "response", "=", "urllib", ".", "request", ".", "urlopen", "(", "request", ")", "except", "Exception", "as", "e", ":", "raise", "PluginDownloadError", "(", "str", "(", "e", ")", ")", "return", "response", ".", "getheader", "(", "'Content-Type'", ")", "==", "MTYPE" ]
https://github.com/KDE/krita/blob/10ea63984e00366865769c193ab298de73a59c5c/plugins/python/plugin_importer/plugin_downloader.py#L29-L45
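Usage sketch with the function above in scope (the URL is illustrative): a `.zip` suffix short-circuits through `mimetypes` with no network traffic; any other URL costs one HEAD request.

try:
    print(is_zip('https://example.com/downloads/plugin.zip'))  # True, no request sent
except PluginDownloadError as err:
    print('could not check URL:', err)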
goldeneye-source/ges-code
2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d
thirdparty/protobuf-2.3.0/python/google/protobuf/internal/wire_format.py
python
TagByteSize
(field_number)
return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
Returns the bytes required to serialize a tag with this field number.
Returns the bytes required to serialize a tag with this field number.
[ "Returns", "the", "bytes", "required", "to", "serialize", "a", "tag", "with", "this", "field", "number", "." ]
def TagByteSize(field_number): """Returns the bytes required to serialize a tag with this field number.""" # Just pass in type 0, since the type won't affect the tag+type size. return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
[ "def", "TagByteSize", "(", "field_number", ")", ":", "# Just pass in type 0, since the type won't affect the tag+type size.", "return", "_VarUInt64ByteSizeNoTag", "(", "PackTag", "(", "field_number", ",", "0", ")", ")" ]
https://github.com/goldeneye-source/ges-code/blob/2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d/thirdparty/protobuf-2.3.0/python/google/protobuf/internal/wire_format.py#L224-L227
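The arithmetic behind this is small enough to inline. The sketch below folds the two helpers used above (`PackTag`, `_VarUInt64ByteSizeNoTag`) into one function, as an illustration: the tag is `(field_number << 3) | wire_type`, and its size is the number of 7-bit varint groups.

def tag_byte_size(field_number, wire_type=0):
    tag = (field_number << 3) | wire_type
    size = 1
    while tag > 0x7F:  # each varint byte carries 7 payload bits
        tag >>= 7
        size += 1
    return size

print(tag_byte_size(1))   # 1 -- field numbers 1..15 fit in a single byte
print(tag_byte_size(16))  # 2 -- field 16 pushes the tag past 7 bits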
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/ipython/py3/IPython/core/completer.py
python
compress_user
(path:str, tilde_expand:bool, tilde_val:str)
Does the opposite of expand_user, with its outputs.
Does the opposite of expand_user, with its outputs.
[ "Does", "the", "opposite", "of", "expand_user", "with", "its", "outputs", "." ]
def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str: """Does the opposite of expand_user, with its outputs. """ if tilde_expand: return path.replace(tilde_val, '~') else: return path
[ "def", "compress_user", "(", "path", ":", "str", ",", "tilde_expand", ":", "bool", ",", "tilde_val", ":", "str", ")", "->", "str", ":", "if", "tilde_expand", ":", "return", "path", ".", "replace", "(", "tilde_val", ",", "'~'", ")", "else", ":", "return", "path" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py3/IPython/core/completer.py#L292-L298
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/node-10.15.3/deps/v8/third_party/jinja2/lexer.py
python
Token.test_any
(self, *iterable)
return False
Test against multiple token expressions.
Test against multiple token expressions.
[ "Test", "against", "multiple", "token", "expressions", "." ]
def test_any(self, *iterable): """Test against multiple token expressions.""" for expr in iterable: if self.test(expr): return True return False
[ "def", "test_any", "(", "self", ",", "*", "iterable", ")", ":", "for", "expr", "in", "iterable", ":", "if", "self", ".", "test", "(", "expr", ")", ":", "return", "True", "return", "False" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/v8/third_party/jinja2/lexer.py#L260-L265
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_core.py
python
PyEvtHandler.__init__
(self, *args, **kwargs)
__init__(self) -> PyEvtHandler The wx.PyEvtHandler class can be used to intercept calls to the `ProcessEvent` method. Simply derive a new class from this one, override ProcessEvent, and then push an instance of the class onto the event handler chain for a window using `wx.Window.PushEventHandler`.
__init__(self) -> PyEvtHandler
[ "__init__", "(", "self", ")", "-", ">", "PyEvtHandler" ]
def __init__(self, *args, **kwargs): """ __init__(self) -> PyEvtHandler The wx.PyEvtHandler class can be used to intercept calls to the `ProcessEvent` method. Simply derive a new class from this one, override ProcessEvent, and then push an instance of the class onto the event handler chain for a window using `wx.Window.PushEventHandler`. """ _core_.PyEvtHandler_swiginit(self,_core_.new_PyEvtHandler(*args, **kwargs)) self._setOORInfo(self);PyEvtHandler._setCallbackInfo(self, self, PyEvtHandler)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_core_", ".", "PyEvtHandler_swiginit", "(", "self", ",", "_core_", ".", "new_PyEvtHandler", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "self", ".", "_setOORInfo", "(", "self", ")", "PyEvtHandler", ".", "_setCallbackInfo", "(", "self", ",", "self", ",", "PyEvtHandler", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L4253-L4263
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/html.py
python
HtmlHelpController.MakeModalIfNeeded
(*args, **kwargs)
return _html.HtmlHelpController_MakeModalIfNeeded(*args, **kwargs)
MakeModalIfNeeded(self)
MakeModalIfNeeded(self)
[ "MakeModalIfNeeded", "(", "self", ")" ]
def MakeModalIfNeeded(*args, **kwargs): """MakeModalIfNeeded(self)""" return _html.HtmlHelpController_MakeModalIfNeeded(*args, **kwargs)
[ "def", "MakeModalIfNeeded", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_html", ".", "HtmlHelpController_MakeModalIfNeeded", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/html.py#L2002-L2004
natanielruiz/android-yolo
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
jni-build/jni/include/tensorflow/python/ops/math_ops.py
python
_OverrideBinaryOperatorHelper
(func, op_name, clazz_object=ops.Tensor)
Register operators with different tensor and scalar versions. If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices, sp_values, sp_shape, dense)` and outputs `(new_sp_values)`. Args: func: the operator op_name: name of the operator being overridden clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
Register operators with different tensor and scalar versions.
[ "Register", "operators", "with", "different", "tensor", "and", "scalar", "versions", "." ]
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor): """Register operators with different tensor and scalar versions. If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices, sp_values, sp_shape, dense)` and outputs `(new_sp_values)`. Args: func: the operator op_name: name of the operator being overridden clazz_object: class to override for. Either `Tensor` or `SparseTensor`. """ def binary_op_wrapper(x, y): with ops.op_scope([x, y], None, op_name) as name: if not isinstance(y, ops.SparseTensor): y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y") return func(x, y, name=name) def binary_op_wrapper_sparse(sp_x, y): with ops.op_scope([sp_x, y], None, op_name) as name: y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y") return ops.SparseTensor(sp_x.indices, func(sp_x.indices, sp_x.values, sp_x.shape, y, name=name), sp_x.shape) def r_binary_op_wrapper(y, x): with ops.op_scope([x, y], None, op_name) as name: x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x") return func(x, y, name=name) if clazz_object is ops.Tensor: clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper) del binary_op_wrapper clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper) del r_binary_op_wrapper else: clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper_sparse) del binary_op_wrapper_sparse
[ "def", "_OverrideBinaryOperatorHelper", "(", "func", ",", "op_name", ",", "clazz_object", "=", "ops", ".", "Tensor", ")", ":", "def", "binary_op_wrapper", "(", "x", ",", "y", ")", ":", "with", "ops", ".", "op_scope", "(", "[", "x", ",", "y", "]", ",", "None", ",", "op_name", ")", "as", "name", ":", "if", "not", "isinstance", "(", "y", ",", "ops", ".", "SparseTensor", ")", ":", "y", "=", "ops", ".", "convert_to_tensor", "(", "y", ",", "dtype", "=", "x", ".", "dtype", ".", "base_dtype", ",", "name", "=", "\"y\"", ")", "return", "func", "(", "x", ",", "y", ",", "name", "=", "name", ")", "def", "binary_op_wrapper_sparse", "(", "sp_x", ",", "y", ")", ":", "with", "ops", ".", "op_scope", "(", "[", "sp_x", ",", "y", "]", ",", "None", ",", "op_name", ")", "as", "name", ":", "y", "=", "ops", ".", "convert_to_tensor", "(", "y", ",", "dtype", "=", "sp_x", ".", "dtype", ".", "base_dtype", ",", "name", "=", "\"y\"", ")", "return", "ops", ".", "SparseTensor", "(", "sp_x", ".", "indices", ",", "func", "(", "sp_x", ".", "indices", ",", "sp_x", ".", "values", ",", "sp_x", ".", "shape", ",", "y", ",", "name", "=", "name", ")", ",", "sp_x", ".", "shape", ")", "def", "r_binary_op_wrapper", "(", "y", ",", "x", ")", ":", "with", "ops", ".", "op_scope", "(", "[", "x", ",", "y", "]", ",", "None", ",", "op_name", ")", "as", "name", ":", "x", "=", "ops", ".", "convert_to_tensor", "(", "x", ",", "dtype", "=", "y", ".", "dtype", ".", "base_dtype", ",", "name", "=", "\"x\"", ")", "return", "func", "(", "x", ",", "y", ",", "name", "=", "name", ")", "if", "clazz_object", "is", "ops", ".", "Tensor", ":", "clazz_object", ".", "_override_operator", "(", "\"__%s__\"", "%", "op_name", ",", "binary_op_wrapper", ")", "del", "binary_op_wrapper", "clazz_object", ".", "_override_operator", "(", "\"__r%s__\"", "%", "op_name", ",", "r_binary_op_wrapper", ")", "del", "r_binary_op_wrapper", "else", ":", "clazz_object", ".", "_override_operator", "(", "\"__%s__\"", "%", "op_name", ",", "binary_op_wrapper_sparse", ")", "del", "binary_op_wrapper_sparse" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/ops/math_ops.py#L745-L782
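The registration pattern is easier to see on a toy class. The sketch below (names are illustrative, not TensorFlow's) keeps the same shape: a forward wrapper that coerces the right operand and a reflected wrapper that coerces the left one.

class Box(object):
    def __init__(self, value):
        self.value = value

def _override_binary_operator(func, op_name, clazz=Box):
    def binary_op_wrapper(x, y):
        if not isinstance(y, Box):
            y = Box(y)                 # stands in for convert_to_tensor
        return func(x, y)
    def r_binary_op_wrapper(y, x):     # self is y; x is the left operand
        return binary_op_wrapper(Box(x), y)
    setattr(clazz, '__%s__' % op_name, binary_op_wrapper)
    setattr(clazz, '__r%s__' % op_name, r_binary_op_wrapper)

_override_binary_operator(lambda a, b: Box(a.value + b.value), 'add')
print((Box(2) + 3).value)  # 5, via __add__
print((3 + Box(2)).value)  # 5, via __radd__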
OSGeo/gdal
3748fc4ba4fba727492774b2b908a2130c864a83
swig/python/osgeo/gdal.py
python
MajorObject.GetMetadataDomainList
(self, *args)
return _gdal.MajorObject_GetMetadataDomainList(self, *args)
r"""GetMetadataDomainList(MajorObject self) -> char **
r"""GetMetadataDomainList(MajorObject self) -> char **
[ "r", "GetMetadataDomainList", "(", "MajorObject", "self", ")", "-", ">", "char", "**" ]
def GetMetadataDomainList(self, *args): r"""GetMetadataDomainList(MajorObject self) -> char **""" return _gdal.MajorObject_GetMetadataDomainList(self, *args)
[ "def", "GetMetadataDomainList", "(", "self", ",", "*", "args", ")", ":", "return", "_gdal", ".", "MajorObject_GetMetadataDomainList", "(", "self", ",", "*", "args", ")" ]
https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/osgeo/gdal.py#L1865-L1867
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/longest-common-subpath.py
python
Solution2.longestCommonSubpath
(self, n, paths)
return right
:type n: int :type paths: List[List[int]] :rtype: int
:type n: int :type paths: List[List[int]] :rtype: int
[ ":", "type", "n", ":", "int", ":", "type", "paths", ":", "List", "[", "List", "[", "int", "]]", ":", "rtype", ":", "int" ]
def longestCommonSubpath(self, n, paths): """ :type n: int :type paths: List[List[int]] :rtype: int """ def RabinKarp(arr, x): h = reduce(lambda h,x: (h*P+x)%MOD, (arr[i] for i in xrange(x)), 0) power = pow(P, x, MOD) lookup = {h} for i in xrange(x, len(arr)): h = (h*P - arr[i-x]*power + arr[i])%MOD lookup.add(h) return lookup def check(paths, x): intersect = RabinKarp(paths[0], x) for i in xrange(1, len(paths)): intersect = set.intersection(intersect, RabinKarp(paths[i], x)) if not intersect: return False return True MOD, P = 10**11+19, max(x for p in paths for x in p)+1 # MOD is the min prime of 12-digit number left, right = 1, min(len(p) for p in paths) while left <= right: mid = left + (right-left)//2 if not check(paths, mid): right = mid-1 else: left = mid+1 return right
[ "def", "longestCommonSubpath", "(", "self", ",", "n", ",", "paths", ")", ":", "def", "RabinKarp", "(", "arr", ",", "x", ")", ":", "h", "=", "reduce", "(", "lambda", "h", ",", "x", ":", "(", "h", "*", "P", "+", "x", ")", "%", "MOD", ",", "(", "arr", "[", "i", "]", "for", "i", "in", "xrange", "(", "x", ")", ")", ",", "0", ")", "power", "=", "pow", "(", "P", ",", "x", ",", "MOD", ")", "lookup", "=", "{", "h", "}", "for", "i", "in", "xrange", "(", "x", ",", "len", "(", "arr", ")", ")", ":", "h", "=", "(", "h", "*", "P", "-", "arr", "[", "i", "-", "x", "]", "*", "power", "+", "arr", "[", "i", "]", ")", "%", "MOD", "lookup", ".", "add", "(", "h", ")", "return", "lookup", "def", "check", "(", "paths", ",", "x", ")", ":", "intersect", "=", "RabinKarp", "(", "paths", "[", "0", "]", ",", "x", ")", "for", "i", "in", "xrange", "(", "1", ",", "len", "(", "paths", ")", ")", ":", "intersect", "=", "set", ".", "intersection", "(", "intersect", ",", "RabinKarp", "(", "paths", "[", "i", "]", ",", "x", ")", ")", "if", "not", "intersect", ":", "return", "False", "return", "True", "MOD", ",", "P", "=", "10", "**", "11", "+", "19", ",", "max", "(", "x", "for", "p", "in", "paths", "for", "x", "in", "p", ")", "+", "1", "# MOD is the min prime of 12-digit number", "left", ",", "right", "=", "1", ",", "min", "(", "len", "(", "p", ")", "for", "p", "in", "paths", ")", "while", "left", "<=", "right", ":", "mid", "=", "left", "+", "(", "right", "-", "left", ")", "//", "2", "if", "not", "check", "(", "paths", ",", "mid", ")", ":", "right", "=", "mid", "-", "1", "else", ":", "left", "=", "mid", "+", "1", "return", "right" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/longest-common-subpath.py#L42-L73
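The rolling-hash building block ports directly to Python 3 (the snippet above is Python 2: `xrange`, builtin `reduce`). A standalone sketch with the same recurrence, using toy `P`/`MOD` values for readability:

from functools import reduce

def rabin_karp(arr, x, P, MOD):
    h = reduce(lambda h, v: (h * P + v) % MOD, arr[:x], 0)
    power = pow(P, x, MOD)
    seen = {h}
    for i in range(x, len(arr)):
        h = (h * P - arr[i - x] * power + arr[i]) % MOD  # slide the window
        seen.add(h)
    return seen

print(rabin_karp([1, 2, 3, 2, 3], x=2, P=4, MOD=10**9 + 7))
# windows [1,2] [2,3] [3,2] [2,3] -> 3 distinct hashes: {6, 11, 14}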
weolar/miniblink49
1c4678db0594a4abde23d3ebbcc7cd13c3170777
v8_7_5/tools/v8_presubmit.py
python
SourceFileProcessor.RunOnPath
(self, path)
return self.ProcessFiles(all_files)
Runs processor on all files under the given path.
Runs processor on all files under the given path.
[ "Runs", "processor", "on", "all", "files", "under", "the", "given", "path", "." ]
def RunOnPath(self, path): """Runs processor on all files under the given path.""" all_files = [] for file in self.GetPathsToSearch(): all_files += self.FindFilesIn(join(path, file)) return self.ProcessFiles(all_files)
[ "def", "RunOnPath", "(", "self", ",", "path", ")", ":", "all_files", "=", "[", "]", "for", "file", "in", "self", ".", "GetPathsToSearch", "(", ")", ":", "all_files", "+=", "self", ".", "FindFilesIn", "(", "join", "(", "path", ",", "file", ")", ")", "return", "self", ".", "ProcessFiles", "(", "all_files", ")" ]
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/v8_7_5/tools/v8_presubmit.py#L188-L194
tpfister/caffe-heatmap
4db69ef53e6b8a0b3b4ebb29328b0ab3dbf67c4e
scripts/cpp_lint.py
python
CheckCaffeAlternatives
(filename, clean_lines, linenum, error)
Checks for C(++) functions for which a Caffe substitute should be used. For certain native C functions (memset, memcpy), there is a Caffe alternative which should be used instead. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Checks for C(++) functions for which a Caffe substitute should be used.
[ "Checks", "for", "C", "(", "++", ")", "functions", "for", "which", "a", "Caffe", "substitute", "should", "be", "used", "." ]
def CheckCaffeAlternatives(filename, clean_lines, linenum, error): """Checks for C(++) functions for which a Caffe substitute should be used. For certain native C functions (memset, memcpy), there is a Caffe alternative which should be used instead. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] for function, alts in caffe_alt_function_list: ix = line.find(function + '(') if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and line[ix - 1] not in ('_', '.', '>'))): disp_alts = ['%s(...)' % alt for alt in alts] error(filename, linenum, 'caffe/alt_fn', 2, 'Use Caffe function %s instead of %s(...).' % (' or '.join(disp_alts), function))
[ "def", "CheckCaffeAlternatives", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "for", "function", ",", "alts", "in", "caffe_alt_function_list", ":", "ix", "=", "line", ".", "find", "(", "function", "+", "'('", ")", "if", "ix", ">=", "0", "and", "(", "ix", "==", "0", "or", "(", "not", "line", "[", "ix", "-", "1", "]", ".", "isalnum", "(", ")", "and", "line", "[", "ix", "-", "1", "]", "not", "in", "(", "'_'", ",", "'.'", ",", "'>'", ")", ")", ")", ":", "disp_alts", "=", "[", "'%s(...)'", "%", "alt", "for", "alt", "in", "alts", "]", "error", "(", "filename", ",", "linenum", ",", "'caffe/alt_fn'", ",", "2", ",", "'Use Caffe function %s instead of %s(...).'", "%", "(", "' or '", ".", "join", "(", "disp_alts", ")", ",", "function", ")", ")" ]
https://github.com/tpfister/caffe-heatmap/blob/4db69ef53e6b8a0b3b4ebb29328b0ab3dbf67c4e/scripts/cpp_lint.py#L1572-L1592
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/keras/python/keras/engine/topology.py
python
Layer.__call__
(self, inputs, **kwargs)
return output
Wrapper around self.call(), for handling internal references. If a Keras tensor is passed: - We call self._add_inbound_node(). - If necessary, we `build` the layer to match the shape of the input(s). - We update the _keras_history of the output tensor(s) with the current layer. This is done as part of _add_inbound_node(). Arguments: inputs: Can be a tensor or list/tuple of tensors. **kwargs: Additional keyword arguments to be passed to `call()`. Returns: Output of the layer's `call` method. Raises: ValueError: in case the layer is missing shape information for its `build` call.
Wrapper around self.call(), for handling internal references.
[ "Wrapper", "around", "self", ".", "call", "()", "for", "handling", "internal", "references", "." ]
def __call__(self, inputs, **kwargs): """Wrapper around self.call(), for handling internal references. If a Keras tensor is passed: - We call self._add_inbound_node(). - If necessary, we `build` the layer to match the shape of the input(s). - We update the _keras_history of the output tensor(s) with the current layer. This is done as part of _add_inbound_node(). Arguments: inputs: Can be a tensor or list/tuple of tensors. **kwargs: Additional keyword arguments to be passed to `call()`. Returns: Output of the layer's `call` method. Raises: ValueError: in case the layer is missing shape information for its `build` call. """ if isinstance(inputs, list): inputs = inputs[:] # Handle mask propagation. previous_mask = _collect_previous_mask(inputs) user_kwargs = copy.copy(kwargs) if not _is_all_none(previous_mask): # The previous layer generated a mask. if has_arg(self.call, 'mask'): if 'mask' not in kwargs: # If mask is explicitly passed to __call__, # we should override the default mask. kwargs['mask'] = previous_mask # Actually call the layer (optionally building it). output = super(Layer, self).__call__(inputs, **kwargs) # Handle mask computation. with K.name_scope(self.name): output_mask = self.compute_mask(inputs, previous_mask) # If the layer returns tensors from its inputs, unmodified, # we copy them to avoid loss of tensor metadata. output_ls = _to_list(output) inputs_ls = _to_list(inputs) output_ls_copy = [] for x in output_ls: if x in inputs_ls: x = K.identity(x) output_ls_copy.append(x) if len(output_ls_copy) == 1: output = output_ls_copy[0] else: output = output_ls_copy # Add an inbound node to the layer, so that it keeps track # of the call and of all new variables created during the call. # This also updates the layer history of the output tensor(s). # If the input tensor(s) had not previous Keras history, # this does nothing. self._add_inbound_node( input_tensors=inputs, output_tensors=output, input_masks=previous_mask, output_masks=output_mask, arguments=user_kwargs) # Optionally load weight values that were specified at layer instantiation. if hasattr(self, '_initial_weights') and self._initial_weights is not None: self.set_weights(self._initial_weights) del self._initial_weights return output
[ "def", "__call__", "(", "self", ",", "inputs", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "inputs", ",", "list", ")", ":", "inputs", "=", "inputs", "[", ":", "]", "# Handle mask propagation.", "previous_mask", "=", "_collect_previous_mask", "(", "inputs", ")", "user_kwargs", "=", "copy", ".", "copy", "(", "kwargs", ")", "if", "not", "_is_all_none", "(", "previous_mask", ")", ":", "# The previous layer generated a mask.", "if", "has_arg", "(", "self", ".", "call", ",", "'mask'", ")", ":", "if", "'mask'", "not", "in", "kwargs", ":", "# If mask is explicitly passed to __call__,", "# we should override the default mask.", "kwargs", "[", "'mask'", "]", "=", "previous_mask", "# Actually call the layer (optionally building it).", "output", "=", "super", "(", "Layer", ",", "self", ")", ".", "__call__", "(", "inputs", ",", "*", "*", "kwargs", ")", "# Handle mask computation.", "with", "K", ".", "name_scope", "(", "self", ".", "name", ")", ":", "output_mask", "=", "self", ".", "compute_mask", "(", "inputs", ",", "previous_mask", ")", "# If the layer returns tensors from its inputs, unmodified,", "# we copy them to avoid loss of tensor metadata.", "output_ls", "=", "_to_list", "(", "output", ")", "inputs_ls", "=", "_to_list", "(", "inputs", ")", "output_ls_copy", "=", "[", "]", "for", "x", "in", "output_ls", ":", "if", "x", "in", "inputs_ls", ":", "x", "=", "K", ".", "identity", "(", "x", ")", "output_ls_copy", ".", "append", "(", "x", ")", "if", "len", "(", "output_ls_copy", ")", "==", "1", ":", "output", "=", "output_ls_copy", "[", "0", "]", "else", ":", "output", "=", "output_ls_copy", "# Add an inbound node to the layer, so that it keeps track", "# of the call and of all new variables created during the call.", "# This also updates the layer history of the output tensor(s).", "# If the input tensor(s) had not previous Keras history,", "# this does nothing.", "self", ".", "_add_inbound_node", "(", "input_tensors", "=", "inputs", ",", "output_tensors", "=", "output", ",", "input_masks", "=", "previous_mask", ",", "output_masks", "=", "output_mask", ",", "arguments", "=", "user_kwargs", ")", "# Optionally load weight values that were specified at layer instantiation.", "if", "hasattr", "(", "self", ",", "'_initial_weights'", ")", "and", "self", ".", "_initial_weights", "is", "not", "None", ":", "self", ".", "set_weights", "(", "self", ".", "_initial_weights", ")", "del", "self", ".", "_initial_weights", "return", "output" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/keras/python/keras/engine/topology.py#L359-L432
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/io/pytables.py
python
Table.index_cols
(self)
return [(i.axis, i.cname) for i in self.index_axes]
return a list of my index cols
return a list of my index cols
[ "return", "a", "list", "of", "my", "index", "cols" ]
def index_cols(self): """ return a list of my index cols """ # Note: each `i.cname` below is assured to be a str. return [(i.axis, i.cname) for i in self.index_axes]
[ "def", "index_cols", "(", "self", ")", ":", "# Note: each `i.cname` below is assured to be a str.", "return", "[", "(", "i", ".", "axis", ",", "i", ".", "cname", ")", "for", "i", "in", "self", ".", "index_axes", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/io/pytables.py#L3315-L3318
numworks/epsilon
8952d2f8b1de1c3f064eec8ffcea804c5594ba4c
build/device/usb/backend/__init__.py
python
IBackend.bulk_write
(self, dev_handle, ep, intf, data, timeout)
r"""Perform a bulk write. dev_handle is the value returned by the open_device() method. The ep parameter is the bEndpointAddress field whose endpoint the data will be sent to. intf is the bInterfaceNumber field of the interface containing the endpoint. The data parameter is the data to be sent. It must be an instance of the array.array class. The timeout parameter specifies a time limit to the operation in miliseconds. The method returns the number of bytes written.
r"""Perform a bulk write.
[ "r", "Perform", "a", "bulk", "write", "." ]
def bulk_write(self, dev_handle, ep, intf, data, timeout):
        r"""Perform a bulk write.

        dev_handle is the value returned by the open_device() method.
        The ep parameter is the bEndpointAddress field whose endpoint
        the data will be sent to. intf is the bInterfaceNumber field
        of the interface containing the endpoint.

        The data parameter is the data to be sent. It must be an
        instance of the array.array class. The timeout parameter
        specifies a time limit to the operation in milliseconds.

        The method returns the number of bytes written.
        """
        _not_implemented(self.bulk_write)
[ "def", "bulk_write", "(", "self", ",", "dev_handle", ",", "ep", ",", "intf", ",", "data", ",", "timeout", ")", ":", "_not_implemented", "(", "self", ".", "bulk_write", ")" ]
https://github.com/numworks/epsilon/blob/8952d2f8b1de1c3f064eec8ffcea804c5594ba4c/build/device/usb/backend/__init__.py#L240-L253
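Applications normally reach this through PyUSB's high-level API, which dispatches to the active backend's `bulk_write`. A hedged sketch (vendor/product IDs and endpoint address are illustrative, and real hardware is required):

import usb.core

dev = usb.core.find(idVendor=0x1234, idProduct=0x5678)
if dev is not None:
    dev.set_configuration()
    written = dev.write(0x01, b"hello", timeout=1000)  # OUT endpoint 0x01, ms timeout
    print(written, "bytes written")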
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/tpu_feed.py
python
InfeedQueue.freeze
(self)
Freezes the InfeedQueue so it can no longer be modified. The configuration is implicitly frozen before any host-side or device-side Ops are generated. The configuration cannot be frozen until the types and shapes of the tuple elements have been set. Raises: ValueError: if the types or shapes of the tuple elements have not been set.
Freezes the InfeedQueue so it can no longer be modified.
[ "Freezes", "the", "InfeedQueue", "so", "it", "can", "no", "longer", "be", "modified", "." ]
def freeze(self): """Freezes the InfeedQueue so it can no longer be modified. The configuration is implicitly frozen before any host-side or device-side Ops are generated. The configuration cannot be frozen until the types and shapes of the tuple elements have been set. Raises: ValueError: if the types or shapes of the tuple elements have not been set. """ self._frozen = True if self._tuple_types is None: raise ValueError( "Can't freeze an InfeedQueue without setting all tuple types.") if self._tuple_shapes is None: raise ValueError( "Can't freeze an InfeedQueue without setting all tuple shapes.") for shape in self._tuple_shapes: if shape.dims is None: raise ValueError( "Can't freeze an InfeedQueue without setting all tuple shapes.") for policy in self._sharding_policies: policy.freeze() self._validate()
[ "def", "freeze", "(", "self", ")", ":", "self", ".", "_frozen", "=", "True", "if", "self", ".", "_tuple_types", "is", "None", ":", "raise", "ValueError", "(", "\"Can't freeze an InfeedQueue without setting all tuple types.\"", ")", "if", "self", ".", "_tuple_shapes", "is", "None", ":", "raise", "ValueError", "(", "\"Can't freeze an InfeedQueue without setting all tuple shapes.\"", ")", "for", "shape", "in", "self", ".", "_tuple_shapes", ":", "if", "shape", ".", "dims", "is", "None", ":", "raise", "ValueError", "(", "\"Can't freeze an InfeedQueue without setting all tuple shapes.\"", ")", "for", "policy", "in", "self", ".", "_sharding_policies", ":", "policy", ".", "freeze", "(", ")", "self", ".", "_validate", "(", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/tpu_feed.py#L434-L458
dmlc/treelite
df56babb6a4a2d7c29d719c28ce53acfa7dbab3c
python/treelite/sklearn/common.py
python
SKLConverterBase.process_leaf_node
(cls, treelite_tree, sklearn_tree, node_id, sklearn_model)
Process a leaf node with a given node ID. This method shall be implemented by the mixin class that represents the converter for a particular model type.
Process a leaf node with a given node ID. This method shall be implemented by the mixin class that represents the converter for a particular model type.
[ "Process", "a", "leaf", "node", "with", "a", "given", "node", "ID", ".", "This", "method", "shall", "be", "implemented", "by", "the", "mixin", "class", "that", "represents", "the", "converter", "for", "a", "particular", "model", "type", "." ]
def process_leaf_node(cls, treelite_tree, sklearn_tree, node_id, sklearn_model):
    """Process a leaf node with a given node ID. This method shall be implemented
    by the mixin class that represents the converter for a particular model type."""
    raise NotImplementedError()
[ "def", "process_leaf_node", "(", "cls", ",", "treelite_tree", ",", "sklearn_tree", ",", "node_id", ",", "sklearn_model", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/dmlc/treelite/blob/df56babb6a4a2d7c29d719c28ce53acfa7dbab3c/python/treelite/sklearn/common.py#L48-L51
yan99033/CNN-SVO
d5591ea88103f8d1b26e5296129bf3b3196a14f1
rpg_vikit/vikit_py/src/vikit_py/transformations.py
python
orthogonalization_matrix
(lengths, angles)
return numpy.array(( ( a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0), (-a*sinb*co, b*sina, 0.0, 0.0), ( a*cosb, b*cosa, c, 0.0), ( 0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64)
Return orthogonalization matrix for crystallographic cell coordinates.

Angles are expected in degrees.

The de-orthogonalization matrix is the inverse.

>>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.))
>>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
True
>>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
>>> numpy.allclose(numpy.sum(O), 43.063229)
True
Return orthogonalization matrix for crystallographic cell coordinates.
[ "Return", "orthogonalization", "matrix", "for", "crystallographic", "cell", "coordinates", "." ]
def orthogonalization_matrix(lengths, angles):
    """Return orthogonalization matrix for crystallographic cell coordinates.

    Angles are expected in degrees.

    The de-orthogonalization matrix is the inverse.

    >>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.))
    >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
    True
    >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
    >>> numpy.allclose(numpy.sum(O), 43.063229)
    True

    """
    a, b, c = lengths
    angles = numpy.radians(angles)
    sina, sinb, _ = numpy.sin(angles)
    cosa, cosb, cosg = numpy.cos(angles)
    co = (cosa * cosb - cosg) / (sina * sinb)
    return numpy.array((
        ( a*sinb*math.sqrt(1.0-co*co),  0.0,    0.0, 0.0),
        (-a*sinb*co,                    b*sina, 0.0, 0.0),
        ( a*cosb,                       b*cosa, c,   0.0),
        ( 0.0,                          0.0,    0.0, 1.0)),
        dtype=numpy.float64)
[ "def", "orthogonalization_matrix", "(", "lengths", ",", "angles", ")", ":", "a", ",", "b", ",", "c", "=", "lengths", "angles", "=", "numpy", ".", "radians", "(", "angles", ")", "sina", ",", "sinb", ",", "_", "=", "numpy", ".", "sin", "(", "angles", ")", "cosa", ",", "cosb", ",", "cosg", "=", "numpy", ".", "cos", "(", "angles", ")", "co", "=", "(", "cosa", "*", "cosb", "-", "cosg", ")", "/", "(", "sina", "*", "sinb", ")", "return", "numpy", ".", "array", "(", "(", "(", "a", "*", "sinb", "*", "math", ".", "sqrt", "(", "1.0", "-", "co", "*", "co", ")", ",", "0.0", ",", "0.0", ",", "0.0", ")", ",", "(", "-", "a", "*", "sinb", "*", "co", ",", "b", "*", "sina", ",", "0.0", ",", "0.0", ")", ",", "(", "a", "*", "cosb", ",", "b", "*", "cosa", ",", "c", ",", "0.0", ")", ",", "(", "0.0", ",", "0.0", ",", "0.0", ",", "1.0", ")", ")", ",", "dtype", "=", "numpy", ".", "float64", ")" ]
https://github.com/yan99033/CNN-SVO/blob/d5591ea88103f8d1b26e5296129bf3b3196a14f1/rpg_vikit/vikit_py/src/vikit_py/transformations.py#L842-L867
BlzFans/wke
b0fa21158312e40c5fbd84682d643022b6c34a93
cygwin/lib/python2.6/cookielib.py
python
iso2time
(text)
return _str2time(day, mon, yr, hr, min, sec, tz)
As for http2time, but parses the ISO 8601 formats:

1994-02-03 14:15:29 -0100    -- ISO 8601 format
1994-02-03 14:15:29          -- zone is optional
1994-02-03                   -- only date
1994-02-03T14:15:29          -- Use T as separator
19940203T141529Z             -- ISO 8601 compact format
19940203                     -- only date
As for http2time, but parses the ISO 8601 formats:
[ "As", "for", "http2time", "but", "parses", "the", "ISO", "8601", "formats", ":" ]
def iso2time(text):
    """
    As for http2time, but parses the ISO 8601 formats:

    1994-02-03 14:15:29 -0100    -- ISO 8601 format
    1994-02-03 14:15:29          -- zone is optional
    1994-02-03                   -- only date
    1994-02-03T14:15:29          -- Use T as separator
    19940203T141529Z             -- ISO 8601 compact format
    19940203                     -- only date

    """
    # clean up
    text = text.lstrip()

    # tz is time zone specifier string
    day, mon, yr, hr, min, sec, tz = [None]*7

    # loose regexp parse
    m = ISO_DATE_RE.search(text)
    if m is not None:
        # XXX there's an extra bit of the timezone I'm ignoring here: is
        # this the right thing to do?
        yr, mon, day, hr, min, sec, tz, _ = m.groups()
    else:
        return None  # bad format

    return _str2time(day, mon, yr, hr, min, sec, tz)
[ "def", "iso2time", "(", "text", ")", ":", "# clean up", "text", "=", "text", ".", "lstrip", "(", ")", "# tz is time zone specifier string", "day", ",", "mon", ",", "yr", ",", "hr", ",", "min", ",", "sec", ",", "tz", "=", "[", "None", "]", "*", "7", "# loose regexp parse", "m", "=", "ISO_DATE_RE", ".", "search", "(", "text", ")", "if", "m", "is", "not", "None", ":", "# XXX there's an extra bit of the timezone I'm ignoring here: is", "# this the right thing to do?", "yr", ",", "mon", ",", "day", ",", "hr", ",", "min", ",", "sec", ",", "tz", ",", "_", "=", "m", ".", "groups", "(", ")", "else", ":", "return", "None", "# bad format", "return", "_str2time", "(", "day", ",", "mon", ",", "yr", ",", "hr", ",", "min", ",", "sec", ",", "tz", ")" ]
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/cookielib.py#L284-L311
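A minimal usage sketch, assuming Python 2 where cookielib is in the standard library: the return value is an epoch timestamp, or None for input that does not match the formats listed above.

from cookielib import iso2time

print iso2time("1994-02-03 14:15:29 -0100")  # full ISO 8601 form -> epoch seconds
print iso2time("19940203T141529Z")           # compact form -> epoch seconds
print iso2time("03-Feb-94")                  # not ISO 8601 -> None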
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/idlelib/pyshell.py
python
ModifiedInterpreter.runcommand
(self, code)
return 1
Run the code without invoking the debugger
Run the code without invoking the debugger
[ "Run", "the", "code", "without", "invoking", "the", "debugger" ]
def runcommand(self, code):
    "Run the code without invoking the debugger"
    # The code better not raise an exception!
    if self.tkconsole.executing:
        self.display_executing_dialog()
        return 0
    if self.rpcclt:
        self.rpcclt.remotequeue("exec", "runcode", (code,), {})
    else:
        exec(code, self.locals)
    return 1
[ "def", "runcommand", "(", "self", ",", "code", ")", ":", "# The code better not raise an exception!", "if", "self", ".", "tkconsole", ".", "executing", ":", "self", ".", "display_executing_dialog", "(", ")", "return", "0", "if", "self", ".", "rpcclt", ":", "self", ".", "rpcclt", ".", "remotequeue", "(", "\"exec\"", ",", "\"runcode\"", ",", "(", "code", ",", ")", ",", "{", "}", ")", "else", ":", "exec", "(", "code", ",", "self", ".", "locals", ")", "return", "1" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/idlelib/pyshell.py#L745-L755
ricardoquesada/Spidermonkey
4a75ea2543408bd1b2c515aa95901523eeef7858
python/mozbuild/mozbuild/jar.py
python
JarMaker.getCommandLineParser
(self)
return p
Get an optparse.OptionParser for jarmaker. This OptionParser has the options for jarmaker as well as the options for the inner PreProcessor.
Get an optparse.OptionParser for jarmaker.
[ "Get", "an", "optparse", ".", "OptionParser", "for", "jarmaker", "." ]
def getCommandLineParser(self):
    '''Get an optparse.OptionParser for jarmaker.

    This OptionParser has the options for jarmaker as well as
    the options for the inner PreProcessor.
    '''
    # HACK, we need to unescape the string variables we get,
    # the perl versions didn't grok strings right
    p = self.pp.getCommandLineParser(unescapeDefines=True)
    p.add_option('-f', type='choice', default='jar',
                 choices=('jar', 'flat', 'symlink'),
                 help='fileformat used for output',
                 metavar='[jar, flat, symlink]')
    p.add_option('-v', action='store_true', dest='verbose',
                 help='verbose output')
    p.add_option('-q', action='store_false', dest='verbose',
                 help='verbose output')
    p.add_option('-e', action='store_true',
                 help='create chrome.manifest instead of jarfile.manifest')
    p.add_option('-s', type='string', action='append', default=[],
                 help='source directory')
    p.add_option('-t', type='string', help='top source directory')
    p.add_option('-c', '--l10n-src', type='string', action='append',
                 help='localization directory')
    p.add_option('--l10n-base', type='string', action='store',
                 help='base directory to be used for localization (requires relativesrcdir)')
    p.add_option('--locale-mergedir', type='string', action='store',
                 help='base directory to be used for l10n-merge (requires l10n-base and relativesrcdir)')
    p.add_option('--relativesrcdir', type='string',
                 help='relativesrcdir to be used for localization')
    p.add_option('-j', type='string', help='jarfile directory')
    p.add_option('--root-manifest-entry-appid', type='string',
                 help='add an app id specific root chrome manifest entry.')
    return p
[ "def", "getCommandLineParser", "(", "self", ")", ":", "# HACK, we need to unescape the string variables we get,", "# the perl versions didn't grok strings right", "p", "=", "self", ".", "pp", ".", "getCommandLineParser", "(", "unescapeDefines", "=", "True", ")", "p", ".", "add_option", "(", "'-f'", ",", "type", "=", "'choice'", ",", "default", "=", "'jar'", ",", "choices", "=", "(", "'jar'", ",", "'flat'", ",", "'symlink'", ")", ",", "help", "=", "'fileformat used for output'", ",", "metavar", "=", "'[jar, flat, symlink]'", ",", ")", "p", ".", "add_option", "(", "'-v'", ",", "action", "=", "'store_true'", ",", "dest", "=", "'verbose'", ",", "help", "=", "'verbose output'", ")", "p", ".", "add_option", "(", "'-q'", ",", "action", "=", "'store_false'", ",", "dest", "=", "'verbose'", ",", "help", "=", "'verbose output'", ")", "p", ".", "add_option", "(", "'-e'", ",", "action", "=", "'store_true'", ",", "help", "=", "'create chrome.manifest instead of jarfile.manifest'", ")", "p", ".", "add_option", "(", "'-s'", ",", "type", "=", "'string'", ",", "action", "=", "'append'", ",", "default", "=", "[", "]", ",", "help", "=", "'source directory'", ")", "p", ".", "add_option", "(", "'-t'", ",", "type", "=", "'string'", ",", "help", "=", "'top source directory'", ")", "p", ".", "add_option", "(", "'-c'", ",", "'--l10n-src'", ",", "type", "=", "'string'", ",", "action", "=", "'append'", ",", "help", "=", "'localization directory'", ")", "p", ".", "add_option", "(", "'--l10n-base'", ",", "type", "=", "'string'", ",", "action", "=", "'store'", ",", "help", "=", "'base directory to be used for localization (requires relativesrcdir)'", ")", "p", ".", "add_option", "(", "'--locale-mergedir'", ",", "type", "=", "'string'", ",", "action", "=", "'store'", ",", "help", "=", "'base directory to be used for l10n-merge (requires l10n-base and relativesrcdir)'", ")", "p", ".", "add_option", "(", "'--relativesrcdir'", ",", "type", "=", "'string'", ",", "help", "=", "'relativesrcdir to be used for localization'", ")", "p", ".", "add_option", "(", "'-j'", ",", "type", "=", "'string'", ",", "help", "=", "'jarfile directory'", ")", "p", ".", "add_option", "(", "'--root-manifest-entry-appid'", ",", "type", "=", "'string'", ",", "help", "=", "'add an app id specific root chrome manifest entry.'", ")", "return", "p" ]
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/mozbuild/mozbuild/jar.py#L95-L136
bh107/bohrium
5b83e7117285fefc7779ed0e9acb0f8e74c7e068
bridge/bh107/bh107/array_create.py
python
empty_like
(a, dtype=None)
return empty(a.shape, dtype)
Return a new array with the same shape and type as a given array.

Parameters
----------
a : array_like
    The shape and data-type of `a` define these same attributes of the
    returned array.
dtype : data-type, optional
    Overrides the data type of the result.

Returns
-------
out : ndarray
    Array of uninitialized (arbitrary) data with the same shape and type
    as `a`.

See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.

Notes
-----
The order of the data in memory is always row-major (C-style).

This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.

Examples
--------
>>> a = ([1,2,3], [4,5,6])  # a is array-like
>>> bh.empty_like(a)
array([[-1073741821, -1073741821,           3],    # random
       [          0,           0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> bh.empty_like(a)
array([[ -2.00000715e+000,   1.48219694e-323,  -2.00000572e+000],  # random
       [  4.38791518e-305,  -2.00000715e+000,   4.17269252e-309]])
Return a new array with the same shape and type as a given array.
[ "Return", "a", "new", "array", "with", "the", "same", "shape", "and", "type", "as", "a", "given", "array", "." ]
def empty_like(a, dtype=None):
    """
    Return a new array with the same shape and type as a given array.

    Parameters
    ----------
    a : array_like
        The shape and data-type of `a` define these same attributes of the
        returned array.
    dtype : data-type, optional
        Overrides the data type of the result.

    Returns
    -------
    out : ndarray
        Array of uninitialized (arbitrary) data with the same shape and type
        as `a`.

    See Also
    --------
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    empty : Return a new uninitialized array.
    ones : Return a new array setting values to one.
    zeros : Return a new array setting values to zero.

    Notes
    -----
    The order of the data in memory is always row-major (C-style).

    This function does *not* initialize the returned array; to do that use
    `zeros_like` or `ones_like` instead. It may be marginally faster than
    the functions that do set the array values.

    Examples
    --------
    >>> a = ([1,2,3], [4,5,6])  # a is array-like
    >>> bh.empty_like(a)
    array([[-1073741821, -1073741821,           3],    # random
           [          0,           0, -1073741821]])
    >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
    >>> bh.empty_like(a)
    array([[ -2.00000715e+000,   1.48219694e-323,  -2.00000572e+000],  # random
           [  4.38791518e-305,  -2.00000715e+000,   4.17269252e-309]])
    """
    if dtype is None:
        dtype = a.dtype
    return empty(a.shape, dtype)
[ "def", "empty_like", "(", "a", ",", "dtype", "=", "None", ")", ":", "if", "dtype", "is", "None", ":", "dtype", "=", "a", ".", "dtype", "return", "empty", "(", "a", ".", "shape", ",", "dtype", ")" ]
https://github.com/bh107/bohrium/blob/5b83e7117285fefc7779ed0e9acb0f8e74c7e068/bridge/bh107/bh107/array_create.py#L146-L193
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/ops/control_flow_ops.py
python
ControlFlowState.GetGradState
(self, op, before)
return None
Return the grad state for this op if it's in a forward loop context.
Return the grad state for this op if it's in a forward loop context.
[ "Return", "the", "grad", "state", "for", "this", "op", "if", "it", "s", "in", "a", "forward", "loop", "context", "." ]
def GetGradState(self, op, before):
  """Return the grad state for this op if it's in a forward loop context."""
  if before and IsLoopExit(op):
    forward_ctxt = op._get_control_flow_context()
    forward_ctxt = forward_ctxt.outer_context
    if forward_ctxt:
      forward_ctxt = forward_ctxt.GetWhileContext()
  else:
    forward_ctxt = _GetWhileContext(op)
  if forward_ctxt:
    return self._map.get(forward_ctxt)
  return None
[ "def", "GetGradState", "(", "self", ",", "op", ",", "before", ")", ":", "if", "before", "and", "IsLoopExit", "(", "op", ")", ":", "forward_ctxt", "=", "op", ".", "_get_control_flow_context", "(", ")", "forward_ctxt", "=", "forward_ctxt", ".", "outer_context", "if", "forward_ctxt", ":", "forward_ctxt", "=", "forward_ctxt", ".", "GetWhileContext", "(", ")", "else", ":", "forward_ctxt", "=", "_GetWhileContext", "(", "op", ")", "if", "forward_ctxt", ":", "return", "self", ".", "_map", ".", "get", "(", "forward_ctxt", ")", "return", "None" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/control_flow_ops.py#L1018-L1029
NeoGeographyToolkit/StereoPipeline
eedf54a919fb5cce1ab0e280bb0df4050763aa11
src/asp/IceBridge/icebridge_common.py
python
makeJpegFileName
(run, frame)
return ('%s_%05d.JPG' % (run.yyyy_mm_dd(), frame))
Generate a file name like: 2016_07_19_00015.JPG
Generate a file name like: 2016_07_19_00015.JPG
[ "Generate", "a", "file", "name", "like", ":", "2016_07_19_00015", ".", "JPG" ]
def makeJpegFileName(run, frame):
    '''Generate a file name like: 2016_07_19_00015.JPG'''
    return ('%s_%05d.JPG' % (run.yyyy_mm_dd(), frame))
[ "def", "makeJpegFileName", "(", "run", ",", "frame", ")", ":", "return", "(", "'%s_%05d.JPG'", "%", "(", "run", ".", "yyyy_mm_dd", "(", ")", ",", "frame", ")", ")" ]
https://github.com/NeoGeographyToolkit/StereoPipeline/blob/eedf54a919fb5cce1ab0e280bb0df4050763aa11/src/asp/IceBridge/icebridge_common.py#L221-L223
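The only moving part here is the %05d zero-padding. A standalone check, with the run object replaced by a literal date string:

# '%05d' left-pads the frame number with zeros to five digits.
print('%s_%05d.JPG' % ('2016_07_19', 15))  # -> 2016_07_19_00015.JPG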
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py3/scipy/stats/_tukeylambda_stats.py
python
tukeylambda_variance
(lam)
return v
Variance of the Tukey Lambda distribution.

Parameters
----------
lam : array_like
    The lambda values at which to compute the variance.

Returns
-------
v : ndarray
    The variance. For lam < -0.5, the variance is not defined, so
    np.nan is returned. For lam = -0.5, np.inf is returned.

Notes
-----
In an interval around lambda=0, this function uses the [4,4] Pade
approximation to compute the variance. Otherwise it uses the standard
formula (https://en.wikipedia.org/wiki/Tukey_lambda_distribution). The
Pade approximation is used because the standard formula has a removable
discontinuity at lambda = 0, and does not produce accurate numerical
results near lambda = 0.
Variance of the Tukey Lambda distribution.
[ "Variance", "of", "the", "Tukey", "Lambda", "distribution", "." ]
def tukeylambda_variance(lam):
    """Variance of the Tukey Lambda distribution.

    Parameters
    ----------
    lam : array_like
        The lambda values at which to compute the variance.

    Returns
    -------
    v : ndarray
        The variance. For lam < -0.5, the variance is not defined, so
        np.nan is returned. For lam = -0.5, np.inf is returned.

    Notes
    -----
    In an interval around lambda=0, this function uses the [4,4] Pade
    approximation to compute the variance. Otherwise it uses the standard
    formula (https://en.wikipedia.org/wiki/Tukey_lambda_distribution). The
    Pade approximation is used because the standard formula has a removable
    discontinuity at lambda = 0, and does not produce accurate numerical
    results near lambda = 0.
    """
    lam = np.asarray(lam)
    shp = lam.shape
    lam = np.atleast_1d(lam).astype(np.float64)

    # For absolute values of lam less than threshold, use the Pade
    # approximation.
    threshold = 0.075

    # Play games with masks to implement the conditional evaluation of
    # the distribution.
    # lambda < -0.5:  var = nan
    low_mask = lam < -0.5
    # lambda == -0.5: var = inf
    neghalf_mask = lam == -0.5
    # abs(lambda) < threshold:  use Pade approximation
    small_mask = np.abs(lam) < threshold
    # else the "regular" case:  use the explicit formula.
    reg_mask = ~(low_mask | neghalf_mask | small_mask)

    # Get the 'lam' values for the cases where they are needed.
    small = lam[small_mask]
    reg = lam[reg_mask]

    # Compute the function for each case.
    v = np.empty_like(lam)
    v[low_mask] = np.nan
    v[neghalf_mask] = np.inf
    if small.size > 0:
        # Use the Pade approximation near lambda = 0.
        v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)
    if reg.size > 0:
        v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) -
                                        beta(reg + 1, reg + 1))
    v.shape = shp
    return v
[ "def", "tukeylambda_variance", "(", "lam", ")", ":", "lam", "=", "np", ".", "asarray", "(", "lam", ")", "shp", "=", "lam", ".", "shape", "lam", "=", "np", ".", "atleast_1d", "(", "lam", ")", ".", "astype", "(", "np", ".", "float64", ")", "# For absolute values of lam less than threshold, use the Pade", "# approximation.", "threshold", "=", "0.075", "# Play games with masks to implement the conditional evaluation of", "# the distribution.", "# lambda < -0.5: var = nan", "low_mask", "=", "lam", "<", "-", "0.5", "# lambda == -0.5: var = inf", "neghalf_mask", "=", "lam", "==", "-", "0.5", "# abs(lambda) < threshold: use Pade approximation", "small_mask", "=", "np", ".", "abs", "(", "lam", ")", "<", "threshold", "# else the \"regular\" case: use the explicit formula.", "reg_mask", "=", "~", "(", "low_mask", "|", "neghalf_mask", "|", "small_mask", ")", "# Get the 'lam' values for the cases where they are needed.", "small", "=", "lam", "[", "small_mask", "]", "reg", "=", "lam", "[", "reg_mask", "]", "# Compute the function for each case.", "v", "=", "np", ".", "empty_like", "(", "lam", ")", "v", "[", "low_mask", "]", "=", "np", ".", "nan", "v", "[", "neghalf_mask", "]", "=", "np", ".", "inf", "if", "small", ".", "size", ">", "0", ":", "# Use the Pade approximation near lambda = 0.", "v", "[", "small_mask", "]", "=", "_tukeylambda_var_p", "(", "small", ")", "/", "_tukeylambda_var_q", "(", "small", ")", "if", "reg", ".", "size", ">", "0", ":", "v", "[", "reg_mask", "]", "=", "(", "2.0", "/", "reg", "**", "2", ")", "*", "(", "1.0", "/", "(", "1.0", "+", "2", "*", "reg", ")", "-", "beta", "(", "reg", "+", "1", ",", "reg", "+", "1", ")", ")", "v", ".", "shape", "=", "shp", "return", "v" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/stats/_tukeylambda_stats.py#L45-L102
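As a cross-check of the special cases above, the public scipy.stats.tukeylambda distribution exposes the same quantity (a sketch; the private helper's module path varies across scipy versions). At lam = 0 the distribution reduces to the standard logistic, whose variance is pi**2 / 3.

import numpy as np
from scipy import stats

# lam = 0 is the standard logistic distribution: variance pi**2 / 3.
print(stats.tukeylambda(0.0).var())  # ~3.289868...
print(np.pi ** 2 / 3)                # 3.2898681336964524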
omnisci/omniscidb
b9c95f1bd602b4ffc8b0edf18bfad61031e08d86
python/omnisci/thrift/OmniSci.py
python
Iface.get_databases
(self, session)
Parameters: - session
Parameters: - session
[ "Parameters", ":", "-", "session" ]
def get_databases(self, session):
    """
    Parameters:
     - session
    """
    pass
[ "def", "get_databases", "(", "self", ",", "session", ")", ":", "pass" ]
https://github.com/omnisci/omniscidb/blob/b9c95f1bd602b4ffc8b0edf18bfad61031e08d86/python/omnisci/thrift/OmniSci.py#L177-L183
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/urllib3/poolmanager.py
python
PoolManager.urlopen
(self, method, url, redirect=True, **kw)
return self.urlopen(method, redirect_location, **kw)
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``.
[ "Same", "as", ":", "meth", ":", "urllib3", ".", "connectionpool", ".", "HTTPConnectionPool", ".", "urlopen", "with", "custom", "cross", "-", "host", "redirect", "logic", "and", "only", "sends", "the", "request", "-", "uri", "portion", "of", "the", "url", "." ]
def urlopen(self, method, url, redirect=True, **kw):
    """
    Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
    with custom cross-host redirect logic and only sends the request-uri
    portion of the ``url``.

    The given ``url`` parameter must be absolute, such that an appropriate
    :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
    """
    u = parse_url(url)
    conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

    kw["assert_same_host"] = False
    kw["redirect"] = False

    if "headers" not in kw:
        kw["headers"] = self.headers.copy()

    if self.proxy is not None and u.scheme == "http":
        response = conn.urlopen(method, url, **kw)
    else:
        response = conn.urlopen(method, u.request_uri, **kw)

    redirect_location = redirect and response.get_redirect_location()
    if not redirect_location:
        return response

    # Support relative URLs for redirecting.
    redirect_location = urljoin(url, redirect_location)

    # RFC 7231, Section 6.4.4
    if response.status == 303:
        method = "GET"

    retries = kw.get("retries")
    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect)

    # Strip headers marked as unsafe to forward to the redirected location.
    # Check remove_headers_on_redirect to avoid a potential network call within
    # conn.is_same_host() which may use socket.gethostbyname() in the future.
    if retries.remove_headers_on_redirect and not conn.is_same_host(
        redirect_location
    ):
        headers = list(six.iterkeys(kw["headers"]))
        for header in headers:
            if header.lower() in retries.remove_headers_on_redirect:
                kw["headers"].pop(header, None)

    try:
        retries = retries.increment(method, url, response=response, _pool=conn)
    except MaxRetryError:
        if retries.raise_on_redirect:
            raise
        return response

    kw["retries"] = retries
    kw["redirect"] = redirect

    log.info("Redirecting %s -> %s", url, redirect_location)
    return self.urlopen(method, redirect_location, **kw)
[ "def", "urlopen", "(", "self", ",", "method", ",", "url", ",", "redirect", "=", "True", ",", "*", "*", "kw", ")", ":", "u", "=", "parse_url", "(", "url", ")", "conn", "=", "self", ".", "connection_from_host", "(", "u", ".", "host", ",", "port", "=", "u", ".", "port", ",", "scheme", "=", "u", ".", "scheme", ")", "kw", "[", "\"assert_same_host\"", "]", "=", "False", "kw", "[", "\"redirect\"", "]", "=", "False", "if", "\"headers\"", "not", "in", "kw", ":", "kw", "[", "\"headers\"", "]", "=", "self", ".", "headers", ".", "copy", "(", ")", "if", "self", ".", "proxy", "is", "not", "None", "and", "u", ".", "scheme", "==", "\"http\"", ":", "response", "=", "conn", ".", "urlopen", "(", "method", ",", "url", ",", "*", "*", "kw", ")", "else", ":", "response", "=", "conn", ".", "urlopen", "(", "method", ",", "u", ".", "request_uri", ",", "*", "*", "kw", ")", "redirect_location", "=", "redirect", "and", "response", ".", "get_redirect_location", "(", ")", "if", "not", "redirect_location", ":", "return", "response", "# Support relative URLs for redirecting.", "redirect_location", "=", "urljoin", "(", "url", ",", "redirect_location", ")", "# RFC 7231, Section 6.4.4", "if", "response", ".", "status", "==", "303", ":", "method", "=", "\"GET\"", "retries", "=", "kw", ".", "get", "(", "\"retries\"", ")", "if", "not", "isinstance", "(", "retries", ",", "Retry", ")", ":", "retries", "=", "Retry", ".", "from_int", "(", "retries", ",", "redirect", "=", "redirect", ")", "# Strip headers marked as unsafe to forward to the redirected location.", "# Check remove_headers_on_redirect to avoid a potential network call within", "# conn.is_same_host() which may use socket.gethostbyname() in the future.", "if", "retries", ".", "remove_headers_on_redirect", "and", "not", "conn", ".", "is_same_host", "(", "redirect_location", ")", ":", "headers", "=", "list", "(", "six", ".", "iterkeys", "(", "kw", "[", "\"headers\"", "]", ")", ")", "for", "header", "in", "headers", ":", "if", "header", ".", "lower", "(", ")", "in", "retries", ".", "remove_headers_on_redirect", ":", "kw", "[", "\"headers\"", "]", ".", "pop", "(", "header", ",", "None", ")", "try", ":", "retries", "=", "retries", ".", "increment", "(", "method", ",", "url", ",", "response", "=", "response", ",", "_pool", "=", "conn", ")", "except", "MaxRetryError", ":", "if", "retries", ".", "raise_on_redirect", ":", "raise", "return", "response", "kw", "[", "\"retries\"", "]", "=", "retries", "kw", "[", "\"redirect\"", "]", "=", "redirect", "log", ".", "info", "(", "\"Redirecting %s -> %s\"", ",", "url", ",", "redirect_location", ")", "return", "self", ".", "urlopen", "(", "method", ",", "redirect_location", ",", "*", "*", "kw", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/urllib3/poolmanager.py#L309-L369
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/perf/metrics/__init__.py
python
Metric.CustomizeBrowserOptions
(cls, options)
Add browser options that are required by this metric. Some metrics do not have any special browser options that need to be added, and they do not need to override this method; by default, no browser options are added. To add options here, call options.AppendExtraBrowserArgs(arg).
Add browser options that are required by this metric.
[ "Add", "browser", "options", "that", "are", "required", "by", "this", "metric", "." ]
def CustomizeBrowserOptions(cls, options):
  """Add browser options that are required by this metric.

  Some metrics do not have any special browser options that need
  to be added, and they do not need to override this method; by
  default, no browser options are added.

  To add options here, call options.AppendExtraBrowserArgs(arg).
  """
  pass
[ "def", "CustomizeBrowserOptions", "(", "cls", ",", "options", ")", ":", "pass" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/perf/metrics/__init__.py#L15-L24
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/strings.py
python
str_translate
(arr, table)
return _na_map(lambda x: x.translate(table), arr, dtype=str)
Map all characters in the string through the given mapping table.
Equivalent to standard :meth:`str.translate`.

Parameters
----------
table : dict
    Table is a mapping of Unicode ordinals to Unicode ordinals, strings, or
    None. Unmapped characters are left untouched.
    Characters mapped to None are deleted. :meth:`str.maketrans` is a
    helper function for making translation tables.

Returns
-------
Series or Index
Map all characters in the string through the given mapping table. Equivalent to standard :meth:`str.translate`.
[ "Map", "all", "characters", "in", "the", "string", "through", "the", "given", "mapping", "table", ".", "Equivalent", "to", "standard", ":", "meth", ":", "str", ".", "translate", "." ]
def str_translate(arr, table):
    """
    Map all characters in the string through the given mapping table.
    Equivalent to standard :meth:`str.translate`.

    Parameters
    ----------
    table : dict
        Table is a mapping of Unicode ordinals to Unicode ordinals, strings, or
        None. Unmapped characters are left untouched.
        Characters mapped to None are deleted. :meth:`str.maketrans` is a
        helper function for making translation tables.

    Returns
    -------
    Series or Index
    """
    return _na_map(lambda x: x.translate(table), arr, dtype=str)
[ "def", "str_translate", "(", "arr", ",", "table", ")", ":", "return", "_na_map", "(", "lambda", "x", ":", "x", ".", "translate", "(", "table", ")", ",", "arr", ",", "dtype", "=", "str", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/strings.py#L1765-L1782
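Since the pandas wrapper above just maps str.translate over the array via _na_map, the core behaviour can be shown with plain Python (illustrative values):

# str.maketrans builds the table: map 'a' -> 'b', delete 'x' entirely.
table = str.maketrans({'a': 'b', 'x': None})
print('axa'.translate(table))  # -> 'bb'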
lhmRyan/deep-supervised-hashing-DSH
631901f82e2ab031fbac33f914a5b08ef8e21d57
scripts/cpp_lint.py
python
IsErrorSuppressedByNolint
(category, linenum)
return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set()))
Returns true if the specified error category is suppressed on this line.

Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.

Args:
  category: str, the category of the error.
  linenum: int, the current line number.
Returns:
  bool, True iff the error should be suppressed due to a NOLINT comment.
Returns true if the specified error category is suppressed on this line.
[ "Returns", "true", "if", "the", "specified", "error", "category", "is", "suppressed", "on", "this", "line", "." ]
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  return (linenum in _error_suppressions.get(category, set()) or
          linenum in _error_suppressions.get(None, set()))
[ "def", "IsErrorSuppressedByNolint", "(", "category", ",", "linenum", ")", ":", "return", "(", "linenum", "in", "_error_suppressions", ".", "get", "(", "category", ",", "set", "(", ")", ")", "or", "linenum", "in", "_error_suppressions", ".", "get", "(", "None", ",", "set", "(", ")", ")", ")" ]
https://github.com/lhmRyan/deep-supervised-hashing-DSH/blob/631901f82e2ab031fbac33f914a5b08ef8e21d57/scripts/cpp_lint.py#L500-L513
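For readers unfamiliar with cpplint's globals, the two lookups above imply a map of the following shape (the category name and line numbers are illustrative only):

# category -> set of line numbers on which that category is suppressed;
# the None key holds lines with a bare NOLINT that suppresses everything.
_error_suppressions = {
    'build/include': {12, 40},
    None: {7},
}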
OSGeo/gdal
3748fc4ba4fba727492774b2b908a2130c864a83
swig/python/gdal-utils/osgeo_utils/auxiliary/color_palette.py
python
ColorPalette.read_file_txt
(self, filename: Optional[PathLikeOrStr] = None, lines: Optional[Sequence[str]] = None)
Read GDAL Text-based color configuration file
Read GDAL Text-based color configuration file
[ "Read", "GDAL", "Text", "-", "based", "color", "configuration", "file" ]
def read_file_txt(self, filename: Optional[PathLikeOrStr] = None,
                  lines: Optional[Sequence[str]] = None):
    """ Read GDAL Text-based color configuration file """
    if filename is not None:
        lines = open(filename).readlines()
    if not isinstance(lines, Sequence):
        raise Exception('unknown input {}'.format(lines))

    self.pal.clear()
    for line in lines:
        split_line = line.strip().split(' ', 1)
        if len(split_line) < 2:
            continue
        try:
            color = self.pal_color_to_rgb(split_line[1])
            key = split_line[0].strip()
        except:
            raise Exception('Error reading palette line: {}'.format(line))
        try:
            key = base.num(key)
        except ValueError:
            if key.lower() in self.ndv_keys:
                self.ndv = color
                continue
            else:
                # maybe percent
                self._all_numeric = False
        self.pal[key] = color
[ "def", "read_file_txt", "(", "self", ",", "filename", ":", "Optional", "[", "PathLikeOrStr", "]", "=", "None", ",", "lines", ":", "Optional", "[", "Sequence", "[", "str", "]", "]", "=", "None", ")", ":", "if", "filename", "is", "not", "None", ":", "lines", "=", "open", "(", "filename", ")", ".", "readlines", "(", ")", "if", "not", "isinstance", "(", "lines", ",", "Sequence", ")", ":", "raise", "Exception", "(", "'unknown input {}'", ".", "format", "(", "lines", ")", ")", "self", ".", "pal", ".", "clear", "(", ")", "for", "line", "in", "lines", ":", "split_line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "' '", ",", "1", ")", "if", "len", "(", "split_line", ")", "<", "2", ":", "continue", "try", ":", "color", "=", "self", ".", "pal_color_to_rgb", "(", "split_line", "[", "1", "]", ")", "key", "=", "split_line", "[", "0", "]", ".", "strip", "(", ")", "except", ":", "raise", "Exception", "(", "'Error reading palette line: {}'", ".", "format", "(", "line", ")", ")", "try", ":", "key", "=", "base", ".", "num", "(", "key", ")", "except", "ValueError", ":", "if", "key", ".", "lower", "(", ")", "in", "self", ".", "ndv_keys", ":", "self", ".", "ndv", "=", "color", "continue", "else", ":", "# maybe percent", "self", ".", "_all_numeric", "=", "False", "self", ".", "pal", "[", "key", "]", "=", "color" ]
https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/gdal-utils/osgeo_utils/auxiliary/color_palette.py#L223-L249
tensorflow/minigo
6d89c202cdceaf449aefc3149ab2110d44f1a6a4
oneoffs/joseki/opening_freqs_export.py
python
create_top_report
(top_n=100)
Creates an html page showing the most common sequences in the database, and charting their popularity over time.
Creates an html page showing the most common sequences in the database, and charting their popularity over time.
[ "Creates", "an", "html", "page", "showing", "the", "most", "common", "sequences", "in", "the", "database", "and", "charting", "their", "popularity", "over", "time", "." ]
def create_top_report(top_n=100):
    """ Creates an html page showing the most common sequences in the
    database, and charting their popularity over time. """
    db = sqlite3.connect(FLAGS.db_path)
    ts = lambda hr: int(dt.datetime.strptime(hr, "%Y-%m-%d-%H").timestamp())

    print('querying')
    ranges = openings.run_time_ranges(db)
    interps = openings.build_run_time_transformers(ranges)
    seqs_by_run = top_seqs_by_run(db, top_n)
    runs = sorted(seqs_by_run.keys())

    cols = []
    cols.append({'id': 'time', 'label': '% of Training', 'type': 'number'})
    for run in runs:
        cols.append({'id': run + 'count', 'label': run + ' times seen',
                     'type': 'number'})

    for run in runs:
        data = []
        sequences = seqs_by_run[run]
        for seq, count in sequences:
            print(run, seq, count)
            rows = collections.defaultdict(lambda: [0 for i in range(len(runs))])
            for idx, r in enumerate(runs):
                cur = db.execute('''
                    SELECT hour, count from joseki_counts where seq = ? and run = ?;
                    ''', (seq, r))
                for hr, ct in cur.fetchall():
                    key = interps[r](ts(hr))
                    rows[key][idx] = ct

            row_data = [{'c': [{'v': key}] + [{'v': v if v else None} for v in value]}
                        for key, value in rows.items()]
            obj = {'run': run, "count": count, 'cols': cols, "rows": row_data,
                   "sequence": seq}
            data.append(obj)

        print('saving')
        tmpl = Template(open('oneoffs/joseki.html').read())
        with open(run + FLAGS.out_file, 'w') as out:
            out.write(tmpl.render(giant_blob=json.dumps(data), run=run,
                                  time_ranges=json.dumps(ranges)))
[ "def", "create_top_report", "(", "top_n", "=", "100", ")", ":", "db", "=", "sqlite3", ".", "connect", "(", "FLAGS", ".", "db_path", ")", "ts", "=", "lambda", "hr", ":", "int", "(", "dt", ".", "datetime", ".", "strptime", "(", "hr", ",", "\"%Y-%m-%d-%H\"", ")", ".", "timestamp", "(", ")", ")", "print", "(", "'querying'", ")", "ranges", "=", "openings", ".", "run_time_ranges", "(", "db", ")", "interps", "=", "openings", ".", "build_run_time_transformers", "(", "ranges", ")", "seqs_by_run", "=", "top_seqs_by_run", "(", "db", ",", "top_n", ")", "runs", "=", "sorted", "(", "seqs_by_run", ".", "keys", "(", ")", ")", "cols", "=", "[", "]", "cols", ".", "append", "(", "{", "'id'", ":", "'time'", ",", "'label'", ":", "'% of Training'", ",", "'type'", ":", "'number'", "}", ")", "for", "run", "in", "runs", ":", "cols", ".", "append", "(", "{", "'id'", ":", "run", "+", "'count'", ",", "'label'", ":", "run", "+", "' times seen'", ",", "'type'", ":", "'number'", "}", ")", "for", "run", "in", "runs", ":", "data", "=", "[", "]", "sequences", "=", "seqs_by_run", "[", "run", "]", "for", "seq", ",", "count", "in", "sequences", ":", "print", "(", "run", ",", "seq", ",", "count", ")", "rows", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "[", "0", "for", "i", "in", "range", "(", "len", "(", "runs", ")", ")", "]", ")", "for", "idx", ",", "r", "in", "enumerate", "(", "runs", ")", ":", "cur", "=", "db", ".", "execute", "(", "'''\n SELECT hour, count from joseki_counts where seq = ? and run = ?;\n '''", ",", "(", "seq", ",", "r", ")", ")", "for", "hr", ",", "ct", "in", "cur", ".", "fetchall", "(", ")", ":", "key", "=", "interps", "[", "r", "]", "(", "ts", "(", "hr", ")", ")", "rows", "[", "key", "]", "[", "idx", "]", "=", "ct", "row_data", "=", "[", "{", "'c'", ":", "[", "{", "'v'", ":", "key", "}", "]", "+", "[", "{", "'v'", ":", "v", "if", "v", "else", "None", "}", "for", "v", "in", "value", "]", "}", "for", "key", ",", "value", "in", "rows", ".", "items", "(", ")", "]", "obj", "=", "{", "'run'", ":", "run", ",", "\"count\"", ":", "count", ",", "'cols'", ":", "cols", ",", "\"rows\"", ":", "row_data", ",", "\"sequence\"", ":", "seq", "}", "data", ".", "append", "(", "obj", ")", "print", "(", "'saving'", ")", "tmpl", "=", "Template", "(", "open", "(", "'oneoffs/joseki.html'", ")", ".", "read", "(", ")", ")", "with", "open", "(", "run", "+", "FLAGS", ".", "out_file", ",", "'w'", ")", "as", "out", ":", "out", ".", "write", "(", "tmpl", ".", "render", "(", "giant_blob", "=", "json", ".", "dumps", "(", "data", ")", ",", "run", "=", "run", ",", "time_ranges", "=", "json", ".", "dumps", "(", "ranges", ")", ")", ")" ]
https://github.com/tensorflow/minigo/blob/6d89c202cdceaf449aefc3149ab2110d44f1a6a4/oneoffs/joseki/opening_freqs_export.py#L231-L273
jbehley/point_labeler
bf22e6f255fe5c9f01979d2d670d0ac543ae6460
scripts/repair_labels.py
python
read_labels
(filename)
return arr
read labels from given file.
read labels from given file.
[ "read", "labels", "from", "given", "file", "." ]
def read_labels(filename):
    """ read labels from given file. """
    contents = bytes()

    with open(filename, "rb") as f:
        f.seek(0, 2)  # move the cursor to the end of the file
        num_points = int(f.tell() / 4)
        f.seek(0, 0)
        contents = f.read()

    arr = [struct.unpack('<I', contents[4 * i:4 * i + 4])[0]
           for i in range(num_points)]

    return arr
[ "def", "read_labels", "(", "filename", ")", ":", "contents", "=", "bytes", "(", ")", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "# move the cursor to the end of the file", "num_points", "=", "int", "(", "f", ".", "tell", "(", ")", "/", "4", ")", "f", ".", "seek", "(", "0", ",", "0", ")", "contents", "=", "f", ".", "read", "(", ")", "arr", "=", "[", "struct", ".", "unpack", "(", "'<I'", ",", "contents", "[", "4", "*", "i", ":", "4", "*", "i", "+", "4", "]", ")", "[", "0", "]", "for", "i", "in", "range", "(", "num_points", ")", "]", "return", "arr" ]
https://github.com/jbehley/point_labeler/blob/bf22e6f255fe5c9f01979d2d670d0ac543ae6460/scripts/repair_labels.py#L7-L18
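The struct-based loop above decodes one little-endian uint32 per point. A vectorized equivalent, assuming the same file layout, is a one-liner with numpy (dtype '<u4' matches struct's '<I'):

import numpy as np

def read_labels_np(filename):
    # One little-endian uint32 per point, same layout as the loop above.
    return np.fromfile(filename, dtype='<u4')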
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/imghdr.py
python
test_xbm
(h, f)
X bitmap (X10 or X11)
X bitmap (X10 or X11)
[ "X", "bitmap", "(", "X10", "or", "X11", ")" ]
def test_xbm(h, f):
    """X bitmap (X10 or X11)"""
    s = '#define '
    if h[:len(s)] == s:
        return 'xbm'
[ "def", "test_xbm", "(", "h", ",", "f", ")", ":", "s", "=", "'#define '", "if", "h", "[", ":", "len", "(", "s", ")", "]", "==", "s", ":", "return", "'xbm'" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/imghdr.py#L109-L113
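The test only inspects the leading bytes, so it can be exercised directly with header strings; imghdr.what() is the usual entry point that runs these per-format tests in turn.

print(test_xbm('#define im_width 16', None))  # -> 'xbm'
print(test_xbm('GIF89a', None))               # -> None (falls through)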
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/base.py
python
SelectionMixin._get_cython_func
(self, arg: str)
return self._cython_table.get(arg)
if we define an internal function for this argument, return it
if we define an internal function for this argument, return it
[ "if", "we", "define", "an", "internal", "function", "for", "this", "argument", "return", "it" ]
def _get_cython_func(self, arg: str) -> Optional[str]:
    """
    if we define an internal function for this argument, return it
    """
    return self._cython_table.get(arg)
[ "def", "_get_cython_func", "(", "self", ",", "arg", ":", "str", ")", "->", "Optional", "[", "str", "]", ":", "return", "self", ".", "_cython_table", ".", "get", "(", "arg", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/base.py#L558-L562
bundy-dns/bundy
3d41934996b82b0cd2fe22dd74d2abc1daba835d
src/lib/python/bundy/config/cfgmgr.py
python
ConfigManager.handle_msg
(self, msg)
return answer
Handle a command from the cc channel to the configuration manager
Handle a command from the cc channel to the configuration manager
[ "Handle", "a", "command", "from", "the", "cc", "channel", "to", "the", "configuration", "manager" ]
def handle_msg(self, msg):
    """Handle a command from the cc channel to the configuration manager"""
    answer = {}
    cmd, arg = ccsession.parse_command(msg)
    if cmd:
        if cmd == ccsession.COMMAND_GET_COMMANDS_SPEC:
            answer = ccsession.create_answer(0, self.get_commands_spec())
        elif cmd == ccsession.COMMAND_GET_STATISTICS_SPEC:
            answer = ccsession.create_answer(0, self.get_statistics_spec())
        elif cmd == ccsession.COMMAND_GET_MODULE_SPEC:
            answer = self.__handle_get_module_spec(arg)
        elif cmd == ccsession.COMMAND_GET_CONFIG:
            answer = self.__handle_get_config(arg)
        elif cmd == ccsession.COMMAND_SET_CONFIG:
            answer = self.__handle_set_config(arg)
        elif cmd == ccsession.COMMAND_MODULE_STOPPING:
            answer = self.__handle_module_stopping(arg)
        elif cmd == ccsession.COMMAND_SHUTDOWN:
            self.running = False
            answer = ccsession.create_answer(0)
        elif cmd == ccsession.COMMAND_MODULE_SPEC:
            try:
                answer = self.__handle_module_spec(bundy.config.ModuleSpec(arg))
            except bundy.config.ModuleSpecError as dde:
                answer = ccsession.create_answer(1, "Error in data definition: " + str(dde))
        else:
            answer = ccsession.create_answer(1, "Unknown command: " + str(cmd))
    else:
        answer = ccsession.create_answer(1, "Unknown message format: " + str(msg))
    return answer
[ "def", "handle_msg", "(", "self", ",", "msg", ")", ":", "answer", "=", "{", "}", "cmd", ",", "arg", "=", "ccsession", ".", "parse_command", "(", "msg", ")", "if", "cmd", ":", "if", "cmd", "==", "ccsession", ".", "COMMAND_GET_COMMANDS_SPEC", ":", "answer", "=", "ccsession", ".", "create_answer", "(", "0", ",", "self", ".", "get_commands_spec", "(", ")", ")", "elif", "cmd", "==", "ccsession", ".", "COMMAND_GET_STATISTICS_SPEC", ":", "answer", "=", "ccsession", ".", "create_answer", "(", "0", ",", "self", ".", "get_statistics_spec", "(", ")", ")", "elif", "cmd", "==", "ccsession", ".", "COMMAND_GET_MODULE_SPEC", ":", "answer", "=", "self", ".", "__handle_get_module_spec", "(", "arg", ")", "elif", "cmd", "==", "ccsession", ".", "COMMAND_GET_CONFIG", ":", "answer", "=", "self", ".", "__handle_get_config", "(", "arg", ")", "elif", "cmd", "==", "ccsession", ".", "COMMAND_SET_CONFIG", ":", "answer", "=", "self", ".", "__handle_set_config", "(", "arg", ")", "elif", "cmd", "==", "ccsession", ".", "COMMAND_MODULE_STOPPING", ":", "answer", "=", "self", ".", "__handle_module_stopping", "(", "arg", ")", "elif", "cmd", "==", "ccsession", ".", "COMMAND_SHUTDOWN", ":", "self", ".", "running", "=", "False", "answer", "=", "ccsession", ".", "create_answer", "(", "0", ")", "elif", "cmd", "==", "ccsession", ".", "COMMAND_MODULE_SPEC", ":", "try", ":", "answer", "=", "self", ".", "__handle_module_spec", "(", "bundy", ".", "config", ".", "ModuleSpec", "(", "arg", ")", ")", "except", "bundy", ".", "config", ".", "ModuleSpecError", "as", "dde", ":", "answer", "=", "ccsession", ".", "create_answer", "(", "1", ",", "\"Error in data definition: \"", "+", "str", "(", "dde", ")", ")", "else", ":", "answer", "=", "ccsession", ".", "create_answer", "(", "1", ",", "\"Unknown command: \"", "+", "str", "(", "cmd", ")", ")", "else", ":", "answer", "=", "ccsession", ".", "create_answer", "(", "1", ",", "\"Unknown message format: \"", "+", "str", "(", "msg", ")", ")", "return", "answer" ]
https://github.com/bundy-dns/bundy/blob/3d41934996b82b0cd2fe22dd74d2abc1daba835d/src/lib/python/bundy/config/cfgmgr.py#L587-L616
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/plat-mac/lib-scriptpackages/CodeWarrior/Standard_Suite.py
python
Standard_Suite_Events.set
(self, _object, _attributes={}, **_arguments)
set: set an object's data
Required argument: the object to change
Keyword argument to: the new value
Keyword argument _attributes: AppleEvent attribute dictionary
set: set an object's data Required argument: the object to change Keyword argument to: the new value Keyword argument _attributes: AppleEvent attribute dictionary
[ "set", ":", "set", "an", "object", "s", "data", "Required", "argument", ":", "the", "object", "to", "change", "Keyword", "argument", "to", ":", "the", "new", "value", "Keyword", "argument", "_attributes", ":", "AppleEvent", "attribute", "dictionary" ]
def set(self, _object, _attributes={}, **_arguments):
    """set: set an object's data
    Required argument: the object to change
    Keyword argument to: the new value
    Keyword argument _attributes: AppleEvent attribute dictionary
    """
    _code = 'core'
    _subcode = 'setd'

    aetools.keysubst(_arguments, self._argmap_set)
    _arguments['----'] = _object

    _reply, _arguments, _attributes = self.send(_code, _subcode,
            _arguments, _attributes)
    if _arguments.get('errn', 0):
        raise aetools.Error, aetools.decodeerror(_arguments)
    # XXXX Optionally decode result
    if _arguments.has_key('----'):
        return _arguments['----']
[ "def", "set", "(", "self", ",", "_object", ",", "_attributes", "=", "{", "}", ",", "*", "*", "_arguments", ")", ":", "_code", "=", "'core'", "_subcode", "=", "'setd'", "aetools", ".", "keysubst", "(", "_arguments", ",", "self", ".", "_argmap_set", ")", "_arguments", "[", "'----'", "]", "=", "_object", "_reply", ",", "_arguments", ",", "_attributes", "=", "self", ".", "send", "(", "_code", ",", "_subcode", ",", "_arguments", ",", "_attributes", ")", "if", "_arguments", ".", "get", "(", "'errn'", ",", "0", ")", ":", "raise", "aetools", ".", "Error", ",", "aetools", ".", "decodeerror", "(", "_arguments", ")", "# XXXX Optionally decode result", "if", "_arguments", ".", "has_key", "(", "'----'", ")", ":", "return", "_arguments", "[", "'----'", "]" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/plat-mac/lib-scriptpackages/CodeWarrior/Standard_Suite.py#L153-L172
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/training/optimizer.py
python
_deduplicate_indexed_slices
(values, indices)
return (summed_values, unique_indices)
Sums `values` associated with any non-unique `indices`.

Args:
  values: A `Tensor` with rank >= 1.
  indices: A one-dimensional integer `Tensor`, indexing into the first
    dimension of `values` (as in an IndexedSlices object).
Returns:
  A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is
  a de-duplicated version of `indices` and `summed_values` contains the sum
  of `values` slices associated with each unique index.
Sums `values` associated with any non-unique `indices`.
[ "Sums", "values", "associated", "with", "any", "non", "-", "unique", "indices", "." ]
def _deduplicate_indexed_slices(values, indices):
  """Sums `values` associated with any non-unique `indices`.

  Args:
    values: A `Tensor` with rank >= 1.
    indices: A one-dimensional integer `Tensor`, indexing into the first
      dimension of `values` (as in an IndexedSlices object).
  Returns:
    A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is
    a de-duplicated version of `indices` and `summed_values` contains the sum
    of `values` slices associated with each unique index.
  """
  unique_indices, new_index_positions = array_ops.unique(indices)
  summed_values = math_ops.unsorted_segment_sum(
      values, new_index_positions,
      array_ops.shape(unique_indices)[0])
  return (summed_values, unique_indices)
[ "def", "_deduplicate_indexed_slices", "(", "values", ",", "indices", ")", ":", "unique_indices", ",", "new_index_positions", "=", "array_ops", ".", "unique", "(", "indices", ")", "summed_values", "=", "math_ops", ".", "unsorted_segment_sum", "(", "values", ",", "new_index_positions", ",", "array_ops", ".", "shape", "(", "unique_indices", ")", "[", "0", "]", ")", "return", "(", "summed_values", ",", "unique_indices", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/training/optimizer.py#L49-L65
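The dedup-and-sum can be mirrored in plain NumPy, with unsorted_segment_sum replaced by np.add.at (a sketch of the idea with illustrative values, not TF's implementation):

import numpy as np

values = np.array([[1., 1.], [2., 2.], [3., 3.]])
indices = np.array([0, 2, 0])

# np.unique plays the role of array_ops.unique: inv maps each original
# row to its position in the de-duplicated index list.
uniq, inv = np.unique(indices, return_inverse=True)
summed = np.zeros((uniq.size,) + values.shape[1:])
np.add.at(summed, inv, values)   # accumulate rows that share an index

print(uniq)    # [0 2]
print(summed)  # [[4. 4.] [2. 2.]]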
PixarAnimationStudios/USD
faed18ce62c8736b02413635b584a2f637156bad
pxr/usd/usdUtils/complianceChecker.py
python
BaseRuleChecker.ResetCaches
(self)
Reset any caches the rule owns. Called whenever stage authoring occurs, such as when we iterate through VariantSet combinations.
Reset any caches the rule owns. Called whenever stage authoring occurs, such as when we iterate through VariantSet combinations.
[ "Reset", "any", "caches", "the", "rule", "owns", ".", "Called", "whenever", "stage", "authoring", "occurs", "such", "as", "when", "we", "iterate", "through", "VariantSet", "combinations", "." ]
def ResetCaches(self):
    """ Reset any caches the rule owns. Called whenever stage authoring
    occurs, such as when we iterate through VariantSet combinations.
    """
    pass
[ "def", "ResetCaches", "(", "self", ")", ":", "pass" ]
https://github.com/PixarAnimationStudios/USD/blob/faed18ce62c8736b02413635b584a2f637156bad/pxr/usd/usdUtils/complianceChecker.py#L131-L135
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/stc.py
python
StyledTextCtrl.IndicatorSetStyle
(*args, **kwargs)
return _stc.StyledTextCtrl_IndicatorSetStyle(*args, **kwargs)
IndicatorSetStyle(self, int indic, int style)

Set an indicator to plain, squiggle or TT.
IndicatorSetStyle(self, int indic, int style)
[ "IndicatorSetStyle", "(", "self", "int", "indic", "int", "style", ")" ]
def IndicatorSetStyle(*args, **kwargs):
    """
    IndicatorSetStyle(self, int indic, int style)

    Set an indicator to plain, squiggle or TT.
    """
    return _stc.StyledTextCtrl_IndicatorSetStyle(*args, **kwargs)
[ "def", "IndicatorSetStyle", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_IndicatorSetStyle", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/stc.py#L2865-L2871
mblondel/svmlight-loader
e3d0a90676430e164486e6feffe0b8857532ee4e
svmlight_loader.py
python
load_svmlight_file
(file_path, n_features=None, dtype=None, buffer_mb=40, zero_based="auto")
return (X_train, labels)
Load datasets in the svmlight / libsvm format into a sparse CSR matrix.

This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.

The first element of each line can be used to store a target variable
to predict.

This format is used as the default format for both svmlight and the
libsvm command line programs.

Parsing a text-based source can be expensive. When working repeatedly
on the same dataset, it is recommended to wrap this loader with
joblib.Memory.cache to store a memmapped backup of the CSR results of
the first call and benefit from the near instantaneous loading of
memmapped structures for the subsequent calls.

Parameters
----------
f : str
    Path to a file to load.

n_features : int or None
    The number of features to use. If None, it will be inferred. This
    argument is useful to load several files that are subsets of a
    bigger sliced dataset: each subset might not have examples of every
    feature, hence the inferred shape might vary from one slice to
    another.

Returns
-------
(X, y)

where X is a scipy.sparse matrix of shape (n_samples, n_features),
      y is a ndarray of shape (n_samples,).
Load datasets in the svmlight / libsvm format into a sparse CSR matrix.
[ "Load", "datasets", "in", "the", "svmlight", "/", "libsvm", "format", "into", "a", "sparse", "CSR", "matrix", "." ]
def load_svmlight_file(file_path, n_features=None, dtype=None,
                       buffer_mb=40, zero_based="auto"):
    """Load datasets in the svmlight / libsvm format into a sparse CSR matrix.

    This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.

    The first element of each line can be used to store a target variable
    to predict.

    This format is used as the default format for both svmlight and the
    libsvm command line programs.

    Parsing a text-based source can be expensive. When working repeatedly
    on the same dataset, it is recommended to wrap this loader with
    joblib.Memory.cache to store a memmapped backup of the CSR results of
    the first call and benefit from the near instantaneous loading of
    memmapped structures for the subsequent calls.

    Parameters
    ----------
    f : str
        Path to a file to load.

    n_features : int or None
        The number of features to use. If None, it will be inferred. This
        argument is useful to load several files that are subsets of a
        bigger sliced dataset: each subset might not have examples of every
        feature, hence the inferred shape might vary from one slice to
        another.

    Returns
    -------
    (X, y)

    where X is a scipy.sparse matrix of shape (n_samples, n_features),
          y is a ndarray of shape (n_samples,).
    """
    data, indices, indptr, labels = _load_svmlight_file(file_path, buffer_mb)

    if zero_based is False or \
       (zero_based == "auto" and np.min(indices) > 0):
        indices -= 1

    if n_features is not None:
        shape = (indptr.shape[0] - 1, n_features)
    else:
        shape = None  # inferred

    if dtype:
        data = np.array(data, dtype=dtype)

    X_train = sp.csr_matrix((data, indices, indptr), shape)

    return (X_train, labels)
[ "def", "load_svmlight_file", "(", "file_path", ",", "n_features", "=", "None", ",", "dtype", "=", "None", ",", "buffer_mb", "=", "40", ",", "zero_based", "=", "\"auto\"", ")", ":", "data", ",", "indices", ",", "indptr", ",", "labels", "=", "_load_svmlight_file", "(", "file_path", ",", "buffer_mb", ")", "if", "zero_based", "is", "False", "or", "(", "zero_based", "==", "\"auto\"", "and", "np", ".", "min", "(", "indices", ")", ">", "0", ")", ":", "indices", "-=", "1", "if", "n_features", "is", "not", "None", ":", "shape", "=", "(", "indptr", ".", "shape", "[", "0", "]", "-", "1", ",", "n_features", ")", "else", ":", "shape", "=", "None", "# inferred", "if", "dtype", ":", "data", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "dtype", ")", "X_train", "=", "sp", ".", "csr_matrix", "(", "(", "data", ",", "indices", ",", "indptr", ")", ",", "shape", ")", "return", "(", "X_train", ",", "labels", ")" ]
https://github.com/mblondel/svmlight-loader/blob/e3d0a90676430e164486e6feffe0b8857532ee4e/svmlight_loader.py#L17-L71
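As the docstring itself recommends, the expensive text parse can be cached with joblib. A minimal sketch of that pattern, assuming the loader lives in a svmlight_loader module as in this repo and using a hypothetical file train.svm (each line looks like <label> <index>:<value> ..., e.g. "1 3:0.5 7:1.2"):

    from joblib import Memory
    from svmlight_loader import load_svmlight_file  # module name assumed from this repo

    memory = Memory("./svmlight_cache", verbose=0)  # hypothetical cache directory

    @memory.cache
    def get_data(path):
        # The slow text parse runs only on the first call; subsequent calls
        # reload the cached CSR result near-instantly.
        return load_svmlight_file(path)

    X, y = get_data("train.svm")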
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/fluid/evaluator.py
python
Evaluator.reset
(self, executor, reset_program=None)
reset metric states at the beginning of each pass/user-specified batch Args: executor(Executor|ParallelExecutor): an executor for executing the reset_program reset_program(Program): a single Program for the reset process
reset metric states at the beginning of each pass/user-specified batch
[ "reset", "metric", "states", "at", "the", "begin", "of", "each", "pass", "/", "user", "specified", "batch" ]
def reset(self, executor, reset_program=None): """ reset metric states at the beginning of each pass/user-specified batch Args: executor(Executor|ParallelExecutor): an executor for executing the reset_program reset_program(Program): a single Program for the reset process """ if reset_program is None: reset_program = Program() with program_guard(main_program=reset_program): for var in self.states: assert isinstance(var, Variable) g_var = _clone_var_(reset_program.current_block(), var) layers.fill_constant( shape=g_var.shape, value=0.0, dtype=g_var.dtype, out=g_var) executor.run(reset_program)
[ "def", "reset", "(", "self", ",", "executor", ",", "reset_program", "=", "None", ")", ":", "if", "reset_program", "is", "None", ":", "reset_program", "=", "Program", "(", ")", "with", "program_guard", "(", "main_program", "=", "reset_program", ")", ":", "for", "var", "in", "self", ".", "states", ":", "assert", "isinstance", "(", "var", ",", "Variable", ")", "g_var", "=", "_clone_var_", "(", "reset_program", ".", "current_block", "(", ")", ",", "var", ")", "layers", ".", "fill_constant", "(", "shape", "=", "g_var", ".", "shape", ",", "value", "=", "0.0", ",", "dtype", "=", "g_var", ".", "dtype", ",", "out", "=", "g_var", ")", "executor", ".", "run", "(", "reset_program", ")" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/evaluator.py#L77-L95
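A hedged usage sketch of the reset-per-pass pattern this docstring describes; evaluator, num_passes, train_reader, feeder, and main_program are illustrative names assumed to be set up elsewhere:

    import paddle.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())
    for pass_id in range(num_passes):
        evaluator.reset(exe)                 # zero the metric state variables
        for batch in train_reader():
            exe.run(main_program, feed=feeder.feed(batch))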
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/masked/maskededit.py
python
MaskedEditMixin._SetKeycodeHandler
(self, keycode, func)
This function adds and/or replaces key event handling functions used by the control. <func> should take the event as argument and return False if no further action on the key is necessary.
This function adds and/or replaces key event handling functions used by the control. <func> should take the event as argument and return False if no further action on the key is necessary.
[ "This", "function", "adds", "and", "/", "or", "replaces", "key", "event", "handling", "functions", "used", "by", "the", "control", ".", "<func", ">", "should", "take", "the", "event", "as", "argument", "and", "return", "False", "if", "no", "further", "action", "on", "the", "key", "is", "necessary", "." ]
def _SetKeycodeHandler(self, keycode, func): """ This function adds and/or replaces key event handling functions used by the control. <func> should take the event as argument and return False if no further action on the key is necessary. """ if func: self._keyhandlers[keycode] = func elif self._keyhandlers.has_key(keycode): del self._keyhandlers[keycode]
[ "def", "_SetKeycodeHandler", "(", "self", ",", "keycode", ",", "func", ")", ":", "if", "func", ":", "self", ".", "_keyhandlers", "[", "keycode", "]", "=", "func", "elif", "self", ".", "_keyhandlers", ".", "has_key", "(", "keycode", ")", ":", "del", "self", ".", "_keyhandlers", "[", "keycode", "]" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/masked/maskededit.py#L2183-L2192
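dict.has_key() is Python 2 only (this is wxPython Classic); under Python 3 the same handler-table logic would use the in operator. A sketch of the equivalent:

    def _SetKeycodeHandler(self, keycode, func):
        # Add or replace the handler for keycode; remove it when func is falsy.
        if func:
            self._keyhandlers[keycode] = func
        elif keycode in self._keyhandlers:   # replaces dict.has_key()
            del self._keyhandlers[keycode]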
lballabio/quantlib-old
136336947ed4fea9ecc1da6edad188700e821739
gensrc/gensrc/functions/function.py
python
Function.serialize
(self, serializer)
Load/unload class state to/from serializer object.
Load/unload class state to/from serializer object.
[ "Load", "/", "unload", "class", "state", "to", "/", "from", "serializer", "object", "." ]
def serialize(self, serializer): """Load/unload class state to/from serializer object.""" serializer.serializeAttribute(self, common.NAME) serializer.serializeProperty(self, common.DESCRIPTION) serializer.serializeProperty(self, common.LONG_DESC, self.description_) serializer.serializeObjectDict(self, supportedplatform.SupportedPlatform) serializer.serializeProperty(self, common.ALIAS, environment.config().namespaceObjects() + '::' + self.name_) serializer.serializeObject(self, parameterlist.ParameterList) serializer.serializeBoolean(self, common.DOCUMENTATION_ONLY) serializer.serializeAttributeBoolean(self, common.DEPENDENCY_TRIGGER, True) serializer.serializeAttributeBoolean(self, 'visible', True)
[ "def", "serialize", "(", "self", ",", "serializer", ")", ":", "serializer", ".", "serializeAttribute", "(", "self", ",", "common", ".", "NAME", ")", "serializer", ".", "serializeProperty", "(", "self", ",", "common", ".", "DESCRIPTION", ")", "serializer", ".", "serializeProperty", "(", "self", ",", "common", ".", "LONG_DESC", ",", "self", ".", "description_", ")", "serializer", ".", "serializeObjectDict", "(", "self", ",", "supportedplatform", ".", "SupportedPlatform", ")", "serializer", ".", "serializeProperty", "(", "self", ",", "common", ".", "ALIAS", ",", "environment", ".", "config", "(", ")", ".", "namespaceObjects", "(", ")", "+", "'::'", "+", "self", ".", "name_", ")", "serializer", ".", "serializeObject", "(", "self", ",", "parameterlist", ".", "ParameterList", ")", "serializer", ".", "serializeBoolean", "(", "self", ",", "common", ".", "DOCUMENTATION_ONLY", ")", "serializer", ".", "serializeAttributeBoolean", "(", "self", ",", "common", ".", "DEPENDENCY_TRIGGER", ",", "True", ")", "serializer", ".", "serializeAttributeBoolean", "(", "self", ",", "'visible'", ",", "True", ")" ]
https://github.com/lballabio/quantlib-old/blob/136336947ed4fea9ecc1da6edad188700e821739/gensrc/gensrc/functions/function.py#L119-L129
Tencent/CMONGO
c40380caa14e05509f46993aa8b8da966b09b0b5
src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Node/__init__.py
python
Node.set_noclean
(self, noclean = 1)
Set the Node's noclean value.
Set the Node's noclean value.
[ "Set", "the", "Node", "s", "noclean", "value", "." ]
def set_noclean(self, noclean = 1): """Set the Node's noclean value.""" # Make sure noclean is an integer so the --debug=stree # output in Util.py can use it as an index. self.noclean = noclean and 1 or 0
[ "def", "set_noclean", "(", "self", ",", "noclean", "=", "1", ")", ":", "# Make sure noclean is an integer so the --debug=stree", "# output in Util.py can use it as an index.", "self", ".", "noclean", "=", "noclean", "and", "1", "or", "0" ]
https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Node/__init__.py#L1198-L1202
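The `noclean and 1 or 0` expression is the pre-`x if c else y` conditional idiom (SCons historically supported very old Pythons); in modern Python the same truthiness-to-int normalization reads:

    self.noclean = 1 if noclean else 0   # equivalently: int(bool(noclean))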
mapnik/mapnik
f3da900c355e1d15059c4a91b00203dcc9d9f0ef
scons/scons-local-4.1.0/SCons/Node/FS.py
python
File.Dirs
(self, pathlist)
return [self.Dir(p) for p in pathlist]
Create a list of directories relative to the SConscript directory of this file.
Create a list of directories relative to the SConscript directory of this file.
[ "Create", "a", "list", "of", "directories", "relative", "to", "the", "SConscript", "directory", "of", "this", "file", "." ]
def Dirs(self, pathlist): """Create a list of directories relative to the SConscript directory of this file.""" return [self.Dir(p) for p in pathlist]
[ "def", "Dirs", "(", "self", ",", "pathlist", ")", ":", "return", "[", "self", ".", "Dir", "(", "p", ")", "for", "p", "in", "pathlist", "]" ]
https://github.com/mapnik/mapnik/blob/f3da900c355e1d15059c4a91b00203dcc9d9f0ef/scons/scons-local-4.1.0/SCons/Node/FS.py#L2654-L2657
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_windows.py
python
PyWindow.DoMoveWindow
(*args, **kwargs)
return _windows_.PyWindow_DoMoveWindow(*args, **kwargs)
DoMoveWindow(self, int x, int y, int width, int height)
DoMoveWindow(self, int x, int y, int width, int height)
[ "DoMoveWindow", "(", "self", "int", "x", "int", "y", "int", "width", "int", "height", ")" ]
def DoMoveWindow(*args, **kwargs): """DoMoveWindow(self, int x, int y, int width, int height)""" return _windows_.PyWindow_DoMoveWindow(*args, **kwargs)
[ "def", "DoMoveWindow", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "PyWindow_DoMoveWindow", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_windows.py#L4146-L4148
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/s3transfer/manager.py
python
TransferCoordinatorController.__init__
(self)
Abstraction to control all transfer coordinators This abstraction allows the manager to wait for in-progress transfers to complete and cancel all in-progress transfers.
Abstraction to control all transfer coordinators
[ "Abstraction", "to", "control", "all", "transfer", "coordinators" ]
def __init__(self): """Abstraction to control all transfer coordinators This abstraction allows the manager to wait for in-progress transfers to complete and cancel all in-progress transfers. """ self._lock = threading.Lock() self._tracked_transfer_coordinators = set()
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "_lock", "=", "threading", ".", "Lock", "(", ")", "self", ".", "_tracked_transfer_coordinators", "=", "set", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/s3transfer/manager.py#L577-L584
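The lock-plus-set pairing implies add/remove operations that mutate the set under the lock; a sketch of what those typically look like (method names here are illustrative, not necessarily the exact s3transfer API):

    def add_transfer_coordinator(self, transfer_coordinator):
        # Track a coordinator so it can later be waited on or cancelled.
        with self._lock:
            self._tracked_transfer_coordinators.add(transfer_coordinator)

    def remove_transfer_coordinator(self, transfer_coordinator):
        # Stop tracking once the transfer has finished.
        with self._lock:
            self._tracked_transfer_coordinators.remove(transfer_coordinator)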
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/cgitb.py
python
html
(einfo, context=5)
return head + ''.join(frames) + ''.join(exception) + ''' <!-- The above is a description of an error in a Python program, formatted for a Web browser because the 'cgitb' module was enabled. In case you are not reading this in a Web browser, here is the original traceback: %s --> ''' % pydoc.html.escape( ''.join(traceback.format_exception(etype, evalue, etb)))
Return a nice HTML document describing a given traceback.
Return a nice HTML document describing a given traceback.
[ "Return", "a", "nice", "HTML", "document", "describing", "a", "given", "traceback", "." ]
def html(einfo, context=5): """Return a nice HTML document describing a given traceback.""" etype, evalue, etb = einfo if type(etype) is types.ClassType: etype = etype.__name__ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable date = time.ctime(time.time()) head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading( '<big><big>%s</big></big>' % strong(pydoc.html.escape(str(etype))), '#ffffff', '#6622aa', pyver + '<br>' + date) + ''' <p>A problem occurred in a Python script. Here is the sequence of function calls leading up to the error, in the order they occurred.</p>''' indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>' frames = [] records = inspect.getinnerframes(etb, context) for frame, file, lnum, func, lines, index in records: if file: file = os.path.abspath(file) link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file)) else: file = link = '?' args, varargs, varkw, locals = inspect.getargvalues(frame) call = '' if func != '?': call = 'in ' + strong(func) + \ inspect.formatargvalues(args, varargs, varkw, locals, formatvalue=lambda value: '=' + pydoc.html.repr(value)) highlight = {} def reader(lnum=[lnum]): highlight[lnum[0]] = 1 try: return linecache.getline(file, lnum[0]) finally: lnum[0] += 1 vars = scanvars(reader, frame, locals) rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' % ('<big>&nbsp;</big>', link, call)] if index is not None: i = lnum - index for line in lines: num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;' if i in highlight: line = '<tt>=&gt;%s%s</tt>' % (num, pydoc.html.preformat(line)) rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line) else: line = '<tt>&nbsp;&nbsp;%s%s</tt>' % (num, pydoc.html.preformat(line)) rows.append('<tr><td>%s</td></tr>' % grey(line)) i += 1 done, dump = {}, [] for name, where, value in vars: if name in done: continue done[name] = 1 if value is not __UNDEF__: if where in ('global', 'builtin'): name = ('<em>%s</em> ' % where) + strong(name) elif where == 'local': name = strong(name) else: name = where + strong(name.split('.')[-1]) dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value))) else: dump.append(name + ' <em>undefined</em>') rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump)))) frames.append(''' <table width="100%%" cellspacing=0 cellpadding=0 border=0> %s</table>''' % '\n'.join(rows)) exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))), pydoc.html.escape(str(evalue)))] if isinstance(evalue, BaseException): for name in dir(evalue): if name[:1] == '_': continue value = pydoc.html.repr(getattr(evalue, name)) exception.append('\n<br>%s%s&nbsp;=\n%s' % (indent, name, value)) return head + ''.join(frames) + ''.join(exception) + ''' <!-- The above is a description of an error in a Python program, formatted for a Web browser because the 'cgitb' module was enabled. In case you are not reading this in a Web browser, here is the original traceback: %s --> ''' % pydoc.html.escape( ''.join(traceback.format_exception(etype, evalue, etb)))
[ "def", "html", "(", "einfo", ",", "context", "=", "5", ")", ":", "etype", ",", "evalue", ",", "etb", "=", "einfo", "if", "type", "(", "etype", ")", "is", "types", ".", "ClassType", ":", "etype", "=", "etype", ".", "__name__", "pyver", "=", "'Python '", "+", "sys", ".", "version", ".", "split", "(", ")", "[", "0", "]", "+", "': '", "+", "sys", ".", "executable", "date", "=", "time", ".", "ctime", "(", "time", ".", "time", "(", ")", ")", "head", "=", "'<body bgcolor=\"#f0f0f8\">'", "+", "pydoc", ".", "html", ".", "heading", "(", "'<big><big>%s</big></big>'", "%", "strong", "(", "pydoc", ".", "html", ".", "escape", "(", "str", "(", "etype", ")", ")", ")", ",", "'#ffffff'", ",", "'#6622aa'", ",", "pyver", "+", "'<br>'", "+", "date", ")", "+", "'''\n<p>A problem occurred in a Python script. Here is the sequence of\nfunction calls leading up to the error, in the order they occurred.</p>'''", "indent", "=", "'<tt>'", "+", "small", "(", "'&nbsp;'", "*", "5", ")", "+", "'&nbsp;</tt>'", "frames", "=", "[", "]", "records", "=", "inspect", ".", "getinnerframes", "(", "etb", ",", "context", ")", "for", "frame", ",", "file", ",", "lnum", ",", "func", ",", "lines", ",", "index", "in", "records", ":", "if", "file", ":", "file", "=", "os", ".", "path", ".", "abspath", "(", "file", ")", "link", "=", "'<a href=\"file://%s\">%s</a>'", "%", "(", "file", ",", "pydoc", ".", "html", ".", "escape", "(", "file", ")", ")", "else", ":", "file", "=", "link", "=", "'?'", "args", ",", "varargs", ",", "varkw", ",", "locals", "=", "inspect", ".", "getargvalues", "(", "frame", ")", "call", "=", "''", "if", "func", "!=", "'?'", ":", "call", "=", "'in '", "+", "strong", "(", "func", ")", "+", "inspect", ".", "formatargvalues", "(", "args", ",", "varargs", ",", "varkw", ",", "locals", ",", "formatvalue", "=", "lambda", "value", ":", "'='", "+", "pydoc", ".", "html", ".", "repr", "(", "value", ")", ")", "highlight", "=", "{", "}", "def", "reader", "(", "lnum", "=", "[", "lnum", "]", ")", ":", "highlight", "[", "lnum", "[", "0", "]", "]", "=", "1", "try", ":", "return", "linecache", ".", "getline", "(", "file", ",", "lnum", "[", "0", "]", ")", "finally", ":", "lnum", "[", "0", "]", "+=", "1", "vars", "=", "scanvars", "(", "reader", ",", "frame", ",", "locals", ")", "rows", "=", "[", "'<tr><td bgcolor=\"#d8bbff\">%s%s %s</td></tr>'", "%", "(", "'<big>&nbsp;</big>'", ",", "link", ",", "call", ")", "]", "if", "index", "is", "not", "None", ":", "i", "=", "lnum", "-", "index", "for", "line", "in", "lines", ":", "num", "=", "small", "(", "'&nbsp;'", "*", "(", "5", "-", "len", "(", "str", "(", "i", ")", ")", ")", "+", "str", "(", "i", ")", ")", "+", "'&nbsp;'", "if", "i", "in", "highlight", ":", "line", "=", "'<tt>=&gt;%s%s</tt>'", "%", "(", "num", ",", "pydoc", ".", "html", ".", "preformat", "(", "line", ")", ")", "rows", ".", "append", "(", "'<tr><td bgcolor=\"#ffccee\">%s</td></tr>'", "%", "line", ")", "else", ":", "line", "=", "'<tt>&nbsp;&nbsp;%s%s</tt>'", "%", "(", "num", ",", "pydoc", ".", "html", ".", "preformat", "(", "line", ")", ")", "rows", ".", "append", "(", "'<tr><td>%s</td></tr>'", "%", "grey", "(", "line", ")", ")", "i", "+=", "1", "done", ",", "dump", "=", "{", "}", ",", "[", "]", "for", "name", ",", "where", ",", "value", "in", "vars", ":", "if", "name", "in", "done", ":", "continue", "done", "[", "name", "]", "=", "1", "if", "value", "is", "not", "__UNDEF__", ":", "if", "where", "in", "(", "'global'", ",", "'builtin'", ")", ":", "name", "=", "(", "'<em>%s</em> '", "%", "where", ")", "+", "strong", "(", "name", 
")", "elif", "where", "==", "'local'", ":", "name", "=", "strong", "(", "name", ")", "else", ":", "name", "=", "where", "+", "strong", "(", "name", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ")", "dump", ".", "append", "(", "'%s&nbsp;= %s'", "%", "(", "name", ",", "pydoc", ".", "html", ".", "repr", "(", "value", ")", ")", ")", "else", ":", "dump", ".", "append", "(", "name", "+", "' <em>undefined</em>'", ")", "rows", ".", "append", "(", "'<tr><td>%s</td></tr>'", "%", "small", "(", "grey", "(", "', '", ".", "join", "(", "dump", ")", ")", ")", ")", "frames", ".", "append", "(", "'''\n<table width=\"100%%\" cellspacing=0 cellpadding=0 border=0>\n%s</table>'''", "%", "'\\n'", ".", "join", "(", "rows", ")", ")", "exception", "=", "[", "'<p>%s: %s'", "%", "(", "strong", "(", "pydoc", ".", "html", ".", "escape", "(", "str", "(", "etype", ")", ")", ")", ",", "pydoc", ".", "html", ".", "escape", "(", "str", "(", "evalue", ")", ")", ")", "]", "if", "isinstance", "(", "evalue", ",", "BaseException", ")", ":", "for", "name", "in", "dir", "(", "evalue", ")", ":", "if", "name", "[", ":", "1", "]", "==", "'_'", ":", "continue", "value", "=", "pydoc", ".", "html", ".", "repr", "(", "getattr", "(", "evalue", ",", "name", ")", ")", "exception", ".", "append", "(", "'\\n<br>%s%s&nbsp;=\\n%s'", "%", "(", "indent", ",", "name", ",", "value", ")", ")", "return", "head", "+", "''", ".", "join", "(", "frames", ")", "+", "''", ".", "join", "(", "exception", ")", "+", "'''\n\n\n<!-- The above is a description of an error in a Python program, formatted\n for a Web browser because the 'cgitb' module was enabled. In case you\n are not reading this in a Web browser, here is the original traceback:\n\n%s\n-->\n'''", "%", "pydoc", ".", "html", ".", "escape", "(", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "etype", ",", "evalue", ",", "etb", ")", ")", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/cgitb.py#L102-L191
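Typical use of this Python 2 stdlib function: hand it the tuple from sys.exc_info() inside an except block and it returns a self-contained HTML page as a string:

    import cgitb
    import sys

    try:
        1 / 0
    except ZeroDivisionError:
        page = cgitb.html(sys.exc_info(), context=5)
        # `page` is a complete HTML document describing the traceback.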
LUX-Core/lux
4e1ff7d34a9c76312135ddc869db09149c35170e
contrib/spendfrom/spendfrom.py
python
determine_db_dir
()
return os.path.expanduser("~/.lux")
Return the default location of the lux data directory
Return the default location of the lux data directory
[ "Return", "the", "default", "location", "of", "the", "lux", "data", "directory" ]
def determine_db_dir(): """Return the default location of the lux data directory""" if platform.system() == "Darwin": return os.path.expanduser("~/Library/Application Support/LUX/") elif platform.system() == "Windows": return os.path.join(os.environ['APPDATA'], "LUX") return os.path.expanduser("~/.lux")
[ "def", "determine_db_dir", "(", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "\"Darwin\"", ":", "return", "os", ".", "path", ".", "expanduser", "(", "\"~/Library/Application Support/LUX/\"", ")", "elif", "platform", ".", "system", "(", ")", "==", "\"Windows\"", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'APPDATA'", "]", ",", "\"LUX\"", ")", "return", "os", ".", "path", ".", "expanduser", "(", "\"~/.lux\"", ")" ]
https://github.com/LUX-Core/lux/blob/4e1ff7d34a9c76312135ddc869db09149c35170e/contrib/spendfrom/spendfrom.py#L35-L41
fifengine/fifengine
4b62c42e85bec19893cef8e63e6855927cff2c47
engine/python/fife/extensions/pychan/widgets/slider.py
python
Slider._setScaleEnd
(self, end)
setScaleEnd(self, double scaleEnd)
setScaleEnd(self, double scaleEnd)
[ "setScaleEnd", "(", "self", "double", "scaleEnd", ")" ]
def _setScaleEnd(self, end): """setScaleEnd(self, double scaleEnd)""" if type(end) != float: raise RuntimeError("Slider expects float for end scale") self.real_widget.setScaleEnd(end)
[ "def", "_setScaleEnd", "(", "self", ",", "end", ")", ":", "if", "type", "(", "end", ")", "!=", "float", ":", "raise", "RuntimeError", "(", "\"Slider expects float for end scale\"", ")", "self", ".", "real_widget", ".", "setScaleEnd", "(", "end", ")" ]
https://github.com/fifengine/fifengine/blob/4b62c42e85bec19893cef8e63e6855927cff2c47/engine/python/fife/extensions/pychan/widgets/slider.py#L200-L204
MTG/gaia
0f7214dbdec6f9b651ca34211824841ffba0bc77
src/bindings/pygaia/utils.py
python
makedir
(path)
Create a directory and all the required parents. Do not raise an exception if the directory already existed. This is the equivalent of 'mkdir -p'.
Create a directory and all the required parents. Do not raise an exception if the directory already existed.
[ "Create", "a", "directory", "and", "all", "the", "required", "parents", ".", "Do", "not", "raise", "an", "exception", "if", "the", "directory", "already", "existed", "." ]
def makedir(path): """Create a directory and all the required parents. Do not raise an exception if the directory already existed. This is the equivalent of 'mkdir -p'.""" try: os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST: pass else: raise
[ "def", "makedir", "(", "path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "OSError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EEXIST", ":", "pass", "else", ":", "raise" ]
https://github.com/MTG/gaia/blob/0f7214dbdec6f9b651ca34211824841ffba0bc77/src/bindings/pygaia/utils.py#L65-L74
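On Python 3.2+ the standard library provides this behavior directly, so the helper collapses to a single call:

    import os

    os.makedirs(path, exist_ok=True)   # 'mkdir -p' semantics; no error if path already exists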
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/packager/__init__.py
python
SimpleManifestSink.__init__
(self, finder, formatter)
Initialize the SimpleManifestSink. The given FileFinder is used to get files matching the patterns given in the manifest. The given formatter does the packaging job.
Initialize the SimpleManifestSink. The given FileFinder is used to get files matching the patterns given in the manifest. The given formatter does the packaging job.
[ "Initialize", "the", "SimpleManifestSink", ".", "The", "given", "FileFinder", "is", "used", "to", "get", "files", "matching", "the", "patterns", "given", "in", "the", "manifest", ".", "The", "given", "formatter", "does", "the", "packaging", "job", "." ]
def __init__(self, finder, formatter): ''' Initialize the SimpleManifestSink. The given FileFinder is used to get files matching the patterns given in the manifest. The given formatter does the packaging job. ''' self._finder = finder self.packager = SimplePackager(formatter) self._closed = False self._manifests = set()
[ "def", "__init__", "(", "self", ",", "finder", ",", "formatter", ")", ":", "self", ".", "_finder", "=", "finder", "self", ".", "packager", "=", "SimplePackager", "(", "formatter", ")", "self", ".", "_closed", "=", "False", "self", ".", "_manifests", "=", "set", "(", ")" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/packager/__init__.py#L333-L342
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/data_flow_ops.py
python
Barrier.name
(self)
return self._barrier_ref.op.name
The name of the underlying barrier.
The name of the underlying barrier.
[ "The", "name", "of", "the", "underlying", "barrier", "." ]
def name(self): """The name of the underlying barrier.""" if context.executing_eagerly(): return self._name return self._barrier_ref.op.name
[ "def", "name", "(", "self", ")", ":", "if", "context", ".", "executing_eagerly", "(", ")", ":", "return", "self", ".", "_name", "return", "self", ".", "_barrier_ref", ".", "op", ".", "name" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/data_flow_ops.py#L1084-L1088
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Draft/draftguitools/gui_shapestrings.py
python
ShapeString.numericSSize
(self, ssize)
Validate the size in the user interface. This function is called by the toolbar or taskpanel interface when a valid size parameter has been entered in the input field.
Validate the size in the user interface.
[ "Validate", "the", "size", "in", "the", "user", "interface", "." ]
def numericSSize(self, ssize): """Validate the size in the user interface. This function is called by the toolbar or taskpanel interface when a valid size parameter has been entered in the input field. """ self.SSSize = ssize self.ui.STrackUi()
[ "def", "numericSSize", "(", "self", ",", "ssize", ")", ":", "self", ".", "SSSize", "=", "ssize", "self", ".", "ui", ".", "STrackUi", "(", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftguitools/gui_shapestrings.py#L183-L190
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/position.py
python
Position.opening_cost
(self)
return self._opening_cost
Gets the opening_cost of this Position. # noqa: E501 :return: The opening_cost of this Position. # noqa: E501 :rtype: float
Gets the opening_cost of this Position. # noqa: E501
[ "Gets", "the", "opening_cost", "of", "this", "Position", ".", "#", "noqa", ":", "E501" ]
def opening_cost(self): """Gets the opening_cost of this Position. # noqa: E501 :return: The opening_cost of this Position. # noqa: E501 :rtype: float """ return self._opening_cost
[ "def", "opening_cost", "(", "self", ")", ":", "return", "self", ".", "_opening_cost" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/position.py#L882-L889
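Swagger-codegen models pair each getter property with a setter of the same shape; the matching setter for this attribute would typically look like the sketch below (inferred from the generator's usual output, not copied from this file):

    @opening_cost.setter
    def opening_cost(self, opening_cost):
        # Sets the opening_cost of this Position.
        self._opening_cost = opening_cost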
cinder/Cinder
e83f5bb9c01a63eec20168d02953a0879e5100f7
src/freetype/tools/glnames.py
python
main
()
main program body
main program body
[ "main", "program", "body" ]
def main(): """main program body""" if len( sys.argv ) != 2: print __doc__ % sys.argv[0] sys.exit( 1 ) file = open( sys.argv[1], "wb" ) write = file.write count_sid = len( sid_standard_names ) # `mac_extras' contains the list of glyph names in the Macintosh standard # encoding which are not in the SID Standard Names. # mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names ) # `base_list' contains the names of our final glyph names table. # It consists of the `mac_extras' glyph names, followed by the SID # standard names. # mac_extras_count = len( mac_extras ) base_list = mac_extras + sid_standard_names write( "/***************************************************************************/\n" ) write( "/* */\n" ) write( "/* %-71s*/\n" % os.path.basename( sys.argv[1] ) ) write( "/* */\n" ) write( "/* PostScript glyph names. */\n" ) write( "/* */\n" ) write( "/* Copyright 2005-2018 by */\n" ) write( "/* David Turner, Robert Wilhelm, and Werner Lemberg. */\n" ) write( "/* */\n" ) write( "/* This file is part of the FreeType project, and may only be used, */\n" ) write( "/* modified, and distributed under the terms of the FreeType project */\n" ) write( "/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\n" ) write( "/* this file you indicate that you have read the license and */\n" ) write( "/* understand and accept it fully. */\n" ) write( "/* */\n" ) write( "/***************************************************************************/\n" ) write( "\n" ) write( "\n" ) write( " /* This file has been generated automatically -- do not edit! */\n" ) write( "\n" ) write( "\n" ) # dump final glyph list (mac extras + sid standard names) # st = StringTable( base_list, "ft_standard_glyph_names" ) st.dump( file ) st.dump_sublist( file, "ft_mac_names", "FT_NUM_MAC_NAMES", mac_standard_names ) st.dump_sublist( file, "ft_sid_names", "FT_NUM_SID_NAMES", sid_standard_names ) dump_encoding( file, "t1_standard_encoding", t1_standard_encoding ) dump_encoding( file, "t1_expert_encoding", t1_expert_encoding ) # dump the AGL in its compressed form # agl_glyphs, agl_values = adobe_glyph_values() dict = StringNode( "", 0 ) for g in range( len( agl_glyphs ) ): dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) ) dict = dict.optimize() dict_len = dict.locate( 0 ) dict_array = dict.store( "" ) write( """\ /* * This table is a compressed version of the Adobe Glyph List (AGL), * optimized for efficient searching. It has been generated by the * `glnames.py' python script located in the `src/tools' directory. * * The lookup function to get the Unicode value for a given string * is defined below the table. */ #ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST """ ) dump_array( dict_array, write, "ft_adobe_glyph_list" ) # write the lookup routine now # write( """\ #ifdef DEFINE_PS_TABLES /* * This function searches the compressed table efficiently. 
*/ static unsigned long ft_get_adobe_glyph_index( const char* name, const char* limit ) { int c = 0; int count, min, max; const unsigned char* p = ft_adobe_glyph_list; if ( name == 0 || name >= limit ) goto NotFound; c = *name++; count = p[1]; p += 2; min = 0; max = count; while ( min < max ) { int mid = ( min + max ) >> 1; const unsigned char* q = p + mid * 2; int c2; q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] ); c2 = q[0] & 127; if ( c2 == c ) { p = q; goto Found; } if ( c2 < c ) min = mid + 1; else max = mid; } goto NotFound; Found: for (;;) { /* assert (*p & 127) == c */ if ( name >= limit ) { if ( (p[0] & 128) == 0 && (p[1] & 128) != 0 ) return (unsigned long)( ( (int)p[2] << 8 ) | p[3] ); goto NotFound; } c = *name++; if ( p[0] & 128 ) { p++; if ( c != (p[0] & 127) ) goto NotFound; continue; } p++; count = p[0] & 127; if ( p[0] & 128 ) p += 2; p++; for ( ; count > 0; count--, p += 2 ) { int offset = ( (int)p[0] << 8 ) | p[1]; const unsigned char* q = ft_adobe_glyph_list + offset; if ( c == ( q[0] & 127 ) ) { p = q; goto NextIter; } } goto NotFound; NextIter: ; } NotFound: return 0; } #endif /* DEFINE_PS_TABLES */ #endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */ """ ) if 0: # generate unit test, or don't # # now write the unit test to check that everything works OK # write( "#ifdef TEST\n\n" ) write( "static const char* const the_names[] = {\n" ) for name in agl_glyphs: write( ' "' + name + '",\n' ) write( " 0\n};\n" ) write( "static const unsigned long the_values[] = {\n" ) for val in agl_values: write( ' 0x' + val + ',\n' ) write( " 0\n};\n" ) write( """ #include <stdlib.h> #include <stdio.h> int main( void ) { int result = 0; const char* const* names = the_names; const unsigned long* values = the_values; for ( ; *names; names++, values++ ) { const char* name = *names; unsigned long reference = *values; unsigned long value; value = ft_get_adobe_glyph_index( name, name + strlen( name ) ); if ( value != reference ) { result = 1; fprintf( stderr, "name '%s' => %04x instead of %04x\\n", name, value, reference ); } } return result; } """ ) write( "#endif /* TEST */\n" ) write("\n/* END */\n")
[ "def", "main", "(", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", "!=", "2", ":", "print", "__doc__", "%", "sys", ".", "argv", "[", "0", "]", "sys", ".", "exit", "(", "1", ")", "file", "=", "open", "(", "sys", ".", "argv", "[", "1", "]", ",", "\"wb\"", ")", "write", "=", "file", ".", "write", "count_sid", "=", "len", "(", "sid_standard_names", ")", "# `mac_extras' contains the list of glyph names in the Macintosh standard", "# encoding which are not in the SID Standard Names.", "#", "mac_extras", "=", "filter_glyph_names", "(", "mac_standard_names", ",", "sid_standard_names", ")", "# `base_list' contains the names of our final glyph names table.", "# It consists of the `mac_extras' glyph names, followed by the SID", "# standard names.", "#", "mac_extras_count", "=", "len", "(", "mac_extras", ")", "base_list", "=", "mac_extras", "+", "sid_standard_names", "write", "(", "\"/***************************************************************************/\\n\"", ")", "write", "(", "\"/* */\\n\"", ")", "write", "(", "\"/* %-71s*/\\n\"", "%", "os", ".", "path", ".", "basename", "(", "sys", ".", "argv", "[", "1", "]", ")", ")", "write", "(", "\"/* */\\n\"", ")", "write", "(", "\"/* PostScript glyph names. */\\n\"", ")", "write", "(", "\"/* */\\n\"", ")", "write", "(", "\"/* Copyright 2005-2018 by */\\n\"", ")", "write", "(", "\"/* David Turner, Robert Wilhelm, and Werner Lemberg. */\\n\"", ")", "write", "(", "\"/* */\\n\"", ")", "write", "(", "\"/* This file is part of the FreeType project, and may only be used, */\\n\"", ")", "write", "(", "\"/* modified, and distributed under the terms of the FreeType project */\\n\"", ")", "write", "(", "\"/* license, LICENSE.TXT. By continuing to use, modify, or distribute */\\n\"", ")", "write", "(", "\"/* this file you indicate that you have read the license and */\\n\"", ")", "write", "(", "\"/* understand and accept it fully. */\\n\"", ")", "write", "(", "\"/* */\\n\"", ")", "write", "(", "\"/***************************************************************************/\\n\"", ")", "write", "(", "\"\\n\"", ")", "write", "(", "\"\\n\"", ")", "write", "(", "\" /* This file has been generated automatically -- do not edit! 
*/\\n\"", ")", "write", "(", "\"\\n\"", ")", "write", "(", "\"\\n\"", ")", "# dump final glyph list (mac extras + sid standard names)", "#", "st", "=", "StringTable", "(", "base_list", ",", "\"ft_standard_glyph_names\"", ")", "st", ".", "dump", "(", "file", ")", "st", ".", "dump_sublist", "(", "file", ",", "\"ft_mac_names\"", ",", "\"FT_NUM_MAC_NAMES\"", ",", "mac_standard_names", ")", "st", ".", "dump_sublist", "(", "file", ",", "\"ft_sid_names\"", ",", "\"FT_NUM_SID_NAMES\"", ",", "sid_standard_names", ")", "dump_encoding", "(", "file", ",", "\"t1_standard_encoding\"", ",", "t1_standard_encoding", ")", "dump_encoding", "(", "file", ",", "\"t1_expert_encoding\"", ",", "t1_expert_encoding", ")", "# dump the AGL in its compressed form", "#", "agl_glyphs", ",", "agl_values", "=", "adobe_glyph_values", "(", ")", "dict", "=", "StringNode", "(", "\"\"", ",", "0", ")", "for", "g", "in", "range", "(", "len", "(", "agl_glyphs", ")", ")", ":", "dict", ".", "add", "(", "agl_glyphs", "[", "g", "]", ",", "eval", "(", "\"0x\"", "+", "agl_values", "[", "g", "]", ")", ")", "dict", "=", "dict", ".", "optimize", "(", ")", "dict_len", "=", "dict", ".", "locate", "(", "0", ")", "dict_array", "=", "dict", ".", "store", "(", "\"\"", ")", "write", "(", "\"\"\"\\\n /*\n * This table is a compressed version of the Adobe Glyph List (AGL),\n * optimized for efficient searching. It has been generated by the\n * `glnames.py' python script located in the `src/tools' directory.\n *\n * The lookup function to get the Unicode value for a given string\n * is defined below the table.\n */\n\n#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST\n\n\"\"\"", ")", "dump_array", "(", "dict_array", ",", "write", ",", "\"ft_adobe_glyph_list\"", ")", "# write the lookup routine now", "#", "write", "(", "\"\"\"\\\n#ifdef DEFINE_PS_TABLES\n /*\n * This function searches the compressed table efficiently.\n */\n static unsigned long\n ft_get_adobe_glyph_index( const char* name,\n const char* limit )\n {\n int c = 0;\n int count, min, max;\n const unsigned char* p = ft_adobe_glyph_list;\n\n\n if ( name == 0 || name >= limit )\n goto NotFound;\n\n c = *name++;\n count = p[1];\n p += 2;\n\n min = 0;\n max = count;\n\n while ( min < max )\n {\n int mid = ( min + max ) >> 1;\n const unsigned char* q = p + mid * 2;\n int c2;\n\n\n q = ft_adobe_glyph_list + ( ( (int)q[0] << 8 ) | q[1] );\n\n c2 = q[0] & 127;\n if ( c2 == c )\n {\n p = q;\n goto Found;\n }\n if ( c2 < c )\n min = mid + 1;\n else\n max = mid;\n }\n goto NotFound;\n\n Found:\n for (;;)\n {\n /* assert (*p & 127) == c */\n\n if ( name >= limit )\n {\n if ( (p[0] & 128) == 0 &&\n (p[1] & 128) != 0 )\n return (unsigned long)( ( (int)p[2] << 8 ) | p[3] );\n\n goto NotFound;\n }\n c = *name++;\n if ( p[0] & 128 )\n {\n p++;\n if ( c != (p[0] & 127) )\n goto NotFound;\n\n continue;\n }\n\n p++;\n count = p[0] & 127;\n if ( p[0] & 128 )\n p += 2;\n\n p++;\n\n for ( ; count > 0; count--, p += 2 )\n {\n int offset = ( (int)p[0] << 8 ) | p[1];\n const unsigned char* q = ft_adobe_glyph_list + offset;\n\n if ( c == ( q[0] & 127 ) )\n {\n p = q;\n goto NextIter;\n }\n }\n goto NotFound;\n\n NextIter:\n ;\n }\n\n NotFound:\n return 0;\n }\n#endif /* DEFINE_PS_TABLES */\n\n#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */\n\n\"\"\"", ")", "if", "0", ":", "# generate unit test, or don't", "#", "# now write the unit test to check that everything works OK", "#", "write", "(", "\"#ifdef TEST\\n\\n\"", ")", "write", "(", "\"static const char* const the_names[] = {\\n\"", ")", "for", "name", "in", "agl_glyphs", 
":", "write", "(", "' \"'", "+", "name", "+", "'\",\\n'", ")", "write", "(", "\" 0\\n};\\n\"", ")", "write", "(", "\"static const unsigned long the_values[] = {\\n\"", ")", "for", "val", "in", "agl_values", ":", "write", "(", "' 0x'", "+", "val", "+", "',\\n'", ")", "write", "(", "\" 0\\n};\\n\"", ")", "write", "(", "\"\"\"\n#include <stdlib.h>\n#include <stdio.h>\n\n int\n main( void )\n {\n int result = 0;\n const char* const* names = the_names;\n const unsigned long* values = the_values;\n\n\n for ( ; *names; names++, values++ )\n {\n const char* name = *names;\n unsigned long reference = *values;\n unsigned long value;\n\n\n value = ft_get_adobe_glyph_index( name, name + strlen( name ) );\n if ( value != reference )\n {\n result = 1;\n fprintf( stderr, \"name '%s' => %04x instead of %04x\\\\n\",\n name, value, reference );\n }\n }\n\n return result;\n }\n\"\"\"", ")", "write", "(", "\"#endif /* TEST */\\n\"", ")", "write", "(", "\"\\n/* END */\\n\"", ")" ]
https://github.com/cinder/Cinder/blob/e83f5bb9c01a63eec20168d02953a0879e5100f7/src/freetype/tools/glnames.py#L5289-L5532
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/keras/python/keras/backend.py
python
cos
(x)
return math_ops.cos(x)
Computes cos of x element-wise. Arguments: x: Tensor or variable. Returns: A tensor.
Computes cos of x element-wise.
[ "Computes", "cos", "of", "x", "element", "-", "wise", "." ]
def cos(x): """Computes cos of x element-wise. Arguments: x: Tensor or variable. Returns: A tensor. """ return math_ops.cos(x)
[ "def", "cos", "(", "x", ")", ":", "return", "math_ops", ".", "cos", "(", "x", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/keras/python/keras/backend.py#L1781-L1790
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/ftplib.py
python
FTP.rmd
(self, dirname)
return self.voidcmd('RMD ' + dirname)
Remove a directory.
Remove a directory.
[ "Remove", "a", "directory", "." ]
def rmd(self, dirname): '''Remove a directory.''' return self.voidcmd('RMD ' + dirname)
[ "def", "rmd", "(", "self", ",", "dirname", ")", ":", "return", "self", ".", "voidcmd", "(", "'RMD '", "+", "dirname", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/ftplib.py#L571-L573
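Usage with the standard library's FTP client; the host and directory name are hypothetical:

    from ftplib import FTP

    ftp = FTP("ftp.example.com")   # hypothetical server
    ftp.login()                    # anonymous login
    ftp.rmd("old_uploads")         # sends "RMD old_uploads"; servers typically reject non-empty dirs
    ftp.quit()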
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2.py
python
recoverFile
(filename)
return xmlDoc(_obj=ret)
parse an XML file and build a tree. Automatic support for ZLIB/Compress compressed documents is provided by default if found at compile-time. If the document is not well formed, it attempts to build a tree anyway
parse an XML file and build a tree. Automatic support for ZLIB/Compress compressed documents is provided by default if found at compile-time. If the document is not well formed, it attempts to build a tree anyway
[ "parse", "an", "XML", "file", "and", "build", "a", "tree", ".", "Automatic", "support", "for", "ZLIB", "/", "Compress", "compressed", "document", "is", "provided", "by", "default", "if", "found", "at", "compile", "-", "time", ".", "In", "the", "case", "the", "document", "is", "not", "Well", "Formed", "it", "attempts", "to", "build", "a", "tree", "anyway" ]
def recoverFile(filename): """parse an XML file and build a tree. Automatic support for ZLIB/Compress compressed documents is provided by default if found at compile-time. If the document is not well formed, it attempts to build a tree anyway """ ret = libxml2mod.xmlRecoverFile(filename) if ret is None: raise treeError('xmlRecoverFile() failed') return xmlDoc(_obj=ret)
[ "def", "recoverFile", "(", "filename", ")", ":", "ret", "=", "libxml2mod", ".", "xmlRecoverFile", "(", "filename", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlRecoverFile() failed'", ")", "return", "xmlDoc", "(", "_obj", "=", "ret", ")" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L1390-L1397
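A hedged usage sketch; the filename is hypothetical, and the returned document should be freed explicitly since these bindings wrap raw C objects:

    import libxml2

    doc = libxml2.recoverFile("possibly-broken.xml")  # hypothetical file
    try:
        root = doc.getRootElement()
        print(root.name)
    finally:
        doc.freeDoc()   # libxml2 documents are not garbage-collected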
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
third_party/protobuf/python/google/protobuf/internal/python_message.py
python
_AddClearFieldMethod
(message_descriptor, cls)
Helper for _AddMessageMethods().
Helper for _AddMessageMethods().
[ "Helper", "for", "_AddMessageMethods", "()", "." ]
def _AddClearFieldMethod(message_descriptor, cls): """Helper for _AddMessageMethods().""" def ClearField(self, field_name): try: field = message_descriptor.fields_by_name[field_name] except KeyError: raise ValueError('Protocol message has no "%s" field.' % field_name) if field in self._fields: # Note: If the field is a sub-message, its listener will still point # at us. That's fine, because the worst that can happen is that it # will call _Modified() and invalidate our byte size. Big deal. del self._fields[field] # Always call _Modified() -- even if nothing was changed, this is # a mutating method, and thus calling it should cause the field to become # present in the parent message. self._Modified() cls.ClearField = ClearField
[ "def", "_AddClearFieldMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "ClearField", "(", "self", ",", "field_name", ")", ":", "try", ":", "field", "=", "message_descriptor", ".", "fields_by_name", "[", "field_name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'Protocol message has no \"%s\" field.'", "%", "field_name", ")", "if", "field", "in", "self", ".", "_fields", ":", "# Note: If the field is a sub-message, its listener will still point", "# at us. That's fine, because the worst than can happen is that it", "# will call _Modified() and invalidate our byte size. Big deal.", "del", "self", ".", "_fields", "[", "field", "]", "# Always call _Modified() -- even if nothing was changed, this is", "# a mutating method, and thus calling it should cause the field to become", "# present in the parent message.", "self", ".", "_Modified", "(", ")", "cls", ".", "ClearField", "=", "ClearField" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/protobuf/python/google/protobuf/internal/python_message.py#L608-L627
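From the message side, the generated ClearField raises for unknown names and silently clears known ones; a short usage sketch with a hypothetical message class and field name:

    msg = MyMessage()            # hypothetical generated message class
    msg.my_field = 42            # hypothetical scalar field
    msg.ClearField("my_field")   # field is now unset again
    try:
        msg.ClearField("no_such_field")
    except ValueError:
        pass                     # unknown field names raise, per the helper above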
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/pipes.py
python
Template.__repr__
(self)
return '<Template instance, steps=%r>' % (self.steps,)
t.__repr__() implements repr(t).
t.__repr__() implements repr(t).
[ "t", ".", "__repr__", "()", "implements", "repr", "(", "t", ")", "." ]
def __repr__(self): """t.__repr__() implements repr(t).""" return '<Template instance, steps=%r>' % (self.steps,)
[ "def", "__repr__", "(", "self", ")", ":", "return", "'<Template instance, steps=%r>'", "%", "(", "self", ".", "steps", ",", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/pipes.py#L88-L90
ricardoquesada/Spidermonkey
4a75ea2543408bd1b2c515aa95901523eeef7858
ipc/ipdl/ipdl/parser.py
python
p_BridgesStmtsOpt
(p)
BridgesStmtsOpt : BridgesStmt BridgesStmtsOpt | OpensStmtsOpt
BridgesStmtsOpt : BridgesStmt BridgesStmtsOpt | OpensStmtsOpt
[ "BridgesStmtsOpt", ":", "BridgesStmt", "BridgesStmtsOpt", "|", "OpensStmtsOpt" ]
def p_BridgesStmtsOpt(p): """BridgesStmtsOpt : BridgesStmt BridgesStmtsOpt | OpensStmtsOpt""" if 2 == len(p): p[0] = p[1] else: p[2].bridgesStmts.insert(0, p[1]) p[0] = p[2]
[ "def", "p_BridgesStmtsOpt", "(", "p", ")", ":", "if", "2", "==", "len", "(", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "else", ":", "p", "[", "2", "]", ".", "bridgesStmts", ".", "insert", "(", "0", ",", "p", "[", "1", "]", ")", "p", "[", "0", "]", "=", "p", "[", "2", "]" ]
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/ipc/ipdl/ipdl/parser.py#L393-L400
borglab/gtsam
a5bee157efce6a0563704bce6a5d188c29817f39
gtsam/3rdparty/Eigen/debug/gdb/printers.py
python
lookup_function
(val)
return None
Look-up and return a pretty-printer that can print val.
Look-up and return a pretty-printer that can print val.
[ "Look", "-", "up", "and", "return", "a", "pretty", "-", "printer", "that", "can", "print", "va", "." ]
def lookup_function(val): "Look-up and return a pretty-printer that can print val." type = val.type if type.code == gdb.TYPE_CODE_REF: type = type.target() type = type.unqualified().strip_typedefs() typename = type.tag if typename == None: return None for function in pretty_printers_dict: if function.search(typename): return pretty_printers_dict[function](val) return None
[ "def", "lookup_function", "(", "val", ")", ":", "type", "=", "val", ".", "type", "if", "type", ".", "code", "==", "gdb", ".", "TYPE_CODE_REF", ":", "type", "=", "type", ".", "target", "(", ")", "type", "=", "type", ".", "unqualified", "(", ")", ".", "strip_typedefs", "(", ")", "typename", "=", "type", ".", "tag", "if", "typename", "==", "None", ":", "return", "None", "for", "function", "in", "pretty_printers_dict", ":", "if", "function", ".", "search", "(", "typename", ")", ":", "return", "pretty_printers_dict", "[", "function", "]", "(", "val", ")", "return", "None" ]
https://github.com/borglab/gtsam/blob/a5bee157efce6a0563704bce6a5d188c29817f39/gtsam/3rdparty/Eigen/debug/gdb/printers.py#L192-L210
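gdb consults this function once it is appended to a pretty-printer list; the usual registration hook (Eigen's printers.py does essentially this) is sketched below:

    import gdb

    def register_printers(obj=None):
        # Attach lookup_function to a specific objfile, or globally when obj is None.
        if obj is None:
            obj = gdb
        obj.pretty_printers.append(lookup_function)

    register_printers()   # e.g. from a 'python' block in .gdbinit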
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/plat-mac/findertools.py
python
isactiveprocess
(processname)
return 0
Check if processname is active. MacOS9
Check if processname is active. MacOS9
[ "Check", "of", "processname", "is", "active", ".", "MacOS9" ]
def isactiveprocess(processname): """Check if processname is active. MacOS9""" all = processes() ok = 0 for n, c in all: if n == processname: return 1 return 0
[ "def", "isactiveprocess", "(", "processname", ")", ":", "all", "=", "processes", "(", ")", "ok", "=", "0", "for", "n", ",", "c", "in", "all", ":", "if", "n", "==", "processname", ":", "return", "1", "return", "0" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/plat-mac/findertools.py#L218-L225
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/dataset/vision/py_transforms.py
python
RandomResizedCrop.__call__
(self, img)
return util.random_resize_crop(img, self.size, self.scale, self.ratio, self.interpolation, self.max_attempts)
Call method. Args: img (PIL Image): Image to be randomly cropped and resized. Returns: PIL Image, randomly cropped and resized image.
Call method.
[ "Call", "method", "." ]
def __call__(self, img): """ Call method. Args: img (PIL Image): Image to be randomly cropped and resized. Returns: PIL Image, randomly cropped and resized image. """ return util.random_resize_crop(img, self.size, self.scale, self.ratio, self.interpolation, self.max_attempts)
[ "def", "__call__", "(", "self", ",", "img", ")", ":", "return", "util", ".", "random_resize_crop", "(", "img", ",", "self", ".", "size", ",", "self", ".", "scale", ",", "self", ".", "ratio", ",", "self", ".", "interpolation", ",", "self", ".", "max_attempts", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/dataset/vision/py_transforms.py#L666-L677
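A hedged usage sketch; the image path is hypothetical and the constructor call assumes the class's usual signature (size plus optional scale, ratio, interpolation, max_attempts):

    from PIL import Image

    img = Image.open("sample.jpg")        # hypothetical input image
    crop = RandomResizedCrop(size=224)    # 224x224 output with default scale/ratio
    out = crop(img)                       # a randomly cropped and resized PIL Image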
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/aui/framemanager.py
python
AuiDockingHintWindow.SetShape
(self, region)
If the platform supports it, sets the shape of the window to that depicted by `region`. The system will not display or respond to any mouse event for the pixels that lie outside of the region. To reset the window to the normal rectangular shape simply call :meth:`SetShape` again with an empty region. :param Region `region`: the shape of the frame. :note: Overridden for wxMAC.
If the platform supports it, sets the shape of the window to that depicted by `region`. The system will not display or respond to any mouse event for the pixels that lie outside of the region. To reset the window to the normal rectangular shape simply call :meth:`SetShape` again with an empty region.
[ "If", "the", "platform", "supports", "it", "sets", "the", "shape", "of", "the", "window", "to", "that", "depicted", "by", "region", ".", "The", "system", "will", "not", "display", "or", "respond", "to", "any", "mouse", "event", "for", "the", "pixels", "that", "lie", "outside", "of", "the", "region", ".", "To", "reset", "the", "window", "to", "the", "normal", "rectangular", "shape", "simply", "call", ":", "meth", ":", "SetShape", "again", "with", "an", "empty", "region", "." ]
def SetShape(self, region): """ If the platform supports it, sets the shape of the window to that depicted by `region`. The system will not display or respond to any mouse event for the pixels that lie outside of the region. To reset the window to the normal rectangular shape simply call :meth:`SetShape` again with an empty region. :param Region `region`: the shape of the frame. :note: Overridden for wxMAC. """ if wx.Platform == '__WXMAC__': # HACK so we don't crash when SetShape is called return else: super(AuiDockingHintWindow, self).SetShape(region)
[ "def", "SetShape", "(", "self", ",", "region", ")", ":", "if", "wx", ".", "Platform", "==", "'__WXMAC__'", ":", "# HACK so we don't crash when SetShape is called", "return", "else", ":", "super", "(", "AuiDockingHintWindow", ",", "self", ")", ".", "SetShape", "(", "region", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/aui/framemanager.py#L2778-L2794
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Code/Tools/waf-1.7.13/waflib/extras/msvs.py
python
msvs_generator.get_solution_node
(self)
return self.solution_node
The solution filename is required when writing the .vcproj files. Return self.solution_node; if it does not exist, create one.
The solution filename is required when writing the .vcproj files. Return self.solution_node; if it does not exist, create one.
[ "The", "solution", "filename", "is", "required", "when", "writing", "the", ".", "vcproj", "files", "return", "self", ".", "solution_node", "and", "if", "it", "does", "not", "exist", "make", "one" ]
def get_solution_node(self): """ The solution filename is required when writing the .vcproj files. Return self.solution_node; if it does not exist, create one. """ try: return self.solution_node except: pass solution_name = getattr(self, 'solution_name', None) if not solution_name: solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '.sln' if os.path.isabs(solution_name): self.solution_node = self.root.make_node(solution_name) else: self.solution_node = self.srcnode.make_node(solution_name) return self.solution_node
[ "def", "get_solution_node", "(", "self", ")", ":", "try", ":", "return", "self", ".", "solution_node", "except", ":", "pass", "solution_name", "=", "getattr", "(", "self", ",", "'solution_name'", ",", "None", ")", "if", "not", "solution_name", ":", "solution_name", "=", "getattr", "(", "Context", ".", "g_module", ",", "Context", ".", "APPNAME", ",", "'project'", ")", "+", "'.sln'", "if", "os", ".", "path", ".", "isabs", "(", "solution_name", ")", ":", "self", ".", "solution_node", "=", "self", ".", "root", ".", "make_node", "(", "solution_name", ")", "else", ":", "self", ".", "solution_node", "=", "self", ".", "srcnode", ".", "make_node", "(", "solution_name", ")", "return", "self", ".", "solution_node" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/extras/msvs.py#L788-L805
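The caching above relies on a bare `except:`; an equivalent sketch that avoids swallowing unrelated errors uses getattr with a default (same behavior under the stated assumption that only a missing attribute can make the lookup fail):

    def get_solution_node(self):
        node = getattr(self, 'solution_node', None)   # cache hit without try/except
        if node is not None:
            return node
        solution_name = getattr(self, 'solution_name', None) or \
            getattr(Context.g_module, Context.APPNAME, 'project') + '.sln'
        if os.path.isabs(solution_name):
            self.solution_node = self.root.make_node(solution_name)
        else:
            self.solution_node = self.srcnode.make_node(solution_name)
        return self.solution_node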