diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/BuildExecutable.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/BuildExecutable.py new file mode 100644 index 0000000000000000000000000000000000000000..0190cc86ff1539d0c4b4916c559b0f2163c279ed --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/BuildExecutable.py @@ -0,0 +1,170 @@ +""" +Compile a Python script into an executable that embeds CPython. +Requires CPython to be built as a shared library ('libpythonX.Y'). + +Basic usage: + + python -m Cython.Build.BuildExecutable [ARGS] somefile.py +""" + +from __future__ import absolute_import + +DEBUG = True + +import sys +import os +if sys.version_info < (3, 9): + from distutils import sysconfig as _sysconfig + + class sysconfig(object): + + @staticmethod + def get_path(name): + assert name == 'include' + return _sysconfig.get_python_inc() + + get_config_var = staticmethod(_sysconfig.get_config_var) +else: + # sysconfig can be trusted from cpython >= 3.8.7 + import sysconfig + + +def get_config_var(name, default=''): + return sysconfig.get_config_var(name) or default + +INCDIR = sysconfig.get_path('include') +LIBDIR1 = get_config_var('LIBDIR') +LIBDIR2 = get_config_var('LIBPL') +PYLIB = get_config_var('LIBRARY') +PYLIB_DYN = get_config_var('LDLIBRARY') +if PYLIB_DYN == PYLIB: + # no shared library + PYLIB_DYN = '' +else: + PYLIB_DYN = os.path.splitext(PYLIB_DYN[3:])[0] # 'lib(XYZ).so' -> XYZ + +CC = get_config_var('CC', os.environ.get('CC', '')) +CFLAGS = get_config_var('CFLAGS') + ' ' + os.environ.get('CFLAGS', '') +LINKCC = get_config_var('LINKCC', os.environ.get('LINKCC', CC)) +LINKFORSHARED = get_config_var('LINKFORSHARED') +LIBS = get_config_var('LIBS') +SYSLIBS = get_config_var('SYSLIBS') +EXE_EXT = sysconfig.get_config_var('EXE') + + +def _debug(msg, *args): + if DEBUG: + if args: + msg = msg % args + sys.stderr.write(msg + '\n') + + +def dump_config(): + _debug('INCDIR: %s', INCDIR) + _debug('LIBDIR1: %s', LIBDIR1) + _debug('LIBDIR2: %s', LIBDIR2) + _debug('PYLIB: %s', PYLIB) + _debug('PYLIB_DYN: %s', PYLIB_DYN) + _debug('CC: %s', CC) + _debug('CFLAGS: %s', CFLAGS) + _debug('LINKCC: %s', LINKCC) + _debug('LINKFORSHARED: %s', LINKFORSHARED) + _debug('LIBS: %s', LIBS) + _debug('SYSLIBS: %s', SYSLIBS) + _debug('EXE_EXT: %s', EXE_EXT) + + +def _parse_args(args): + cy_args = [] + last_arg = None + for i, arg in enumerate(args): + if arg.startswith('-'): + cy_args.append(arg) + elif last_arg in ('-X', '--directive'): + cy_args.append(arg) + else: + input_file = arg + args = args[i+1:] + break + last_arg = arg + else: + raise ValueError('no input file provided') + + return input_file, cy_args, args + + +def runcmd(cmd, shell=True): + if shell: + cmd = ' '.join(cmd) + _debug(cmd) + else: + _debug(' '.join(cmd)) + + import subprocess + returncode = subprocess.call(cmd, shell=shell) + + if returncode: + sys.exit(returncode) + + +def clink(basename): + runcmd([LINKCC, '-o', basename + EXE_EXT, basename+'.o', '-L'+LIBDIR1, '-L'+LIBDIR2] + + [PYLIB_DYN and ('-l'+PYLIB_DYN) or os.path.join(LIBDIR1, PYLIB)] + + LIBS.split() + SYSLIBS.split() + LINKFORSHARED.split()) + + +def ccompile(basename): + runcmd([CC, '-c', '-o', basename+'.o', basename+'.c', '-I' + INCDIR] + CFLAGS.split()) + + +def cycompile(input_file, options=()): + from ..Compiler import Version, CmdLine, Main + options, sources = CmdLine.parse_command_line(list(options or ()) + ['--embed', input_file]) + _debug('Using Cython %s to compile %s', Version.version, input_file) + result = 
Main.compile(sources, options) + if result.num_errors > 0: + sys.exit(1) + + +def exec_file(program_name, args=()): + runcmd([os.path.abspath(program_name)] + list(args), shell=False) + + +def build(input_file, compiler_args=(), force=False): + """ + Build an executable program from a Cython module. + + Returns the name of the executable file. + """ + basename = os.path.splitext(input_file)[0] + exe_file = basename + EXE_EXT + if not force and os.path.abspath(exe_file) == os.path.abspath(input_file): + raise ValueError("Input and output file names are the same, refusing to overwrite") + if (not force and os.path.exists(exe_file) and os.path.exists(input_file) + and os.path.getmtime(input_file) <= os.path.getmtime(exe_file)): + _debug("File is up to date, not regenerating %s", exe_file) + return exe_file + cycompile(input_file, compiler_args) + ccompile(basename) + clink(basename) + return exe_file + + +def build_and_run(args): + """ + Build an executable program from a Cython module and run it. + + Arguments after the module name will be passed verbatimly to the program. + """ + program_name, args = _build(args) + exec_file(program_name, args) + + +def _build(args): + input_file, cy_args, args = _parse_args(args) + program_name = build(input_file, cy_args) + return program_name, args + + +if __name__ == '__main__': + _build(sys.argv[1:]) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Cythonize.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Cythonize.py new file mode 100644 index 0000000000000000000000000000000000000000..b4beb21995f7b0ce0fe20e34ac54ee0dbe0a149d --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Cythonize.py @@ -0,0 +1,255 @@ +from __future__ import absolute_import, print_function + +import os +import shutil +import tempfile + +from .Dependencies import cythonize, extended_iglob +from ..Utils import is_package_dir +from ..Compiler import Options + +try: + import multiprocessing + parallel_compiles = int(multiprocessing.cpu_count() * 1.5) +except ImportError: + multiprocessing = None + parallel_compiles = 0 + + +class _FakePool(object): + def map_async(self, func, args): + try: + from itertools import imap + except ImportError: + imap=map + for _ in imap(func, args): + pass + + def close(self): + pass + + def terminate(self): + pass + + def join(self): + pass + + +def find_package_base(path): + base_dir, package_path = os.path.split(path) + while is_package_dir(base_dir): + base_dir, parent = os.path.split(base_dir) + package_path = '%s/%s' % (parent, package_path) + return base_dir, package_path + +def cython_compile(path_pattern, options): + all_paths = map(os.path.abspath, extended_iglob(path_pattern)) + _cython_compile_files(all_paths, options) + +def _cython_compile_files(all_paths, options): + pool = None + try: + for path in all_paths: + if options.build_inplace: + base_dir = path + while not os.path.isdir(base_dir) or is_package_dir(base_dir): + base_dir = os.path.dirname(base_dir) + else: + base_dir = None + + if os.path.isdir(path): + # recursively compiling a package + paths = [os.path.join(path, '**', '*.{py,pyx}')] + else: + # assume it's a file(-like thing) + paths = [path] + + ext_modules = cythonize( + paths, + nthreads=options.parallel, + exclude_failures=options.keep_going, + exclude=options.excludes, + compiler_directives=options.directives, + compile_time_env=options.compile_time_env, + force=options.force, + quiet=options.quiet, + depfile=options.depfile, + language=options.language, + **options.options) + 
+ if ext_modules and options.build: + if len(ext_modules) > 1 and options.parallel > 1: + if pool is None: + try: + pool = multiprocessing.Pool(options.parallel) + except OSError: + pool = _FakePool() + pool.map_async(run_distutils, [ + (base_dir, [ext]) for ext in ext_modules]) + else: + run_distutils((base_dir, ext_modules)) + except: + if pool is not None: + pool.terminate() + raise + else: + if pool is not None: + pool.close() + pool.join() + + +def run_distutils(args): + try: + from distutils.core import setup + except ImportError: + try: + from setuptools import setup + except ImportError: + raise ImportError("'distutils' is not available. Please install 'setuptools' for binary builds.") + + base_dir, ext_modules = args + script_args = ['build_ext', '-i'] + cwd = os.getcwd() + temp_dir = None + try: + if base_dir: + os.chdir(base_dir) + temp_dir = tempfile.mkdtemp(dir=base_dir) + script_args.extend(['--build-temp', temp_dir]) + setup( + script_name='setup.py', + script_args=script_args, + ext_modules=ext_modules, + ) + finally: + if base_dir: + os.chdir(cwd) + if temp_dir and os.path.isdir(temp_dir): + shutil.rmtree(temp_dir) + + +def create_args_parser(): + from argparse import ArgumentParser, RawDescriptionHelpFormatter + from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction + + parser = ArgumentParser( + formatter_class=RawDescriptionHelpFormatter, + epilog="""\ +Environment variables: + CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless + of modification times and changes. + Environment variables accepted by setuptools are supported to configure the C compiler and build: + https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options""" + ) + + parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...', + dest='directives', default={}, type=str, + action=ParseDirectivesAction, + help='set a compiler directive') + parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...', + dest='compile_time_env', default={}, type=str, + action=ParseCompileTimeEnvAction, + help='set a compile time environment variable') + parser.add_argument('-s', '--option', metavar='NAME=VALUE', + dest='options', default={}, type=str, + action=ParseOptionsAction, + help='set a cythonize option') + parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None, + help='use Python 2 syntax mode by default') + parser.add_argument('-3', dest='language_level', action='store_const', const=3, + help='use Python 3 syntax mode by default') + parser.add_argument('--3str', dest='language_level', action='store_const', const='3str', + help='use Python 3 syntax mode by default') + parser.add_argument('-+', '--cplus', dest='language', action='store_const', const='c++', default=None, + help='Compile as C++ rather than C') + parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate', + help='Produce a colorized HTML version of the source.') + parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate', + help='Produce a colorized HTML version of the source ' + 'which includes entire generated C/C++-code.') + parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes', + action='append', default=[], + help='exclude certain file patterns from the compilation') + + parser.add_argument('-b', '--build', dest='build', action='store_true', default=None, + help='build extension modules 
using distutils/setuptools') + parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None, + help='build extension modules in place using distutils/setuptools (implies -b)') + parser.add_argument('-j', '--parallel', dest='parallel', metavar='N', + type=int, default=parallel_compiles, + help=('run builds in N parallel jobs (default: %d)' % + parallel_compiles or 1)) + parser.add_argument('-f', '--force', dest='force', action='store_true', default=None, + help='force recompilation') + parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None, + help='be less verbose during compilation') + + parser.add_argument('--lenient', dest='lenient', action='store_true', default=None, + help='increase Python compatibility by ignoring some compile time errors') + parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None, + help='compile as much as possible, ignore compilation failures') + parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None, + help='strip docstrings') + parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources') + parser.add_argument('sources', nargs='*') + return parser + + +def parse_args_raw(parser, args): + options, unknown = parser.parse_known_args(args) + sources = options.sources + # if positional arguments were interspersed + # some of them are in unknown + for option in unknown: + if option.startswith('-'): + parser.error("unknown option "+option) + else: + sources.append(option) + del options.sources + return (options, sources) + + +def parse_args(args): + parser = create_args_parser() + options, args = parse_args_raw(parser, args) + + if not args: + parser.error("no source files provided") + if options.build_inplace: + options.build = True + if multiprocessing is None: + options.parallel = 0 + if options.language_level: + assert options.language_level in (2, 3, '3str') + options.options['language_level'] = options.language_level + + if options.lenient: + # increase Python compatibility by ignoring compile time errors + Options.error_on_unknown_names = False + Options.error_on_uninitialized = False + + if options.annotate: + Options.annotate = options.annotate + + if options.no_docstrings: + Options.docstrings = False + + return options, args + + +def main(args=None): + options, paths = parse_args(args) + + all_paths = [] + for path in paths: + expanded_path = [os.path.abspath(p) for p in extended_iglob(path)] + if not expanded_path: + import sys + print("{}: No such file or directory: '{}'".format(sys.argv[0], path), file=sys.stderr) + sys.exit(1) + all_paths.extend(expanded_path) + _cython_compile_files(all_paths, options) + + +if __name__ == '__main__': + main() diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Dependencies.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..7de406516c00c5478726518ec4fc99b91f3b7286 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Dependencies.py @@ -0,0 +1,1380 @@ +from __future__ import absolute_import, print_function + +import cython +from .. 
import __version__ + +import collections +import contextlib +import hashlib +import os +import shutil +import subprocess +import re, sys, time +from glob import iglob +from io import open as io_open +from os.path import relpath as _relpath +import zipfile + +try: + from collections.abc import Iterable +except ImportError: + from collections import Iterable + +try: + import gzip + gzip_open = gzip.open + gzip_ext = '.gz' +except ImportError: + gzip_open = open + gzip_ext = '' + +try: + import zlib + zipfile_compression_mode = zipfile.ZIP_DEFLATED +except ImportError: + zipfile_compression_mode = zipfile.ZIP_STORED + +try: + import pythran +except: + pythran = None + +from .. import Utils +from ..Utils import (cached_function, cached_method, path_exists, + safe_makedirs, copy_file_to_dir_if_newer, is_package_dir, write_depfile) +from ..Compiler import Errors +from ..Compiler.Main import Context +from ..Compiler.Options import (CompilationOptions, default_options, + get_directive_defaults) + +join_path = cached_function(os.path.join) +copy_once_if_newer = cached_function(copy_file_to_dir_if_newer) +safe_makedirs_once = cached_function(safe_makedirs) + +if sys.version_info[0] < 3: + # stupid Py2 distutils enforces str type in list of sources + _fs_encoding = sys.getfilesystemencoding() + if _fs_encoding is None: + _fs_encoding = sys.getdefaultencoding() + def encode_filename_in_py2(filename): + if not isinstance(filename, bytes): + return filename.encode(_fs_encoding) + return filename +else: + def encode_filename_in_py2(filename): + return filename + basestring = str + + +def _make_relative(file_paths, base=None): + if not base: + base = os.getcwd() + if base[-1] != os.path.sep: + base += os.path.sep + return [_relpath(path, base) if path.startswith(base) else path + for path in file_paths] + + +def extended_iglob(pattern): + if '{' in pattern: + m = re.match('(.*){([^}]+)}(.*)', pattern) + if m: + before, switch, after = m.groups() + for case in switch.split(','): + for path in extended_iglob(before + case + after): + yield path + return + + # We always accept '/' and also '\' on Windows, + # because '/' is generally common for relative paths. 
+ if '**/' in pattern or os.sep == '\\' and '**\\' in pattern: + seen = set() + first, rest = re.split(r'\*\*[%s]' % ('/\\\\' if os.sep == '\\' else '/'), pattern, 1) + if first: + first = iglob(first + os.sep) + else: + first = [''] + for root in first: + for path in extended_iglob(join_path(root, rest)): + if path not in seen: + seen.add(path) + yield path + for path in extended_iglob(join_path(root, '*', '**', rest)): + if path not in seen: + seen.add(path) + yield path + else: + for path in iglob(pattern): + yield path + + +def nonempty(it, error_msg="expected non-empty iterator"): + empty = True + for value in it: + empty = False + yield value + if empty: + raise ValueError(error_msg) + + +@cached_function +def file_hash(filename): + path = os.path.normpath(filename) + prefix = ('%d:%s' % (len(path), path)).encode("UTF-8") + m = hashlib.sha1(prefix) + with open(path, 'rb') as f: + data = f.read(65000) + while data: + m.update(data) + data = f.read(65000) + return m.hexdigest() + + +def update_pythran_extension(ext): + if pythran is None: + raise RuntimeError("You first need to install Pythran to use the np_pythran directive.") + try: + pythran_ext = pythran.config.make_extension(python=True) + except TypeError: # older pythran version only + pythran_ext = pythran.config.make_extension() + + ext.include_dirs.extend(pythran_ext['include_dirs']) + ext.extra_compile_args.extend(pythran_ext['extra_compile_args']) + ext.extra_link_args.extend(pythran_ext['extra_link_args']) + ext.define_macros.extend(pythran_ext['define_macros']) + ext.undef_macros.extend(pythran_ext['undef_macros']) + ext.library_dirs.extend(pythran_ext['library_dirs']) + ext.libraries.extend(pythran_ext['libraries']) + ext.language = 'c++' + + # These options are not compatible with the way normal Cython extensions work + for bad_option in ["-fwhole-program", "-fvisibility=hidden"]: + try: + ext.extra_compile_args.remove(bad_option) + except ValueError: + pass + + +def parse_list(s): + """ + >>> parse_list("") + [] + >>> parse_list("a") + ['a'] + >>> parse_list("a b c") + ['a', 'b', 'c'] + >>> parse_list("[a, b, c]") + ['a', 'b', 'c'] + >>> parse_list('a " " b') + ['a', ' ', 'b'] + >>> parse_list('[a, ",a", "a,", ",", ]') + ['a', ',a', 'a,', ','] + """ + if len(s) >= 2 and s[0] == '[' and s[-1] == ']': + s = s[1:-1] + delimiter = ',' + else: + delimiter = ' ' + s, literals = strip_string_literals(s) + def unquote(literal): + literal = literal.strip() + if literal[0] in "'\"": + return literals[literal[1:-1]] + else: + return literal + return [unquote(item) for item in s.split(delimiter) if item.strip()] + + +transitive_str = object() +transitive_list = object() +bool_or = object() + +distutils_settings = { + 'name': str, + 'sources': list, + 'define_macros': list, + 'undef_macros': list, + 'libraries': transitive_list, + 'library_dirs': transitive_list, + 'runtime_library_dirs': transitive_list, + 'include_dirs': transitive_list, + 'extra_objects': list, + 'extra_compile_args': transitive_list, + 'extra_link_args': transitive_list, + 'export_symbols': list, + 'depends': transitive_list, + 'language': transitive_str, + 'np_pythran': bool_or +} + + +def _legacy_strtobool(val): + # Used to be "distutils.util.strtobool", adapted for deprecation warnings. 
+ if val == "True": + return True + elif val == "False": + return False + + import warnings + warnings.warn("The 'np_python' option requires 'True' or 'False'", category=DeprecationWarning) + val = val.lower() + if val in ('y', 'yes', 't', 'true', 'on', '1'): + return True + elif val in ('n', 'no', 'f', 'false', 'off', '0'): + return False + else: + raise ValueError("invalid truth value %r" % (val,)) + + +@cython.locals(start=cython.Py_ssize_t, end=cython.Py_ssize_t) +def line_iter(source): + if isinstance(source, basestring): + start = 0 + while True: + end = source.find('\n', start) + if end == -1: + yield source[start:] + return + yield source[start:end] + start = end+1 + else: + for line in source: + yield line + + +class DistutilsInfo(object): + + def __init__(self, source=None, exn=None): + self.values = {} + if source is not None: + for line in line_iter(source): + line = line.lstrip() + if not line: + continue + if line[0] != '#': + break + line = line[1:].lstrip() + kind = next((k for k in ("distutils:","cython:") if line.startswith(k)), None) + if kind is not None: + key, _, value = [s.strip() for s in line[len(kind):].partition('=')] + type = distutils_settings.get(key, None) + if line.startswith("cython:") and type is None: continue + if type in (list, transitive_list): + value = parse_list(value) + if key == 'define_macros': + value = [tuple(macro.split('=', 1)) + if '=' in macro else (macro, None) + for macro in value] + if type is bool_or: + value = _legacy_strtobool(value) + self.values[key] = value + elif exn is not None: + for key in distutils_settings: + if key in ('name', 'sources','np_pythran'): + continue + value = getattr(exn, key, None) + if value: + self.values[key] = value + + def merge(self, other): + if other is None: + return self + for key, value in other.values.items(): + type = distutils_settings[key] + if type is transitive_str and key not in self.values: + self.values[key] = value + elif type is transitive_list: + if key in self.values: + # Change a *copy* of the list (Trac #845) + all = self.values[key][:] + for v in value: + if v not in all: + all.append(v) + value = all + self.values[key] = value + elif type is bool_or: + self.values[key] = self.values.get(key, False) | value + return self + + def subs(self, aliases): + if aliases is None: + return self + resolved = DistutilsInfo() + for key, value in self.values.items(): + type = distutils_settings[key] + if type in [list, transitive_list]: + new_value_list = [] + for v in value: + if v in aliases: + v = aliases[v] + if isinstance(v, list): + new_value_list += v + else: + new_value_list.append(v) + value = new_value_list + else: + if value in aliases: + value = aliases[value] + resolved.values[key] = value + return resolved + + def apply(self, extension): + for key, value in self.values.items(): + type = distutils_settings[key] + if type in [list, transitive_list]: + value = getattr(extension, key) + list(value) + setattr(extension, key, value) + + +@cython.locals(start=cython.Py_ssize_t, q=cython.Py_ssize_t, + single_q=cython.Py_ssize_t, double_q=cython.Py_ssize_t, + hash_mark=cython.Py_ssize_t, end=cython.Py_ssize_t, + k=cython.Py_ssize_t, counter=cython.Py_ssize_t, quote_len=cython.Py_ssize_t) +def strip_string_literals(code, prefix='__Pyx_L'): + """ + Normalizes every string literal to be of the form '__Pyx_Lxxx', + returning the normalized code and a mapping of labels to + string literals. 
+ """ + new_code = [] + literals = {} + counter = 0 + start = q = 0 + in_quote = False + hash_mark = single_q = double_q = -1 + code_len = len(code) + quote_type = None + quote_len = -1 + + while True: + if hash_mark < q: + hash_mark = code.find('#', q) + if single_q < q: + single_q = code.find("'", q) + if double_q < q: + double_q = code.find('"', q) + q = min(single_q, double_q) + if q == -1: + q = max(single_q, double_q) + + # We're done. + if q == -1 and hash_mark == -1: + new_code.append(code[start:]) + break + + # Try to close the quote. + elif in_quote: + if code[q-1] == u'\\': + k = 2 + while q >= k and code[q-k] == u'\\': + k += 1 + if k % 2 == 0: + q += 1 + continue + if code[q] == quote_type and ( + quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])): + counter += 1 + label = "%s%s_" % (prefix, counter) + literals[label] = code[start+quote_len:q] + full_quote = code[q:q+quote_len] + new_code.append(full_quote) + new_code.append(label) + new_code.append(full_quote) + q += quote_len + in_quote = False + start = q + else: + q += 1 + + # Process comment. + elif -1 != hash_mark and (hash_mark < q or q == -1): + new_code.append(code[start:hash_mark+1]) + end = code.find('\n', hash_mark) + counter += 1 + label = "%s%s_" % (prefix, counter) + if end == -1: + end_or_none = None + else: + end_or_none = end + literals[label] = code[hash_mark+1:end_or_none] + new_code.append(label) + if end == -1: + break + start = q = end + + # Open the quote. + else: + if code_len >= q+3 and (code[q] == code[q+1] == code[q+2]): + quote_len = 3 + else: + quote_len = 1 + in_quote = True + quote_type = code[q] + new_code.append(code[start:q]) + start = q + q += quote_len + + return "".join(new_code), literals + + +# We need to allow spaces to allow for conditional compilation like +# IF ...: +# cimport ... +dependency_regex = re.compile(r"(?:^\s*from +([0-9a-zA-Z_.]+) +cimport)|" + r"(?:^\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|" + r"(?:^\s*cdef +extern +from +['\"]([^'\"]+)['\"])|" + r"(?:^\s*include +['\"]([^'\"]+)['\"])", re.M) +dependency_after_from_regex = re.compile( + r"(?:^\s+\(([0-9a-zA-Z_., ]*)\)[#\n])|" + r"(?:^\s+([0-9a-zA-Z_., ]*)[#\n])", + re.M) + + +def normalize_existing(base_path, rel_paths): + return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths))) + + +@cached_function +def normalize_existing0(base_dir, rel_paths): + """ + Given some base directory ``base_dir`` and a list of path names + ``rel_paths``, normalize each relative path name ``rel`` by + replacing it by ``os.path.join(base, rel)`` if that file exists. + + Return a couple ``(normalized, needed_base)`` where ``normalized`` + if the list of normalized file names and ``needed_base`` is + ``base_dir`` if we actually needed ``base_dir``. If no paths were + changed (for example, if all paths were already absolute), then + ``needed_base`` is ``None``. 
+ """ + normalized = [] + needed_base = None + for rel in rel_paths: + if os.path.isabs(rel): + normalized.append(rel) + continue + path = join_path(base_dir, rel) + if path_exists(path): + normalized.append(os.path.normpath(path)) + needed_base = base_dir + else: + normalized.append(rel) + return (normalized, needed_base) + + +def resolve_depends(depends, include_dirs): + include_dirs = tuple(include_dirs) + resolved = [] + for depend in depends: + path = resolve_depend(depend, include_dirs) + if path is not None: + resolved.append(path) + return resolved + + +@cached_function +def resolve_depend(depend, include_dirs): + if depend[0] == '<' and depend[-1] == '>': + return None + for dir in include_dirs: + path = join_path(dir, depend) + if path_exists(path): + return os.path.normpath(path) + return None + + +@cached_function +def package(filename): + dir = os.path.dirname(os.path.abspath(str(filename))) + if dir != filename and is_package_dir(dir): + return package(dir) + (os.path.basename(dir),) + else: + return () + + +@cached_function +def fully_qualified_name(filename): + module = os.path.splitext(os.path.basename(filename))[0] + return '.'.join(package(filename) + (module,)) + + +@cached_function +def parse_dependencies(source_filename): + # Actual parsing is way too slow, so we use regular expressions. + # The only catch is that we must strip comments and string + # literals ahead of time. + with Utils.open_source_file(source_filename, error_handling='ignore') as fh: + source = fh.read() + distutils_info = DistutilsInfo(source) + source, literals = strip_string_literals(source) + source = source.replace('\\\n', ' ').replace('\t', ' ') + + # TODO: pure mode + cimports = [] + includes = [] + externs = [] + for m in dependency_regex.finditer(source): + cimport_from, cimport_list, extern, include = m.groups() + if cimport_from: + cimports.append(cimport_from) + m_after_from = dependency_after_from_regex.search(source, pos=m.end()) + if m_after_from: + multiline, one_line = m_after_from.groups() + subimports = multiline or one_line + cimports.extend("{0}.{1}".format(cimport_from, s.strip()) + for s in subimports.split(',')) + + elif cimport_list: + cimports.extend(x.strip() for x in cimport_list.split(",")) + elif extern: + externs.append(literals[extern]) + else: + includes.append(literals[include]) + return cimports, includes, externs, distutils_info + + +class DependencyTree(object): + + def __init__(self, context, quiet=False): + self.context = context + self.quiet = quiet + self._transitive_cache = {} + + def parse_dependencies(self, source_filename): + if path_exists(source_filename): + source_filename = os.path.normpath(source_filename) + return parse_dependencies(source_filename) + + @cached_method + def included_files(self, filename): + # This is messy because included files are textually included, resolving + # cimports (but not includes) relative to the including file. + all = set() + for include in self.parse_dependencies(filename)[1]: + include_path = join_path(os.path.dirname(filename), include) + if not path_exists(include_path): + include_path = self.context.find_include_file(include, source_file_path=filename) + if include_path: + if '.' 
+ os.path.sep in include_path: + include_path = os.path.normpath(include_path) + all.add(include_path) + all.update(self.included_files(include_path)) + elif not self.quiet: + print(u"Unable to locate '%s' referenced from '%s'" % (filename, include)) + return all + + @cached_method + def cimports_externs_incdirs(self, filename): + # This is really ugly. Nested cimports are resolved with respect to the + # includer, but includes are resolved with respect to the includee. + cimports, includes, externs = self.parse_dependencies(filename)[:3] + cimports = set(cimports) + externs = set(externs) + incdirs = set() + for include in self.included_files(filename): + included_cimports, included_externs, included_incdirs = self.cimports_externs_incdirs(include) + cimports.update(included_cimports) + externs.update(included_externs) + incdirs.update(included_incdirs) + externs, incdir = normalize_existing(filename, externs) + if incdir: + incdirs.add(incdir) + return tuple(cimports), externs, incdirs + + def cimports(self, filename): + return self.cimports_externs_incdirs(filename)[0] + + def package(self, filename): + return package(filename) + + def fully_qualified_name(self, filename): + return fully_qualified_name(filename) + + @cached_method + def find_pxd(self, module, filename=None): + is_relative = module[0] == '.' + if is_relative and not filename: + raise NotImplementedError("New relative imports.") + if filename is not None: + module_path = module.split('.') + if is_relative: + module_path.pop(0) # just explicitly relative + package_path = list(self.package(filename)) + while module_path and not module_path[0]: + try: + package_path.pop() + except IndexError: + return None # FIXME: error? + module_path.pop(0) + relative = '.'.join(package_path + module_path) + pxd = self.context.find_pxd_file(relative, source_file_path=filename) + if pxd: + return pxd + if is_relative: + return None # FIXME: error? + return self.context.find_pxd_file(module, source_file_path=filename) + + @cached_method + def cimported_files(self, filename): + filename_root, filename_ext = os.path.splitext(filename) + if filename_ext in ('.pyx', '.py') and path_exists(filename_root + '.pxd'): + pxd_list = [filename_root + '.pxd'] + else: + pxd_list = [] + # Cimports generates all possible combinations package.module + # when imported as from package cimport module. + for module in self.cimports(filename): + if module[:7] == 'cython.' or module == 'cython': + continue + pxd_file = self.find_pxd(module, filename) + if pxd_file is not None: + pxd_list.append(pxd_file) + return tuple(pxd_list) + + @cached_method + def immediate_dependencies(self, filename): + all_deps = {filename} + all_deps.update(self.cimported_files(filename)) + all_deps.update(self.included_files(filename)) + return all_deps + + def all_dependencies(self, filename): + return self.transitive_merge(filename, self.immediate_dependencies, set.union) + + @cached_method + def timestamp(self, filename): + return os.path.getmtime(filename) + + def extract_timestamp(self, filename): + return self.timestamp(filename), filename + + def newest_dependency(self, filename): + return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)]) + + def transitive_fingerprint(self, filename, module, compilation_options): + r""" + Return a fingerprint of a cython file that is about to be cythonized. + + Fingerprints are looked up in future compilations. If the fingerprint + is found, the cythonization can be skipped. 
The fingerprint must + incorporate everything that has an influence on the generated code. + """ + try: + m = hashlib.sha1(__version__.encode('UTF-8')) + m.update(file_hash(filename).encode('UTF-8')) + for x in sorted(self.all_dependencies(filename)): + if os.path.splitext(x)[1] not in ('.c', '.cpp', '.h'): + m.update(file_hash(x).encode('UTF-8')) + # Include the module attributes that change the compilation result + # in the fingerprint. We do not iterate over module.__dict__ and + # include almost everything here as users might extend Extension + # with arbitrary (random) attributes that would lead to cache + # misses. + m.update(str(( + module.language, + getattr(module, 'py_limited_api', False), + getattr(module, 'np_pythran', False) + )).encode('UTF-8')) + + m.update(compilation_options.get_fingerprint().encode('UTF-8')) + return m.hexdigest() + except IOError: + return None + + def distutils_info0(self, filename): + info = self.parse_dependencies(filename)[3] + kwds = info.values + cimports, externs, incdirs = self.cimports_externs_incdirs(filename) + basedir = os.getcwd() + # Add dependencies on "cdef extern from ..." files + if externs: + externs = _make_relative(externs, basedir) + if 'depends' in kwds: + kwds['depends'] = list(set(kwds['depends']).union(externs)) + else: + kwds['depends'] = list(externs) + # Add include_dirs to ensure that the C compiler will find the + # "cdef extern from ..." files + if incdirs: + include_dirs = list(kwds.get('include_dirs', [])) + for inc in _make_relative(incdirs, basedir): + if inc not in include_dirs: + include_dirs.append(inc) + kwds['include_dirs'] = include_dirs + return info + + def distutils_info(self, filename, aliases=None, base=None): + return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge) + .subs(aliases) + .merge(base)) + + def transitive_merge(self, node, extract, merge): + try: + seen = self._transitive_cache[extract, merge] + except KeyError: + seen = self._transitive_cache[extract, merge] = {} + return self.transitive_merge_helper( + node, extract, merge, seen, {}, self.cimported_files)[0] + + def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing): + if node in seen: + return seen[node], None + deps = extract(node) + if node in stack: + return deps, node + try: + stack[node] = len(stack) + loop = None + for next in outgoing(node): + sub_deps, sub_loop = self.transitive_merge_helper(next, extract, merge, seen, stack, outgoing) + if sub_loop is not None: + if loop is not None and stack[loop] < stack[sub_loop]: + pass + else: + loop = sub_loop + deps = merge(deps, sub_deps) + if loop == node: + loop = None + if loop is None: + seen[node] = deps + return deps, loop + finally: + del stack[node] + + +_dep_tree = None + +def create_dependency_tree(ctx=None, quiet=False): + global _dep_tree + if _dep_tree is None: + if ctx is None: + ctx = Context(["."], get_directive_defaults(), + options=CompilationOptions(default_options)) + _dep_tree = DependencyTree(ctx, quiet=quiet) + return _dep_tree + + +# If this changes, change also docs/src/reference/compilation.rst +# which mentions this function +def default_create_extension(template, kwds): + if 'depends' in kwds: + include_dirs = kwds.get('include_dirs', []) + ["."] + depends = resolve_depends(kwds['depends'], include_dirs) + kwds['depends'] = sorted(set(depends + template.depends)) + + t = template.__class__ + ext = t(**kwds) + metadata = dict(distutils=kwds, module_name=kwds['name']) + return (ext, metadata) + + +# This may be 
useful for advanced users? +def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None, + exclude_failures=False): + if language is not None: + print('Warning: passing language={0!r} to cythonize() is deprecated. ' + 'Instead, put "# distutils: language={0}" in your .pyx or .pxd file(s)'.format(language)) + if exclude is None: + exclude = [] + if patterns is None: + return [], {} + elif isinstance(patterns, basestring) or not isinstance(patterns, Iterable): + patterns = [patterns] + + from distutils.extension import Extension + if 'setuptools' in sys.modules: + # Support setuptools Extension instances as well. + extension_classes = ( + Extension, # should normally be the same as 'setuptools.extension._Extension' + sys.modules['setuptools.extension']._Extension, + sys.modules['setuptools'].Extension, + ) + else: + extension_classes = (Extension,) + + explicit_modules = {m.name for m in patterns if isinstance(m, extension_classes)} + deps = create_dependency_tree(ctx, quiet=quiet) + + to_exclude = set() + if not isinstance(exclude, list): + exclude = [exclude] + for pattern in exclude: + to_exclude.update(map(os.path.abspath, extended_iglob(pattern))) + + module_list = [] + module_metadata = {} + + # if no create_extension() function is defined, use a simple + # default function. + create_extension = ctx.options.create_extension or default_create_extension + + seen = set() + for pattern in patterns: + if not isinstance(pattern, extension_classes): + pattern = encode_filename_in_py2(pattern) + if isinstance(pattern, str): + filepattern = pattern + template = Extension(pattern, []) # Fake Extension without sources + name = '*' + base = None + ext_language = language + elif isinstance(pattern, extension_classes): + cython_sources = [s for s in pattern.sources + if os.path.splitext(s)[1] in ('.py', '.pyx')] + if cython_sources: + filepattern = cython_sources[0] + if len(cython_sources) > 1: + print(u"Warning: Multiple cython sources found for extension '%s': %s\n" + u"See https://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html " + u"for sharing declarations among Cython files." % (pattern.name, cython_sources)) + else: + # ignore non-cython modules + module_list.append(pattern) + continue + template = pattern + name = template.name + base = DistutilsInfo(exn=template) + ext_language = None # do not override whatever the Extension says + else: + msg = str("pattern is not of type str nor subclass of Extension (%s)" + " but of type %s and class %s" % (repr(Extension), + type(pattern), + pattern.__class__)) + raise TypeError(msg) + + for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern): + if os.path.abspath(file) in to_exclude: + continue + module_name = deps.fully_qualified_name(file) + if '*' in name: + if module_name in explicit_modules: + continue + elif name: + module_name = name + + Utils.raise_error_if_module_name_forbidden(module_name) + + if module_name not in seen: + try: + kwds = deps.distutils_info(file, aliases, base).values + except Exception: + if exclude_failures: + continue + raise + if base is not None: + for key, value in base.values.items(): + if key not in kwds: + kwds[key] = value + + kwds['name'] = module_name + + sources = [file] + [m for m in template.sources if m != filepattern] + if 'sources' in kwds: + # allow users to add .c files etc. 
+ for source in kwds['sources']: + source = encode_filename_in_py2(source) + if source not in sources: + sources.append(source) + kwds['sources'] = sources + + if ext_language and 'language' not in kwds: + kwds['language'] = ext_language + + np_pythran = kwds.pop('np_pythran', False) + + # Create the new extension + m, metadata = create_extension(template, kwds) + m.np_pythran = np_pythran or getattr(m, 'np_pythran', False) + if m.np_pythran: + update_pythran_extension(m) + module_list.append(m) + + # Store metadata (this will be written as JSON in the + # generated C file but otherwise has no purpose) + module_metadata[module_name] = metadata + + if file not in m.sources: + # Old setuptools unconditionally replaces .pyx with .c/.cpp + target_file = os.path.splitext(file)[0] + ('.cpp' if m.language == 'c++' else '.c') + try: + m.sources.remove(target_file) + except ValueError: + # never seen this in the wild, but probably better to warn about this unexpected case + print(u"Warning: Cython source file not found in sources list, adding %s" % file) + m.sources.insert(0, file) + seen.add(name) + return module_list, module_metadata + + +# This is the user-exposed entry point. +def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=None, language=None, + exclude_failures=False, show_all_warnings=False, **options): + """ + Compile a set of source modules into C/C++ files and return a list of distutils + Extension objects for them. + + :param module_list: As module list, pass either a glob pattern, a list of glob + patterns or a list of Extension objects. The latter + allows you to configure the extensions separately + through the normal distutils options. + You can also pass Extension objects that have + glob patterns as their sources. Then, cythonize + will resolve the pattern and create a + copy of the Extension for every matching file. + + :param exclude: When passing glob patterns as ``module_list``, you can exclude certain + module names explicitly by passing them into the ``exclude`` option. + + :param nthreads: The number of concurrent builds for parallel compilation + (requires the ``multiprocessing`` module). + + :param aliases: If you want to use compiler directives like ``# distutils: ...`` but + can only know at compile time (when running the ``setup.py``) which values + to use, you can use aliases and pass a dictionary mapping those aliases + to Python strings when calling :func:`cythonize`. As an example, say you + want to use the compiler + directive ``# distutils: include_dirs = ../static_libs/include/`` + but this path isn't always fixed and you want to find it when running + the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``, + find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python + variable called ``foo`` as a string, and then call + ``cythonize(..., aliases={'MY_HEADERS': foo})``. + + :param quiet: If True, Cython won't print error, warning, or status messages during the + compilation. + + :param force: Forces the recompilation of the Cython modules, even if the timestamps + don't indicate that a recompilation is necessary. + + :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this + will be determined at a per-file level based on compiler directives. This + affects only modules found based on file names. Extension instances passed + into :func:`cythonize` will not be changed. 
It is recommended to rather + use the compiler directive ``# distutils: language = c++`` than this option. + + :param exclude_failures: For a broad 'try to compile' mode that ignores compilation + failures and simply excludes the failed extensions, + pass ``exclude_failures=True``. Note that this only + really makes sense for compiling ``.py`` files which can also + be used without compilation. + + :param show_all_warnings: By default, not all Cython warnings are printed. + Set to true to show all warnings. + + :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py`` + files compiled. The HTML file gives an indication + of how much Python interaction there is in + each of the source code lines, compared to plain C code. + It also allows you to see the C/C++ code + generated for each line of Cython code. This report is invaluable when + optimizing a function for speed, + and for determining when to :ref:`release the GIL `: + in general, a ``nogil`` block may contain only "white" code. + See examples in :ref:`determining_where_to_add_types` or + :ref:`primes`. + + + :param annotate-fullc: If ``True`` will produce a colorized HTML version of + the source which includes entire generated C/C++-code. + + + :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this: + ``compiler_directives={'embedsignature': True}``. + See :ref:`compiler-directives`. + + :param depfile: produce depfiles for the sources if True. + """ + if exclude is None: + exclude = [] + if 'include_path' not in options: + options['include_path'] = ['.'] + if 'common_utility_include_dir' in options: + safe_makedirs(options['common_utility_include_dir']) + + depfile = options.pop('depfile', None) + + if pythran is None: + pythran_options = None + else: + pythran_options = CompilationOptions(**options) + pythran_options.cplus = True + pythran_options.np_pythran = True + + if force is None: + force = os.environ.get("CYTHON_FORCE_REGEN") == "1" # allow global overrides for build systems + + c_options = CompilationOptions(**options) + cpp_options = CompilationOptions(**options); cpp_options.cplus = True + ctx = Context.from_options(c_options) + options = c_options + module_list, module_metadata = create_extension_list( + module_list, + exclude=exclude, + ctx=ctx, + quiet=quiet, + exclude_failures=exclude_failures, + language=language, + aliases=aliases) + + fix_windows_unicode_modules(module_list) + + deps = create_dependency_tree(ctx, quiet=quiet) + build_dir = getattr(options, 'build_dir', None) + + def copy_to_build_dir(filepath, root=os.getcwd()): + filepath_abs = os.path.abspath(filepath) + if os.path.isabs(filepath): + filepath = filepath_abs + if filepath_abs.startswith(root): + # distutil extension depends are relative to cwd + mod_dir = join_path(build_dir, + os.path.dirname(_relpath(filepath, root))) + copy_once_if_newer(filepath_abs, mod_dir) + + modules_by_cfile = collections.defaultdict(list) + to_compile = [] + for m in module_list: + if build_dir: + for dep in m.depends: + copy_to_build_dir(dep) + + cy_sources = [ + source for source in m.sources + if os.path.splitext(source)[1] in ('.pyx', '.py')] + if len(cy_sources) == 1: + # normal "special" case: believe the Extension module name to allow user overrides + full_module_name = m.name + else: + # infer FQMN from source files + full_module_name = None + + new_sources = [] + for source in m.sources: + base, ext = os.path.splitext(source) + if ext in ('.pyx', '.py'): + if m.np_pythran: + c_file = base + 
'.cpp' + options = pythran_options + elif m.language == 'c++': + c_file = base + '.cpp' + options = cpp_options + else: + c_file = base + '.c' + options = c_options + + # setup for out of place build directory if enabled + if build_dir: + if os.path.isabs(c_file): + c_file = os.path.splitdrive(c_file)[1] + c_file = c_file.split(os.sep, 1)[1] + c_file = os.path.join(build_dir, c_file) + dir = os.path.dirname(c_file) + safe_makedirs_once(dir) + + # write out the depfile, if requested + if depfile: + dependencies = deps.all_dependencies(source) + write_depfile(c_file, source, dependencies) + + # Missing files and those generated by other Cython versions should always be recreated. + if Utils.file_generated_by_this_cython(c_file): + c_timestamp = os.path.getmtime(c_file) + else: + c_timestamp = -1 + + # Priority goes first to modified files, second to direct + # dependents, and finally to indirect dependents. + if c_timestamp < deps.timestamp(source): + dep_timestamp, dep = deps.timestamp(source), source + priority = 0 + else: + dep_timestamp, dep = deps.newest_dependency(source) + priority = 2 - (dep in deps.immediate_dependencies(source)) + if force or c_timestamp < dep_timestamp: + if not quiet and not force: + if source == dep: + print(u"Compiling %s because it changed." % Utils.decode_filename(source)) + else: + print(u"Compiling %s because it depends on %s." % ( + Utils.decode_filename(source), + Utils.decode_filename(dep), + )) + if not force and options.cache: + fingerprint = deps.transitive_fingerprint(source, m, options) + else: + fingerprint = None + to_compile.append(( + priority, source, c_file, fingerprint, quiet, + options, not exclude_failures, module_metadata.get(m.name), + full_module_name, show_all_warnings)) + new_sources.append(c_file) + modules_by_cfile[c_file].append(m) + else: + new_sources.append(source) + if build_dir: + copy_to_build_dir(source) + m.sources = new_sources + + if options.cache: + if not os.path.exists(options.cache): + os.makedirs(options.cache) + to_compile.sort() + # Drop "priority" component of "to_compile" entries and add a + # simple progress indicator. + N = len(to_compile) + progress_fmt = "[{0:%d}/{1}] " % len(str(N)) + for i in range(N): + progress = progress_fmt.format(i+1, N) + to_compile[i] = to_compile[i][1:] + (progress,) + + if N <= 1: + nthreads = 0 + if nthreads: + import multiprocessing + pool = multiprocessing.Pool( + nthreads, initializer=_init_multiprocessing_helper) + # This is a bit more involved than it should be, because KeyboardInterrupts + # break the multiprocessing workers when using a normal pool.map(). 
+ # See, for example: + # https://noswap.com/blog/python-multiprocessing-keyboardinterrupt + try: + result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1) + pool.close() + while not result.ready(): + try: + result.get(99999) # seconds + except multiprocessing.TimeoutError: + pass + except KeyboardInterrupt: + pool.terminate() + raise + pool.join() + else: + for args in to_compile: + cythonize_one(*args) + + if exclude_failures: + failed_modules = set() + for c_file, modules in modules_by_cfile.items(): + if not os.path.exists(c_file): + failed_modules.update(modules) + elif os.path.getsize(c_file) < 200: + f = io_open(c_file, 'r', encoding='iso8859-1') + try: + if f.read(len('#error ')) == '#error ': + # dead compilation result + failed_modules.update(modules) + finally: + f.close() + if failed_modules: + for module in failed_modules: + module_list.remove(module) + print(u"Failed compilations: %s" % ', '.join(sorted([ + module.name for module in failed_modules]))) + + if options.cache: + cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100)) + # cythonize() is often followed by the (non-Python-buffered) + # compiler output, flush now to avoid interleaving output. + sys.stdout.flush() + return module_list + + +def fix_windows_unicode_modules(module_list): + # Hack around a distutils 3.[5678] bug on Windows for unicode module names. + # https://bugs.python.org/issue39432 + if sys.platform != "win32": + return + if sys.version_info < (3, 5) or sys.version_info >= (3, 8, 2): + return + + def make_filtered_list(ignored_symbol, old_entries): + class FilteredExportSymbols(list): + # export_symbols for unicode filename cause link errors on Windows + # Cython doesn't need them (it already defines PyInit with the correct linkage) + # so use this class as a temporary fix to stop them from being generated + def __contains__(self, val): + # so distutils doesn't "helpfully" add PyInit_ + return val == ignored_symbol or list.__contains__(self, val) + + filtered_list = FilteredExportSymbols(old_entries) + if old_entries: + filtered_list.extend(name for name in old_entries if name != ignored_symbol) + return filtered_list + + for m in module_list: + # TODO: use m.name.isascii() in Py3.7+ + try: + m.name.encode("ascii") + continue + except UnicodeEncodeError: + pass + m.export_symbols = make_filtered_list( + "PyInit_" + m.name.rsplit(".", 1)[-1], + m.export_symbols, + ) + + +if os.environ.get('XML_RESULTS'): + compile_result_dir = os.environ['XML_RESULTS'] + def record_results(func): + def with_record(*args): + t = time.time() + success = True + try: + try: + func(*args) + except: + success = False + finally: + t = time.time() - t + module = fully_qualified_name(args[0]) + name = "cythonize." + module + failures = 1 - success + if success: + failure_item = "" + else: + failure_item = "failure" + output = open(os.path.join(compile_result_dir, name + ".xml"), "w") + output.write(""" + + + + %(failure_item)s + + + """.strip() % locals()) + output.close() + return with_record +else: + def record_results(func): + return func + + +# TODO: Share context? 
Issue: pyx processing leaks into pxd module +@record_results +def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, + raise_on_failure=True, embedded_metadata=None, + full_module_name=None, show_all_warnings=False, + progress=""): + from ..Compiler.Main import compile_single, default_options + from ..Compiler.Errors import CompileError, PyrexError + + if fingerprint: + if not os.path.exists(options.cache): + safe_makedirs(options.cache) + # Cython-generated c files are highly compressible. + # (E.g. a compression ratio of about 10 for Sage). + fingerprint_file_base = join_path( + options.cache, "%s-%s" % (os.path.basename(c_file), fingerprint)) + gz_fingerprint_file = fingerprint_file_base + gzip_ext + zip_fingerprint_file = fingerprint_file_base + '.zip' + if os.path.exists(gz_fingerprint_file) or os.path.exists(zip_fingerprint_file): + if not quiet: + print(u"%sFound compiled %s in cache" % (progress, pyx_file)) + if os.path.exists(gz_fingerprint_file): + os.utime(gz_fingerprint_file, None) + with contextlib.closing(gzip_open(gz_fingerprint_file, 'rb')) as g: + with contextlib.closing(open(c_file, 'wb')) as f: + shutil.copyfileobj(g, f) + else: + os.utime(zip_fingerprint_file, None) + dirname = os.path.dirname(c_file) + with contextlib.closing(zipfile.ZipFile(zip_fingerprint_file)) as z: + for artifact in z.namelist(): + z.extract(artifact, os.path.join(dirname, artifact)) + return + if not quiet: + print(u"%sCythonizing %s" % (progress, Utils.decode_filename(pyx_file))) + if options is None: + options = CompilationOptions(default_options) + options.output_file = c_file + options.embedded_metadata = embedded_metadata + + old_warning_level = Errors.LEVEL + if show_all_warnings: + Errors.LEVEL = 0 + + any_failures = 0 + try: + result = compile_single(pyx_file, options, full_module_name=full_module_name) + if result.num_errors > 0: + any_failures = 1 + except (EnvironmentError, PyrexError) as e: + sys.stderr.write('%s\n' % e) + any_failures = 1 + # XXX + import traceback + traceback.print_exc() + except Exception: + if raise_on_failure: + raise + import traceback + traceback.print_exc() + any_failures = 1 + finally: + if show_all_warnings: + Errors.LEVEL = old_warning_level + + if any_failures: + if raise_on_failure: + raise CompileError(None, pyx_file) + elif os.path.exists(c_file): + os.remove(c_file) + elif fingerprint: + artifacts = list(filter(None, [ + getattr(result, attr, None) + for attr in ('c_file', 'h_file', 'api_file', 'i_file')])) + if len(artifacts) == 1: + fingerprint_file = gz_fingerprint_file + with contextlib.closing(open(c_file, 'rb')) as f: + with contextlib.closing(gzip_open(fingerprint_file + '.tmp', 'wb')) as g: + shutil.copyfileobj(f, g) + else: + fingerprint_file = zip_fingerprint_file + with contextlib.closing(zipfile.ZipFile( + fingerprint_file + '.tmp', 'w', zipfile_compression_mode)) as zip: + for artifact in artifacts: + zip.write(artifact, os.path.basename(artifact)) + os.rename(fingerprint_file + '.tmp', fingerprint_file) + + +def cythonize_one_helper(m): + import traceback + try: + return cythonize_one(*m) + except Exception: + traceback.print_exc() + raise + + +def _init_multiprocessing_helper(): + # KeyboardInterrupt kills workers, so don't let them get it + import signal + signal.signal(signal.SIGINT, signal.SIG_IGN) + + +def cleanup_cache(cache, target_size, ratio=.85): + try: + p = subprocess.Popen(['du', '-s', '-k', os.path.abspath(cache)], stdout=subprocess.PIPE) + stdout, _ = p.communicate() + res = p.wait() + if res == 0: + 
total_size = 1024 * int(stdout.strip().split()[0]) + if total_size < target_size: + return + except (OSError, ValueError): + pass + total_size = 0 + all = [] + for file in os.listdir(cache): + path = join_path(cache, file) + s = os.stat(path) + total_size += s.st_size + all.append((s.st_atime, s.st_size, path)) + if total_size > target_size: + for time, size, file in reversed(sorted(all)): + os.unlink(file) + total_size -= size + if total_size < target_size * ratio: + break diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Distutils.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Distutils.py new file mode 100644 index 0000000000000000000000000000000000000000..3efcc0d7b5101f5b5fbacfaa47c9afe760dbaaa6 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Distutils.py @@ -0,0 +1 @@ +from Cython.Distutils.build_ext import build_ext diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Inline.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Inline.py new file mode 100644 index 0000000000000000000000000000000000000000..fac6a79b9c33388e470ad39d2194ffd966425443 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Inline.py @@ -0,0 +1,367 @@ +from __future__ import absolute_import + +import hashlib +import inspect +import os +import re +import sys + +from distutils.core import Distribution, Extension +from distutils.command.build_ext import build_ext + +import Cython +from ..Compiler.Main import Context +from ..Compiler.Options import (default_options, CompilationOptions, + get_directive_defaults) + +from ..Compiler.Visitor import CythonTransform, EnvTransform +from ..Compiler.ParseTreeTransforms import SkipDeclarations +from ..Compiler.TreeFragment import parse_from_strings +from ..Compiler.StringEncoding import _unicode +from .Dependencies import strip_string_literals, cythonize, cached_function +from ..Compiler import Pipeline +from ..Utils import get_cython_cache_dir +import cython as cython_module + + +IS_PY3 = sys.version_info >= (3,) + +# A utility function to convert user-supplied ASCII strings to unicode. 
+if not IS_PY3: + def to_unicode(s): + if isinstance(s, bytes): + return s.decode('ascii') + else: + return s +else: + to_unicode = lambda x: x + + +if sys.version_info < (3, 5): + import imp + def load_dynamic(name, module_path): + return imp.load_dynamic(name, module_path) +else: + import importlib.util + from importlib.machinery import ExtensionFileLoader + + def load_dynamic(name, path): + spec = importlib.util.spec_from_file_location(name, loader=ExtensionFileLoader(name, path)) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +class UnboundSymbols(EnvTransform, SkipDeclarations): + def __init__(self): + super(EnvTransform, self).__init__(context=None) + self.unbound = set() + def visit_NameNode(self, node): + if not self.current_env().lookup(node.name): + self.unbound.add(node.name) + return node + def __call__(self, node): + super(UnboundSymbols, self).__call__(node) + return self.unbound + + +@cached_function +def unbound_symbols(code, context=None): + code = to_unicode(code) + if context is None: + context = Context([], get_directive_defaults(), + options=CompilationOptions(default_options)) + from ..Compiler.ParseTreeTransforms import AnalyseDeclarationsTransform + tree = parse_from_strings('(tree fragment)', code) + for phase in Pipeline.create_pipeline(context, 'pyx'): + if phase is None: + continue + tree = phase(tree) + if isinstance(phase, AnalyseDeclarationsTransform): + break + try: + import builtins + except ImportError: + import __builtin__ as builtins + return tuple(UnboundSymbols()(tree) - set(dir(builtins))) + + +def unsafe_type(arg, context=None): + py_type = type(arg) + if py_type is int: + return 'long' + else: + return safe_type(arg, context) + + +def safe_type(arg, context=None): + py_type = type(arg) + if py_type in (list, tuple, dict, str): + return py_type.__name__ + elif py_type is complex: + return 'double complex' + elif py_type is float: + return 'double' + elif py_type is bool: + return 'bint' + elif 'numpy' in sys.modules and isinstance(arg, sys.modules['numpy'].ndarray): + return 'numpy.ndarray[numpy.%s_t, ndim=%s]' % (arg.dtype.name, arg.ndim) + else: + for base_type in py_type.__mro__: + if base_type.__module__ in ('__builtin__', 'builtins'): + return 'object' + module = context.find_module(base_type.__module__, need_pxd=False) + if module: + entry = module.lookup(base_type.__name__) + if entry.is_type: + return '%s.%s' % (base_type.__module__, base_type.__name__) + return 'object' + + +def _get_build_extension(): + dist = Distribution() + # Ensure the build respects distutils configuration by parsing + # the configuration files + config_files = dist.find_config_files() + dist.parse_config_files(config_files) + build_extension = build_ext(dist) + build_extension.finalize_options() + return build_extension + + +@cached_function +def _create_context(cython_include_dirs): + return Context( + list(cython_include_dirs), + get_directive_defaults(), + options=CompilationOptions(default_options) + ) + + +_cython_inline_cache = {} +_cython_inline_default_context = _create_context(('.',)) + + +def _populate_unbound(kwds, unbound_symbols, locals=None, globals=None): + for symbol in unbound_symbols: + if symbol not in kwds: + if locals is None or globals is None: + calling_frame = inspect.currentframe().f_back.f_back.f_back + if locals is None: + locals = calling_frame.f_locals + if globals is None: + globals = calling_frame.f_globals + if symbol in locals: + kwds[symbol] = locals[symbol] + elif symbol in 
globals: + kwds[symbol] = globals[symbol] + else: + print("Couldn't find %r" % symbol) + + +def _inline_key(orig_code, arg_sigs, language_level): + key = orig_code, arg_sigs, sys.version_info, sys.executable, language_level, Cython.__version__ + return hashlib.sha1(_unicode(key).encode('utf-8')).hexdigest() + + +def cython_inline(code, get_type=unsafe_type, + lib_dir=os.path.join(get_cython_cache_dir(), 'inline'), + cython_include_dirs=None, cython_compiler_directives=None, + force=False, quiet=False, locals=None, globals=None, language_level=None, **kwds): + + if get_type is None: + get_type = lambda x: 'object' + ctx = _create_context(tuple(cython_include_dirs)) if cython_include_dirs else _cython_inline_default_context + + cython_compiler_directives = dict(cython_compiler_directives) if cython_compiler_directives else {} + if language_level is None and 'language_level' not in cython_compiler_directives: + language_level = '3str' + if language_level is not None: + cython_compiler_directives['language_level'] = language_level + + key_hash = None + + # Fast path if this has been called in this session. + _unbound_symbols = _cython_inline_cache.get(code) + if _unbound_symbols is not None: + _populate_unbound(kwds, _unbound_symbols, locals, globals) + args = sorted(kwds.items()) + arg_sigs = tuple([(get_type(value, ctx), arg) for arg, value in args]) + key_hash = _inline_key(code, arg_sigs, language_level) + invoke = _cython_inline_cache.get((code, arg_sigs, key_hash)) + if invoke is not None: + arg_list = [arg[1] for arg in args] + return invoke(*arg_list) + + orig_code = code + code = to_unicode(code) + code, literals = strip_string_literals(code) + code = strip_common_indent(code) + if locals is None: + locals = inspect.currentframe().f_back.f_back.f_locals + if globals is None: + globals = inspect.currentframe().f_back.f_back.f_globals + try: + _cython_inline_cache[orig_code] = _unbound_symbols = unbound_symbols(code) + _populate_unbound(kwds, _unbound_symbols, locals, globals) + except AssertionError: + if not quiet: + # Parsing from strings not fully supported (e.g. cimports). 
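+                # A fragment such as "cimport numpy" cannot be analysed in
+                # isolation, so we fall back to the explicitly passed kwds.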
+ print("Could not parse code as a string (to extract unbound symbols).") + + cimports = [] + for name, arg in list(kwds.items()): + if arg is cython_module: + cimports.append('\ncimport cython as %s' % name) + del kwds[name] + arg_names = sorted(kwds) + arg_sigs = tuple([(get_type(kwds[arg], ctx), arg) for arg in arg_names]) + if key_hash is None: + key_hash = _inline_key(orig_code, arg_sigs, language_level) + module_name = "_cython_inline_" + key_hash + + if module_name in sys.modules: + module = sys.modules[module_name] + + else: + build_extension = None + if cython_inline.so_ext is None: + # Figure out and cache current extension suffix + build_extension = _get_build_extension() + cython_inline.so_ext = build_extension.get_ext_filename('') + + lib_dir = os.path.abspath(lib_dir) + module_path = os.path.join(lib_dir, module_name + cython_inline.so_ext) + + if not os.path.exists(lib_dir): + os.makedirs(lib_dir) + if force or not os.path.isfile(module_path): + cflags = [] + define_macros = [] + c_include_dirs = [] + qualified = re.compile(r'([.\w]+)[.]') + for type, _ in arg_sigs: + m = qualified.match(type) + if m: + cimports.append('\ncimport %s' % m.groups()[0]) + # one special case + if m.groups()[0] == 'numpy': + import numpy + c_include_dirs.append(numpy.get_include()) + define_macros.append(("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")) + # cflags.append('-Wno-unused') + module_body, func_body = extract_func_code(code) + params = ', '.join(['%s %s' % a for a in arg_sigs]) + module_code = """ +%(module_body)s +%(cimports)s +def __invoke(%(params)s): +%(func_body)s + return locals() + """ % {'cimports': '\n'.join(cimports), + 'module_body': module_body, + 'params': params, + 'func_body': func_body } + for key, value in literals.items(): + module_code = module_code.replace(key, value) + pyx_file = os.path.join(lib_dir, module_name + '.pyx') + fh = open(pyx_file, 'w') + try: + fh.write(module_code) + finally: + fh.close() + extension = Extension( + name=module_name, + sources=[pyx_file], + include_dirs=c_include_dirs or None, + extra_compile_args=cflags or None, + define_macros=define_macros or None, + ) + if build_extension is None: + build_extension = _get_build_extension() + build_extension.extensions = cythonize( + [extension], + include_path=cython_include_dirs or ['.'], + compiler_directives=cython_compiler_directives, + quiet=quiet) + build_extension.build_temp = os.path.dirname(pyx_file) + build_extension.build_lib = lib_dir + build_extension.run() + + if sys.platform == 'win32' and sys.version_info >= (3, 8): + with os.add_dll_directory(os.path.abspath(lib_dir)): + module = load_dynamic(module_name, module_path) + else: + module = load_dynamic(module_name, module_path) + + _cython_inline_cache[orig_code, arg_sigs, key_hash] = module.__invoke + arg_list = [kwds[arg] for arg in arg_names] + return module.__invoke(*arg_list) + + +# Cached suffix used by cython_inline above. 
None should get +# overridden with actual value upon the first cython_inline invocation +cython_inline.so_ext = None + +_find_non_space = re.compile('[^ ]').search + + +def strip_common_indent(code): + min_indent = None + lines = code.splitlines() + for line in lines: + match = _find_non_space(line) + if not match: + continue # blank + indent = match.start() + if line[indent] == '#': + continue # comment + if min_indent is None or min_indent > indent: + min_indent = indent + for ix, line in enumerate(lines): + match = _find_non_space(line) + if not match or not line or line[indent:indent+1] == '#': + continue + lines[ix] = line[min_indent:] + return '\n'.join(lines) + + +module_statement = re.compile(r'^((cdef +(extern|class))|cimport|(from .+ cimport)|(from .+ import +[*]))') +def extract_func_code(code): + module = [] + function = [] + current = function + code = code.replace('\t', ' ') + lines = code.split('\n') + for line in lines: + if not line.startswith(' '): + if module_statement.match(line): + current = module + else: + current = function + current.append(line) + return '\n'.join(module), ' ' + '\n '.join(function) + + +def get_body(source): + ix = source.index(':') + if source[:5] == 'lambda': + return "return %s" % source[ix+1:] + else: + return source[ix+1:] + + +# Lots to be done here... It would be especially cool if compiled functions +# could invoke each other quickly. +class RuntimeCompiledFunction(object): + + def __init__(self, f): + self._f = f + self._body = get_body(inspect.getsource(f)) + + def __call__(self, *args, **kwds): + all = inspect.getcallargs(self._f, *args, **kwds) + if IS_PY3: + return cython_inline(self._body, locals=self._f.__globals__, globals=self._f.__globals__, **all) + else: + return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/IpythonMagic.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/IpythonMagic.py new file mode 100644 index 0000000000000000000000000000000000000000..3fa43c96d1eb6f7ed809a0953779002e561ca40e --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/IpythonMagic.py @@ -0,0 +1,572 @@ +# -*- coding: utf-8 -*- +""" +===================== +Cython related magics +===================== + +Magic command interface for interactive work with Cython + +.. note:: + + The ``Cython`` package needs to be installed separately. It + can be obtained using ``easy_install`` or ``pip``. + +Usage +===== + +To enable the magics below, execute ``%load_ext cython``. + +``%%cython`` + +{CYTHON_DOC} + +``%%cython_inline`` + +{CYTHON_INLINE_DOC} + +``%%cython_pyximport`` + +{CYTHON_PYXIMPORT_DOC} + +Author: +* Brian Granger + +Code moved from IPython and adapted by: +* Martín Gaitán + +Parts of this code were taken from Cython.inline. +""" +#----------------------------------------------------------------------------- +# Copyright (C) 2010-2011, IPython Development Team. +# +# Distributed under the terms of the Modified BSD License. +# +# The full license is in the file ipython-COPYING.rst, distributed with this software. 
+#----------------------------------------------------------------------------- + +from __future__ import absolute_import, print_function + +import io +import os +import re +import sys +import time +import copy +import distutils.log +import textwrap + +IO_ENCODING = sys.getfilesystemencoding() +IS_PY2 = sys.version_info[0] < 3 + +import hashlib +from distutils.core import Distribution, Extension +from distutils.command.build_ext import build_ext + +from IPython.core import display +from IPython.core import magic_arguments +from IPython.core.magic import Magics, magics_class, cell_magic +try: + from IPython.paths import get_ipython_cache_dir +except ImportError: + # older IPython version + from IPython.utils.path import get_ipython_cache_dir +from IPython.utils.text import dedent + +from ..Shadow import __version__ as cython_version +from ..Compiler.Errors import CompileError +from .Inline import cython_inline, load_dynamic +from .Dependencies import cythonize +from ..Utils import captured_fd, print_captured + + +PGO_CONFIG = { + 'gcc': { + 'gen': ['-fprofile-generate', '-fprofile-dir={TEMPDIR}'], + 'use': ['-fprofile-use', '-fprofile-correction', '-fprofile-dir={TEMPDIR}'], + }, + # blind copy from 'configure' script in CPython 3.7 + 'icc': { + 'gen': ['-prof-gen'], + 'use': ['-prof-use'], + } +} +PGO_CONFIG['mingw32'] = PGO_CONFIG['gcc'] + + +if IS_PY2: + def encode_fs(name): + return name if isinstance(name, bytes) else name.encode(IO_ENCODING) +else: + def encode_fs(name): + return name + + +@magics_class +class CythonMagics(Magics): + + def __init__(self, shell): + super(CythonMagics, self).__init__(shell) + self._reloads = {} + self._code_cache = {} + self._pyximport_installed = False + + def _import_all(self, module): + mdict = module.__dict__ + if '__all__' in mdict: + keys = mdict['__all__'] + else: + keys = [k for k in mdict if not k.startswith('_')] + + for k in keys: + try: + self.shell.push({k: mdict[k]}) + except KeyError: + msg = "'module' object has no attribute '%s'" % k + raise AttributeError(msg) + + @cell_magic + def cython_inline(self, line, cell): + """Compile and run a Cython code cell using Cython.inline. + + This magic simply passes the body of the cell to Cython.inline + and returns the result. If the variables `a` and `b` are defined + in the user's namespace, here is a simple example that returns + their sum:: + + %%cython_inline + return a+b + + For most purposes, we recommend the usage of the `%%cython` magic. + """ + locs = self.shell.user_global_ns + globs = self.shell.user_ns + return cython_inline(cell, locals=locs, globals=globs) + + @cell_magic + def cython_pyximport(self, line, cell): + """Compile and import a Cython code cell using pyximport. + + The contents of the cell are written to a `.pyx` file in the current + working directory, which is then imported using `pyximport`. This + magic requires a module name to be passed:: + + %%cython_pyximport modulename + def f(x): + return 2.0*x + + The compiled module is then imported and all of its symbols are + injected into the user's namespace. For most purposes, we recommend + the usage of the `%%cython` magic. 
+ """ + module_name = line.strip() + if not module_name: + raise ValueError('module name must be given') + fname = module_name + '.pyx' + with io.open(fname, 'w', encoding='utf-8') as f: + f.write(cell) + if 'pyximport' not in sys.modules or not self._pyximport_installed: + import pyximport + pyximport.install() + self._pyximport_installed = True + if module_name in self._reloads: + module = self._reloads[module_name] + # Note: reloading extension modules is not actually supported + # (requires PEP-489 reinitialisation support). + # Don't know why this should ever have worked as it reads here. + # All we really need to do is to update the globals below. + #reload(module) + else: + __import__(module_name) + module = sys.modules[module_name] + self._reloads[module_name] = module + self._import_all(module) + + @magic_arguments.magic_arguments() + @magic_arguments.argument( + '-a', '--annotate', action='store_const', const='default', dest='annotate', + help="Produce a colorized HTML version of the source." + ) + @magic_arguments.argument( + '--annotate-fullc', action='store_const', const='fullc', dest='annotate', + help="Produce a colorized HTML version of the source " + "which includes entire generated C/C++-code." + ) + @magic_arguments.argument( + '-+', '--cplus', action='store_true', default=False, + help="Output a C++ rather than C file." + ) + @magic_arguments.argument( + '-3', dest='language_level', action='store_const', const=3, default=None, + help="Select Python 3 syntax." + ) + @magic_arguments.argument( + '-2', dest='language_level', action='store_const', const=2, default=None, + help="Select Python 2 syntax." + ) + @magic_arguments.argument( + '-f', '--force', action='store_true', default=False, + help="Force the compilation of a new module, even if the source has been " + "previously compiled." + ) + @magic_arguments.argument( + '-c', '--compile-args', action='append', default=[], + help="Extra flags to pass to compiler via the `extra_compile_args` " + "Extension flag (can be specified multiple times)." + ) + @magic_arguments.argument( + '--link-args', action='append', default=[], + help="Extra flags to pass to linker via the `extra_link_args` " + "Extension flag (can be specified multiple times)." + ) + @magic_arguments.argument( + '-l', '--lib', action='append', default=[], + help="Add a library to link the extension against (can be specified " + "multiple times)." + ) + @magic_arguments.argument( + '-n', '--name', + help="Specify a name for the Cython module." + ) + @magic_arguments.argument( + '-L', dest='library_dirs', metavar='dir', action='append', default=[], + help="Add a path to the list of library directories (can be specified " + "multiple times)." + ) + @magic_arguments.argument( + '-I', '--include', action='append', default=[], + help="Add a path to the list of include directories (can be specified " + "multiple times)." + ) + @magic_arguments.argument( + '-S', '--src', action='append', default=[], + help="Add a path to the list of src files (can be specified " + "multiple times)." + ) + @magic_arguments.argument( + '--pgo', dest='pgo', action='store_true', default=False, + help=("Enable profile guided optimisation in the C compiler. 
" + "Compiles the cell twice and executes it in between to generate a runtime profile.") + ) + @magic_arguments.argument( + '--verbose', dest='quiet', action='store_false', default=True, + help=("Print debug information like generated .c/.cpp file location " + "and exact gcc/g++ command invoked.") + ) + @cell_magic + def cython(self, line, cell): + """Compile and import everything from a Cython code cell. + + The contents of the cell are written to a `.pyx` file in the + directory `IPYTHONDIR/cython` using a filename with the hash of the + code. This file is then cythonized and compiled. The resulting module + is imported and all of its symbols are injected into the user's + namespace. The usage is similar to that of `%%cython_pyximport` but + you don't have to pass a module name:: + + %%cython + def f(x): + return 2.0*x + + To compile OpenMP codes, pass the required `--compile-args` + and `--link-args`. For example with gcc:: + + %%cython --compile-args=-fopenmp --link-args=-fopenmp + ... + + To enable profile guided optimisation, pass the ``--pgo`` option. + Note that the cell itself needs to take care of establishing a suitable + profile when executed. This can be done by implementing the functions to + optimise, and then calling them directly in the same cell on some realistic + training data like this:: + + %%cython --pgo + def critical_function(data): + for item in data: + ... + + # execute function several times to build profile + from somewhere import some_typical_data + for _ in range(100): + critical_function(some_typical_data) + + In Python 3.5 and later, you can distinguish between the profile and + non-profile runs as follows:: + + if "_pgo_" in __name__: + ... # execute critical code here + """ + args = magic_arguments.parse_argstring(self.cython, line) + code = cell if cell.endswith('\n') else cell + '\n' + lib_dir = os.path.join(get_ipython_cache_dir(), 'cython') + key = (code, line, sys.version_info, sys.executable, cython_version) + + if not os.path.exists(lib_dir): + os.makedirs(lib_dir) + + if args.pgo: + key += ('pgo',) + if args.force: + # Force a new module name by adding the current time to the + # key which is hashed to determine the module name. + key += (time.time(),) + + if args.name: + module_name = str(args.name) # no-op in Py3 + else: + module_name = "_cython_magic_" + hashlib.sha1(str(key).encode('utf-8')).hexdigest() + html_file = os.path.join(lib_dir, module_name + '.html') + module_path = os.path.join(lib_dir, module_name + self.so_ext) + + have_module = os.path.isfile(module_path) + need_cythonize = args.pgo or not have_module + + if args.annotate: + if not os.path.isfile(html_file): + need_cythonize = True + + extension = None + if need_cythonize: + extensions = self._cythonize(module_name, code, lib_dir, args, quiet=args.quiet) + if extensions is None: + # Compilation failed and printed error message + return None + assert len(extensions) == 1 + extension = extensions[0] + self._code_cache[key] = module_name + + if args.pgo: + self._profile_pgo_wrapper(extension, lib_dir) + + def print_compiler_output(stdout, stderr, where): + # On windows, errors are printed to stdout, we redirect both to sys.stderr. 
+ print_captured(stdout, where, u"Content of stdout:\n") + print_captured(stderr, where, u"Content of stderr:\n") + + get_stderr = get_stdout = None + try: + with captured_fd(1) as get_stdout: + with captured_fd(2) as get_stderr: + self._build_extension( + extension, lib_dir, pgo_step_name='use' if args.pgo else None, quiet=args.quiet) + except (distutils.errors.CompileError, distutils.errors.LinkError): + # Build failed, print error message from compiler/linker + print_compiler_output(get_stdout(), get_stderr(), sys.stderr) + return None + + # Build seems ok, but we might still want to show any warnings that occurred + print_compiler_output(get_stdout(), get_stderr(), sys.stdout) + + module = load_dynamic(module_name, module_path) + self._import_all(module) + + if args.annotate: + try: + with io.open(html_file, encoding='utf-8') as f: + annotated_html = f.read() + except IOError as e: + # File could not be opened. Most likely the user has a version + # of Cython before 0.15.1 (when `cythonize` learned the + # `force` keyword argument) and has already compiled this + # exact source without annotation. + print('Cython completed successfully but the annotated ' + 'source could not be read.', file=sys.stderr) + print(e, file=sys.stderr) + else: + return display.HTML(self.clean_annotated_html(annotated_html)) + + def _profile_pgo_wrapper(self, extension, lib_dir): + """ + Generate a .c file for a separate extension module that calls the + module init function of the original module. This makes sure that the + PGO profiler sees the correct .o file of the final module, but it still + allows us to import the module under a different name for profiling, + before recompiling it into the PGO optimised module. Overwriting and + reimporting the same shared library is not portable. + """ + extension = copy.copy(extension) # shallow copy, do not modify sources in place! + module_name = extension.name + pgo_module_name = '_pgo_' + module_name + pgo_wrapper_c_file = os.path.join(lib_dir, pgo_module_name + '.c') + with io.open(pgo_wrapper_c_file, 'w', encoding='utf-8') as f: + f.write(textwrap.dedent(u""" + #include "Python.h" + #if PY_MAJOR_VERSION < 3 + extern PyMODINIT_FUNC init%(module_name)s(void); + PyMODINIT_FUNC init%(pgo_module_name)s(void); /*proto*/ + PyMODINIT_FUNC init%(pgo_module_name)s(void) { + PyObject *sys_modules; + init%(module_name)s(); if (PyErr_Occurred()) return; + sys_modules = PyImport_GetModuleDict(); /* borrowed, no exception, "never" fails */ + if (sys_modules) { + PyObject *module = PyDict_GetItemString(sys_modules, "%(module_name)s"); if (!module) return; + PyDict_SetItemString(sys_modules, "%(pgo_module_name)s", module); + Py_DECREF(module); + } + } + #else + extern PyMODINIT_FUNC PyInit_%(module_name)s(void); + PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void); /*proto*/ + PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void) { + return PyInit_%(module_name)s(); + } + #endif + """ % {'module_name': module_name, 'pgo_module_name': pgo_module_name})) + + extension.sources = extension.sources + [pgo_wrapper_c_file] # do not modify in place! 
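+        # Rebinding (rather than list.append) keeps the original extension's
+        # source list intact for the final PGO-optimised rebuild.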
+ extension.name = pgo_module_name + + self._build_extension(extension, lib_dir, pgo_step_name='gen') + + # import and execute module code to generate profile + so_module_path = os.path.join(lib_dir, pgo_module_name + self.so_ext) + load_dynamic(pgo_module_name, so_module_path) + + def _cythonize(self, module_name, code, lib_dir, args, quiet=True): + pyx_file = os.path.join(lib_dir, module_name + '.pyx') + pyx_file = encode_fs(pyx_file) + + c_include_dirs = args.include + c_src_files = list(map(str, args.src)) + if 'numpy' in code: + import numpy + c_include_dirs.append(numpy.get_include()) + with io.open(pyx_file, 'w', encoding='utf-8') as f: + f.write(code) + extension = Extension( + name=module_name, + sources=[pyx_file] + c_src_files, + include_dirs=c_include_dirs, + library_dirs=args.library_dirs, + extra_compile_args=args.compile_args, + extra_link_args=args.link_args, + libraries=args.lib, + language='c++' if args.cplus else 'c', + ) + try: + opts = dict( + quiet=quiet, + annotate=args.annotate, + force=True, + language_level=min(3, sys.version_info[0]), + ) + if args.language_level is not None: + assert args.language_level in (2, 3) + opts['language_level'] = args.language_level + return cythonize([extension], **opts) + except CompileError: + return None + + def _build_extension(self, extension, lib_dir, temp_dir=None, pgo_step_name=None, quiet=True): + build_extension = self._get_build_extension( + extension, lib_dir=lib_dir, temp_dir=temp_dir, pgo_step_name=pgo_step_name) + old_threshold = None + try: + if not quiet: + old_threshold = distutils.log.set_threshold(distutils.log.DEBUG) + build_extension.run() + finally: + if not quiet and old_threshold is not None: + distutils.log.set_threshold(old_threshold) + + def _add_pgo_flags(self, build_extension, step_name, temp_dir): + compiler_type = build_extension.compiler.compiler_type + if compiler_type == 'unix': + compiler_cmd = build_extension.compiler.compiler_so + # TODO: we could try to call "[cmd] --version" for better insights + if not compiler_cmd: + pass + elif 'clang' in compiler_cmd or 'clang' in compiler_cmd[0]: + compiler_type = 'clang' + elif 'icc' in compiler_cmd or 'icc' in compiler_cmd[0]: + compiler_type = 'icc' + elif 'gcc' in compiler_cmd or 'gcc' in compiler_cmd[0]: + compiler_type = 'gcc' + elif 'g++' in compiler_cmd or 'g++' in compiler_cmd[0]: + compiler_type = 'gcc' + config = PGO_CONFIG.get(compiler_type) + orig_flags = [] + if config and step_name in config: + flags = [f.format(TEMPDIR=temp_dir) for f in config[step_name]] + for extension in build_extension.extensions: + orig_flags.append((extension.extra_compile_args, extension.extra_link_args)) + extension.extra_compile_args = extension.extra_compile_args + flags + extension.extra_link_args = extension.extra_link_args + flags + else: + print("No PGO %s configuration known for C compiler type '%s'" % (step_name, compiler_type), + file=sys.stderr) + return orig_flags + + @property + def so_ext(self): + """The extension suffix for compiled modules.""" + try: + return self._so_ext + except AttributeError: + self._so_ext = self._get_build_extension().get_ext_filename('') + return self._so_ext + + def _clear_distutils_mkpath_cache(self): + """clear distutils mkpath cache + + prevents distutils from skipping re-creation of dirs that have been removed + """ + try: + from distutils.dir_util import _path_created + except ImportError: + pass + else: + _path_created.clear() + + def _get_build_extension(self, extension=None, lib_dir=None, temp_dir=None, + 
pgo_step_name=None, _build_ext=build_ext): + self._clear_distutils_mkpath_cache() + dist = Distribution() + config_files = dist.find_config_files() + try: + config_files.remove('setup.cfg') + except ValueError: + pass + dist.parse_config_files(config_files) + + if not temp_dir: + temp_dir = lib_dir + add_pgo_flags = self._add_pgo_flags + + if pgo_step_name: + base_build_ext = _build_ext + class _build_ext(_build_ext): + def build_extensions(self): + add_pgo_flags(self, pgo_step_name, temp_dir) + base_build_ext.build_extensions(self) + + build_extension = _build_ext(dist) + build_extension.finalize_options() + if temp_dir: + temp_dir = encode_fs(temp_dir) + build_extension.build_temp = temp_dir + if lib_dir: + lib_dir = encode_fs(lib_dir) + build_extension.build_lib = lib_dir + if extension is not None: + build_extension.extensions = [extension] + return build_extension + + @staticmethod + def clean_annotated_html(html): + """Clean up the annotated HTML source. + + Strips the link to the generated C or C++ file, which we do not + present to the user. + """ + r = re.compile('
<p>Raw output: <a href="(.*)">(.*)</a>') + html = '\n'.join(l for l in html.splitlines() if not r.match(l)) + return html + +__doc__ = __doc__.format( + # rST doesn't see the -+ flag as part of an option list, so we + # hide it from the module-level docstring. + CYTHON_DOC=dedent(CythonMagics.cython.__doc__\ + .replace('-+, --cplus', '--cplus ')), + CYTHON_INLINE_DOC=dedent(CythonMagics.cython_inline.__doc__), + CYTHON_PYXIMPORT_DOC=dedent(CythonMagics.cython_pyximport.__doc__), +) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCyCache.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCyCache.py new file mode 100644 index 0000000000000000000000000000000000000000..e5108ab9ea531903a8d2fdc00b9189eeaaca8f0f --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCyCache.py @@ -0,0 +1,121 @@ +import difflib +import glob +import gzip +import os +import sys +import tempfile +import unittest + +import Cython.Build.Dependencies +import Cython.Utils +from Cython.TestUtils import CythonTest + + +class TestCyCache(CythonTest): + + def setUp(self): + CythonTest.setUp(self) + self.temp_dir = tempfile.mkdtemp( + prefix='cycache-test', + dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None) + self.src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + self.cache_dir = tempfile.mkdtemp(prefix='cache', dir=self.temp_dir) + + def cache_files(self, file_glob): + return glob.glob(os.path.join(self.cache_dir, file_glob)) + + def fresh_cythonize(self, *args, **kwargs): + Cython.Utils.clear_function_caches() + Cython.Build.Dependencies._dep_tree = None # discard method caches + Cython.Build.Dependencies.cythonize(*args, **kwargs) + + def test_cycache_switch(self): + content1 = 'value = 1\n' + content2 = 'value = 2\n' + a_pyx = os.path.join(self.src_dir, 'a.pyx') + a_c = a_pyx[:-4] + '.c' + + with open(a_pyx, 'w') as f: + f.write(content1) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + self.assertEqual(1, len(self.cache_files('a.c*'))) + with open(a_c) as f: + a_contents1 = f.read() + os.unlink(a_c) + + with open(a_pyx, 'w') as f: + f.write(content2) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + with open(a_c) as f: + a_contents2 = f.read() + os.unlink(a_c) + + self.assertNotEqual(a_contents1, a_contents2, 'C file not changed!') + self.assertEqual(2, len(self.cache_files('a.c*'))) + + with open(a_pyx, 'w') as f: + f.write(content1) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + self.assertEqual(2, len(self.cache_files('a.c*'))) + with open(a_c) as f: + a_contents = f.read() + self.assertEqual( + a_contents, a_contents1, + msg='\n'.join(list(difflib.unified_diff( + a_contents.split('\n'), a_contents1.split('\n')))[:10])) + + @unittest.skipIf(sys.version_info[:2] == (3, 12) and sys.platform == "win32", + "This test is mysteriously broken on Windows on the CI only " + "(https://github.com/cython/cython/issues/5825)") + def test_cycache_uses_cache(self): + a_pyx = os.path.join(self.src_dir, 'a.pyx') + a_c = a_pyx[:-4] + '.c' + with open(a_pyx, 'w') as f: + f.write('pass') + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + a_cache = os.path.join(self.cache_dir, os.listdir(self.cache_dir)[0]) + gzip.GzipFile(a_cache, 'wb').write('fake stuff'.encode('ascii')) + os.unlink(a_c) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + with open(a_c) as f: + a_contents = f.read() + self.assertEqual(a_contents, 'fake stuff', + 'Unexpected contents: %s...'
% a_contents[:100]) + + def test_multi_file_output(self): + a_pyx = os.path.join(self.src_dir, 'a.pyx') + a_c = a_pyx[:-4] + '.c' + a_h = a_pyx[:-4] + '.h' + a_api_h = a_pyx[:-4] + '_api.h' + with open(a_pyx, 'w') as f: + f.write('cdef public api int foo(int x): return x\n') + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + expected = [a_c, a_h, a_api_h] + for output in expected: + self.assertTrue(os.path.exists(output), output) + os.unlink(output) + self.fresh_cythonize(a_pyx, cache=self.cache_dir) + for output in expected: + self.assertTrue(os.path.exists(output), output) + + def test_options_invalidation(self): + hash_pyx = os.path.join(self.src_dir, 'options.pyx') + hash_c = hash_pyx[:-len('.pyx')] + '.c' + + with open(hash_pyx, 'w') as f: + f.write('pass') + self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False) + self.assertEqual(1, len(self.cache_files('options.c*'))) + + os.unlink(hash_c) + self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=True) + self.assertEqual(2, len(self.cache_files('options.c*'))) + + os.unlink(hash_c) + self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=False) + self.assertEqual(2, len(self.cache_files('options.c*'))) + + os.unlink(hash_c) + self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=True) + self.assertEqual(2, len(self.cache_files('options.c*'))) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCythonizeArgsParser.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCythonizeArgsParser.py new file mode 100644 index 0000000000000000000000000000000000000000..c5a682dd6440800dad3cc3e120a910f381f1e83b --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCythonizeArgsParser.py @@ -0,0 +1,482 @@ +from Cython.Build.Cythonize import ( + create_args_parser, parse_args_raw, parse_args, + parallel_compiles +) + +from Cython.Compiler import Options +from Cython.Compiler.Tests.Utils import backup_Options, restore_Options, check_global_options + +from unittest import TestCase + +import sys +try: + from StringIO import StringIO +except ImportError: + from io import StringIO # doesn't accept 'str' in Py2 + + +class TestCythonizeArgsParser(TestCase): + + def setUp(self): + TestCase.setUp(self) + self.parse_args = lambda x, parser=create_args_parser() : parse_args_raw(parser, x) + + + def are_default(self, options, skip): + # empty containers + empty_containers = ['directives', 'compile_time_env', 'options', 'excludes'] + are_none = ['language_level', 'annotate', 'build', 'build_inplace', 'force', 'quiet', 'lenient', 'keep_going', 'no_docstrings'] + for opt_name in empty_containers: + if len(getattr(options, opt_name))!=0 and (opt_name not in skip): + self.assertEqual(opt_name,"", msg="For option "+opt_name) + return False + for opt_name in are_none: + if (getattr(options, opt_name) is not None) and (opt_name not in skip): + self.assertEqual(opt_name,"", msg="For option "+opt_name) + return False + if options.parallel!=parallel_compiles and ('parallel' not in skip): + return False + return True + + # testing directives: + def test_directive_short(self): + options, args = self.parse_args(['-X', 'cdivision=True']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['directives'])) + self.assertEqual(options.directives['cdivision'], True) + + def test_directive_long(self): + options, args = self.parse_args(['--directive', 'cdivision=True']) + self.assertFalse(args) + 
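+        # The long form must behave exactly like '-X' in the test above.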
self.assertTrue(self.are_default(options, ['directives'])) + self.assertEqual(options.directives['cdivision'], True) + + def test_directive_multiple(self): + options, args = self.parse_args(['-X', 'cdivision=True', '-X', 'c_string_type=bytes']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['directives'])) + self.assertEqual(options.directives['cdivision'], True) + self.assertEqual(options.directives['c_string_type'], 'bytes') + + def test_directive_multiple_v2(self): + options, args = self.parse_args(['-X', 'cdivision=True,c_string_type=bytes']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['directives'])) + self.assertEqual(options.directives['cdivision'], True) + self.assertEqual(options.directives['c_string_type'], 'bytes') + + def test_directive_value_yes(self): + options, args = self.parse_args(['-X', 'cdivision=YeS']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['directives'])) + self.assertEqual(options.directives['cdivision'], True) + + def test_directive_value_no(self): + options, args = self.parse_args(['-X', 'cdivision=no']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['directives'])) + self.assertEqual(options.directives['cdivision'], False) + + def test_directive_value_invalid(self): + with self.assertRaises(ValueError) as context: + options, args = self.parse_args(['-X', 'cdivision=sadfasd']) + + def test_directive_key_invalid(self): + with self.assertRaises(ValueError) as context: + options, args = self.parse_args(['-X', 'abracadabra']) + + def test_directive_no_value(self): + with self.assertRaises(ValueError) as context: + options, args = self.parse_args(['-X', 'cdivision']) + + def test_directives_types(self): + directives = { + 'auto_pickle': True, + 'c_string_type': 'bytearray', + 'c_string_type': 'bytes', + 'c_string_type': 'str', + 'c_string_type': 'bytearray', + 'c_string_type': 'unicode', + 'c_string_encoding' : 'ascii', + 'language_level' : 2, + 'language_level' : 3, + 'language_level' : '3str', + 'set_initial_path' : 'my_initial_path', + } + for key, value in directives.items(): + cmd = '{key}={value}'.format(key=key, value=str(value)) + options, args = self.parse_args(['-X', cmd]) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['directives']), msg = "Error for option: "+cmd) + self.assertEqual(options.directives[key], value, msg = "Error for option: "+cmd) + + def test_directives_wrong(self): + directives = { + 'auto_pickle': 42, # for bool type + 'auto_pickle': 'NONONO', # for bool type + 'c_string_type': 'bites', + #'c_string_encoding' : 'a', + #'language_level' : 4, + } + for key, value in directives.items(): + cmd = '{key}={value}'.format(key=key, value=str(value)) + with self.assertRaises(ValueError, msg = "Error for option: "+cmd) as context: + options, args = self.parse_args(['-X', cmd]) + + def test_compile_time_env_short(self): + options, args = self.parse_args(['-E', 'MYSIZE=10']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['compile_time_env'])) + self.assertEqual(options.compile_time_env['MYSIZE'], 10) + + def test_compile_time_env_long(self): + options, args = self.parse_args(['--compile-time-env', 'MYSIZE=10']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['compile_time_env'])) + self.assertEqual(options.compile_time_env['MYSIZE'], 10) + + def test_compile_time_env_multiple(self): + options, args = self.parse_args(['-E', 'MYSIZE=10', '-E', 'ARRSIZE=11']) + self.assertFalse(args) + 
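+        # Repeated -E flags accumulate into a single compile_time_env dict.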
self.assertTrue(self.are_default(options, ['compile_time_env'])) + self.assertEqual(options.compile_time_env['MYSIZE'], 10) + self.assertEqual(options.compile_time_env['ARRSIZE'], 11) + + def test_compile_time_env_multiple_v2(self): + options, args = self.parse_args(['-E', 'MYSIZE=10,ARRSIZE=11']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['compile_time_env'])) + self.assertEqual(options.compile_time_env['MYSIZE'], 10) + self.assertEqual(options.compile_time_env['ARRSIZE'], 11) + + #testing options + def test_option_short(self): + options, args = self.parse_args(['-s', 'docstrings=True']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_long(self): + options, args = self.parse_args(['--option', 'docstrings=True']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_multiple(self): + options, args = self.parse_args(['-s', 'docstrings=True', '-s', 'buffer_max_dims=8']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + self.assertEqual(options.options['buffer_max_dims'], True) # really? + + def test_option_multiple_v2(self): + options, args = self.parse_args(['-s', 'docstrings=True,buffer_max_dims=8']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + self.assertEqual(options.options['buffer_max_dims'], True) # really? + + def test_option_value_yes(self): + options, args = self.parse_args(['-s', 'docstrings=YeS']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_value_4242(self): + options, args = self.parse_args(['-s', 'docstrings=4242']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_value_0(self): + options, args = self.parse_args(['-s', 'docstrings=0']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], False) + + def test_option_value_emptystr(self): + options, args = self.parse_args(['-s', 'docstrings=']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_value_a_str(self): + options, args = self.parse_args(['-s', 'docstrings=BB']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_value_no(self): + options, args = self.parse_args(['-s', 'docstrings=nO']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], False) + + def test_option_no_value(self): + options, args = self.parse_args(['-s', 'docstrings']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['docstrings'], True) + + def test_option_any_key(self): + options, args = self.parse_args(['-s', 'abracadabra']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['options'])) + self.assertEqual(options.options['abracadabra'], True) + + def 
test_language_level_2(self): + options, args = self.parse_args(['-2']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['language_level'])) + self.assertEqual(options.language_level, 2) + + def test_language_level_3(self): + options, args = self.parse_args(['-3']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['language_level'])) + self.assertEqual(options.language_level, 3) + + def test_language_level_3str(self): + options, args = self.parse_args(['--3str']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['language_level'])) + self.assertEqual(options.language_level, '3str') + + def test_annotate_short(self): + options, args = self.parse_args(['-a']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['annotate'])) + self.assertEqual(options.annotate, 'default') + + def test_annotate_long(self): + options, args = self.parse_args(['--annotate']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['annotate'])) + self.assertEqual(options.annotate, 'default') + + def test_annotate_fullc(self): + options, args = self.parse_args(['--annotate-fullc']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['annotate'])) + self.assertEqual(options.annotate, 'fullc') + + def test_annotate_and_positional(self): + options, args = self.parse_args(['-a', 'foo.pyx']) + self.assertEqual(args, ['foo.pyx']) + self.assertTrue(self.are_default(options, ['annotate'])) + self.assertEqual(options.annotate, 'default') + + def test_annotate_and_optional(self): + options, args = self.parse_args(['-a', '--3str']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['annotate', 'language_level'])) + self.assertEqual(options.annotate, 'default') + self.assertEqual(options.language_level, '3str') + + def test_exclude_short(self): + options, args = self.parse_args(['-x', '*.pyx']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['excludes'])) + self.assertTrue('*.pyx' in options.excludes) + + def test_exclude_long(self): + options, args = self.parse_args(['--exclude', '*.pyx']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['excludes'])) + self.assertTrue('*.pyx' in options.excludes) + + def test_exclude_multiple(self): + options, args = self.parse_args(['--exclude', '*.pyx', '--exclude', '*.py', ]) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['excludes'])) + self.assertEqual(options.excludes, ['*.pyx', '*.py']) + + def test_build_short(self): + options, args = self.parse_args(['-b']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['build'])) + self.assertEqual(options.build, True) + + def test_build_long(self): + options, args = self.parse_args(['--build']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['build'])) + self.assertEqual(options.build, True) + + def test_inplace_short(self): + options, args = self.parse_args(['-i']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['build_inplace'])) + self.assertEqual(options.build_inplace, True) + + def test_inplace_long(self): + options, args = self.parse_args(['--inplace']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['build_inplace'])) + self.assertEqual(options.build_inplace, True) + + def test_parallel_short(self): + options, args = self.parse_args(['-j', '42']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['parallel'])) + 
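+        # The parser converts the -j argument to an int, not a string.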
self.assertEqual(options.parallel, 42) + + def test_parallel_long(self): + options, args = self.parse_args(['--parallel', '42']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['parallel'])) + self.assertEqual(options.parallel, 42) + + def test_force_short(self): + options, args = self.parse_args(['-f']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['force'])) + self.assertEqual(options.force, True) + + def test_force_long(self): + options, args = self.parse_args(['--force']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['force'])) + self.assertEqual(options.force, True) + + def test_quite_short(self): + options, args = self.parse_args(['-q']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['quiet'])) + self.assertEqual(options.quiet, True) + + def test_quite_long(self): + options, args = self.parse_args(['--quiet']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['quiet'])) + self.assertEqual(options.quiet, True) + + def test_lenient_long(self): + options, args = self.parse_args(['--lenient']) + self.assertTrue(self.are_default(options, ['lenient'])) + self.assertFalse(args) + self.assertEqual(options.lenient, True) + + def test_keep_going_short(self): + options, args = self.parse_args(['-k']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['keep_going'])) + self.assertEqual(options.keep_going, True) + + def test_keep_going_long(self): + options, args = self.parse_args(['--keep-going']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['keep_going'])) + self.assertEqual(options.keep_going, True) + + def test_no_docstrings_long(self): + options, args = self.parse_args(['--no-docstrings']) + self.assertFalse(args) + self.assertTrue(self.are_default(options, ['no_docstrings'])) + self.assertEqual(options.no_docstrings, True) + + def test_file_name(self): + options, args = self.parse_args(['file1.pyx', 'file2.pyx']) + self.assertEqual(len(args), 2) + self.assertEqual(args[0], 'file1.pyx') + self.assertEqual(args[1], 'file2.pyx') + self.assertTrue(self.are_default(options, [])) + + def test_option_first(self): + options, args = self.parse_args(['-i', 'file.pyx']) + self.assertEqual(args, ['file.pyx']) + self.assertEqual(options.build_inplace, True) + self.assertTrue(self.are_default(options, ['build_inplace'])) + + def test_file_inbetween(self): + options, args = self.parse_args(['-i', 'file.pyx', '-a']) + self.assertEqual(args, ['file.pyx']) + self.assertEqual(options.build_inplace, True) + self.assertEqual(options.annotate, 'default') + self.assertTrue(self.are_default(options, ['build_inplace', 'annotate'])) + + def test_option_trailing(self): + options, args = self.parse_args(['file.pyx', '-i']) + self.assertEqual(args, ['file.pyx']) + self.assertEqual(options.build_inplace, True) + self.assertTrue(self.are_default(options, ['build_inplace'])) + + def test_interspersed_positional(self): + options, sources = self.parse_args([ + 'file1.pyx', '-a', + 'file2.pyx' + ]) + self.assertEqual(sources, ['file1.pyx', 'file2.pyx']) + self.assertEqual(options.annotate, 'default') + self.assertTrue(self.are_default(options, ['annotate'])) + + def test_interspersed_positional2(self): + options, sources = self.parse_args([ + 'file1.pyx', '-a', + 'file2.pyx', '-a', 'file3.pyx' + ]) + self.assertEqual(sources, ['file1.pyx', 'file2.pyx', 'file3.pyx']) + self.assertEqual(options.annotate, 'default') + self.assertTrue(self.are_default(options, ['annotate'])) 
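+    # Hypothetical equivalent command line for the next test (illustration
+    # only): cythonize -f f1 f2 -a f3 f4 -a f5
+    # Options apply globally; all five names are collected as sources.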
+ + def test_interspersed_positional3(self): + options, sources = self.parse_args([ + '-f', 'f1', 'f2', '-a', + 'f3', 'f4', '-a', 'f5' + ]) + self.assertEqual(sources, ['f1', 'f2', 'f3', 'f4', 'f5']) + self.assertEqual(options.annotate, 'default') + self.assertEqual(options.force, True) + self.assertTrue(self.are_default(options, ['annotate', 'force'])) + + def test_wrong_option(self): + old_stderr = sys.stderr + stderr = sys.stderr = StringIO() + try: + self.assertRaises(SystemExit, self.parse_args, + ['--unknown-option'] + ) + finally: + sys.stderr = old_stderr + self.assertTrue(stderr.getvalue()) + + +class TestParseArgs(TestCase): + def setUp(self): + self._options_backup = backup_Options() + + def tearDown(self): + restore_Options(self._options_backup) + + def check_default_global_options(self, white_list=[]): + self.assertEqual(check_global_options(self._options_backup, white_list), "") + + def test_build_set_for_inplace(self): + options, args = parse_args(['foo.pyx', '-i']) + self.assertEqual(options.build, True) + self.check_default_global_options() + + def test_lenient(self): + options, sources = parse_args(['foo.pyx', '--lenient']) + self.assertEqual(sources, ['foo.pyx']) + self.assertEqual(Options.error_on_unknown_names, False) + self.assertEqual(Options.error_on_uninitialized, False) + self.check_default_global_options(['error_on_unknown_names', 'error_on_uninitialized']) + + def test_annotate(self): + options, sources = parse_args(['foo.pyx', '--annotate']) + self.assertEqual(sources, ['foo.pyx']) + self.assertEqual(Options.annotate, 'default') + self.check_default_global_options(['annotate']) + + def test_annotate_fullc(self): + options, sources = parse_args(['foo.pyx', '--annotate-fullc']) + self.assertEqual(sources, ['foo.pyx']) + self.assertEqual(Options.annotate, 'fullc') + self.check_default_global_options(['annotate']) + + def test_no_docstrings(self): + options, sources = parse_args(['foo.pyx', '--no-docstrings']) + self.assertEqual(sources, ['foo.pyx']) + self.assertEqual(Options.docstrings, False) + self.check_default_global_options(['docstrings']) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestDependencies.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestDependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..d3888117d846d67f91fec9cf14d8f852a5a65d82 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestDependencies.py @@ -0,0 +1,142 @@ +import contextlib +import os.path +import sys +import tempfile +import unittest +from io import open +from os.path import join as pjoin + +from ..Dependencies import extended_iglob + + +@contextlib.contextmanager +def writable_file(dir_path, filename): + with open(pjoin(dir_path, filename), "w", encoding="utf8") as f: + yield f + + +class TestGlobbing(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls._orig_dir = os.getcwd() + if sys.version_info[0] < 3: + temp_path = cls._tmpdir = tempfile.mkdtemp() + else: + cls._tmpdir = tempfile.TemporaryDirectory() + temp_path = cls._tmpdir.name + os.chdir(temp_path) + + for dir1 in "abcd": + for dir1x in [dir1, dir1 + 'x']: + for dir2 in "xyz": + dir_path = pjoin(dir1x, dir2) + os.makedirs(dir_path) + with writable_file(dir_path, "file2_pyx.pyx") as f: + f.write(u'""" PYX """') + with writable_file(dir_path, "file2_py.py") as f: + f.write(u'""" PY """') + + with writable_file(dir1x, "file1_pyx.pyx") as f: + f.write(u'""" PYX """') + with writable_file(dir1x, "file1_py.py") as f: + 
f.write(u'""" PY """') + + @classmethod + def tearDownClass(cls): + os.chdir(cls._orig_dir) + if sys.version_info[0] < 3: + import shutil + shutil.rmtree(cls._tmpdir) + else: + cls._tmpdir.cleanup() + + def files_equal(self, pattern, expected_files): + expected_files = sorted(expected_files) + # It's the users's choice whether '/' will appear on Windows. + matched_files = sorted(path.replace('/', os.sep) for path in extended_iglob(pattern)) + self.assertListEqual(matched_files, expected_files) # / + + # Special case for Windows: also support '\' in patterns. + if os.sep == '\\' and '/' in pattern: + matched_files = sorted(extended_iglob(pattern.replace('/', '\\'))) + self.assertListEqual(matched_files, expected_files) # \ + + def test_extended_iglob_simple(self): + ax_files = [pjoin("a", "x", "file2_pyx.pyx"), pjoin("a", "x", "file2_py.py")] + self.files_equal("a/x/*", ax_files) + self.files_equal("a/x/*.c12", []) + self.files_equal("a/x/*.{py,pyx,c12}", ax_files) + self.files_equal("a/x/*.{py,pyx}", ax_files) + self.files_equal("a/x/*.{pyx}", ax_files[:1]) + self.files_equal("a/x/*.pyx", ax_files[:1]) + self.files_equal("a/x/*.{py}", ax_files[1:]) + self.files_equal("a/x/*.py", ax_files[1:]) + + def test_extended_iglob_simple_star(self): + for basedir in "ad": + files = [ + pjoin(basedir, dirname, filename) + for dirname in "xyz" + for filename in ["file2_pyx.pyx", "file2_py.py"] + ] + self.files_equal(basedir + "/*/*", files) + self.files_equal(basedir + "/*/*.c12", []) + self.files_equal(basedir + "/*/*.{py,pyx,c12}", files) + self.files_equal(basedir + "/*/*.{py,pyx}", files) + self.files_equal(basedir + "/*/*.{pyx}", files[::2]) + self.files_equal(basedir + "/*/*.pyx", files[::2]) + self.files_equal(basedir + "/*/*.{py}", files[1::2]) + self.files_equal(basedir + "/*/*.py", files[1::2]) + + for subdir in "xy*": + files = [ + pjoin(basedir, dirname, filename) + for dirname in "xyz" + if subdir in ('*', dirname) + for filename in ["file2_pyx.pyx", "file2_py.py"] + ] + path = basedir + '/' + subdir + '/' + self.files_equal(path + "*", files) + self.files_equal(path + "*.{py,pyx}", files) + self.files_equal(path + "*.{pyx}", files[::2]) + self.files_equal(path + "*.pyx", files[::2]) + self.files_equal(path + "*.{py}", files[1::2]) + self.files_equal(path + "*.py", files[1::2]) + + def test_extended_iglob_double_star(self): + basedirs = os.listdir(".") + files = [ + pjoin(basedir, dirname, filename) + for basedir in basedirs + for dirname in "xyz" + for filename in ["file2_pyx.pyx", "file2_py.py"] + ] + all_files = [ + pjoin(basedir, filename) + for basedir in basedirs + for filename in ["file1_pyx.pyx", "file1_py.py"] + ] + files + self.files_equal("*/*/*", files) + self.files_equal("*/*/**/*", files) + self.files_equal("*/**/*.*", all_files) + self.files_equal("**/*.*", all_files) + self.files_equal("*/**/*.c12", []) + self.files_equal("**/*.c12", []) + self.files_equal("*/*/*.{py,pyx,c12}", files) + self.files_equal("*/*/**/*.{py,pyx,c12}", files) + self.files_equal("*/**/*/*.{py,pyx,c12}", files) + self.files_equal("**/*/*/*.{py,pyx,c12}", files) + self.files_equal("**/*.{py,pyx,c12}", all_files) + self.files_equal("*/*/*.{py,pyx}", files) + self.files_equal("**/*/*/*.{py,pyx}", files) + self.files_equal("*/**/*/*.{py,pyx}", files) + self.files_equal("**/*.{py,pyx}", all_files) + self.files_equal("*/*/*.{pyx}", files[::2]) + self.files_equal("**/*.{pyx}", all_files[::2]) + self.files_equal("*/**/*/*.pyx", files[::2]) + self.files_equal("*/*/*.pyx", files[::2]) + 
self.files_equal("**/*.pyx", all_files[::2]) + self.files_equal("*/*/*.{py}", files[1::2]) + self.files_equal("**/*.{py}", all_files[1::2]) + self.files_equal("*/*/*.py", files[1::2]) + self.files_equal("**/*.py", all_files[1::2]) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestInline.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestInline.py new file mode 100644 index 0000000000000000000000000000000000000000..53346137052b7ea6d3f02a5407f2df266868c109 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestInline.py @@ -0,0 +1,112 @@ +import os +import tempfile +import unittest +from Cython.Shadow import inline +from Cython.Build.Inline import safe_type +from Cython.TestUtils import CythonTest + +try: + import numpy + has_numpy = True +except: + has_numpy = False + +test_kwds = dict(force=True, quiet=True) + +global_value = 100 + +class TestInline(CythonTest): + def setUp(self): + CythonTest.setUp(self) + self._call_kwds = dict(test_kwds) + if os.path.isdir('TEST_TMP'): + lib_dir = os.path.join('TEST_TMP','inline') + else: + lib_dir = tempfile.mkdtemp(prefix='cython_inline_') + self._call_kwds['lib_dir'] = lib_dir + + def test_simple(self): + self.assertEqual(inline("return 1+2", **self._call_kwds), 3) + + def test_types(self): + self.assertEqual(inline(""" + cimport cython + return cython.typeof(a), cython.typeof(b) + """, a=1.0, b=[], **self._call_kwds), ('double', 'list object')) + + def test_locals(self): + a = 1 + b = 2 + self.assertEqual(inline("return a+b", **self._call_kwds), 3) + + def test_globals(self): + self.assertEqual(inline("return global_value + 1", **self._call_kwds), global_value + 1) + + def test_no_return(self): + self.assertEqual(inline(""" + a = 1 + cdef double b = 2 + cdef c = [] + """, **self._call_kwds), dict(a=1, b=2.0, c=[])) + + def test_def_node(self): + foo = inline("def foo(x): return x * x", **self._call_kwds)['foo'] + self.assertEqual(foo(7), 49) + + def test_class_ref(self): + class Type(object): + pass + tp = inline("Type")['Type'] + self.assertEqual(tp, Type) + + def test_pure(self): + import cython as cy + b = inline(""" + b = cy.declare(float, a) + c = cy.declare(cy.pointer(cy.float), &b) + return b + """, a=3, **self._call_kwds) + self.assertEqual(type(b), float) + + def test_compiler_directives(self): + self.assertEqual( + inline('return sum(x)', + x=[1, 2, 3], + cython_compiler_directives={'boundscheck': False}), + 6 + ) + + def test_lang_version(self): + # GH-3419. Caching for inline code didn't always respect compiler directives. 
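+        # Note (added for clarity): with language_level=2, `a / b` on ints
+        # floors (5/2 == 2); with language_level=3 it is true division
+        # (5/2 == 2.5). If the cache key ignored the directive, the later
+        # calls would silently reuse the first build and return the wrong
+        # result, which is exactly what this test guards against.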
+ inline_divcode = "def f(int a, int b): return a/b" + self.assertEqual( + inline(inline_divcode, language_level=2)['f'](5,2), + 2 + ) + self.assertEqual( + inline(inline_divcode, language_level=3)['f'](5,2), + 2.5 + ) + self.assertEqual( + inline(inline_divcode, language_level=2)['f'](5,2), + 2 + ) + + def test_repeated_use(self): + inline_mulcode = "def f(int a, int b): return a * b" + self.assertEqual(inline(inline_mulcode)['f'](5, 2), 10) + self.assertEqual(inline(inline_mulcode)['f'](5, 3), 15) + self.assertEqual(inline(inline_mulcode)['f'](6, 2), 12) + self.assertEqual(inline(inline_mulcode)['f'](5, 2), 10) + + f = inline(inline_mulcode)['f'] + self.assertEqual(f(5, 2), 10) + self.assertEqual(f(5, 3), 15) + + @unittest.skipIf(not has_numpy, "NumPy is not available") + def test_numpy(self): + import numpy + a = numpy.ndarray((10, 20)) + a[0,0] = 10 + self.assertEqual(safe_type(a), 'numpy.ndarray[numpy.float64_t, ndim=2]') + self.assertEqual(inline("return a[0,0]", a=a, **self._call_kwds), 10.0) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestIpythonMagic.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestIpythonMagic.py new file mode 100644 index 0000000000000000000000000000000000000000..65d801c6b72e883fc766f034836234973d987ffe --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestIpythonMagic.py @@ -0,0 +1,295 @@ +# -*- coding: utf-8 -*- +# tag: ipython + +"""Tests for the Cython magics extension.""" + +from __future__ import absolute_import + +import os +import io +import sys +from contextlib import contextmanager +from unittest import skipIf + +from Cython.Build import IpythonMagic +from Cython.TestUtils import CythonTest +from Cython.Compiler.Annotate import AnnotationCCodeWriter + +try: + import IPython.testing.globalipapp +except ImportError: + # Disable tests and fake helpers for initialisation below. 
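+    # Note (added for clarity): returning None replaces the decorated test
+    # class with None in the module namespace, so unittest discovers no
+    # tests at all when IPython is unavailable.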
+ def skip_if_not_installed(_): + return None +else: + def skip_if_not_installed(c): + return c + +# not using IPython's decorators here because they depend on "nose" +skip_win32 = skipIf(sys.platform == 'win32', "Skip on Windows") +skip_py27 = skipIf(sys.version_info[:2] == (2,7), "Disabled in Py2.7") + +try: + # disable IPython history thread before it gets started to avoid having to clean it up + from IPython.core.history import HistoryManager + HistoryManager.enabled = False +except ImportError: + pass + + +@contextmanager +def capture_output(): + backup = sys.stdout, sys.stderr + try: + replacement = [ + io.TextIOWrapper(io.BytesIO(), encoding=sys.stdout.encoding), + io.TextIOWrapper(io.BytesIO(), encoding=sys.stderr.encoding), + ] + sys.stdout, sys.stderr = replacement + output = [] + yield output + finally: + sys.stdout, sys.stderr = backup + for wrapper in replacement: + wrapper.seek(0) # rewind + output.append(wrapper.read()) + wrapper.close() + + +code = u"""\ +def f(x): + return 2*x +""" + +cython3_code = u"""\ +def f(int x): + return 2 / x + +def call(x): + return f(*(x,)) +""" + +pgo_cython3_code = cython3_code + u"""\ +def main(): + for _ in range(100): call(5) +main() +""" + +compile_error_code = u'''\ +cdef extern from *: + """ + xxx a=1; + """ + int a; +def doit(): + return a +''' + +compile_warning_code = u'''\ +cdef extern from *: + """ + #pragma message ( "CWarning" ) + int a = 42; + """ + int a; +def doit(): + return a +''' + + +@skip_if_not_installed +class TestIPythonMagic(CythonTest): + + @classmethod + def setUpClass(cls): + CythonTest.setUpClass() + cls._ip = IPython.testing.globalipapp.get_ipython() + + def setUp(self): + CythonTest.setUp(self) + self._ip.extension_manager.load_extension('cython') + + def test_cython_inline(self): + ip = self._ip + ip.ex('a=10; b=20') + result = ip.run_cell_magic('cython_inline', '', 'return a+b') + self.assertEqual(result, 30) + + @skip_win32 + def test_cython_pyximport(self): + ip = self._ip + module_name = '_test_cython_pyximport' + ip.run_cell_magic('cython_pyximport', module_name, code) + ip.ex('g = f(10)') + self.assertEqual(ip.user_ns['g'], 20.0) + ip.run_cell_magic('cython_pyximport', module_name, code) + ip.ex('h = f(-10)') + self.assertEqual(ip.user_ns['h'], -20.0) + try: + os.remove(module_name + '.pyx') + except OSError: + pass + + def test_cython(self): + ip = self._ip + ip.run_cell_magic('cython', '', code) + ip.ex('g = f(10)') + self.assertEqual(ip.user_ns['g'], 20.0) + + def test_cython_name(self): + # The Cython module named 'mymodule' defines the function f. + ip = self._ip + ip.run_cell_magic('cython', '--name=mymodule', code) + # This module can now be imported in the interactive namespace. + ip.ex('import mymodule; g = mymodule.f(10)') + self.assertEqual(ip.user_ns['g'], 20.0) + + def test_cython_language_level(self): + # The Cython cell defines the functions f() and call(). + ip = self._ip + ip.run_cell_magic('cython', '', cython3_code) + ip.ex('g = f(10); h = call(10)') + if sys.version_info[0] < 3: + self.assertEqual(ip.user_ns['g'], 2 // 10) + self.assertEqual(ip.user_ns['h'], 2 // 10) + else: + self.assertEqual(ip.user_ns['g'], 2.0 / 10.0) + self.assertEqual(ip.user_ns['h'], 2.0 / 10.0) + + def test_cython3(self): + # The Cython cell defines the functions f() and call(). 
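+        # Note (added for clarity): `-3` forces Py3 semantics, so `2 / x`
+        # is true division and f(10) == 0.2 even on a Py2 interpreter.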
+ ip = self._ip + ip.run_cell_magic('cython', '-3', cython3_code) + ip.ex('g = f(10); h = call(10)') + self.assertEqual(ip.user_ns['g'], 2.0 / 10.0) + self.assertEqual(ip.user_ns['h'], 2.0 / 10.0) + + def test_cython2(self): + # The Cython cell defines the functions f() and call(). + ip = self._ip + ip.run_cell_magic('cython', '-2', cython3_code) + ip.ex('g = f(10); h = call(10)') + self.assertEqual(ip.user_ns['g'], 2 // 10) + self.assertEqual(ip.user_ns['h'], 2 // 10) + + def test_cython_compile_error_shown(self): + ip = self._ip + with capture_output() as out: + ip.run_cell_magic('cython', '-3', compile_error_code) + captured_out, captured_err = out + + # it could be that c-level output is captured by distutil-extension + # (and not by us) and is printed to stdout: + captured_all = captured_out + "\n" + captured_err + self.assertTrue("error" in captured_all, msg="error in " + captured_all) + + def test_cython_link_error_shown(self): + ip = self._ip + with capture_output() as out: + ip.run_cell_magic('cython', '-3 -l=xxxxxxxx', code) + captured_out, captured_err = out + + # it could be that c-level output is captured by distutil-extension + # (and not by us) and is printed to stdout: + captured_all = captured_out + "\n!" + captured_err + self.assertTrue("error" in captured_all, msg="error in " + captured_all) + + def test_cython_warning_shown(self): + ip = self._ip + with capture_output() as out: + # force rebuild, otherwise no warning as after the first success + # no build step is performed + ip.run_cell_magic('cython', '-3 -f', compile_warning_code) + captured_out, captured_err = out + + # check that warning was printed to stdout even if build hasn't failed + self.assertTrue("CWarning" in captured_out) + + @skip_py27 # Not strictly broken in Py2.7 but currently fails in CI due to C compiler issues. + @skip_win32 + def test_cython3_pgo(self): + # The Cython cell defines the functions f() and call(). 
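+        # Note (added for clarity, based on the magic's documented
+        # behaviour): --pgo compiles the cell twice, first with profiling
+        # instrumentation and then again using the collected profile; the
+        # module-level main() call in pgo_cython3_code is what generates
+        # that profile during the first import.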
+ ip = self._ip + ip.run_cell_magic('cython', '-3 --pgo', pgo_cython3_code) + ip.ex('g = f(10); h = call(10); main()') + self.assertEqual(ip.user_ns['g'], 2.0 / 10.0) + self.assertEqual(ip.user_ns['h'], 2.0 / 10.0) + + @skip_win32 + def test_extlibs(self): + ip = self._ip + code = u""" +from libc.math cimport sin +x = sin(0.0) + """ + ip.user_ns['x'] = 1 + ip.run_cell_magic('cython', '-l m', code) + self.assertEqual(ip.user_ns['x'], 0) + + + def test_cython_verbose(self): + ip = self._ip + ip.run_cell_magic('cython', '--verbose', code) + ip.ex('g = f(10)') + self.assertEqual(ip.user_ns['g'], 20.0) + + def test_cython_verbose_thresholds(self): + @contextmanager + def mock_distutils(): + class MockLog: + DEBUG = 1 + INFO = 2 + thresholds = [INFO] + + def set_threshold(self, val): + self.thresholds.append(val) + return self.thresholds[-2] + + + new_log = MockLog() + old_log = IpythonMagic.distutils.log + try: + IpythonMagic.distutils.log = new_log + yield new_log + finally: + IpythonMagic.distutils.log = old_log + + ip = self._ip + with mock_distutils() as verbose_log: + ip.run_cell_magic('cython', '--verbose', code) + ip.ex('g = f(10)') + self.assertEqual(ip.user_ns['g'], 20.0) + self.assertEqual([verbose_log.INFO, verbose_log.DEBUG, verbose_log.INFO], + verbose_log.thresholds) + + with mock_distutils() as normal_log: + ip.run_cell_magic('cython', '', code) + ip.ex('g = f(10)') + self.assertEqual(ip.user_ns['g'], 20.0) + self.assertEqual([normal_log.INFO], normal_log.thresholds) + + def test_cython_no_annotate(self): + ip = self._ip + html = ip.run_cell_magic('cython', '', code) + self.assertTrue(html is None) + + def test_cython_annotate(self): + ip = self._ip + html = ip.run_cell_magic('cython', '--annotate', code) + # somewhat brittle way to differentiate between annotated htmls + # with/without complete source code: + self.assertTrue(AnnotationCCodeWriter.COMPLETE_CODE_TITLE not in html.data) + + def test_cython_annotate_default(self): + ip = self._ip + html = ip.run_cell_magic('cython', '-a', code) + # somewhat brittle way to differentiate between annotated htmls + # with/without complete source code: + self.assertTrue(AnnotationCCodeWriter.COMPLETE_CODE_TITLE not in html.data) + + def test_cython_annotate_complete_c_code(self): + ip = self._ip + html = ip.run_cell_magic('cython', '--annotate-fullc', code) + # somewhat brittle way to differentiate between annotated htmls + # with/without complete source code: + self.assertTrue(AnnotationCCodeWriter.COMPLETE_CODE_TITLE in html.data) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestRecythonize.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestRecythonize.py new file mode 100644 index 0000000000000000000000000000000000000000..eb87018cb8770832852d50a210afbdec45d2fd36 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestRecythonize.py @@ -0,0 +1,212 @@ +import shutil +import os +import tempfile +import time + +import Cython.Build.Dependencies +import Cython.Utils +from Cython.TestUtils import CythonTest + + +def fresh_cythonize(*args, **kwargs): + Cython.Utils.clear_function_caches() + Cython.Build.Dependencies._dep_tree = None # discard method caches + Cython.Build.Dependencies.cythonize(*args, **kwargs) + +class TestRecythonize(CythonTest): + + def setUp(self): + CythonTest.setUp(self) + self.temp_dir = ( + tempfile.mkdtemp( + prefix='recythonize-test', + dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None + ) + ) + + def tearDown(self): + CythonTest.tearDown(self) + 
shutil.rmtree(self.temp_dir) + + def test_recythonize_pyx_on_pxd_change(self): + + src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + + a_pxd = os.path.join(src_dir, 'a.pxd') + a_pyx = os.path.join(src_dir, 'a.pyx') + a_c = os.path.join(src_dir, 'a.c') + dep_tree = Cython.Build.Dependencies.create_dependency_tree() + + with open(a_pxd, 'w') as f: + f.write('cdef int value\n') + + with open(a_pyx, 'w') as f: + f.write('value = 1\n') + + + # The dependencies for "a.pyx" are "a.pxd" and "a.pyx". + self.assertEqual({a_pxd, a_pyx}, dep_tree.all_dependencies(a_pyx)) + + # Cythonize to create a.c + fresh_cythonize(a_pyx) + + # Sleep to address coarse time-stamp precision. + time.sleep(1) + + with open(a_c) as f: + a_c_contents1 = f.read() + + with open(a_pxd, 'w') as f: + f.write('cdef double value\n') + + fresh_cythonize(a_pyx) + + with open(a_c) as f: + a_c_contents2 = f.read() + + self.assertTrue("__pyx_v_1a_value = 1;" in a_c_contents1) + self.assertFalse("__pyx_v_1a_value = 1;" in a_c_contents2) + self.assertTrue("__pyx_v_1a_value = 1.0;" in a_c_contents2) + self.assertFalse("__pyx_v_1a_value = 1.0;" in a_c_contents1) + + + def test_recythonize_py_on_pxd_change(self): + + src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + + a_pxd = os.path.join(src_dir, 'a.pxd') + a_py = os.path.join(src_dir, 'a.py') + a_c = os.path.join(src_dir, 'a.c') + dep_tree = Cython.Build.Dependencies.create_dependency_tree() + + with open(a_pxd, 'w') as f: + f.write('cdef int value\n') + + with open(a_py, 'w') as f: + f.write('value = 1\n') + + + # The dependencies for "a.py" are "a.pxd" and "a.py". + self.assertEqual({a_pxd, a_py}, dep_tree.all_dependencies(a_py)) + + # Cythonize to create a.c + fresh_cythonize(a_py) + + # Sleep to address coarse time-stamp precision. + time.sleep(1) + + with open(a_c) as f: + a_c_contents1 = f.read() + + with open(a_pxd, 'w') as f: + f.write('cdef double value\n') + + fresh_cythonize(a_py) + + with open(a_c) as f: + a_c_contents2 = f.read() + + + self.assertTrue("__pyx_v_1a_value = 1;" in a_c_contents1) + self.assertFalse("__pyx_v_1a_value = 1;" in a_c_contents2) + self.assertTrue("__pyx_v_1a_value = 1.0;" in a_c_contents2) + self.assertFalse("__pyx_v_1a_value = 1.0;" in a_c_contents1) + + def test_recythonize_pyx_on_dep_pxd_change(self): + src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + + a_pxd = os.path.join(src_dir, 'a.pxd') + a_pyx = os.path.join(src_dir, 'a.pyx') + b_pyx = os.path.join(src_dir, 'b.pyx') + b_c = os.path.join(src_dir, 'b.c') + dep_tree = Cython.Build.Dependencies.create_dependency_tree() + + with open(a_pxd, 'w') as f: + f.write('cdef int value\n') + + with open(a_pyx, 'w') as f: + f.write('value = 1\n') + + with open(b_pyx, 'w') as f: + f.write('cimport a\n' + 'a.value = 2\n') + + + # The dependencies for "b.pyx" are "a.pxd" and "b.pyx". + self.assertEqual({a_pxd, b_pyx}, dep_tree.all_dependencies(b_pyx)) + + + # Cythonize to create b.c + fresh_cythonize([a_pyx, b_pyx]) + + # Sleep to address coarse time-stamp precision. 
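+        # Note (added for clarity): many filesystems store mtimes at one-
+        # or two-second granularity, so without this pause the rewritten
+        # .pxd could share a timestamp with the generated .c file and the
+        # dependency check would wrongly consider it up to date.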
+ time.sleep(1) + + with open(b_c) as f: + b_c_contents1 = f.read() + + with open(a_pxd, 'w') as f: + f.write('cdef double value\n') + + fresh_cythonize([a_pyx, b_pyx]) + + with open(b_c) as f: + b_c_contents2 = f.read() + + + + self.assertTrue("__pyx_v_1a_value = 2;" in b_c_contents1) + self.assertFalse("__pyx_v_1a_value = 2;" in b_c_contents2) + self.assertTrue("__pyx_v_1a_value = 2.0;" in b_c_contents2) + self.assertFalse("__pyx_v_1a_value = 2.0;" in b_c_contents1) + + + + def test_recythonize_py_on_dep_pxd_change(self): + + src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir) + + a_pxd = os.path.join(src_dir, 'a.pxd') + a_pyx = os.path.join(src_dir, 'a.pyx') + b_pxd = os.path.join(src_dir, 'b.pxd') + b_py = os.path.join(src_dir, 'b.py') + b_c = os.path.join(src_dir, 'b.c') + dep_tree = Cython.Build.Dependencies.create_dependency_tree() + + with open(a_pxd, 'w') as f: + f.write('cdef int value\n') + + with open(a_pyx, 'w') as f: + f.write('value = 1\n') + + with open(b_pxd, 'w') as f: + f.write('cimport a\n') + + with open(b_py, 'w') as f: + f.write('a.value = 2\n') + + + # The dependencies for b.py are "a.pxd", "b.pxd" and "b.py". + self.assertEqual({a_pxd, b_pxd, b_py}, dep_tree.all_dependencies(b_py)) + + + # Cythonize to create b.c + fresh_cythonize([a_pyx, b_py]) + + # Sleep to address coarse time-stamp precision. + time.sleep(1) + + with open(b_c) as f: + b_c_contents1 = f.read() + + with open(a_pxd, 'w') as f: + f.write('cdef double value\n') + + fresh_cythonize([a_pyx, b_py]) + + with open(b_c) as f: + b_c_contents2 = f.read() + + self.assertTrue("__pyx_v_1a_value = 2;" in b_c_contents1) + self.assertFalse("__pyx_v_1a_value = 2;" in b_c_contents2) + self.assertTrue("__pyx_v_1a_value = 2.0;" in b_c_contents2) + self.assertFalse("__pyx_v_1a_value = 2.0;" in b_c_contents1) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestStripLiterals.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestStripLiterals.py new file mode 100644 index 0000000000000000000000000000000000000000..cbe5c65a906b1759f17a026d63213d0c936ab66e --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestStripLiterals.py @@ -0,0 +1,56 @@ +from Cython.Build.Dependencies import strip_string_literals + +from Cython.TestUtils import CythonTest + +class TestStripLiterals(CythonTest): + + def t(self, before, expected): + actual, literals = strip_string_literals(before, prefix="_L") + self.assertEqual(expected, actual) + for key, value in literals.items(): + actual = actual.replace(key, value) + self.assertEqual(before, actual) + + def test_empty(self): + self.t("", "") + + def test_single_quote(self): + self.t("'x'", "'_L1_'") + + def test_double_quote(self): + self.t('"x"', '"_L1_"') + + def test_nested_quotes(self): + self.t(""" '"' "'" """, """ '_L1_' "_L2_" """) + + def test_triple_quote(self): + self.t(" '''a\n''' ", " '''_L1_''' ") + + def test_backslash(self): + self.t(r"'a\'b'", "'_L1_'") + self.t(r"'a\\'", "'_L1_'") + self.t(r"'a\\\'b'", "'_L1_'") + + def test_unicode(self): + self.t("u'abc'", "u'_L1_'") + + def test_raw(self): + self.t(r"r'abc\\'", "r'_L1_'") + + def test_raw_unicode(self): + self.t(r"ru'abc\\'", "ru'_L1_'") + + def test_comment(self): + self.t("abc # foo", "abc #_L1_") + + def test_comment_and_quote(self): + self.t("abc # 'x'", "abc #_L1_") + self.t("'abc#'", "'_L1_'") + + def test_include(self): + self.t("include 'a.pxi' # something here", + "include '_L1_' #_L2_") + + def test_extern(self): + self.t("cdef extern from 'a.h': 
# comment", + "cdef extern from '_L1_': #_L2_") diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa81adaff68e06d8e915a6afa375f62f7e5a8fad --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/__init__.py @@ -0,0 +1 @@ +# empty file diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4e4775fc3a94d0ff945d4ff3cb7a656d4f5fbe8a --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/__init__.py @@ -0,0 +1,14 @@ +from .Dependencies import cythonize + +import sys +if sys.version_info < (3, 7): + from .Distutils import build_ext +del sys + + +def __getattr__(name): + if name == 'build_ext': + # Lazy import, fails if distutils is not available (in Python 3.12+). + from .Distutils import build_ext + return build_ext + raise AttributeError("module '%s' has no attribute '%s'" % (__name__, name)) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/CodeWriter.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/CodeWriter.py new file mode 100644 index 0000000000000000000000000000000000000000..f386da21ca7ff86b9fc84eee91b7ca610cb89288 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/CodeWriter.py @@ -0,0 +1,820 @@ +""" +Serializes a Cython code tree to Cython code. This is primarily useful for +debugging and testing purposes. +The output is in a strict format, no whitespace or comments from the input +is preserved (and it could not be as it is not present in the code tree). +""" + +from __future__ import absolute_import, print_function + +from .Compiler.Visitor import TreeVisitor +from .Compiler.ExprNodes import * +from .Compiler.Nodes import CSimpleBaseTypeNode + + +class LinesResult(object): + def __init__(self): + self.lines = [] + self.s = u"" + + def put(self, s): + self.s += s + + def newline(self): + self.lines.append(self.s) + self.s = u"" + + def putline(self, s): + self.put(s) + self.newline() + + +class DeclarationWriter(TreeVisitor): + """ + A Cython code writer that is limited to declarations nodes. 
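+    StatementWriter and ExpressionWriter below extend it to statements and
+    expressions, and CodeWriter combines them into a complete writer.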
+ """ + + indent_string = u" " + + def __init__(self, result=None): + super(DeclarationWriter, self).__init__() + if result is None: + result = LinesResult() + self.result = result + self.numindents = 0 + self.tempnames = {} + self.tempblockindex = 0 + + def write(self, tree): + self.visit(tree) + return self.result + + def indent(self): + self.numindents += 1 + + def dedent(self): + self.numindents -= 1 + + def startline(self, s=u""): + self.result.put(self.indent_string * self.numindents + s) + + def put(self, s): + self.result.put(s) + + def putline(self, s): + self.result.putline(self.indent_string * self.numindents + s) + + def endline(self, s=u""): + self.result.putline(s) + + def line(self, s): + self.startline(s) + self.endline() + + def comma_separated_list(self, items, output_rhs=False): + if len(items) > 0: + for item in items[:-1]: + self.visit(item) + if output_rhs and item.default is not None: + self.put(u" = ") + self.visit(item.default) + self.put(u", ") + self.visit(items[-1]) + if output_rhs and items[-1].default is not None: + self.put(u" = ") + self.visit(items[-1].default) + + def _visit_indented(self, node): + self.indent() + self.visit(node) + self.dedent() + + def visit_Node(self, node): + raise AssertionError("Node not handled by serializer: %r" % node) + + def visit_ModuleNode(self, node): + self.visitchildren(node) + + def visit_StatListNode(self, node): + self.visitchildren(node) + + def visit_CDefExternNode(self, node): + if node.include_file is None: + file = u'*' + else: + file = u'"%s"' % node.include_file + self.putline(u"cdef extern from %s:" % file) + self._visit_indented(node.body) + + def visit_CPtrDeclaratorNode(self, node): + self.put('*') + self.visit(node.base) + + def visit_CReferenceDeclaratorNode(self, node): + self.put('&') + self.visit(node.base) + + def visit_CArrayDeclaratorNode(self, node): + self.visit(node.base) + self.put(u'[') + if node.dimension is not None: + self.visit(node.dimension) + self.put(u']') + + def visit_CFuncDeclaratorNode(self, node): + # TODO: except, gil, etc. 
+ self.visit(node.base) + self.put(u'(') + self.comma_separated_list(node.args) + self.endline(u')') + + def visit_CNameDeclaratorNode(self, node): + self.put(node.name) + + def visit_CSimpleBaseTypeNode(self, node): + # See Parsing.p_sign_and_longness + if node.is_basic_c_type: + self.put(("unsigned ", "", "signed ")[node.signed]) + if node.longness < 0: + self.put("short " * -node.longness) + elif node.longness > 0: + self.put("long " * node.longness) + if node.name is not None: + self.put(node.name) + + def visit_CComplexBaseTypeNode(self, node): + self.visit(node.base_type) + self.visit(node.declarator) + + def visit_CNestedBaseTypeNode(self, node): + self.visit(node.base_type) + self.put(u'.') + self.put(node.name) + + def visit_TemplatedTypeNode(self, node): + self.visit(node.base_type_node) + self.put(u'[') + self.comma_separated_list(node.positional_args + node.keyword_args.key_value_pairs) + self.put(u']') + + def visit_CVarDefNode(self, node): + self.startline(u"cdef ") + self.visit(node.base_type) + self.put(u" ") + self.comma_separated_list(node.declarators, output_rhs=True) + self.endline() + + def _visit_container_node(self, node, decl, extras, attributes): + # TODO: visibility + self.startline(decl) + if node.name: + self.put(u' ') + self.put(node.name) + if node.cname is not None: + self.put(u' "%s"' % node.cname) + if extras: + self.put(extras) + self.endline(':') + self.indent() + if not attributes: + self.putline('pass') + else: + for attribute in attributes: + self.visit(attribute) + self.dedent() + + def visit_CStructOrUnionDefNode(self, node): + if node.typedef_flag: + decl = u'ctypedef ' + else: + decl = u'cdef ' + if node.visibility == 'public': + decl += u'public ' + if node.packed: + decl += u'packed ' + decl += node.kind + self._visit_container_node(node, decl, None, node.attributes) + + def visit_CppClassNode(self, node): + extras = "" + if node.templates: + extras = u"[%s]" % ", ".join(node.templates) + if node.base_classes: + extras += "(%s)" % ", ".join(node.base_classes) + self._visit_container_node(node, u"cdef cppclass", extras, node.attributes) + + def visit_CEnumDefNode(self, node): + self._visit_container_node(node, u"cdef enum", None, node.items) + + def visit_CEnumDefItemNode(self, node): + self.startline(node.name) + if node.cname: + self.put(u' "%s"' % node.cname) + if node.value: + self.put(u" = ") + self.visit(node.value) + self.endline() + + def visit_CClassDefNode(self, node): + assert not node.module_name + if node.decorators: + for decorator in node.decorators: + self.visit(decorator) + self.startline(u"cdef class ") + self.put(node.class_name) + if node.base_class_name: + self.put(u"(") + if node.base_class_module: + self.put(node.base_class_module) + self.put(u".") + self.put(node.base_class_name) + self.put(u")") + self.endline(u":") + self._visit_indented(node.body) + + def visit_CTypeDefNode(self, node): + self.startline(u"ctypedef ") + self.visit(node.base_type) + self.put(u" ") + self.visit(node.declarator) + self.endline() + + def visit_FuncDefNode(self, node): + # TODO: support cdef + cpdef functions + self.startline(u"def %s(" % node.name) + self.comma_separated_list(node.args) + self.endline(u"):") + self._visit_indented(node.body) + + def visit_CFuncDefNode(self, node): + self.startline(u'cpdef ' if node.overridable else u'cdef ') + if node.modifiers: + self.put(' '.join(node.modifiers)) + self.put(' ') + if node.visibility != 'private': + self.put(node.visibility) + self.put(u' ') + if node.api: + self.put(u'api ') + + if 
node.base_type: + self.visit(node.base_type) + if node.base_type.name is not None: + self.put(u' ') + + # visit the CFuncDeclaratorNode, but put a `:` at the end of line + self.visit(node.declarator.base) + self.put(u'(') + self.comma_separated_list(node.declarator.args) + self.endline(u'):') + + self._visit_indented(node.body) + + def visit_CArgDeclNode(self, node): + # For "CSimpleBaseTypeNode", the variable type may have been parsed as type. + # For other node types, the "name" is always None. + if not isinstance(node.base_type, CSimpleBaseTypeNode) or \ + node.base_type.name is not None: + self.visit(node.base_type) + + # If we printed something for "node.base_type", we may need to print an extra ' '. + # + # Special case: if "node.declarator" is a "CNameDeclaratorNode", + # its "name" might be an empty string, for example, for "cdef f(x)". + if node.declarator.declared_name(): + self.put(u" ") + self.visit(node.declarator) + if node.default is not None: + self.put(u" = ") + self.visit(node.default) + + def visit_CImportStatNode(self, node): + self.startline(u"cimport ") + self.put(node.module_name) + if node.as_name: + self.put(u" as ") + self.put(node.as_name) + self.endline() + + def visit_FromCImportStatNode(self, node): + self.startline(u"from ") + self.put(node.module_name) + self.put(u" cimport ") + first = True + for pos, name, as_name, kind in node.imported_names: + assert kind is None + if first: + first = False + else: + self.put(u", ") + self.put(name) + if as_name: + self.put(u" as ") + self.put(as_name) + self.endline() + + def visit_NameNode(self, node): + self.put(node.name) + + def visit_DecoratorNode(self, node): + self.startline("@") + self.visit(node.decorator) + self.endline() + + def visit_PassStatNode(self, node): + self.startline(u"pass") + self.endline() + + +class StatementWriter(DeclarationWriter): + """ + A Cython code writer for most language statement features. + """ + + def visit_SingleAssignmentNode(self, node): + self.startline() + self.visit(node.lhs) + self.put(u" = ") + self.visit(node.rhs) + self.endline() + + def visit_CascadedAssignmentNode(self, node): + self.startline() + for lhs in node.lhs_list: + self.visit(lhs) + self.put(u" = ") + self.visit(node.rhs) + self.endline() + + def visit_PrintStatNode(self, node): + self.startline(u"print ") + self.comma_separated_list(node.arg_tuple.args) + if not node.append_newline: + self.put(u",") + self.endline() + + def visit_ForInStatNode(self, node): + self.startline(u"for ") + if node.target.is_sequence_constructor: + self.comma_separated_list(node.target.args) + else: + self.visit(node.target) + self.put(u" in ") + self.visit(node.iterator.sequence) + self.endline(u":") + self._visit_indented(node.body) + if node.else_clause is not None: + self.line(u"else:") + self._visit_indented(node.else_clause) + + def visit_IfStatNode(self, node): + # The IfClauseNode is handled directly without a separate match + # for clariy. 
+ self.startline(u"if ") + self.visit(node.if_clauses[0].condition) + self.endline(":") + self._visit_indented(node.if_clauses[0].body) + for clause in node.if_clauses[1:]: + self.startline("elif ") + self.visit(clause.condition) + self.endline(":") + self._visit_indented(clause.body) + if node.else_clause is not None: + self.line("else:") + self._visit_indented(node.else_clause) + + def visit_WhileStatNode(self, node): + self.startline(u"while ") + self.visit(node.condition) + self.endline(u":") + self._visit_indented(node.body) + if node.else_clause is not None: + self.line("else:") + self._visit_indented(node.else_clause) + + def visit_ContinueStatNode(self, node): + self.line(u"continue") + + def visit_BreakStatNode(self, node): + self.line(u"break") + + def visit_SequenceNode(self, node): + self.comma_separated_list(node.args) # Might need to discover whether we need () around tuples...hmm... + + def visit_ExprStatNode(self, node): + self.startline() + self.visit(node.expr) + self.endline() + + def visit_InPlaceAssignmentNode(self, node): + self.startline() + self.visit(node.lhs) + self.put(u" %s= " % node.operator) + self.visit(node.rhs) + self.endline() + + def visit_WithStatNode(self, node): + self.startline() + self.put(u"with ") + self.visit(node.manager) + if node.target is not None: + self.put(u" as ") + self.visit(node.target) + self.endline(u":") + self._visit_indented(node.body) + + def visit_TryFinallyStatNode(self, node): + self.line(u"try:") + self._visit_indented(node.body) + self.line(u"finally:") + self._visit_indented(node.finally_clause) + + def visit_TryExceptStatNode(self, node): + self.line(u"try:") + self._visit_indented(node.body) + for x in node.except_clauses: + self.visit(x) + if node.else_clause is not None: + self.visit(node.else_clause) + + def visit_ExceptClauseNode(self, node): + self.startline(u"except") + if node.pattern is not None: + self.put(u" ") + self.visit(node.pattern) + if node.target is not None: + self.put(u", ") + self.visit(node.target) + self.endline(":") + self._visit_indented(node.body) + + def visit_ReturnStatNode(self, node): + self.startline("return") + if node.value is not None: + self.put(u" ") + self.visit(node.value) + self.endline() + + def visit_ReraiseStatNode(self, node): + self.line("raise") + + def visit_ImportNode(self, node): + self.put(u"(import %s)" % node.module_name.value) + + def visit_TempsBlockNode(self, node): + """ + Temporaries are output like $1_1', where the first number is + an index of the TempsBlockNode and the second number is an index + of the temporary which that block allocates. + """ + idx = 0 + for handle in node.temps: + self.tempnames[handle] = "$%d_%d" % (self.tempblockindex, idx) + idx += 1 + self.tempblockindex += 1 + self.visit(node.body) + + def visit_TempRefNode(self, node): + self.put(self.tempnames[node.handle]) + + +class ExpressionWriter(TreeVisitor): + """ + A Cython code writer that is intentionally limited to expressions. 
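+    Unlike DeclarationWriter it accumulates a plain text string rather
+    than a list of lines; AutoDocTransforms reuses it to render
+    annotations and default values for embedded signatures.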
+ """ + + def __init__(self, result=None): + super(ExpressionWriter, self).__init__() + if result is None: + result = u"" + self.result = result + self.precedence = [0] + + def write(self, tree): + self.visit(tree) + return self.result + + def put(self, s): + self.result += s + + def remove(self, s): + if self.result.endswith(s): + self.result = self.result[:-len(s)] + + def comma_separated_list(self, items): + if len(items) > 0: + for item in items[:-1]: + self.visit(item) + self.put(u", ") + self.visit(items[-1]) + + def visit_Node(self, node): + raise AssertionError("Node not handled by serializer: %r" % node) + + def visit_IntNode(self, node): + self.put(node.value) + + def visit_FloatNode(self, node): + self.put(node.value) + + def visit_NoneNode(self, node): + self.put(u"None") + + def visit_NameNode(self, node): + self.put(node.name) + + def visit_EllipsisNode(self, node): + self.put(u"...") + + def visit_BoolNode(self, node): + self.put(str(node.value)) + + def visit_ConstNode(self, node): + self.put(str(node.value)) + + def visit_ImagNode(self, node): + self.put(node.value) + self.put(u"j") + + def emit_string(self, node, prefix=u""): + repr_val = repr(node.value) + if repr_val[0] in 'ub': + repr_val = repr_val[1:] + self.put(u"%s%s" % (prefix, repr_val)) + + def visit_BytesNode(self, node): + self.emit_string(node, u"b") + + def visit_StringNode(self, node): + self.emit_string(node) + + def visit_UnicodeNode(self, node): + self.emit_string(node, u"u") + + def emit_sequence(self, node, parens=(u"", u"")): + open_paren, close_paren = parens + items = node.subexpr_nodes() + self.put(open_paren) + self.comma_separated_list(items) + self.put(close_paren) + + def visit_ListNode(self, node): + self.emit_sequence(node, u"[]") + + def visit_TupleNode(self, node): + self.emit_sequence(node, u"()") + + def visit_SetNode(self, node): + if len(node.subexpr_nodes()) > 0: + self.emit_sequence(node, u"{}") + else: + self.put(u"set()") + + def visit_DictNode(self, node): + self.emit_sequence(node, u"{}") + + def visit_DictItemNode(self, node): + self.visit(node.key) + self.put(u": ") + self.visit(node.value) + + unop_precedence = { + 'not': 3, '!': 3, + '+': 11, '-': 11, '~': 11, + } + binop_precedence = { + 'or': 1, + 'and': 2, + # unary: 'not': 3, '!': 3, + 'in': 4, 'not_in': 4, 'is': 4, 'is_not': 4, '<': 4, '<=': 4, '>': 4, '>=': 4, '!=': 4, '==': 4, + '|': 5, + '^': 6, + '&': 7, + '<<': 8, '>>': 8, + '+': 9, '-': 9, + '*': 10, '@': 10, '/': 10, '//': 10, '%': 10, + # unary: '+': 11, '-': 11, '~': 11 + '**': 12, + } + + def operator_enter(self, new_prec): + old_prec = self.precedence[-1] + if old_prec > new_prec: + self.put(u"(") + self.precedence.append(new_prec) + + def operator_exit(self): + old_prec, new_prec = self.precedence[-2:] + if old_prec > new_prec: + self.put(u")") + self.precedence.pop() + + def visit_NotNode(self, node): + op = 'not' + prec = self.unop_precedence[op] + self.operator_enter(prec) + self.put(u"not ") + self.visit(node.operand) + self.operator_exit() + + def visit_UnopNode(self, node): + op = node.operator + prec = self.unop_precedence[op] + self.operator_enter(prec) + self.put(u"%s" % node.operator) + self.visit(node.operand) + self.operator_exit() + + def visit_BinopNode(self, node): + op = node.operator + prec = self.binop_precedence.get(op, 0) + self.operator_enter(prec) + self.visit(node.operand1) + self.put(u" %s " % op.replace('_', ' ')) + self.visit(node.operand2) + self.operator_exit() + + def visit_BoolBinopNode(self, node): + self.visit_BinopNode(node) + 
+ def visit_PrimaryCmpNode(self, node): + self.visit_BinopNode(node) + + def visit_IndexNode(self, node): + self.visit(node.base) + self.put(u"[") + if isinstance(node.index, TupleNode): + if node.index.subexpr_nodes(): + self.emit_sequence(node.index) + else: + self.put(u"()") + else: + self.visit(node.index) + self.put(u"]") + + def visit_SliceIndexNode(self, node): + self.visit(node.base) + self.put(u"[") + if node.start: + self.visit(node.start) + self.put(u":") + if node.stop: + self.visit(node.stop) + if node.slice: + self.put(u":") + self.visit(node.slice) + self.put(u"]") + + def visit_SliceNode(self, node): + if not node.start.is_none: + self.visit(node.start) + self.put(u":") + if not node.stop.is_none: + self.visit(node.stop) + if not node.step.is_none: + self.put(u":") + self.visit(node.step) + + def visit_CondExprNode(self, node): + self.visit(node.true_val) + self.put(u" if ") + self.visit(node.test) + self.put(u" else ") + self.visit(node.false_val) + + def visit_AttributeNode(self, node): + self.visit(node.obj) + self.put(u".%s" % node.attribute) + + def visit_SimpleCallNode(self, node): + self.visit(node.function) + self.put(u"(") + self.comma_separated_list(node.args) + self.put(")") + + def emit_pos_args(self, node): + if node is None: + return + if isinstance(node, AddNode): + self.emit_pos_args(node.operand1) + self.emit_pos_args(node.operand2) + elif isinstance(node, TupleNode): + for expr in node.subexpr_nodes(): + self.visit(expr) + self.put(u", ") + elif isinstance(node, AsTupleNode): + self.put("*") + self.visit(node.arg) + self.put(u", ") + else: + self.visit(node) + self.put(u", ") + + def emit_kwd_args(self, node): + if node is None: + return + if isinstance(node, MergedDictNode): + for expr in node.subexpr_nodes(): + self.emit_kwd_args(expr) + elif isinstance(node, DictNode): + for expr in node.subexpr_nodes(): + self.put(u"%s=" % expr.key.value) + self.visit(expr.value) + self.put(u", ") + else: + self.put(u"**") + self.visit(node) + self.put(u", ") + + def visit_GeneralCallNode(self, node): + self.visit(node.function) + self.put(u"(") + self.emit_pos_args(node.positional_args) + self.emit_kwd_args(node.keyword_args) + self.remove(u", ") + self.put(")") + + def emit_comprehension(self, body, target, + sequence, condition, + parens=(u"", u"")): + open_paren, close_paren = parens + self.put(open_paren) + self.visit(body) + self.put(u" for ") + self.visit(target) + self.put(u" in ") + self.visit(sequence) + if condition: + self.put(u" if ") + self.visit(condition) + self.put(close_paren) + + def visit_ComprehensionAppendNode(self, node): + self.visit(node.expr) + + def visit_DictComprehensionAppendNode(self, node): + self.visit(node.key_expr) + self.put(u": ") + self.visit(node.value_expr) + + def visit_ComprehensionNode(self, node): + tpmap = {'list': u"[]", 'dict': u"{}", 'set': u"{}"} + parens = tpmap[node.type.py_type_name()] + body = node.loop.body + target = node.loop.target + sequence = node.loop.iterator.sequence + condition = None + if hasattr(body, 'if_clauses'): + # type(body) is Nodes.IfStatNode + condition = body.if_clauses[0].condition + body = body.if_clauses[0].body + self.emit_comprehension(body, target, sequence, condition, parens) + + def visit_GeneratorExpressionNode(self, node): + body = node.loop.body + target = node.loop.target + sequence = node.loop.iterator.sequence + condition = None + if hasattr(body, 'if_clauses'): + # type(body) is Nodes.IfStatNode + condition = body.if_clauses[0].condition + body = body.if_clauses[0].body.expr.arg + 
elif hasattr(body, 'expr'): + # type(body) is Nodes.ExprStatNode + body = body.expr.arg + self.emit_comprehension(body, target, sequence, condition, u"()") + + +class PxdWriter(DeclarationWriter, ExpressionWriter): + """ + A Cython code writer for everything supported in pxd files. + (currently unused) + """ + + def __call__(self, node): + print(u'\n'.join(self.write(node).lines)) + return node + + def visit_CFuncDefNode(self, node): + if node.overridable: + self.startline(u'cpdef ') + else: + self.startline(u'cdef ') + if node.modifiers: + self.put(' '.join(node.modifiers)) + self.put(' ') + if node.visibility != 'private': + self.put(node.visibility) + self.put(u' ') + if node.api: + self.put(u'api ') + self.visit(node.declarator) + + def visit_StatNode(self, node): + pass + + +class CodeWriter(StatementWriter, ExpressionWriter): + """ + A complete Cython code writer. + """ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AnalysedTreeTransforms.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AnalysedTreeTransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..d4941606ef6f4c1072b5c68dcabe328ac6eaad3a --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AnalysedTreeTransforms.py @@ -0,0 +1,99 @@ +from __future__ import absolute_import + +from .Visitor import ScopeTrackingTransform +from .Nodes import StatListNode, SingleAssignmentNode, CFuncDefNode, DefNode +from .ExprNodes import DictNode, DictItemNode, NameNode, UnicodeNode +from .PyrexTypes import py_object_type +from .StringEncoding import EncodedString +from . import Symtab + +class AutoTestDictTransform(ScopeTrackingTransform): + # Handles autotestdict directive + + excludelist = ['__cinit__', '__dealloc__', '__richcmp__', + '__nonzero__', '__bool__', + '__len__', '__contains__'] + + def visit_ModuleNode(self, node): + if node.is_pxd: + return node + self.scope_type = 'module' + self.scope_node = node + + if not self.current_directives['autotestdict']: + return node + self.all_docstrings = self.current_directives['autotestdict.all'] + self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef'] + + assert isinstance(node.body, StatListNode) + + # First see if __test__ is already created + if u'__test__' in node.scope.entries: + # Do nothing + return node + + pos = node.pos + + self.tests = [] + self.testspos = node.pos + + test_dict_entry = node.scope.declare_var(EncodedString(u'__test__'), + py_object_type, + pos, + visibility='public') + create_test_dict_assignment = SingleAssignmentNode(pos, + lhs=NameNode(pos, name=EncodedString(u'__test__'), + entry=test_dict_entry), + rhs=DictNode(pos, key_value_pairs=self.tests)) + self.visitchildren(node) + node.body.stats.append(create_test_dict_assignment) + return node + + def add_test(self, testpos, path, doctest): + pos = self.testspos + keystr = u'%s (line %d)' % (path, testpos[1]) + key = UnicodeNode(pos, value=EncodedString(keystr)) + value = UnicodeNode(pos, value=doctest) + self.tests.append(DictItemNode(pos, key=key, value=value)) + + def visit_ExprNode(self, node): + # expressions cannot contain functions and lambda expressions + # do not have a docstring + return node + + def visit_FuncDefNode(self, node): + if not node.doc or (isinstance(node, DefNode) and node.fused_py_func): + return node + if not self.cdef_docstrings: + if isinstance(node, CFuncDefNode) and not node.py_func: + return node + if not self.all_docstrings and '>>>' not in node.doc: + return node + + pos = 
self.testspos + if self.scope_type == 'module': + path = node.entry.name + elif self.scope_type in ('pyclass', 'cclass'): + if isinstance(node, CFuncDefNode): + if node.py_func is not None: + name = node.py_func.name + else: + name = node.entry.name + else: + name = node.name + if self.scope_type == 'cclass' and name in self.excludelist: + return node + if self.scope_type == 'pyclass': + class_name = self.scope_node.name + else: + class_name = self.scope_node.class_name + if isinstance(node.entry.scope, Symtab.PropertyScope): + property_method_name = node.entry.scope.name + path = "%s.%s.%s" % (class_name, node.entry.scope.name, + node.entry.name) + else: + path = "%s.%s" % (class_name, node.entry.name) + else: + assert False + self.add_test(node.pos, path, node.doc) + return node diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Annotate.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Annotate.py new file mode 100644 index 0000000000000000000000000000000000000000..8e8d2c4a8d822c7b5e9fe8d4467716670d8086c2 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Annotate.py @@ -0,0 +1,341 @@ +# Note: Work in progress + +from __future__ import absolute_import + +import os +import os.path +import re +import codecs +import textwrap +from datetime import datetime +from functools import partial +from collections import defaultdict +from xml.sax.saxutils import escape as html_escape +try: + from StringIO import StringIO +except ImportError: + from io import StringIO # does not support writing 'str' in Py2 + +from . import Version +from .Code import CCodeWriter +from .. import Utils + + +class AnnotationCCodeWriter(CCodeWriter): + + # also used as marker for detection of complete code emission in tests + COMPLETE_CODE_TITLE = "Complete cythonized code" + + def __init__(self, create_from=None, buffer=None, copy_formatting=True, show_entire_c_code=False, source_desc=None): + CCodeWriter.__init__(self, create_from, buffer, copy_formatting=copy_formatting) + self.show_entire_c_code = show_entire_c_code + if create_from is None: + self.annotation_buffer = StringIO() + self.last_annotated_pos = None + # annotations[filename][line] -> [(column, AnnotationItem)*] + self.annotations = defaultdict(partial(defaultdict, list)) + # code[filename][line] -> str + self.code = defaultdict(partial(defaultdict, str)) + # scopes[filename][line] -> set(scopes) + self.scopes = defaultdict(partial(defaultdict, set)) + else: + # When creating an insertion point, keep references to the same database + self.annotation_buffer = create_from.annotation_buffer + self.annotations = create_from.annotations + self.code = create_from.code + self.scopes = create_from.scopes + self.last_annotated_pos = create_from.last_annotated_pos + + def create_new(self, create_from, buffer, copy_formatting): + return AnnotationCCodeWriter(create_from, buffer, copy_formatting) + + def _write_to_buffer(self, s): + self.buffer.write(s) + self.annotation_buffer.write(s) + + def mark_pos(self, pos, trace=True): + if pos is not None: + CCodeWriter.mark_pos(self, pos, trace) + if self.funcstate and self.funcstate.scope: + # lambdas and genexprs can result in multiple scopes per line => keep them in a set + self.scopes[pos[0].filename][pos[1]].add(self.funcstate.scope) + if self.last_annotated_pos: + source_desc, line, _ = self.last_annotated_pos + pos_code = self.code[source_desc.filename] + pos_code[line] += self.annotation_buffer.getvalue() + self.annotation_buffer = StringIO() + self.last_annotated_pos 
= pos + + def annotate(self, pos, item): + self.annotations[pos[0].filename][pos[1]].append((pos[2], item)) + + def _css(self): + """css template will later allow to choose a colormap""" + css = [self._css_template] + for i in range(255): + color = u"FFFF%02x" % int(255.0 // (1.0 + i/10.0)) + css.append('.cython.score-%d {background-color: #%s;}' % (i, color)) + try: + from pygments.formatters import HtmlFormatter + except ImportError: + pass + else: + css.append(HtmlFormatter().get_style_defs('.cython')) + return '\n'.join(css) + + _css_template = textwrap.dedent(""" + body.cython { font-family: courier; font-size: 12; } + + .cython.tag { } + .cython.line { color: #000000; margin: 0em } + .cython.code { font-size: 9; color: #444444; display: none; margin: 0px 0px 0px 8px; border-left: 8px none; } + + .cython.line .run { background-color: #B0FFB0; } + .cython.line .mis { background-color: #FFB0B0; } + .cython.code.run { border-left: 8px solid #B0FFB0; } + .cython.code.mis { border-left: 8px solid #FFB0B0; } + + .cython.code .py_c_api { color: red; } + .cython.code .py_macro_api { color: #FF7000; } + .cython.code .pyx_c_api { color: #FF3000; } + .cython.code .pyx_macro_api { color: #FF7000; } + .cython.code .refnanny { color: #FFA000; } + .cython.code .trace { color: #FFA000; } + .cython.code .error_goto { color: #FFA000; } + + .cython.code .coerce { color: #008000; border: 1px dotted #008000 } + .cython.code .py_attr { color: #FF0000; font-weight: bold; } + .cython.code .c_attr { color: #0000FF; } + .cython.code .py_call { color: #FF0000; font-weight: bold; } + .cython.code .c_call { color: #0000FF; } + """) + + # on-click toggle function to show/hide C source code + _onclick_attr = ' onclick="{0}"'.format(( + "(function(s){" + " s.display = s.display === 'block' ? 'none' : 'block'" + "})(this.nextElementSibling.style)" + ).replace(' ', '') # poor dev's JS minification + ) + + def save_annotation(self, source_filename, target_filename, coverage_xml=None): + with Utils.open_source_file(source_filename) as f: + code = f.read() + generated_code = self.code.get(source_filename, {}) + c_file = Utils.decode_filename(os.path.basename(target_filename)) + html_filename = os.path.splitext(target_filename)[0] + ".html" + + with codecs.open(html_filename, "w", encoding="UTF-8") as out_buffer: + out_buffer.write(self._save_annotation(code, generated_code, c_file, source_filename, coverage_xml)) + + def _save_annotation_header(self, c_file, source_filename, coverage_timestamp=None): + coverage_info = '' + if coverage_timestamp: + coverage_info = u' with coverage data from {timestamp}'.format( + timestamp=datetime.fromtimestamp(int(coverage_timestamp) // 1000)) + + outlist = [ + textwrap.dedent(u'''\ + + + + + + Cython: {filename} + + + +

+            <body class="cython">
+            <p><span style="border-bottom: solid 1px grey;">Generated by Cython {watermark}</span>{more_info}</p>
+            <p>
+                <span style="background-color: #FFFF00">Yellow lines</span> hint at Python interaction.<br />
+                Click on a line that starts with a "<code>+</code>" to see the C code that Cython generated for it.
+            </p>
+ ''').format(css=self._css(), watermark=Version.watermark, + filename=os.path.basename(source_filename) if source_filename else '', + more_info=coverage_info) + ] + if c_file: + outlist.append(u'

<p>Raw output: <a href="%s">%s</a></p>
\n' % (c_file, c_file)) + return outlist + + def _save_annotation_footer(self): + return (u'\n',) + + def _save_annotation(self, code, generated_code, c_file=None, source_filename=None, coverage_xml=None): + """ + lines : original cython source code split by lines + generated_code : generated c code keyed by line number in original file + target filename : name of the file in which to store the generated html + c_file : filename in which the c_code has been written + """ + if coverage_xml is not None and source_filename: + coverage_timestamp = coverage_xml.get('timestamp', '').strip() + covered_lines = self._get_line_coverage(coverage_xml, source_filename) + else: + coverage_timestamp = covered_lines = None + annotation_items = dict(self.annotations[source_filename]) + scopes = dict(self.scopes[source_filename]) + + outlist = [] + outlist.extend(self._save_annotation_header(c_file, source_filename, coverage_timestamp)) + outlist.extend(self._save_annotation_body(code, generated_code, annotation_items, scopes, covered_lines)) + outlist.extend(self._save_annotation_footer()) + return ''.join(outlist) + + def _get_line_coverage(self, coverage_xml, source_filename): + coverage_data = None + for entry in coverage_xml.iterfind('.//class'): + if not entry.get('filename'): + continue + if (entry.get('filename') == source_filename or + os.path.abspath(entry.get('filename')) == source_filename): + coverage_data = entry + break + elif source_filename.endswith(entry.get('filename')): + coverage_data = entry # but we might still find a better match... + if coverage_data is None: + return None + return dict( + (int(line.get('number')), int(line.get('hits'))) + for line in coverage_data.iterfind('lines/line') + ) + + def _htmlify_code(self, code, language): + try: + from pygments import highlight + from pygments.lexers import CythonLexer, CppLexer + from pygments.formatters import HtmlFormatter + except ImportError: + # no Pygments, just escape the code + return html_escape(code) + + if language == "cython": + lexer = CythonLexer(stripnl=False, stripall=False) + elif language == "c/cpp": + lexer = CppLexer(stripnl=False, stripall=False) + else: + # unknown language, use fallback + return html_escape(code) + html_code = highlight( + code, lexer, + HtmlFormatter(nowrap=True)) + return html_code + + def _save_annotation_body(self, cython_code, generated_code, annotation_items, scopes, covered_lines=None): + outlist = [u'
<div class="cython">']
+        pos_comment_marker = u'/* \N{HORIZONTAL ELLIPSIS} */\n'
+        new_calls_map = dict(
+            (name, 0) for name in
+            'refnanny trace py_macro_api py_c_api pyx_macro_api pyx_c_api error_goto'.split()
+        ).copy
+
+        self.mark_pos(None)
+
+        def annotate(match):
+            group_name = match.lastgroup
+            calls[group_name] += 1
+            return u"<span class='%s'>%s</span>" % (
+                group_name, match.group(group_name))
+
+        lines = self._htmlify_code(cython_code, "cython").splitlines()
+        lineno_width = len(str(len(lines)))
+        if not covered_lines:
+            covered_lines = None
+
+        for k, line in enumerate(lines, 1):
+            try:
+                c_code = generated_code[k]
+            except KeyError:
+                c_code = ''
+            else:
+                c_code = _replace_pos_comment(pos_comment_marker, c_code)
+                if c_code.startswith(pos_comment_marker):
+                    c_code = c_code[len(pos_comment_marker):]
+                c_code = html_escape(c_code)
+
+            calls = new_calls_map()
+            c_code = _parse_code(annotate, c_code)
+            score = (5 * calls['py_c_api'] + 2 * calls['pyx_c_api'] +
+                     calls['py_macro_api'] + calls['pyx_macro_api'])
+
+            if c_code:
+                onclick = self._onclick_attr
+                expandsymbol = '+'
+            else:
+                onclick = ''
+                expandsymbol = ' '
+
+            covered = ''
+            if covered_lines is not None and k in covered_lines:
+                hits = covered_lines[k]
+                if hits is not None:
+                    covered = 'run' if hits else 'mis'
+
+            outlist.append(
+                u'<pre class="cython line score-{score}"{onclick}>'
+                # generate line number with expand symbol in front,
+                # and the right number of digits
+                u'{expandsymbol}{line:0{lineno_width}d}: {code}</pre>
\n'.format( + score=score, + expandsymbol=expandsymbol, + covered=covered, + lineno_width=lineno_width, + line=k, + code=line.rstrip(), + onclick=onclick, + )) + if c_code: + outlist.append(u"
<pre class='cython code score-{score} {covered}'>{code}</pre>
".format( + score=score, covered=covered, code=c_code)) + outlist.append(u"
") + + # now the whole c-code if needed: + if self.show_entire_c_code: + outlist.append(u'

') + onclick_title = u"
+ {title}
\n" + outlist.append(onclick_title.format( + onclick=self._onclick_attr, + title=AnnotationCCodeWriter.COMPLETE_CODE_TITLE, + )) + complete_code_as_html = self._htmlify_code(self.buffer.getvalue(), "c/cpp") + outlist.append(u"
{code}
".format(code=complete_code_as_html)) + outlist.append(u"

") + + return outlist + + +_parse_code = re.compile(( + br'(?P__Pyx_X?(?:GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)|' + br'(?P__Pyx_Trace[A-Za-z]+)|' + br'(?:' + br'(?P__Pyx_[A-Z][A-Z_]+)|' + br'(?P(?:__Pyx_[A-Z][a-z_][A-Za-z_]*)|__pyx_convert_[A-Za-z_]*)|' + br'(?PPy[A-Z][a-z]+_[A-Z][A-Z_]+)|' + br'(?PPy[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)' + br')(?=\()|' # look-ahead to exclude subsequent '(' from replacement + br'(?P(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))' +).decode('ascii')).sub + + +_replace_pos_comment = re.compile( + # this matches what Cython generates as code line marker comment + br'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n'.decode('ascii'), + re.M +).sub + + +class AnnotationItem(object): + + def __init__(self, style, text, tag="", size=0): + self.style = style + self.text = text + self.tag = tag + self.size = size + + def start(self): + return u"%s" % (self.style, self.text, self.tag) + + def end(self): + return self.size, u"" diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AutoDocTransforms.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AutoDocTransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..c74aab7b7c1ae1315ef3848732126423258ab5e6 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AutoDocTransforms.py @@ -0,0 +1,318 @@ +from __future__ import absolute_import, print_function + +from .Visitor import CythonTransform +from .StringEncoding import EncodedString +from . import Options +from . import PyrexTypes +from ..CodeWriter import ExpressionWriter +from .Errors import warning + + +class AnnotationWriter(ExpressionWriter): + """ + A Cython code writer for Python expressions in argument/variable annotations. + """ + def __init__(self, description=None): + """description is optional. If specified it is used in + warning messages for the nodes that don't convert to string properly. + If not specified then no messages are generated. + """ + ExpressionWriter.__init__(self) + self.description = description + self.incomplete = False + + def visit_Node(self, node): + self.put(u"") + self.incomplete = True + if self.description: + warning(node.pos, + "Failed to convert code to string representation in {0}".format( + self.description), level=1) + + def visit_LambdaNode(self, node): + # XXX Should we do better? + self.put("") + self.incomplete = True + if self.description: + warning(node.pos, + "Failed to convert lambda to string representation in {0}".format( + self.description), level=1) + + def visit_UnicodeNode(self, node): + # Discard Unicode prefix in annotations. Any tool looking at them + # would probably expect Py3 string semantics. 
+ self.emit_string(node, "") + + def visit_AnnotationNode(self, node): + self.put(node.string.unicode_value) + + +class EmbedSignature(CythonTransform): + + def __init__(self, context): + super(EmbedSignature, self).__init__(context) + self.class_name = None + self.class_node = None + + def _fmt_expr(self, node): + writer = ExpressionWriter() + result = writer.write(node) + # print(type(node).__name__, '-->', result) + return result + + def _fmt_annotation(self, node): + writer = AnnotationWriter() + result = writer.write(node) + # print(type(node).__name__, '-->', result) + return result + + def _setup_format(self): + signature_format = self.current_directives['embedsignature.format'] + self.is_format_c = signature_format == 'c' + self.is_format_python = signature_format == 'python' + self.is_format_clinic = signature_format == 'clinic' + + def _fmt_arg(self, arg): + arg_doc = arg.name + annotation = None + defaultval = None + if arg.is_self_arg: + if self.is_format_clinic: + arg_doc = '$self' + elif arg.is_type_arg: + if self.is_format_clinic: + arg_doc = '$type' + elif self.is_format_c: + if arg.type is not PyrexTypes.py_object_type: + arg_doc = arg.type.declaration_code(arg.name, for_display=1) + elif self.is_format_python: + if not arg.annotation: + annotation = self._fmt_type(arg.type) + if arg.annotation: + if not self.is_format_clinic: + annotation = self._fmt_annotation(arg.annotation) + if arg.default: + defaultval = self._fmt_expr(arg.default) + if annotation: + arg_doc = arg_doc + (': %s' % annotation) + if defaultval: + arg_doc = arg_doc + (' = %s' % defaultval) + elif defaultval: + arg_doc = arg_doc + ('=%s' % defaultval) + return arg_doc + + def _fmt_star_arg(self, arg): + arg_doc = arg.name + if arg.annotation: + if not self.is_format_clinic: + annotation = self._fmt_annotation(arg.annotation) + arg_doc = arg_doc + (': %s' % annotation) + return arg_doc + + def _fmt_arglist(self, args, + npoargs=0, npargs=0, pargs=None, + nkargs=0, kargs=None, + hide_self=False): + arglist = [] + for arg in args: + if not hide_self or not arg.entry.is_self_arg: + arg_doc = self._fmt_arg(arg) + arglist.append(arg_doc) + if pargs: + arg_doc = self._fmt_star_arg(pargs) + arglist.insert(npargs + npoargs, '*%s' % arg_doc) + elif nkargs: + arglist.insert(npargs + npoargs, '*') + if npoargs: + arglist.insert(npoargs, '/') + if kargs: + arg_doc = self._fmt_star_arg(kargs) + arglist.append('**%s' % arg_doc) + return arglist + + def _fmt_type(self, type): + if type is PyrexTypes.py_object_type: + return None + elif self.is_format_c: + code = type.declaration_code("", for_display=1) + return code + elif self.is_format_python: + annotation = None + if type.is_string: + annotation = self.current_directives['c_string_type'] + elif type.is_numeric: + annotation = type.py_type_name() + if annotation is None: + code = type.declaration_code('', for_display=1) + annotation = code.replace(' ', '_').replace('*', 'p') + return annotation + return None + + def _fmt_signature(self, cls_name, func_name, args, + npoargs=0, npargs=0, pargs=None, + nkargs=0, kargs=None, + return_expr=None, return_type=None, + hide_self=False): + arglist = self._fmt_arglist( + args, npoargs, npargs, pargs, nkargs, kargs, + hide_self=hide_self, + ) + arglist_doc = ', '.join(arglist) + func_doc = '%s(%s)' % (func_name, arglist_doc) + if self.is_format_c and cls_name: + func_doc = '%s.%s' % (cls_name, func_doc) + if not self.is_format_clinic: + ret_doc = None + if return_expr: + ret_doc = self._fmt_annotation(return_expr) + elif 
return_type: + ret_doc = self._fmt_type(return_type) + if ret_doc: + func_doc = '%s -> %s' % (func_doc, ret_doc) + return func_doc + + def _embed_signature(self, signature, node_doc): + if self.is_format_clinic and self.current_directives['binding']: + return node_doc + if node_doc: + if self.is_format_clinic: + docfmt = "%s\n--\n\n%s" + else: + docfmt = "%s\n%s" + return docfmt % (signature, node_doc) + else: + if self.is_format_clinic: + docfmt = "%s\n--\n\n" + else: + docfmt = "%s" + return docfmt % signature + + def __call__(self, node): + if not Options.docstrings: + return node + else: + return super(EmbedSignature, self).__call__(node) + + def visit_ClassDefNode(self, node): + oldname = self.class_name + oldclass = self.class_node + self.class_node = node + try: + # PyClassDefNode + self.class_name = node.name + except AttributeError: + # CClassDefNode + self.class_name = node.class_name + self.visitchildren(node) + self.class_name = oldname + self.class_node = oldclass + return node + + def visit_LambdaNode(self, node): + # lambda expressions so not have signature or inner functions + return node + + def visit_DefNode(self, node): + if not self.current_directives['embedsignature']: + return node + self._setup_format() + + is_constructor = False + hide_self = False + if node.entry.is_special: + is_constructor = self.class_node and node.name == '__init__' + if not is_constructor: + return node + class_name = None + func_name = node.name + if self.is_format_c: + func_name = self.class_name + hide_self = True + else: + class_name, func_name = self.class_name, node.name + + npoargs = getattr(node, 'num_posonly_args', 0) + nkargs = getattr(node, 'num_kwonly_args', 0) + npargs = len(node.args) - nkargs - npoargs + signature = self._fmt_signature( + class_name, func_name, node.args, + npoargs, npargs, node.star_arg, + nkargs, node.starstar_arg, + return_expr=node.return_type_annotation, + return_type=None, hide_self=hide_self) + if signature: + if is_constructor and self.is_format_c: + doc_holder = self.class_node.entry.type.scope + else: + doc_holder = node.entry + if doc_holder.doc is not None: + old_doc = doc_holder.doc + elif not is_constructor and getattr(node, 'py_func', None) is not None: + old_doc = node.py_func.entry.doc + else: + old_doc = None + new_doc = self._embed_signature(signature, old_doc) + doc_holder.doc = EncodedString(new_doc) + if not is_constructor and getattr(node, 'py_func', None) is not None: + node.py_func.entry.doc = EncodedString(new_doc) + return node + + def visit_CFuncDefNode(self, node): + if not node.overridable: # not cpdef FOO(...): + return node + if not self.current_directives['embedsignature']: + return node + self._setup_format() + + signature = self._fmt_signature( + self.class_name, node.declarator.base.name, + node.declarator.args, + return_type=node.return_type) + if signature: + if node.entry.doc is not None: + old_doc = node.entry.doc + elif getattr(node, 'py_func', None) is not None: + old_doc = node.py_func.entry.doc + else: + old_doc = None + new_doc = self._embed_signature(signature, old_doc) + node.entry.doc = EncodedString(new_doc) + py_func = getattr(node, 'py_func', None) + if py_func is not None: + py_func.entry.doc = EncodedString(new_doc) + return node + + def visit_PropertyNode(self, node): + if not self.current_directives['embedsignature']: + return node + self._setup_format() + + entry = node.entry + body = node.body + prop_name = entry.name + type_name = None + if entry.visibility == 'public': + if self.is_format_c: + # property 
synthesised from a cdef public attribute + type_name = entry.type.declaration_code("", for_display=1) + if not entry.type.is_pyobject: + type_name = "'%s'" % type_name + elif entry.type.is_extension_type: + type_name = entry.type.module_name + '.' + type_name + elif self.is_format_python: + type_name = self._fmt_type(entry.type) + if type_name is None: + for stat in body.stats: + if stat.name != '__get__': + continue + if self.is_format_c: + prop_name = '%s.%s' % (self.class_name, prop_name) + ret_annotation = stat.return_type_annotation + if ret_annotation: + type_name = self._fmt_annotation(ret_annotation) + if type_name is not None : + signature = '%s: %s' % (prop_name, type_name) + new_doc = self._embed_signature(signature, entry.doc) + if not self.is_format_clinic: + entry.doc = EncodedString(new_doc) + return node diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Buffer.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..e86e1e9c24d206d537d682b7c0b67c1b0fabf96c --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Buffer.py @@ -0,0 +1,749 @@ +from __future__ import absolute_import + +from .Visitor import CythonTransform +from .ModuleNode import ModuleNode +from .Errors import CompileError +from .UtilityCode import CythonUtilityCode +from .Code import UtilityCode, TempitaUtilityCode + +from . import Options +from . import Interpreter +from . import PyrexTypes +from . import Naming +from . import Symtab + +def dedent(text, reindent=0): + from textwrap import dedent + text = dedent(text) + if reindent > 0: + indent = " " * reindent + text = '\n'.join([indent + x for x in text.split('\n')]) + return text + +class IntroduceBufferAuxiliaryVars(CythonTransform): + + # + # Entry point + # + + buffers_exists = False + using_memoryview = False + + def __call__(self, node): + assert isinstance(node, ModuleNode) + self.max_ndim = 0 + result = super(IntroduceBufferAuxiliaryVars, self).__call__(node) + if self.buffers_exists: + use_bufstruct_declare_code(node.scope) + use_py2_buffer_functions(node.scope) + + return result + + + # + # Basic operations for transforms + # + def handle_scope(self, node, scope): + # For all buffers, insert extra variables in the scope. 
+ # The variables are also accessible from the buffer_info + # on the buffer entry + scope_items = scope.entries.items() + bufvars = [entry for name, entry in scope_items if entry.type.is_buffer] + if len(bufvars) > 0: + bufvars.sort(key=lambda entry: entry.name) + self.buffers_exists = True + + memviewslicevars = [entry for name, entry in scope_items if entry.type.is_memoryviewslice] + if len(memviewslicevars) > 0: + self.buffers_exists = True + + + for (name, entry) in scope_items: + if name == 'memoryview' and isinstance(entry.utility_code_definition, CythonUtilityCode): + self.using_memoryview = True + break + del scope_items + + if isinstance(node, ModuleNode) and len(bufvars) > 0: + # for now...note that pos is wrong + raise CompileError(node.pos, "Buffer vars not allowed in module scope") + for entry in bufvars: + if entry.type.dtype.is_ptr: + raise CompileError(node.pos, "Buffers with pointer types not yet supported.") + + name = entry.name + buftype = entry.type + if buftype.ndim > Options.buffer_max_dims: + raise CompileError(node.pos, + "Buffer ndims exceeds Options.buffer_max_dims = %d" % Options.buffer_max_dims) + if buftype.ndim > self.max_ndim: + self.max_ndim = buftype.ndim + + # Declare auxiliary vars + def decvar(type, prefix): + cname = scope.mangle(prefix, name) + aux_var = scope.declare_var(name=None, cname=cname, + type=type, pos=node.pos) + if entry.is_arg: + aux_var.used = True # otherwise, NameNode will mark whether it is used + + return aux_var + + auxvars = ((PyrexTypes.c_pyx_buffer_nd_type, Naming.pybuffernd_prefix), + (PyrexTypes.c_pyx_buffer_type, Naming.pybufferstruct_prefix)) + pybuffernd, rcbuffer = [decvar(type, prefix) for (type, prefix) in auxvars] + + entry.buffer_aux = Symtab.BufferAux(pybuffernd, rcbuffer) + + scope.buffer_entries = bufvars + self.scope = scope + + def visit_ModuleNode(self, node): + self.handle_scope(node, node.scope) + self.visitchildren(node) + return node + + def visit_FuncDefNode(self, node): + self.handle_scope(node, node.local_scope) + self.visitchildren(node) + return node + +# +# Analysis +# +buffer_options = ("dtype", "ndim", "mode", "negative_indices", "cast") # ordered! +buffer_defaults = {"ndim": 1, "mode": "full", "negative_indices": True, "cast": False} +buffer_positional_options_count = 1 # anything beyond this needs keyword argument + +ERR_BUF_OPTION_UNKNOWN = '"%s" is not a buffer option' +ERR_BUF_TOO_MANY = 'Too many buffer options' +ERR_BUF_DUP = '"%s" buffer option already supplied' +ERR_BUF_MISSING = '"%s" missing' +ERR_BUF_MODE = 'Only allowed buffer modes are: "c", "fortran", "full", "strided" (as a compile-time string)' +ERR_BUF_NDIM = 'ndim must be a non-negative integer' +ERR_BUF_DTYPE = 'dtype must be "object", numeric type or a struct' +ERR_BUF_BOOL = '"%s" must be a boolean' + +def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, need_complete=True): + """ + Must be called during type analysis, as analyse is called + on the dtype argument. + + posargs and dictargs should consist of a list and a dict + of tuples (value, pos). Defaults should be a dict of values. + + Returns a dict containing all the options a buffer can have and + its value (with the positions stripped). 
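+
+    For example, for a declaration like ``sometype[double, ndim=2, mode="c"]``
+    the dtype arrives positionally and the rest as keywords, and the result
+    would be roughly {'dtype': <double>, 'ndim': 2, 'mode': 'c',
+    'negative_indices': True, 'cast': False} once defaults are filled in.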
+ """ + if defaults is None: + defaults = buffer_defaults + + posargs, dictargs = Interpreter.interpret_compiletime_options( + posargs, dictargs, type_env=env, type_args=(0, 'dtype')) + + if len(posargs) > buffer_positional_options_count: + raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY) + + options = {} + for name, (value, pos) in dictargs.items(): + if name not in buffer_options: + raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name) + options[name] = value + + for name, (value, pos) in zip(buffer_options, posargs): + if name not in buffer_options: + raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name) + if name in options: + raise CompileError(pos, ERR_BUF_DUP % name) + options[name] = value + + # Check that they are all there and copy defaults + for name in buffer_options: + if name not in options: + try: + options[name] = defaults[name] + except KeyError: + if need_complete: + raise CompileError(globalpos, ERR_BUF_MISSING % name) + + dtype = options.get("dtype") + if dtype and dtype.is_extension_type: + raise CompileError(globalpos, ERR_BUF_DTYPE) + + ndim = options.get("ndim") + if ndim and (not isinstance(ndim, int) or ndim < 0): + raise CompileError(globalpos, ERR_BUF_NDIM) + + mode = options.get("mode") + if mode and not (mode in ('full', 'strided', 'c', 'fortran')): + raise CompileError(globalpos, ERR_BUF_MODE) + + def assert_bool(name): + x = options.get(name) + if not isinstance(x, bool): + raise CompileError(globalpos, ERR_BUF_BOOL % name) + + assert_bool('negative_indices') + assert_bool('cast') + + return options + + +# +# Code generation +# + +class BufferEntry(object): + def __init__(self, entry): + self.entry = entry + self.type = entry.type + self.cname = entry.buffer_aux.buflocal_nd_var.cname + self.buf_ptr = "%s.rcbuffer->pybuffer.buf" % self.cname + self.buf_ptr_type = entry.type.buffer_ptr_type + self.init_attributes() + + def init_attributes(self): + self.shape = self.get_buf_shapevars() + self.strides = self.get_buf_stridevars() + self.suboffsets = self.get_buf_suboffsetvars() + + def get_buf_suboffsetvars(self): + return self._for_all_ndim("%s.diminfo[%d].suboffsets") + + def get_buf_stridevars(self): + return self._for_all_ndim("%s.diminfo[%d].strides") + + def get_buf_shapevars(self): + return self._for_all_ndim("%s.diminfo[%d].shape") + + def _for_all_ndim(self, s): + return [s % (self.cname, i) for i in range(self.type.ndim)] + + def generate_buffer_lookup_code(self, code, index_cnames): + # Create buffer lookup and return it + # This is done via utility macros/inline functions, which vary + # according to the access mode used. 
+ params = [] + nd = self.type.ndim + mode = self.type.mode + if mode == 'full': + for i, s, o in zip(index_cnames, + self.get_buf_stridevars(), + self.get_buf_suboffsetvars()): + params.append(i) + params.append(s) + params.append(o) + funcname = "__Pyx_BufPtrFull%dd" % nd + funcgen = buf_lookup_full_code + else: + if mode == 'strided': + funcname = "__Pyx_BufPtrStrided%dd" % nd + funcgen = buf_lookup_strided_code + elif mode == 'c': + funcname = "__Pyx_BufPtrCContig%dd" % nd + funcgen = buf_lookup_c_code + elif mode == 'fortran': + funcname = "__Pyx_BufPtrFortranContig%dd" % nd + funcgen = buf_lookup_fortran_code + else: + assert False + for i, s in zip(index_cnames, self.get_buf_stridevars()): + params.append(i) + params.append(s) + + # Make sure the utility code is available + if funcname not in code.globalstate.utility_codes: + code.globalstate.utility_codes.add(funcname) + protocode = code.globalstate['utility_code_proto'] + defcode = code.globalstate['utility_code_def'] + funcgen(protocode, defcode, name=funcname, nd=nd) + + buf_ptr_type_code = self.buf_ptr_type.empty_declaration_code() + ptrcode = "%s(%s, %s, %s)" % (funcname, buf_ptr_type_code, self.buf_ptr, + ", ".join(params)) + return ptrcode + + +def get_flags(buffer_aux, buffer_type): + flags = 'PyBUF_FORMAT' + mode = buffer_type.mode + if mode == 'full': + flags += '| PyBUF_INDIRECT' + elif mode == 'strided': + flags += '| PyBUF_STRIDES' + elif mode == 'c': + flags += '| PyBUF_C_CONTIGUOUS' + elif mode == 'fortran': + flags += '| PyBUF_F_CONTIGUOUS' + else: + assert False + if buffer_aux.writable_needed: flags += "| PyBUF_WRITABLE" + return flags + +def used_buffer_aux_vars(entry): + buffer_aux = entry.buffer_aux + buffer_aux.buflocal_nd_var.used = True + buffer_aux.rcbuf_var.used = True + +def put_unpack_buffer_aux_into_scope(buf_entry, code): + # Generate code to copy the needed struct info into local + # variables. + buffer_aux, mode = buf_entry.buffer_aux, buf_entry.type.mode + pybuffernd_struct = buffer_aux.buflocal_nd_var.cname + + fldnames = ['strides', 'shape'] + if mode == 'full': + fldnames.append('suboffsets') + + ln = [] + for i in range(buf_entry.type.ndim): + for fldname in fldnames: + ln.append("%s.diminfo[%d].%s = %s.rcbuffer->pybuffer.%s[%d];" % ( + pybuffernd_struct, i, fldname, + pybuffernd_struct, fldname, i, + )) + code.putln(' '.join(ln)) + +def put_init_vars(entry, code): + bufaux = entry.buffer_aux + pybuffernd_struct = bufaux.buflocal_nd_var.cname + pybuffer_struct = bufaux.rcbuf_var.cname + # init pybuffer_struct + code.putln("%s.pybuffer.buf = NULL;" % pybuffer_struct) + code.putln("%s.refcount = 0;" % pybuffer_struct) + # init the buffer object + # code.put_init_var_to_py_none(entry) + # init the pybuffernd_struct + code.putln("%s.data = NULL;" % pybuffernd_struct) + code.putln("%s.rcbuffer = &%s;" % (pybuffernd_struct, pybuffer_struct)) + + +def put_acquire_arg_buffer(entry, code, pos): + buffer_aux = entry.buffer_aux + getbuffer = get_getbuffer_call(code, entry.cname, buffer_aux, entry.type) + + # Acquire any new buffer + code.putln("{") + code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth()) + code.putln(code.error_goto_if("%s == -1" % getbuffer, pos)) + code.putln("}") + # An exception raised in arg parsing cannot be caught, so no + # need to care about the buffer then. 
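+    # The block above emits C roughly of the form:
+    #   { __Pyx_BufFmt_StackElem __pyx_stack[<depth>];
+    #     if (<getbuffer-call> == -1) <goto error>; }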
+    put_unpack_buffer_aux_into_scope(entry, code)
+
+
+def put_release_buffer_code(code, entry):
+    code.globalstate.use_utility_code(acquire_utility_code)
+    code.putln("__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);" % entry.buffer_aux.buflocal_nd_var.cname)
+
+
+def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
+    ndim = buffer_type.ndim
+    cast = int(buffer_type.cast)
+    flags = get_flags(buffer_aux, buffer_type)
+    pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
+
+    dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
+
+    code.globalstate.use_utility_code(acquire_utility_code)
+    return ("__Pyx_GetBufferAndValidate(&%(pybuffernd_struct)s.rcbuffer->pybuffer, "
+            "(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
+            "%(cast)d, __pyx_stack)" % locals())
+
+
+def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
+                         is_initialized, pos, code):
+    """
+    Generate code for reassigning a buffer variable. This only deals with getting
+    the buffer auxiliary structure and variables set up correctly; the assignment
+    itself and refcounting are the responsibility of the caller.
+
+    However, the assignment operation may throw an exception so that the reassignment
+    never happens.
+
+    Depending on the circumstances there are two possible outcomes:
+    - Old buffer released, new acquired, rhs assigned to lhs
+    - Old buffer released, new acquired which fails, reacquire old lhs buffer
+      (which may or may not succeed).
+    """
+
+    buffer_aux, buffer_type = buf_entry.buffer_aux, buf_entry.type
+    pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
+    flags = get_flags(buffer_aux, buffer_type)
+
+    code.putln("{")  # Set up necessary stack for getbuffer
+    code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
+
+    getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type)  # fill in object below
+
+    if is_initialized:
+        # Release any existing buffer
+        code.putln('__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);' % pybuffernd_struct)
+        # Acquire
+        retcode_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+        code.putln("%s = %s;" % (retcode_cname, getbuffer % rhs_cname))
+        code.putln('if (%s) {' % (code.unlikely("%s < 0" % retcode_cname)))
+        # If acquisition failed, attempt to reacquire the old buffer
+        # before raising the exception. A failure of reacquisition
+        # will cause the reacquisition exception to be reported, one
+        # can consider working around this later.
+        exc_temps = tuple(code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=False)
+                          for _ in range(3))
+        code.putln('PyErr_Fetch(&%s, &%s, &%s);' % exc_temps)
+        code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % lhs_cname)))
+        code.putln('Py_XDECREF(%s); Py_XDECREF(%s); Py_XDECREF(%s);' % exc_temps)  # Do not refnanny these!
+        code.globalstate.use_utility_code(raise_buffer_fallback_code)
+        code.putln('__Pyx_RaiseBufferFallbackError();')
+        code.putln('} else {')
+        code.putln('PyErr_Restore(%s, %s, %s);' % exc_temps)
+        code.putln('}')
+        code.putln('%s = %s = %s = 0;' % exc_temps)
+        for t in exc_temps:
+            code.funcstate.release_temp(t)
+        code.putln('}')
+        # Unpack indices
+        put_unpack_buffer_aux_into_scope(buf_entry, code)
+        code.putln(code.error_goto_if_neg(retcode_cname, pos))
+        code.funcstate.release_temp(retcode_cname)
+    else:
+        # Our entry had no previous value, so set to None when acquisition fails.
+        # In this case, auxiliary vars should be set up right in initialization to a zero-buffer,
+        # so it suffices to set the buf field to NULL.
+        code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % rhs_cname)))
+        code.putln('%s = %s; __Pyx_INCREF(Py_None); %s.rcbuffer->pybuffer.buf = NULL;' %
+                   (lhs_cname,
+                    PyrexTypes.typecast(buffer_type, PyrexTypes.py_object_type, "Py_None"),
+                    pybuffernd_struct))
+        code.putln(code.error_goto(pos))
+        code.put('} else {')
+        # Unpack indices
+        put_unpack_buffer_aux_into_scope(buf_entry, code)
+        code.putln('}')
+
+    code.putln("}")  # Release stack
+
+
+def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives,
+                           pos, code, negative_indices, in_nogil_context):
+    """
+    Generates code to process indices and calculate an offset into
+    a buffer. Returns a C string which gives a pointer which can be
+    read from or written to at will (it is an expression so the caller should
+    store it in a temporary if it is used more than once).
+
+    As the bounds checking can have any number of combinations of unsigned
+    arguments, smart optimizations etc. we insert it directly in the function
+    body. The lookup however is delegated to an inline function that is instantiated
+    once per ndim (lookups with suboffsets tend to get quite complicated).
+
+    entry is a BufferEntry
+    """
+    negative_indices = directives['wraparound'] and negative_indices
+
+    if directives['boundscheck']:
+        # Check bounds and fix negative indices.
+        # We allocate a temporary which is initialized to -1, meaning OK (!).
+        # If an error occurs, the temp is set to the index dimension the
+        # error is occurring at.
+        failed_dim_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+        code.putln("%s = -1;" % failed_dim_temp)
+        for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames, entry.get_buf_shapevars())):
+            if signed != 0:
+                # not unsigned, deal with negative index
+                code.putln("if (%s < 0) {" % cname)
+                if negative_indices:
+                    code.putln("%s += %s;" % (cname, shape))
+                    code.putln("if (%s) %s = %d;" % (
+                        code.unlikely("%s < 0" % cname),
+                        failed_dim_temp, dim))
+                else:
+                    code.putln("%s = %d;" % (failed_dim_temp, dim))
+                code.put("} else ")
+            # check bounds in positive direction
+            if signed != 0:
+                cast = ""
+            else:
+                cast = "(size_t)"
+            code.putln("if (%s) %s = %d;" % (
+                code.unlikely("%s >= %s%s" % (cname, cast, shape)),
+                failed_dim_temp, dim))
+
+        if in_nogil_context:
+            code.globalstate.use_utility_code(raise_indexerror_nogil)
+            func = '__Pyx_RaiseBufferIndexErrorNogil'
+        else:
+            code.globalstate.use_utility_code(raise_indexerror_code)
+            func = '__Pyx_RaiseBufferIndexError'
+
+        code.putln("if (%s) {" % code.unlikely("%s != -1" % failed_dim_temp))
+        code.putln('%s(%s);' % (func, failed_dim_temp))
+        code.putln(code.error_goto(pos))
+        code.putln('}')
+        code.funcstate.release_temp(failed_dim_temp)
+    elif negative_indices:
+        # Only fix negative indices.
+        for signed, cname, shape in zip(index_signeds, index_cnames, entry.get_buf_shapevars()):
+            if signed != 0:
+                code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape))
+
+    return entry.generate_buffer_lookup_code(code, index_cnames)
+
+
+def use_bufstruct_declare_code(env):
+    env.use_utility_code(buffer_struct_declare_code)
+
+
+def buf_lookup_full_code(proto, defin, name, nd):
+    """
+    Generates a buffer lookup function for the right number
+    of dimensions. The function gives back a void* at the right location.
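+
+    For nd == 2, for example, the macro delegates to a helper
+    __Pyx_BufPtrFull2d_imp(buf, i0, s0, o0, i1, s1, o1) that applies one
+    suboffset indirection per dimension.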
+ """ + # _i_ndex, _s_tride, sub_o_ffset + macroargs = ", ".join(["i%d, s%d, o%d" % (i, i, i) for i in range(nd)]) + proto.putln("#define %s(type, buf, %s) (type)(%s_imp(buf, %s))" % (name, macroargs, name, macroargs)) + + funcargs = ", ".join(["Py_ssize_t i%d, Py_ssize_t s%d, Py_ssize_t o%d" % (i, i, i) for i in range(nd)]) + proto.putln("static CYTHON_INLINE void* %s_imp(void* buf, %s);" % (name, funcargs)) + defin.putln(dedent(""" + static CYTHON_INLINE void* %s_imp(void* buf, %s) { + char* ptr = (char*)buf; + """) % (name, funcargs) + "".join([dedent("""\ + ptr += s%d * i%d; + if (o%d >= 0) ptr = *((char**)ptr) + o%d; + """) % (i, i, i, i) for i in range(nd)] + ) + "\nreturn ptr;\n}") + + +def buf_lookup_strided_code(proto, defin, name, nd): + """ + Generates a buffer lookup function for the right number + of dimensions. The function gives back a void* at the right location. + """ + # _i_ndex, _s_tride + args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)]) + offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd)]) + proto.putln("#define %s(type, buf, %s) (type)((char*)buf + %s)" % (name, args, offset)) + + +def buf_lookup_c_code(proto, defin, name, nd): + """ + Similar to strided lookup, but can assume that the last dimension + doesn't need a multiplication as long as. + Still we keep the same signature for now. + """ + if nd == 1: + proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name) + else: + args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)]) + offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd - 1)]) + proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, nd - 1)) + + +def buf_lookup_fortran_code(proto, defin, name, nd): + """ + Like C lookup, but the first index is optimized instead. + """ + if nd == 1: + proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name) + else: + args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)]) + offset = " + ".join(["i%d * s%d" % (i, i) for i in range(1, nd)]) + proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, 0)) + + +def use_py2_buffer_functions(env): + env.use_utility_code(GetAndReleaseBufferUtilityCode()) + + +class GetAndReleaseBufferUtilityCode(object): + # Emulation of PyObject_GetBuffer and PyBuffer_Release for Python 2. + # For >= 2.6 we do double mode -- use the new buffer interface on objects + # which has the right tp_flags set, but emulation otherwise. 
+ + requires = None + is_cython_utility = False + + def __init__(self): + pass + + def __eq__(self, other): + return isinstance(other, GetAndReleaseBufferUtilityCode) + + def __hash__(self): + return 24342342 + + def get_tree(self, **kwargs): pass + + def put_code(self, output): + code = output['utility_code_def'] + proto_code = output['utility_code_proto'] + env = output.module_node.scope + cython_scope = env.context.cython_scope + + # Search all types for __getbuffer__ overloads + types = [] + visited_scopes = set() + def find_buffer_types(scope): + if scope in visited_scopes: + return + visited_scopes.add(scope) + for m in scope.cimported_modules: + find_buffer_types(m) + for e in scope.type_entries: + if isinstance(e.utility_code_definition, CythonUtilityCode): + continue + t = e.type + if t.is_extension_type: + if scope is cython_scope and not e.used: + continue + release = get = None + for x in t.scope.pyfunc_entries: + if x.name == u"__getbuffer__": get = x.func_cname + elif x.name == u"__releasebuffer__": release = x.func_cname + if get: + types.append((t.typeptr_cname, get, release)) + + find_buffer_types(env) + + util_code = TempitaUtilityCode.load( + "GetAndReleaseBuffer", from_file="Buffer.c", + context=dict(types=types)) + + proto = util_code.format_code(util_code.proto) + impl = util_code.format_code( + util_code.inject_string_constants(util_code.impl, output)[1]) + + proto_code.putln(proto) + code.putln(impl) + + +def mangle_dtype_name(dtype): + # Use prefixes to separate user defined types from builtins + # (consider "typedef float unsigned_int") + if dtype.is_pyobject: + return "object" + elif dtype.is_ptr: + return "ptr" + else: + if dtype.is_typedef or dtype.is_struct_or_union: + prefix = "nn_" + else: + prefix = "" + return prefix + dtype.specialization_name() + +def get_type_information_cname(code, dtype, maxdepth=None): + """ + Output the run-time type information (__Pyx_TypeInfo) for given dtype, + and return the name of the type info struct. + + Structs with two floats of the same size are encoded as complex numbers. + One can separate between complex numbers declared as struct or with native + encoding by inspecting to see if the fields field of the type is + filled in. + """ + namesuffix = mangle_dtype_name(dtype) + name = "__Pyx_TypeInfo_%s" % namesuffix + structinfo_name = "__Pyx_StructFields_%s" % namesuffix + + if dtype.is_error: return "" + + # It's critical that walking the type info doesn't use more stack + # depth than dtype.struct_nesting_depth() returns, so use an assertion for this + if maxdepth is None: maxdepth = dtype.struct_nesting_depth() + if maxdepth <= 0: + assert False + + if name not in code.globalstate.utility_codes: + code.globalstate.utility_codes.add(name) + typecode = code.globalstate['typeinfo'] + + arraysizes = [] + if dtype.is_array: + while dtype.is_array: + arraysizes.append(dtype.size) + dtype = dtype.base_type + + complex_possible = dtype.is_struct_or_union and dtype.can_be_complex() + + declcode = dtype.empty_declaration_code() + if dtype.is_simple_buffer_dtype(): + structinfo_name = "NULL" + elif dtype.is_struct: + struct_scope = dtype.scope + if dtype.is_cv_qualified: + struct_scope = struct_scope.base_type_scope + # Must pre-call all used types in order not to recurse during utility code writing. 
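+            # For "struct { int a; float b; }" as dtype this emits roughly:
+            #   static __Pyx_StructField __Pyx_StructFields_nn_<name>[] = {
+            #       {&__Pyx_TypeInfo_int, "a", offsetof(<struct>, a)},
+            #       {&__Pyx_TypeInfo_float, "b", offsetof(<struct>, b)},
+            #       {NULL, NULL, 0}
+            #   };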
+ fields = struct_scope.var_entries + assert len(fields) > 0 + types = [get_type_information_cname(code, f.type, maxdepth - 1) + for f in fields] + typecode.putln("static __Pyx_StructField %s[] = {" % structinfo_name, safe=True) + + if dtype.is_cv_qualified: + # roughly speaking, remove "const" from struct_type + struct_type = dtype.cv_base_type.empty_declaration_code() + else: + struct_type = dtype.empty_declaration_code() + + for f, typeinfo in zip(fields, types): + typecode.putln(' {&%s, "%s", offsetof(%s, %s)},' % + (typeinfo, f.name, struct_type, f.cname), safe=True) + + typecode.putln(' {NULL, NULL, 0}', safe=True) + typecode.putln("};", safe=True) + else: + assert False + + rep = str(dtype) + + flags = "0" + is_unsigned = "0" + if dtype is PyrexTypes.c_char_type: + is_unsigned = "__PYX_IS_UNSIGNED(%s)" % declcode + typegroup = "'H'" + elif dtype.is_int: + is_unsigned = "__PYX_IS_UNSIGNED(%s)" % declcode + typegroup = "%s ? 'U' : 'I'" % is_unsigned + elif complex_possible or dtype.is_complex: + typegroup = "'C'" + elif dtype.is_float: + typegroup = "'R'" + elif dtype.is_struct: + typegroup = "'S'" + if dtype.packed: + flags = "__PYX_BUF_FLAGS_PACKED_STRUCT" + elif dtype.is_pyobject: + typegroup = "'O'" + else: + assert False, dtype + + typeinfo = ('static __Pyx_TypeInfo %s = ' + '{ "%s", %s, sizeof(%s), { %s }, %s, %s, %s, %s };') + tup = (name, rep, structinfo_name, declcode, + ', '.join([str(x) for x in arraysizes]) or '0', len(arraysizes), + typegroup, is_unsigned, flags) + typecode.putln(typeinfo % tup, safe=True) + + return name + +def load_buffer_utility(util_code_name, context=None, **kwargs): + if context is None: + return UtilityCode.load(util_code_name, "Buffer.c", **kwargs) + else: + return TempitaUtilityCode.load(util_code_name, "Buffer.c", context=context, **kwargs) + +context = dict(max_dims=Options.buffer_max_dims) +buffer_struct_declare_code = load_buffer_utility("BufferStructDeclare", context=context) +buffer_formats_declare_code = load_buffer_utility("BufferFormatStructs") + +# Utility function to set the right exception +# The caller should immediately goto_error +raise_indexerror_code = load_buffer_utility("BufferIndexError") +raise_indexerror_nogil = load_buffer_utility("BufferIndexErrorNogil") +raise_buffer_fallback_code = load_buffer_utility("BufferFallbackError") + +acquire_utility_code = load_buffer_utility("BufferGetAndValidate", context=context) +buffer_format_check_code = load_buffer_utility("BufferFormatCheck", context=context) + +# See utility code BufferFormatFromTypeInfo +_typeinfo_to_format_code = load_buffer_utility("TypeInfoToFormat") diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Builtin.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Builtin.py new file mode 100644 index 0000000000000000000000000000000000000000..46dea9282bead154aef485374c71cce4dc8cb139 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Builtin.py @@ -0,0 +1,644 @@ +# +# Builtin Definitions +# + +from __future__ import absolute_import + +from .StringEncoding import EncodedString +from .Symtab import BuiltinScope, StructOrUnionScope, ModuleScope, Entry +from .Code import UtilityCode, TempitaUtilityCode +from .TypeSlots import Signature +from . 
import PyrexTypes + + +# C-level implementations of builtin types, functions and methods + +iter_next_utility_code = UtilityCode.load("IterNext", "ObjectHandling.c") +getattr_utility_code = UtilityCode.load("GetAttr", "ObjectHandling.c") +getattr3_utility_code = UtilityCode.load("GetAttr3", "Builtins.c") +pyexec_utility_code = UtilityCode.load("PyExec", "Builtins.c") +pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c") +globals_utility_code = UtilityCode.load("Globals", "Builtins.c") + +builtin_utility_code = { + 'StopAsyncIteration': UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"), +} + + +# mapping from builtins to their C-level equivalents + +class _BuiltinOverride(object): + def __init__(self, py_name, args, ret_type, cname, py_equiv="*", + utility_code=None, sig=None, func_type=None, + is_strict_signature=False, builtin_return_type=None, + nogil=None): + self.py_name, self.cname, self.py_equiv = py_name, cname, py_equiv + self.args, self.ret_type = args, ret_type + self.func_type, self.sig = func_type, sig + self.builtin_return_type = builtin_return_type + self.is_strict_signature = is_strict_signature + self.utility_code = utility_code + self.nogil = nogil + + def build_func_type(self, sig=None, self_arg=None): + if sig is None: + sig = Signature(self.args, self.ret_type, nogil=self.nogil) + sig.exception_check = False # not needed for the current builtins + func_type = sig.function_type(self_arg) + if self.is_strict_signature: + func_type.is_strict_signature = True + if self.builtin_return_type: + func_type.return_type = builtin_types[self.builtin_return_type] + return func_type + + +class BuiltinAttribute(object): + def __init__(self, py_name, cname=None, field_type=None, field_type_name=None): + self.py_name = py_name + self.cname = cname or py_name + self.field_type_name = field_type_name # can't do the lookup before the type is declared! 
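+        # 'cval' on the complex type, for instance, is declared with
+        # field_type_name='Py_complex' and only resolved in declare_in_type().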
+ self.field_type = field_type + + def declare_in_type(self, self_type): + if self.field_type_name is not None: + # lazy type lookup + field_type = builtin_scope.lookup(self.field_type_name).type + else: + field_type = self.field_type or PyrexTypes.py_object_type + entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private') + entry.is_variable = True + + +class BuiltinFunction(_BuiltinOverride): + def declare_in_scope(self, scope): + func_type, sig = self.func_type, self.sig + if func_type is None: + func_type = self.build_func_type(sig) + scope.declare_builtin_cfunction(self.py_name, func_type, self.cname, + self.py_equiv, self.utility_code) + + +class BuiltinMethod(_BuiltinOverride): + def declare_in_type(self, self_type): + method_type, sig = self.func_type, self.sig + if method_type is None: + # override 'self' type (first argument) + self_arg = PyrexTypes.CFuncTypeArg("", self_type, None) + self_arg.not_none = True + self_arg.accept_builtin_subtypes = True + method_type = self.build_func_type(sig, self_arg) + self_type.scope.declare_builtin_cfunction( + self.py_name, method_type, self.cname, utility_code=self.utility_code) + + +class BuiltinProperty(object): + # read only for now + def __init__(self, py_name, property_type, call_cname, + exception_value=None, exception_check=None, utility_code=None): + self.py_name = py_name + self.property_type = property_type + self.call_cname = call_cname + self.utility_code = utility_code + self.exception_value = exception_value + self.exception_check = exception_check + + def declare_in_type(self, self_type): + self_type.scope.declare_cproperty( + self.py_name, + self.property_type, + self.call_cname, + exception_value=self.exception_value, + exception_check=self.exception_check, + utility_code=self.utility_code + ) + + +builtin_function_table = [ + # name, args, return, C API func, py equiv = "*" + BuiltinFunction('abs', "d", "d", "fabs", + is_strict_signature=True, nogil=True), + BuiltinFunction('abs', "f", "f", "fabsf", + is_strict_signature=True, nogil=True), + BuiltinFunction('abs', "i", "i", "abs", + is_strict_signature=True, nogil=True), + BuiltinFunction('abs', "l", "l", "labs", + is_strict_signature=True, nogil=True), + BuiltinFunction('abs', None, None, "__Pyx_abs_longlong", + utility_code = UtilityCode.load("abs_longlong", "Builtins.c"), + func_type = PyrexTypes.CFuncType( + PyrexTypes.c_longlong_type, [ + PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None) + ], + is_strict_signature = True, nogil=True)), + ] + list( + BuiltinFunction('abs', None, None, "/*abs_{0}*/".format(t.specialization_name()), + func_type = PyrexTypes.CFuncType( + t, + [PyrexTypes.CFuncTypeArg("arg", t, None)], + is_strict_signature = True, nogil=True)) + for t in (PyrexTypes.c_uint_type, PyrexTypes.c_ulong_type, PyrexTypes.c_ulonglong_type) + ) + list( + BuiltinFunction('abs', None, None, "__Pyx_c_abs{0}".format(t.funcsuffix), + func_type = PyrexTypes.CFuncType( + t.real_type, [ + PyrexTypes.CFuncTypeArg("arg", t, None) + ], + is_strict_signature = True, nogil=True)) + for t in (PyrexTypes.c_float_complex_type, + PyrexTypes.c_double_complex_type, + PyrexTypes.c_longdouble_complex_type) + ) + [ + BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute", + utility_code=UtilityCode.load("py_abs", "Builtins.c")), + #('all', "", "", ""), + #('any', "", "", ""), + #('ascii', "", "", ""), + #('bin', "", "", ""), + BuiltinFunction('callable', "O", "b", "__Pyx_PyCallable_Check", + utility_code = UtilityCode.load("CallableCheck", 
"ObjectHandling.c")), + #('chr', "", "", ""), + #('cmp', "", "", "", ""), # int PyObject_Cmp(PyObject *o1, PyObject *o2, int *result) + #('compile', "", "", ""), # PyObject* Py_CompileString( char *str, char *filename, int start) + BuiltinFunction('delattr', "OO", "r", "PyObject_DelAttr"), + BuiltinFunction('dir', "O", "O", "PyObject_Dir"), + BuiltinFunction('divmod', "OO", "O", "PyNumber_Divmod"), + BuiltinFunction('exec', "O", "O", "__Pyx_PyExecGlobals", + utility_code = pyexec_globals_utility_code), + BuiltinFunction('exec', "OO", "O", "__Pyx_PyExec2", + utility_code = pyexec_utility_code), + BuiltinFunction('exec', "OOO", "O", "__Pyx_PyExec3", + utility_code = pyexec_utility_code), + #('eval', "", "", ""), + #('execfile', "", "", ""), + #('filter', "", "", ""), + BuiltinFunction('getattr3', "OOO", "O", "__Pyx_GetAttr3", "getattr", + utility_code=getattr3_utility_code), # Pyrex legacy + BuiltinFunction('getattr', "OOO", "O", "__Pyx_GetAttr3", + utility_code=getattr3_utility_code), + BuiltinFunction('getattr', "OO", "O", "__Pyx_GetAttr", + utility_code=getattr_utility_code), + BuiltinFunction('hasattr', "OO", "b", "__Pyx_HasAttr", + utility_code = UtilityCode.load("HasAttr", "Builtins.c")), + BuiltinFunction('hash', "O", "h", "PyObject_Hash"), + #('hex', "", "", ""), + #('id', "", "", ""), + #('input', "", "", ""), + BuiltinFunction('intern', "O", "O", "__Pyx_Intern", + utility_code = UtilityCode.load("Intern", "Builtins.c")), + BuiltinFunction('isinstance', "OO", "b", "PyObject_IsInstance"), + BuiltinFunction('issubclass', "OO", "b", "PyObject_IsSubclass"), + BuiltinFunction('iter', "OO", "O", "PyCallIter_New"), + BuiltinFunction('iter', "O", "O", "PyObject_GetIter"), + BuiltinFunction('len', "O", "z", "PyObject_Length"), + BuiltinFunction('locals', "", "O", "__pyx_locals"), + #('map', "", "", ""), + #('max', "", "", ""), + #('min', "", "", ""), + BuiltinFunction('next', "O", "O", "__Pyx_PyIter_Next", + utility_code = iter_next_utility_code), # not available in Py2 => implemented here + BuiltinFunction('next', "OO", "O", "__Pyx_PyIter_Next2", + utility_code = iter_next_utility_code), # not available in Py2 => implemented here + #('oct', "", "", ""), + #('open', "ss", "O", "PyFile_FromString"), # not in Py3 +] + [ + BuiltinFunction('ord', None, None, "__Pyx_long_cast", + func_type=PyrexTypes.CFuncType( + PyrexTypes.c_long_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)], + is_strict_signature=True)) + for c_type in [PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type] +] + [ + BuiltinFunction('ord', None, None, "__Pyx_uchar_cast", + func_type=PyrexTypes.CFuncType( + PyrexTypes.c_uchar_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)], + is_strict_signature=True)) + for c_type in [PyrexTypes.c_char_type, PyrexTypes.c_schar_type, PyrexTypes.c_uchar_type] +] + [ + BuiltinFunction('ord', None, None, "__Pyx_PyObject_Ord", + utility_code=UtilityCode.load_cached("object_ord", "Builtins.c"), + func_type=PyrexTypes.CFuncType( + PyrexTypes.c_long_type, [ + PyrexTypes.CFuncTypeArg("c", PyrexTypes.py_object_type, None) + ], + exception_value="(long)(Py_UCS4)-1")), + BuiltinFunction('pow', "OOO", "O", "PyNumber_Power"), + BuiltinFunction('pow', "OO", "O", "__Pyx_PyNumber_Power2", + utility_code = UtilityCode.load("pow2", "Builtins.c")), + #('range', "", "", ""), + #('raw_input', "", "", ""), + #('reduce', "", "", ""), + BuiltinFunction('reload', "O", "O", "PyImport_ReloadModule"), + BuiltinFunction('repr', "O", "O", "PyObject_Repr"), # , builtin_return_type='str'), # add in Cython 3.1 + 
#('round', "", "", ""), + BuiltinFunction('setattr', "OOO", "r", "PyObject_SetAttr"), + #('sum', "", "", ""), + #('sorted', "", "", ""), + #('type', "O", "O", "PyObject_Type"), + BuiltinFunction('unichr', "i", "O", "PyUnicode_FromOrdinal", builtin_return_type='unicode'), + #('unicode', "", "", ""), + #('vars', "", "", ""), + #('zip', "", "", ""), + # Can't do these easily until we have builtin type entries. + #('typecheck', "OO", "i", "PyObject_TypeCheck", False), + #('issubtype', "OO", "i", "PyType_IsSubtype", False), + + # Put in namespace append optimization. + BuiltinFunction('__Pyx_PyObject_Append', "OO", "O", "__Pyx_PyObject_Append"), + + # This is conditionally looked up based on a compiler directive. + BuiltinFunction('__Pyx_Globals', "", "O", "__Pyx_Globals", + utility_code=globals_utility_code), +] + + +# Builtin types +# bool +# buffer +# classmethod +# dict +# enumerate +# file +# float +# int +# list +# long +# object +# property +# slice +# staticmethod +# super +# str +# tuple +# type +# xrange + +builtin_types_table = [ + + ("type", "PyType_Type", []), + +# This conflicts with the C++ bool type, and unfortunately +# C++ is too liberal about PyObject* <-> bool conversions, +# resulting in unintuitive runtime behavior and segfaults. +# ("bool", "PyBool_Type", []), + + ("int", "PyInt_Type", []), + ("long", "PyLong_Type", []), + ("float", "PyFloat_Type", []), + + ("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'), + BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type), + BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type), + ]), + + ("basestring", "PyBaseString_Type", [ + BuiltinMethod("join", "TO", "T", "__Pyx_PyBaseString_Join", + utility_code=UtilityCode.load("StringJoin", "StringTools.c")), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + ("bytearray", "PyByteArray_Type", [ + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + ("bytes", "PyBytes_Type", [BuiltinMethod("join", "TO", "O", "__Pyx_PyBytes_Join", + utility_code=UtilityCode.load("StringJoin", "StringTools.c")), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + ("str", "PyString_Type", [BuiltinMethod("join", "TO", "O", "__Pyx_PyString_Join", + builtin_return_type='basestring', + utility_code=UtilityCode.load("StringJoin", "StringTools.c")), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + ("unicode", "PyUnicode_Type", [BuiltinMethod("__contains__", "TO", "b", "PyUnicode_Contains"), + BuiltinMethod("join", "TO", "T", "PyUnicode_Join"), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + + ("tuple", "PyTuple_Type", [BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + + ("list", "PyList_Type", [BuiltinMethod("insert", "TzO", "r", "PyList_Insert"), + BuiltinMethod("reverse", "T", "r", "PyList_Reverse"), + BuiltinMethod("append", "TO", "r", "__Pyx_PyList_Append", + utility_code=UtilityCode.load("ListAppend", "Optimize.c")), + BuiltinMethod("extend", 
"TO", "r", "__Pyx_PyList_Extend", + utility_code=UtilityCode.load("ListExtend", "Optimize.c")), + BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply", + utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")), + ]), + + ("dict", "PyDict_Type", [BuiltinMethod("__contains__", "TO", "b", "PyDict_Contains"), + BuiltinMethod("has_key", "TO", "b", "PyDict_Contains"), + BuiltinMethod("items", "T", "O", "__Pyx_PyDict_Items", + utility_code=UtilityCode.load("py_dict_items", "Builtins.c")), + BuiltinMethod("keys", "T", "O", "__Pyx_PyDict_Keys", + utility_code=UtilityCode.load("py_dict_keys", "Builtins.c")), + BuiltinMethod("values", "T", "O", "__Pyx_PyDict_Values", + utility_code=UtilityCode.load("py_dict_values", "Builtins.c")), + BuiltinMethod("iteritems", "T", "O", "__Pyx_PyDict_IterItems", + utility_code=UtilityCode.load("py_dict_iteritems", "Builtins.c")), + BuiltinMethod("iterkeys", "T", "O", "__Pyx_PyDict_IterKeys", + utility_code=UtilityCode.load("py_dict_iterkeys", "Builtins.c")), + BuiltinMethod("itervalues", "T", "O", "__Pyx_PyDict_IterValues", + utility_code=UtilityCode.load("py_dict_itervalues", "Builtins.c")), + BuiltinMethod("viewitems", "T", "O", "__Pyx_PyDict_ViewItems", + utility_code=UtilityCode.load("py_dict_viewitems", "Builtins.c")), + BuiltinMethod("viewkeys", "T", "O", "__Pyx_PyDict_ViewKeys", + utility_code=UtilityCode.load("py_dict_viewkeys", "Builtins.c")), + BuiltinMethod("viewvalues", "T", "O", "__Pyx_PyDict_ViewValues", + utility_code=UtilityCode.load("py_dict_viewvalues", "Builtins.c")), + BuiltinMethod("clear", "T", "r", "__Pyx_PyDict_Clear", + utility_code=UtilityCode.load("py_dict_clear", "Optimize.c")), + BuiltinMethod("copy", "T", "T", "PyDict_Copy")]), + + ("slice", "PySlice_Type", [BuiltinAttribute('start'), + BuiltinAttribute('stop'), + BuiltinAttribute('step'), + ]), +# ("file", "PyFile_Type", []), # not in Py3 + + ("set", "PySet_Type", [BuiltinMethod("clear", "T", "r", "PySet_Clear"), + # discard() and remove() have a special treatment for unhashable values + BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard", + utility_code=UtilityCode.load("py_set_discard", "Optimize.c")), + BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove", + utility_code=UtilityCode.load("py_set_remove", "Optimize.c")), + # update is actually variadic (see Github issue #1645) +# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update", +# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")), + BuiltinMethod("add", "TO", "r", "PySet_Add"), + BuiltinMethod("pop", "T", "O", "PySet_Pop")]), + ("frozenset", "PyFrozenSet_Type", []), + ("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []), + ("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []), + ("memoryview", "PyMemoryView_Type", [ + # TODO - format would be nice, but hard to get + # __len__ can be accessed through a direct lookup of the buffer (but probably in Optimize.c) + # error checking would ideally be limited api only + BuiltinProperty("ndim", PyrexTypes.c_int_type, '__Pyx_PyMemoryView_Get_ndim', + exception_value="-1", exception_check=True, + utility_code=TempitaUtilityCode.load_cached( + "memoryview_get_from_buffer", "Builtins.c", + context=dict(name="ndim") + ) + ), + BuiltinProperty("readonly", PyrexTypes.c_bint_type, '__Pyx_PyMemoryView_Get_readonly', + exception_value="-1", exception_check=True, + utility_code=TempitaUtilityCode.load_cached( + "memoryview_get_from_buffer", "Builtins.c", + context=dict(name="readonly") + ) + ), + 
BuiltinProperty("itemsize", PyrexTypes.c_py_ssize_t_type, '__Pyx_PyMemoryView_Get_itemsize', + exception_value="-1", exception_check=True, + utility_code=TempitaUtilityCode.load_cached( + "memoryview_get_from_buffer", "Builtins.c", + context=dict(name="itemsize") + ) + )] + ) +] + + +types_that_construct_their_instance = frozenset({ + # some builtin types do not always return an instance of + # themselves - these do: + 'type', 'bool', 'long', 'float', 'complex', + 'bytes', 'unicode', 'bytearray', + 'tuple', 'list', 'dict', 'set', 'frozenset', + # 'str', # only in Py3.x + # 'file', # only in Py2.x + 'memoryview' +}) + + +builtin_structs_table = [ + ('Py_buffer', 'Py_buffer', + [("buf", PyrexTypes.c_void_ptr_type), + ("obj", PyrexTypes.py_object_type), + ("len", PyrexTypes.c_py_ssize_t_type), + ("itemsize", PyrexTypes.c_py_ssize_t_type), + ("readonly", PyrexTypes.c_bint_type), + ("ndim", PyrexTypes.c_int_type), + ("format", PyrexTypes.c_char_ptr_type), + ("shape", PyrexTypes.c_py_ssize_t_ptr_type), + ("strides", PyrexTypes.c_py_ssize_t_ptr_type), + ("suboffsets", PyrexTypes.c_py_ssize_t_ptr_type), + ("smalltable", PyrexTypes.CArrayType(PyrexTypes.c_py_ssize_t_type, 2)), + ("internal", PyrexTypes.c_void_ptr_type), + ]), + ('Py_complex', 'Py_complex', + [('real', PyrexTypes.c_double_type), + ('imag', PyrexTypes.c_double_type), + ]) +] + +# set up builtin scope + +builtin_scope = BuiltinScope() + +def init_builtin_funcs(): + for bf in builtin_function_table: + bf.declare_in_scope(builtin_scope) + +builtin_types = {} + +def init_builtin_types(): + global builtin_types + for name, cname, methods in builtin_types_table: + utility = builtin_utility_code.get(name) + if name == 'frozenset': + objstruct_cname = 'PySetObject' + elif name == 'bytearray': + objstruct_cname = 'PyByteArrayObject' + elif name == 'bool': + objstruct_cname = None + elif name == 'Exception': + objstruct_cname = "PyBaseExceptionObject" + elif name == 'StopAsyncIteration': + objstruct_cname = "PyBaseExceptionObject" + else: + objstruct_cname = 'Py%sObject' % name.capitalize() + type_class = PyrexTypes.BuiltinObjectType + if name in ['dict', 'list', 'set', 'frozenset']: + type_class = PyrexTypes.BuiltinTypeConstructorObjectType + elif name == 'tuple': + type_class = PyrexTypes.PythonTupleTypeConstructor + the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname, + type_class=type_class) + builtin_types[name] = the_type + for method in methods: + method.declare_in_type(the_type) + +def init_builtin_structs(): + for name, cname, attribute_types in builtin_structs_table: + scope = StructOrUnionScope(name) + for attribute_name, attribute_type in attribute_types: + scope.declare_var(attribute_name, attribute_type, None, + attribute_name, allow_pyobject=True) + builtin_scope.declare_struct_or_union( + name, "struct", scope, 1, None, cname = cname) + + +def init_builtins(): + #Errors.init_thread() # hopefully not needed - we should not emit warnings ourselves + init_builtin_structs() + init_builtin_types() + init_builtin_funcs() + + entry = builtin_scope.declare_var( + '__debug__', PyrexTypes.c_const_type(PyrexTypes.c_bint_type), + pos=None, cname='__pyx_assertions_enabled()', is_cdef=True) + entry.utility_code = UtilityCode.load_cached("AssertionsEnabled", "Exceptions.c") + + global type_type, list_type, tuple_type, dict_type, set_type, frozenset_type, slice_type + global bytes_type, str_type, unicode_type, basestring_type, bytearray_type + global float_type, int_type, long_type, bool_type, complex_type + 
global memoryview_type, py_buffer_type + global sequence_types + type_type = builtin_scope.lookup('type').type + list_type = builtin_scope.lookup('list').type + tuple_type = builtin_scope.lookup('tuple').type + dict_type = builtin_scope.lookup('dict').type + set_type = builtin_scope.lookup('set').type + frozenset_type = builtin_scope.lookup('frozenset').type + slice_type = builtin_scope.lookup('slice').type + + bytes_type = builtin_scope.lookup('bytes').type + str_type = builtin_scope.lookup('str').type + unicode_type = builtin_scope.lookup('unicode').type + basestring_type = builtin_scope.lookup('basestring').type + bytearray_type = builtin_scope.lookup('bytearray').type + memoryview_type = builtin_scope.lookup('memoryview').type + + float_type = builtin_scope.lookup('float').type + int_type = builtin_scope.lookup('int').type + long_type = builtin_scope.lookup('long').type + bool_type = builtin_scope.lookup('bool').type + complex_type = builtin_scope.lookup('complex').type + + sequence_types = ( + list_type, + tuple_type, + bytes_type, + str_type, + unicode_type, + basestring_type, + bytearray_type, + memoryview_type, + ) + + # Set up type inference links between equivalent Python/C types + bool_type.equivalent_type = PyrexTypes.c_bint_type + PyrexTypes.c_bint_type.equivalent_type = bool_type + + float_type.equivalent_type = PyrexTypes.c_double_type + PyrexTypes.c_double_type.equivalent_type = float_type + + complex_type.equivalent_type = PyrexTypes.c_double_complex_type + PyrexTypes.c_double_complex_type.equivalent_type = complex_type + + py_buffer_type = builtin_scope.lookup('Py_buffer').type + + +init_builtins() + +############################## +# Support for a few standard library modules that Cython understands (currently typing and dataclasses) +############################## +_known_module_scopes = {} + +def get_known_standard_library_module_scope(module_name): + mod = _known_module_scopes.get(module_name) + if mod: + return mod + + if module_name == "typing": + mod = ModuleScope(module_name, None, None) + for name, tp in [ + ('Dict', dict_type), + ('List', list_type), + ('Tuple', tuple_type), + ('Set', set_type), + ('FrozenSet', frozenset_type), + ]: + name = EncodedString(name) + entry = mod.declare_type(name, tp, pos = None) + var_entry = Entry(name, None, PyrexTypes.py_object_type) + var_entry.is_pyglobal = True + var_entry.is_variable = True + var_entry.scope = mod + entry.as_variable = var_entry + entry.known_standard_library_import = "%s.%s" % (module_name, name) + + for name in ['ClassVar', 'Optional']: + name = EncodedString(name) + indexed_type = PyrexTypes.SpecialPythonTypeConstructor(EncodedString("typing."+name)) + entry = mod.declare_type(name, indexed_type, pos = None) + var_entry = Entry(name, None, PyrexTypes.py_object_type) + var_entry.is_pyglobal = True + var_entry.is_variable = True + var_entry.scope = mod + entry.as_variable = var_entry + entry.known_standard_library_import = "%s.%s" % (module_name, name) + _known_module_scopes[module_name] = mod + elif module_name == "dataclasses": + mod = ModuleScope(module_name, None, None) + indexed_type = PyrexTypes.SpecialPythonTypeConstructor(EncodedString("dataclasses.InitVar")) + initvar_string = EncodedString("InitVar") + entry = mod.declare_type(initvar_string, indexed_type, pos = None) + var_entry = Entry(initvar_string, None, PyrexTypes.py_object_type) + var_entry.is_pyglobal = True + var_entry.scope = mod + entry.as_variable = var_entry + entry.known_standard_library_import = "%s.InitVar" % module_name + for name 
in ["dataclass", "field"]: + mod.declare_var(EncodedString(name), PyrexTypes.py_object_type, pos=None) + _known_module_scopes[module_name] = mod + elif module_name == "functools": + mod = ModuleScope(module_name, None, None) + for name in ["total_ordering"]: + mod.declare_var(EncodedString(name), PyrexTypes.py_object_type, pos=None) + _known_module_scopes[module_name] = mod + + return mod + + +def get_known_standard_library_entry(qualified_name): + name_parts = qualified_name.split(".") + module_name = EncodedString(name_parts[0]) + rest = name_parts[1:] + + if len(rest) > 1: # for now, we don't know how to deal with any nested modules + return None + + mod = get_known_standard_library_module_scope(module_name) + + # eventually handle more sophisticated multiple lookups if needed + if mod and rest: + return mod.lookup_here(rest[0]) + return None + + +def exprnode_to_known_standard_library_name(node, env): + qualified_name_parts = [] + known_name = None + while node.is_attribute: + qualified_name_parts.append(node.attribute) + node = node.obj + if node.is_name: + entry = env.lookup(node.name) + if entry and entry.known_standard_library_import: + if get_known_standard_library_entry( + entry.known_standard_library_import): + known_name = entry.known_standard_library_import + else: + standard_env = get_known_standard_library_module_scope( + entry.known_standard_library_import) + if standard_env: + qualified_name_parts.append(standard_env.name) + known_name = ".".join(reversed(qualified_name_parts)) + return known_name diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CmdLine.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CmdLine.py new file mode 100644 index 0000000000000000000000000000000000000000..776636c3234fb04cbd2d3a50ae092de9e2c900ec --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CmdLine.py @@ -0,0 +1,251 @@ +# +# Cython - Command Line Parsing +# + +from __future__ import absolute_import + +import sys +import os +from argparse import ArgumentParser, Action, SUPPRESS +from . 
import Options + + +if sys.version_info < (3, 3): + # TODO: This workaround can be removed in Cython 3.1 + FileNotFoundError = IOError + + +class ParseDirectivesAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + old_directives = dict(getattr(namespace, self.dest, + Options.get_directive_defaults())) + directives = Options.parse_directive_list( + values, relaxed_bool=True, current_settings=old_directives) + setattr(namespace, self.dest, directives) + + +class ParseOptionsAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + options = dict(getattr(namespace, self.dest, {})) + for opt in values.split(','): + if '=' in opt: + n, v = opt.split('=', 1) + v = v.lower() not in ('false', 'f', '0', 'no') + else: + n, v = opt, True + options[n] = v + setattr(namespace, self.dest, options) + + +class ParseCompileTimeEnvAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + old_env = dict(getattr(namespace, self.dest, {})) + new_env = Options.parse_compile_time_env(values, current_settings=old_env) + setattr(namespace, self.dest, new_env) + + +class ActivateAllWarningsAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + directives = getattr(namespace, 'compiler_directives', {}) + directives.update(Options.extra_warnings) + namespace.compiler_directives = directives + + +class SetLenientAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + namespace.error_on_unknown_names = False + namespace.error_on_uninitialized = False + + +class SetGDBDebugAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + namespace.gdb_debug = True + namespace.output_dir = os.curdir + + +class SetGDBDebugOutputAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + namespace.gdb_debug = True + namespace.output_dir = values + + +class SetAnnotateCoverageAction(Action): + def __call__(self, parser, namespace, values, option_string=None): + namespace.annotate = True + namespace.annotate_coverage_xml = values + + +def create_cython_argparser(): + description = "Cython (https://cython.org/) is a compiler for code written in the "\ + "Cython language. Cython is based on Pyrex by Greg Ewing." 
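[Editor's note, illustrative only, not part of the diff] The Parse*Action classes above all share one pattern: merge repeated 'NAME=VALUE,...' options into a dict stored on the argparse namespace. A minimal runnable sketch of that pattern, with the hypothetical name MergeKeyValueAction standing in for ParseDirectivesAction/ParseOptionsAction:

from argparse import ArgumentParser, Action

class MergeKeyValueAction(Action):
    # Hypothetical stand-in; the real classes also validate and normalize values.
    def __call__(self, parser, namespace, values, option_string=None):
        merged = dict(getattr(namespace, self.dest, None) or {})
        for item in values.split(','):
            name, _, value = item.partition('=')
            merged[name] = value if value else True
        setattr(namespace, self.dest, merged)

parser = ArgumentParser()
parser.add_argument('-X', dest='directives', action=MergeKeyValueAction)
ns = parser.parse_args(['-X', 'boundscheck=False', '-X', 'cdivision=True'])
assert ns.directives == {'boundscheck': 'False', 'cdivision': 'True'}

Repeating the flag extends the same dict instead of overwriting it, which is why a later -X can override individual directives without discarding earlier ones.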
+ + parser = ArgumentParser(description=description, argument_default=SUPPRESS) + + parser.add_argument("-V", "--version", dest='show_version', action='store_const', const=1, + help='Display version number of cython compiler') + parser.add_argument("-l", "--create-listing", dest='use_listing_file', action='store_const', const=1, + help='Write error messages to a listing file') + parser.add_argument("-I", "--include-dir", dest='include_path', action='append', + help='Search for include files in named directory ' + '(multiple include directories are allowed).') + parser.add_argument("-o", "--output-file", dest='output_file', action='store', type=str, + help='Specify name of generated C file') + parser.add_argument("-t", "--timestamps", dest='timestamps', action='store_const', const=1, + help='Only compile newer source files') + parser.add_argument("-f", "--force", dest='timestamps', action='store_const', const=0, + help='Compile all source files (overrides implied -t)') + parser.add_argument("-v", "--verbose", dest='verbose', action='count', + help='Be verbose, print file names on multiple compilation') + parser.add_argument("-p", "--embed-positions", dest='embed_pos_in_docstring', action='store_const', const=1, + help='If specified, the positions in Cython files of each ' + 'function definition is embedded in its docstring.') + parser.add_argument("--cleanup", dest='generate_cleanup_code', action='store', type=int, + help='Release interned objects on python exit, for memory debugging. ' + 'Level indicates aggressiveness, default 0 releases nothing.') + parser.add_argument("-w", "--working", dest='working_path', action='store', type=str, + help='Sets the working directory for Cython (the directory modules are searched from)') + parser.add_argument("--gdb", action=SetGDBDebugAction, nargs=0, + help='Output debug information for cygdb') + parser.add_argument("--gdb-outdir", action=SetGDBDebugOutputAction, type=str, + help='Specify gdb debug information output directory. Implies --gdb.') + parser.add_argument("-D", "--no-docstrings", dest='docstrings', action='store_false', + help='Strip docstrings from the compiled module.') + parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate', + help='Produce a colorized HTML version of the source.') + parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate', + help='Produce a colorized HTML version of the source ' + 'which includes entire generated C/C++-code.') + parser.add_argument("--annotate-coverage", dest='annotate_coverage_xml', action=SetAnnotateCoverageAction, type=str, + help='Annotate and include coverage information from cov.xml.') + parser.add_argument("--line-directives", dest='emit_linenums', action='store_true', + help='Produce #line directives pointing to the .pyx source') + parser.add_argument("-+", "--cplus", dest='cplus', action='store_const', const=1, + help='Output a C++ rather than C file.') + parser.add_argument('--embed', action='store_const', const='main', + help='Generate a main() function that embeds the Python interpreter. 
' + 'Pass --embed= for a name other than main().') + parser.add_argument('-2', dest='language_level', action='store_const', const=2, + help='Compile based on Python-2 syntax and code semantics.') + parser.add_argument('-3', dest='language_level', action='store_const', const=3, + help='Compile based on Python-3 syntax and code semantics.') + parser.add_argument('--3str', dest='language_level', action='store_const', const='3str', + help='Compile based on Python-3 syntax and code semantics without ' + 'assuming unicode by default for string literals under Python 2.') + parser.add_argument("--lenient", action=SetLenientAction, nargs=0, + help='Change some compile time errors to runtime errors to ' + 'improve Python compatibility') + parser.add_argument("--capi-reexport-cincludes", dest='capi_reexport_cincludes', action='store_true', + help='Add cincluded headers to any auto-generated header files.') + parser.add_argument("--fast-fail", dest='fast_fail', action='store_true', + help='Abort the compilation on the first error') + parser.add_argument("-Werror", "--warning-errors", dest='warning_errors', action='store_true', + help='Make all warnings into errors') + parser.add_argument("-Wextra", "--warning-extra", action=ActivateAllWarningsAction, nargs=0, + help='Enable extra warnings') + + parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...', + dest='compiler_directives', type=str, + action=ParseDirectivesAction, + help='Overrides a compiler directive') + parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...', + dest='compile_time_env', type=str, + action=ParseCompileTimeEnvAction, + help='Provides compile time env like DEF would do.') + parser.add_argument("--module-name", + dest='module_name', type=str, action='store', + help='Fully qualified module name. If not given, is ' + 'deduced from the import path if source file is in ' + 'a package, or equals the filename otherwise.') + parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources') + parser.add_argument('sources', nargs='*', default=[]) + + # TODO: add help + parser.add_argument("-z", "--pre-import", dest='pre_import', action='store', type=str, help=SUPPRESS) + parser.add_argument("--convert-range", dest='convert_range', action='store_true', help=SUPPRESS) + parser.add_argument("--no-c-in-traceback", dest='c_line_in_traceback', action='store_false', help=SUPPRESS) + parser.add_argument("--cimport-from-pyx", dest='cimport_from_pyx', action='store_true', help=SUPPRESS) + parser.add_argument("--old-style-globals", dest='old_style_globals', action='store_true', help=SUPPRESS) + + # debug stuff: + from . 
import DebugFlags + for name in vars(DebugFlags): + if name.startswith("debug"): + option_name = name.replace('_', '-') + parser.add_argument("--" + option_name, action='store_true', help=SUPPRESS) + + return parser + + +def parse_command_line_raw(parser, args): + # special handling for --embed and --embed=xxxx as they aren't correctly parsed + def filter_out_embed_options(args): + with_embed, without_embed = [], [] + for x in args: + if x == '--embed' or x.startswith('--embed='): + with_embed.append(x) + else: + without_embed.append(x) + return with_embed, without_embed + + with_embed, args_without_embed = filter_out_embed_options(args) + + arguments, unknown = parser.parse_known_args(args_without_embed) + + sources = arguments.sources + del arguments.sources + + # unknown can be either debug, embed or input files or really unknown + for option in unknown: + if option.startswith('-'): + parser.error("unknown option " + option) + else: + sources.append(option) + + # embed-stuff must be handled extra: + for x in with_embed: + if x == '--embed': + name = 'main' # default value + else: + name = x[len('--embed='):] + setattr(arguments, 'embed', name) + + return arguments, sources + + +def parse_command_line(args): + parser = create_cython_argparser() + arguments, sources = parse_command_line_raw(parser, args) + + work_dir = getattr(arguments, 'working_path', '') + for source in sources: + if work_dir and not os.path.isabs(source): + source = os.path.join(work_dir, source) + if not os.path.exists(source): + import errno + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), source) + + options = Options.CompilationOptions(Options.default_options) + for name, value in vars(arguments).items(): + if name.startswith('debug'): + from . import DebugFlags + if name in dir(DebugFlags): + setattr(DebugFlags, name, value) + else: + parser.error("Unknown debug flag: %s\n" % name) + elif hasattr(Options, name): + setattr(Options, name, value) + else: + setattr(options, name, value) + + if options.use_listing_file and len(sources) > 1: + parser.error("cython: Only one source file allowed when using -o\n") + if len(sources) == 0 and not options.show_version: + parser.error("cython: Need at least one source file\n") + if Options.embed and len(sources) > 1: + parser.error("cython: Only one source file allowed when using --embed\n") + if options.module_name: + if options.timestamps: + parser.error("cython: Cannot use --module-name with --timestamps\n") + if len(sources) > 1: + parser.error("cython: Only one source file allowed when using --module-name\n") + return options, sources diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..2e86cbf140c7935e9e8d7e6560328edb1c271b8e Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.cp39-win_amd64.pyd differ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.pxd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e915c6fea3534b4fed1e4df99093f4c3a4721082 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.pxd @@ -0,0 +1,131 @@ +# cython: language_level=3 + +cimport cython +from ..StringIOTree cimport StringIOTree + + +cdef class UtilityCodeBase(object): + cpdef format_code(self, code_string, 
replace_empty_lines=*) + + +cdef class UtilityCode(UtilityCodeBase): + cdef public object name + cdef public object proto + cdef public object impl + cdef public object init + cdef public object cleanup + cdef public object proto_block + cdef public object requires + cdef public dict _cache + cdef public list specialize_list + cdef public object file + + cpdef none_or_sub(self, s, context) + + +cdef class FunctionState: + cdef public set names_taken + cdef public object owner + cdef public object scope + + cdef public object error_label + cdef public size_t label_counter + cdef public set labels_used + cdef public object return_label + cdef public object continue_label + cdef public object break_label + cdef public list yield_labels + + cdef public object return_from_error_cleanup_label # not used in __init__ ? + + cdef public object exc_vars + cdef public object current_except + cdef public bint in_try_finally + cdef public bint can_trace + cdef public bint gil_owned + + cdef public list temps_allocated + cdef public dict temps_free + cdef public dict temps_used_type + cdef public set zombie_temps + cdef public size_t temp_counter + cdef public list collect_temps_stack + + cdef public object closure_temps + cdef public bint should_declare_error_indicator + cdef public bint uses_error_indicator + cdef public bint error_without_exception + + cdef public bint needs_refnanny + + @cython.locals(n=size_t) + cpdef new_label(self, name=*) + cpdef tuple get_loop_labels(self) + cpdef set_loop_labels(self, labels) + cpdef tuple get_all_labels(self) + cpdef set_all_labels(self, labels) + cpdef start_collecting_temps(self) + cpdef stop_collecting_temps(self) + + cpdef list temps_in_use(self) + +cdef class IntConst: + cdef public object cname + cdef public object value + cdef public bint is_long + +cdef class PyObjectConst: + cdef public object cname + cdef public object type + +cdef class StringConst: + cdef public object cname + cdef public object text + cdef public object escaped_value + cdef public dict py_strings + cdef public list py_versions + + @cython.locals(intern=bint, is_str=bint, is_unicode=bint) + cpdef get_py_string_const(self, encoding, identifier=*, is_str=*, py3str_cstring=*) + +## cdef class PyStringConst: +## cdef public object cname +## cdef public object encoding +## cdef public bint is_str +## cdef public bint is_unicode +## cdef public bint intern + +#class GlobalState(object): + +#def funccontext_property(name): + +cdef class CCodeWriter(object): + cdef readonly StringIOTree buffer + cdef readonly list pyclass_stack + cdef readonly object globalstate + cdef readonly object funcstate + cdef object code_config + cdef object last_pos + cdef object last_marked_pos + cdef Py_ssize_t level + cdef public Py_ssize_t call_level # debug-only, see Nodes.py + cdef bint bol + + cpdef write(self, s) + @cython.final + cdef _write_lines(self, s) + cpdef _write_to_buffer(self, s) + cpdef put(self, code) + cpdef put_safe(self, code) + cpdef putln(self, code=*, bint safe=*) + @cython.final + cdef increase_indent(self) + @cython.final + cdef decrease_indent(self) + @cython.final + cdef indent(self) + + +cdef class PyrexCodeWriter: + cdef public object f + cdef public Py_ssize_t level diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.py new file mode 100644 index 0000000000000000000000000000000000000000..4bc39ef7d8bbca7253a50bded9920b5ff1146a96 --- /dev/null +++ 
b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.py @@ -0,0 +1,2725 @@ +# cython: language_level=3str +# cython: auto_pickle=False +# +# Code output module +# + +from __future__ import absolute_import + +import cython +cython.declare(os=object, re=object, operator=object, textwrap=object, + Template=object, Naming=object, Options=object, StringEncoding=object, + Utils=object, SourceDescriptor=object, StringIOTree=object, + DebugFlags=object, basestring=object, defaultdict=object, + closing=object, partial=object) + +import hashlib +import operator +import os +import re +import shutil +import textwrap +from string import Template +from functools import partial +from contextlib import closing, contextmanager +from collections import defaultdict + +from . import Naming +from . import Options +from . import DebugFlags +from . import StringEncoding +from .. import Utils +from .Scanning import SourceDescriptor +from ..StringIOTree import StringIOTree + +try: + from __builtin__ import basestring +except ImportError: + from builtins import str as basestring + + +non_portable_builtins_map = { + # builtins that have different names in different Python versions + 'bytes' : ('PY_MAJOR_VERSION < 3', 'str'), + 'unicode' : ('PY_MAJOR_VERSION >= 3', 'str'), + 'basestring' : ('PY_MAJOR_VERSION >= 3', 'str'), + 'xrange' : ('PY_MAJOR_VERSION >= 3', 'range'), + 'raw_input' : ('PY_MAJOR_VERSION >= 3', 'input'), +} + +ctypedef_builtins_map = { + # types of builtins in "ctypedef class" statements which we don't + # import either because the names conflict with C types or because + # the type simply is not exposed. + 'py_int' : '&PyInt_Type', + 'py_long' : '&PyLong_Type', + 'py_float' : '&PyFloat_Type', + 'wrapper_descriptor' : '&PyWrapperDescr_Type', +} + +basicsize_builtins_map = { + # builtins whose type has a different tp_basicsize than sizeof(...) + 'PyTypeObject': 'PyHeapTypeObject', +} + +uncachable_builtins = [ + # Global/builtin names that cannot be cached because they may or may not + # be available at import time, for various reasons: + ## - Py3.7+ + 'breakpoint', # might deserve an implementation in Cython + ## - Py3.4+ + '__loader__', + '__spec__', + ## - Py3+ + 'BlockingIOError', + 'BrokenPipeError', + 'ChildProcessError', + 'ConnectionAbortedError', + 'ConnectionError', + 'ConnectionRefusedError', + 'ConnectionResetError', + 'FileExistsError', + 'FileNotFoundError', + 'InterruptedError', + 'IsADirectoryError', + 'ModuleNotFoundError', + 'NotADirectoryError', + 'PermissionError', + 'ProcessLookupError', + 'RecursionError', + 'ResourceWarning', + #'StopAsyncIteration', # backported + 'TimeoutError', + '__build_class__', + 'ascii', # might deserve an implementation in Cython + #'exec', # implemented in Cython + ## - platform specific + 'WindowsError', + ## - others + '_', # e.g. used by gettext +] + +special_py_methods = cython.declare(frozenset, frozenset(( + '__cinit__', '__dealloc__', '__richcmp__', '__next__', + '__await__', '__aiter__', '__anext__', + '__getreadbuffer__', '__getwritebuffer__', '__getsegcount__', + '__getcharbuffer__', '__getbuffer__', '__releasebuffer__', +))) + +modifier_output_mapper = { + 'inline': 'CYTHON_INLINE' +}.get + + +class IncludeCode(object): + """ + An include file and/or verbatim C code to be included in the + generated sources. + """ + # attributes: + # + # pieces {order: unicode}: pieces of C code to be generated. + # For the included file, the key "order" is zero. 
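[Editor's note, illustrative only, not part of the diff] The 'pieces'/'location' bookkeeping documented here is easiest to see standalone. A minimal sketch of the merge rule that IncludeCode.dict_update implements below, using a bare stand-in class:

class IncludeStub:
    # Hypothetical reduction of IncludeCode: location constants are
    # INITIAL=0, EARLY=1, LATE=2; pieces maps an "order" key to C code.
    def __init__(self, location, pieces):
        self.location = location
        self.pieces = pieces

    def dict_update(self, d, key):
        if key in d:
            other = d[key]
            other.location = min(self.location, other.location)
            other.pieces.update(self.pieces)
        else:
            d[key] = self

includes = {}
IncludeStub(2, {0: '#include "pythread.h"'}).dict_update(includes, 'pythread.h')
IncludeStub(1, {5: '/* verbatim helper */'}).dict_update(includes, 'pythread.h')
assert includes['pythread.h'].location == 1             # earliest location wins
assert sorted(includes['pythread.h'].pieces) == [0, 5]  # verbatim pieces accumulate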
+ # For verbatim include code, the "order" is the "order" + # attribute of the original IncludeCode where this piece + # of C code was first added. This is needed to prevent + # duplication if the same include code is found through + # multiple cimports. + # location int: where to put this include in the C sources, one + # of the constants INITIAL, EARLY, LATE + # order int: sorting order (automatically set by increasing counter) + + # Constants for location. If the same include occurs with different + # locations, the earliest one takes precedense. + INITIAL = 0 + EARLY = 1 + LATE = 2 + + counter = 1 # Counter for "order" + + def __init__(self, include=None, verbatim=None, late=True, initial=False): + self.order = self.counter + type(self).counter += 1 + self.pieces = {} + + if include: + if include[0] == '<' and include[-1] == '>': + self.pieces[0] = u'#include {0}'.format(include) + late = False # system include is never late + else: + self.pieces[0] = u'#include "{0}"'.format(include) + + if verbatim: + self.pieces[self.order] = verbatim + + if initial: + self.location = self.INITIAL + elif late: + self.location = self.LATE + else: + self.location = self.EARLY + + def dict_update(self, d, key): + """ + Insert `self` in dict `d` with key `key`. If that key already + exists, update the attributes of the existing value with `self`. + """ + if key in d: + other = d[key] + other.location = min(self.location, other.location) + other.pieces.update(self.pieces) + else: + d[key] = self + + def sortkey(self): + return self.order + + def mainpiece(self): + """ + Return the main piece of C code, corresponding to the include + file. If there was no include file, return None. + """ + return self.pieces.get(0) + + def write(self, code): + # Write values of self.pieces dict, sorted by the keys + for k in sorted(self.pieces): + code.putln(self.pieces[k]) + + +def get_utility_dir(): + # make this a function and not global variables: + # http://trac.cython.org/cython_trac/ticket/475 + Cython_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + return os.path.join(Cython_dir, "Utility") + +read_utilities_hook = None +""" +Override the hook for reading a utilities file that contains code fragments used +by the codegen. + +The hook functions takes the path of the utilities file, and returns a list +of strings, one per line. + +The default behavior is to open a file relative to get_utility_dir(). +""" + +def read_utilities_from_utility_dir(path): + """ + Read all lines of the file at the provided path from a path relative + to get_utility_dir(). + """ + filename = os.path.join(get_utility_dir(), path) + with closing(Utils.open_source_file(filename, encoding='UTF-8')) as f: + return f.readlines() + +# by default, read utilities from the utility directory. +read_utilities_hook = read_utilities_from_utility_dir + +class UtilityCodeBase(object): + """ + Support for loading utility code from a file. + + Code sections in the file can be specified as follows: + + ##### MyUtility.proto ##### + + [proto declarations] + + ##### MyUtility.init ##### + + [code run at module initialization] + + ##### MyUtility ##### + #@requires: MyOtherUtility + #@substitute: naming + + [definitions] + + ##### MyUtility ##### + #@substitute: tempita + + [requires tempita substitution + - context can't be specified here though so only + tempita utility that requires no external context + will benefit from this tag + - only necessary when @required from non-tempita code] + + for prototypes and implementation respectively. 
For non-python or + -cython files backslashes should be used instead. 5 to 30 comment + characters may be used on either side. + + If the @cname decorator is not used and this is a CythonUtilityCode, + one should pass in the 'name' keyword argument to be used for name + mangling of such entries. + """ + + is_cython_utility = False + _utility_cache = {} + + @classmethod + def _add_utility(cls, utility, type, lines, begin_lineno, tags=None): + if utility is None: + return + + code = '\n'.join(lines) + if tags and 'substitute' in tags and 'naming' in tags['substitute']: + try: + code = Template(code).substitute(vars(Naming)) + except (KeyError, ValueError) as e: + raise RuntimeError("Error parsing templated utility code of type '%s' at line %d: %s" % ( + type, begin_lineno, e)) + + # remember correct line numbers at least until after templating + code = '\n' * begin_lineno + code + + if type == 'proto': + utility[0] = code + elif type == 'impl': + utility[1] = code + else: + all_tags = utility[2] + all_tags[type] = code + + if tags: + all_tags = utility[2] + for name, values in tags.items(): + all_tags.setdefault(name, set()).update(values) + + @classmethod + def load_utilities_from_file(cls, path): + utilities = cls._utility_cache.get(path) + if utilities: + return utilities + + _, ext = os.path.splitext(path) + if ext in ('.pyx', '.py', '.pxd', '.pxi'): + comment = '#' + strip_comments = partial(re.compile(r'^\s*#(?!\s*cython\s*:).*').sub, '') + rstrip = StringEncoding._unicode.rstrip + else: + comment = '/' + strip_comments = partial(re.compile(r'^\s*//.*|/\*[^*]*\*/').sub, '') + rstrip = partial(re.compile(r'\s+(\\?)$').sub, r'\1') + match_special = re.compile( + (r'^%(C)s{5,30}\s*(?P(?:\w|\.)+)\s*%(C)s{5,30}|' + r'^%(C)s+@(?P\w+)\s*:\s*(?P(?:\w|[.:])+)') % + {'C': comment}).match + match_type = re.compile(r'(.+)[.](proto(?:[.]\S+)?|impl|init|cleanup)$').match + + all_lines = read_utilities_hook(path) + + utilities = defaultdict(lambda: [None, None, {}]) + lines = [] + tags = defaultdict(set) + utility = type = None + begin_lineno = 0 + + for lineno, line in enumerate(all_lines): + m = match_special(line) + if m: + if m.group('name'): + cls._add_utility(utility, type, lines, begin_lineno, tags) + + begin_lineno = lineno + 1 + del lines[:] + tags.clear() + + name = m.group('name') + mtype = match_type(name) + if mtype: + name, type = mtype.groups() + else: + type = 'impl' + utility = utilities[name] + else: + tags[m.group('tag')].add(m.group('value')) + lines.append('') # keep line number correct + else: + lines.append(rstrip(strip_comments(line))) + + if utility is None: + raise ValueError("Empty utility code file") + + # Don't forget to add the last utility code + cls._add_utility(utility, type, lines, begin_lineno, tags) + + utilities = dict(utilities) # un-defaultdict-ify + cls._utility_cache[path] = utilities + return utilities + + @classmethod + def load(cls, util_code_name, from_file, **kwargs): + """ + Load utility code from a file specified by from_file (relative to + Cython/Utility) and name util_code_name. 
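[Editor's note, illustrative only, not part of the diff] load_utilities_from_file (above) splits one utility file into named proto/impl sections by scanning for banner comments like the ones documented here. A simplified runnable sketch of that scan for C-style utility files, using a reduced regex:

import re

match_special = re.compile(r'^/{5,30}\s*(?P<name>[\w.]+)\s*/{5,30}').match

text = '''\
//////// Divide.proto ////////
static int divide(int, int);
//////// Divide ////////
static int divide(int a, int b) { return a / b; }
'''
sections, current = {}, None
for line in text.splitlines():
    m = match_special(line)
    if m:
        current = sections.setdefault(m.group('name'), [])
    elif current is not None:
        current.append(line)

assert set(sections) == {'Divide.proto', 'Divide'}

The real scanner additionally strips comments, records #@tag lines, and pads each section with leading blank lines so that error positions keep their original line numbers.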
+ """ + + if '::' in util_code_name: + from_file, util_code_name = util_code_name.rsplit('::', 1) + assert from_file + utilities = cls.load_utilities_from_file(from_file) + proto, impl, tags = utilities[util_code_name] + + if tags: + if "substitute" in tags and "tempita" in tags["substitute"]: + if not issubclass(cls, TempitaUtilityCode): + return TempitaUtilityCode.load(util_code_name, from_file, **kwargs) + orig_kwargs = kwargs.copy() + for name, values in tags.items(): + if name in kwargs: + continue + # only pass lists when we have to: most argument expect one value or None + if name == 'requires': + if orig_kwargs: + values = [cls.load(dep, from_file, **orig_kwargs) + for dep in sorted(values)] + else: + # dependencies are rarely unique, so use load_cached() when we can + values = [cls.load_cached(dep, from_file) + for dep in sorted(values)] + elif name == 'substitute': + # don't want to pass "naming" or "tempita" to the constructor + # since these will have been handled + values = values - {'naming', 'tempita'} + if not values: + continue + elif not values: + values = None + elif len(values) == 1: + values = list(values)[0] + kwargs[name] = values + + if proto is not None: + kwargs['proto'] = proto + if impl is not None: + kwargs['impl'] = impl + + if 'name' not in kwargs: + kwargs['name'] = util_code_name + + if 'file' not in kwargs and from_file: + kwargs['file'] = from_file + return cls(**kwargs) + + @classmethod + def load_cached(cls, utility_code_name, from_file, __cache={}): + """ + Calls .load(), but using a per-type cache based on utility name and file name. + """ + key = (utility_code_name, from_file, cls) + try: + return __cache[key] + except KeyError: + pass + code = __cache[key] = cls.load(utility_code_name, from_file) + return code + + @classmethod + def load_as_string(cls, util_code_name, from_file, **kwargs): + """ + Load a utility code as a string. Returns (proto, implementation) + """ + util = cls.load(util_code_name, from_file, **kwargs) + proto, impl = util.proto, util.impl + return util.format_code(proto), util.format_code(impl) + + def format_code(self, code_string, replace_empty_lines=re.compile(r'\n\n+').sub): + """ + Format a code section for output. + """ + if code_string: + code_string = replace_empty_lines('\n', code_string.strip()) + '\n\n' + return code_string + + def __str__(self): + return "<%s(%s)>" % (type(self).__name__, self.name) + + def get_tree(self, **kwargs): + return None + + def __deepcopy__(self, memodict=None): + # No need to deep-copy utility code since it's essentially immutable. + return self + + +class UtilityCode(UtilityCodeBase): + """ + Stores utility code to add during code generation. + + See GlobalState.put_utility_code. + + hashes/equals by instance + + proto C prototypes + impl implementation code + init code to call on module initialization + requires utility code dependencies + proto_block the place in the resulting file where the prototype should + end up + name name of the utility code (or None) + file filename of the utility code file this utility was loaded + from (or None) + """ + + def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None, + proto_block='utility_code_proto', name=None, file=None): + # proto_block: Which code block to dump prototype in. See GlobalState. 
+ self.proto = proto + self.impl = impl + self.init = init + self.cleanup = cleanup + self.requires = requires + self._cache = {} + self.specialize_list = [] + self.proto_block = proto_block + self.name = name + self.file = file + + def __hash__(self): + return hash((self.proto, self.impl)) + + def __eq__(self, other): + if self is other: + return True + self_type, other_type = type(self), type(other) + if self_type is not other_type and not (isinstance(other, self_type) or isinstance(self, other_type)): + return False + + self_proto = getattr(self, 'proto', None) + other_proto = getattr(other, 'proto', None) + return (self_proto, self.impl) == (other_proto, other.impl) + + def none_or_sub(self, s, context): + """ + Format a string in this utility code with context. If None, do nothing. + """ + if s is None: + return None + return s % context + + def specialize(self, pyrex_type=None, **data): + name = self.name + if pyrex_type is not None: + data['type'] = pyrex_type.empty_declaration_code() + data['type_name'] = pyrex_type.specialization_name() + name = "%s[%s]" % (name, data['type_name']) + # Dicts aren't hashable... + key = tuple(sorted(data.items())) + try: + return self._cache[key] + except KeyError: + if self.requires is None: + requires = None + else: + requires = [r.specialize(data) for r in self.requires] + + s = self._cache[key] = UtilityCode( + self.none_or_sub(self.proto, data), + self.none_or_sub(self.impl, data), + self.none_or_sub(self.init, data), + self.none_or_sub(self.cleanup, data), + requires, + self.proto_block, + name, + ) + + self.specialize_list.append(s) + return s + + def inject_string_constants(self, impl, output): + """Replace 'PYIDENT("xyz")' by a constant Python identifier cname. + """ + if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl: + return False, impl + + replacements = {} + def externalise(matchobj): + key = matchobj.groups() + try: + cname = replacements[key] + except KeyError: + str_type, name = key + cname = replacements[key] = output.get_py_string_const( + StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname + return cname + + impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl) + assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl + return True, impl + + def inject_unbound_methods(self, impl, output): + """Replace 'UNBOUND_METHOD(type, "name")' by a constant Python identifier cname. 
+ """ + if 'CALL_UNBOUND_METHOD(' not in impl: + return False, impl + + def externalise(matchobj): + type_cname, method_name, obj_cname, args = matchobj.groups() + args = [arg.strip() for arg in args[1:].split(',')] if args else [] + assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args) + return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args) + + impl = re.sub( + r'CALL_UNBOUND_METHOD\(' + r'([a-zA-Z_]+),' # type cname + r'\s*"([^"]+)",' # method name + r'\s*([^),]+)' # object cname + r'((?:,[^),]+)*)' # args* + r'\)', externalise, impl) + assert 'CALL_UNBOUND_METHOD(' not in impl + + return True, impl + + def wrap_c_strings(self, impl): + """Replace CSTRING('''xyz''') by a C compatible string + """ + if 'CSTRING(' not in impl: + return impl + + def split_string(matchobj): + content = matchobj.group(1).replace('"', '\042') + return ''.join( + '"%s\\n"\n' % line if not line.endswith('\\') or line.endswith('\\\\') else '"%s"\n' % line[:-1] + for line in content.splitlines()) + + impl = re.sub(r'CSTRING\(\s*"""([^"]*(?:"[^"]+)*)"""\s*\)', split_string, impl) + assert 'CSTRING(' not in impl + return impl + + def put_code(self, output): + if self.requires: + for dependency in self.requires: + output.use_utility_code(dependency) + if self.proto: + writer = output[self.proto_block] + writer.putln("/* %s.proto */" % self.name) + writer.put_or_include( + self.format_code(self.proto), '%s_proto' % self.name) + if self.impl: + impl = self.format_code(self.wrap_c_strings(self.impl)) + is_specialised1, impl = self.inject_string_constants(impl, output) + is_specialised2, impl = self.inject_unbound_methods(impl, output) + writer = output['utility_code_def'] + writer.putln("/* %s */" % self.name) + if not (is_specialised1 or is_specialised2): + # no module specific adaptations => can be reused + writer.put_or_include(impl, '%s_impl' % self.name) + else: + writer.put(impl) + if self.init: + writer = output['init_globals'] + writer.putln("/* %s.init */" % self.name) + if isinstance(self.init, basestring): + writer.put(self.format_code(self.init)) + else: + self.init(writer, output.module_pos) + # 'init' code can end with an 'if' statement for an error condition like: + # if (check_ok()) ; else + writer.putln(writer.error_goto_if_PyErr(output.module_pos)) + writer.putln() + if self.cleanup and Options.generate_cleanup_code: + writer = output['cleanup_globals'] + writer.putln("/* %s.cleanup */" % self.name) + if isinstance(self.cleanup, basestring): + writer.put_or_include( + self.format_code(self.cleanup), + '%s_cleanup' % self.name) + else: + self.cleanup(writer, output.module_pos) + + +def sub_tempita(s, context, file=None, name=None): + "Run tempita on string s with given context." 
+ if not s: + return None + + if file: + context['__name'] = "%s:%s" % (file, name) + elif name: + context['__name'] = name + + from ..Tempita import sub + return sub(s, **context) + + +class TempitaUtilityCode(UtilityCode): + def __init__(self, name=None, proto=None, impl=None, init=None, file=None, context=None, **kwargs): + if context is None: + context = {} + proto = sub_tempita(proto, context, file, name) + impl = sub_tempita(impl, context, file, name) + init = sub_tempita(init, context, file, name) + super(TempitaUtilityCode, self).__init__( + proto, impl, init=init, name=name, file=file, **kwargs) + + @classmethod + def load_cached(cls, utility_code_name, from_file=None, context=None, __cache={}): + context_key = tuple(sorted(context.items())) if context else None + assert hash(context_key) is not None # raise TypeError if not hashable + key = (cls, from_file, utility_code_name, context_key) + try: + return __cache[key] + except KeyError: + pass + code = __cache[key] = cls.load(utility_code_name, from_file, context=context) + return code + + def none_or_sub(self, s, context): + """ + Format a string in this utility code with context. If None, do nothing. + """ + if s is None: + return None + return sub_tempita(s, context, self.file, self.name) + + +class LazyUtilityCode(UtilityCodeBase): + """ + Utility code that calls a callback with the root code writer when + available. Useful when you only have 'env' but not 'code'. + """ + __name__ = '' + requires = None + + def __init__(self, callback): + self.callback = callback + + def put_code(self, globalstate): + utility = self.callback(globalstate.rootwriter) + globalstate.use_utility_code(utility) + + +class FunctionState(object): + # return_label string function return point label + # error_label string error catch point label + # error_without_exception boolean Can go to the error label without an exception (e.g. __next__ can return NULL) + # continue_label string loop continue point label + # break_label string loop break point label + # return_from_error_cleanup_label string + # label_counter integer counter for naming labels + # in_try_finally boolean inside try of try...finally + # exc_vars (string * 3) exception variables for reraise, or None + # can_trace boolean line tracing is supported in the current context + # scope Scope the scope object of the current function + + # Not used for now, perhaps later + def __init__(self, owner, names_taken=set(), scope=None): + self.names_taken = names_taken + self.owner = owner + self.scope = scope + + self.error_label = None + self.label_counter = 0 + self.labels_used = set() + self.return_label = self.new_label() + self.new_error_label() + self.continue_label = None + self.break_label = None + self.yield_labels = [] + + self.in_try_finally = 0 + self.exc_vars = None + self.current_except = None + self.can_trace = False + self.gil_owned = True + + self.temps_allocated = [] # of (name, type, manage_ref, static) + self.temps_free = {} # (type, manage_ref) -> list of free vars with same type/managed status + self.temps_used_type = {} # name -> (type, manage_ref) + self.zombie_temps = set() # temps that must not be reused after release + self.temp_counter = 0 + self.closure_temps = None + + # This is used to collect temporaries, useful to find out which temps + # need to be privatized in parallel sections + self.collect_temps_stack = [] + + # This is used for the error indicator, which needs to be local to the + # function. It used to be global, which relies on the GIL being held. 
+ # However, exceptions may need to be propagated through 'nogil' + # sections, in which case we introduce a race condition. + self.should_declare_error_indicator = False + self.uses_error_indicator = False + + self.error_without_exception = False + + self.needs_refnanny = False + + # safety checks + + def validate_exit(self): + # validate that all allocated temps have been freed + if self.temps_allocated: + leftovers = self.temps_in_use() + if leftovers: + msg = "TEMPGUARD: Temps left over at end of '%s': %s" % (self.scope.name, ', '.join([ + '%s [%s]' % (name, ctype) + for name, ctype, is_pytemp in sorted(leftovers)]), + ) + #print(msg) + raise RuntimeError(msg) + + # labels + + def new_label(self, name=None): + n = self.label_counter + self.label_counter = n + 1 + label = "%s%d" % (Naming.label_prefix, n) + if name is not None: + label += '_' + name + return label + + def new_yield_label(self, expr_type='yield'): + label = self.new_label('resume_from_%s' % expr_type) + num_and_label = (len(self.yield_labels) + 1, label) + self.yield_labels.append(num_and_label) + return num_and_label + + def new_error_label(self, prefix=""): + old_err_lbl = self.error_label + self.error_label = self.new_label(prefix + 'error') + return old_err_lbl + + def get_loop_labels(self): + return ( + self.continue_label, + self.break_label) + + def set_loop_labels(self, labels): + (self.continue_label, + self.break_label) = labels + + def new_loop_labels(self, prefix=""): + old_labels = self.get_loop_labels() + self.set_loop_labels( + (self.new_label(prefix + "continue"), + self.new_label(prefix + "break"))) + return old_labels + + def get_all_labels(self): + return ( + self.continue_label, + self.break_label, + self.return_label, + self.error_label) + + def set_all_labels(self, labels): + (self.continue_label, + self.break_label, + self.return_label, + self.error_label) = labels + + def all_new_labels(self): + old_labels = self.get_all_labels() + new_labels = [] + for old_label, name in zip(old_labels, ['continue', 'break', 'return', 'error']): + if old_label: + new_labels.append(self.new_label(name)) + else: + new_labels.append(old_label) + self.set_all_labels(new_labels) + return old_labels + + def use_label(self, lbl): + self.labels_used.add(lbl) + + def label_used(self, lbl): + return lbl in self.labels_used + + # temp handling + + def allocate_temp(self, type, manage_ref, static=False, reusable=True): + """ + Allocates a temporary (which may create a new one or get a previously + allocated and released one of the same type). Type is simply registered + and handed back, but will usually be a PyrexType. + + If type.needs_refcounting, manage_ref comes into play. If manage_ref is set to + True, the temp will be decref-ed on return statements and in exception + handling clauses. Otherwise the caller has to deal with any reference + counting of the variable. + + If not type.needs_refcounting, then manage_ref will be ignored, but it + still has to be passed. It is recommended to pass False by convention + if it is known that type will never be a reference counted type. + + static=True marks the temporary declaration with "static". + This is only used when allocating backing store for a module-level + C array literals. + + if reusable=False, the temp will not be reused after release. + + A C string referring to the variable is returned. 
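[Editor's note, illustrative only, not part of the diff] The freelist behind allocate_temp()/release_temp() keeps, per (type, manage_ref) key, both an ordered list (for deterministic reuse) and a set (for fast double-free checks). A minimal sketch with hypothetical helper names:

free = {}  # (type, manage_ref) -> ([names in release order], {name set})

def release(key, name):
    order, members = free.setdefault(key, ([], set()))
    if name in members:
        raise RuntimeError("Temp %s freed twice!" % name)
    order.append(name)
    members.add(name)

def allocate(key, counter=[0]):
    order, members = free.get(key, ([], set()))
    if order:
        name = order.pop()   # reuse a released temp of the same kind
        members.remove(name)
        return name
    counter[0] += 1
    return "__pyx_t_%d" % counter[0]

t = allocate(('PyObject*', True))
release(('PyObject*', True), t)
assert allocate(('PyObject*', True)) == t

Zombie temps (reusable=False in the real code) are released for accounting purposes but never appended to the ordered list, so they are never handed out again.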
+ """ + if type.is_cv_qualified and not type.is_reference: + type = type.cv_base_type + elif type.is_reference and not type.is_fake_reference: + type = type.ref_base_type + elif type.is_cfunction: + from . import PyrexTypes + type = PyrexTypes.c_ptr_type(type) # A function itself isn't an l-value + elif type.is_cpp_class and not type.is_fake_reference and self.scope.directives['cpp_locals']: + self.scope.use_utility_code(UtilityCode.load_cached("OptionalLocals", "CppSupport.cpp")) + if not type.needs_refcounting: + # Make manage_ref canonical, so that manage_ref will always mean + # a decref is needed. + manage_ref = False + + freelist = self.temps_free.get((type, manage_ref)) + if reusable and freelist is not None and freelist[0]: + result = freelist[0].pop() + freelist[1].remove(result) + else: + while True: + self.temp_counter += 1 + result = "%s%d" % (Naming.codewriter_temp_prefix, self.temp_counter) + if result not in self.names_taken: break + self.temps_allocated.append((result, type, manage_ref, static)) + if not reusable: + self.zombie_temps.add(result) + self.temps_used_type[result] = (type, manage_ref) + if DebugFlags.debug_temp_code_comments: + self.owner.putln("/* %s allocated (%s)%s */" % (result, type, "" if reusable else " - zombie")) + + if self.collect_temps_stack: + self.collect_temps_stack[-1].add((result, type)) + + return result + + def release_temp(self, name): + """ + Releases a temporary so that it can be reused by other code needing + a temp of the same type. + """ + type, manage_ref = self.temps_used_type[name] + freelist = self.temps_free.get((type, manage_ref)) + if freelist is None: + freelist = ([], set()) # keep order in list and make lookups in set fast + self.temps_free[(type, manage_ref)] = freelist + if name in freelist[1]: + raise RuntimeError("Temp %s freed twice!" % name) + if name not in self.zombie_temps: + freelist[0].append(name) + freelist[1].add(name) + if DebugFlags.debug_temp_code_comments: + self.owner.putln("/* %s released %s*/" % ( + name, " - zombie" if name in self.zombie_temps else "")) + + def temps_in_use(self): + """Return a list of (cname,type,manage_ref) tuples of temp names and their type + that are currently in use. + """ + used = [] + for name, type, manage_ref, static in self.temps_allocated: + freelist = self.temps_free.get((type, manage_ref)) + if freelist is None or name not in freelist[1]: + used.append((name, type, manage_ref and type.needs_refcounting)) + return used + + def temps_holding_reference(self): + """Return a list of (cname,type) tuples of temp names and their type + that are currently in use. This includes only temps + with a reference counted type which owns its reference. + """ + return [(name, type) + for name, type, manage_ref in self.temps_in_use() + if manage_ref and type.needs_refcounting] + + def all_managed_temps(self): + """Return a list of (cname, type) tuples of refcount-managed Python objects. + """ + return [(cname, type) + for cname, type, manage_ref, static in self.temps_allocated + if manage_ref] + + def all_free_managed_temps(self): + """Return a list of (cname, type) tuples of refcount-managed Python + objects that are not currently in use. This is used by + try-except and try-finally blocks to clean up temps in the + error case. + """ + return sorted([ # Enforce deterministic order. 
+ (cname, type) + for (type, manage_ref), freelist in self.temps_free.items() if manage_ref + for cname in freelist[0] + ]) + + def start_collecting_temps(self): + """ + Useful to find out which temps were used in a code block + """ + self.collect_temps_stack.append(set()) + + def stop_collecting_temps(self): + return self.collect_temps_stack.pop() + + def init_closure_temps(self, scope): + self.closure_temps = ClosureTempAllocator(scope) + + +class NumConst(object): + """Global info about a Python number constant held by GlobalState. + + cname string + value string + py_type string int, long, float + value_code string evaluation code if different from value + """ + + def __init__(self, cname, value, py_type, value_code=None): + self.cname = cname + self.value = value + self.py_type = py_type + self.value_code = value_code or value + + +class PyObjectConst(object): + """Global info about a generic constant held by GlobalState. + """ + # cname string + # type PyrexType + + def __init__(self, cname, type): + self.cname = cname + self.type = type + + +cython.declare(possible_unicode_identifier=object, possible_bytes_identifier=object, + replace_identifier=object, find_alphanums=object) +possible_unicode_identifier = re.compile(br"(?![0-9])\w+$".decode('ascii'), re.U).match +possible_bytes_identifier = re.compile(r"(?![0-9])\w+$".encode('ASCII')).match +replace_identifier = re.compile(r'[^a-zA-Z0-9_]+').sub +find_alphanums = re.compile('([a-zA-Z0-9]+)').findall + +class StringConst(object): + """Global info about a C string constant held by GlobalState. + """ + # cname string + # text EncodedString or BytesLiteral + # py_strings {(identifier, encoding) : PyStringConst} + + def __init__(self, cname, text, byte_string): + self.cname = cname + self.text = text + self.escaped_value = StringEncoding.escape_byte_string(byte_string) + self.py_strings = None + self.py_versions = [] + + def add_py_version(self, version): + if not version: + self.py_versions = [2, 3] + elif version not in self.py_versions: + self.py_versions.append(version) + + def get_py_string_const(self, encoding, identifier=None, + is_str=False, py3str_cstring=None): + py_strings = self.py_strings + text = self.text + + is_str = bool(identifier or is_str) + is_unicode = encoding is None and not is_str + + if encoding is None: + # unicode string + encoding_key = None + else: + # bytes or str + encoding = encoding.lower() + if encoding in ('utf8', 'utf-8', 'ascii', 'usascii', 'us-ascii'): + encoding = None + encoding_key = None + else: + encoding_key = ''.join(find_alphanums(encoding)) + + key = (is_str, is_unicode, encoding_key, py3str_cstring) + if py_strings is not None: + try: + return py_strings[key] + except KeyError: + pass + else: + self.py_strings = {} + + if identifier: + intern = True + elif identifier is None: + if isinstance(text, bytes): + intern = bool(possible_bytes_identifier(text)) + else: + intern = bool(possible_unicode_identifier(text)) + else: + intern = False + if intern: + prefix = Naming.interned_prefixes['str'] + else: + prefix = Naming.py_const_prefix + + if encoding_key: + encoding_prefix = '_%s' % encoding_key + else: + encoding_prefix = '' + + pystring_cname = "%s%s%s_%s" % ( + prefix, + (is_str and 's') or (is_unicode and 'u') or 'b', + encoding_prefix, + self.cname[len(Naming.const_prefix):]) + + py_string = PyStringConst( + pystring_cname, encoding, is_unicode, is_str, py3str_cstring, intern) + self.py_strings[key] = py_string + return py_string + +class PyStringConst(object): + """Global info about a 
Python string constant held by GlobalState. + """ + # cname string + # py3str_cstring string + # encoding string + # intern boolean + # is_unicode boolean + # is_str boolean + + def __init__(self, cname, encoding, is_unicode, is_str=False, + py3str_cstring=None, intern=False): + self.cname = cname + self.py3str_cstring = py3str_cstring + self.encoding = encoding + self.is_str = is_str + self.is_unicode = is_unicode + self.intern = intern + + def __lt__(self, other): + return self.cname < other.cname + + +class GlobalState(object): + # filename_table {string : int} for finding filename table indexes + # filename_list [string] filenames in filename table order + # input_file_contents dict contents (=list of lines) of any file that was used as input + # to create this output C code. This is + # used to annotate the comments. + # + # utility_codes set IDs of used utility code (to avoid reinsertion) + # + # declared_cnames {string:Entry} used in a transition phase to merge pxd-declared + # constants etc. into the pyx-declared ones (i.e, + # check if constants are already added). + # In time, hopefully the literals etc. will be + # supplied directly instead. + # + # const_cnames_used dict global counter for unique constant identifiers + # + + # parts {string:CCodeWriter} + + + # interned_strings + # consts + # interned_nums + + # directives set Temporary variable used to track + # the current set of directives in the code generation + # process. + + directives = {} + + code_layout = [ + 'h_code', + 'filename_table', + 'utility_code_proto_before_types', + 'numeric_typedefs', # Let these detailed individual parts stay!, + 'complex_type_declarations', # as the proper solution is to make a full DAG... + 'type_declarations', # More coarse-grained blocks would simply hide + 'utility_code_proto', # the ugliness, not fix it + 'module_declarations', + 'typeinfo', + 'before_global_var', + 'global_var', + 'string_decls', + 'decls', + 'late_includes', + 'module_state', + 'module_state_clear', + 'module_state_traverse', + 'module_state_defines', # redefines names used in module_state/_clear/_traverse + 'module_code', # user code goes here + 'pystring_table', + 'cached_builtins', + 'cached_constants', + 'init_constants', + 'init_globals', # (utility code called at init-time) + 'init_module', + 'cleanup_globals', + 'cleanup_module', + 'main_method', + 'utility_code_pragmas', # silence some irrelevant warnings in utility code + 'utility_code_def', + 'utility_code_pragmas_end', # clean-up the utility_code_pragmas + 'end' + ] + + # h files can only have a much smaller list of sections + h_code_layout = [ + 'h_code', + 'utility_code_proto_before_types', + 'type_declarations', + 'utility_code_proto', + 'end' + ] + + def __init__(self, writer, module_node, code_config, common_utility_include_dir=None): + self.filename_table = {} + self.filename_list = [] + self.input_file_contents = {} + self.utility_codes = set() + self.declared_cnames = {} + self.in_utility_code_generation = False + self.code_config = code_config + self.common_utility_include_dir = common_utility_include_dir + self.parts = {} + self.module_node = module_node # because some utility code generation needs it + # (generating backwards-compatible Get/ReleaseBuffer + + self.const_cnames_used = {} + self.string_const_index = {} + self.dedup_const_index = {} + self.pyunicode_ptr_const_index = {} + self.num_const_index = {} + self.py_constants = [] + self.cached_cmethods = {} + self.initialised_constants = set() + + writer.set_global_state(self) + 
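[Editor's note, illustrative only, not part of the diff] initialize_main_c_code() below materializes code_layout as one insertion point per section, so later compilation stages can write into earlier sections out of order. StringIOTree makes the real version cheap, but plain StringIO buffers concatenated in layout order show the shape:

from io import StringIO

layout = ['h_code', 'utility_code_proto', 'module_code', 'end']
parts = {name: StringIO() for name in layout}

# Written "late" but emitted early, because its section comes first:
parts['module_code'].write('static void f(void) {}\n')
parts['utility_code_proto'].write('static void f(void);\n')

output = ''.join(parts[name].getvalue() for name in layout)
assert output.index('static void f(void);') < output.index('{}')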
self.rootwriter = writer + + def initialize_main_c_code(self): + rootwriter = self.rootwriter + for i, part in enumerate(self.code_layout): + w = self.parts[part] = rootwriter.insertion_point() + if i > 0: + w.putln("/* #### Code section: %s ### */" % part) + + if not Options.cache_builtins: + del self.parts['cached_builtins'] + else: + w = self.parts['cached_builtins'] + w.enter_cfunc_scope() + w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {") + + w = self.parts['cached_constants'] + w.enter_cfunc_scope() + w.putln("") + w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {") + w.put_declare_refcount_context() + w.put_setup_refcount_context(StringEncoding.EncodedString("__Pyx_InitCachedConstants")) + + w = self.parts['init_globals'] + w.enter_cfunc_scope() + w.putln("") + w.putln("static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {") + + w = self.parts['init_constants'] + w.enter_cfunc_scope() + w.putln("") + w.putln("static CYTHON_SMALL_CODE int __Pyx_InitConstants(void) {") + + if not Options.generate_cleanup_code: + del self.parts['cleanup_globals'] + else: + w = self.parts['cleanup_globals'] + w.enter_cfunc_scope() + w.putln("") + w.putln("static CYTHON_SMALL_CODE void __Pyx_CleanupGlobals(void) {") + + code = self.parts['utility_code_proto'] + code.putln("") + code.putln("/* --- Runtime support code (head) --- */") + + code = self.parts['utility_code_def'] + if self.code_config.emit_linenums: + code.write('\n#line 1 "cython_utility"\n') + code.putln("") + code.putln("/* --- Runtime support code --- */") + + def initialize_main_h_code(self): + rootwriter = self.rootwriter + for part in self.h_code_layout: + self.parts[part] = rootwriter.insertion_point() + + def finalize_main_c_code(self): + self.close_global_decls() + + # + # utility_code_def + # + code = self.parts['utility_code_def'] + util = TempitaUtilityCode.load_cached("TypeConversions", "TypeConversion.c") + code.put(util.format_code(util.impl)) + code.putln("") + + # + # utility code pragmas + # + code = self.parts['utility_code_pragmas'] + util = UtilityCode.load_cached("UtilityCodePragmas", "ModuleSetupCode.c") + code.putln(util.format_code(util.impl)) + code.putln("") + code = self.parts['utility_code_pragmas_end'] + util = UtilityCode.load_cached("UtilityCodePragmasEnd", "ModuleSetupCode.c") + code.putln(util.format_code(util.impl)) + code.putln("") + + def __getitem__(self, key): + return self.parts[key] + + # + # Global constants, interned objects, etc. + # + def close_global_decls(self): + # This is called when it is known that no more global declarations will + # declared. 
+ self.generate_const_declarations() + if Options.cache_builtins: + w = self.parts['cached_builtins'] + w.putln("return 0;") + if w.label_used(w.error_label): + w.put_label(w.error_label) + w.putln("return -1;") + w.putln("}") + w.exit_cfunc_scope() + + w = self.parts['cached_constants'] + w.put_finish_refcount_context() + w.putln("return 0;") + if w.label_used(w.error_label): + w.put_label(w.error_label) + w.put_finish_refcount_context() + w.putln("return -1;") + w.putln("}") + w.exit_cfunc_scope() + + for part in ['init_globals', 'init_constants']: + w = self.parts[part] + w.putln("return 0;") + if w.label_used(w.error_label): + w.put_label(w.error_label) + w.putln("return -1;") + w.putln("}") + w.exit_cfunc_scope() + + if Options.generate_cleanup_code: + w = self.parts['cleanup_globals'] + w.putln("}") + w.exit_cfunc_scope() + + if Options.generate_cleanup_code: + w = self.parts['cleanup_module'] + w.putln("}") + w.exit_cfunc_scope() + + def put_pyobject_decl(self, entry): + self['global_var'].putln("static PyObject *%s;" % entry.cname) + + # constant handling at code generation time + + def get_cached_constants_writer(self, target=None): + if target is not None: + if target in self.initialised_constants: + # Return None on second/later calls to prevent duplicate creation code. + return None + self.initialised_constants.add(target) + return self.parts['cached_constants'] + + def get_int_const(self, str_value, longness=False): + py_type = longness and 'long' or 'int' + try: + c = self.num_const_index[(str_value, py_type)] + except KeyError: + c = self.new_num_const(str_value, py_type) + return c + + def get_float_const(self, str_value, value_code): + try: + c = self.num_const_index[(str_value, 'float')] + except KeyError: + c = self.new_num_const(str_value, 'float', value_code) + return c + + def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None): + if dedup_key is not None: + const = self.dedup_const_index.get(dedup_key) + if const is not None: + return const + # create a new Python object constant + const = self.new_py_const(type, prefix) + if (cleanup_level is not None + and cleanup_level <= Options.generate_cleanup_code + # Note that this function is used for all argument defaults + # which aren't just Python objects + and type.needs_refcounting): + cleanup_writer = self.parts['cleanup_globals'] + cleanup_writer.putln('Py_CLEAR(%s);' % const.cname) + if dedup_key is not None: + self.dedup_const_index[dedup_key] = const + return const + + def get_string_const(self, text, py_version=None): + # return a C string constant, creating a new one if necessary + if text.is_unicode: + byte_string = text.utf8encode() + else: + byte_string = text.byteencode() + try: + c = self.string_const_index[byte_string] + except KeyError: + c = self.new_string_const(text, byte_string) + c.add_py_version(py_version) + return c + + def get_pyunicode_ptr_const(self, text): + # return a Py_UNICODE[] constant, creating a new one if necessary + assert text.is_unicode + try: + c = self.pyunicode_ptr_const_index[text] + except KeyError: + c = self.pyunicode_ptr_const_index[text] = self.new_const_cname() + return c + + def get_py_string_const(self, text, identifier=None, + is_str=False, unicode_value=None): + # return a Python string constant, creating a new one if necessary + py3str_cstring = None + if is_str and unicode_value is not None \ + and unicode_value.utf8encode() != text.byteencode(): + py3str_cstring = self.get_string_const(unicode_value, py_version=3) + c_string = 
self.get_string_const(text, py_version=2) + else: + c_string = self.get_string_const(text) + py_string = c_string.get_py_string_const( + text.encoding, identifier, is_str, py3str_cstring) + return py_string + + def get_interned_identifier(self, text): + return self.get_py_string_const(text, identifier=True) + + def new_string_const(self, text, byte_string): + cname = self.new_string_const_cname(byte_string) + c = StringConst(cname, text, byte_string) + self.string_const_index[byte_string] = c + return c + + def new_num_const(self, value, py_type, value_code=None): + cname = self.new_num_const_cname(value, py_type) + c = NumConst(cname, value, py_type, value_code) + self.num_const_index[(value, py_type)] = c + return c + + def new_py_const(self, type, prefix=''): + cname = self.new_const_cname(prefix) + c = PyObjectConst(cname, type) + self.py_constants.append(c) + return c + + def new_string_const_cname(self, bytes_value): + # Create a new globally-unique nice name for a C string constant. + value = bytes_value.decode('ASCII', 'ignore') + return self.new_const_cname(value=value) + + def unique_const_cname(self, format_str): # type: (str) -> str + used = self.const_cnames_used + cname = value = format_str.format(sep='', counter='') + while cname in used: + counter = used[value] = used[value] + 1 + cname = format_str.format(sep='_', counter=counter) + used[cname] = 1 + return cname + + def new_num_const_cname(self, value, py_type): # type: (str, str) -> str + if py_type == 'long': + value += 'L' + py_type = 'int' + prefix = Naming.interned_prefixes[py_type] + + value = value.replace('.', '_').replace('+', '_').replace('-', 'neg_') + if len(value) > 42: + # update tests/run/large_integer_T5290.py in case the amount is changed + cname = self.unique_const_cname( + prefix + "large{counter}_" + value[:18] + "_xxx_" + value[-18:]) + else: + cname = "%s%s" % (prefix, value) + return cname + + def new_const_cname(self, prefix='', value=''): + value = replace_identifier('_', value)[:32].strip('_') + name_suffix = self.unique_const_cname(value + "{sep}{counter}") + if prefix: + prefix = Naming.interned_prefixes[prefix] + else: + prefix = Naming.const_prefix + return "%s%s" % (prefix, name_suffix) + + def get_cached_unbound_method(self, type_cname, method_name): + key = (type_cname, method_name) + try: + cname = self.cached_cmethods[key] + except KeyError: + cname = self.cached_cmethods[key] = self.new_const_cname( + 'umethod', '%s_%s' % (type_cname, method_name)) + return cname + + def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames): + # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ... 
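+        # Illustrative example with hypothetical names: for obj_cname 'o',
+        # type_cname 'PyList_Type', method_name 'append' and arg_cnames ['v'],
+        # the returned C call looks roughly like
+        #     __Pyx_CallUnboundCMethod1(&__pyx_umethod_PyList_Type_append, o, v)
+        # with the cache struct cname coming from get_cached_unbound_method().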
+ utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames) + self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c")) + cache_cname = self.get_cached_unbound_method(type_cname, method_name) + args = [obj_cname] + arg_cnames + return "__Pyx_%s(&%s, %s)" % ( + utility_code_name, + cache_cname, + ', '.join(args), + ) + + def add_cached_builtin_decl(self, entry): + if entry.is_builtin and entry.is_const: + if self.should_declare(entry.cname, entry): + self.put_pyobject_decl(entry) + w = self.parts['cached_builtins'] + condition = None + if entry.name in non_portable_builtins_map: + condition, replacement = non_portable_builtins_map[entry.name] + w.putln('#if %s' % condition) + self.put_cached_builtin_init( + entry.pos, StringEncoding.EncodedString(replacement), + entry.cname) + w.putln('#else') + self.put_cached_builtin_init( + entry.pos, StringEncoding.EncodedString(entry.name), + entry.cname) + if condition: + w.putln('#endif') + + def put_cached_builtin_init(self, pos, name, cname): + w = self.parts['cached_builtins'] + interned_cname = self.get_interned_identifier(name).cname + self.use_utility_code( + UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c")) + w.putln('%s = __Pyx_GetBuiltinName(%s); if (!%s) %s' % ( + cname, + interned_cname, + cname, + w.error_goto(pos))) + + def generate_const_declarations(self): + self.generate_cached_methods_decls() + self.generate_string_constants() + self.generate_num_constants() + self.generate_object_constant_decls() + + def generate_object_constant_decls(self): + consts = [(len(c.cname), c.cname, c) + for c in self.py_constants] + consts.sort() + for _, cname, c in consts: + self.parts['module_state'].putln("%s;" % c.type.declaration_code(cname)) + self.parts['module_state_defines'].putln( + "#define %s %s->%s" % (cname, Naming.modulestateglobal_cname, cname)) + if not c.type.needs_refcounting: + # Note that py_constants is used for all argument defaults + # which aren't necessarily PyObjects, so aren't appropriate + # to clear. 
+ continue + self.parts['module_state_clear'].putln( + "Py_CLEAR(clear_module_state->%s);" % cname) + self.parts['module_state_traverse'].putln( + "Py_VISIT(traverse_module_state->%s);" % cname) + + def generate_cached_methods_decls(self): + if not self.cached_cmethods: + return + + decl = self.parts['decls'] + init = self.parts['init_constants'] + cnames = [] + for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()): + cnames.append(cname) + method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname + decl.putln('static __Pyx_CachedCFunction %s = {0, 0, 0, 0, 0};' % ( + cname)) + # split type reference storage as it might not be static + init.putln('%s.type = (PyObject*)&%s;' % ( + cname, type_cname)) + # method name string isn't static in limited api + init.putln('%s.method_name = &%s;' % ( + cname, method_name_cname)) + + if Options.generate_cleanup_code: + cleanup = self.parts['cleanup_globals'] + for cname in cnames: + cleanup.putln("Py_CLEAR(%s.method);" % cname) + + def generate_string_constants(self): + c_consts = [(len(c.cname), c.cname, c) for c in self.string_const_index.values()] + c_consts.sort() + py_strings = [] + + decls_writer = self.parts['string_decls'] + for _, cname, c in c_consts: + conditional = False + if c.py_versions and (2 not in c.py_versions or 3 not in c.py_versions): + conditional = True + decls_writer.putln("#if PY_MAJOR_VERSION %s 3" % ( + (2 in c.py_versions) and '<' or '>=')) + decls_writer.putln('static const char %s[] = "%s";' % ( + cname, StringEncoding.split_string_literal(c.escaped_value))) + if conditional: + decls_writer.putln("#endif") + if c.py_strings is not None: + for py_string in c.py_strings.values(): + py_strings.append((c.cname, len(py_string.cname), py_string)) + + for c, cname in sorted(self.pyunicode_ptr_const_index.items()): + utf16_array, utf32_array = StringEncoding.encode_pyunicode_string(c) + if utf16_array: + # Narrow and wide representations differ + decls_writer.putln("#ifdef Py_UNICODE_WIDE") + decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf32_array)) + if utf16_array: + decls_writer.putln("#else") + decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf16_array)) + decls_writer.putln("#endif") + + init_constants = self.parts['init_constants'] + if py_strings: + self.use_utility_code(UtilityCode.load_cached("InitStrings", "StringTools.c")) + py_strings.sort() + w = self.parts['pystring_table'] + w.putln("") + w.putln("static int __Pyx_CreateStringTabAndInitStrings(void) {") + # the stringtab is a function local rather than a global to + # ensure that it doesn't conflict with module state + w.putln("__Pyx_StringTabEntry %s[] = {" % Naming.stringtab_cname) + for py_string_args in py_strings: + c_cname, _, py_string = py_string_args + if not py_string.is_str or not py_string.encoding or \ + py_string.encoding in ('ASCII', 'USASCII', 'US-ASCII', + 'UTF8', 'UTF-8'): + encoding = '0' + else: + encoding = '"%s"' % py_string.encoding.lower() + + self.parts['module_state'].putln("PyObject *%s;" % py_string.cname) + self.parts['module_state_defines'].putln("#define %s %s->%s" % ( + py_string.cname, + Naming.modulestateglobal_cname, + py_string.cname)) + self.parts['module_state_clear'].putln("Py_CLEAR(clear_module_state->%s);" % + py_string.cname) + self.parts['module_state_traverse'].putln("Py_VISIT(traverse_module_state->%s);" % + py_string.cname) + if py_string.py3str_cstring: + w.putln("#if PY_MAJOR_VERSION >= 3") + w.putln("{&%s, %s, 
sizeof(%s), %s, %d, %d, %d}," % ( + py_string.cname, + py_string.py3str_cstring.cname, + py_string.py3str_cstring.cname, + '0', 1, 0, + py_string.intern + )) + w.putln("#else") + w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % ( + py_string.cname, + c_cname, + c_cname, + encoding, + py_string.is_unicode, + py_string.is_str, + py_string.intern + )) + if py_string.py3str_cstring: + w.putln("#endif") + w.putln("{0, 0, 0, 0, 0, 0, 0}") + w.putln("};") + w.putln("return __Pyx_InitStrings(%s);" % Naming.stringtab_cname) + w.putln("}") + + init_constants.putln( + "if (__Pyx_CreateStringTabAndInitStrings() < 0) %s;" % + init_constants.error_goto(self.module_pos)) + + def generate_num_constants(self): + consts = [(c.py_type, c.value[0] == '-', len(c.value), c.value, c.value_code, c) + for c in self.num_const_index.values()] + consts.sort() + init_constants = self.parts['init_constants'] + for py_type, _, _, value, value_code, c in consts: + cname = c.cname + self.parts['module_state'].putln("PyObject *%s;" % cname) + self.parts['module_state_defines'].putln("#define %s %s->%s" % ( + cname, Naming.modulestateglobal_cname, cname)) + self.parts['module_state_clear'].putln( + "Py_CLEAR(clear_module_state->%s);" % cname) + self.parts['module_state_traverse'].putln( + "Py_VISIT(traverse_module_state->%s);" % cname) + if py_type == 'float': + function = 'PyFloat_FromDouble(%s)' + elif py_type == 'long': + function = 'PyLong_FromString((char *)"%s", 0, 0)' + elif Utils.long_literal(value): + function = 'PyInt_FromString((char *)"%s", 0, 0)' + elif len(value.lstrip('-')) > 4: + function = "PyInt_FromLong(%sL)" + else: + function = "PyInt_FromLong(%s)" + init_constants.putln('%s = %s; %s' % ( + cname, function % value_code, + init_constants.error_goto_if_null(cname, self.module_pos))) + + # The functions below are there in a transition phase only + # and will be deprecated. They are called from Nodes.BlockNode. + # The copy&paste duplication is intentional in order to be able + # to see quickly how BlockNode worked, until this is replaced. + + def should_declare(self, cname, entry): + if cname in self.declared_cnames: + other = self.declared_cnames[cname] + assert str(entry.type) == str(other.type) + assert entry.init == other.init + return False + else: + self.declared_cnames[cname] = entry + return True + + # + # File name state + # + + def lookup_filename(self, source_desc): + entry = source_desc.get_filenametable_entry() + try: + index = self.filename_table[entry] + except KeyError: + index = len(self.filename_list) + self.filename_list.append(source_desc) + self.filename_table[entry] = index + return index + + def commented_file_contents(self, source_desc): + try: + return self.input_file_contents[source_desc] + except KeyError: + pass + source_file = source_desc.get_lines(encoding='ASCII', + error_handling='ignore') + try: + F = [u' * ' + line.rstrip().replace( + u'*/', u'*[inserted by cython to avoid comment closer]/' + ).replace( + u'/*', u'/[inserted by cython to avoid comment start]*' + ) + for line in source_file] + finally: + if hasattr(source_file, 'close'): + source_file.close() + if not F: F.append(u'') + self.input_file_contents[source_desc] = F + return F + + # + # Utility code state + # + + def use_utility_code(self, utility_code): + """ + Adds code to the C file. utility_code should + a) implement __eq__/__hash__ for the purpose of knowing whether the same + code has already been included + b) implement put_code, which takes a globalstate instance + + See UtilityCode. 
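+
+        Illustrative usage, mirroring calls made elsewhere in this file:
+
+            globalstate.use_utility_code(
+                UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))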
+        """
+        if utility_code and utility_code not in self.utility_codes:
+            self.utility_codes.add(utility_code)
+            utility_code.put_code(self)
+
+    def use_entry_utility_code(self, entry):
+        if entry is None:
+            return
+        if entry.utility_code:
+            self.use_utility_code(entry.utility_code)
+        if entry.utility_code_definition:
+            self.use_utility_code(entry.utility_code_definition)
+
+
+def funccontext_property(func):
+    name = func.__name__
+    attribute_of = operator.attrgetter(name)
+    def get(self):
+        return attribute_of(self.funcstate)
+    def set(self, value):
+        setattr(self.funcstate, name, value)
+    return property(get, set)
+
+
+class CCodeConfig(object):
+    # emit_linenums        boolean    write #line pragmas?
+    # emit_code_comments   boolean    copy the original code into C comments?
+    # c_line_in_traceback  boolean    append the c file and line number to the traceback for exceptions?
+
+    def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True):
+        self.emit_code_comments = emit_code_comments
+        self.emit_linenums = emit_linenums
+        self.c_line_in_traceback = c_line_in_traceback
+
+
+class CCodeWriter(object):
+    """
+    Utility class to output C code.
+
+    When creating an insertion point one must care about the state that is
+    kept:
+    - formatting state (level, bol) is cloned and used in insertion points
+      as well
+    - labels, temps, exc_vars: One must construct a scope in which these can
+      exist by calling enter_cfunc_scope/exit_cfunc_scope (these are for
+      sanity checking and forward compatibility). Created insertion points
+      lose this scope and cannot access it.
+    - marker: Not copied to insertion point
+    - filename_table, filename_list, input_file_contents: All codewriters
+      coming from the same root share the same instances simultaneously.
+    """
+
+    # f            file           output file
+    # buffer       StringIOTree
+
+    # level        int            indentation level
+    # bol          bool           beginning of line?
+    # marker       string         comment to emit before next line
+    # funcstate    FunctionState  contains state local to a C function used for code
+    #                             generation (labels and temps state etc.)
+    # globalstate  GlobalState    contains state global for a C file (input file info,
+    #                             utility code, declared constants etc.)
+ # pyclass_stack list used during recursive code generation to pass information + # about the current class one is in + # code_config CCodeConfig configuration options for the C code writer + + @cython.locals(create_from='CCodeWriter') + def __init__(self, create_from=None, buffer=None, copy_formatting=False): + if buffer is None: buffer = StringIOTree() + self.buffer = buffer + self.last_pos = None + self.last_marked_pos = None + self.pyclass_stack = [] + + self.funcstate = None + self.globalstate = None + self.code_config = None + self.level = 0 + self.call_level = 0 + self.bol = 1 + + if create_from is not None: + # Use same global state + self.set_global_state(create_from.globalstate) + self.funcstate = create_from.funcstate + # Clone formatting state + if copy_formatting: + self.level = create_from.level + self.bol = create_from.bol + self.call_level = create_from.call_level + self.last_pos = create_from.last_pos + self.last_marked_pos = create_from.last_marked_pos + + def create_new(self, create_from, buffer, copy_formatting): + # polymorphic constructor -- very slightly more versatile + # than using __class__ + result = CCodeWriter(create_from, buffer, copy_formatting) + return result + + def set_global_state(self, global_state): + assert self.globalstate is None # prevent overwriting once it's set + self.globalstate = global_state + self.code_config = global_state.code_config + + def copyto(self, f): + self.buffer.copyto(f) + + def getvalue(self): + return self.buffer.getvalue() + + def write(self, s): + if '\n' in s: + self._write_lines(s) + else: + self._write_to_buffer(s) + + def _write_lines(self, s): + # Cygdb needs to know which Cython source line corresponds to which C line. + # Therefore, we write this information into "self.buffer.markers" and then write it from there + # into cython_debug/cython_debug_info_* (see ModuleNode._serialize_lineno_map). + filename_line = self.last_marked_pos[:2] if self.last_marked_pos else (None, 0) + self.buffer.markers.extend([filename_line] * s.count('\n')) + + self._write_to_buffer(s) + + def _write_to_buffer(self, s): + self.buffer.write(s) + + def insertion_point(self): + other = self.create_new(create_from=self, buffer=self.buffer.insertion_point(), copy_formatting=True) + return other + + def new_writer(self): + """ + Creates a new CCodeWriter connected to the same global state, which + can later be inserted using insert. + """ + return CCodeWriter(create_from=self) + + def insert(self, writer): + """ + Inserts the contents of another code writer (created with + the same global state) in the current location. + + It is ok to write to the inserted writer also after insertion. + """ + assert writer.globalstate is self.globalstate + self.buffer.insert(writer.buffer) + + # Properties delegated to function scope + @funccontext_property + def label_counter(self): pass + @funccontext_property + def return_label(self): pass + @funccontext_property + def error_label(self): pass + @funccontext_property + def labels_used(self): pass + @funccontext_property + def continue_label(self): pass + @funccontext_property + def break_label(self): pass + @funccontext_property + def return_from_error_cleanup_label(self): pass + @funccontext_property + def yield_labels(self): pass + + def label_interceptor(self, new_labels, orig_labels, skip_to_label=None, pos=None, trace=True): + """ + Helper for generating multiple label interceptor code blocks. 
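+
+        A sketch of the typical calling pattern (illustrative): the caller
+        emits per-label handling code inside the loop body, which lands
+        between the generated put_label() and the closing put_goto():
+
+            for new_label, old_label in code.label_interceptor(new_labels, orig_labels):
+                code.putln("/* extra cleanup for this label */")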
+ + @param new_labels: the new labels that should be intercepted + @param orig_labels: the original labels that we should dispatch to after the interception + @param skip_to_label: a label to skip to before starting the code blocks + @param pos: the node position to mark for each interceptor block + @param trace: add a trace line for the pos marker or not + """ + for label, orig_label in zip(new_labels, orig_labels): + if not self.label_used(label): + continue + if skip_to_label: + # jump over the whole interception block + self.put_goto(skip_to_label) + skip_to_label = None + + if pos is not None: + self.mark_pos(pos, trace=trace) + self.put_label(label) + yield (label, orig_label) + self.put_goto(orig_label) + + # Functions delegated to function scope + def new_label(self, name=None): return self.funcstate.new_label(name) + def new_error_label(self, *args): return self.funcstate.new_error_label(*args) + def new_yield_label(self, *args): return self.funcstate.new_yield_label(*args) + def get_loop_labels(self): return self.funcstate.get_loop_labels() + def set_loop_labels(self, labels): return self.funcstate.set_loop_labels(labels) + def new_loop_labels(self, *args): return self.funcstate.new_loop_labels(*args) + def get_all_labels(self): return self.funcstate.get_all_labels() + def set_all_labels(self, labels): return self.funcstate.set_all_labels(labels) + def all_new_labels(self): return self.funcstate.all_new_labels() + def use_label(self, lbl): return self.funcstate.use_label(lbl) + def label_used(self, lbl): return self.funcstate.label_used(lbl) + + + def enter_cfunc_scope(self, scope=None): + self.funcstate = FunctionState(self, scope=scope) + + def exit_cfunc_scope(self): + self.funcstate.validate_exit() + self.funcstate = None + + # constant handling + + def get_py_int(self, str_value, longness): + return self.globalstate.get_int_const(str_value, longness).cname + + def get_py_float(self, str_value, value_code): + return self.globalstate.get_float_const(str_value, value_code).cname + + def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None): + return self.globalstate.get_py_const(type, prefix, cleanup_level, dedup_key).cname + + def get_string_const(self, text): + return self.globalstate.get_string_const(text).cname + + def get_pyunicode_ptr_const(self, text): + return self.globalstate.get_pyunicode_ptr_const(text) + + def get_py_string_const(self, text, identifier=None, + is_str=False, unicode_value=None): + return self.globalstate.get_py_string_const( + text, identifier, is_str, unicode_value).cname + + def get_argument_default_const(self, type): + return self.globalstate.get_py_const(type).cname + + def intern(self, text): + return self.get_py_string_const(text) + + def intern_identifier(self, text): + return self.get_py_string_const(text, identifier=True) + + def get_cached_constants_writer(self, target=None): + return self.globalstate.get_cached_constants_writer(target) + + # code generation + + def putln(self, code="", safe=False): + if self.last_pos and self.bol: + self.emit_marker() + if self.code_config.emit_linenums and self.last_marked_pos: + source_desc, line, _ = self.last_marked_pos + self._write_lines('\n#line %s "%s"\n' % (line, source_desc.get_escaped_description())) + if code: + if safe: + self.put_safe(code) + else: + self.put(code) + self._write_lines("\n") + self.bol = 1 + + def mark_pos(self, pos, trace=True): + if pos is None: + return + if self.last_marked_pos and self.last_marked_pos[:2] == pos[:2]: + return + self.last_pos = (pos, 
trace) + + def emit_marker(self): + pos, trace = self.last_pos + self.last_marked_pos = pos + self.last_pos = None + self._write_lines("\n") + if self.code_config.emit_code_comments: + self.indent() + self._write_lines("/* %s */\n" % self._build_marker(pos)) + if trace and self.funcstate and self.funcstate.can_trace and self.globalstate.directives['linetrace']: + self.indent() + self._write_lines('__Pyx_TraceLine(%d,%d,%s)\n' % ( + pos[1], not self.funcstate.gil_owned, self.error_goto(pos))) + + def _build_marker(self, pos): + source_desc, line, col = pos + assert isinstance(source_desc, SourceDescriptor) + contents = self.globalstate.commented_file_contents(source_desc) + lines = contents[max(0, line-3):line] # line numbers start at 1 + lines[-1] += u' # <<<<<<<<<<<<<<' + lines += contents[line:line+2] + return u'"%s":%d\n%s\n' % (source_desc.get_escaped_description(), line, u'\n'.join(lines)) + + def put_safe(self, code): + # put code, but ignore {} + self.write(code) + self.bol = 0 + + def put_or_include(self, code, name): + include_dir = self.globalstate.common_utility_include_dir + if include_dir and len(code) > 1024: + include_file = "%s_%s.h" % ( + name, hashlib.sha1(code.encode('utf8')).hexdigest()) + path = os.path.join(include_dir, include_file) + if not os.path.exists(path): + tmp_path = '%s.tmp%s' % (path, os.getpid()) + with closing(Utils.open_new_file(tmp_path)) as f: + f.write(code) + shutil.move(tmp_path, path) + code = '#include "%s"\n' % path + self.put(code) + + def put(self, code): + fix_indent = False + if "{" in code: + dl = code.count("{") + else: + dl = 0 + if "}" in code: + dl -= code.count("}") + if dl < 0: + self.level += dl + elif dl == 0 and code[0] == "}": + # special cases like "} else {" need a temporary dedent + fix_indent = True + self.level -= 1 + if self.bol: + self.indent() + self.write(code) + self.bol = 0 + if dl > 0: + self.level += dl + elif fix_indent: + self.level += 1 + + def putln_tempita(self, code, **context): + from ..Tempita import sub + self.putln(sub(code, **context)) + + def put_tempita(self, code, **context): + from ..Tempita import sub + self.put(sub(code, **context)) + + def increase_indent(self): + self.level += 1 + + def decrease_indent(self): + self.level -= 1 + + def begin_block(self): + self.putln("{") + self.increase_indent() + + def end_block(self): + self.decrease_indent() + self.putln("}") + + def indent(self): + self._write_to_buffer(" " * self.level) + + def get_py_version_hex(self, pyversion): + return "0x%02X%02X%02X%02X" % (tuple(pyversion) + (0,0,0,0))[:4] + + def put_label(self, lbl): + if lbl in self.funcstate.labels_used: + self.putln("%s:;" % lbl) + + def put_goto(self, lbl): + self.funcstate.use_label(lbl) + self.putln("goto %s;" % lbl) + + def put_var_declaration(self, entry, storage_class="", + dll_linkage=None, definition=True): + #print "Code.put_var_declaration:", entry.name, "definition =", definition ### + if entry.visibility == 'private' and not (definition or entry.defined_in_pxd): + #print "...private and not definition, skipping", entry.cname ### + return + if entry.visibility == "private" and not entry.used: + #print "...private and not used, skipping", entry.cname ### + return + if not entry.cf_used: + self.put('CYTHON_UNUSED ') + if storage_class: + self.put("%s " % storage_class) + if entry.is_cpp_optional: + self.put(entry.type.cpp_optional_declaration_code( + entry.cname, dll_linkage=dll_linkage)) + else: + self.put(entry.type.declaration_code( + entry.cname, dll_linkage=dll_linkage)) + if 
entry.init is not None: + self.put_safe(" = %s" % entry.type.literal_code(entry.init)) + elif entry.type.is_pyobject: + self.put(" = NULL") + self.putln(";") + self.funcstate.scope.use_entry_utility_code(entry) + + def put_temp_declarations(self, func_context): + for name, type, manage_ref, static in func_context.temps_allocated: + if type.is_cpp_class and not type.is_fake_reference and func_context.scope.directives['cpp_locals']: + decl = type.cpp_optional_declaration_code(name) + else: + decl = type.declaration_code(name) + if type.is_pyobject: + self.putln("%s = NULL;" % decl) + elif type.is_memoryviewslice: + self.putln("%s = %s;" % (decl, type.literal_code(type.default_value))) + else: + self.putln("%s%s;" % (static and "static " or "", decl)) + + if func_context.should_declare_error_indicator: + if self.funcstate.uses_error_indicator: + unused = '' + else: + unused = 'CYTHON_UNUSED ' + # Initialize these variables to silence compiler warnings + self.putln("%sint %s = 0;" % (unused, Naming.lineno_cname)) + self.putln("%sconst char *%s = NULL;" % (unused, Naming.filename_cname)) + self.putln("%sint %s = 0;" % (unused, Naming.clineno_cname)) + + def put_generated_by(self): + self.putln(Utils.GENERATED_BY_MARKER) + self.putln("") + + def put_h_guard(self, guard): + self.putln("#ifndef %s" % guard) + self.putln("#define %s" % guard) + + def unlikely(self, cond): + if Options.gcc_branch_hints: + return 'unlikely(%s)' % cond + else: + return cond + + def build_function_modifiers(self, modifiers, mapper=modifier_output_mapper): + if not modifiers: + return '' + return '%s ' % ' '.join([mapper(m,m) for m in modifiers]) + + # Python objects and reference counting + + def entry_as_pyobject(self, entry): + type = entry.type + if (not entry.is_self_arg and not entry.type.is_complete() + or entry.type.is_extension_type): + return "(PyObject *)" + entry.cname + else: + return entry.cname + + def as_pyobject(self, cname, type): + from .PyrexTypes import py_object_type, typecast + return typecast(py_object_type, type, cname) + + def put_gotref(self, cname, type): + type.generate_gotref(self, cname) + + def put_giveref(self, cname, type): + type.generate_giveref(self, cname) + + def put_xgiveref(self, cname, type): + type.generate_xgiveref(self, cname) + + def put_xgotref(self, cname, type): + type.generate_xgotref(self, cname) + + def put_incref(self, cname, type, nanny=True): + # Note: original put_Memslice_Incref/Decref also added in some utility code + # this is unnecessary since the relevant utility code is loaded anyway if a memoryview is used + # and so has been removed. 
However, it's potentially a feature that might be useful here + type.generate_incref(self, cname, nanny=nanny) + + def put_xincref(self, cname, type, nanny=True): + type.generate_xincref(self, cname, nanny=nanny) + + def put_decref(self, cname, type, nanny=True, have_gil=True): + type.generate_decref(self, cname, nanny=nanny, have_gil=have_gil) + + def put_xdecref(self, cname, type, nanny=True, have_gil=True): + type.generate_xdecref(self, cname, nanny=nanny, have_gil=have_gil) + + def put_decref_clear(self, cname, type, clear_before_decref=False, nanny=True, have_gil=True): + type.generate_decref_clear(self, cname, clear_before_decref=clear_before_decref, + nanny=nanny, have_gil=have_gil) + + def put_xdecref_clear(self, cname, type, clear_before_decref=False, nanny=True, have_gil=True): + type.generate_xdecref_clear(self, cname, clear_before_decref=clear_before_decref, + nanny=nanny, have_gil=have_gil) + + def put_decref_set(self, cname, type, rhs_cname): + type.generate_decref_set(self, cname, rhs_cname) + + def put_xdecref_set(self, cname, type, rhs_cname): + type.generate_xdecref_set(self, cname, rhs_cname) + + def put_incref_memoryviewslice(self, slice_cname, type, have_gil): + # TODO ideally this would just be merged into "put_incref" + type.generate_incref_memoryviewslice(self, slice_cname, have_gil=have_gil) + + def put_var_incref_memoryviewslice(self, entry, have_gil): + self.put_incref_memoryviewslice(entry.cname, entry.type, have_gil=have_gil) + + def put_var_gotref(self, entry): + self.put_gotref(entry.cname, entry.type) + + def put_var_giveref(self, entry): + self.put_giveref(entry.cname, entry.type) + + def put_var_xgotref(self, entry): + self.put_xgotref(entry.cname, entry.type) + + def put_var_xgiveref(self, entry): + self.put_xgiveref(entry.cname, entry.type) + + def put_var_incref(self, entry, **kwds): + self.put_incref(entry.cname, entry.type, **kwds) + + def put_var_xincref(self, entry, **kwds): + self.put_xincref(entry.cname, entry.type, **kwds) + + def put_var_decref(self, entry, **kwds): + self.put_decref(entry.cname, entry.type, **kwds) + + def put_var_xdecref(self, entry, **kwds): + self.put_xdecref(entry.cname, entry.type, **kwds) + + def put_var_decref_clear(self, entry, **kwds): + self.put_decref_clear(entry.cname, entry.type, clear_before_decref=entry.in_closure, **kwds) + + def put_var_decref_set(self, entry, rhs_cname, **kwds): + self.put_decref_set(entry.cname, entry.type, rhs_cname, **kwds) + + def put_var_xdecref_set(self, entry, rhs_cname, **kwds): + self.put_xdecref_set(entry.cname, entry.type, rhs_cname, **kwds) + + def put_var_xdecref_clear(self, entry, **kwds): + self.put_xdecref_clear(entry.cname, entry.type, clear_before_decref=entry.in_closure, **kwds) + + def put_var_decrefs(self, entries, used_only = 0): + for entry in entries: + if not used_only or entry.used: + if entry.xdecref_cleanup: + self.put_var_xdecref(entry) + else: + self.put_var_decref(entry) + + def put_var_xdecrefs(self, entries): + for entry in entries: + self.put_var_xdecref(entry) + + def put_var_xdecrefs_clear(self, entries): + for entry in entries: + self.put_var_xdecref_clear(entry) + + def put_init_to_py_none(self, cname, type, nanny=True): + from .PyrexTypes import py_object_type, typecast + py_none = typecast(type, py_object_type, "Py_None") + if nanny: + self.putln("%s = %s; __Pyx_INCREF(Py_None);" % (cname, py_none)) + else: + self.putln("%s = %s; Py_INCREF(Py_None);" % (cname, py_none)) + + def put_init_var_to_py_none(self, entry, template = "%s", nanny=True): + code = 
template % entry.cname + #if entry.type.is_extension_type: + # code = "((PyObject*)%s)" % code + self.put_init_to_py_none(code, entry.type, nanny) + if entry.in_closure: + self.put_giveref('Py_None') + + def put_pymethoddef(self, entry, term, allow_skip=True, wrapper_code_writer=None): + is_reverse_number_slot = False + if entry.is_special or entry.name == '__getattribute__': + from . import TypeSlots + is_reverse_number_slot = True + if entry.name not in special_py_methods and not TypeSlots.is_reverse_number_slot(entry.name): + if entry.name == '__getattr__' and not self.globalstate.directives['fast_getattr']: + pass + # Python's typeobject.c will automatically fill in our slot + # in add_operators() (called by PyType_Ready) with a value + # that's better than ours. + elif allow_skip: + return + + method_flags = entry.signature.method_flags() + if not method_flags: + return + if entry.is_special: + method_flags += [TypeSlots.method_coexist] + func_ptr = wrapper_code_writer.put_pymethoddef_wrapper(entry) if wrapper_code_writer else entry.func_cname + # Add required casts, but try not to shadow real warnings. + cast = entry.signature.method_function_type() + if cast != 'PyCFunction': + func_ptr = '(void*)(%s)%s' % (cast, func_ptr) + entry_name = entry.name.as_c_string_literal() + if is_reverse_number_slot: + # Unlike most special functions, reverse number operator slots are actually generated here + # (to ensure that they can be looked up). However, they're sometimes guarded by the preprocessor + # so a bit of extra logic is needed + slot = TypeSlots.get_slot_table(self.globalstate.directives).get_slot_by_method_name(entry.name) + preproc_guard = slot.preprocessor_guard_code() + if preproc_guard: + self.putln(preproc_guard) + self.putln( + '{%s, (PyCFunction)%s, %s, %s}%s' % ( + entry_name, + func_ptr, + "|".join(method_flags), + entry.doc_cname if entry.doc else '0', + term)) + if is_reverse_number_slot and preproc_guard: + self.putln("#endif") + + def put_pymethoddef_wrapper(self, entry): + func_cname = entry.func_cname + if entry.is_special: + method_flags = entry.signature.method_flags() or [] + from .TypeSlots import method_noargs + if method_noargs in method_flags: + # Special NOARGS methods really take no arguments besides 'self', but PyCFunction expects one. + func_cname = Naming.method_wrapper_prefix + func_cname + self.putln("static PyObject *%s(PyObject *self, CYTHON_UNUSED PyObject *arg) {" % func_cname) + func_call = "%s(self)" % entry.func_cname + if entry.name == "__next__": + self.putln("PyObject *res = %s;" % func_call) + # tp_iternext can return NULL without an exception + self.putln("if (!res && !PyErr_Occurred()) { PyErr_SetNone(PyExc_StopIteration); }") + self.putln("return res;") + else: + self.putln("return %s;" % func_call) + self.putln("}") + return func_cname + + # GIL methods + + def use_fast_gil_utility_code(self): + if self.globalstate.directives['fast_gil']: + self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c")) + else: + self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c")) + + def put_ensure_gil(self, declare_gilstate=True, variable=None): + """ + Acquire the GIL. The generated code is safe even when no PyThreadState + has been allocated for this thread (for threads not initialized by + using the Python API). Additionally, the code generated by this method + may be called recursively. 
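+
+        A sketch of the C code this emits with the default variable name
+        (illustrative):
+
+            #ifdef WITH_THREAD
+            PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
+            #endif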
+        """
+        self.globalstate.use_utility_code(
+            UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
+        self.use_fast_gil_utility_code()
+        self.putln("#ifdef WITH_THREAD")
+        if not variable:
+            variable = '__pyx_gilstate_save'
+        if declare_gilstate:
+            self.put("PyGILState_STATE ")
+        self.putln("%s = __Pyx_PyGILState_Ensure();" % variable)
+        self.putln("#endif")
+
+    def put_release_ensured_gil(self, variable=None):
+        """
+        Releases the GIL, corresponds to `put_ensure_gil`.
+        """
+        self.use_fast_gil_utility_code()
+        if not variable:
+            variable = '__pyx_gilstate_save'
+        self.putln("#ifdef WITH_THREAD")
+        self.putln("__Pyx_PyGILState_Release(%s);" % variable)
+        self.putln("#endif")
+
+    def put_acquire_gil(self, variable=None, unknown_gil_state=True):
+        """
+        Acquire the GIL. The thread's thread state must have been initialized
+        by a previous `put_release_gil`.
+        """
+        self.use_fast_gil_utility_code()
+        self.putln("#ifdef WITH_THREAD")
+        self.putln("__Pyx_FastGIL_Forget();")
+        if variable:
+            self.putln('_save = %s;' % variable)
+        if unknown_gil_state:
+            self.putln("if (_save) {")
+        self.putln("Py_BLOCK_THREADS")
+        if unknown_gil_state:
+            self.putln("}")
+        self.putln("#endif")
+
+    def put_release_gil(self, variable=None, unknown_gil_state=True):
+        "Release the GIL, corresponds to `put_acquire_gil`."
+        self.use_fast_gil_utility_code()
+        self.putln("#ifdef WITH_THREAD")
+        self.putln("PyThreadState *_save;")
+        self.putln("_save = NULL;")
+        if unknown_gil_state:
+            # we don't *know* that we don't have the GIL (since we may be inside a nogil function,
+            # and Py_UNBLOCK_THREADS is unsafe without the GIL)
+            self.putln("if (PyGILState_Check()) {")
+        self.putln("Py_UNBLOCK_THREADS")
+        if unknown_gil_state:
+            self.putln("}")
+        if variable:
+            self.putln('%s = _save;' % variable)
+        self.putln("__Pyx_FastGIL_Remember();")
+        self.putln("#endif")
+
+    def declare_gilstate(self):
+        self.putln("#ifdef WITH_THREAD")
+        self.putln("PyGILState_STATE __pyx_gilstate_save;")
+        self.putln("#endif")
+
+    # error handling
+
+    def put_error_if_neg(self, pos, value):
+        # TODO this path is almost _never_ taken, yet this macro makes it slower!
+        # return self.putln("if (unlikely(%s < 0)) %s" % (value, self.error_goto(pos)))
+        return self.putln("if (%s < 0) %s" % (value, self.error_goto(pos)))
+
+    def put_error_if_unbound(self, pos, entry, in_nogil_context=False, unbound_check_code=None):
+        if entry.from_closure:
+            func = '__Pyx_RaiseClosureNameError'
+            self.globalstate.use_utility_code(
+                UtilityCode.load_cached("RaiseClosureNameError", "ObjectHandling.c"))
+        elif entry.type.is_memoryviewslice and in_nogil_context:
+            func = '__Pyx_RaiseUnboundMemoryviewSliceNogil'
+            self.globalstate.use_utility_code(
+                UtilityCode.load_cached("RaiseUnboundMemoryviewSliceNogil", "ObjectHandling.c"))
+        elif entry.type.is_cpp_class and entry.is_cglobal:
+            func = '__Pyx_RaiseCppGlobalNameError'
+            self.globalstate.use_utility_code(
+                UtilityCode.load_cached("RaiseCppGlobalNameError", "ObjectHandling.c"))
+        elif entry.type.is_cpp_class and entry.is_variable and not entry.is_member and entry.scope.is_c_class_scope:
+            # there doesn't seem to be a good way of detecting an instance-attribute of a C class
+            # (is_member is only set for class attributes)
+            func = '__Pyx_RaiseCppAttributeError'
+            self.globalstate.use_utility_code(
+                UtilityCode.load_cached("RaiseCppAttributeError", "ObjectHandling.c"))
+        else:
+            func = '__Pyx_RaiseUnboundLocalError'
+            self.globalstate.use_utility_code(
+                UtilityCode.load_cached("RaiseUnboundLocalError", "ObjectHandling.c"))
+
+        if not unbound_check_code:
+            unbound_check_code = entry.type.check_for_null_code(entry.cname)
+        self.putln('if (unlikely(!%s)) { %s("%s"); %s }' % (
+            unbound_check_code,
+            func,
+            entry.name,
+            self.error_goto(pos)))
+
+    def set_error_info(self, pos, used=False):
+        self.funcstate.should_declare_error_indicator = True
+        if used:
+            self.funcstate.uses_error_indicator = True
+        return "__PYX_MARK_ERR_POS(%s, %s)" % (
+            self.lookup_filename(pos[0]),
+            pos[1])
+
+    def error_goto(self, pos, used=True):
+        lbl = self.funcstate.error_label
+        self.funcstate.use_label(lbl)
+        if pos is None:
+            return 'goto %s;' % lbl
+        self.funcstate.should_declare_error_indicator = True
+        if used:
+            self.funcstate.uses_error_indicator = True
+        return "__PYX_ERR(%s, %s, %s)" % (
+            self.lookup_filename(pos[0]),
+            pos[1],
+            lbl)
+
+    def error_goto_if(self, cond, pos):
+        return "if (%s) %s" % (self.unlikely(cond), self.error_goto(pos))
+
+    def error_goto_if_null(self, cname, pos):
+        return self.error_goto_if("!%s" % cname, pos)
+
+    def error_goto_if_neg(self, cname, pos):
+        # Add extra parentheses to silence clang warnings about constant conditions.
+        return self.error_goto_if("(%s < 0)" % cname, pos)
+
+    def error_goto_if_PyErr(self, pos):
+        return self.error_goto_if("PyErr_Occurred()", pos)
+
+    def lookup_filename(self, filename):
+        return self.globalstate.lookup_filename(filename)
+
+    def put_declare_refcount_context(self):
+        self.putln('__Pyx_RefNannyDeclarations')
+
+    def put_setup_refcount_context(self, name, acquire_gil=False):
+        name = name.as_c_string_literal()  # handle unicode names
+        if acquire_gil:
+            self.globalstate.use_utility_code(
+                UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
+        self.putln('__Pyx_RefNannySetupContext(%s, %d);' % (name, acquire_gil and 1 or 0))
+
+    def put_finish_refcount_context(self, nogil=False):
+        self.putln("__Pyx_RefNannyFinishContextNogil()" if nogil else "__Pyx_RefNannyFinishContext();")
+
+    def put_add_traceback(self, qualified_name, include_cline=True):
+        """
+        Build a Python traceback for propagating exceptions.
+ + qualified_name should be the qualified name of the function. + """ + qualified_name = qualified_name.as_c_string_literal() # handle unicode names + format_tuple = ( + qualified_name, + Naming.clineno_cname if include_cline else 0, + Naming.lineno_cname, + Naming.filename_cname, + ) + + self.funcstate.uses_error_indicator = True + self.putln('__Pyx_AddTraceback(%s, %s, %s, %s);' % format_tuple) + + def put_unraisable(self, qualified_name, nogil=False): + """ + Generate code to print a Python warning for an unraisable exception. + + qualified_name should be the qualified name of the function. + """ + format_tuple = ( + qualified_name, + Naming.clineno_cname, + Naming.lineno_cname, + Naming.filename_cname, + self.globalstate.directives['unraisable_tracebacks'], + nogil, + ) + self.funcstate.uses_error_indicator = True + self.putln('__Pyx_WriteUnraisable("%s", %s, %s, %s, %d, %d);' % format_tuple) + self.globalstate.use_utility_code( + UtilityCode.load_cached("WriteUnraisableException", "Exceptions.c")) + + def put_trace_declarations(self): + self.putln('__Pyx_TraceDeclarations') + + def put_trace_frame_init(self, codeobj=None): + if codeobj: + self.putln('__Pyx_TraceFrameInit(%s)' % codeobj) + + def put_trace_call(self, name, pos, nogil=False): + self.putln('__Pyx_TraceCall("%s", %s[%s], %s, %d, %s);' % ( + name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1], nogil, self.error_goto(pos))) + + def put_trace_exception(self): + self.putln("__Pyx_TraceException();") + + def put_trace_return(self, retvalue_cname, nogil=False): + self.putln("__Pyx_TraceReturn(%s, %d);" % (retvalue_cname, nogil)) + + def putln_openmp(self, string): + self.putln("#ifdef _OPENMP") + self.putln(string) + self.putln("#endif /* _OPENMP */") + + def undef_builtin_expect(self, cond): + """ + Redefine the macros likely() and unlikely to no-ops, depending on + condition 'cond' + """ + self.putln("#if %s" % cond) + self.putln(" #undef likely") + self.putln(" #undef unlikely") + self.putln(" #define likely(x) (x)") + self.putln(" #define unlikely(x) (x)") + self.putln("#endif") + + def redef_builtin_expect(self, cond): + self.putln("#if %s" % cond) + self.putln(" #undef likely") + self.putln(" #undef unlikely") + self.putln(" #define likely(x) __builtin_expect(!!(x), 1)") + self.putln(" #define unlikely(x) __builtin_expect(!!(x), 0)") + self.putln("#endif") + + +class PyrexCodeWriter(object): + # f file output file + # level int indentation level + + def __init__(self, outfile_name): + self.f = Utils.open_new_file(outfile_name) + self.level = 0 + + def putln(self, code): + self.f.write("%s%s\n" % (" " * self.level, code)) + + def indent(self): + self.level += 1 + + def dedent(self): + self.level -= 1 + + +class PyxCodeWriter(object): + """ + Can be used for writing out some Cython code. 
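+
+    Minimal usage sketch (illustrative):
+
+        code = PyxCodeWriter()
+        with code.indenter("if x:"):
+            code.putln("x += 1")
+        source = code.getvalue()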
+ """ + + def __init__(self, buffer=None, indent_level=0, context=None, encoding='ascii'): + self.buffer = buffer or StringIOTree() + self.level = indent_level + self.original_level = indent_level + self.context = context + self.encoding = encoding + + def indent(self, levels=1): + self.level += levels + return True + + def dedent(self, levels=1): + self.level -= levels + + @contextmanager + def indenter(self, line): + """ + with pyx_code.indenter("for i in range(10):"): + pyx_code.putln("print i") + """ + self.putln(line) + self.indent() + yield + self.dedent() + + def empty(self): + return self.buffer.empty() + + def getvalue(self): + result = self.buffer.getvalue() + if isinstance(result, bytes): + result = result.decode(self.encoding) + return result + + def putln(self, line, context=None): + context = context or self.context + if context: + line = sub_tempita(line, context) + self._putln(line) + + def _putln(self, line): + self.buffer.write(u"%s%s\n" % (self.level * u" ", line)) + + def put_chunk(self, chunk, context=None): + context = context or self.context + if context: + chunk = sub_tempita(chunk, context) + + chunk = textwrap.dedent(chunk) + for line in chunk.splitlines(): + self._putln(line) + + def insertion_point(self): + return type(self)(self.buffer.insertion_point(), self.level, self.context) + + def reset(self): + # resets the buffer so that nothing gets written. Most useful + # for abandoning all work in a specific insertion point + self.buffer.reset() + self.level = self.original_level + + def named_insertion_point(self, name): + setattr(self, name, self.insertion_point()) + + +class ClosureTempAllocator(object): + def __init__(self, klass): + self.klass = klass + self.temps_allocated = {} + self.temps_free = {} + self.temps_count = 0 + + def reset(self): + for type, cnames in self.temps_allocated.items(): + self.temps_free[type] = list(cnames) + + def allocate_temp(self, type): + if type not in self.temps_allocated: + self.temps_allocated[type] = [] + self.temps_free[type] = [] + elif self.temps_free[type]: + return self.temps_free[type].pop(0) + cname = '%s%d' % (Naming.codewriter_temp_prefix, self.temps_count) + self.klass.declare_var(pos=None, name=cname, cname=cname, type=type, is_cdef=True) + self.temps_allocated[type].append(cname) + self.temps_count += 1 + return cname diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CodeGeneration.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CodeGeneration.py new file mode 100644 index 0000000000000000000000000000000000000000..e64049c7f5d88a2ab52c26bd74948f6be8a0e333 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CodeGeneration.py @@ -0,0 +1,35 @@ +from __future__ import absolute_import + +from .Visitor import VisitorTransform +from .Nodes import StatListNode + + +class ExtractPxdCode(VisitorTransform): + """ + Finds nodes in a pxd file that should generate code, and + returns them in a StatListNode. + + The result is a tuple (StatListNode, ModuleScope), i.e. + everything that is needed from the pxd after it is processed. + + A purer approach would be to separately compile the pxd code, + but the result would have to be slightly more sophisticated + than pure strings (functions + wanted interned strings + + wanted utility code + wanted cached objects) so for now this + approach is taken. 
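+
+    Usage sketch (illustrative):
+
+        stats, scope = ExtractPxdCode()(pxd_module_node)
+        # 'stats' is a StatListNode with the function definitions found,
+        # 'scope' is the pxd file's ModuleScope.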
+ """ + + def __call__(self, root): + self.funcs = [] + self.visitchildren(root) + return (StatListNode(root.pos, stats=self.funcs), root.scope) + + def visit_FuncDefNode(self, node): + self.funcs.append(node) + # Do not visit children, nested funcdefnodes will + # also be moved by this action... + return node + + def visit_Node(self, node): + self.visitchildren(node) + return node diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CythonScope.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CythonScope.py new file mode 100644 index 0000000000000000000000000000000000000000..f73be007086bed44b46312ad27687ceaa19c4ed8 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CythonScope.py @@ -0,0 +1,181 @@ +from __future__ import absolute_import + +from .Symtab import ModuleScope +from .PyrexTypes import * +from .UtilityCode import CythonUtilityCode +from .Errors import error +from .Scanning import StringSourceDescriptor +from . import MemoryView +from .StringEncoding import EncodedString + + +class CythonScope(ModuleScope): + is_cython_builtin = 1 + _cythonscope_initialized = False + + def __init__(self, context): + ModuleScope.__init__(self, u'cython', None, None) + self.pxd_file_loaded = True + self.populate_cython_scope() + # The Main.Context object + self.context = context + + for fused_type in (cy_integral_type, cy_floating_type, cy_numeric_type): + entry = self.declare_typedef(fused_type.name, + fused_type, + None, + cname='') + entry.in_cinclude = True + + def is_cpp(self): + # Allow C++ utility code in C++ contexts. + return self.context.cpp + + def lookup_type(self, name): + # This function should go away when types are all first-level objects. + type = parse_basic_type(name) + if type: + return type + + return super(CythonScope, self).lookup_type(name) + + def lookup(self, name): + entry = super(CythonScope, self).lookup(name) + + if entry is None and not self._cythonscope_initialized: + self.load_cythonscope() + entry = super(CythonScope, self).lookup(name) + + return entry + + def find_module(self, module_name, pos): + error("cython.%s is not available" % module_name, pos) + + def find_submodule(self, module_name, as_package=False): + entry = self.entries.get(module_name, None) + if not entry: + self.load_cythonscope() + entry = self.entries.get(module_name, None) + + if entry and entry.as_module: + return entry.as_module + else: + # TODO: fix find_submodule control flow so that we're not + # expected to create a submodule here (to protect CythonScope's + # possible immutability). Hack ourselves out of the situation + # for now. + raise error((StringSourceDescriptor(u"cython", u""), 0, 0), + "cython.%s is not available" % module_name) + + def lookup_qualified_name(self, qname): + # ExprNode.as_cython_attribute generates qnames and we untangle it here... 
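+        # Illustrative example: for qname u'view.array', this first resolves
+        # the 'view' submodule scope and then looks up 'array' inside it.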
+ name_path = qname.split(u'.') + scope = self + while len(name_path) > 1: + scope = scope.lookup_here(name_path[0]) + if scope: + scope = scope.as_module + del name_path[0] + if scope is None: + return None + else: + return scope.lookup_here(name_path[0]) + + def populate_cython_scope(self): + # These are used to optimize isinstance in FinalOptimizePhase + type_object = self.declare_typedef( + 'PyTypeObject', + base_type = c_void_type, + pos = None, + cname = 'PyTypeObject') + type_object.is_void = True + type_object_type = type_object.type + + self.declare_cfunction( + 'PyObject_TypeCheck', + CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None), + CFuncTypeArg("t", c_ptr_type(type_object_type), None)]), + pos = None, + defining = 1, + cname = 'PyObject_TypeCheck') + + def load_cythonscope(self): + """ + Creates some entries for testing purposes and entries for + cython.array() and for cython.view.*. + """ + if self._cythonscope_initialized: + return + + self._cythonscope_initialized = True + cython_testscope_utility_code.declare_in_scope( + self, cython_scope=self) + cython_test_extclass_utility_code.declare_in_scope( + self, cython_scope=self) + + # + # The view sub-scope + # + self.viewscope = viewscope = ModuleScope(u'view', self, None) + self.declare_module('view', viewscope, None).as_module = viewscope + viewscope.is_cython_builtin = True + viewscope.pxd_file_loaded = True + + cythonview_testscope_utility_code.declare_in_scope( + viewscope, cython_scope=self) + + view_utility_scope = MemoryView.view_utility_code.declare_in_scope( + self.viewscope, cython_scope=self, + allowlist=MemoryView.view_utility_allowlist) + + # Marks the types as being cython_builtin_type so that they can be + # extended from without Cython attempting to import cython.view + ext_types = [ entry.type + for entry in view_utility_scope.entries.values() + if entry.type.is_extension_type ] + for ext_type in ext_types: + ext_type.is_cython_builtin_type = 1 + + # self.entries["array"] = view_utility_scope.entries.pop("array") + + # dataclasses scope + dc_str = EncodedString(u'dataclasses') + dataclassesscope = ModuleScope(dc_str, self, context=None) + self.declare_module(dc_str, dataclassesscope, pos=None).as_module = dataclassesscope + dataclassesscope.is_cython_builtin = True + dataclassesscope.pxd_file_loaded = True + # doesn't actually have any contents + + +def create_cython_scope(context): + # One could in fact probably make it a singleton, + # but not sure yet whether any code mutates it (which would kill reusing + # it across different contexts) + return CythonScope(context) + +# Load test utilities for the cython scope + +def load_testscope_utility(cy_util_name, **kwargs): + return CythonUtilityCode.load(cy_util_name, "TestCythonScope.pyx", **kwargs) + + +undecorated_methods_protos = UtilityCode(proto=u""" + /* These methods are undecorated and have therefore no prototype */ + static PyObject *__pyx_TestClass_cdef_method( + struct __pyx_TestClass_obj *self, int value); + static PyObject *__pyx_TestClass_cpdef_method( + struct __pyx_TestClass_obj *self, int value, int skip_dispatch); + static PyObject *__pyx_TestClass_def_method( + PyObject *self, PyObject *value); +""") + +cython_testscope_utility_code = load_testscope_utility("TestScope") + +test_cython_utility_dep = load_testscope_utility("TestDep") + +cython_test_extclass_utility_code = \ + load_testscope_utility("TestClass", name="TestClass", + requires=[undecorated_methods_protos, + test_cython_utility_dep]) + 
+cythonview_testscope_utility_code = load_testscope_utility("View.TestScope") diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Dataclass.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Dataclass.py new file mode 100644 index 0000000000000000000000000000000000000000..1b41bf9e6a0c709e80c995548170fa6707e6bdbd --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Dataclass.py @@ -0,0 +1,839 @@ +# functions to transform a c class into a dataclass + +from collections import OrderedDict +from textwrap import dedent +import operator + +from . import ExprNodes +from . import Nodes +from . import PyrexTypes +from . import Builtin +from . import Naming +from .Errors import error, warning +from .Code import UtilityCode, TempitaUtilityCode, PyxCodeWriter +from .Visitor import VisitorTransform +from .StringEncoding import EncodedString +from .TreeFragment import TreeFragment +from .ParseTreeTransforms import NormalizeTree, SkipDeclarations +from .Options import copy_inherited_directives + +_dataclass_loader_utilitycode = None + +def make_dataclasses_module_callnode(pos): + global _dataclass_loader_utilitycode + if not _dataclass_loader_utilitycode: + python_utility_code = UtilityCode.load_cached("Dataclasses_fallback", "Dataclasses.py") + python_utility_code = EncodedString(python_utility_code.impl) + _dataclass_loader_utilitycode = TempitaUtilityCode.load( + "SpecificModuleLoader", "Dataclasses.c", + context={'cname': "dataclasses", 'py_code': python_utility_code.as_c_string_literal()}) + return ExprNodes.PythonCapiCallNode( + pos, "__Pyx_Load_dataclasses_Module", + PyrexTypes.CFuncType(PyrexTypes.py_object_type, []), + utility_code=_dataclass_loader_utilitycode, + args=[], + ) + +def make_dataclass_call_helper(pos, callable, kwds): + utility_code = UtilityCode.load_cached("DataclassesCallHelper", "Dataclasses.c") + func_type = PyrexTypes.CFuncType( + PyrexTypes.py_object_type, [ + PyrexTypes.CFuncTypeArg("callable", PyrexTypes.py_object_type, None), + PyrexTypes.CFuncTypeArg("kwds", PyrexTypes.py_object_type, None) + ], + ) + return ExprNodes.PythonCapiCallNode( + pos, + function_name="__Pyx_DataclassesCallHelper", + func_type=func_type, + utility_code=utility_code, + args=[callable, kwds], + ) + + +class RemoveAssignmentsToNames(VisitorTransform, SkipDeclarations): + """ + Cython (and Python) normally treats + + class A: + x = 1 + + as generating a class attribute. However for dataclasses the `= 1` should be interpreted as + a default value to initialize an instance attribute with. + This transform therefore removes the `x=1` assignment so that the class attribute isn't + generated, while recording what it has removed so that it can be used in the initialization. 
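+
+    Usage sketch (illustrative; mirrors process_class_get_fields() below):
+
+        transform = RemoveAssignmentsToNames(['x'])
+        transform(class_node)
+        defaults = transform.removed_assignments   # maps 'x' to its rhs node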
+ """ + def __init__(self, names): + super(RemoveAssignmentsToNames, self).__init__() + self.names = names + self.removed_assignments = {} + + def visit_CClassNode(self, node): + self.visitchildren(node) + return node + + def visit_PyClassNode(self, node): + return node # go no further + + def visit_FuncDefNode(self, node): + return node # go no further + + def visit_SingleAssignmentNode(self, node): + if node.lhs.is_name and node.lhs.name in self.names: + if node.lhs.name in self.removed_assignments: + warning(node.pos, ("Multiple assignments for '%s' in dataclass; " + "using most recent") % node.lhs.name, 1) + self.removed_assignments[node.lhs.name] = node.rhs + return [] + return node + + # I believe cascaded assignment is always a syntax error with annotations + # so there's no need to define visit_CascadedAssignmentNode + + def visit_Node(self, node): + self.visitchildren(node) + return node + + +class TemplateCode(object): + """ + Adds the ability to keep track of placeholder argument names to PyxCodeWriter. + + Also adds extra_stats which are nodes bundled at the end when this + is converted to a tree. + """ + _placeholder_count = 0 + + def __init__(self, writer=None, placeholders=None, extra_stats=None): + self.writer = PyxCodeWriter() if writer is None else writer + self.placeholders = {} if placeholders is None else placeholders + self.extra_stats = [] if extra_stats is None else extra_stats + + def add_code_line(self, code_line): + self.writer.putln(code_line) + + def add_code_lines(self, code_lines): + for line in code_lines: + self.writer.putln(line) + + def reset(self): + # don't attempt to reset placeholders - it really doesn't matter if + # we have unused placeholders + self.writer.reset() + + def empty(self): + return self.writer.empty() + + def indenter(self): + return self.writer.indenter() + + def new_placeholder(self, field_names, value): + name = self._new_placeholder_name(field_names) + self.placeholders[name] = value + return name + + def add_extra_statements(self, statements): + if self.extra_stats is None: + assert False, "Can only use add_extra_statements on top-level writer" + self.extra_stats.extend(statements) + + def _new_placeholder_name(self, field_names): + while True: + name = "DATACLASS_PLACEHOLDER_%d" % self._placeholder_count + if (name not in self.placeholders + and name not in field_names): + # make sure name isn't already used and doesn't + # conflict with a variable name (which is unlikely but possible) + break + self._placeholder_count += 1 + return name + + def generate_tree(self, level='c_class'): + stat_list_node = TreeFragment( + self.writer.getvalue(), + level=level, + pipeline=[NormalizeTree(None)], + ).substitute(self.placeholders) + + stat_list_node.stats += self.extra_stats + return stat_list_node + + def insertion_point(self): + new_writer = self.writer.insertion_point() + return TemplateCode( + writer=new_writer, + placeholders=self.placeholders, + extra_stats=self.extra_stats + ) + + +class _MISSING_TYPE(object): + pass +MISSING = _MISSING_TYPE() + + +class Field(object): + """ + Field is based on the dataclasses.field class from the standard library module. + It is used internally during the generation of Cython dataclasses to keep track + of the settings for individual attributes. + + Attributes of this class are stored as nodes so they can be used in code construction + more readily (i.e. 
we store BoolNode rather than bool) + """ + default = MISSING + default_factory = MISSING + private = False + + literal_keys = ("repr", "hash", "init", "compare", "metadata") + + # default values are defined by the CPython dataclasses.field + def __init__(self, pos, default=MISSING, default_factory=MISSING, + repr=None, hash=None, init=None, + compare=None, metadata=None, + is_initvar=False, is_classvar=False, + **additional_kwds): + if default is not MISSING: + self.default = default + if default_factory is not MISSING: + self.default_factory = default_factory + self.repr = repr or ExprNodes.BoolNode(pos, value=True) + self.hash = hash or ExprNodes.NoneNode(pos) + self.init = init or ExprNodes.BoolNode(pos, value=True) + self.compare = compare or ExprNodes.BoolNode(pos, value=True) + self.metadata = metadata or ExprNodes.NoneNode(pos) + self.is_initvar = is_initvar + self.is_classvar = is_classvar + + for k, v in additional_kwds.items(): + # There should not be any additional keywords! + error(v.pos, "cython.dataclasses.field() got an unexpected keyword argument '%s'" % k) + + for field_name in self.literal_keys: + field_value = getattr(self, field_name) + if not field_value.is_literal: + error(field_value.pos, + "cython.dataclasses.field parameter '%s' must be a literal value" % field_name) + + def iterate_record_node_arguments(self): + for key in (self.literal_keys + ('default', 'default_factory')): + value = getattr(self, key) + if value is not MISSING: + yield key, value + + +def process_class_get_fields(node): + var_entries = node.scope.var_entries + # order of definition is used in the dataclass + var_entries = sorted(var_entries, key=operator.attrgetter('pos')) + var_names = [entry.name for entry in var_entries] + + # don't treat `x = 1` as an assignment of a class attribute within the dataclass + transform = RemoveAssignmentsToNames(var_names) + transform(node) + default_value_assignments = transform.removed_assignments + + base_type = node.base_type + fields = OrderedDict() + while base_type: + if base_type.is_external or not base_type.scope.implemented: + warning(node.pos, "Cannot reliably handle Cython dataclasses with base types " + "in external modules since it is not possible to tell what fields they have", 2) + if base_type.dataclass_fields: + fields = base_type.dataclass_fields.copy() + break + base_type = base_type.base_type + + for entry in var_entries: + name = entry.name + is_initvar = entry.declared_with_pytyping_modifier("dataclasses.InitVar") + # TODO - classvars aren't included in "var_entries" so are missed here + # and thus this code is never triggered + is_classvar = entry.declared_with_pytyping_modifier("typing.ClassVar") + if name in default_value_assignments: + assignment = default_value_assignments[name] + if (isinstance(assignment, ExprNodes.CallNode) and ( + assignment.function.as_cython_attribute() == "dataclasses.field" or + Builtin.exprnode_to_known_standard_library_name( + assignment.function, node.scope) == "dataclasses.field")): + # I believe most of this is well-enforced when it's treated as a directive + # but it doesn't hurt to make sure + valid_general_call = (isinstance(assignment, ExprNodes.GeneralCallNode) + and isinstance(assignment.positional_args, ExprNodes.TupleNode) + and not assignment.positional_args.args + and (assignment.keyword_args is None or isinstance(assignment.keyword_args, ExprNodes.DictNode))) + valid_simple_call = (isinstance(assignment, ExprNodes.SimpleCallNode) and not assignment.args) + if not (valid_general_call or 
valid_simple_call): + error(assignment.pos, "Call to 'cython.dataclasses.field' must only consist " + "of compile-time keyword arguments") + continue + keyword_args = assignment.keyword_args.as_python_dict() if valid_general_call and assignment.keyword_args else {} + if 'default' in keyword_args and 'default_factory' in keyword_args: + error(assignment.pos, "cannot specify both default and default_factory") + continue + field = Field(node.pos, **keyword_args) + else: + if assignment.type in [Builtin.list_type, Builtin.dict_type, Builtin.set_type]: + # The standard library module generates a TypeError at runtime + # in this situation. + # Error message is copied from CPython + error(assignment.pos, "mutable default <class '{0}'> for field {1} is not allowed: " + "use default_factory".format(assignment.type.name, name)) + + field = Field(node.pos, default=assignment) + else: + field = Field(node.pos) + field.is_initvar = is_initvar + field.is_classvar = is_classvar + if entry.visibility == "private": + field.private = True + fields[name] = field + node.entry.type.dataclass_fields = fields + return fields + + +def handle_cclass_dataclass(node, dataclass_args, analyse_decs_transform): + # default argument values from https://docs.python.org/3/library/dataclasses.html + kwargs = dict(init=True, repr=True, eq=True, + order=False, unsafe_hash=False, + frozen=False, kw_only=False) + if dataclass_args is not None: + if dataclass_args[0]: + error(node.pos, "cython.dataclasses.dataclass takes no positional arguments") + for k, v in dataclass_args[1].items(): + if k not in kwargs: + error(node.pos, + "cython.dataclasses.dataclass() got an unexpected keyword argument '%s'" % k) + if not isinstance(v, ExprNodes.BoolNode): + error(node.pos, + "Arguments passed to cython.dataclasses.dataclass must be True or False") + kwargs[k] = v.value + + kw_only = kwargs.pop('kw_only') + + fields = process_class_get_fields(node) + + dataclass_module = make_dataclasses_module_callnode(node.pos) + + # create __dataclass_params__ attribute. I try to use the exact + # `_DataclassParams` class defined in the standard library module if at all possible + # for maximum duck-typing compatibility.
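+    # Roughly what the node construction below amounts to, written out as source
+    # (a sketch only; the real call goes through __Pyx_DataclassesCallHelper so
+    # that keywords unknown to older stdlib versions can be dropped):
+    #     __dataclass_params__ = dataclasses._DataclassParams(
+    #         init=..., repr=..., eq=..., order=..., unsafe_hash=..., frozen=...,
+    #         kw_only=..., match_args=False, slots=False, weakref_slot=False)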
+ dataclass_params_func = ExprNodes.AttributeNode(node.pos, obj=dataclass_module, + attribute=EncodedString("_DataclassParams")) + dataclass_params_keywords = ExprNodes.DictNode.from_pairs( + node.pos, + [ (ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)), + ExprNodes.BoolNode(node.pos, value=v)) + for k, v in kwargs.items() ] + + [ (ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)), + ExprNodes.BoolNode(node.pos, value=v)) + for k, v in [('kw_only', kw_only), ('match_args', False), + ('slots', False), ('weakref_slot', False)] + ]) + dataclass_params = make_dataclass_call_helper( + node.pos, dataclass_params_func, dataclass_params_keywords) + dataclass_params_assignment = Nodes.SingleAssignmentNode( + node.pos, + lhs = ExprNodes.NameNode(node.pos, name=EncodedString("__dataclass_params__")), + rhs = dataclass_params) + + dataclass_fields_stats = _set_up_dataclass_fields(node, fields, dataclass_module) + + stats = Nodes.StatListNode(node.pos, + stats=[dataclass_params_assignment] + dataclass_fields_stats) + + code = TemplateCode() + generate_init_code(code, kwargs['init'], node, fields, kw_only) + generate_repr_code(code, kwargs['repr'], node, fields) + generate_eq_code(code, kwargs['eq'], node, fields) + generate_order_code(code, kwargs['order'], node, fields) + generate_hash_code(code, kwargs['unsafe_hash'], kwargs['eq'], kwargs['frozen'], node, fields) + + stats.stats += code.generate_tree().stats + + # turn off annotation typing, so all arguments to __init__ are accepted as + # generic objects and thus can accept _HAS_DEFAULT_FACTORY. + # Type conversion comes later + comp_directives = Nodes.CompilerDirectivesNode(node.pos, + directives=copy_inherited_directives(node.scope.directives, annotation_typing=False), + body=stats) + + comp_directives.analyse_declarations(node.scope) + # probably already in this scope, but it doesn't hurt to make sure + analyse_decs_transform.enter_scope(node, node.scope) + analyse_decs_transform.visit(comp_directives) + analyse_decs_transform.exit_scope() + + node.body.stats.append(comp_directives) + + +def generate_init_code(code, init, node, fields, kw_only): + """ + Notes on CPython generated "__init__": + * Implemented in `_init_fn`. + * The use of the `dataclasses._HAS_DEFAULT_FACTORY` sentinel value as + the default argument for fields that need constructing with a factory + function is copied from the CPython implementation. (`None` isn't + suitable because it could also be a value for the user to pass.) + There's no real reason why it needs importing from the dataclasses module + though - it could equally be a value generated by Cython when the module loads. + * seen_default and the associated error message are copied directly from Python + * Call to user-defined __post_init__ function (if it exists) is copied from + CPython. + + Cython behaviour deviates a little here (to be decided if this is right...) + Because the class variable from the assignment does not exist Cython fields will + return None (or whatever their type default is) if not initialized while Python + dataclasses will fall back to looking up the class variable. 
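+
+    A rough sketch of the generated function for fields ``a: int = 1`` and
+    ``b: list = field(default_factory=list)`` (the sentinel/placeholder names
+    here are illustrative; the real code uses mangled placeholder names):
+
+        def __init__(self, a = 1, b = _HAS_DEFAULT_FACTORY):
+            self.a = a
+            self.b = list() if b is _HAS_DEFAULT_FACTORY else b
+            self.__post_init__()   # only emitted if the user defined one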
+ """ + if not init or node.scope.lookup_here("__init__"): + return + + # selfname behaviour copied from the cpython module + selfname = "__dataclass_self__" if "self" in fields else "self" + args = [selfname] + + if kw_only: + args.append("*") + + function_start_point = code.insertion_point() + code = code.insertion_point() + + # create a temp to get _HAS_DEFAULT_FACTORY + dataclass_module = make_dataclasses_module_callnode(node.pos) + has_default_factory = ExprNodes.AttributeNode( + node.pos, + obj=dataclass_module, + attribute=EncodedString("_HAS_DEFAULT_FACTORY") + ) + + default_factory_placeholder = code.new_placeholder(fields, has_default_factory) + + seen_default = False + for name, field in fields.items(): + entry = node.scope.lookup(name) + if entry.annotation: + annotation = u": %s" % entry.annotation.string.value + else: + annotation = u"" + assignment = u'' + if field.default is not MISSING or field.default_factory is not MISSING: + seen_default = True + if field.default_factory is not MISSING: + ph_name = default_factory_placeholder + else: + ph_name = code.new_placeholder(fields, field.default) # 'default' should be a node + assignment = u" = %s" % ph_name + elif seen_default and not kw_only and field.init.value: + error(entry.pos, ("non-default argument '%s' follows default argument " + "in dataclass __init__") % name) + code.reset() + return + + if field.init.value: + args.append(u"%s%s%s" % (name, annotation, assignment)) + + if field.is_initvar: + continue + elif field.default_factory is MISSING: + if field.init.value: + code.add_code_line(u" %s.%s = %s" % (selfname, name, name)) + elif assignment: + # not an argument to the function, but is still initialized + code.add_code_line(u" %s.%s%s" % (selfname, name, assignment)) + else: + ph_name = code.new_placeholder(fields, field.default_factory) + if field.init.value: + # close to: + # def __init__(self, name=_PLACEHOLDER_VALUE): + # self.name = name_default_factory() if name is _PLACEHOLDER_VALUE else name + code.add_code_line(u" %s.%s = %s() if %s is %s else %s" % ( + selfname, name, ph_name, name, default_factory_placeholder, name)) + else: + # still need to use the default factory to initialize + code.add_code_line(u" %s.%s = %s()" % ( + selfname, name, ph_name)) + + if node.scope.lookup("__post_init__"): + post_init_vars = ", ".join(name for name, field in fields.items() + if field.is_initvar) + code.add_code_line(" %s.__post_init__(%s)" % (selfname, post_init_vars)) + + if code.empty(): + code.add_code_line(" pass") + + args = u", ".join(args) + function_start_point.add_code_line(u"def __init__(%s):" % args) + + +def generate_repr_code(code, repr, node, fields): + """ + The core of the CPython implementation is just: + ['return self.__class__.__qualname__ + f"(' + + ', '.join([f"{f.name}={{self.{f.name}!r}}" + for f in fields]) + + ')"'], + + The only notable difference here is self.__class__.__qualname__ -> type(self).__name__ + which is because Cython currently supports Python 2. + + However, it also has some guards for recursive repr invocations. In the standard + library implementation they're done with a wrapper decorator that captures a set + (with the set keyed by id and thread). Here we create a set as a thread local + variable and key only by id. + """ + if not repr or node.scope.lookup("__repr__"): + return + + # The recursive guard is likely a little costly, so skip it if possible. 
+ # is_gc_simple defines where it can contain recursive objects + needs_recursive_guard = False + for name in fields.keys(): + entry = node.scope.lookup(name) + type_ = entry.type + if type_.is_memoryviewslice: + type_ = type_.dtype + if not type_.is_pyobject: + continue # no GC + if not type_.is_gc_simple: + needs_recursive_guard = True + break + + if needs_recursive_guard: + code.add_code_line("__pyx_recursive_repr_guard = __import__('threading').local()") + code.add_code_line("__pyx_recursive_repr_guard.running = set()") + code.add_code_line("def __repr__(self):") + if needs_recursive_guard: + code.add_code_line(" key = id(self)") + code.add_code_line(" guard_set = self.__pyx_recursive_repr_guard.running") + code.add_code_line(" if key in guard_set: return '...'") + code.add_code_line(" guard_set.add(key)") + code.add_code_line(" try:") + strs = [u"%s={self.%s!r}" % (name, name) + for name, field in fields.items() + if field.repr.value and not field.is_initvar] + format_string = u", ".join(strs) + + code.add_code_line(u' name = getattr(type(self), "__qualname__", type(self).__name__)') + code.add_code_line(u" return f'{name}(%s)'" % format_string) + if needs_recursive_guard: + code.add_code_line(" finally:") + code.add_code_line(" guard_set.remove(key)") + + +def generate_cmp_code(code, op, funcname, node, fields): + if node.scope.lookup_here(funcname): + return + + names = [name for name, field in fields.items() if (field.compare.value and not field.is_initvar)] + + code.add_code_lines([ + "def %s(self, other):" % funcname, + " if other.__class__ is not self.__class__:" + " return NotImplemented", + # + " cdef %s other_cast" % node.class_name, + " other_cast = <%s>other" % node.class_name, + ]) + + # The Python implementation of dataclasses.py does a tuple comparison + # (roughly): + # return self._attributes_to_tuple() {op} other._attributes_to_tuple() + # + # For the Cython implementation a tuple comparison isn't an option because + # not all attributes can be converted to Python objects and stored in a tuple + # + # TODO - better diagnostics of whether the types support comparison before + # generating the code. Plus, do we want to convert C structs to dicts and + # compare them that way (I think not, but it might be in demand)? 
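+    # A sketch of what this emits for a class C with fields (x, y) and op "<"
+    # (illustrative only):
+    #     def __lt__(self, other):
+    #         if other.__class__ is not self.__class__:        return NotImplemented
+    #         cdef C other_cast
+    #         other_cast = <C>other
+    #         if self.x < other_cast.x: return True
+    #         if self.x != other_cast.x: return False
+    #         if self.y < other_cast.y: return True
+    #         if self.y != other_cast.y: return False
+    #         return False    # for "<="/">=" this would instead be "return True"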
+ checks = [] + op_without_equals = op.replace('=', '') + + for name in names: + if op != '==': + # tuple comparison rules - early elements take precedence + code.add_code_line(" if self.%s %s other_cast.%s: return True" % ( + name, op_without_equals, name)) + code.add_code_line(" if self.%s != other_cast.%s: return False" % ( + name, name)) + if "=" in op: + code.add_code_line(" return True") # "() == ()" is True + else: + code.add_code_line(" return False") + + +def generate_eq_code(code, eq, node, fields): + if not eq: + return + generate_cmp_code(code, "==", "__eq__", node, fields) + + +def generate_order_code(code, order, node, fields): + if not order: + return + + for op, name in [("<", "__lt__"), + ("<=", "__le__"), + (">", "__gt__"), + (">=", "__ge__")]: + generate_cmp_code(code, op, name, node, fields) + + +def generate_hash_code(code, unsafe_hash, eq, frozen, node, fields): + """ + Copied from CPython implementation - the intention is to follow this as far as + is possible: + # +------------------- unsafe_hash= parameter + # | +----------- eq= parameter + # | | +--- frozen= parameter + # | | | + # v v v | | | + # | no | yes | <--- class has explicitly defined __hash__ + # +=======+=======+=======+========+========+ + # | False | False | False | | | No __eq__, use the base class __hash__ + # +-------+-------+-------+--------+--------+ + # | False | False | True | | | No __eq__, use the base class __hash__ + # +-------+-------+-------+--------+--------+ + # | False | True | False | None | | <-- the default, not hashable + # +-------+-------+-------+--------+--------+ + # | False | True | True | add | | Frozen, so hashable, allows override + # +-------+-------+-------+--------+--------+ + # | True | False | False | add | raise | Has no __eq__, but hashable + # +-------+-------+-------+--------+--------+ + # | True | False | True | add | raise | Has no __eq__, but hashable + # +-------+-------+-------+--------+--------+ + # | True | True | False | add | raise | Not frozen, but hashable + # +-------+-------+-------+--------+--------+ + # | True | True | True | add | raise | Frozen, so hashable + # +=======+=======+=======+========+========+ + # For boxes that are blank, __hash__ is untouched and therefore + # inherited from the base class. If the base is object, then + # id-based hashing is used. + + The Python implementation creates a tuple of all the fields, then hashes them. + This implementation creates a tuple of all the hashes of all the fields and hashes that. + The reason for this slight difference is to avoid to-Python conversions for anything + that Cython knows how to hash directly (It doesn't look like this currently applies to + anything though...). 
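+
+    A sketch of the generated method for hashed fields a and b (illustrative):
+
+        def __hash__(self):
+            return hash((self.a, self.b,))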
+ """ + + hash_entry = node.scope.lookup_here("__hash__") + if hash_entry: + # TODO ideally assignment of __hash__ to None shouldn't trigger this + # but difficult to get the right information here + if unsafe_hash: + # error message taken from CPython dataclasses module + error(node.pos, "Cannot overwrite attribute __hash__ in class %s" % node.class_name) + return + + if not unsafe_hash: + if not eq: + return + if not frozen: + code.add_extra_statements([ + Nodes.SingleAssignmentNode( + node.pos, + lhs=ExprNodes.NameNode(node.pos, name=EncodedString("__hash__")), + rhs=ExprNodes.NoneNode(node.pos), + ) + ]) + return + + names = [ + name for name, field in fields.items() + if not field.is_initvar and ( + field.compare.value if field.hash.value is None else field.hash.value) + ] + + # make a tuple of the hashes + hash_tuple_items = u", ".join(u"self.%s" % name for name in names) + if hash_tuple_items: + hash_tuple_items += u"," # ensure that one arg form is a tuple + + # if we're here we want to generate a hash + code.add_code_lines([ + "def __hash__(self):", + " return hash((%s))" % hash_tuple_items, + ]) + + +def get_field_type(pos, entry): + """ + sets the .type attribute for a field + + Returns the annotation if possible (since this is what the dataclasses + module does). If not (for example, attributes defined with cdef) then + it creates a string fallback. + """ + if entry.annotation: + # Right now it doesn't look like cdef classes generate an + # __annotations__ dict, therefore it's safe to just return + # entry.annotation + # (TODO: remove .string if we ditch PEP563) + return entry.annotation.string + # If they do in future then we may need to look up into that + # to duplicating the node. The code below should do this: + #class_name_node = ExprNodes.NameNode(pos, name=entry.scope.name) + #annotations = ExprNodes.AttributeNode( + # pos, obj=class_name_node, + # attribute=EncodedString("__annotations__") + #) + #return ExprNodes.IndexNode( + # pos, base=annotations, + # index=ExprNodes.StringNode(pos, value=entry.name) + #) + else: + # it's slightly unclear what the best option is here - we could + # try to return PyType_Type. This case should only happen with + # attributes defined with cdef so Cython is free to make it's own + # decision + s = EncodedString(entry.type.declaration_code("", for_display=1)) + return ExprNodes.StringNode(pos, value=s) + + +class FieldRecordNode(ExprNodes.ExprNode): + """ + __dataclass_fields__ contains a bunch of field objects recording how each field + of the dataclass was initialized (mainly corresponding to the arguments passed to + the "field" function). This node is used for the attributes of these field objects. + + If possible, coerces `arg` to a Python object. + Otherwise, generates a sensible backup string. + """ + subexprs = ['arg'] + + def __init__(self, pos, arg): + super(FieldRecordNode, self).__init__(pos, arg=arg) + + def analyse_types(self, env): + self.arg.analyse_types(env) + self.type = self.arg.type + return self + + def coerce_to_pyobject(self, env): + if self.arg.type.can_coerce_to_pyobject(env): + return self.arg.coerce_to_pyobject(env) + else: + # A string representation of the code that gave the field seems like a reasonable + # fallback. This'll mostly happen for "default" and "default_factory" where the + # type may be a C-type that can't be converted to Python. 
+ return self._make_string() + + def _make_string(self): + from .AutoDocTransforms import AnnotationWriter + writer = AnnotationWriter(description="Dataclass field") + string = writer.write(self.arg) + return ExprNodes.StringNode(self.pos, value=EncodedString(string)) + + def generate_evaluation_code(self, code): + return self.arg.generate_evaluation_code(code) + + +def _set_up_dataclass_fields(node, fields, dataclass_module): + # For defaults and default_factories containing things like lambda, + # they're already declared in the class scope, and it creates a big + # problem if multiple copies are floating around in both the __init__ + # function, and in the __dataclass_fields__ structure. + # Therefore, create module-level constants holding these values and + # pass those around instead + # + # If possible we use the `Field` class defined in the standard library + # module so that the information stored here is as close to a regular + # dataclass as is possible. + variables_assignment_stats = [] + for name, field in fields.items(): + if field.private: + continue # doesn't appear in the public interface + for attrname in [ "default", "default_factory" ]: + field_default = getattr(field, attrname) + if field_default is MISSING or field_default.is_literal or field_default.is_name: + # some simple cases where we don't need to set up + # the variable as a module-level constant + continue + global_scope = node.scope.global_scope() + module_field_name = global_scope.mangle( + global_scope.mangle(Naming.dataclass_field_default_cname, node.class_name), + name) + # create an entry in the global scope for this variable to live + field_node = ExprNodes.NameNode(field_default.pos, name=EncodedString(module_field_name)) + field_node.entry = global_scope.declare_var( + field_node.name, type=field_default.type or PyrexTypes.unspecified_type, + pos=field_default.pos, cname=field_node.name, is_cdef=True, + # TODO: do we need to set 'pytyping_modifiers' here? 
+ ) + # replace the field so that future users just receive the namenode + setattr(field, attrname, field_node) + + variables_assignment_stats.append( + Nodes.SingleAssignmentNode(field_default.pos, lhs=field_node, rhs=field_default)) + + placeholders = {} + field_func = ExprNodes.AttributeNode(node.pos, obj=dataclass_module, + attribute=EncodedString("field")) + dc_fields = ExprNodes.DictNode(node.pos, key_value_pairs=[]) + dc_fields_namevalue_assignments = [] + + for name, field in fields.items(): + if field.private: + continue # doesn't appear in the public interface + type_placeholder_name = "PLACEHOLDER_%s" % name + placeholders[type_placeholder_name] = get_field_type( + node.pos, node.scope.entries[name] + ) + + # defining these make the fields introspect more like a Python dataclass + field_type_placeholder_name = "PLACEHOLDER_FIELD_TYPE_%s" % name + if field.is_initvar: + placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode( + node.pos, obj=dataclass_module, + attribute=EncodedString("_FIELD_INITVAR") + ) + elif field.is_classvar: + # TODO - currently this isn't triggered + placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode( + node.pos, obj=dataclass_module, + attribute=EncodedString("_FIELD_CLASSVAR") + ) + else: + placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode( + node.pos, obj=dataclass_module, + attribute=EncodedString("_FIELD") + ) + + dc_field_keywords = ExprNodes.DictNode.from_pairs( + node.pos, + [(ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)), + FieldRecordNode(node.pos, arg=v)) + for k, v in field.iterate_record_node_arguments()] + + ) + dc_field_call = make_dataclass_call_helper( + node.pos, field_func, dc_field_keywords + ) + dc_fields.key_value_pairs.append( + ExprNodes.DictItemNode( + node.pos, + key=ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(name)), + value=dc_field_call)) + dc_fields_namevalue_assignments.append( + dedent(u"""\ + __dataclass_fields__[{0!r}].name = {0!r} + __dataclass_fields__[{0!r}].type = {1} + __dataclass_fields__[{0!r}]._field_type = {2} + """).format(name, type_placeholder_name, field_type_placeholder_name)) + + dataclass_fields_assignment = \ + Nodes.SingleAssignmentNode(node.pos, + lhs = ExprNodes.NameNode(node.pos, + name=EncodedString("__dataclass_fields__")), + rhs = dc_fields) + + dc_fields_namevalue_assignments = u"\n".join(dc_fields_namevalue_assignments) + dc_fields_namevalue_assignments = TreeFragment(dc_fields_namevalue_assignments, + level="c_class", + pipeline=[NormalizeTree(None)]) + dc_fields_namevalue_assignments = dc_fields_namevalue_assignments.substitute(placeholders) + + return (variables_assignment_stats + + [dataclass_fields_assignment] + + dc_fields_namevalue_assignments.stats) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/DebugFlags.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/DebugFlags.py new file mode 100644 index 0000000000000000000000000000000000000000..e830ab1849cf506ec10ab38ebd850a0a398c0431 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/DebugFlags.py @@ -0,0 +1,21 @@ +# Can be enabled at the command line with --debug-xxx. + +debug_disposal_code = 0 +debug_temp_alloc = 0 +debug_coercion = 0 + +# Write comments into the C code that show where temporary variables +# are allocated and released. +debug_temp_code_comments = 0 + +# Write a call trace of the code generation phase into the C code. 
+debug_trace_code_generation = 0 + +# Do not replace exceptions with user-friendly error messages. +debug_no_exception_intercept = 0 + +# Print a message each time a new stage in the pipeline is entered. +debug_verbose_pipeline = 0 + +# Raise an exception when an error is encountered. +debug_exception_on_error = 0 diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Errors.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Errors.py new file mode 100644 index 0000000000000000000000000000000000000000..f3be0fd8b0216f57c46914f8fc421cb181582590 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Errors.py @@ -0,0 +1,300 @@ +# +# Errors +# + +from __future__ import absolute_import + +try: + from __builtin__ import basestring as any_string_type +except ImportError: + any_string_type = (bytes, str) + +import sys +from contextlib import contextmanager + +try: + from threading import local as _threadlocal +except ImportError: + class _threadlocal(object): pass + +threadlocal = _threadlocal() + +from ..Utils import open_new_file +from . import DebugFlags +from . import Options + + +class PyrexError(Exception): + pass + + +class PyrexWarning(Exception): + pass + +class CannotSpecialize(PyrexError): + pass + +def context(position): + source = position[0] + assert not (isinstance(source, any_string_type)), ( + "Please replace filename strings with Scanning.FileSourceDescriptor instances %r" % source) + try: + F = source.get_lines() + except UnicodeDecodeError: + # file has an encoding problem + s = u"[unprintable code]\n" + else: + s = u''.join(F[max(0, position[1]-6):position[1]]) + s = u'...\n%s%s^\n' % (s, u' '*(position[2])) + s = u'%s\n%s%s\n' % (u'-'*60, s, u'-'*60) + return s + +def format_position(position): + if position: + return u"%s:%d:%d: " % (position[0].get_error_description(), + position[1], position[2]) + return u'' + +def format_error(message, position): + if position: + pos_str = format_position(position) + cont = context(position) + message = u'\nError compiling Cython file:\n%s\n%s%s' % (cont, pos_str, message or u'') + return message + +class CompileError(PyrexError): + + def __init__(self, position = None, message = u""): + self.position = position + self.message_only = message + self.formatted_message = format_error(message, position) + self.reported = False + Exception.__init__(self, self.formatted_message) + # Python Exception subclass pickling is broken, + # see https://bugs.python.org/issue1692335 + self.args = (position, message) + + def __str__(self): + return self.formatted_message + +class CompileWarning(PyrexWarning): + + def __init__(self, position = None, message = ""): + self.position = position + Exception.__init__(self, format_position(position) + message) + +class InternalError(Exception): + # If this is ever raised, there is a bug in the compiler. + + def __init__(self, message): + self.message_only = message + Exception.__init__(self, u"Internal compiler error: %s" + % message) + +class AbortError(Exception): + # Throw this to stop the compilation immediately. 
+ + def __init__(self, message): + self.message_only = message + Exception.__init__(self, u"Abort error: %s" % message) + +class CompilerCrash(CompileError): + # raised when an unexpected exception occurs in a transform + def __init__(self, pos, context, message, cause, stacktrace=None): + if message: + message = u'\n' + message + else: + message = u'\n' + self.message_only = message + if context: + message = u"Compiler crash in %s%s" % (context, message) + if stacktrace: + import traceback + message += ( + u'\n\nCompiler crash traceback from this point on:\n' + + u''.join(traceback.format_tb(stacktrace))) + if cause: + if not stacktrace: + message += u'\n' + message += u'%s: %s' % (cause.__class__.__name__, cause) + CompileError.__init__(self, pos, message) + # Python Exception subclass pickling is broken, + # see https://bugs.python.org/issue1692335 + self.args = (pos, context, message, cause, stacktrace) + +class NoElementTreeInstalledException(PyrexError): + """raised when the user enabled options.gdb_debug but no ElementTree + implementation was found + """ + +def open_listing_file(path, echo_to_stderr=True): + # Begin a new error listing. If path is None, no file + # is opened, the error counter is just reset. + if path is not None: + threadlocal.cython_errors_listing_file = open_new_file(path) + else: + threadlocal.cython_errors_listing_file = None + if echo_to_stderr: + threadlocal.cython_errors_echo_file = sys.stderr + else: + threadlocal.cython_errors_echo_file = None + threadlocal.cython_errors_count = 0 + +def close_listing_file(): + if threadlocal.cython_errors_listing_file: + threadlocal.cython_errors_listing_file.close() + threadlocal.cython_errors_listing_file = None + +def report_error(err, use_stack=True): + error_stack = threadlocal.cython_errors_stack + if error_stack and use_stack: + error_stack[-1].append(err) + else: + # See Main.py for why dual reporting occurs. Quick fix for now. 
+ if err.reported: return + err.reported = True + try: line = u"%s\n" % err + except UnicodeEncodeError: + # Python <= 2.5 does this for non-ASCII Unicode exceptions + line = format_error(getattr(err, 'message_only', "[unprintable exception message]"), + getattr(err, 'position', None)) + u'\n' + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + try: listing_file.write(line) + except UnicodeEncodeError: + listing_file.write(line.encode('ASCII', 'replace')) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + try: echo_file.write(line) + except UnicodeEncodeError: + echo_file.write(line.encode('ASCII', 'replace')) + threadlocal.cython_errors_count += 1 + if Options.fast_fail: + raise AbortError("fatal errors") + +def error(position, message): + #print("Errors.error:", repr(position), repr(message)) ### + if position is None: + raise InternalError(message) + err = CompileError(position, message) + if DebugFlags.debug_exception_on_error: raise Exception(err) # debug + report_error(err) + return err + + +LEVEL = 1 # warn about all errors level 1 or higher + +def _write_file_encode(file, line): + try: + file.write(line) + except UnicodeEncodeError: + file.write(line.encode('ascii', 'replace')) + + +def performance_hint(position, message, env): + if not env.directives['show_performance_hints']: + return + warn = CompileWarning(position, message) + line = "performance hint: %s\n" % warn + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + _write_file_encode(listing_file, line) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + _write_file_encode(echo_file, line) + return warn + + +def message(position, message, level=1): + if level < LEVEL: + return + warn = CompileWarning(position, message) + line = u"note: %s\n" % warn + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + _write_file_encode(listing_file, line) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + _write_file_encode(echo_file, line) + return warn + + +def warning(position, message, level=0): + if level < LEVEL: + return + if Options.warning_errors and position: + return error(position, message) + warn = CompileWarning(position, message) + line = u"warning: %s\n" % warn + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + _write_file_encode(listing_file, line) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + _write_file_encode(echo_file, line) + return warn + + +def warn_once(position, message, level=0): + if level < LEVEL: + return + warn_once_seen = threadlocal.cython_errors_warn_once_seen + if message in warn_once_seen: + return + warn = CompileWarning(position, message) + line = u"warning: %s\n" % warn + listing_file = threadlocal.cython_errors_listing_file + if listing_file: + _write_file_encode(listing_file, line) + echo_file = threadlocal.cython_errors_echo_file + if echo_file: + _write_file_encode(echo_file, line) + warn_once_seen.add(message) + return warn + + +# These functions can be used to momentarily suppress errors. 
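+# A usage sketch (the expression and environment names are illustrative):
+#
+#     with local_errors(ignore=True) as errs:
+#         node = expr.analyse_types(env)   # error() calls are collected in errs
+#     if errs:
+#         pass  # fall back to another strategy instead of reporting them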
+ +def hold_errors(): + errors = [] + threadlocal.cython_errors_stack.append(errors) + return errors + + +def release_errors(ignore=False): + held_errors = threadlocal.cython_errors_stack.pop() + if not ignore: + for err in held_errors: + report_error(err) + + +def held_errors(): + return threadlocal.cython_errors_stack[-1] + + +# same as context manager: + +@contextmanager +def local_errors(ignore=False): + errors = hold_errors() + try: + yield errors + finally: + release_errors(ignore=ignore) + + +# Keep all global state in thread local storage to support parallel cythonisation in distutils. + +def init_thread(): + threadlocal.cython_errors_count = 0 + threadlocal.cython_errors_listing_file = None + threadlocal.cython_errors_echo_file = None + threadlocal.cython_errors_warn_once_seen = set() + threadlocal.cython_errors_stack = [] + +def reset(): + threadlocal.cython_errors_warn_once_seen.clear() + del threadlocal.cython_errors_stack[:] + +def get_errors_count(): + return threadlocal.cython_errors_count diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/ExprNodes.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/ExprNodes.py new file mode 100644 index 0000000000000000000000000000000000000000..775d2f582fd2db8cfade96cce0719d21b184ce7a --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/ExprNodes.py @@ -0,0 +1,14743 @@ +# +# Parse tree nodes for expressions +# + +from __future__ import absolute_import + +import cython +cython.declare(error=object, warning=object, warn_once=object, InternalError=object, + CompileError=object, UtilityCode=object, TempitaUtilityCode=object, + StringEncoding=object, operator=object, local_errors=object, report_error=object, + Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object, + list_type=object, tuple_type=object, set_type=object, dict_type=object, + unicode_type=object, str_type=object, bytes_type=object, type_type=object, + Builtin=object, Symtab=object, Utils=object, find_coercion_error=object, + debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object, + bytearray_type=object, slice_type=object, memoryview_type=object, + builtin_sequence_types=object, _py_int_types=object, + IS_PYTHON3=cython.bint) + +import re +import sys +import copy +import os.path +import operator + +from .Errors import ( + error, warning, InternalError, CompileError, report_error, local_errors, + CannotSpecialize, performance_hint) +from .Code import UtilityCode, TempitaUtilityCode +from . import StringEncoding +from . import Naming +from . import Nodes +from .Nodes import Node, utility_code_for_imports, SingleAssignmentNode +from . import PyrexTypes +from .PyrexTypes import py_object_type, typecast, error_type, \ + unspecified_type +from . import TypeSlots +from .Builtin import ( + list_type, tuple_type, set_type, dict_type, type_type, + unicode_type, str_type, bytes_type, bytearray_type, basestring_type, + slice_type, long_type, sequence_types as builtin_sequence_types, memoryview_type, +) +from . import Builtin +from . import Symtab +from .. import Utils +from .Annotate import AnnotationItem +from . 
import Future +from ..Debugging import print_call_chain +from .DebugFlags import debug_disposal_code, debug_coercion + +from .Pythran import (to_pythran, is_pythran_supported_type, is_pythran_supported_operation_type, + is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran, + pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type, + pythran_is_numpy_func_supported, pythran_get_func_include_file, pythran_functor) +from .PyrexTypes import PythranExpr + +try: + from __builtin__ import basestring +except ImportError: + # Python 3 + basestring = str + any_string_type = (bytes, str) +else: + # Python 2 + any_string_type = (bytes, unicode) + + +if sys.version_info[0] >= 3: + IS_PYTHON3 = True + _py_int_types = int +else: + IS_PYTHON3 = False + _py_int_types = (int, long) + + +class NotConstant(object): + _obj = None + + def __new__(cls): + if NotConstant._obj is None: + NotConstant._obj = super(NotConstant, cls).__new__(cls) + + return NotConstant._obj + + def __repr__(self): + return "<NOT CONSTANT>" + +not_a_constant = NotConstant() +constant_value_not_set = object() + +# error messages when coercing from key[0] to key[1] +coercion_error_dict = { + # string related errors + (unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly." + " This is not portable and requires explicit encoding."), + (unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.", + (unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", + (unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", + (unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", + (unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.", + (bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required", + (bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.", + (bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly." + " This is not portable to Py3."), + (bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.", + (bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): ( + "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."), + (basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.", + (str_type, unicode_type): ("str objects do not support coercion to unicode," + " use a unicode string literal instead (u'')"), + (str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. 
This is not portable.", + (str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", + (str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", + (str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", + (str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).", + (str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).", + (str_type, PyrexTypes.c_const_py_unicode_ptr_type): ( + "'str' objects do not support coercion to C types (use 'unicode'?)."), + (PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required", + (PyrexTypes.c_const_char_ptr_type, unicode_type): ( + "Cannot convert 'char*' to unicode implicitly, decoding required"), + (PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required", + (PyrexTypes.c_const_uchar_ptr_type, unicode_type): ( + "Cannot convert 'char*' to unicode implicitly, decoding required"), +} + +def find_coercion_error(type_tuple, default, env): + err = coercion_error_dict.get(type_tuple) + if err is None: + return default + elif (env.directives['c_string_encoding'] and + any(t in type_tuple for t in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type, + PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type))): + if type_tuple[1].is_pyobject: + return default + elif env.directives['c_string_encoding'] in ('ascii', 'default'): + return default + else: + return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name + else: + return err + + +def default_str_type(env): + return { + 'bytes': bytes_type, + 'bytearray': bytearray_type, + 'str': str_type, + 'unicode': unicode_type + }.get(env.directives['c_string_type']) + + +def check_negative_indices(*nodes): + """ + Raise a warning on nodes that are known to have negative numeric values. + Used to find (potential) bugs inside of "wraparound=False" sections. + """ + for node in nodes: + if node is None or ( + not isinstance(node.constant_result, _py_int_types) and + not isinstance(node.constant_result, float)): + continue + if node.constant_result < 0: + warning(node.pos, + "the result of using negative indices inside of " + "code sections marked as 'wraparound=False' is " + "undefined", level=1) + + +def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None): + if not seq_node.is_sequence_constructor: + if seq_type is None: + seq_type = seq_node.infer_type(env) + if seq_type is tuple_type: + # tuples are immutable => we can safely follow assignments + if seq_node.cf_state and len(seq_node.cf_state) == 1: + try: + seq_node = seq_node.cf_state[0].rhs + except AttributeError: + pass + if seq_node is not None and seq_node.is_sequence_constructor: + if index_node is not None and index_node.has_constant_result(): + try: + item = seq_node.args[index_node.constant_result] + except (ValueError, TypeError, IndexError): + pass + else: + return item.infer_type(env) + # if we're lucky, all items have the same type + item_types = {item.infer_type(env) for item in seq_node.args} + if len(item_types) == 1: + return item_types.pop() + return None + + +def make_dedup_key(outer_type, item_nodes): + """ + Recursively generate a deduplication key from a sequence of values. 
+ Includes Cython node types to work around the fact that (1, 2.0) == (1.0, 2), for example. + + @param outer_type: The type of the outer container. + @param item_nodes: A sequence of constant nodes that will be traversed recursively. + @return: A tuple that can be used as a dict key for deduplication. + """ + item_keys = [ + (py_object_type, None, type(None)) if node is None + # For sequences and their "mult_factor", see TupleNode. + else make_dedup_key(node.type, [node.mult_factor if node.is_literal else None] + node.args) if node.is_sequence_constructor + else make_dedup_key(node.type, (node.start, node.stop, node.step)) if node.is_slice + # For constants, look at the Python value type if we don't know the concrete Cython type. + else (node.type, node.constant_result, + type(node.constant_result) if node.type is py_object_type else None) if node.has_constant_result() + # IdentifierStringNode doesn't usually have a "constant_result" set because: + # 1. it doesn't usually have unicode_value + # 2. it's often created later in the compilation process after ConstantFolding + # but should be cacheable + else (node.type, node.value, node.unicode_value, "IdentifierStringNode") if isinstance(node, IdentifierStringNode) + else None # something we cannot handle => short-circuit below + for node in item_nodes + ] + if None in item_keys: + return None + return outer_type, tuple(item_keys) + + +# Returns a block of code to translate the exception, +# plus a boolean indicating whether to check for Python exceptions. +def get_exception_handler(exception_value): + if exception_value is None: + return "__Pyx_CppExn2PyErr();", False + elif (exception_value.type == PyrexTypes.c_char_type + and exception_value.value == '*'): + return "__Pyx_CppExn2PyErr();", True + elif exception_value.type.is_pyobject: + return ( + 'try { throw; } catch(const std::exception& exn) {' + 'PyErr_SetString(%s, exn.what());' + '} catch(...) { PyErr_SetNone(%s); }' % ( + exception_value.entry.cname, + exception_value.entry.cname), + False) + else: + return ( + '%s(); if (!PyErr_Occurred())' + 'PyErr_SetString(PyExc_RuntimeError, ' + '"Error converting c++ exception.");' % ( + exception_value.entry.cname), + False) + + +def maybe_check_py_error(code, check_py_exception, pos, nogil): + if check_py_exception: + if nogil: + code.globalstate.use_utility_code( + UtilityCode.load_cached("ErrOccurredWithGIL", "Exceptions.c")) + code.putln(code.error_goto_if("__Pyx_ErrOccurredWithGIL()", pos)) + else: + code.putln(code.error_goto_if("PyErr_Occurred()", pos)) + + +def translate_cpp_exception(code, pos, inside, py_result, exception_value, nogil): + raise_py_exception, check_py_exception = get_exception_handler(exception_value) + code.putln("try {") + code.putln("%s" % inside) + if py_result: + code.putln(code.error_goto_if_null(py_result, pos)) + maybe_check_py_error(code, check_py_exception, pos, nogil) + code.putln("} catch(...) 
{") + if nogil: + code.put_ensure_gil(declare_gilstate=True) + code.putln(raise_py_exception) + if nogil: + code.put_release_ensured_gil() + code.putln(code.error_goto(pos)) + code.putln("}") + +def needs_cpp_exception_conversion(node): + assert node.exception_check == "+" + if node.exception_value is None: + return True + # exception_value can be a NameNode + # (in which case it's used as a handler function and no conversion is needed) + if node.exception_value.is_name: + return False + # or a CharNode with a value of "*" + if isinstance(node.exception_value, CharNode) and node.exception_value.value == "*": + return True + # Most other const-nodes are disallowed after "+" by the parser + return False + + +# Used to handle the case where an lvalue expression and an overloaded assignment +# both have an exception declaration. +def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code, lhs_exc_val, assign_exc_val, nogil): + handle_lhs_exc, lhc_check_py_exc = get_exception_handler(lhs_exc_val) + handle_assignment_exc, assignment_check_py_exc = get_exception_handler(assign_exc_val) + code.putln("try {") + code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code)) + maybe_check_py_error(code, lhc_check_py_exc, pos, nogil) + code.putln("try {") + code.putln("__pyx_local_lvalue = %s;" % rhs_code) + maybe_check_py_error(code, assignment_check_py_exc, pos, nogil) + # Catch any exception from the overloaded assignment. + code.putln("} catch(...) {") + if nogil: + code.put_ensure_gil(declare_gilstate=True) + code.putln(handle_assignment_exc) + if nogil: + code.put_release_ensured_gil() + code.putln(code.error_goto(pos)) + code.putln("}") + # Catch any exception from evaluating lhs. + code.putln("} catch(...) {") + if nogil: + code.put_ensure_gil(declare_gilstate=True) + code.putln(handle_lhs_exc) + if nogil: + code.put_release_ensured_gil() + code.putln(code.error_goto(pos)) + code.putln('}') + + +class ExprNode(Node): + # subexprs [string] Class var holding names of subexpr node attrs + # type PyrexType Type of the result + # result_code string Code fragment + # result_ctype string C type of result_code if different from type + # is_temp boolean Result is in a temporary variable + # is_sequence_constructor + # boolean Is a list or tuple constructor expression + # is_starred boolean Is a starred expression (e.g. '*a') + # use_managed_ref boolean use ref-counted temps/assignments/etc. + # result_is_used boolean indicates that the result will be dropped and the + # result_code/temp_result can safely be set to None + # is_numpy_attribute boolean Is a Numpy module attribute + # annotation ExprNode or None PEP526 annotation for names or expressions + # generator_arg_tag None or Node A tag to mark ExprNodes that potentially need to + # be changed to a generator argument + + result_ctype = None + type = None + annotation = None + temp_code = None + old_temp = None # error checker for multiple frees etc. + use_managed_ref = True # can be set by optimisation transforms + result_is_used = True + is_numpy_attribute = False + generator_arg_tag = None + + # The Analyse Expressions phase for expressions is split + # into two sub-phases: + # + # Analyse Types + # Determines the result type of the expression based + # on the types of its sub-expressions, and inserts + # coercion nodes into the expression tree where needed. + # Marks nodes which will need to have temporary variables + # allocated. 
+ # + # Allocate Temps + # Allocates temporary variables where needed, and fills + # in the result_code field of each node. + # + # ExprNode provides some convenience routines which + # perform both of the above phases. These should only + # be called from statement nodes, and only when no + # coercion nodes need to be added around the expression + # being analysed. In that case, the above two phases + # should be invoked separately. + # + # Framework code in ExprNode provides much of the common + # processing for the various phases. It makes use of the + # 'subexprs' class attribute of ExprNodes, which should + # contain a list of the names of attributes which can + # hold sub-nodes or sequences of sub-nodes. + # + # The framework makes use of a number of abstract methods. + # Their responsibilities are as follows. + # + # Declaration Analysis phase + # + # analyse_target_declaration + # Called during the Analyse Declarations phase to analyse + # the LHS of an assignment or argument of a del statement. + # Nodes which cannot be the LHS of an assignment need not + # implement it. + # + # Expression Analysis phase + # + # analyse_types + # - Call analyse_types on all sub-expressions. + # - Check operand types, and wrap coercion nodes around + # sub-expressions where needed. + # - Set the type of this node. + # - If a temporary variable will be required for the + # result, set the is_temp flag of this node. + # + # analyse_target_types + # Called during the Analyse Types phase to analyse + # the LHS of an assignment or argument of a del + # statement. Similar responsibilities to analyse_types. + # + # target_code + # Called by the default implementation of allocate_target_temps. + # Should return a C lvalue for assigning to the node. The default + # implementation calls calculate_result_code. + # + # check_const + # - Check that this node and its subnodes form a + # legal constant expression. If so, do nothing, + # otherwise call not_const. + # + # The default implementation of check_const + # assumes that the expression is not constant. + # + # check_const_addr + # - Same as check_const, except check that the + # expression is a C lvalue whose address is + # constant. Otherwise, call addr_not_const. + # + # The default implementation of calc_const_addr + # assumes that the expression is not a constant + # lvalue. + # + # Code Generation phase + # + # generate_evaluation_code + # - Call generate_evaluation_code for sub-expressions. + # - Perform the functions of generate_result_code + # (see below). + # - If result is temporary, call generate_disposal_code + # on all sub-expressions. + # + # A default implementation of generate_evaluation_code + # is provided which uses the following abstract methods: + # + # generate_result_code + # - Generate any C statements necessary to calculate + # the result of this node from the results of its + # sub-expressions. + # + # calculate_result_code + # - Should return a C code fragment evaluating to the + # result. This is only called when the result is not + # a temporary. + # + # generate_assignment_code + # Called on the LHS of an assignment. + # - Call generate_evaluation_code for sub-expressions. + # - Generate code to perform the assignment. + # - If the assignment absorbed a reference, call + # generate_post_assignment_code on the RHS, + # otherwise call generate_disposal_code on it. + # + # generate_deletion_code + # Called on an argument of a del statement. + # - Call generate_evaluation_code for sub-expressions. 
+ # - Generate code to perform the deletion. + # - Call generate_disposal_code on all sub-expressions. + # + # + + is_sequence_constructor = False + is_dict_literal = False + is_set_literal = False + is_string_literal = False + is_attribute = False + is_subscript = False + is_slice = False + + is_buffer_access = False + is_memview_index = False + is_memview_slice = False + is_memview_broadcast = False + is_memview_copy_assignment = False + + is_temp = False + has_temp_moved = False # if True then attempting to do anything but free the temp is invalid + is_target = False + is_starred = False + + constant_result = constant_value_not_set + + child_attrs = property(fget=operator.attrgetter('subexprs')) + + def analyse_annotations(self, env): + pass + + def not_implemented(self, method_name): + print_call_chain(method_name, "not implemented") + raise InternalError( + "%s.%s not implemented" % (self.__class__.__name__, method_name)) + + def is_lvalue(self): + return 0 + + def is_addressable(self): + return self.is_lvalue() and not self.type.is_memoryviewslice + + def is_ephemeral(self): + # An ephemeral node is one whose result is in + # a Python temporary and we suspect there are no + # other references to it. Certain operations are + # disallowed on such values, since they are + # likely to result in a dangling pointer. + return self.type.is_pyobject and self.is_temp + + def subexpr_nodes(self): + # Extract a list of subexpression nodes based + # on the contents of the subexprs class attribute. + nodes = [] + for name in self.subexprs: + item = getattr(self, name) + if item is not None: + if type(item) is list: + nodes.extend(item) + else: + nodes.append(item) + return nodes + + def result(self): + if self.is_temp: + #if not self.temp_code: + # pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)' + # raise RuntimeError("temp result name not set in %s at %r" % ( + # self.__class__.__name__, pos)) + return self.temp_code + else: + return self.calculate_result_code() + + def _make_move_result_rhs(self, result, optional=False): + if optional and not (self.is_temp and self.type.is_cpp_class and not self.type.is_reference): + return result + self.has_temp_moved = True + return "{}({})".format("__PYX_STD_MOVE_IF_SUPPORTED" if optional else "std::move", result) + + def move_result_rhs(self): + return self._make_move_result_rhs(self.result(), optional=True) + + def move_result_rhs_as(self, type): + result = self.result_as(type) + if not (type.is_reference or type.needs_refcounting): + requires_move = type.is_rvalue_reference and self.is_temp + result = self._make_move_result_rhs(result, optional=not requires_move) + return result + + def pythran_result(self, type_=None): + if is_pythran_supported_node_or_none(self): + return to_pythran(self) + + assert type_ is not None + return to_pythran(self, type_) + + def is_c_result_required(self): + """ + Subtypes may return False here if result temp allocation can be skipped. + """ + return True + + def result_as(self, type = None): + # Return the result code cast to the specified C type. + if (self.is_temp and self.type.is_pyobject and + type != py_object_type): + # Allocated temporaries are always PyObject *, which may not + # reflect the actual type (e.g. an extension type) + return typecast(type, py_object_type, self.result()) + return typecast(type, self.ctype(), self.result()) + + def py_result(self): + # Return the result code cast to PyObject *. 
+ return self.result_as(py_object_type) + + def ctype(self): + # Return the native C type of the result (i.e. the + # C type of the result_code expression). + return self.result_ctype or self.type + + def get_constant_c_result_code(self): + # Return the constant value of this node as a result code + # string, or None if the node is not constant. This method + # can be called when the constant result code is required + # before the code generation phase. + # + # The return value is a string that can represent a simple C + # value, a constant C name or a constant C expression. If the + # node type depends on Python code, this must return None. + return None + + def calculate_constant_result(self): + # Calculate the constant compile time result value of this + # expression and store it in ``self.constant_result``. Does + # nothing by default, thus leaving ``self.constant_result`` + # unknown. If valid, the result can be an arbitrary Python + # value. + # + # This must only be called when it is assured that all + # sub-expressions have a valid constant_result value. The + # ConstantFolding transform will do this. + pass + + def has_constant_result(self): + return self.constant_result is not constant_value_not_set and \ + self.constant_result is not not_a_constant + + def compile_time_value(self, denv): + # Return value of compile-time expression, or report error. + error(self.pos, "Invalid compile-time expression") + + def compile_time_value_error(self, e): + error(self.pos, "Error in compile-time expression: %s: %s" % ( + e.__class__.__name__, e)) + + # ------------- Declaration Analysis ---------------- + + def analyse_target_declaration(self, env): + error(self.pos, "Cannot assign to or delete this") + + def analyse_assignment_expression_target_declaration(self, env): + error(self.pos, "Cannot use anything except a name in an assignment expression") + + # ------------- Expression Analysis ---------------- + + def analyse_const_expression(self, env): + # Called during the analyse_declarations phase of a + # constant expression. Analyses the expression's type, + # checks whether it is a legal const expression, + # and determines its value. + node = self.analyse_types(env) + node.check_const() + return node + + def analyse_expressions(self, env): + # Convenience routine performing both the Type + # Analysis and Temp Allocation phases for a whole + # expression. + return self.analyse_types(env) + + def analyse_target_expression(self, env, rhs): + # Convenience routine performing both the Type + # Analysis and Temp Allocation phases for the LHS of + # an assignment. + return self.analyse_target_types(env) + + def analyse_boolean_expression(self, env): + # Analyse expression and coerce to a boolean. + node = self.analyse_types(env) + bool = node.coerce_to_boolean(env) + return bool + + def analyse_temp_boolean_expression(self, env): + # Analyse boolean expression and coerce result into + # a temporary. This is used when a branch is to be + # performed on the result and we won't have an + # opportunity to ensure disposal code is executed + # afterwards. By forcing the result into a temporary, + # we ensure that all disposal has been done by the + # time we get the result. + node = self.analyse_types(env) + return node.coerce_to_boolean(env).coerce_to_simple(env) + + # --------------- Type Inference ----------------- + + def type_dependencies(self, env): + # Returns the list of entries whose types must be determined + # before the type of self can be inferred. 
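+ # [Editor's aside, not upstream code: has_constant_result() above
+ # distinguishes "never computed" from "computed but not constant"
+ # via two sentinel objects. A commented standalone mimic, with
+ # hypothetical names:]
+ #
+ #     NOT_SET = object()      # stands in for constant_value_not_set
+ #     NOT_CONST = object()    # stands in for not_a_constant
+ #
+ #     class DemoConst:
+ #         constant_result = NOT_SET
+ #         def has_constant_result(self):
+ #             return (self.constant_result is not NOT_SET and
+ #                     self.constant_result is not NOT_CONST)
+ #
+ #     n = DemoConst()
+ #     assert not n.has_constant_result()   # nothing folded yet
+ #     n.constant_result = 42
+ #     assert n.has_constant_result()       # folded to a Python value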
+ if getattr(self, 'type', None) is not None: + return () + return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ()) + + def infer_type(self, env): + # Attempt to deduce the type of self. + # Differs from analyse_types as it avoids unnecessary + # analysis of subexpressions, but can assume everything + # in self.type_dependencies() has been resolved. + type = getattr(self, 'type', None) + if type is not None: + return type + entry = getattr(self, 'entry', None) + if entry is not None: + return entry.type + self.not_implemented("infer_type") + + def nonlocally_immutable(self): + # Returns whether this variable is a safe reference, i.e. + # can't be modified as part of globals or closures. + return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction + + def inferable_item_node(self, index=0): + """ + Return a node that represents the (type) result of an indexing operation, + e.g. for tuple unpacking or iteration. + """ + return IndexNode(self.pos, base=self, index=IntNode( + self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type)) + + # --------------- Type Analysis ------------------ + + def analyse_as_module(self, env): + # If this node can be interpreted as a reference to a + # cimported module, return its scope, else None. + return None + + def analyse_as_type(self, env): + # If this node can be interpreted as a reference to a + # type, return that type, else None. + return None + + def analyse_as_specialized_type(self, env): + type = self.analyse_as_type(env) + if type and type.is_fused and env.fused_to_specific: + # while it would be nice to test "if entry.type in env.fused_to_specific" + # rather than try/catch this doesn't work reliably (mainly for nested fused types) + try: + return type.specialize(env.fused_to_specific) + except KeyError: + pass + if type and type.is_fused: + error(self.pos, "Type is not specific") + return type + + def analyse_as_extension_type(self, env): + # If this node can be interpreted as a reference to an + # extension type or builtin type, return its type, else None. + return None + + def analyse_types(self, env): + self.not_implemented("analyse_types") + + def analyse_target_types(self, env): + return self.analyse_types(env) + + def nogil_check(self, env): + # By default, any expression based on Python objects is + # prevented in nogil environments. Subtypes must override + # this if they can work without the GIL. + if self.type and self.type.is_pyobject: + self.gil_error() + + def gil_assignment_check(self, env): + if env.nogil and self.type.is_pyobject: + error(self.pos, "Assignment of Python object not allowed without gil") + + def check_const(self): + self.not_const() + return False + + def not_const(self): + error(self.pos, "Not allowed in a constant expression") + + def check_const_addr(self): + self.addr_not_const() + return False + + def addr_not_const(self): + error(self.pos, "Address is not constant") + + # ----------------- Result Allocation ----------------- + + def result_in_temp(self): + # Return true if result is in a temporary owned by + # this node or one of its subexpressions. Overridden + # by certain nodes which can share the result of + # a subnode. + return self.is_temp + + def target_code(self): + # Return code fragment for use as LHS of a C assignment. 
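+ # [Editor's aside, not upstream code: analyse_as_specialized_type()
+ # above deliberately uses try/except KeyError instead of a membership
+ # test when specializing fused types. The same pattern, reduced to a
+ # plain mapping lookup with hypothetical names:]
+ #
+ #     def specialize(fused_name, fused_to_specific):
+ #         try:
+ #             return fused_to_specific[fused_name]
+ #         except KeyError:
+ #             return None   # caller then reports "Type is not specific"
+ #
+ #     assert specialize('floating', {'floating': 'double'}) == 'double'
+ #     assert specialize('integral', {'floating': 'double'}) is None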
+ return self.calculate_result_code() + + def calculate_result_code(self): + self.not_implemented("calculate_result_code") + +# def release_target_temp(self, env): +# # Release temporaries used by LHS of an assignment. +# self.release_subexpr_temps(env) + + def allocate_temp_result(self, code): + if self.temp_code: + raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos)) + type = self.type + if not type.is_void: + if type.is_pyobject: + type = PyrexTypes.py_object_type + elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()): + self.temp_code = None + return + self.temp_code = code.funcstate.allocate_temp( + type, manage_ref=self.use_managed_ref) + else: + self.temp_code = None + + def release_temp_result(self, code): + if not self.temp_code: + if not self.result_is_used: + # not used anyway, so ignore if not set up + return + pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)' + if self.old_temp: + raise RuntimeError("temp %s released multiple times in %s at %r" % ( + self.old_temp, self.__class__.__name__, pos)) + else: + raise RuntimeError("no temp, but release requested in %s at %r" % ( + self.__class__.__name__, pos)) + code.funcstate.release_temp(self.temp_code) + self.old_temp = self.temp_code + self.temp_code = None + + # ---------------- Code Generation ----------------- + + def make_owned_reference(self, code): + """ + Make sure we own a reference to result. + If the result is in a temp, it is already a new reference. + """ + if not self.result_in_temp(): + code.put_incref(self.result(), self.ctype()) + + def make_owned_memoryviewslice(self, code): + """ + Make sure we own the reference to this memoryview slice. + """ + # TODO ideally this would be shared with "make_owned_reference" + if not self.result_in_temp(): + code.put_incref_memoryviewslice(self.result(), self.type, + have_gil=not self.in_nogil_context) + + def generate_evaluation_code(self, code): + # Generate code to evaluate this node and + # its sub-expressions, and dispose of any + # temporary results of its sub-expressions. + self.generate_subexpr_evaluation_code(code) + + code.mark_pos(self.pos) + if self.is_temp: + self.allocate_temp_result(code) + + self.generate_result_code(code) + if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr): + # If we are temp we do not need to wait until this node is disposed + # before disposing children. + self.generate_subexpr_disposal_code(code) + self.free_subexpr_temps(code) + + def generate_subexpr_evaluation_code(self, code): + for node in self.subexpr_nodes(): + node.generate_evaluation_code(code) + + def generate_result_code(self, code): + self.not_implemented("generate_result_code") + + def generate_disposal_code(self, code): + if self.has_temp_moved: + code.globalstate.use_utility_code( + UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp")) + if self.is_temp: + if self.type.is_string or self.type.is_pyunicode_ptr: + # postponed from self.generate_evaluation_code() + self.generate_subexpr_disposal_code(code) + self.free_subexpr_temps(code) + if self.result(): + code.put_decref_clear(self.result(), self.ctype(), + have_gil=not self.in_nogil_context) + else: + # Already done if self.is_temp + self.generate_subexpr_disposal_code(code) + + def generate_subexpr_disposal_code(self, code): + # Generate code to dispose of temporary results + # of all sub-expressions. 
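+ # [Editor's aside, not upstream code: release_temp_result() above
+ # distinguishes a double release (old_temp still set) from a release
+ # without any allocation. That bookkeeping in isolation, commented,
+ # with hypothetical names:]
+ #
+ #     class DemoTemp:
+ #         temp_code = old_temp = None
+ #         def allocate(self, name):
+ #             if self.temp_code:
+ #                 raise RuntimeError("temp allocated multiple times")
+ #             self.temp_code = name
+ #         def release(self):
+ #             if not self.temp_code:
+ #                 raise RuntimeError("double release" if self.old_temp
+ #                                    else "release without allocation")
+ #             self.old_temp, self.temp_code = self.temp_code, None
+ #
+ #     t = DemoTemp()
+ #     t.allocate("__pyx_t_1"); t.release()   # a balanced pair is fine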
+ for node in self.subexpr_nodes(): + node.generate_disposal_code(code) + + def generate_post_assignment_code(self, code): + if self.is_temp: + if self.type.is_string or self.type.is_pyunicode_ptr: + # postponed from self.generate_evaluation_code() + self.generate_subexpr_disposal_code(code) + self.free_subexpr_temps(code) + elif self.type.is_pyobject: + code.putln("%s = 0;" % self.result()) + elif self.type.is_memoryviewslice: + code.putln("%s.memview = NULL;" % self.result()) + code.putln("%s.data = NULL;" % self.result()) + + if self.has_temp_moved: + code.globalstate.use_utility_code( + UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp")) + else: + self.generate_subexpr_disposal_code(code) + + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): + # Stub method for nodes which are not legal as + # the LHS of an assignment. An error will have + # been reported earlier. + pass + + def generate_deletion_code(self, code, ignore_nonexisting=False): + # Stub method for nodes that are not legal as + # the argument of a del statement. An error + # will have been reported earlier. + pass + + def free_temps(self, code): + if self.is_temp: + if not self.type.is_void: + self.release_temp_result(code) + else: + self.free_subexpr_temps(code) + + def free_subexpr_temps(self, code): + for sub in self.subexpr_nodes(): + sub.free_temps(code) + + def generate_function_definitions(self, env, code): + pass + + # ----Generation of small bits of reference counting -- + + def generate_decref_set(self, code, rhs): + code.put_decref_set(self.result(), self.ctype(), rhs) + + def generate_xdecref_set(self, code, rhs): + code.put_xdecref_set(self.result(), self.ctype(), rhs) + + def generate_gotref(self, code, handle_null=False, + maybe_null_extra_check=True): + if not (handle_null and self.cf_is_null): + if (handle_null and self.cf_maybe_null + and maybe_null_extra_check): + self.generate_xgotref(code) + else: + code.put_gotref(self.result(), self.ctype()) + + def generate_xgotref(self, code): + code.put_xgotref(self.result(), self.ctype()) + + def generate_giveref(self, code): + code.put_giveref(self.result(), self.ctype()) + + def generate_xgiveref(self, code): + code.put_xgiveref(self.result(), self.ctype()) + + # ---------------- Annotation --------------------- + + def annotate(self, code): + for node in self.subexpr_nodes(): + node.annotate(code) + + # ----------------- Coercion ---------------------- + + def coerce_to(self, dst_type, env): + # Coerce the result so that it can be assigned to + # something of type dst_type. If processing is necessary, + # wraps this node in a coercion node and returns that. + # Otherwise, returns this node unchanged. + # + # This method is called during the analyse_expressions + # phase of the src_node's processing. + # + # Note that subclasses that override this (especially + # ConstNodes) must not (re-)set their own .type attribute + # here. Since expression nodes may turn up in different + # places in the tree (e.g. inside of CloneNodes in cascaded + # assignments), this method must return a new node instance + # if it changes the type. 
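+ # [Editor's aside, not upstream code: generate_gotref() above picks
+ # the NULL-tolerant X variant only when control-flow analysis says
+ # the value may be NULL. Its decision table as a commented sketch,
+ # hypothetical names:]
+ #
+ #     def pick_gotref(handle_null, cf_is_null, cf_maybe_null):
+ #         if handle_null and cf_is_null:
+ #             return None          # statically NULL: no ref op at all
+ #         if handle_null and cf_maybe_null:
+ #             return "XGOTREF"     # may be NULL: checked variant
+ #         return "GOTREF"          # known non-NULL: plain variant
+ #
+ #     assert pick_gotref(True, True, True) is None
+ #     assert pick_gotref(True, False, True) == "XGOTREF"
+ #     assert pick_gotref(False, False, False) == "GOTREF"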
+ #
+ src = self
+ src_type = self.type
+
+ if self.check_for_coercion_error(dst_type, env):
+ return self
+
+ used_as_reference = dst_type.is_reference
+ if used_as_reference and not src_type.is_reference:
+ dst_type = dst_type.ref_base_type
+
+ if src_type.is_cv_qualified:
+ src_type = src_type.cv_base_type
+
+ if src_type.is_fused or dst_type.is_fused:
+ # See if we are coercing a fused function to a pointer to a
+ # specialized function
+ if (src_type.is_cfunction and not dst_type.is_fused and
+ dst_type.is_ptr and dst_type.base_type.is_cfunction):
+
+ dst_type = dst_type.base_type
+
+ for signature in src_type.get_all_specialized_function_types():
+ if signature.same_as(dst_type):
+ src.type = signature
+ src.entry = src.type.entry
+ src.entry.used = True
+ return self
+
+ if src_type.is_fused:
+ error(self.pos, "Type is not specialized")
+ elif src_type.is_null_ptr and dst_type.is_ptr:
+ # NULL can be implicitly cast to any pointer type
+ return self
+ else:
+ error(self.pos, "Cannot coerce to a type that is not specialized")
+
+ self.type = error_type
+ return self
+
+ if self.coercion_type is not None:
+ # This is purely for error checking purposes!
+ node = NameNode(self.pos, name='', type=self.coercion_type)
+ node.coerce_to(dst_type, env)
+
+ if dst_type.is_memoryviewslice:
+ from . import MemoryView
+ if not src.type.is_memoryviewslice:
+ if src.type.is_pyobject:
+ src = CoerceToMemViewSliceNode(src, dst_type, env)
+ elif src.type.is_array:
+ src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env)
+ elif not src_type.is_error:
+ error(self.pos,
+ "Cannot convert '%s' to memoryviewslice" % (src_type,))
+ else:
+ if src.type.writable_needed:
+ dst_type.writable_needed = True
+ if not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
+ copying=self.is_memview_copy_assignment):
+ if src.type.dtype.same_as(dst_type.dtype):
+ msg = "Memoryview '%s' not conformable to memoryview '%s'."
+ tup = src.type, dst_type
+ else:
+ msg = "Different base types for memoryviews (%s, %s)"
+ tup = src.type.dtype, dst_type.dtype
+
+ error(self.pos, msg % tup)
+
+ elif dst_type.is_pyobject:
+ # We never need a type check when assigning None to a Python object type.
+ if src.is_none:
+ pass
+ elif src.constant_result is None:
+ src = NoneNode(src.pos).coerce_to(dst_type, env)
+ else:
+ if not src.type.is_pyobject:
+ if dst_type is bytes_type and src.type.is_int:
+ src = CoerceIntToBytesNode(src, env)
+ else:
+ src = CoerceToPyTypeNode(src, env, type=dst_type)
+ # FIXME: I would expect that CoerceToPyTypeNode(type=dst_type) returns a value of type dst_type
+ # but it doesn't for ctuples. Thus, we add a PyTypeTestNode which then triggers the
+ # Python conversion and becomes useless. That seems backwards and inefficient.
+ # We should not need a PyTypeTestNode after a previous conversion above.
+ if not src.type.subtype_of(dst_type):
+ src = PyTypeTestNode(src, dst_type, env)
+ elif is_pythran_expr(dst_type) and is_pythran_supported_type(src.type):
+ # We let the compiler decide whether this is valid
+ return src
+ elif is_pythran_expr(src.type):
+ if is_pythran_supported_type(dst_type):
+ # Match the case where a pythran expr is assigned to a value, or vice versa.
+ # We let the C++ compiler decide whether this is valid or not!
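+ # [Editor's aside, not upstream code: the dispatch above wraps the
+ # source node in a coercion node rather than mutating it, since the
+ # same node may appear in several places in the tree. A commented
+ # standalone sketch of that "wrap, don't retype" rule, hypothetical
+ # names:]
+ #
+ #     class DemoCoercion:
+ #         def __init__(self, inner, dst):
+ #             self.inner, self.type = inner, dst
+ #
+ #     def coerce(node, dst_type):
+ #         if node.type == dst_type:
+ #             return node                      # no wrapper needed
+ #         return DemoCoercion(node, dst_type)  # original left untouched
+ #
+ #     class DemoInt: type = 'int'
+ #     n = DemoInt()
+ #     assert coerce(n, 'int') is n
+ #     assert coerce(n, 'double').inner is n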
+ return src + # Else, we need to convert the Pythran expression to a Python object + src = CoerceToPyTypeNode(src, env, type=dst_type) + elif src.type.is_pyobject: + if used_as_reference and dst_type.is_cpp_class: + warning( + self.pos, + "Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type) + src = CoerceFromPyTypeNode(dst_type, src, env) + elif (dst_type.is_complex + and src_type != dst_type + and dst_type.assignable_from(src_type)): + src = CoerceToComplexNode(src, dst_type, env) + elif (src_type is PyrexTypes.soft_complex_type + and src_type != dst_type + and not dst_type.assignable_from(src_type)): + src = coerce_from_soft_complex(src, dst_type, env) + else: + # neither src nor dst are py types + # Added the string comparison, since for c types that + # is enough, but Cython gets confused when the types are + # in different pxi files. + # TODO: Remove this hack and require shared declarations. + if not (src.type == dst_type or str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)): + self.fail_assignment(dst_type) + return src + + def fail_assignment(self, dst_type): + extra_diagnostics = dst_type.assignment_failure_extra_info(self.type) + if extra_diagnostics: + extra_diagnostics = ". " + extra_diagnostics + error(self.pos, "Cannot assign type '%s' to '%s'%s" % ( + self.type, dst_type, extra_diagnostics)) + + def check_for_coercion_error(self, dst_type, env, fail=False, default=None): + if fail and not default: + default = "Cannot assign type '%(FROM)s' to '%(TO)s'" + message = find_coercion_error((self.type, dst_type), default, env) + if message is not None: + error(self.pos, message % {'FROM': self.type, 'TO': dst_type}) + return True + if fail: + self.fail_assignment(dst_type) + return True + return False + + def coerce_to_pyobject(self, env): + return self.coerce_to(PyrexTypes.py_object_type, env) + + def coerce_to_boolean(self, env): + # Coerce result to something acceptable as + # a boolean value. + + # if it's constant, calculate the result now + if self.has_constant_result(): + bool_value = bool(self.constant_result) + return BoolNode(self.pos, value=bool_value, + constant_result=bool_value) + + type = self.type + if type.is_enum or type.is_error: + return self + elif type is PyrexTypes.c_bint_type: + return self + elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float: + return CoerceToBooleanNode(self, env) + elif type.is_cpp_class and type.scope and type.scope.lookup("operator bool"): + return SimpleCallNode( + self.pos, + function=AttributeNode( + self.pos, obj=self, attribute=StringEncoding.EncodedString('operator bool')), + args=[]).analyse_types(env) + elif type.is_ctuple: + bool_value = len(type.components) == 0 + return BoolNode(self.pos, value=bool_value, + constant_result=bool_value) + else: + error(self.pos, "Type '%s' not acceptable as a boolean" % type) + return self + + def coerce_to_integer(self, env): + # If not already some C integer type, coerce to longint. + if self.type.is_int: + return self + else: + return self.coerce_to(PyrexTypes.c_long_type, env) + + def coerce_to_temp(self, env): + # Ensure that the result is in a temporary. + if self.result_in_temp(): + return self + else: + return CoerceToTempNode(self, env) + + def coerce_to_simple(self, env): + # Ensure that the result is simple (see is_simple). 
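+ # [Editor's aside, not upstream code: coerce_to_boolean() above folds
+ # constant operands straight into a BoolNode instead of emitting a
+ # runtime truth test. A commented sketch, hypothetical names:]
+ #
+ #     def coerce_to_boolean(constant_result, has_constant):
+ #         if has_constant:
+ #             return ('BoolNode', bool(constant_result))   # folded now
+ #         return ('CoerceToBooleanNode', None)             # tested at runtime
+ #
+ #     assert coerce_to_boolean(0, True) == ('BoolNode', False)
+ #     assert coerce_to_boolean(None, False)[0] == 'CoerceToBooleanNode'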
+ if self.is_simple(): + return self + else: + return self.coerce_to_temp(env) + + def is_simple(self): + # A node is simple if its result is something that can + # be referred to without performing any operations, e.g. + # a constant, local var, C global var, struct member + # reference, or temporary. + return self.result_in_temp() + + def may_be_none(self): + if self.type and not (self.type.is_pyobject or + self.type.is_memoryviewslice): + return False + if self.has_constant_result(): + return self.constant_result is not None + return True + + def as_cython_attribute(self): + return None + + def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()): + # Wraps the node in a NoneCheckNode if it is not known to be + # not-None (e.g. because it is a Python literal). + if self.may_be_none(): + return NoneCheckNode(self, error, message, format_args) + else: + return self + + @classmethod + def from_node(cls, node, **kwargs): + """Instantiate this node class from another node, properly + copying over all attributes that one would forget otherwise. + """ + attributes = "cf_state cf_maybe_null cf_is_null constant_result".split() + for attr_name in attributes: + if attr_name in kwargs: + continue + try: + value = getattr(node, attr_name) + except AttributeError: + pass + else: + kwargs[attr_name] = value + return cls(node.pos, **kwargs) + + def get_known_standard_library_import(self): + """ + Gets the module.path that this node was imported from. + + Many nodes do not have one, or it is ambiguous, in which case + this function returns a false value. + """ + return None + + +class AtomicExprNode(ExprNode): + # Abstract base class for expression nodes which have + # no sub-expressions. + + subexprs = [] + + # Override to optimize -- we know we have no children + def generate_subexpr_evaluation_code(self, code): + pass + def generate_subexpr_disposal_code(self, code): + pass + +class PyConstNode(AtomicExprNode): + # Abstract base class for constant Python values. + + is_literal = 1 + type = py_object_type + nogil_check = None + + def is_simple(self): + return 1 + + def may_be_none(self): + return False + + def analyse_types(self, env): + return self + + def calculate_result_code(self): + return self.value + + def generate_result_code(self, code): + pass + + +class NoneNode(PyConstNode): + # The constant value None + + is_none = 1 + value = "Py_None" + + constant_result = None + + def compile_time_value(self, denv): + return None + + def may_be_none(self): + return True + + def coerce_to(self, dst_type, env): + if not (dst_type.is_pyobject or dst_type.is_memoryviewslice or dst_type.is_error): + # Catch this error early and loudly. + error(self.pos, "Cannot assign None to %s" % dst_type) + return super(NoneNode, self).coerce_to(dst_type, env) + + +class EllipsisNode(PyConstNode): + # '...' in a subscript list. + + value = "Py_Ellipsis" + + constant_result = Ellipsis + + def compile_time_value(self, denv): + return Ellipsis + + +class ConstNode(AtomicExprNode): + # Abstract base type for literal constant nodes. 
+ # + # value string C code fragment + + is_literal = 1 + nogil_check = None + + def is_simple(self): + return 1 + + def nonlocally_immutable(self): + return 1 + + def may_be_none(self): + return False + + def analyse_types(self, env): + return self # Types are held in class variables + + def check_const(self): + return True + + def get_constant_c_result_code(self): + return self.calculate_result_code() + + def calculate_result_code(self): + return str(self.value) + + def generate_result_code(self, code): + pass + + +class BoolNode(ConstNode): + type = PyrexTypes.c_bint_type + # The constant value True or False + + def calculate_constant_result(self): + self.constant_result = self.value + + def compile_time_value(self, denv): + return self.value + + def calculate_result_code(self): + if self.type.is_pyobject: + return 'Py_True' if self.value else 'Py_False' + else: + return str(int(self.value)) + + def coerce_to(self, dst_type, env): + if dst_type == self.type: + return self + if dst_type is py_object_type and self.type is Builtin.bool_type: + return self + if dst_type.is_pyobject and self.type.is_int: + return BoolNode( + self.pos, value=self.value, + constant_result=self.constant_result, + type=Builtin.bool_type) + if dst_type.is_int and self.type.is_pyobject: + return BoolNode( + self.pos, value=self.value, + constant_result=self.constant_result, + type=PyrexTypes.c_bint_type) + return ConstNode.coerce_to(self, dst_type, env) + + +class NullNode(ConstNode): + type = PyrexTypes.c_null_ptr_type + value = "NULL" + constant_result = 0 + + def get_constant_c_result_code(self): + return self.value + + +class CharNode(ConstNode): + type = PyrexTypes.c_char_type + + def calculate_constant_result(self): + self.constant_result = ord(self.value) + + def compile_time_value(self, denv): + return ord(self.value) + + def calculate_result_code(self): + return "'%s'" % StringEncoding.escape_char(self.value) + + +class IntNode(ConstNode): + + # unsigned "" or "U" + # longness "" or "L" or "LL" + # is_c_literal True/False/None creator considers this a C integer literal + + unsigned = "" + longness = "" + is_c_literal = None # unknown + + # hex_value and base_10_value are designed only to simplify + # writing tests to get a consistent representation of value + @property + def hex_value(self): + return Utils.strip_py2_long_suffix(hex(Utils.str_to_number(self.value))) + + @property + def base_10_value(self): + return str(Utils.str_to_number(self.value)) + + def __init__(self, pos, **kwds): + ExprNode.__init__(self, pos, **kwds) + if 'type' not in kwds: + self.type = self.find_suitable_type_for_value() + + def find_suitable_type_for_value(self): + if self.constant_result is constant_value_not_set: + try: + self.calculate_constant_result() + except ValueError: + pass + # we ignore 'is_c_literal = True' and instead map signed 32bit + # integers as C long values + if self.is_c_literal or \ + not self.has_constant_result() or \ + self.unsigned or self.longness == 'LL': + # clearly a C literal + rank = (self.longness == 'LL') and 2 or 1 + suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"] + if self.type: + suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type) + else: + # C literal or Python literal - split at 32bit boundary + if -2**31 <= self.constant_result < 2**31: + if self.type and self.type.is_int: + suitable_type = self.type + else: + suitable_type = PyrexTypes.c_long_type + else: + suitable_type = PyrexTypes.py_object_type + return suitable_type + + def 
coerce_to(self, dst_type, env): + if self.type is dst_type: + return self + elif dst_type.is_float: + if self.has_constant_result(): + return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type, + constant_result=float(self.constant_result)) + else: + return FloatNode(self.pos, value=self.value, type=dst_type, + constant_result=not_a_constant) + if dst_type.is_numeric and not dst_type.is_complex: + node = IntNode(self.pos, value=self.value, constant_result=self.constant_result, + type=dst_type, is_c_literal=True, + unsigned=self.unsigned, longness=self.longness) + return node + elif dst_type.is_pyobject: + node = IntNode(self.pos, value=self.value, constant_result=self.constant_result, + type=PyrexTypes.py_object_type, is_c_literal=False, + unsigned=self.unsigned, longness=self.longness) + else: + # FIXME: not setting the type here to keep it working with + # complex numbers. Should they be special cased? + node = IntNode(self.pos, value=self.value, constant_result=self.constant_result, + unsigned=self.unsigned, longness=self.longness) + # We still need to perform normal coerce_to processing on the + # result, because we might be coercing to an extension type, + # in which case a type test node will be needed. + return ConstNode.coerce_to(node, dst_type, env) + + def coerce_to_boolean(self, env): + return IntNode( + self.pos, value=self.value, + constant_result=self.constant_result, + type=PyrexTypes.c_bint_type, + unsigned=self.unsigned, longness=self.longness) + + def generate_evaluation_code(self, code): + if self.type.is_pyobject: + # pre-allocate a Python version of the number + # (In hex if sufficiently large to cope with Python's string-to-int limitations. + # We use quite a small value of "sufficiently large" - 10**13 is picked as + # the approximate point where hex strings become shorter) + value = Utils.str_to_number(self.value) + formatter = hex if value > (10**13) else str + plain_integer_string = formatter(value) + plain_integer_string = Utils.strip_py2_long_suffix(plain_integer_string) + self.result_code = code.get_py_int(plain_integer_string, self.longness) + else: + self.result_code = self.get_constant_c_result_code() + + def get_constant_c_result_code(self): + unsigned, longness = self.unsigned, self.longness + literal = self.value_as_c_integer_string() + if not (unsigned or longness) and self.type.is_int and literal[0] == '-' and literal[1] != '0': + # negative decimal literal => guess longness from type to prevent wrap-around + if self.type.rank >= PyrexTypes.c_longlong_type.rank: + longness = 'LL' + elif self.type.rank >= PyrexTypes.c_long_type.rank: + longness = 'L' + return literal + unsigned + longness + + def value_as_c_integer_string(self): + value = self.value + if len(value) <= 2: + # too short to go wrong (and simplifies code below) + return value + neg_sign = '' + if value[0] == '-': + neg_sign = '-' + value = value[1:] + if value[0] == '0': + literal_type = value[1] # 0'o' - 0'b' - 0'x' + # 0x123 hex literals and 0123 octal literals work nicely in C + # but C-incompatible Py3 oct/bin notations need conversion + if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit(): + # negative hex/octal literal => prevent C compiler from using + # unsigned integer types by converting to decimal (see C standard 6.4.4.1) + value = str(Utils.str_to_number(value)) + elif literal_type in 'oO': + value = '0' + value[2:] # '0o123' => '0123' + elif literal_type in 'bB': + value = str(int(value[2:], 2)) + elif value.isdigit() and not 
self.unsigned and not self.longness: + if not neg_sign: + # C compilers do not consider unsigned types for decimal literals, + # but they do for hex (see C standard 6.4.4.1) + value = '0x%X' % int(value) + return neg_sign + value + + def calculate_result_code(self): + return self.result_code + + def calculate_constant_result(self): + self.constant_result = Utils.str_to_number(self.value) + + def compile_time_value(self, denv): + return Utils.str_to_number(self.value) + +class FloatNode(ConstNode): + type = PyrexTypes.c_double_type + + def calculate_constant_result(self): + self.constant_result = float(self.value) + + def compile_time_value(self, denv): + float_value = float(self.value) + str_float_value = ("%.330f" % float_value).strip('0') + str_value = Utils.normalise_float_repr(self.value) + if str_value not in (str_float_value, repr(float_value).lstrip('0')): + warning(self.pos, "Using this floating point value with DEF may lose precision, using %r" % float_value) + return float_value + + def coerce_to(self, dst_type, env): + if dst_type.is_pyobject and self.type.is_float: + return FloatNode( + self.pos, value=self.value, + constant_result=self.constant_result, + type=Builtin.float_type) + if dst_type.is_float and self.type.is_pyobject: + return FloatNode( + self.pos, value=self.value, + constant_result=self.constant_result, + type=dst_type) + return ConstNode.coerce_to(self, dst_type, env) + + def calculate_result_code(self): + return self.result_code + + def get_constant_c_result_code(self): + strval = self.value + assert isinstance(strval, basestring) + cmpval = repr(float(strval)) + if cmpval == 'nan': + return "(Py_HUGE_VAL * 0)" + elif cmpval == 'inf': + return "Py_HUGE_VAL" + elif cmpval == '-inf': + return "(-Py_HUGE_VAL)" + else: + return strval + + def generate_evaluation_code(self, code): + c_value = self.get_constant_c_result_code() + if self.type.is_pyobject: + self.result_code = code.get_py_float(self.value, c_value) + else: + self.result_code = c_value + + +def _analyse_name_as_type(name, pos, env): + ctype = PyrexTypes.parse_basic_type(name) + if ctype is not None and env.in_c_type_context: + return ctype + + global_scope = env.global_scope() + global_entry = global_scope.lookup(name) + if global_entry and global_entry.is_type: + type = global_entry.type + if (not env.in_c_type_context + and type is Builtin.int_type + and global_scope.context.language_level == 2): + # While we still support Python2 this needs to be downgraded + # to a generic Python object to include both int and long. + # With language_level > 3, we keep the type but also accept 'long' in Py2. + type = py_object_type + if type and (type.is_pyobject or env.in_c_type_context): + return type + ctype = ctype or type + + # This is fairly heavy, so it's worth trying some easier things above. 
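+ # [Editor's aside, not upstream code: value_as_c_integer_string()
+ # above rewrites Python-3-only literal notations into forms every C
+ # compiler accepts. The core rewrites as a commented standalone
+ # helper, hypothetical name:]
+ #
+ #     def c_int_literal(value):
+ #         neg = value.startswith('-')
+ #         body = value[1:] if neg else value
+ #         if body[:2] in ('0o', '0O'):
+ #             body = '0' + body[2:]        # '0o123' -> '0123' (C octal)
+ #         elif body[:2] in ('0b', '0B'):
+ #             body = str(int(body, 2))     # C has no binary literals
+ #         return ('-' if neg else '') + body
+ #
+ #     assert c_int_literal('0o123') == '0123'
+ #     assert c_int_literal('-0b101') == '-5'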
+ from .TreeFragment import TreeFragment + with local_errors(ignore=True): + pos = (pos[0], pos[1], pos[2]-7) + try: + declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos) + except CompileError: + pass + else: + sizeof_node = declaration.root.stats[0].expr + if isinstance(sizeof_node, SizeofTypeNode): + sizeof_node = sizeof_node.analyse_types(env) + if isinstance(sizeof_node, SizeofTypeNode): + type = sizeof_node.arg_type + if type and (type.is_pyobject or env.in_c_type_context): + return type + ctype = ctype or type + return ctype + + +class BytesNode(ConstNode): + # A char* or bytes literal + # + # value BytesLiteral + + is_string_literal = True + # start off as Python 'bytes' to support len() in O(1) + type = bytes_type + + def calculate_constant_result(self): + self.constant_result = self.value + + def as_sliced_node(self, start, stop, step=None): + value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding) + return BytesNode(self.pos, value=value, constant_result=value) + + def compile_time_value(self, denv): + return self.value.byteencode() + + def analyse_as_type(self, env): + return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env) + + def can_coerce_to_char_literal(self): + return len(self.value) == 1 + + def coerce_to_boolean(self, env): + # This is special because testing a C char* for truth directly + # would yield the wrong result. + bool_value = bool(self.value) + return BoolNode(self.pos, value=bool_value, constant_result=bool_value) + + def coerce_to(self, dst_type, env): + if self.type == dst_type: + return self + if dst_type.is_int: + if not self.can_coerce_to_char_literal(): + error(self.pos, "Only single-character string literals can be coerced into ints.") + return self + if dst_type.is_unicode_char: + error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.") + return self + return CharNode(self.pos, value=self.value, + constant_result=ord(self.value)) + + node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result) + if dst_type.is_pyobject: + if dst_type in (py_object_type, Builtin.bytes_type): + node.type = Builtin.bytes_type + else: + self.check_for_coercion_error(dst_type, env, fail=True) + return node + elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type): + node.type = dst_type + return node + elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type): + node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type + else PyrexTypes.c_char_ptr_type) + return CastNode(node, dst_type) + elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type): + # Exclude the case of passing a C string literal into a non-const C++ string. + if not dst_type.is_cpp_class or dst_type.is_const: + node.type = dst_type + return node + + # We still need to perform normal coerce_to processing on the + # result, because we might be coercing to an extension type, + # in which case a type test node will be needed. 
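+ # [Editor's aside, not upstream code: the dst_type.is_int branch in
+ # BytesNode.coerce_to() above only accepts one-byte literals,
+ # mirroring C's char. The guard in isolation, commented, with a
+ # hypothetical name:]
+ #
+ #     def bytes_as_c_char(value):
+ #         if len(value) != 1:
+ #             raise ValueError("Only single-character string literals "
+ #                              "can be coerced into ints.")
+ #         return ord(value)
+ #
+ #     assert bytes_as_c_char(b'A') == 65   # b'AB' would raise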
+ return ConstNode.coerce_to(node, dst_type, env) + + def generate_evaluation_code(self, code): + if self.type.is_pyobject: + result = code.get_py_string_const(self.value) + elif self.type.is_const: + result = code.get_string_const(self.value) + else: + # not const => use plain C string literal and cast to mutable type + literal = self.value.as_c_string_literal() + # C++ may require a cast + result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal) + self.result_code = result + + def get_constant_c_result_code(self): + return None # FIXME + + def calculate_result_code(self): + return self.result_code + + +class UnicodeNode(ConstNode): + # A Py_UNICODE* or unicode literal + # + # value EncodedString + # bytes_value BytesLiteral the literal parsed as bytes string + # ('-3' unicode literals only) + + is_string_literal = True + bytes_value = None + type = unicode_type + + def calculate_constant_result(self): + self.constant_result = self.value + + def analyse_as_type(self, env): + return _analyse_name_as_type(self.value, self.pos, env) + + def as_sliced_node(self, start, stop, step=None): + if StringEncoding.string_contains_surrogates(self.value[:stop]): + # this is unsafe as it may give different results + # in different runtimes + return None + value = StringEncoding.EncodedString(self.value[start:stop:step]) + value.encoding = self.value.encoding + if self.bytes_value is not None: + bytes_value = StringEncoding.bytes_literal( + self.bytes_value[start:stop:step], self.bytes_value.encoding) + else: + bytes_value = None + return UnicodeNode( + self.pos, value=value, bytes_value=bytes_value, + constant_result=value) + + def coerce_to(self, dst_type, env): + if dst_type is self.type: + pass + elif dst_type.is_unicode_char: + if not self.can_coerce_to_char_literal(): + error(self.pos, + "Only single-character Unicode string literals or " + "surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.") + return self + int_value = ord(self.value) + return IntNode(self.pos, type=dst_type, value=str(int_value), + constant_result=int_value) + elif not dst_type.is_pyobject: + if dst_type.is_string and self.bytes_value is not None: + # special case: '-3' enforced unicode literal used in a + # C char* context + return BytesNode(self.pos, value=self.bytes_value).coerce_to(dst_type, env) + if dst_type.is_pyunicode_ptr: + return UnicodeNode(self.pos, value=self.value, type=dst_type) + error(self.pos, + "Unicode literals do not support coercion to C types other " + "than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* " + "(for strings).") + elif dst_type not in (py_object_type, Builtin.basestring_type): + self.check_for_coercion_error(dst_type, env, fail=True) + return self + + def can_coerce_to_char_literal(self): + return len(self.value) == 1 + ## or (len(self.value) == 2 + ## and (0xD800 <= self.value[0] <= 0xDBFF) + ## and (0xDC00 <= self.value[1] <= 0xDFFF)) + + def coerce_to_boolean(self, env): + bool_value = bool(self.value) + return BoolNode(self.pos, value=bool_value, constant_result=bool_value) + + def contains_surrogates(self): + return StringEncoding.string_contains_surrogates(self.value) + + def generate_evaluation_code(self, code): + if self.type.is_pyobject: + # FIXME: this should go away entirely! + # Since string_contains_lone_surrogates() returns False for surrogate pairs in Py2/UCS2, + # Py2 can generate different code from Py3 here. 
Let's hope we get away with claiming that
+ # the processing of surrogate pairs in code was always ambiguous and led to different results
+ # on 16/32bit Unicode platforms.
+ if StringEncoding.string_contains_lone_surrogates(self.value):
+ # lone (unpaired) surrogates are not really portable and cannot be
+ # decoded by the UTF-8 codec in Py3.3
+ self.result_code = code.get_py_const(py_object_type, 'ustring')
+ data_cname = code.get_string_const(
+ StringEncoding.BytesLiteral(self.value.encode('unicode_escape')))
+ const_code = code.get_cached_constants_writer(self.result_code)
+ if const_code is None:
+ return # already initialised
+ const_code.mark_pos(self.pos)
+ const_code.putln(
+ "%s = PyUnicode_DecodeUnicodeEscape(%s, sizeof(%s) - 1, NULL); %s" % (
+ self.result_code,
+ data_cname,
+ data_cname,
+ const_code.error_goto_if_null(self.result_code, self.pos)))
+ const_code.put_error_if_neg(
+ self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code)
+ else:
+ self.result_code = code.get_py_string_const(self.value)
+ else:
+ self.result_code = code.get_pyunicode_ptr_const(self.value)
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def compile_time_value(self, env):
+ return self.value
+
+
+ class StringNode(PyConstNode):
+ # A Python str object, i.e. a byte string in Python 2.x and a
+ # unicode string in Python 3.x
+ #
+ # value BytesLiteral (or EncodedString with ASCII content)
+ # unicode_value EncodedString or None
+ # is_identifier boolean
+
+ type = str_type
+ is_string_literal = True
+ is_identifier = None
+ unicode_value = None
+
+ def calculate_constant_result(self):
+ if self.unicode_value is not None:
+ # only the Unicode value is portable across Py2/3
+ self.constant_result = self.unicode_value
+
+ def analyse_as_type(self, env):
+ return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)
+
+ def as_sliced_node(self, start, stop, step=None):
+ value = type(self.value)(self.value[start:stop:step])
+ value.encoding = self.value.encoding
+ if self.unicode_value is not None:
+ if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
+ # this is unsafe as it may give different results in different runtimes
+ return None
+ unicode_value = StringEncoding.EncodedString(
+ self.unicode_value[start:stop:step])
+ else:
+ unicode_value = None
+ return StringNode(
+ self.pos, value=value, unicode_value=unicode_value,
+ constant_result=value, is_identifier=self.is_identifier)
+
+ def coerce_to(self, dst_type, env):
+ if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
+# if dst_type is Builtin.bytes_type:
+# # special case: bytes = 'str literal'
+# return BytesNode(self.pos, value=self.value)
+ if not dst_type.is_pyobject:
+ return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
+ if dst_type is not Builtin.basestring_type:
+ self.check_for_coercion_error(dst_type, env, fail=True)
+ return self
+
+ def can_coerce_to_char_literal(self):
+ return not self.is_identifier and len(self.value) == 1
+
+ def generate_evaluation_code(self, code):
+ self.result_code = code.get_py_string_const(
+ self.value, identifier=self.is_identifier, is_str=True,
+ unicode_value=self.unicode_value)
+
+ def get_constant_c_result_code(self):
+ return None
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def compile_time_value(self, env):
+ if self.value.is_unicode:
+ return self.value
+ if not IS_PYTHON3:
+ # use plain str/bytes object in Py2
+ return self.value.byteencode()
+ # in Py3, 
always return a Unicode string + if self.unicode_value is not None: + return self.unicode_value + return self.value.decode('iso8859-1') + + +class IdentifierStringNode(StringNode): + # A special str value that represents an identifier (bytes in Py2, + # unicode in Py3). + is_identifier = True + + +class ImagNode(AtomicExprNode): + # Imaginary number literal + # + # value string imaginary part (float value) + + type = PyrexTypes.c_double_complex_type + + def calculate_constant_result(self): + self.constant_result = complex(0.0, float(self.value)) + + def compile_time_value(self, denv): + return complex(0.0, float(self.value)) + + def analyse_types(self, env): + self.type.create_declaration_utility_code(env) + return self + + def may_be_none(self): + return False + + def coerce_to(self, dst_type, env): + if self.type is dst_type: + return self + node = ImagNode(self.pos, value=self.value) + if dst_type.is_pyobject: + node.is_temp = 1 + node.type = Builtin.complex_type + # We still need to perform normal coerce_to processing on the + # result, because we might be coercing to an extension type, + # in which case a type test node will be needed. + return AtomicExprNode.coerce_to(node, dst_type, env) + + gil_message = "Constructing complex number" + + def calculate_result_code(self): + if self.type.is_pyobject: + return self.result() + else: + return "%s(0, %r)" % (self.type.from_parts, float(self.value)) + + def generate_result_code(self, code): + if self.type.is_pyobject: + code.putln( + "%s = PyComplex_FromDoubles(0.0, %r); %s" % ( + self.result(), + float(self.value), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class NewExprNode(AtomicExprNode): + + # C++ new statement + # + # cppclass node c++ class to create + + type = None + + def infer_type(self, env): + type = self.cppclass.analyse_as_type(env) + if type is None or not type.is_cpp_class: + error(self.pos, "new operator can only be applied to a C++ class") + self.type = error_type + return + self.cpp_check(env) + constructor = type.get_constructor(self.pos) + self.class_type = type + self.entry = constructor + self.type = constructor.type + return self.type + + def analyse_types(self, env): + if self.type is None: + self.infer_type(env) + return self + + def may_be_none(self): + return False + + def generate_result_code(self, code): + pass + + def calculate_result_code(self): + return "new " + self.class_type.empty_declaration_code() + + +class NameNode(AtomicExprNode): + # Reference to a local or global variable name. 
+ # + # name string Python name of the variable + # entry Entry Symbol table entry + # type_entry Entry For extension type names, the original type entry + # cf_is_null boolean Is uninitialized before this node + # cf_maybe_null boolean Maybe uninitialized before this node + # allow_null boolean Don't raise UnboundLocalError + # nogil boolean Whether it is used in a nogil context + + is_name = True + is_cython_module = False + cython_attribute = None + lhs_of_first_assignment = False # TODO: remove me + is_used_as_rvalue = 0 + entry = None + type_entry = None + cf_maybe_null = True + cf_is_null = False + allow_null = False + nogil = False + inferred_type = None + + def as_cython_attribute(self): + return self.cython_attribute + + def type_dependencies(self, env): + if self.entry is None: + self.entry = env.lookup(self.name) + if self.entry is not None and self.entry.type.is_unspecified: + return (self,) + else: + return () + + def infer_type(self, env): + if self.entry is None: + self.entry = env.lookup(self.name) + if self.entry is None or self.entry.type is unspecified_type: + if self.inferred_type is not None: + return self.inferred_type + return py_object_type + elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \ + self.name == self.entry.type.name: + # Unfortunately the type attribute of type objects + # is used for the pointer to the type they represent. + return type_type + elif self.entry.type.is_cfunction: + if self.entry.scope.is_builtin_scope: + # special case: optimised builtin functions must be treated as Python objects + return py_object_type + else: + # special case: referring to a C function must return its pointer + return PyrexTypes.CPtrType(self.entry.type) + else: + # If entry is inferred as pyobject it's safe to use local + # NameNode's inferred_type. + if self.entry.type.is_pyobject and self.inferred_type: + # Overflow may happen if integer + if not (self.inferred_type.is_int and self.entry.might_overflow): + return self.inferred_type + return self.entry.type + + def compile_time_value(self, denv): + try: + return denv.lookup(self.name) + except KeyError: + error(self.pos, "Compile-time name '%s' not defined" % self.name) + + def get_constant_c_result_code(self): + if not self.entry or self.entry.type.is_pyobject: + return None + return self.entry.cname + + def coerce_to(self, dst_type, env): + # If coercing to a generic pyobject and this is a builtin + # C function with a Python equivalent, manufacture a NameNode + # referring to the Python builtin. + #print "NameNode.coerce_to:", self.name, dst_type ### + if dst_type is py_object_type: + entry = self.entry + if entry and entry.is_cfunction: + var_entry = entry.as_variable + if var_entry: + if var_entry.is_builtin and var_entry.is_const: + var_entry = env.declare_builtin(var_entry.name, self.pos) + node = NameNode(self.pos, name = self.name) + node.entry = var_entry + node.analyse_rvalue_entry(env) + return node + + return super(NameNode, self).coerce_to(dst_type, env) + + def declare_from_annotation(self, env, as_target=False): + """Implements PEP 526 annotation typing in a fairly relaxed way. + + Annotations are ignored for global variables. + All other annotations are stored on the entry in the symbol table. + String literals are allowed and not evaluated. + The ambiguous Python types 'int' and 'long' are not evaluated - the 'cython.int' form must be used instead. 
+ """ + name = self.name + annotation = self.annotation + entry = self.entry or env.lookup_here(name) + if not entry: + # annotations never create global cdef names + if env.is_module_scope: + return + + modifiers = () + if ( + # name: "description" => not a type, but still a declared variable or attribute + annotation.expr.is_string_literal + # don't do type analysis from annotations if not asked to, but still collect the annotation + or not env.directives['annotation_typing'] + ): + atype = None + elif env.is_py_class_scope: + # For Python class scopes every attribute is a Python object + atype = py_object_type + else: + modifiers, atype = annotation.analyse_type_annotation(env) + + if atype is None: + atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type + elif atype.is_fused and env.fused_to_specific: + try: + atype = atype.specialize(env.fused_to_specific) + except CannotSpecialize: + error(self.pos, + "'%s' cannot be specialized since its type is not a fused argument to this function" % + self.name) + atype = error_type + + visibility = 'private' + if env.is_c_dataclass_scope: + # handle "frozen" directive - full inspection of the dataclass directives happens + # in Dataclass.py + is_frozen = env.is_c_dataclass_scope == "frozen" + if atype.is_pyobject or atype.can_coerce_to_pyobject(env): + visibility = 'readonly' if is_frozen else 'public' + # If the object can't be coerced that's fine - we just don't create a property + + if as_target and env.is_c_class_scope and not (atype.is_pyobject or atype.is_error): + # TODO: this will need revising slightly if annotated cdef attributes are implemented + atype = py_object_type + warning(annotation.pos, "Annotation ignored since class-level attributes must be Python objects. " + "Were you trying to set up an instance attribute?", 2) + + entry = self.entry = env.declare_var( + name, atype, self.pos, is_cdef=not as_target, visibility=visibility, + pytyping_modifiers=modifiers) + + # Even if the entry already exists, make sure we're supplying an annotation if we can. + if annotation and not entry.annotation: + entry.annotation = annotation + + def analyse_as_module(self, env): + # Try to interpret this as a reference to a cimported module. + # Returns the module scope, or None. + entry = self.entry + if not entry: + entry = env.lookup(self.name) + if entry and entry.as_module: + return entry.as_module + if entry and entry.known_standard_library_import: + scope = Builtin.get_known_standard_library_module_scope(entry.known_standard_library_import) + if scope and scope.is_module_scope: + return scope + return None + + def analyse_as_type(self, env): + type = None + if self.cython_attribute: + type = PyrexTypes.parse_basic_type(self.cython_attribute) + elif env.in_c_type_context: + type = PyrexTypes.parse_basic_type(self.name) + if type: + return type + + entry = self.entry + if not entry: + entry = env.lookup(self.name) + if entry and not entry.is_type and entry.known_standard_library_import: + entry = Builtin.get_known_standard_library_entry(entry.known_standard_library_import) + if entry and entry.is_type: + # Infer equivalent C types instead of Python types when possible. + type = entry.type + if not env.in_c_type_context and type is Builtin.long_type: + # Try to give a helpful warning when users write plain C type names. + warning(self.pos, "Found Python 2.x type 'long' in a Python annotation. 
Did you mean to use 'cython.long'?") + type = py_object_type + elif type.is_pyobject and type.equivalent_type: + type = type.equivalent_type + elif type is Builtin.int_type and env.global_scope().context.language_level == 2: + # While we still support Python 2 this must be a plain object + # so that it can be either int or long. With language_level=3(str), + # we pick up the type but accept both int and long in Py2. + type = py_object_type + return type + if self.name == 'object': + # This is normally parsed as "simple C type", but not if we don't parse C types. + return py_object_type + + # Try to give a helpful warning when users write plain C type names. + if not env.in_c_type_context and PyrexTypes.parse_basic_type(self.name): + warning(self.pos, "Found C type '%s' in a Python annotation. Did you mean to use 'cython.%s'?" % (self.name, self.name)) + + return None + + def analyse_as_extension_type(self, env): + # Try to interpret this as a reference to an extension type. + # Returns the extension type, or None. + entry = self.entry + if not entry: + entry = env.lookup(self.name) + if entry and entry.is_type: + if entry.type.is_extension_type or entry.type.is_builtin_type: + return entry.type + return None + + def analyse_target_declaration(self, env): + return self._analyse_target_declaration(env, is_assignment_expression=False) + + def analyse_assignment_expression_target_declaration(self, env): + return self._analyse_target_declaration(env, is_assignment_expression=True) + + def _analyse_target_declaration(self, env, is_assignment_expression): + self.is_target = True + if not self.entry: + if is_assignment_expression: + self.entry = env.lookup_assignment_expression_target(self.name) + else: + self.entry = env.lookup_here(self.name) + if self.entry: + self.entry.known_standard_library_import = "" # already exists somewhere and so is now ambiguous + if not self.entry and self.annotation is not None: + # name : type = ... + is_dataclass = env.is_c_dataclass_scope + # In a dataclass, an assignment should not prevent a name from becoming an instance attribute. + # Hence, "as_target = not is_dataclass". + self.declare_from_annotation(env, as_target=not is_dataclass) + elif (self.entry and self.entry.is_inherited and + self.annotation and env.is_c_dataclass_scope): + error(self.pos, "Cannot redeclare inherited fields in Cython dataclasses") + if not self.entry: + if env.directives['warn.undeclared']: + warning(self.pos, "implicit declaration of '%s'" % self.name, 1) + if env.directives['infer_types'] != False: + type = unspecified_type + else: + type = py_object_type + if is_assignment_expression: + self.entry = env.declare_assignment_expression_target(self.name, type, self.pos) + else: + self.entry = env.declare_var(self.name, type, self.pos) + if self.entry.is_declared_generic: + self.result_ctype = py_object_type + if self.entry.as_module: + # cimported modules namespace can shadow actual variables + self.entry.is_variable = 1 + + def analyse_types(self, env): + self.initialized_check = env.directives['initializedcheck'] + entry = self.entry + if entry is None: + entry = env.lookup(self.name) + if not entry: + entry = env.declare_builtin(self.name, self.pos) + if entry and entry.is_builtin and entry.is_const: + self.is_literal = True + if not entry: + self.type = PyrexTypes.error_type + return self + self.entry = entry + entry.used = 1 + if entry.type.is_buffer: + from . 
import Buffer + Buffer.used_buffer_aux_vars(entry) + self.analyse_rvalue_entry(env) + return self + + def analyse_target_types(self, env): + self.analyse_entry(env, is_target=True) + + entry = self.entry + if entry.is_cfunction and entry.as_variable: + # FIXME: unify "is_overridable" flags below + if (entry.is_overridable or entry.type.is_overridable) or not self.is_lvalue() and entry.fused_cfunction: + # We need this for assigning to cpdef names and for the fused 'def' TreeFragment + entry = self.entry = entry.as_variable + self.type = entry.type + + if self.type.is_const: + error(self.pos, "Assignment to const '%s'" % self.name) + if not self.is_lvalue(): + error(self.pos, "Assignment to non-lvalue '%s'" % self.name) + self.type = PyrexTypes.error_type + entry.used = 1 + if entry.type.is_buffer: + from . import Buffer + Buffer.used_buffer_aux_vars(entry) + return self + + def analyse_rvalue_entry(self, env): + #print "NameNode.analyse_rvalue_entry:", self.name ### + #print "Entry:", self.entry.__dict__ ### + self.analyse_entry(env) + entry = self.entry + + if entry.is_declared_generic: + self.result_ctype = py_object_type + + if entry.is_pyglobal or entry.is_builtin: + if entry.is_builtin and entry.is_const: + self.is_temp = 0 + else: + self.is_temp = 1 + + self.is_used_as_rvalue = 1 + elif entry.type.is_memoryviewslice: + self.is_temp = False + self.is_used_as_rvalue = True + self.use_managed_ref = True + return self + + def nogil_check(self, env): + self.nogil = True + if self.is_used_as_rvalue: + entry = self.entry + if entry.is_builtin: + if not entry.is_const: # cached builtins are ok + self.gil_error() + elif entry.is_pyglobal: + self.gil_error() + + gil_message = "Accessing Python global or builtin" + + def analyse_entry(self, env, is_target=False): + #print "NameNode.analyse_entry:", self.name ### + self.check_identifier_kind() + entry = self.entry + type = entry.type + if (not is_target and type.is_pyobject and self.inferred_type and + self.inferred_type.is_builtin_type): + # assume that type inference is smarter than the static entry + type = self.inferred_type + self.type = type + + def check_identifier_kind(self): + # Check that this is an appropriate kind of name for use in an + # expression. Also finds the variable entry associated with + # an extension type. + entry = self.entry + if entry.is_type and entry.type.is_extension_type: + self.type_entry = entry + if entry.is_type and (entry.type.is_enum or entry.type.is_cpp_enum): + py_entry = Symtab.Entry(self.name, None, py_object_type) + py_entry.is_pyglobal = True + py_entry.scope = self.entry.scope + self.entry = py_entry + elif not (entry.is_const or entry.is_variable or + entry.is_builtin or entry.is_cfunction or + entry.is_cpp_class): + if self.entry.as_variable: + self.entry = self.entry.as_variable + elif not self.is_cython_module: + error(self.pos, "'%s' is not a constant, variable or function identifier" % self.name) + + def is_cimported_module_without_shadow(self, env): + if self.is_cython_module or self.cython_attribute: + return False + entry = self.entry or env.lookup(self.name) + return entry.as_module and not entry.is_variable + + def is_simple(self): + # If it's not a C variable, it'll be in a temp. 
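+ # [Editor's aside, not upstream code: nogil_check() above flags only
+ # those name lookups that actually need the GIL; cached (const)
+ # builtins pass. The rule as a commented predicate, hypothetical
+ # names:]
+ #
+ #     def needs_gil(is_rvalue, is_builtin, is_const, is_pyglobal):
+ #         if not is_rvalue:
+ #             return False
+ #         if is_builtin:
+ #             return not is_const      # cached builtins are safe
+ #         return is_pyglobal           # globals need a dict lookup
+ #
+ #     assert not needs_gil(True, True, True, False)   # cached builtin
+ #     assert needs_gil(True, False, False, True)      # python global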
+ return 1 + + def may_be_none(self): + if self.cf_state and self.type and (self.type.is_pyobject or + self.type.is_memoryviewslice): + # guard against infinite recursion on self-dependencies + if getattr(self, '_none_checking', False): + # self-dependency - either this node receives a None + # value from *another* node, or it can not reference + # None at this point => safe to assume "not None" + return False + self._none_checking = True + # evaluate control flow state to see if there were any + # potential None values assigned to the node so far + may_be_none = False + for assignment in self.cf_state: + if assignment.rhs.may_be_none(): + may_be_none = True + break + del self._none_checking + return may_be_none + return super(NameNode, self).may_be_none() + + def nonlocally_immutable(self): + if ExprNode.nonlocally_immutable(self): + return True + entry = self.entry + if not entry or entry.in_closure: + return False + return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly + + def calculate_target_results(self, env): + pass + + def check_const(self): + entry = self.entry + if entry is not None and not ( + entry.is_const or + entry.is_cfunction or + entry.is_builtin or + entry.type.is_const): + self.not_const() + return False + return True + + def check_const_addr(self): + entry = self.entry + if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin): + self.addr_not_const() + return False + return True + + def is_lvalue(self): + return ( + self.entry.is_variable and + not self.entry.is_readonly + ) or ( + self.entry.is_cfunction and + self.entry.is_overridable + ) + + def is_addressable(self): + return self.entry.is_variable and not self.type.is_memoryviewslice + + def is_ephemeral(self): + # Name nodes are never ephemeral, even if the + # result is in a temporary. 
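+        # The variable binding itself keeps the referenced object alive,
+        # so a name's result never dangles the way an interior pointer
+        # borrowed from a temporary can (contrast is_ephemeral() on the
+        # indexing nodes further down).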
+ return 0 + + def calculate_result_code(self): + entry = self.entry + if not entry: + return "" # There was an error earlier + if self.entry.is_cpp_optional and not self.is_target: + return "(*%s)" % entry.cname + return entry.cname + + def generate_result_code(self, code): + entry = self.entry + if entry is None: + return # There was an error earlier + if entry.utility_code: + code.globalstate.use_utility_code(entry.utility_code) + if entry.is_builtin and entry.is_const: + return # Lookup already cached + elif entry.is_pyclass_attr: + assert entry.type.is_pyobject, "Python global or builtin not a Python object" + interned_cname = code.intern_identifier(self.entry.name) + if entry.is_builtin: + namespace = Naming.builtins_cname + else: # entry.is_pyglobal + namespace = entry.scope.namespace_cname + if not self.cf_is_null: + code.putln( + '%s = PyObject_GetItem(%s, %s);' % ( + self.result(), + namespace, + interned_cname)) + code.putln('if (unlikely(!%s)) {' % self.result()) + code.putln('PyErr_Clear();') + code.globalstate.use_utility_code( + UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c")) + code.putln( + '__Pyx_GetModuleGlobalName(%s, %s);' % ( + self.result(), + interned_cname)) + if not self.cf_is_null: + code.putln("}") + code.putln(code.error_goto_if_null(self.result(), self.pos)) + self.generate_gotref(code) + + elif entry.is_builtin and not entry.scope.is_module_scope: + # known builtin + assert entry.type.is_pyobject, "Python global or builtin not a Python object" + interned_cname = code.intern_identifier(self.entry.name) + code.globalstate.use_utility_code( + UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c")) + code.putln( + '%s = __Pyx_GetBuiltinName(%s); %s' % ( + self.result(), + interned_cname, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope): + # name in class body, global name or unknown builtin + assert entry.type.is_pyobject, "Python global or builtin not a Python object" + interned_cname = code.intern_identifier(self.entry.name) + if entry.scope.is_module_scope: + code.globalstate.use_utility_code( + UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c")) + code.putln( + '__Pyx_GetModuleGlobalName(%s, %s); %s' % ( + self.result(), + interned_cname, + code.error_goto_if_null(self.result(), self.pos))) + else: + # FIXME: is_pyglobal is also used for class namespace + code.globalstate.use_utility_code( + UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c")) + code.putln( + '__Pyx_GetNameInClass(%s, %s, %s); %s' % ( + self.result(), + entry.scope.namespace_cname, + interned_cname, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice: + # Raise UnboundLocalError for objects and memoryviewslices + raise_unbound = ( + (self.cf_maybe_null or self.cf_is_null) and not self.allow_null) + + memslice_check = entry.type.is_memoryviewslice and self.initialized_check + optional_cpp_check = entry.is_cpp_optional and self.initialized_check + + if optional_cpp_check: + unbound_check_code = entry.type.cpp_optional_check_for_null_code(entry.cname) + else: + unbound_check_code = entry.type.check_for_null_code(entry.cname) + + if unbound_check_code and raise_unbound and (entry.type.is_pyobject or memslice_check or optional_cpp_check): + code.put_error_if_unbound(self.pos, entry, self.in_nogil_context, 
unbound_check_code=unbound_check_code) + + elif entry.is_cglobal and entry.is_cpp_optional and self.initialized_check: + unbound_check_code = entry.type.cpp_optional_check_for_null_code(entry.cname) + code.put_error_if_unbound(self.pos, entry, unbound_check_code=unbound_check_code) + + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): + #print "NameNode.generate_assignment_code:", self.name ### + entry = self.entry + if entry is None: + return # There was an error earlier + + if (self.entry.type.is_ptr and isinstance(rhs, ListNode) + and not self.lhs_of_first_assignment and not rhs.in_module_scope): + error(self.pos, "Literal list must be assigned to pointer at time of declaration") + + # is_pyglobal seems to be True for module level-globals only. + # We use this to access class->tp_dict if necessary. + if entry.is_pyglobal: + assert entry.type.is_pyobject, "Python global or builtin not a Python object" + interned_cname = code.intern_identifier(self.entry.name) + namespace = self.entry.scope.namespace_cname + if entry.is_member: + # if the entry is a member we have to cheat: SetAttr does not work + # on types, so we create a descriptor which is then added to tp_dict. + setter = '__Pyx_SetItemOnTypeDict' + elif entry.scope.is_module_scope: + setter = 'PyDict_SetItem' + namespace = Naming.moddict_cname + elif entry.is_pyclass_attr: + # Special-case setting __new__ + n = "SetNewInClass" if self.name == "__new__" else "SetNameInClass" + code.globalstate.use_utility_code(UtilityCode.load_cached(n, "ObjectHandling.c")) + setter = '__Pyx_' + n + else: + assert False, repr(entry) + code.put_error_if_neg( + self.pos, + '%s(%s, %s, %s)' % ( + setter, + namespace, + interned_cname, + rhs.py_result())) + if debug_disposal_code: + print("NameNode.generate_assignment_code:") + print("...generating disposal code for %s" % rhs) + rhs.generate_disposal_code(code) + rhs.free_temps(code) + if entry.is_member: + # in Py2.6+, we need to invalidate the method cache + code.putln("PyType_Modified(%s);" % + entry.scope.parent_type.typeptr_cname) + else: + if self.type.is_memoryviewslice: + self.generate_acquire_memoryviewslice(rhs, code) + + elif self.type.is_buffer: + # Generate code for doing the buffer release/acquisition. + # This might raise an exception in which case the assignment (done + # below) will not happen. + # + # The reason this is not in a typetest-like node is because the + # variables that the acquired buffer info is stored to is allocated + # per entry and coupled with it. 
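+                # e.g. rebinding a "cdef object[int] buf" variable must release
+                # the buffer info currently held for the entry and re-acquire it
+                # from the RHS; since acquisition can raise, it happens before
+                # the plain object assignment below is emitted.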
+ self.generate_acquire_buffer(rhs, code) + assigned = False + if self.type.is_pyobject: + #print "NameNode.generate_assignment_code: to", self.name ### + #print "...from", rhs ### + #print "...LHS type", self.type, "ctype", self.ctype() ### + #print "...RHS type", rhs.type, "ctype", rhs.ctype() ### + if self.use_managed_ref: + rhs.make_owned_reference(code) + is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure + if is_external_ref: + self.generate_gotref(code, handle_null=True) + assigned = True + if entry.is_cglobal: + self.generate_decref_set(code, rhs.result_as(self.ctype())) + else: + if not self.cf_is_null: + if self.cf_maybe_null: + self.generate_xdecref_set(code, rhs.result_as(self.ctype())) + else: + self.generate_decref_set(code, rhs.result_as(self.ctype())) + else: + assigned = False + if is_external_ref: + rhs.generate_giveref(code) + if not self.type.is_memoryviewslice: + if not assigned: + if overloaded_assignment: + result = rhs.move_result_rhs() + if exception_check == '+': + translate_cpp_exception( + code, self.pos, + '%s = %s;' % (self.result(), result), + self.result() if self.type.is_pyobject else None, + exception_value, self.in_nogil_context) + else: + code.putln('%s = %s;' % (self.result(), result)) + else: + result = rhs.move_result_rhs_as(self.ctype()) + + if is_pythran_expr(self.type): + code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result)) + elif result != self.result(): + code.putln('%s = %s;' % (self.result(), result)) + if debug_disposal_code: + print("NameNode.generate_assignment_code:") + print("...generating post-assignment code for %s" % rhs) + rhs.generate_post_assignment_code(code) + elif rhs.result_in_temp(): + rhs.generate_post_assignment_code(code) + + rhs.free_temps(code) + + def generate_acquire_memoryviewslice(self, rhs, code): + """ + Slices, coercions from objects, return values etc are new references. + We have a borrowed reference in case of dst = src + """ + from . import MemoryView + + MemoryView.put_acquire_memoryviewslice( + lhs_cname=self.result(), + lhs_type=self.type, + lhs_pos=self.pos, + rhs=rhs, + code=code, + have_gil=not self.in_nogil_context, + first_assignment=self.cf_is_null) + + def generate_acquire_buffer(self, rhs, code): + # rhstmp is only used in case the rhs is a complicated expression leading to + # the object, to avoid repeating the same C expression for every reference + # to the rhs. It does NOT hold a reference. + pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp + if pretty_rhs: + rhstmp = rhs.result_as(self.ctype()) + else: + rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False) + code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype()))) + + from . 
import Buffer + Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry, + is_initialized=not self.lhs_of_first_assignment, + pos=self.pos, code=code) + + if not pretty_rhs: + code.putln("%s = 0;" % rhstmp) + code.funcstate.release_temp(rhstmp) + + def generate_deletion_code(self, code, ignore_nonexisting=False): + if self.entry is None: + return # There was an error earlier + elif self.entry.is_pyclass_attr: + namespace = self.entry.scope.namespace_cname + interned_cname = code.intern_identifier(self.entry.name) + if ignore_nonexisting: + key_error_code = 'PyErr_Clear(); else' + else: + # minor hack: fake a NameError on KeyError + key_error_code = ( + '{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' % + self.entry.name) + code.putln( + 'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {' + ' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s' + ' %s ' + '}' % (namespace, interned_cname, + key_error_code, + code.error_goto(self.pos))) + elif self.entry.is_pyglobal: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c")) + interned_cname = code.intern_identifier(self.entry.name) + del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % ( + Naming.module_cname, interned_cname) + if ignore_nonexisting: + code.putln( + 'if (unlikely(%s < 0)) {' + ' if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s ' + '}' % (del_code, code.error_goto(self.pos))) + else: + code.put_error_if_neg(self.pos, del_code) + elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice: + if not self.cf_is_null: + if self.cf_maybe_null and not ignore_nonexisting: + code.put_error_if_unbound(self.pos, self.entry) + + if self.entry.in_closure: + # generator + self.generate_gotref(code, handle_null=True, maybe_null_extra_check=ignore_nonexisting) + if ignore_nonexisting and self.cf_maybe_null: + code.put_xdecref_clear(self.result(), self.ctype(), + have_gil=not self.nogil) + else: + code.put_decref_clear(self.result(), self.ctype(), + have_gil=not self.nogil) + else: + error(self.pos, "Deletion of C names not supported") + + def annotate(self, code): + if getattr(self, 'is_called', False): + pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1) + if self.type.is_pyobject: + style, text = 'py_call', 'python function (%s)' + else: + style, text = 'c_call', 'c function (%s)' + code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name))) + + def get_known_standard_library_import(self): + if self.entry: + return self.entry.known_standard_library_import + return None + +class BackquoteNode(ExprNode): + # `expr` + # + # arg ExprNode + + type = py_object_type + + subexprs = ['arg'] + + def analyse_types(self, env): + self.arg = self.arg.analyse_types(env) + self.arg = self.arg.coerce_to_pyobject(env) + self.is_temp = 1 + return self + + gil_message = "Backquote expression" + + def calculate_constant_result(self): + self.constant_result = repr(self.arg.constant_result) + + def generate_result_code(self, code): + code.putln( + "%s = PyObject_Repr(%s); %s" % ( + self.result(), + self.arg.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class ImportNode(ExprNode): + # Used as part of import statement implementation. + # Implements result = + # __import__(module_name, globals(), None, name_list, level) + # + # module_name StringNode dotted name of module. 
Empty module + # name means importing the parent package according + # to level + # name_list ListNode or None list of names to be imported + # level int relative import level: + # -1: attempt both relative import and absolute import; + # 0: absolute import; + # >0: the number of parent directories to search + # relative to the current module. + # None: decide the level according to language level and + # directives + # get_top_level_module int true: return top-level module, false: return imported module + # module_names TupleNode the separate names of the module and submodules, or None + + type = py_object_type + module_names = None + get_top_level_module = False + is_temp = True + + subexprs = ['module_name', 'name_list', 'module_names'] + + def analyse_types(self, env): + if self.level is None: + # For modules in packages, and without 'absolute_import' enabled, try relative (Py2) import first. + if env.global_scope().parent_module and ( + env.directives['py2_import'] or + Future.absolute_import not in env.global_scope().context.future_directives): + self.level = -1 + else: + self.level = 0 + module_name = self.module_name.analyse_types(env) + self.module_name = module_name.coerce_to_pyobject(env) + assert self.module_name.is_string_literal + if self.name_list: + name_list = self.name_list.analyse_types(env) + self.name_list = name_list.coerce_to_pyobject(env) + elif '.' in self.module_name.value: + self.module_names = TupleNode(self.module_name.pos, args=[ + IdentifierStringNode(self.module_name.pos, value=part, constant_result=part) + for part in map(StringEncoding.EncodedString, self.module_name.value.split('.')) + ]).analyse_types(env) + return self + + gil_message = "Python import" + + def generate_result_code(self, code): + assert self.module_name.is_string_literal + module_name = self.module_name.value + + if self.level <= 0 and not self.name_list and not self.get_top_level_module: + if self.module_names: + assert self.module_names.is_literal # make sure we create the tuple only once + if self.level == 0: + utility_code = UtilityCode.load_cached("ImportDottedModule", "ImportExport.c") + helper_func = "__Pyx_ImportDottedModule" + else: + utility_code = UtilityCode.load_cached("ImportDottedModuleRelFirst", "ImportExport.c") + helper_func = "__Pyx_ImportDottedModuleRelFirst" + code.globalstate.use_utility_code(utility_code) + import_code = "%s(%s, %s)" % ( + helper_func, + self.module_name.py_result(), + self.module_names.py_result() if self.module_names else 'NULL', + ) + else: + code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c")) + import_code = "__Pyx_Import(%s, %s, %d)" % ( + self.module_name.py_result(), + self.name_list.py_result() if self.name_list else '0', + self.level) + + if self.level <= 0 and module_name in utility_code_for_imports: + helper_func, code_name, code_file = utility_code_for_imports[module_name] + code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file)) + import_code = '%s(%s)' % (helper_func, import_code) + + code.putln("%s = %s; %s" % ( + self.result(), + import_code, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + def get_known_standard_library_import(self): + return self.module_name.value + + +class ScopedExprNode(ExprNode): + # Abstract base class for ExprNodes that have their own local + # scope, such as generator expressions. 
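+    # With the default Python 3 semantics, a comprehension such as
+    #
+    #     squares = [x * x for x in data]
+    #
+    # gets its own ComprehensionScope, so the loop variable "x" does not
+    # leak into the enclosing scope afterwards.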
+ # + # expr_scope Scope the inner scope of the expression + + subexprs = [] + expr_scope = None + + # does this node really have a local scope, e.g. does it leak loop + # variables or not? non-leaking Py3 behaviour is default, except + # for list comprehensions where the behaviour differs in Py2 and + # Py3 (set in Parsing.py based on parser context) + has_local_scope = True + + def init_scope(self, outer_scope, expr_scope=None): + if expr_scope is not None: + self.expr_scope = expr_scope + elif self.has_local_scope: + self.expr_scope = Symtab.ComprehensionScope(outer_scope) + elif not self.expr_scope: # don't unset if it's already been set + self.expr_scope = None + + def analyse_declarations(self, env): + self.init_scope(env) + + def analyse_scoped_declarations(self, env): + # this is called with the expr_scope as env + pass + + def analyse_types(self, env): + # no recursion here, the children will be analysed separately below + return self + + def analyse_scoped_expressions(self, env): + # this is called with the expr_scope as env + return self + + def generate_evaluation_code(self, code): + # set up local variables and free their references on exit + generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code + if not self.has_local_scope or not self.expr_scope.var_entries: + # no local variables => delegate, done + generate_inner_evaluation_code(code) + return + + code.putln('{ /* enter inner scope */') + py_entries = [] + for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]): + if not entry.in_closure: + if entry.type.is_pyobject and entry.used: + py_entries.append(entry) + if not py_entries: + # no local Python references => no cleanup required + generate_inner_evaluation_code(code) + code.putln('} /* exit inner scope */') + return + + # must free all local Python references at each exit point + old_loop_labels = code.new_loop_labels() + old_error_label = code.new_error_label() + + generate_inner_evaluation_code(code) + + # normal (non-error) exit + self._generate_vars_cleanup(code, py_entries) + + # error/loop body exit points + exit_scope = code.new_label('exit_scope') + code.put_goto(exit_scope) + for label, old_label in ([(code.error_label, old_error_label)] + + list(zip(code.get_loop_labels(), old_loop_labels))): + if code.label_used(label): + code.put_label(label) + self._generate_vars_cleanup(code, py_entries) + code.put_goto(old_label) + code.put_label(exit_scope) + code.putln('} /* exit inner scope */') + + code.set_loop_labels(old_loop_labels) + code.error_label = old_error_label + + def _generate_vars_cleanup(self, code, py_entries): + for entry in py_entries: + if entry.is_cglobal: + code.put_var_gotref(entry) + code.put_var_decref_set(entry, "Py_None") + else: + code.put_var_xdecref_clear(entry) + + +class IteratorNode(ScopedExprNode): + # Used as part of for statement implementation. 
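+    # A loop like "for item in seq: use(item)" is lowered roughly to
+    #
+    #     it = iter(seq)         # IteratorNode
+    #     while True:
+    #         item = next(it)    # NextNode, breaks on StopIteration
+    #         use(item)
+    #
+    # with fast paths taken when "seq" is known to be a list or tuple.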
+ # + # Implements result = iter(sequence) + # + # sequence ExprNode + + type = py_object_type + iter_func_ptr = None + counter_cname = None + reversed = False # currently only used for list/tuple types (see Optimize.py) + is_async = False + has_local_scope = False + + subexprs = ['sequence'] + + def analyse_types(self, env): + if self.expr_scope: + env = self.expr_scope # actually evaluate sequence in this scope instead + self.sequence = self.sequence.analyse_types(env) + if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \ + not self.sequence.type.is_string: + # C array iteration will be transformed later on + self.type = self.sequence.type + elif self.sequence.type.is_cpp_class: + return CppIteratorNode(self.pos, sequence=self.sequence).analyse_types(env) + elif self.is_reversed_cpp_iteration(): + sequence = self.sequence.arg_tuple.args[0].arg + return CppIteratorNode(self.pos, sequence=sequence, reversed=True).analyse_types(env) + else: + self.sequence = self.sequence.coerce_to_pyobject(env) + if self.sequence.type in (list_type, tuple_type): + self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable") + self.is_temp = 1 + return self + + gil_message = "Iterating over Python object" + + _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType( + PyrexTypes.py_object_type, [ + PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None), + ])) + + def is_reversed_cpp_iteration(self): + """ + Returns True if the 'reversed' function is applied to a C++ iterable. + + This supports C++ classes with reverse_iterator implemented. + """ + if not (isinstance(self.sequence, SimpleCallNode) and + self.sequence.arg_tuple and len(self.sequence.arg_tuple.args) == 1): + return False + func = self.sequence.function + if func.is_name and func.name == "reversed": + if not func.entry.is_builtin: + return False + arg = self.sequence.arg_tuple.args[0] + if isinstance(arg, CoercionNode) and arg.arg.is_name: + arg = arg.arg.entry + return arg.type.is_cpp_class + return False + + def type_dependencies(self, env): + return self.sequence.type_dependencies(self.expr_scope or env) + + def infer_type(self, env): + sequence_type = self.sequence.infer_type(env) + if sequence_type.is_array or sequence_type.is_ptr: + return sequence_type + elif sequence_type.is_cpp_class: + begin = sequence_type.scope.lookup("begin") + if begin is not None: + return begin.type.return_type + elif sequence_type.is_pyobject: + return sequence_type + return py_object_type + + def generate_result_code(self, code): + sequence_type = self.sequence.type + if sequence_type.is_cpp_class: + assert False, "Should have been changed to CppIteratorNode" + if sequence_type.is_array or sequence_type.is_ptr: + raise InternalError("for in carray slice not transformed") + + is_builtin_sequence = sequence_type in (list_type, tuple_type) + if not is_builtin_sequence: + # reversed() not currently optimised (see Optimize.py) + assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects" + self.may_be_a_sequence = not sequence_type.is_builtin_type + if self.may_be_a_sequence: + code.putln( + "if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % ( + self.sequence.py_result(), + self.sequence.py_result())) + + if is_builtin_sequence or self.may_be_a_sequence: + code.putln("%s = %s; __Pyx_INCREF(%s);" % ( + self.result(), + self.sequence.py_result(), + self.result(), + )) + self.counter_cname = code.funcstate.allocate_temp( + PyrexTypes.c_py_ssize_t_type, 
manage_ref=False) + if self.reversed: + if sequence_type is list_type: + len_func = '__Pyx_PyList_GET_SIZE' + else: + len_func = '__Pyx_PyTuple_GET_SIZE' + code.putln("%s = %s(%s);" % (self.counter_cname, len_func, self.result())) + code.putln("#if !CYTHON_ASSUME_SAFE_MACROS") + code.putln(code.error_goto_if_neg(self.counter_cname, self.pos)) + code.putln("#endif") + code.putln("--%s;" % self.counter_cname) # len -> last item + else: + code.putln("%s = 0;" % self.counter_cname) + + if not is_builtin_sequence: + self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False) + if self.may_be_a_sequence: + code.putln("%s = NULL;" % self.iter_func_ptr) + code.putln("} else {") + code.put("%s = -1; " % self.counter_cname) + + code.putln("%s = PyObject_GetIter(%s); %s" % ( + self.result(), + self.sequence.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + # PyObject_GetIter() fails if "tp_iternext" is not set, but the check below + # makes it visible to the C compiler that the pointer really isn't NULL, so that + # it can distinguish between the special cases and the generic case + code.putln("%s = __Pyx_PyObject_GetIterNextFunc(%s); %s" % ( + self.iter_func_ptr, self.py_result(), + code.error_goto_if_null(self.iter_func_ptr, self.pos))) + if self.may_be_a_sequence: + code.putln("}") + + def generate_next_sequence_item(self, test_name, result_name, code): + assert self.counter_cname, "internal error: counter_cname temp not prepared" + assert test_name in ('List', 'Tuple') + + final_size = '__Pyx_Py%s_GET_SIZE(%s)' % (test_name, self.py_result()) + size_is_safe = False + if self.sequence.is_sequence_constructor: + item_count = len(self.sequence.args) + if self.sequence.mult_factor is None: + final_size = item_count + size_is_safe = True + elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types): + final_size = item_count * self.sequence.mult_factor.constant_result + size_is_safe = True + + if size_is_safe: + code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size)) + else: + code.putln("{") + code.putln("Py_ssize_t %s = %s;" % (Naming.quick_temp_cname, final_size)) + code.putln("#if !CYTHON_ASSUME_SAFE_MACROS") + code.putln(code.error_goto_if_neg(Naming.quick_temp_cname, self.pos)) + code.putln("#endif") + code.putln("if (%s >= %s) break;" % (self.counter_cname, Naming.quick_temp_cname)) + code.putln("}") + + if self.reversed: + inc_dec = '--' + else: + inc_dec = '++' + code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS") + code.putln( + "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % ( + result_name, + test_name, + self.py_result(), + self.counter_cname, + result_name, + self.counter_cname, + inc_dec, + # use the error label to avoid C compiler warnings if we only use it below + code.error_goto_if_neg('0', self.pos) + )) + code.putln("#else") + code.putln( + "%s = __Pyx_PySequence_ITEM(%s, %s); %s%s; %s" % ( + result_name, + self.py_result(), + self.counter_cname, + self.counter_cname, + inc_dec, + code.error_goto_if_null(result_name, self.pos))) + code.put_gotref(result_name, py_object_type) + code.putln("#endif") + + def generate_iter_next_result_code(self, result_name, code): + sequence_type = self.sequence.type + if self.reversed: + code.putln("if (%s < 0) break;" % self.counter_cname) + if sequence_type is list_type: + self.generate_next_sequence_item('List', result_name, code) + return + elif sequence_type is tuple_type: + 
self.generate_next_sequence_item('Tuple', result_name, code) + return + + if self.may_be_a_sequence: + code.putln("if (likely(!%s)) {" % self.iter_func_ptr) + code.putln("if (likely(PyList_CheckExact(%s))) {" % self.py_result()) + self.generate_next_sequence_item('List', result_name, code) + code.putln("} else {") + self.generate_next_sequence_item('Tuple', result_name, code) + code.putln("}") + code.put("} else ") + + code.putln("{") + code.putln( + "%s = %s(%s);" % ( + result_name, + self.iter_func_ptr, + self.py_result())) + code.putln("if (unlikely(!%s)) {" % result_name) + code.putln("PyObject* exc_type = PyErr_Occurred();") + code.putln("if (exc_type) {") + code.putln("if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();") + code.putln("else %s" % code.error_goto(self.pos)) + code.putln("}") + code.putln("break;") + code.putln("}") + code.put_gotref(result_name, py_object_type) + code.putln("}") + + def free_temps(self, code): + if self.counter_cname: + code.funcstate.release_temp(self.counter_cname) + if self.iter_func_ptr: + code.funcstate.release_temp(self.iter_func_ptr) + self.iter_func_ptr = None + ExprNode.free_temps(self, code) + + +class CppIteratorNode(ExprNode): + # Iteration over a C++ container. + # Created at the analyse_types stage by IteratorNode + cpp_sequence_cname = None + cpp_attribute_op = "." + extra_dereference = "" + is_temp = True + reversed = False + + subexprs = ['sequence'] + + def get_iterator_func_names(self): + return ("begin", "end") if not self.reversed else ("rbegin", "rend") + + def analyse_types(self, env): + sequence_type = self.sequence.type + if sequence_type.is_ptr: + sequence_type = sequence_type.base_type + begin_name, end_name = self.get_iterator_func_names() + begin = sequence_type.scope.lookup(begin_name) + end = sequence_type.scope.lookup(end_name) + if (begin is None + or not begin.type.is_cfunction + or begin.type.args): + error(self.pos, "missing %s() on %s" % (begin_name, self.sequence.type)) + self.type = error_type + return self + if (end is None + or not end.type.is_cfunction + or end.type.args): + error(self.pos, "missing %s() on %s" % (end_name, self.sequence.type)) + self.type = error_type + return self + iter_type = begin.type.return_type + if iter_type.is_cpp_class: + if env.directives['cpp_locals']: + self.extra_dereference = "*" + if env.lookup_operator_for_types( + self.pos, + "!=", + [iter_type, end.type.return_type]) is None: + error(self.pos, "missing operator!= on result of %s() on %s" % (begin_name, self.sequence.type)) + self.type = error_type + return self + if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None: + error(self.pos, "missing operator++ on result of %s() on %s" % (begin_name, self.sequence.type)) + self.type = error_type + return self + if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None: + error(self.pos, "missing operator* on result of %s() on %s" % (begin_name, self.sequence.type)) + self.type = error_type + return self + self.type = iter_type + elif iter_type.is_ptr: + if not (iter_type == end.type.return_type): + error(self.pos, "incompatible types for %s() and %s()" % (begin_name, end_name)) + self.type = iter_type + else: + error(self.pos, "result type of %s() on %s must be a C++ class or pointer" % (begin_name, self.sequence.type)) + self.type = error_type + return self + + def generate_result_code(self, code): + sequence_type = self.sequence.type + begin_name, _ = self.get_iterator_func_names() + # essentially 3 options: + 
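+        # (1) a simple sequence (e.g. a plain name) gets begin() called on
+        # it directly, while (2) references and (3) rvalues first go
+        # through a temp: a pointer temp for references, a value temp for
+        # anything that comes from a function call or similar.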
+        if self.sequence.is_simple():
+            # 1) Sequence can be accessed directly, like a name;
+            #    assigning to it may break the container, but that's the
+            #    responsibility of the user
+            code.putln("%s = %s%s%s();" % (
+                self.result(),
+                self.sequence.result(),
+                self.cpp_attribute_op,
+                begin_name))
+        else:
+            # (while it'd be nice to limit the scope of the loop temp, it's essentially
+            # impossible to do while supporting generators)
+            temp_type = sequence_type
+            if temp_type.is_reference:
+                # 2) Sequence is a reference (often obtained by dereferencing a pointer);
+                #    make the temp a pointer so we are not sensitive to users reassigning
+                #    the pointer it came from
+                temp_type = PyrexTypes.CPtrType(sequence_type.ref_base_type)
+            if temp_type.is_ptr or code.globalstate.directives['cpp_locals']:
+                self.cpp_attribute_op = "->"
+            # 3) (otherwise) sequence comes from a function call or similar, so we must
+            #    create a temp to store it in
+            self.cpp_sequence_cname = code.funcstate.allocate_temp(temp_type, manage_ref=False)
+            code.putln("%s = %s%s;" % (self.cpp_sequence_cname,
+                                       "&" if temp_type.is_ptr else "",
+                                       self.sequence.move_result_rhs()))
+            code.putln("%s = %s%s%s();" % (
+                self.result(),
+                self.cpp_sequence_cname,
+                self.cpp_attribute_op,
+                begin_name))
+
+    def generate_iter_next_result_code(self, result_name, code):
+        # end call isn't cached to support containers that allow adding while iterating
+        # (much as this is usually a bad idea)
+        _, end_name = self.get_iterator_func_names()
+        code.putln("if (!(%s%s != %s%s%s())) break;" % (
+            self.extra_dereference,
+            self.result(),
+            self.cpp_sequence_cname or self.sequence.result(),
+            self.cpp_attribute_op,
+            end_name))
+        code.putln("%s = *%s%s;" % (
+            result_name,
+            self.extra_dereference,
+            self.result()))
+        code.putln("++%s%s;" % (self.extra_dereference, self.result()))
+
+    def generate_subexpr_disposal_code(self, code):
+        if not self.cpp_sequence_cname:
+            # the sequence is accessed directly, so any temporary result in its
+            # subexpressions must remain available until the iterator is no longer needed
+            return
+        ExprNode.generate_subexpr_disposal_code(self, code)
+
+    def free_subexpr_temps(self, code):
+        if not self.cpp_sequence_cname:
+            # the sequence is accessed directly, so any temporary result in its
+            # subexpressions must remain available until the iterator is no longer needed
+            return
+        ExprNode.free_subexpr_temps(self, code)
+
+    def generate_disposal_code(self, code):
+        if not self.cpp_sequence_cname:
+            # postponed from CppIteratorNode.generate_subexpr_disposal_code
+            # and CppIteratorNode.free_subexpr_temps
+            ExprNode.generate_subexpr_disposal_code(self, code)
+            ExprNode.free_subexpr_temps(self, code)
+        ExprNode.generate_disposal_code(self, code)
+
+    def free_temps(self, code):
+        if self.cpp_sequence_cname:
+            code.funcstate.release_temp(self.cpp_sequence_cname)
+        # skip over IteratorNode since we don't use any of the temps it does
+        ExprNode.free_temps(self, code)
+
+
+class NextNode(AtomicExprNode):
+    # Used as part of for statement implementation.
+    # Implements result = next(iterator)
+    # Created during analyse_types phase.
+    # The iterator is not owned by this node.
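+    # Code generation is delegated back to the iterator, so the list and
+    # tuple fast paths, CppIteratorNode and the generic tp_iternext call
+    # all share this one node; item type inference likewise reuses the
+    # IndexNode logic via a fake "sequence[PY_SSIZE_T_MAX]" lookup
+    # (see infer_type below).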
+ # + # iterator IteratorNode + + def __init__(self, iterator): + AtomicExprNode.__init__(self, iterator.pos) + self.iterator = iterator + + def nogil_check(self, env): + # ignore - errors (if any) are already handled by IteratorNode + pass + + def type_dependencies(self, env): + return self.iterator.type_dependencies(env) + + def infer_type(self, env, iterator_type=None): + if iterator_type is None: + iterator_type = self.iterator.infer_type(env) + if iterator_type.is_ptr or iterator_type.is_array: + return iterator_type.base_type + elif iterator_type.is_cpp_class: + item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type + item_type = PyrexTypes.remove_cv_ref(item_type, remove_fakeref=True) + return item_type + else: + # Avoid duplication of complicated logic. + fake_index_node = IndexNode( + self.pos, + base=self.iterator.sequence, + index=IntNode(self.pos, value='PY_SSIZE_T_MAX', + type=PyrexTypes.c_py_ssize_t_type)) + return fake_index_node.infer_type(env) + + def analyse_types(self, env): + self.type = self.infer_type(env, self.iterator.type) + self.is_temp = 1 + return self + + def generate_result_code(self, code): + self.iterator.generate_iter_next_result_code(self.result(), code) + + +class AsyncIteratorNode(ScopedExprNode): + # Used as part of 'async for' statement implementation. + # + # Implements result = sequence.__aiter__() + # + # sequence ExprNode + + subexprs = ['sequence'] + + is_async = True + type = py_object_type + is_temp = 1 + has_local_scope = False + + def infer_type(self, env): + return py_object_type + + def analyse_types(self, env): + if self.expr_scope: + env = self.expr_scope + self.sequence = self.sequence.analyse_types(env) + if not self.sequence.type.is_pyobject: + error(self.pos, "async for loops not allowed on C/C++ types") + self.sequence = self.sequence.coerce_to_pyobject(env) + return self + + def generate_result_code(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c")) + code.putln("%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % ( + self.result(), + self.sequence.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class AsyncNextNode(AtomicExprNode): + # Used as part of 'async for' statement implementation. + # Implements result = iterator.__anext__() + # Created during analyse_types phase. + # The iterator is not owned by this node. + # + # iterator IteratorNode + + type = py_object_type + is_temp = 1 + + def __init__(self, iterator): + AtomicExprNode.__init__(self, iterator.pos) + self.iterator = iterator + + def infer_type(self, env): + return py_object_type + + def analyse_types(self, env): + return self + + def generate_result_code(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c")) + code.putln("%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % ( + self.result(), + self.iterator.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class WithExitCallNode(ExprNode): + # The __exit__() call of a 'with' statement. Used in both the + # except and finally clauses. 
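+    # Following PEP 343, the statement is expanded roughly as
+    #
+    #     exit = type(mgr).__exit__
+    #     value = type(mgr).__enter__(mgr)
+    #     try:
+    #         BODY
+    #     except:
+    #         if not exit(mgr, *sys.exc_info()):   # <- this node
+    #             raise
+    #     else:
+    #         exit(mgr, None, None, None)          # <- this node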
+ + # with_stat WithStatNode the surrounding 'with' statement + # args TupleNode or ResultStatNode the exception info tuple + # await_expr AwaitExprNode the await expression of an 'async with' statement + + subexprs = ['args', 'await_expr'] + test_if_run = True + await_expr = None + + def analyse_types(self, env): + self.args = self.args.analyse_types(env) + if self.await_expr: + self.await_expr = self.await_expr.analyse_types(env) + self.type = PyrexTypes.c_bint_type + self.is_temp = True + return self + + def generate_evaluation_code(self, code): + if self.test_if_run: + # call only if it was not already called (and decref-cleared) + code.putln("if (%s) {" % self.with_stat.exit_var) + + self.args.generate_evaluation_code(code) + result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False) + + code.mark_pos(self.pos) + code.globalstate.use_utility_code(UtilityCode.load_cached( + "PyObjectCall", "ObjectHandling.c")) + code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % ( + result_var, + self.with_stat.exit_var, + self.args.result())) + code.put_decref_clear(self.with_stat.exit_var, type=py_object_type) + self.args.generate_disposal_code(code) + self.args.free_temps(code) + + code.putln(code.error_goto_if_null(result_var, self.pos)) + code.put_gotref(result_var, py_object_type) + + if self.await_expr: + # FIXME: result_var temp currently leaks into the closure + self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True) + code.putln("%s = %s;" % (result_var, self.await_expr.py_result())) + self.await_expr.generate_post_assignment_code(code) + self.await_expr.free_temps(code) + + if self.result_is_used: + self.allocate_temp_result(code) + code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var)) + code.put_decref_clear(result_var, type=py_object_type) + if self.result_is_used: + code.put_error_if_neg(self.pos, self.result()) + code.funcstate.release_temp(result_var) + if self.test_if_run: + code.putln("}") + + +class ExcValueNode(AtomicExprNode): + # Node created during analyse_types phase + # of an ExceptClauseNode to fetch the current + # exception value. + + type = py_object_type + + def __init__(self, pos): + ExprNode.__init__(self, pos) + + def set_var(self, var): + self.var = var + + def calculate_result_code(self): + return self.var + + def generate_result_code(self, code): + pass + + def analyse_types(self, env): + return self + + +class TempNode(ExprNode): + # Node created during analyse_types phase + # of some nodes to hold a temporary value. + # + # Note: One must call "allocate" and "release" on + # the node during code generation to get/release the temp. + # This is because the temp result is often used outside of + # the regular cycle. 
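+    # Typical usage during code generation (sketch):
+    #
+    #     tmp = TempNode(pos, py_object_type)
+    #     tmp.allocate(code)
+    #     ...                  # emit code reading/writing tmp.result()
+    #     tmp.release(code)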
+ + subexprs = [] + + def __init__(self, pos, type, env=None): + ExprNode.__init__(self, pos) + self.type = type + if type.is_pyobject: + self.result_ctype = py_object_type + self.is_temp = 1 + + def analyse_types(self, env): + return self + + def analyse_target_declaration(self, env): + self.is_target = True + + def generate_result_code(self, code): + pass + + def allocate(self, code): + self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True) + + def release(self, code): + code.funcstate.release_temp(self.temp_cname) + self.temp_cname = None + + def result(self): + try: + return self.temp_cname + except: + assert False, "Remember to call allocate/release on TempNode" + raise + + # Do not participate in normal temp alloc/dealloc: + def allocate_temp_result(self, code): + pass + + def release_temp_result(self, code): + pass + +class PyTempNode(TempNode): + # TempNode holding a Python value. + + def __init__(self, pos, env): + TempNode.__init__(self, pos, PyrexTypes.py_object_type, env) + +class RawCNameExprNode(ExprNode): + subexprs = [] + + def __init__(self, pos, type=None, cname=None): + ExprNode.__init__(self, pos, type=type) + if cname is not None: + self.cname = cname + + def analyse_types(self, env): + return self + + def set_cname(self, cname): + self.cname = cname + + def result(self): + return self.cname + + def generate_result_code(self, code): + pass + + +#------------------------------------------------------------------- +# +# F-strings +# +#------------------------------------------------------------------- + + +class JoinedStrNode(ExprNode): + # F-strings + # + # values [UnicodeNode|FormattedValueNode] Substrings of the f-string + # + type = unicode_type + is_temp = True + gil_message = "String concatenation" + + subexprs = ['values'] + + def analyse_types(self, env): + self.values = [v.analyse_types(env).coerce_to_pyobject(env) for v in self.values] + return self + + def may_be_none(self): + # PyUnicode_Join() always returns a Unicode string or raises an exception + return False + + def generate_evaluation_code(self, code): + code.mark_pos(self.pos) + num_items = len(self.values) + list_var = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + ulength_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False) + max_char_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ucs4_type, manage_ref=False) + + code.putln('%s = PyTuple_New(%s); %s' % ( + list_var, + num_items, + code.error_goto_if_null(list_var, self.pos))) + code.put_gotref(list_var, py_object_type) + code.putln("%s = 0;" % ulength_var) + code.putln("%s = 127;" % max_char_var) # at least ASCII character range + + for i, node in enumerate(self.values): + node.generate_evaluation_code(code) + node.make_owned_reference(code) + + ulength = "__Pyx_PyUnicode_GET_LENGTH(%s)" % node.py_result() + max_char_value = "__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result() + is_ascii = False + if isinstance(node, UnicodeNode): + try: + # most strings will be ASCII or at least Latin-1 + node.value.encode('iso8859-1') + max_char_value = '255' + node.value.encode('us-ascii') + is_ascii = True + except UnicodeEncodeError: + if max_char_value != '255': + # not ISO8859-1 => check BMP limit + max_char = max(map(ord, node.value)) + if max_char < 0xD800: + # BMP-only, no surrogate pairs used + max_char_value = '65535' + ulength = str(len(node.value)) + elif max_char >= 65536: + # clearly outside of BMP, and not on a 16-bit Unicode system + max_char_value = '1114111' + ulength = 
str(len(node.value)) + else: + # not really worth implementing a check for surrogate pairs here + # drawback: C code can differ when generating on Py2 with 2-byte Unicode + pass + else: + ulength = str(len(node.value)) + elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric: + is_ascii = True # formatted C numbers are always ASCII + + if not is_ascii: + code.putln("%s = (%s > %s) ? %s : %s;" % ( + max_char_var, max_char_value, max_char_var, max_char_value, max_char_var)) + code.putln("%s += %s;" % (ulength_var, ulength)) + + node.generate_giveref(code) + code.putln('PyTuple_SET_ITEM(%s, %s, %s);' % (list_var, i, node.py_result())) + node.generate_post_assignment_code(code) + node.free_temps(code) + + code.mark_pos(self.pos) + self.allocate_temp_result(code) + code.globalstate.use_utility_code(UtilityCode.load_cached("JoinPyUnicode", "StringTools.c")) + code.putln('%s = __Pyx_PyUnicode_Join(%s, %d, %s, %s); %s' % ( + self.result(), + list_var, + num_items, + ulength_var, + max_char_var, + code.error_goto_if_null(self.py_result(), self.pos))) + self.generate_gotref(code) + + code.put_decref_clear(list_var, py_object_type) + code.funcstate.release_temp(list_var) + code.funcstate.release_temp(ulength_var) + code.funcstate.release_temp(max_char_var) + + +class FormattedValueNode(ExprNode): + # {}-delimited portions of an f-string + # + # value ExprNode The expression itself + # conversion_char str or None Type conversion (!s, !r, !a, none, or 'd' for integer conversion) + # format_spec JoinedStrNode or None Format string passed to __format__ + # c_format_spec str or None If not None, formatting can be done at the C level + + subexprs = ['value', 'format_spec'] + + type = unicode_type + is_temp = True + c_format_spec = None + gil_message = "String formatting" + + find_conversion_func = { + 's': 'PyObject_Unicode', + 'r': 'PyObject_Repr', + 'a': 'PyObject_ASCII', # NOTE: mapped to PyObject_Repr() in Py2 + 'd': '__Pyx_PyNumber_IntOrLong', # NOTE: internal mapping for '%d' formatting + }.get + + def may_be_none(self): + # PyObject_Format() always returns a Unicode string or raises an exception + return False + + def analyse_types(self, env): + self.value = self.value.analyse_types(env) + if not self.format_spec or self.format_spec.is_string_literal: + c_format_spec = self.format_spec.value if self.format_spec else self.value.type.default_format_spec + if self.value.type.can_coerce_to_pystring(env, format_spec=c_format_spec): + self.c_format_spec = c_format_spec + + if self.format_spec: + self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env) + if self.c_format_spec is None: + self.value = self.value.coerce_to_pyobject(env) + if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'): + if self.value.type is unicode_type and not self.value.may_be_none(): + # value is definitely a unicode string and we don't format it any special + return self.value + return self + + def generate_result_code(self, code): + if self.c_format_spec is not None and not self.value.type.is_pyobject: + convert_func_call = self.value.type.convert_to_pystring( + self.value.result(), code, self.c_format_spec) + code.putln("%s = %s; %s" % ( + self.result(), + convert_func_call, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + return + + value_result = self.value.py_result() + value_is_unicode = self.value.type is unicode_type and not self.value.may_be_none() + if self.format_spec: + format_func = '__Pyx_PyObject_Format' + 
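+            # i.e. f"{x:{spec}}" goes through __Pyx_PyObject_Format(value,
+            # spec), while the no-spec case below uses the FormatSimple
+            # helper, which passes values that are already unicode straight
+            # through.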
format_spec = self.format_spec.py_result() + else: + # common case: expect simple Unicode pass-through if no format spec + format_func = '__Pyx_PyObject_FormatSimple' + # passing a Unicode format string in Py2 forces PyObject_Format() to also return a Unicode string + format_spec = Naming.empty_unicode + + conversion_char = self.conversion_char + if conversion_char == 's' and value_is_unicode: + # no need to pipe unicode strings through str() + conversion_char = None + + if conversion_char: + fn = self.find_conversion_func(conversion_char) + assert fn is not None, "invalid conversion character found: '%s'" % conversion_char + value_result = '%s(%s)' % (fn, value_result) + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectFormatAndDecref", "StringTools.c")) + format_func += 'AndDecref' + elif self.format_spec: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectFormat", "StringTools.c")) + else: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectFormatSimple", "StringTools.c")) + + code.putln("%s = %s(%s, %s); %s" % ( + self.result(), + format_func, + value_result, + format_spec, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +#------------------------------------------------------------------- +# +# Parallel nodes (cython.parallel.thread(savailable|id)) +# +#------------------------------------------------------------------- + +class ParallelThreadsAvailableNode(AtomicExprNode): + """ + Note: this is disabled and not a valid directive at this moment + + Implements cython.parallel.threadsavailable(). If we are called from the + sequential part of the application, we need to call omp_get_max_threads(), + and in the parallel part we can just call omp_get_num_threads() + """ + + type = PyrexTypes.c_int_type + + def analyse_types(self, env): + self.is_temp = True + # env.add_include_file("omp.h") + return self + + def generate_result_code(self, code): + code.putln("#ifdef _OPENMP") + code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" % + self.temp_code) + code.putln("else %s = omp_get_num_threads();" % self.temp_code) + code.putln("#else") + code.putln("%s = 1;" % self.temp_code) + code.putln("#endif") + + def result(self): + return self.temp_code + + +class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode): + """ + Implements cython.parallel.threadid() + """ + + type = PyrexTypes.c_int_type + + def analyse_types(self, env): + self.is_temp = True + # env.add_include_file("omp.h") + return self + + def generate_result_code(self, code): + code.putln("#ifdef _OPENMP") + code.putln("%s = omp_get_thread_num();" % self.temp_code) + code.putln("#else") + code.putln("%s = 0;" % self.temp_code) + code.putln("#endif") + + def result(self): + return self.temp_code + + +#------------------------------------------------------------------- +# +# Trailer nodes +# +#------------------------------------------------------------------- + + +class _IndexingBaseNode(ExprNode): + # Base class for indexing nodes. 
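+    # An indexing result is "ephemeral" when it merely borrows storage
+    # owned by the base object (e.g. a char* taken out of a bytes temp),
+    # so it must not be kept alive past its base; see is_ephemeral() below.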
+ # + # base ExprNode the value being indexed + + def is_ephemeral(self): + # in most cases, indexing will return a safe reference to an object in a container, + # so we consider the result safe if the base object is + return self.base.is_ephemeral() or self.base.type in ( + basestring_type, str_type, bytes_type, bytearray_type, unicode_type) + + def check_const_addr(self): + return self.base.check_const_addr() and self.index.check_const() + + def is_lvalue(self): + # NOTE: references currently have both is_reference and is_ptr + # set. Since pointers and references have different lvalue + # rules, we must be careful to separate the two. + if self.type.is_reference: + if self.type.ref_base_type.is_array: + # fixed-sized arrays aren't l-values + return False + elif self.type.is_ptr: + # non-const pointers can always be reassigned + return True + # Just about everything else returned by the index operator + # can be an lvalue. + return True + + +class IndexNode(_IndexingBaseNode): + # Sequence indexing. + # + # base ExprNode + # index ExprNode + # type_indices [PyrexType] + # + # is_fused_index boolean Whether the index is used to specialize a + # c(p)def function + + subexprs = ['base', 'index'] + type_indices = None + + is_subscript = True + is_fused_index = False + + def calculate_constant_result(self): + self.constant_result = self.base.constant_result[self.index.constant_result] + + def compile_time_value(self, denv): + base = self.base.compile_time_value(denv) + index = self.index.compile_time_value(denv) + try: + return base[index] + except Exception as e: + self.compile_time_value_error(e) + + def is_simple(self): + base = self.base + return (base.is_simple() and self.index.is_simple() + and base.type and (base.type.is_ptr or base.type.is_array)) + + def may_be_none(self): + base_type = self.base.type + if base_type: + if base_type.is_string: + return False + if isinstance(self.index, SliceNode): + # slicing! + if base_type in (bytes_type, bytearray_type, str_type, unicode_type, + basestring_type, list_type, tuple_type): + return False + return ExprNode.may_be_none(self) + + def analyse_target_declaration(self, env): + pass + + def analyse_as_type(self, env): + base_type = self.base.analyse_as_type(env) + if base_type: + if base_type.is_cpp_class or base_type.python_type_constructor_name: + if self.index.is_sequence_constructor: + template_values = self.index.args + else: + template_values = [self.index] + type_node = Nodes.TemplatedTypeNode( + pos=self.pos, + positional_args=template_values, + keyword_args=None) + return type_node.analyse(env, base_type=base_type) + elif self.index.is_slice or self.index.is_sequence_constructor: + # memory view + from . import MemoryView + env.use_utility_code(MemoryView.view_utility_code) + axes = [self.index] if self.index.is_slice else list(self.index.args) + return PyrexTypes.MemoryViewSliceType(base_type, MemoryView.get_axes_specs(env, axes)) + elif not base_type.is_pyobject: + # C array + index = self.index.compile_time_value(env) + if index is not None: + try: + index = int(index) + except (ValueError, TypeError): + pass + else: + return PyrexTypes.CArrayType(base_type, index) + error(self.pos, "Array size must be a compile time constant") + return None + + def analyse_pytyping_modifiers(self, env): + # Check for declaration modifiers, e.g. 
"typing.Optional[...]" or "dataclasses.InitVar[...]" + # TODO: somehow bring this together with TemplatedTypeNode.analyse_pytyping_modifiers() + modifiers = [] + modifier_node = self + while modifier_node.is_subscript: + modifier_type = modifier_node.base.analyse_as_type(env) + if (modifier_type and modifier_type.python_type_constructor_name + and modifier_type.modifier_name): + modifiers.append(modifier_type.modifier_name) + modifier_node = modifier_node.index + return modifiers + + def type_dependencies(self, env): + return self.base.type_dependencies(env) + self.index.type_dependencies(env) + + def infer_type(self, env): + base_type = self.base.infer_type(env) + if self.index.is_slice: + # slicing! + if base_type.is_string: + # sliced C strings must coerce to Python + return bytes_type + elif base_type.is_pyunicode_ptr: + # sliced Py_UNICODE* strings must coerce to Python + return unicode_type + elif base_type in (unicode_type, bytes_type, str_type, + bytearray_type, list_type, tuple_type): + # slicing these returns the same type + return base_type + elif base_type.is_memoryviewslice: + return base_type + else: + # TODO: Handle buffers (hopefully without too much redundancy). + return py_object_type + + index_type = self.index.infer_type(env) + if index_type and index_type.is_int or isinstance(self.index, IntNode): + # indexing! + if base_type is unicode_type: + # Py_UCS4 will automatically coerce to a unicode string + # if required, so this is safe. We only infer Py_UCS4 + # when the index is a C integer type. Otherwise, we may + # need to use normal Python item access, in which case + # it's faster to return the one-char unicode string than + # to receive it, throw it away, and potentially rebuild it + # on a subsequent PyObject coercion. + return PyrexTypes.c_py_ucs4_type + elif base_type is str_type: + # always returns str - Py2: bytes, Py3: unicode + return base_type + elif base_type is bytearray_type: + return PyrexTypes.c_uchar_type + elif isinstance(self.base, BytesNode): + #if env.global_scope().context.language_level >= 3: + # # inferring 'char' can be made to work in Python 3 mode + # return PyrexTypes.c_char_type + # Py2/3 return different types on indexing bytes objects + return py_object_type + elif base_type in (tuple_type, list_type): + # if base is a literal, take a look at its values + item_type = infer_sequence_item_type( + env, self.base, self.index, seq_type=base_type) + if item_type is not None: + return item_type + elif base_type.is_ptr or base_type.is_array: + return base_type.base_type + elif base_type.is_ctuple and isinstance(self.index, IntNode): + if self.index.has_constant_result(): + index = self.index.constant_result + if index < 0: + index += base_type.size + if 0 <= index < base_type.size: + return base_type.components[index] + elif base_type.is_memoryviewslice: + if base_type.ndim == 0: + pass # probably an error, but definitely don't know what to do - return pyobject for now + if base_type.ndim == 1: + return base_type.dtype + else: + return PyrexTypes.MemoryViewSliceType(base_type.dtype, base_type.axes[1:]) + + if self.index.is_sequence_constructor and base_type.is_memoryviewslice: + inferred_type = base_type + for a in self.index.args: + if not inferred_type.is_memoryviewslice: + break # something's gone wrong + inferred_type = IndexNode(self.pos, base=ExprNode(self.base.pos, type=inferred_type), + index=a).infer_type(env) + else: + return inferred_type + + if base_type.is_cpp_class: + class FakeOperand: + def __init__(self, **kwds): + 
self.__dict__.update(kwds) + operands = [ + FakeOperand(pos=self.pos, type=base_type), + FakeOperand(pos=self.pos, type=index_type), + ] + index_func = env.lookup_operator('[]', operands) + if index_func is not None: + return index_func.type.return_type + + if is_pythran_expr(base_type) and is_pythran_expr(index_type): + index_with_type = (self.index, index_type) + return PythranExpr(pythran_indexing_type(base_type, [index_with_type])) + + # may be slicing or indexing, we don't know + if base_type in (unicode_type, str_type): + # these types always returns their own type on Python indexing/slicing + return base_type + else: + # TODO: Handle buffers (hopefully without too much redundancy). + return py_object_type + + def analyse_types(self, env): + return self.analyse_base_and_index_types(env, getting=True) + + def analyse_target_types(self, env): + node = self.analyse_base_and_index_types(env, setting=True) + if node.type.is_const: + error(self.pos, "Assignment to const dereference") + if node is self and not node.is_lvalue(): + error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type) + return node + + def analyse_base_and_index_types(self, env, getting=False, setting=False, + analyse_base=True): + # Note: This might be cleaned up by having IndexNode + # parsed in a saner way and only construct the tuple if + # needed. + if analyse_base: + self.base = self.base.analyse_types(env) + + if self.base.type.is_error: + # Do not visit child tree if base is undeclared to avoid confusing + # error messages + self.type = PyrexTypes.error_type + return self + + is_slice = self.index.is_slice + if not env.directives['wraparound']: + if is_slice: + check_negative_indices(self.index.start, self.index.stop) + else: + check_negative_indices(self.index) + + # Potentially overflowing index value. 
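+        # (Illustrative note: an index literal too large for a C integer,
+        # e.g. "x[0xFFFFFFFFFFFFFFFF]", cannot be used as a C index, so it
+        # is coerced to a Python object and looked up via the Python
+        # protocol instead.)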
+ if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value): + self.index = self.index.coerce_to_pyobject(env) + + is_memslice = self.base.type.is_memoryviewslice + # Handle the case where base is a literal char* (and we expect a string, not an int) + if not is_memslice and (isinstance(self.base, BytesNode) or is_slice): + if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array): + self.base = self.base.coerce_to_pyobject(env) + + replacement_node = self.analyse_as_buffer_operation(env, getting) + if replacement_node is not None: + return replacement_node + + self.nogil = env.nogil + base_type = self.base.type + + if not base_type.is_cfunction: + self.index = self.index.analyse_types(env) + self.original_index_type = self.index.type + if self.original_index_type.is_reference: + self.original_index_type = self.original_index_type.ref_base_type + + if base_type.is_unicode_char: + # we infer Py_UNICODE/Py_UCS4 for unicode strings in some + # cases, but indexing must still work for them + if setting: + warning(self.pos, "cannot assign to Unicode string index", level=1) + elif self.index.constant_result in (0, -1): + # uchar[0] => uchar + return self.base + self.base = self.base.coerce_to_pyobject(env) + base_type = self.base.type + + if base_type.is_pyobject: + return self.analyse_as_pyobject(env, is_slice, getting, setting) + elif base_type.is_ptr or base_type.is_array: + return self.analyse_as_c_array(env, is_slice) + elif base_type.is_cpp_class: + return self.analyse_as_cpp(env, setting) + elif base_type.is_cfunction: + return self.analyse_as_c_function(env) + elif base_type.is_ctuple: + return self.analyse_as_c_tuple(env, getting, setting) + else: + error(self.pos, + "Attempting to index non-array type '%s'" % + base_type) + self.type = PyrexTypes.error_type + return self + + def analyse_as_pyobject(self, env, is_slice, getting, setting): + base_type = self.base.type + if self.index.type.is_unicode_char and base_type is not dict_type: + # TODO: eventually fold into case below and remove warning, once people have adapted their code + warning(self.pos, + "Item lookup of unicode character codes now always converts to a Unicode string. 
" + "Use an explicit C integer cast to get back the previous integer lookup behaviour.", level=1) + self.index = self.index.coerce_to_pyobject(env) + self.is_temp = 1 + elif self.index.type.is_int and base_type is not dict_type: + if (getting + and not env.directives['boundscheck'] + and (base_type in (list_type, tuple_type, bytearray_type)) + and (not self.index.type.signed + or not env.directives['wraparound'] + or (isinstance(self.index, IntNode) and + self.index.has_constant_result() and self.index.constant_result >= 0)) + ): + self.is_temp = 0 + else: + self.is_temp = 1 + self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env) + self.original_index_type.create_to_py_utility_code(env) + else: + self.index = self.index.coerce_to_pyobject(env) + self.is_temp = 1 + + if self.index.type.is_int and base_type is unicode_type: + # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string + # if required, so this is fast and safe + self.type = PyrexTypes.c_py_ucs4_type + elif self.index.type.is_int and base_type is bytearray_type: + if setting: + self.type = PyrexTypes.c_uchar_type + else: + # not using 'uchar' to enable fast and safe error reporting as '-1' + self.type = PyrexTypes.c_int_type + elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type): + self.type = base_type + else: + item_type = None + if base_type in (list_type, tuple_type) and self.index.type.is_int: + item_type = infer_sequence_item_type( + env, self.base, self.index, seq_type=base_type) + if base_type in (list_type, tuple_type, dict_type): + # do the None check explicitly (not in a helper) to allow optimising it away + self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable") + if item_type is None or not item_type.is_pyobject: + # Even if we inferred a C type as result, we will read a Python object, so trigger coercion if needed. + # We could potentially use "item_type.equivalent_type" here, but that may trigger assumptions + # about the actual runtime item types, rather than just their ability to coerce to the C "item_type". 
+ self.type = py_object_type + else: + self.type = item_type + + self.wrap_in_nonecheck_node(env, getting) + return self + + def analyse_as_c_array(self, env, is_slice): + base_type = self.base.type + self.type = base_type.base_type + if self.type.is_cpp_class: + self.type = PyrexTypes.CReferenceType(self.type) + if is_slice: + self.type = base_type + elif self.index.type.is_pyobject: + self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env) + elif not self.index.type.is_int: + error(self.pos, "Invalid index type '%s'" % self.index.type) + return self + + def analyse_as_cpp(self, env, setting): + base_type = self.base.type + function = env.lookup_operator("[]", [self.base, self.index]) + if function is None: + error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type)) + self.type = PyrexTypes.error_type + self.result_code = "" + return self + func_type = function.type + if func_type.is_ptr: + func_type = func_type.base_type + self.exception_check = func_type.exception_check + self.exception_value = func_type.exception_value + if self.exception_check: + if not setting: + self.is_temp = True + if needs_cpp_exception_conversion(self): + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + self.index = self.index.coerce_to(func_type.args[0].type, env) + self.type = func_type.return_type + if setting and not func_type.return_type.is_reference: + error(self.pos, "Can't set non-reference result '%s'" % self.type) + return self + + def analyse_as_c_function(self, env): + base_type = self.base.type + if base_type.is_fused: + self.parse_indexed_fused_cdef(env) + else: + self.type_indices = self.parse_index_as_types(env) + self.index = None # FIXME: use a dedicated Node class instead of generic IndexNode + if base_type.templates is None: + error(self.pos, "Can only parameterize template functions.") + self.type = error_type + elif self.type_indices is None: + # Error recorded earlier. + self.type = error_type + elif len(base_type.templates) != len(self.type_indices): + error(self.pos, "Wrong number of template arguments: expected %s, got %s" % ( + (len(base_type.templates), len(self.type_indices)))) + self.type = error_type + else: + self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices))) + # FIXME: use a dedicated Node class instead of generic IndexNode + return self + + def analyse_as_c_tuple(self, env, getting, setting): + base_type = self.base.type + if isinstance(self.index, IntNode) and self.index.has_constant_result(): + index = self.index.constant_result + if -base_type.size <= index < base_type.size: + if index < 0: + index += base_type.size + self.type = base_type.components[index] + else: + error(self.pos, + "Index %s out of bounds for '%s'" % + (index, base_type)) + self.type = PyrexTypes.error_type + return self + else: + self.base = self.base.coerce_to_pyobject(env) + return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False) + + def analyse_as_buffer_operation(self, env, getting): + """ + Analyse buffer indexing and memoryview indexing/slicing + """ + if isinstance(self.index, TupleNode): + indices = self.index.args + else: + indices = [self.index] + + base = self.base + base_type = base.type + replacement_node = None + if base_type.is_memoryviewslice: + # memoryviewslice indexing or slicing + from . 
import MemoryView + if base.is_memview_slice: + # For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed. + merged_indices = base.merged_indices(indices) + if merged_indices is not None: + base = base.base + base_type = base.type + indices = merged_indices + have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim) + if have_slices: + replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base) + else: + replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base) + elif base_type.is_buffer or base_type.is_pythran_expr: + if base_type.is_pythran_expr or len(indices) == base_type.ndim: + # Buffer indexing + is_buffer_access = True + indices = [index.analyse_types(env) for index in indices] + if base_type.is_pythran_expr: + do_replacement = all( + index.type.is_int or index.is_slice or index.type.is_pythran_expr + for index in indices) + if do_replacement: + for i,index in enumerate(indices): + if index.is_slice: + index = SliceIntNode(index.pos, start=index.start, stop=index.stop, step=index.step) + index = index.analyse_types(env) + indices[i] = index + else: + do_replacement = all(index.type.is_int for index in indices) + if do_replacement: + replacement_node = BufferIndexNode(self.pos, indices=indices, base=base) + # On cloning, indices is cloned. Otherwise, unpack index into indices. + assert not isinstance(self.index, CloneNode) + + if replacement_node is not None: + replacement_node = replacement_node.analyse_types(env, getting) + return replacement_node + + def wrap_in_nonecheck_node(self, env, getting): + if not env.directives['nonecheck'] or not self.base.may_be_none(): + return + self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable") + + def parse_index_as_types(self, env, required=True): + if isinstance(self.index, TupleNode): + indices = self.index.args + else: + indices = [self.index] + type_indices = [] + for index in indices: + type_indices.append(index.analyse_as_type(env)) + if type_indices[-1] is None: + if required: + error(index.pos, "not parsable as a type") + return None + return type_indices + + def parse_indexed_fused_cdef(self, env): + """ + Interpret fused_cdef_func[specific_type1, ...] + + Note that if this method is called, we are an indexed cdef function + with fused argument types, and this IndexNode will be replaced by the + NameNode with specific entry just after analysis of expressions by + AnalyseExpressionsTransform. 
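+
+        For example (illustrative): "fused_func[int, double]" selects the
+        "int, double" specialization of a fused cdef function at compile time.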
+ """ + self.type = PyrexTypes.error_type + + self.is_fused_index = True + + base_type = self.base.type + positions = [] + + if self.index.is_name or self.index.is_attribute: + positions.append(self.index.pos) + elif isinstance(self.index, TupleNode): + for arg in self.index.args: + positions.append(arg.pos) + specific_types = self.parse_index_as_types(env, required=False) + + if specific_types is None: + self.index = self.index.analyse_types(env) + + if not self.base.entry.as_variable: + error(self.pos, "Can only index fused functions with types") + else: + # A cpdef function indexed with Python objects + self.base.entry = self.entry = self.base.entry.as_variable + self.base.type = self.type = self.entry.type + + self.base.is_temp = True + self.is_temp = True + + self.entry.used = True + + self.is_fused_index = False + return + + for i, type in enumerate(specific_types): + specific_types[i] = type.specialize_fused(env) + + fused_types = base_type.get_fused_types() + if len(specific_types) > len(fused_types): + return error(self.pos, "Too many types specified") + elif len(specific_types) < len(fused_types): + t = fused_types[len(specific_types)] + return error(self.pos, "Not enough types specified to specialize " + "the function, %s is still fused" % t) + + # See if our index types form valid specializations + for pos, specific_type, fused_type in zip(positions, + specific_types, + fused_types): + if not any([specific_type.same_as(t) for t in fused_type.types]): + return error(pos, "Type not in fused type") + + if specific_type is None or specific_type.is_error: + return + + fused_to_specific = dict(zip(fused_types, specific_types)) + type = base_type.specialize(fused_to_specific) + + if type.is_fused: + # Only partially specific, this is invalid + error(self.pos, + "Index operation makes function only partially specific") + else: + # Fully specific, find the signature with the specialized entry + for signature in self.base.type.get_all_specialized_function_types(): + if type.same_as(signature): + self.type = signature + + if self.base.is_attribute: + # Pretend to be a normal attribute, for cdef extension + # methods + self.entry = signature.entry + self.is_attribute = True + self.obj = self.base.obj + + self.type.entry.used = True + self.base.type = signature + self.base.entry = signature.entry + + break + else: + # This is a bug + raise InternalError("Couldn't find the right signature") + + gil_message = "Indexing Python object" + + def calculate_result_code(self): + if self.base.type in (list_type, tuple_type, bytearray_type): + if self.base.type is list_type: + index_code = "PyList_GET_ITEM(%s, %s)" + elif self.base.type is tuple_type: + index_code = "PyTuple_GET_ITEM(%s, %s)" + elif self.base.type is bytearray_type: + index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))" + else: + assert False, "unexpected base type in indexing: %s" % self.base.type + elif self.base.type.is_cfunction: + return "%s<%s>" % ( + self.base.result(), + ",".join([param.empty_declaration_code() for param in self.type_indices])) + elif self.base.type.is_ctuple: + index = self.index.constant_result + if index < 0: + index += self.base.type.size + return "%s.f%s" % (self.base.result(), index) + else: + if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type: + error(self.pos, "Invalid use of pointer slice") + return + index_code = "(%s[%s])" + return index_code % (self.base.result(), self.index.result()) + + def extra_index_params(self, code): + if self.index.type.is_int: + is_list 
= self.base.type is list_type + wraparound = ( + bool(code.globalstate.directives['wraparound']) and + self.original_index_type.signed and + not (isinstance(self.index.constant_result, _py_int_types) + and self.index.constant_result >= 0)) + boundscheck = bool(code.globalstate.directives['boundscheck']) + return ", %s, %d, %s, %d, %d, %d" % ( + self.original_index_type.empty_declaration_code(), + self.original_index_type.signed and 1 or 0, + self.original_index_type.to_py_function, + is_list, wraparound, boundscheck) + else: + return "" + + def generate_result_code(self, code): + if not self.is_temp: + # all handled in self.calculate_result_code() + return + + utility_code = None + error_value = None + if self.type.is_pyobject: + error_value = 'NULL' + if self.index.type.is_int: + if self.base.type is list_type: + function = "__Pyx_GetItemInt_List" + elif self.base.type is tuple_type: + function = "__Pyx_GetItemInt_Tuple" + else: + function = "__Pyx_GetItemInt" + utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c") + else: + if self.base.type is dict_type: + function = "__Pyx_PyDict_GetItem" + utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c") + elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type): + # obj[str] is probably doing a dict lookup + function = "__Pyx_PyObject_Dict_GetItem" + utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c") + else: + function = "__Pyx_PyObject_GetItem" + code.globalstate.use_utility_code( + TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")) + utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c") + elif self.type.is_unicode_char and self.base.type is unicode_type: + assert self.index.type.is_int + function = "__Pyx_GetItemInt_Unicode" + error_value = '(Py_UCS4)-1' + utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c") + elif self.base.type is bytearray_type: + assert self.index.type.is_int + assert self.type.is_int + function = "__Pyx_GetItemInt_ByteArray" + error_value = '-1' + utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c") + elif not (self.base.type.is_cpp_class and self.exception_check): + assert False, "unexpected type %s and base type %s for indexing (%s)" % ( + self.type, self.base.type, self.pos) + + if utility_code is not None: + code.globalstate.use_utility_code(utility_code) + + if self.index.type.is_int: + index_code = self.index.result() + else: + index_code = self.index.py_result() + + if self.base.type.is_cpp_class and self.exception_check: + translate_cpp_exception(code, self.pos, + "%s = %s[%s];" % (self.result(), self.base.result(), + self.index.result()), + self.result() if self.type.is_pyobject else None, + self.exception_value, self.in_nogil_context) + else: + error_check = '!%s' if error_value == 'NULL' else '%%s == %s' % error_value + code.putln( + "%s = %s(%s, %s%s); %s" % ( + self.result(), + function, + self.base.py_result(), + index_code, + self.extra_index_params(code), + code.error_goto_if(error_check % self.result(), self.pos))) + if self.type.is_pyobject: + self.generate_gotref(code) + + def generate_setitem_code(self, value_code, code): + if self.index.type.is_int: + if self.base.type is bytearray_type: + code.globalstate.use_utility_code( + UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c")) + function = "__Pyx_SetItemInt_ByteArray" + else: + code.globalstate.use_utility_code( + UtilityCode.load_cached("SetItemInt", 
"ObjectHandling.c")) + function = "__Pyx_SetItemInt" + index_code = self.index.result() + else: + index_code = self.index.py_result() + if self.base.type is dict_type: + function = "PyDict_SetItem" + # It would seem that we could specialized lists/tuples, but that + # shouldn't happen here. + # Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as + # index instead of an object, and bad conversion here would give + # the wrong exception. Also, tuples are supposed to be immutable, + # and raise a TypeError when trying to set their entries + # (PyTuple_SetItem() is for creating new tuples from scratch). + else: + function = "PyObject_SetItem" + code.putln(code.error_goto_if_neg( + "%s(%s, %s, %s%s)" % ( + function, + self.base.py_result(), + index_code, + value_code, + self.extra_index_params(code)), + self.pos)) + + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): + self.generate_subexpr_evaluation_code(code) + + if self.type.is_pyobject: + self.generate_setitem_code(rhs.py_result(), code) + elif self.base.type is bytearray_type: + value_code = self._check_byte_value(code, rhs) + self.generate_setitem_code(value_code, code) + elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+': + if overloaded_assignment and exception_check and self.exception_value != exception_value: + # Handle the case that both the index operator and the assignment + # operator have a c++ exception handler and they are not the same. + translate_double_cpp_exception(code, self.pos, self.type, + self.result(), rhs.result(), self.exception_value, + exception_value, self.in_nogil_context) + else: + # Handle the case that only the index operator has a + # c++ exception handler, or that + # both exception handlers are the same. + translate_cpp_exception(code, self.pos, + "%s = %s;" % (self.result(), rhs.result()), + self.result() if self.type.is_pyobject else None, + self.exception_value, self.in_nogil_context) + else: + code.putln( + "%s = %s;" % (self.result(), rhs.result())) + + self.generate_subexpr_disposal_code(code) + self.free_subexpr_temps(code) + rhs.generate_disposal_code(code) + rhs.free_temps(code) + + def _check_byte_value(self, code, rhs): + # TODO: should we do this generally on downcasts, or just here? 
+ assert rhs.type.is_int, repr(rhs.type) + value_code = rhs.result() + if rhs.has_constant_result(): + if 0 <= rhs.constant_result < 256: + return value_code + needs_cast = True # make at least the C compiler happy + warning(rhs.pos, + "value outside of range(0, 256)" + " when assigning to byte: %s" % rhs.constant_result, + level=1) + else: + needs_cast = rhs.type != PyrexTypes.c_uchar_type + + if not self.nogil: + conditions = [] + if rhs.is_literal or rhs.type.signed: + conditions.append('%s < 0' % value_code) + if (rhs.is_literal or not + (rhs.is_temp and rhs.type in ( + PyrexTypes.c_uchar_type, PyrexTypes.c_char_type, + PyrexTypes.c_schar_type))): + conditions.append('%s > 255' % value_code) + if conditions: + code.putln("if (unlikely(%s)) {" % ' || '.join(conditions)) + code.putln( + 'PyErr_SetString(PyExc_ValueError,' + ' "byte must be in range(0, 256)"); %s' % + code.error_goto(self.pos)) + code.putln("}") + + if needs_cast: + value_code = '((unsigned char)%s)' % value_code + return value_code + + def generate_deletion_code(self, code, ignore_nonexisting=False): + self.generate_subexpr_evaluation_code(code) + #if self.type.is_pyobject: + if self.index.type.is_int: + function = "__Pyx_DelItemInt" + index_code = self.index.result() + code.globalstate.use_utility_code( + UtilityCode.load_cached("DelItemInt", "ObjectHandling.c")) + else: + index_code = self.index.py_result() + if self.base.type is dict_type: + function = "PyDict_DelItem" + else: + function = "PyObject_DelItem" + code.putln(code.error_goto_if_neg( + "%s(%s, %s%s)" % ( + function, + self.base.py_result(), + index_code, + self.extra_index_params(code)), + self.pos)) + self.generate_subexpr_disposal_code(code) + self.free_subexpr_temps(code) + + +class BufferIndexNode(_IndexingBaseNode): + """ + Indexing of buffers and memoryviews. This node is created during type + analysis from IndexNode and replaces it. + + Attributes: + base - base node being indexed + indices - list of indexing expressions + """ + + subexprs = ['base', 'indices'] + + is_buffer_access = True + + # Whether we're assigning to a buffer (in that case it needs to be writable) + writable_needed = False + + # Any indexing temp variables that we need to clean up. + index_temps = () + + def analyse_target_types(self, env): + self.analyse_types(env, getting=False) + + def analyse_types(self, env, getting=True): + """ + Analyse types for buffer indexing only. 
Overridden by memoryview + indexing and slicing subclasses + """ + # self.indices are already analyzed + if not self.base.is_name and not is_pythran_expr(self.base.type): + error(self.pos, "Can only index buffer variables") + self.type = error_type + return self + + if not getting: + if not self.base.entry.type.writable: + error(self.pos, "Writing to readonly buffer") + else: + self.writable_needed = True + if self.base.type.is_buffer: + self.base.entry.buffer_aux.writable_needed = True + + self.none_error_message = "'NoneType' object is not subscriptable" + self.analyse_buffer_index(env, getting) + self.wrap_in_nonecheck_node(env) + return self + + def analyse_buffer_index(self, env, getting): + if is_pythran_expr(self.base.type): + index_with_type_list = [(idx, idx.type) for idx in self.indices] + self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list)) + else: + self.base = self.base.coerce_to_simple(env) + self.type = self.base.type.dtype + self.buffer_type = self.base.type + + if getting and (self.type.is_pyobject or self.type.is_pythran_expr): + self.is_temp = True + + def analyse_assignment(self, rhs): + """ + Called by IndexNode when this node is assigned to, + with the rhs of the assignment + """ + + def wrap_in_nonecheck_node(self, env): + if not env.directives['nonecheck'] or not self.base.may_be_none(): + return + self.base = self.base.as_none_safe_node(self.none_error_message) + + def nogil_check(self, env): + if self.is_buffer_access or self.is_memview_index: + if self.type.is_pyobject: + error(self.pos, "Cannot access buffer with object dtype without gil") + self.type = error_type + + def calculate_result_code(self): + return "(*%s)" % self.buffer_ptr_code + + def buffer_entry(self): + base = self.base + if self.base.is_nonecheck: + base = base.arg + return base.type.get_entry(base) + + def get_index_in_temp(self, code, ivar): + ret = code.funcstate.allocate_temp( + PyrexTypes.widest_numeric_type( + ivar.type, + PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type), + manage_ref=False) + code.putln("%s = %s;" % (ret, ivar.result())) + return ret + + def buffer_lookup_code(self, code): + """ + ndarray[1, 2, 3] and memslice[1, 2, 3] + """ + if self.in_nogil_context: + if self.is_buffer_access or self.is_memview_index: + if code.globalstate.directives['boundscheck']: + warning(self.pos, "Use boundscheck(False) for faster access", level=1) + + # Assign indices to temps of at least (s)size_t to allow further index calculations. + self.index_temps = index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices] + + # Generate buffer access code using these temps + from . 
import Buffer
+        buffer_entry = self.buffer_entry()
+        if buffer_entry.type.is_buffer:
+            negative_indices = buffer_entry.type.negative_indices
+        else:
+            negative_indices = Buffer.buffer_defaults['negative_indices']
+
+        return buffer_entry, Buffer.put_buffer_lookup_code(
+            entry=buffer_entry,
+            index_signeds=[ivar.type.signed for ivar in self.indices],
+            index_cnames=index_temps,
+            directives=code.globalstate.directives,
+            pos=self.pos, code=code,
+            negative_indices=negative_indices,
+            in_nogil_context=self.in_nogil_context)
+
+    def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+        self.generate_subexpr_evaluation_code(code)
+        self.generate_buffer_setitem_code(rhs, code)
+        self.generate_subexpr_disposal_code(code)
+        self.free_subexpr_temps(code)
+        rhs.generate_disposal_code(code)
+        rhs.free_temps(code)
+
+    def generate_buffer_setitem_code(self, rhs, code, op=""):
+        base_type = self.base.type
+        if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
+            obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
+            # We have got to do this because we have to declare pythran objects
+            # at the beginning of the functions.
+            # Indeed, Cython uses "goto" statements for error management, and
+            # RAII doesn't work with that kind of construction.
+            # Moreover, the way Pythran expressions are built means they don't
+            # support move-assignment easily.
+            # Thus, we explicitly destroy and then in-place construct (placement
+            # new) the objects in this case.
+            code.putln("__Pyx_call_destructor(%s);" % obj)
+            code.putln("new (&%s) decltype(%s){%s};" % (obj, obj, self.base.pythran_result()))
+            code.putln("%s%s %s= %s;" % (
+                obj,
+                pythran_indexing_code(self.indices),
+                op,
+                rhs.pythran_result()))
+            code.funcstate.release_temp(obj)
+            return
+
+        # Used from generate_assignment_code and InPlaceAssignmentNode
+        buffer_entry, ptrexpr = self.buffer_lookup_code(code)
+
+        if self.buffer_type.dtype.is_pyobject:
+            # Must manage refcounts. XDecref what is already there
+            # and incref what we put in (NumPy allows there to be NULL)
+            ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
+                                               manage_ref=False)
+            rhs_code = rhs.result()
+            code.putln("%s = %s;" % (ptr, ptrexpr))
+            code.put_xgotref("*%s" % ptr, self.buffer_type.dtype)
+            code.putln("__Pyx_INCREF(%s); __Pyx_XDECREF(*%s);" % (
+                rhs_code, ptr))
+            code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
+            code.put_xgiveref("*%s" % ptr, self.buffer_type.dtype)
+            code.funcstate.release_temp(ptr)
+        else:
+            # Simple case
+            code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
+
+    def generate_result_code(self, code):
+        if is_pythran_expr(self.base.type):
+            res = self.result()
+            code.putln("__Pyx_call_destructor(%s);" % res)
+            code.putln("new (&%s) decltype(%s){%s%s};" % (
+                res,
+                res,
+                self.base.pythran_result(),
+                pythran_indexing_code(self.indices)))
+            return
+        buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
+        if self.type.is_pyobject:
+            # is_temp is True, so must pull out value and incref it.
+            # NOTE: object temporary results for nodes are declared
+            #       as PyObject *, so we need a cast
+            res = self.result()
+            code.putln("%s = (PyObject *) *%s;" % (res, self.buffer_ptr_code))
+            # NumPy does (occasionally) allow NULL to denote None.
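+            # The generated C code reads roughly (sketch):
+            #     item = (PyObject *) *buf_ptr;
+            #     if (unlikely(item == NULL)) item = Py_None;
+            #     __Pyx_INCREF(item);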
+ code.putln("if (unlikely(%s == NULL)) %s = Py_None;" % (res, res)) + code.putln("__Pyx_INCREF((PyObject*)%s);" % res) + + def free_subexpr_temps(self, code): + for temp in self.index_temps: + code.funcstate.release_temp(temp) + self.index_temps = () + super(BufferIndexNode, self).free_subexpr_temps(code) + + +class MemoryViewIndexNode(BufferIndexNode): + + is_memview_index = True + is_buffer_access = False + + def analyse_types(self, env, getting=True): + # memoryviewslice indexing or slicing + from . import MemoryView + + self.is_pythran_mode = has_np_pythran(env) + indices = self.indices + have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim) + + if not getting: + self.writable_needed = True + if self.base.is_name or self.base.is_attribute: + self.base.entry.type.writable_needed = True + + self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim) + axes = [] + + index_type = PyrexTypes.c_py_ssize_t_type + new_indices = [] + + if len(indices) - len(newaxes) > self.base.type.ndim: + self.type = error_type + error(indices[self.base.type.ndim].pos, + "Too many indices specified for type %s" % self.base.type) + return self + + axis_idx = 0 + for i, index in enumerate(indices[:]): + index = index.analyse_types(env) + if index.is_none: + self.is_memview_slice = True + new_indices.append(index) + axes.append(('direct', 'strided')) + continue + + access, packing = self.base.type.axes[axis_idx] + axis_idx += 1 + + if index.is_slice: + self.is_memview_slice = True + if index.step.is_none: + axes.append((access, packing)) + else: + axes.append((access, 'strided')) + + # Coerce start, stop and step to temps of the right type + for attr in ('start', 'stop', 'step'): + value = getattr(index, attr) + if not value.is_none: + value = value.coerce_to(index_type, env) + #value = value.coerce_to_temp(env) + setattr(index, attr, value) + new_indices.append(value) + + elif index.type.is_int or index.type.is_pyobject: + if index.type.is_pyobject: + performance_hint(index.pos, "Index should be typed for more efficient access", env) + + self.is_memview_index = True + index = index.coerce_to(index_type, env) + indices[i] = index + new_indices.append(index) + + else: + self.type = error_type + error(index.pos, "Invalid index for memoryview specified, type %s" % index.type) + return self + + ### FIXME: replace by MemoryViewSliceNode if is_memview_slice ? + self.is_memview_index = self.is_memview_index and not self.is_memview_slice + self.indices = new_indices + # All indices with all start/stop/step for slices. + # We need to keep this around. + self.original_indices = indices + self.nogil = env.nogil + + self.analyse_operation(env, getting, axes) + self.wrap_in_nonecheck_node(env) + return self + + def analyse_operation(self, env, getting, axes): + self.none_error_message = "Cannot index None memoryview slice" + self.analyse_buffer_index(env, getting) + + def analyse_broadcast_operation(self, rhs): + """ + Support broadcasting for slice assignment. + E.g. + m_2d[...] = m_1d # or, + m_1d[...] 
= m_2d # if the leading dimension has extent 1 + """ + if self.type.is_memoryviewslice: + lhs = self + if lhs.is_memview_broadcast or rhs.is_memview_broadcast: + lhs.is_memview_broadcast = True + rhs.is_memview_broadcast = True + + def analyse_as_memview_scalar_assignment(self, rhs): + lhs = self.analyse_assignment(rhs) + if lhs: + rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment + return lhs + return self + + +class MemoryViewSliceNode(MemoryViewIndexNode): + + is_memview_slice = True + + # No-op slicing operation, this node will be replaced + is_ellipsis_noop = False + is_memview_scalar_assignment = False + is_memview_index = False + is_memview_broadcast = False + + def analyse_ellipsis_noop(self, env, getting): + """Slicing operations needing no evaluation, i.e. m[...] or m[:, :]""" + ### FIXME: replace directly + self.is_ellipsis_noop = all( + index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none + for index in self.indices) + + if self.is_ellipsis_noop: + self.type = self.base.type + + def analyse_operation(self, env, getting, axes): + from . import MemoryView + + if not getting: + self.is_memview_broadcast = True + self.none_error_message = "Cannot assign to None memoryview slice" + else: + self.none_error_message = "Cannot slice None memoryview slice" + + self.analyse_ellipsis_noop(env, getting) + if self.is_ellipsis_noop: + return + + self.index = None + self.is_temp = True + self.use_managed_ref = True + + if not MemoryView.validate_axes(self.pos, axes): + self.type = error_type + return + + self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes) + + if not (self.base.is_simple() or self.base.result_in_temp()): + self.base = self.base.coerce_to_temp(env) + + def analyse_assignment(self, rhs): + if not rhs.type.is_memoryviewslice and ( + self.type.dtype.assignable_from(rhs.type) or + rhs.type.is_pyobject): + # scalar assignment + return MemoryCopyScalar(self.pos, self) + else: + return MemoryCopySlice(self.pos, self) + + def merged_indices(self, indices): + """Return a new list of indices/slices with 'indices' merged into the current ones + according to slicing rules. + Is used to implement "view[i][j]" => "view[i, j]". + Return None if the indices cannot (easily) be merged at compile time. + """ + if not indices: + return None + # NOTE: Need to evaluate "self.original_indices" here as they might differ from "self.indices". + new_indices = self.original_indices[:] + indices = indices[:] + for i, s in enumerate(self.original_indices): + if s.is_slice: + if s.start.is_none and s.stop.is_none and s.step.is_none: + # Full slice found, replace by index. + new_indices[i] = indices[0] + indices.pop(0) + if not indices: + return new_indices + else: + # Found something non-trivial, e.g. a partial slice. + return None + elif not s.type.is_int: + # Not a slice, not an integer index => could be anything... 
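+                # (e.g. an object used as a key, so merging "view[i][j]"
+                # into "view[i, j]" is not attempted in that case.)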
+                return None
+        if indices:
+            if len(new_indices) + len(indices) > self.base.type.ndim:
+                return None
+            new_indices += indices
+        return new_indices
+
+    def is_simple(self):
+        if self.is_ellipsis_noop:
+            # TODO: fix SimpleCallNode.is_simple()
+            return self.base.is_simple() or self.base.result_in_temp()
+
+        return self.result_in_temp()
+
+    def calculate_result_code(self):
+        """This is called in case this is a no-op slicing node"""
+        return self.base.result()
+
+    def generate_result_code(self, code):
+        if self.is_ellipsis_noop:
+            return  ### FIXME: remove
+        buffer_entry = self.buffer_entry()
+        have_gil = not self.in_nogil_context
+
+        # TODO Mark: this is insane, do it better
+        have_slices = False
+        it = iter(self.indices)
+        for index in self.original_indices:
+            if index.is_slice:
+                have_slices = True
+                if not index.start.is_none:
+                    index.start = next(it)
+                if not index.stop.is_none:
+                    index.stop = next(it)
+                if not index.step.is_none:
+                    index.step = next(it)
+            else:
+                next(it)
+
+        assert not list(it)
+
+        buffer_entry.generate_buffer_slice_code(
+            code, self.original_indices, self.result(), self.type,
+            have_gil=have_gil, have_slices=have_slices,
+            directives=code.globalstate.directives)
+
+    def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+        if self.is_ellipsis_noop:
+            self.generate_subexpr_evaluation_code(code)
+        else:
+            self.generate_evaluation_code(code)
+
+        if self.is_memview_scalar_assignment:
+            self.generate_memoryviewslice_assign_scalar_code(rhs, code)
+        else:
+            self.generate_memoryviewslice_setslice_code(rhs, code)
+
+        if self.is_ellipsis_noop:
+            self.generate_subexpr_disposal_code(code)
+        else:
+            self.generate_disposal_code(code)
+
+        rhs.generate_disposal_code(code)
+        rhs.free_temps(code)
+
+
+class MemoryCopyNode(ExprNode):
+    """
+    Wraps a memoryview slice for slice assignment.
+
+    dst: destination memoryview slice
+    """
+
+    subexprs = ['dst']
+
+    def __init__(self, pos, dst):
+        super(MemoryCopyNode, self).__init__(pos)
+        self.dst = dst
+        self.type = dst.type
+
+    def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+        self.dst.generate_evaluation_code(code)
+        self._generate_assignment_code(rhs, code)
+        self.dst.generate_disposal_code(code)
+        self.dst.free_temps(code)
+        rhs.generate_disposal_code(code)
+        rhs.free_temps(code)
+
+
+class MemoryCopySlice(MemoryCopyNode):
+    """
+    Copy the contents of slice src to slice dst. Does not support indirect
+    slices.
+
+        memslice1[...] = memslice2
+        memslice1[:] = memslice2
+    """
+
+    is_memview_copy_assignment = True
+    copy_slice_cname = "__pyx_memoryview_copy_contents"
+
+    def _generate_assignment_code(self, src, code):
+        dst = self.dst
+
+        src.type.assert_direct_dims(src.pos)
+        dst.type.assert_direct_dims(dst.pos)
+
+        code.putln(code.error_goto_if_neg(
+            "%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname,
+                src.result(), dst.result(),
+                src.type.ndim, dst.type.ndim,
+                dst.type.dtype.is_pyobject),
+            dst.pos))
+
+
+class MemoryCopyScalar(MemoryCopyNode):
+    """
+    Assign a scalar to a slice. dst must be simple; the scalar will be
+    assigned to a correct type, not just something assignable.
+
+        memslice1[...] = 0.0
+        memslice1[:] = 0.0
+    """
+
+    def __init__(self, pos, dst):
+        super(MemoryCopyScalar, self).__init__(pos, dst)
+        self.type = dst.type.dtype
+
+    def _generate_assignment_code(self, scalar, code):
+        from .
import MemoryView + + self.dst.type.assert_direct_dims(self.dst.pos) + + dtype = self.dst.type.dtype + type_decl = dtype.declaration_code("") + slice_decl = self.dst.type.declaration_code("") + + code.begin_block() + code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result())) + if self.dst.result_in_temp() or self.dst.is_simple(): + dst_temp = self.dst.result() + else: + code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result())) + dst_temp = "__pyx_temp_slice" + + force_strided = False + indices = self.dst.original_indices + for idx in indices: + if isinstance(idx, SliceNode) and not (idx.start.is_none and + idx.stop.is_none and + idx.step.is_none): + force_strided = True + + slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp, + self.dst.type.ndim, code, + force_strided=force_strided) + p = slice_iter_obj.start_loops() + + if dtype.is_pyobject: + code.putln("Py_DECREF(*(PyObject **) %s);" % p) + + code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p)) + + if dtype.is_pyobject: + code.putln("Py_INCREF(__pyx_temp_scalar);") + + slice_iter_obj.end_loops() + code.end_block() + + +class SliceIndexNode(ExprNode): + # 2-element slice indexing + # + # base ExprNode + # start ExprNode or None + # stop ExprNode or None + # slice ExprNode or None constant slice object + # nogil bool used internally + + subexprs = ['base', 'start', 'stop', 'slice'] + nogil = False + + slice = None + + def infer_type(self, env): + base_type = self.base.infer_type(env) + if base_type.is_string or base_type.is_cpp_class: + return bytes_type + elif base_type.is_pyunicode_ptr: + return unicode_type + elif base_type in (bytes_type, bytearray_type, str_type, unicode_type, + basestring_type, list_type, tuple_type): + return base_type + elif base_type.is_ptr or base_type.is_array: + return PyrexTypes.c_array_type(base_type.base_type, None) + return py_object_type + + def inferable_item_node(self, index=0): + # slicing shouldn't change the result type of the base, but the index might + if index is not not_a_constant and self.start: + if self.start.has_constant_result(): + index += self.start.constant_result + else: + index = not_a_constant + return self.base.inferable_item_node(index) + + def may_be_none(self): + base_type = self.base.type + if base_type: + if base_type.is_string: + return False + if base_type in (bytes_type, str_type, unicode_type, + basestring_type, list_type, tuple_type): + return False + return ExprNode.may_be_none(self) + + def calculate_constant_result(self): + if self.start is None: + start = None + else: + start = self.start.constant_result + if self.stop is None: + stop = None + else: + stop = self.stop.constant_result + self.constant_result = self.base.constant_result[start:stop] + + def compile_time_value(self, denv): + base = self.base.compile_time_value(denv) + if self.start is None: + start = 0 + else: + start = self.start.compile_time_value(denv) + if self.stop is None: + stop = None + else: + stop = self.stop.compile_time_value(denv) + try: + return base[start:stop] + except Exception as e: + self.compile_time_value_error(e) + + def analyse_target_declaration(self, env): + pass + + def analyse_target_types(self, env): + node = self.analyse_types(env, getting=False) + # when assigning, we must accept any Python type + if node.type.is_pyobject: + node.type = py_object_type + return node + + def analyse_types(self, env, getting=True): + self.base = self.base.analyse_types(env) + + if self.base.type.is_buffer or self.base.type.is_pythran_expr or 
self.base.type.is_memoryviewslice: + none_node = NoneNode(self.pos) + index = SliceNode(self.pos, + start=self.start or none_node, + stop=self.stop or none_node, + step=none_node) + index_node = IndexNode(self.pos, index=index, base=self.base) + return index_node.analyse_base_and_index_types( + env, getting=getting, setting=not getting, + analyse_base=False) + + if self.start: + self.start = self.start.analyse_types(env) + if self.stop: + self.stop = self.stop.analyse_types(env) + + if not env.directives['wraparound']: + check_negative_indices(self.start, self.stop) + + base_type = self.base.type + if base_type.is_array and not getting: + # cannot assign directly to C array => try to assign by making a copy + if not self.start and not self.stop: + self.type = base_type + else: + self.type = PyrexTypes.CPtrType(base_type.base_type) + elif base_type.is_string or base_type.is_cpp_string: + self.type = default_str_type(env) + elif base_type.is_pyunicode_ptr: + self.type = unicode_type + elif base_type.is_ptr: + self.type = base_type + elif base_type.is_array: + # we need a ptr type here instead of an array type, as + # array types can result in invalid type casts in the C + # code + self.type = PyrexTypes.CPtrType(base_type.base_type) + else: + self.base = self.base.coerce_to_pyobject(env) + self.type = py_object_type + if base_type.is_builtin_type: + # slicing builtin types returns something of the same type + self.type = base_type + self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable") + + if self.type is py_object_type: + if (not self.start or self.start.is_literal) and \ + (not self.stop or self.stop.is_literal): + # cache the constant slice object, in case we need it + none_node = NoneNode(self.pos) + self.slice = SliceNode( + self.pos, + start=copy.deepcopy(self.start or none_node), + stop=copy.deepcopy(self.stop or none_node), + step=none_node + ).analyse_types(env) + else: + c_int = PyrexTypes.c_py_ssize_t_type + + def allow_none(node, default_value, env): + # Coerce to Py_ssize_t, but allow None as meaning the default slice bound. + from .UtilNodes import EvalWithTempExprNode, ResultRefNode + + node_ref = ResultRefNode(node) + new_expr = CondExprNode( + node.pos, + true_val=IntNode( + node.pos, + type=c_int, + value=default_value, + constant_result=int(default_value) if default_value.isdigit() else not_a_constant, + ), + false_val=node_ref.coerce_to(c_int, env), + test=PrimaryCmpNode( + node.pos, + operand1=node_ref, + operator='is', + operand2=NoneNode(node.pos), + ).analyse_types(env) + ).analyse_result_type(env) + return EvalWithTempExprNode(node_ref, new_expr) + + if self.start: + if self.start.type.is_pyobject: + self.start = allow_none(self.start, '0', env) + self.start = self.start.coerce_to(c_int, env) + if self.stop: + if self.stop.type.is_pyobject: + self.stop = allow_none(self.stop, 'PY_SSIZE_T_MAX', env) + self.stop = self.stop.coerce_to(c_int, env) + self.is_temp = 1 + return self + + def analyse_as_type(self, env): + base_type = self.base.analyse_as_type(env) + if base_type: + if not self.start and not self.stop: + # memory view + from . 
import MemoryView + env.use_utility_code(MemoryView.view_utility_code) + none_node = NoneNode(self.pos) + slice_node = SliceNode( + self.pos, + start=none_node, + stop=none_node, + step=none_node, + ) + return PyrexTypes.MemoryViewSliceType( + base_type, MemoryView.get_axes_specs(env, [slice_node])) + return None + + def nogil_check(self, env): + self.nogil = env.nogil + return super(SliceIndexNode, self).nogil_check(env) + + gil_message = "Slicing Python object" + + get_slice_utility_code = TempitaUtilityCode.load( + "SliceObject", "ObjectHandling.c", context={'access': 'Get'}) + + set_slice_utility_code = TempitaUtilityCode.load( + "SliceObject", "ObjectHandling.c", context={'access': 'Set'}) + + def coerce_to(self, dst_type, env): + if ((self.base.type.is_string or self.base.type.is_cpp_string) + and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)): + if (dst_type not in (bytes_type, bytearray_type) + and not env.directives['c_string_encoding']): + error(self.pos, + "default encoding required for conversion from '%s' to '%s'" % + (self.base.type, dst_type)) + self.type = dst_type + if dst_type.is_array and self.base.type.is_array: + if not self.start and not self.stop: + # redundant slice building, copy C arrays directly + return self.base.coerce_to(dst_type, env) + # else: check array size if possible + return super(SliceIndexNode, self).coerce_to(dst_type, env) + + def generate_result_code(self, code): + if not self.type.is_pyobject: + error(self.pos, + "Slicing is not currently supported for '%s'." % self.type) + return + + base_result = self.base.result() + result = self.result() + start_code = self.start_code() + stop_code = self.stop_code() + if self.base.type.is_string: + base_result = self.base.result() + if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type): + base_result = '((const char*)%s)' % base_result + if self.type is bytearray_type: + type_name = 'ByteArray' + else: + type_name = self.type.name.title() + if self.stop is None: + code.putln( + "%s = __Pyx_Py%s_FromString(%s + %s); %s" % ( + result, + type_name, + base_result, + start_code, + code.error_goto_if_null(result, self.pos))) + else: + code.putln( + "%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % ( + result, + type_name, + base_result, + start_code, + stop_code, + start_code, + code.error_goto_if_null(result, self.pos))) + elif self.base.type.is_pyunicode_ptr: + base_result = self.base.result() + if self.base.type != PyrexTypes.c_py_unicode_ptr_type: + base_result = '((const Py_UNICODE*)%s)' % base_result + if self.stop is None: + code.putln( + "%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % ( + result, + base_result, + start_code, + code.error_goto_if_null(result, self.pos))) + else: + code.putln( + "%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % ( + result, + base_result, + start_code, + stop_code, + start_code, + code.error_goto_if_null(result, self.pos))) + + elif self.base.type is unicode_type: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c")) + code.putln( + "%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % ( + result, + base_result, + start_code, + stop_code, + code.error_goto_if_null(result, self.pos))) + elif self.type is py_object_type: + code.globalstate.use_utility_code(self.get_slice_utility_code) + (has_c_start, has_c_stop, c_start, c_stop, + py_start, py_stop, py_slice) = self.get_slice_config() + code.putln( + "%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, 
%s, %d, %d, %d); %s" % ( + result, + self.base.py_result(), + c_start, c_stop, + py_start, py_stop, py_slice, + has_c_start, has_c_stop, + bool(code.globalstate.directives['wraparound']), + code.error_goto_if_null(result, self.pos))) + else: + if self.base.type is list_type: + code.globalstate.use_utility_code( + TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c")) + cfunc = '__Pyx_PyList_GetSlice' + elif self.base.type is tuple_type: + code.globalstate.use_utility_code( + TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c")) + cfunc = '__Pyx_PyTuple_GetSlice' + else: + cfunc = 'PySequence_GetSlice' + code.putln( + "%s = %s(%s, %s, %s); %s" % ( + result, + cfunc, + self.base.py_result(), + start_code, + stop_code, + code.error_goto_if_null(result, self.pos))) + self.generate_gotref(code) + + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): + self.generate_subexpr_evaluation_code(code) + if self.type.is_pyobject: + code.globalstate.use_utility_code(self.set_slice_utility_code) + has_c_start, has_c_stop, c_start, c_stop, py_start, py_stop, py_slice = self.get_slice_config() + code.put_error_if_neg(self.pos, + "__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % ( + self.base.py_result(), + rhs.py_result(), + c_start, c_stop, + py_start, py_stop, py_slice, + has_c_start, has_c_stop, + bool(code.globalstate.directives['wraparound']))) + else: + start_offset = self.start_code() if self.start else '0' + if rhs.type.is_array: + array_length = rhs.type.size + self.generate_slice_guard_code(code, array_length) + else: + array_length = '%s - %s' % (self.stop_code(), start_offset) + + code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c")) + code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % ( + self.base.result(), start_offset, + rhs.result(), + self.base.result(), array_length + )) + + self.generate_subexpr_disposal_code(code) + self.free_subexpr_temps(code) + rhs.generate_disposal_code(code) + rhs.free_temps(code) + + def generate_deletion_code(self, code, ignore_nonexisting=False): + if not self.base.type.is_pyobject: + error(self.pos, + "Deleting slices is only supported for Python types, not '%s'." 
% self.type) + return + self.generate_subexpr_evaluation_code(code) + code.globalstate.use_utility_code(self.set_slice_utility_code) + (has_c_start, has_c_stop, c_start, c_stop, + py_start, py_stop, py_slice) = self.get_slice_config() + code.put_error_if_neg(self.pos, + "__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % ( + self.base.py_result(), + c_start, c_stop, + py_start, py_stop, py_slice, + has_c_start, has_c_stop, + bool(code.globalstate.directives['wraparound']))) + self.generate_subexpr_disposal_code(code) + self.free_subexpr_temps(code) + + def get_slice_config(self): + has_c_start, c_start, py_start = False, '0', 'NULL' + if self.start: + has_c_start = not self.start.type.is_pyobject + if has_c_start: + c_start = self.start.result() + else: + py_start = '&%s' % self.start.py_result() + has_c_stop, c_stop, py_stop = False, '0', 'NULL' + if self.stop: + has_c_stop = not self.stop.type.is_pyobject + if has_c_stop: + c_stop = self.stop.result() + else: + py_stop = '&%s' % self.stop.py_result() + py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL' + return (has_c_start, has_c_stop, c_start, c_stop, + py_start, py_stop, py_slice) + + def generate_slice_guard_code(self, code, target_size): + if not self.base.type.is_array: + return + slice_size = self.base.type.size + try: + total_length = slice_size = int(slice_size) + except ValueError: + total_length = None + + start = stop = None + if self.stop: + stop = self.stop.result() + try: + stop = int(stop) + if stop < 0: + if total_length is None: + slice_size = '%s + %d' % (slice_size, stop) + else: + slice_size += stop + else: + slice_size = stop + stop = None + except ValueError: + pass + + if self.start: + start = self.start.result() + try: + start = int(start) + if start < 0: + if total_length is None: + start = '%s + %d' % (self.base.type.size, start) + else: + start += total_length + if isinstance(slice_size, _py_int_types): + slice_size -= start + else: + slice_size = '%s - (%s)' % (slice_size, start) + start = None + except ValueError: + pass + + runtime_check = None + compile_time_check = False + try: + int_target_size = int(target_size) + except ValueError: + int_target_size = None + else: + compile_time_check = isinstance(slice_size, _py_int_types) + + if compile_time_check and slice_size < 0: + if int_target_size > 0: + error(self.pos, "Assignment to empty slice.") + elif compile_time_check and start is None and stop is None: + # we know the exact slice length + if int_target_size != slice_size: + error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % ( + slice_size, target_size)) + elif start is not None: + if stop is None: + stop = slice_size + runtime_check = "(%s)-(%s)" % (stop, start) + elif stop is not None: + runtime_check = stop + else: + runtime_check = slice_size + + if runtime_check: + code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size)) + if self.nogil: + code.put_ensure_gil() + code.putln( + 'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,' + ' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",' + ' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % ( + target_size, runtime_check)) + if self.nogil: + code.put_release_ensured_gil() + code.putln(code.error_goto(self.pos)) + code.putln("}") + + def start_code(self): + if self.start: + return self.start.result() + else: + return "0" + + def stop_code(self): + if self.stop: + return self.stop.result() + elif self.base.type.is_array: + return self.base.type.size + 
else: + return "PY_SSIZE_T_MAX" + + def calculate_result_code(self): + # self.result() is not used, but this method must exist + return "" + + +class SliceNode(ExprNode): + # start:stop:step in subscript list + # + # start ExprNode + # stop ExprNode + # step ExprNode + + subexprs = ['start', 'stop', 'step'] + is_slice = True + type = slice_type + is_temp = 1 + + def calculate_constant_result(self): + self.constant_result = slice( + self.start.constant_result, + self.stop.constant_result, + self.step.constant_result) + + def compile_time_value(self, denv): + start = self.start.compile_time_value(denv) + stop = self.stop.compile_time_value(denv) + step = self.step.compile_time_value(denv) + try: + return slice(start, stop, step) + except Exception as e: + self.compile_time_value_error(e) + + def may_be_none(self): + return False + + def analyse_types(self, env): + start = self.start.analyse_types(env) + stop = self.stop.analyse_types(env) + step = self.step.analyse_types(env) + self.start = start.coerce_to_pyobject(env) + self.stop = stop.coerce_to_pyobject(env) + self.step = step.coerce_to_pyobject(env) + if self.start.is_literal and self.stop.is_literal and self.step.is_literal: + self.is_literal = True + self.is_temp = False + return self + + gil_message = "Constructing Python slice object" + + def calculate_result_code(self): + return self.result_code + + def generate_result_code(self, code): + if self.is_literal: + dedup_key = make_dedup_key(self.type, (self,)) + self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2, dedup_key=dedup_key) + code = code.get_cached_constants_writer(self.result_code) + if code is None: + return # already initialised + code.mark_pos(self.pos) + + code.putln( + "%s = PySlice_New(%s, %s, %s); %s" % ( + self.result(), + self.start.py_result(), + self.stop.py_result(), + self.step.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + if self.is_literal: + self.generate_giveref(code) + +class SliceIntNode(SliceNode): + # start:stop:step in subscript list + # This is just a node to hold start,stop and step nodes that can be + # converted to integers. This does not generate a slice python object. 
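+    # (It is used, for instance, when indexing Pythran array expressions in
+    # analyse_as_buffer_operation() above, where slice bounds are kept as C
+    # integers rather than wrapped in a Python slice object.)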
+    #
+    #  start     ExprNode
+    #  stop      ExprNode
+    #  step      ExprNode
+
+    is_temp = 0
+
+    def calculate_constant_result(self):
+        self.constant_result = slice(
+            self.start.constant_result,
+            self.stop.constant_result,
+            self.step.constant_result)
+
+    def compile_time_value(self, denv):
+        start = self.start.compile_time_value(denv)
+        stop = self.stop.compile_time_value(denv)
+        step = self.step.compile_time_value(denv)
+        try:
+            return slice(start, stop, step)
+        except Exception as e:
+            self.compile_time_value_error(e)
+
+    def may_be_none(self):
+        return False
+
+    def analyse_types(self, env):
+        self.start = self.start.analyse_types(env)
+        self.stop = self.stop.analyse_types(env)
+        self.step = self.step.analyse_types(env)
+
+        if not self.start.is_none:
+            self.start = self.start.coerce_to_integer(env)
+        if not self.stop.is_none:
+            self.stop = self.stop.coerce_to_integer(env)
+        if not self.step.is_none:
+            self.step = self.step.coerce_to_integer(env)
+
+        if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
+            self.is_literal = True
+            self.is_temp = False
+        return self
+
+    def calculate_result_code(self):
+        pass
+
+    def generate_result_code(self, code):
+        for a in self.start, self.stop, self.step:
+            if isinstance(a, CloneNode):
+                a.arg.result()
+
+
+class CallNode(ExprNode):
+
+    # allow overriding the default 'may_be_none' behaviour
+    may_return_none = None
+
+    def infer_type(self, env):
+        # TODO(robertwb): Reduce redundancy with analyse_types.
+        function = self.function
+        func_type = function.infer_type(env)
+        if isinstance(function, NewExprNode):
+            # note: needs call to infer_type() above
+            return PyrexTypes.CPtrType(function.class_type)
+        if func_type is py_object_type:
+            # function might have lied for safety => try to find better type
+            entry = getattr(function, 'entry', None)
+            if entry is not None:
+                func_type = entry.type or func_type
+        if func_type.is_ptr:
+            func_type = func_type.base_type
+        if func_type.is_cfunction:
+            if getattr(self.function, 'entry', None) and hasattr(self, 'args'):
+                alternatives = self.function.entry.all_alternatives()
+                arg_types = [arg.infer_type(env) for arg in self.args]
+                func_entry = PyrexTypes.best_match(arg_types, alternatives)
+                if func_entry:
+                    func_type = func_entry.type
+                    if func_type.is_ptr:
+                        func_type = func_type.base_type
+                    return func_type.return_type
+            return func_type.return_type
+        elif func_type is type_type:
+            if function.is_name and function.entry and function.entry.type:
+                result_type = function.entry.type
+                if result_type.is_extension_type:
+                    return result_type
+                elif result_type.is_builtin_type:
+                    if function.entry.name == 'float':
+                        return PyrexTypes.c_double_type
+                    elif function.entry.name in Builtin.types_that_construct_their_instance:
+                        return result_type
+            func_type = self.function.analyse_as_type(env)
+            if func_type and (func_type.is_struct_or_union or func_type.is_cpp_class):
+                return func_type
+        return py_object_type
+
+    def type_dependencies(self, env):
+        # TODO: Update when Danilo's C++ code is merged in to handle
+        # the case of function overloading.
+        return self.function.type_dependencies(env)
+
+    def is_simple(self):
+        # C function calls could be considered simple, but they may
+        # have side-effects that become visible when multiple operations
+        # must take effect in order, e.g. when constructing the argument
+        # sequence for a function call or comparing values.
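+        # For example, in a call like "f(g(), h())" the relative order of
+        # the g() and h() calls is observable whenever either one mutates
+        # state that the other reads (illustrative names).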
+        return False
+
+    def may_be_none(self):
+        if self.may_return_none is not None:
+            return self.may_return_none
+        func_type = self.function.type
+        if func_type is type_type and self.function.is_name:
+            entry = self.function.entry
+            if entry.type.is_extension_type:
+                return False
+            if (entry.type.is_builtin_type and
+                    entry.name in Builtin.types_that_construct_their_instance):
+                return False
+        return ExprNode.may_be_none(self)
+
+    def set_py_result_type(self, function, func_type=None):
+        if func_type is None:
+            func_type = function.type
+        if func_type is Builtin.type_type and (
+                function.is_name and
+                function.entry and
+                function.entry.is_builtin and
+                function.entry.name in Builtin.types_that_construct_their_instance):
+            # calling a builtin type that returns a specific object type
+            if function.entry.name == 'float':
+                # the following is made true later on by a transform
+                self.type = PyrexTypes.c_double_type
+                self.result_ctype = PyrexTypes.c_double_type
+            else:
+                self.type = Builtin.builtin_types[function.entry.name]
+                self.result_ctype = py_object_type
+            self.may_return_none = False
+        elif function.is_name and function.type_entry:
+            # We are calling an extension type constructor. As long as we do not
+            # support __new__(), the result type is clear.
+            self.type = function.type_entry.type
+            self.result_ctype = py_object_type
+            self.may_return_none = False
+        else:
+            self.type = py_object_type
+
+    def analyse_as_type_constructor(self, env):
+        type = self.function.analyse_as_type(env)
+        if type and type.is_struct_or_union:
+            args, kwds = self.explicit_args_kwds()
+            items = []
+            for arg, member in zip(args, type.scope.var_entries):
+                items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
+            if kwds:
+                items += kwds.key_value_pairs
+            self.key_value_pairs = items
+            self.__class__ = DictNode
+            self.analyse_types(env)    # FIXME
+            self.coerce_to(type, env)
+            return True
+        elif type and type.is_cpp_class:
+            self.args = [ arg.analyse_types(env) for arg in self.args ]
+            constructor = type.scope.lookup("<init>")
+            if not constructor:
+                error(self.function.pos, "no constructor found for C++ type '%s'" % self.function.name)
+                self.type = error_type
+                return self
+            self.function = RawCNameExprNode(self.function.pos, constructor.type)
+            self.function.entry = constructor
+            self.function.set_cname(type.empty_declaration_code())
+            self.analyse_c_function_call(env)
+            self.type = type
+            return True
+
+    def is_lvalue(self):
+        return self.type.is_reference
+
+    def nogil_check(self, env):
+        func_type = self.function_type()
+        if func_type.is_pyobject:
+            self.gil_error()
+        elif not func_type.is_error and not getattr(func_type, 'nogil', False):
+            self.gil_error()
+
+    gil_message = "Calling gil-requiring function"
+
+
+class SimpleCallNode(CallNode):
+    #  Function call without keyword, * or ** args.
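+    #  (e.g. a call like "f(a, b)"; calls such as "f(a, *args)" or
+    #  "f(a, b=1)" are handled by GeneralCallNode instead)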
+ # + # function ExprNode + # args [ExprNode] + # arg_tuple ExprNode or None used internally + # self ExprNode or None used internally + # coerced_self ExprNode or None used internally + # wrapper_call bool used internally + # has_optional_args bool used internally + # nogil bool used internally + + subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple'] + + self = None + coerced_self = None + arg_tuple = None + wrapper_call = False + has_optional_args = False + nogil = False + analysed = False + overflowcheck = False + + def compile_time_value(self, denv): + function = self.function.compile_time_value(denv) + args = [arg.compile_time_value(denv) for arg in self.args] + try: + return function(*args) + except Exception as e: + self.compile_time_value_error(e) + + @classmethod + def for_cproperty(cls, pos, obj, entry): + # Create a call node for C property access. + property_scope = entry.scope + getter_entry = property_scope.lookup_here(entry.name) + assert getter_entry, "Getter not found in scope %s: %s" % (property_scope, property_scope.entries) + function = NameNode(pos, name=entry.name, entry=getter_entry, type=getter_entry.type) + node = cls(pos, function=function, args=[obj]) + return node + + def analyse_as_type(self, env): + attr = self.function.as_cython_attribute() + if attr == 'pointer': + if len(self.args) != 1: + error(self.args.pos, "only one type allowed.") + else: + type = self.args[0].analyse_as_type(env) + if not type: + error(self.args[0].pos, "Unknown type") + else: + return PyrexTypes.CPtrType(type) + elif attr == 'typeof': + if len(self.args) != 1: + error(self.args.pos, "only one type allowed.") + operand = self.args[0].analyse_types(env) + return operand.type + + def explicit_args_kwds(self): + return self.args, None + + def analyse_types(self, env): + if self.analysed: + return self + self.analysed = True + if self.analyse_as_type_constructor(env): + return self + self.function.is_called = 1 + self.function = self.function.analyse_types(env) + function = self.function + + if function.is_attribute and function.entry and function.entry.is_cmethod: + # Take ownership of the object from which the attribute + # was obtained, because we need to pass it as 'self'. 
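+            # (e.g. for "obj.meth(x)", 'obj' becomes the 'self' argument
+            # below and the attribute keeps only a clone of it)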
+ self.self = function.obj + function.obj = CloneNode(self.self) + + func_type = self.function_type() + self.is_numpy_call_with_exprs = False + if (has_np_pythran(env) and function.is_numpy_attribute and + pythran_is_numpy_func_supported(function)): + has_pythran_args = True + self.arg_tuple = TupleNode(self.pos, args = self.args) + self.arg_tuple = self.arg_tuple.analyse_types(env) + for arg in self.arg_tuple.args: + has_pythran_args &= is_pythran_supported_node_or_none(arg) + self.is_numpy_call_with_exprs = bool(has_pythran_args) + if self.is_numpy_call_with_exprs: + env.add_include_file(pythran_get_func_include_file(function)) + return NumPyMethodCallNode.from_node( + self, + function_cname=pythran_functor(function), + arg_tuple=self.arg_tuple, + type=PythranExpr(pythran_func_type(function, self.arg_tuple.args)), + ) + elif func_type.is_pyobject: + self.arg_tuple = TupleNode(self.pos, args = self.args) + self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env) + self.args = None + self.set_py_result_type(function, func_type) + self.is_temp = 1 + else: + self.args = [ arg.analyse_types(env) for arg in self.args ] + self.analyse_c_function_call(env) + if func_type.exception_check == '+': + self.is_temp = True + + return self + + def function_type(self): + # Return the type of the function being called, coercing a function + # pointer to a function if necessary. If the function has fused + # arguments, return the specific type. + func_type = self.function.type + + if func_type.is_ptr: + func_type = func_type.base_type + + return func_type + + def analyse_c_function_call(self, env): + func_type = self.function.type + if func_type is error_type: + self.type = error_type + return + + if func_type.is_cfunction and func_type.is_static_method: + if self.self and self.self.type.is_extension_type: + # To support this we'd need to pass self to determine whether + # it was overloaded in Python space (possibly via a Cython + # superclass turning a cdef method into a cpdef one). + error(self.pos, "Cannot call a static method on an instance variable.") + args = self.args + elif self.self: + args = [self.self] + self.args + else: + args = self.args + + if func_type.is_cpp_class: + overloaded_entry = self.function.type.scope.lookup("operator()") + if overloaded_entry is None: + self.type = PyrexTypes.error_type + self.result_code = "" + return + elif hasattr(self.function, 'entry'): + overloaded_entry = self.function.entry + elif self.function.is_subscript and self.function.is_fused_index: + overloaded_entry = self.function.type.entry + else: + overloaded_entry = None + + if overloaded_entry: + if self.function.type.is_fused: + functypes = self.function.type.get_all_specialized_function_types() + alternatives = [f.entry for f in functypes] + else: + alternatives = overloaded_entry.all_alternatives() + + entry = PyrexTypes.best_match([arg.type for arg in args], + alternatives, self.pos, env, args) + + if not entry: + self.type = PyrexTypes.error_type + self.result_code = "" + return + + entry.used = True + if not func_type.is_cpp_class: + self.function.entry = entry + self.function.type = entry.type + func_type = self.function_type() + else: + entry = None + func_type = self.function_type() + if not func_type.is_cfunction: + error(self.pos, "Calling non-function type '%s'" % func_type) + self.type = PyrexTypes.error_type + self.result_code = "" + return + + # Check no. 
of args + max_nargs = len(func_type.args) + expected_nargs = max_nargs - func_type.optional_arg_count + actual_nargs = len(args) + if func_type.optional_arg_count and expected_nargs != actual_nargs: + self.has_optional_args = 1 + self.is_temp = 1 + + # check 'self' argument + if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method: + formal_arg = func_type.args[0] + arg = args[0] + if formal_arg.not_none: + if self.self: + self.self = self.self.as_none_safe_node( + "'NoneType' object has no attribute '%{0}s'".format('.30' if len(entry.name) <= 30 else ''), + error='PyExc_AttributeError', + format_args=[entry.name]) + else: + # unbound method + arg = arg.as_none_safe_node( + "descriptor '%s' requires a '%s' object but received a 'NoneType'", + format_args=[entry.name, formal_arg.type.name]) + if self.self: + if formal_arg.accept_builtin_subtypes: + arg = CMethodSelfCloneNode(self.self) + else: + arg = CloneNode(self.self) + arg = self.coerced_self = arg.coerce_to(formal_arg.type, env) + elif formal_arg.type.is_builtin_type: + # special case: unbound methods of builtins accept subtypes + arg = arg.coerce_to(formal_arg.type, env) + if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode): + arg.exact_builtin_type = False + args[0] = arg + + # Coerce arguments + some_args_in_temps = False + for i in range(min(max_nargs, actual_nargs)): + formal_arg = func_type.args[i] + formal_type = formal_arg.type + arg = args[i].coerce_to(formal_type, env) + if formal_arg.not_none: + # C methods must do the None checks at *call* time + arg = arg.as_none_safe_node( + "cannot pass None into a C function argument that is declared 'not None'") + if arg.is_temp: + if i > 0: + # first argument in temp doesn't impact subsequent arguments + some_args_in_temps = True + elif arg.type.is_pyobject and not env.nogil: + if i == 0 and self.self is not None: + # a method's cloned "self" argument is ok + pass + elif arg.nonlocally_immutable(): + # plain local variables are ok + pass + else: + # we do not safely own the argument's reference, + # but we must make sure it cannot be collected + # before we return from the function, so we create + # an owned temp reference to it + if i > 0: # first argument doesn't matter + some_args_in_temps = True + arg = arg.coerce_to_temp(env) + args[i] = arg + + # handle additional varargs parameters + for i in range(max_nargs, actual_nargs): + arg = args[i] + if arg.type.is_pyobject: + if arg.type is str_type: + arg_ctype = PyrexTypes.c_char_ptr_type + else: + arg_ctype = arg.type.default_coerced_ctype() + if arg_ctype is None: + error(self.args[i-1].pos, + "Python object cannot be passed as a varargs parameter") + else: + args[i] = arg = arg.coerce_to(arg_ctype, env) + if arg.is_temp and i > 0: + some_args_in_temps = True + + if some_args_in_temps: + # if some args are temps and others are not, they may get + # constructed in the wrong order (temps first) => make + # sure they are either all temps or all not temps (except + # for the last argument, which is evaluated last in any + # case) + for i in range(actual_nargs-1): + if i == 0 and self.self is not None: + continue # self is ok + arg = args[i] + if arg.nonlocally_immutable(): + # locals, C functions, unassignable types are safe. + pass + elif arg.type.is_cpp_class: + # Assignment has side effects, avoid. 
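+                    # (copying a C++ object into a temp would run its
+                    # copy constructor, which can itself be observable)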
+ pass + elif env.nogil and arg.type.is_pyobject: + # can't copy a Python reference into a temp in nogil + # env (this is safe: a construction would fail in + # nogil anyway) + pass + else: + #self.args[i] = arg.coerce_to_temp(env) + # instead: issue a warning + if i > 0 or i == 1 and self.self is not None: # skip first arg + warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0) + break + + self.args[:] = args + + # Calc result type and code fragment + if isinstance(self.function, NewExprNode): + self.type = PyrexTypes.CPtrType(self.function.class_type) + else: + self.type = func_type.return_type + + if self.function.is_name or self.function.is_attribute: + func_entry = self.function.entry + if func_entry and (func_entry.utility_code or func_entry.utility_code_definition): + self.is_temp = 1 # currently doesn't work for self.calculate_result_code() + + if self.type.is_pyobject: + self.result_ctype = py_object_type + self.is_temp = 1 + elif func_type.exception_value is not None or func_type.exception_check: + self.is_temp = 1 + elif self.type.is_memoryviewslice: + self.is_temp = 1 + # func_type.exception_check = True + + if self.is_temp and self.type.is_reference: + self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type) + + # C++ exception handler + if func_type.exception_check == '+': + if needs_cpp_exception_conversion(func_type): + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + + self.overflowcheck = env.directives['overflowcheck'] + + def calculate_result_code(self): + return self.c_call_code() + + def c_call_code(self): + func_type = self.function_type() + if self.type is PyrexTypes.error_type or not func_type.is_cfunction: + return "" + formal_args = func_type.args + arg_list_code = [] + args = list(zip(formal_args, self.args)) + max_nargs = len(func_type.args) + expected_nargs = max_nargs - func_type.optional_arg_count + actual_nargs = len(self.args) + for formal_arg, actual_arg in args[:expected_nargs]: + arg_code = actual_arg.move_result_rhs_as(formal_arg.type) + arg_list_code.append(arg_code) + + if func_type.is_overridable: + arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod))) + + if func_type.optional_arg_count: + if expected_nargs == actual_nargs: + optional_args = 'NULL' + else: + optional_args = "&%s" % self.opt_arg_struct + arg_list_code.append(optional_args) + + for actual_arg in self.args[len(formal_args):]: + arg_list_code.append(actual_arg.move_result_rhs()) + + result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code)) + return result + + def is_c_result_required(self): + func_type = self.function_type() + if not func_type.exception_value or func_type.exception_check == '+': + return False # skip allocation of unused result temp + return True + + def generate_evaluation_code(self, code): + function = self.function + if function.is_name or function.is_attribute: + code.globalstate.use_entry_utility_code(function.entry) + + abs_function_cnames = ('abs', 'labs', '__Pyx_abs_longlong') + is_signed_int = self.type.is_int and self.type.signed + if self.overflowcheck and is_signed_int and function.result() in abs_function_cnames: + code.globalstate.use_utility_code(UtilityCode.load_cached("Common", "Overflow.c")) + code.putln('if (unlikely(%s == __PYX_MIN(%s))) {\ + PyErr_SetString(PyExc_OverflowError,\ + "Trying to take the absolute value of the most negative integer is not defined."); %s; }' % ( + 
self.args[0].result(), + self.args[0].type.empty_declaration_code(), + code.error_goto(self.pos))) + + if not function.type.is_pyobject or len(self.arg_tuple.args) > 1 or ( + self.arg_tuple.args and self.arg_tuple.is_literal): + super(SimpleCallNode, self).generate_evaluation_code(code) + return + + # Special case 0-args and try to avoid explicit tuple creation for Python calls with 1 arg. + arg = self.arg_tuple.args[0] if self.arg_tuple.args else None + subexprs = (self.self, self.coerced_self, function, arg) + for subexpr in subexprs: + if subexpr is not None: + subexpr.generate_evaluation_code(code) + + code.mark_pos(self.pos) + assert self.is_temp + self.allocate_temp_result(code) + + if arg is None: + code.globalstate.use_utility_code(UtilityCode.load_cached( + "PyObjectCallNoArg", "ObjectHandling.c")) + code.putln( + "%s = __Pyx_PyObject_CallNoArg(%s); %s" % ( + self.result(), + function.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + else: + code.globalstate.use_utility_code(UtilityCode.load_cached( + "PyObjectCallOneArg", "ObjectHandling.c")) + code.putln( + "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % ( + self.result(), + function.py_result(), + arg.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + + self.generate_gotref(code) + + for subexpr in subexprs: + if subexpr is not None: + subexpr.generate_disposal_code(code) + subexpr.free_temps(code) + + def generate_result_code(self, code): + func_type = self.function_type() + if func_type.is_pyobject: + arg_code = self.arg_tuple.py_result() + code.globalstate.use_utility_code(UtilityCode.load_cached( + "PyObjectCall", "ObjectHandling.c")) + code.putln( + "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % ( + self.result(), + self.function.py_result(), + arg_code, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + elif func_type.is_cfunction: + nogil = not code.funcstate.gil_owned + if self.has_optional_args: + actual_nargs = len(self.args) + expected_nargs = len(func_type.args) - func_type.optional_arg_count + self.opt_arg_struct = code.funcstate.allocate_temp( + func_type.op_arg_struct.base_type, manage_ref=True) + code.putln("%s.%s = %s;" % ( + self.opt_arg_struct, + Naming.pyrex_prefix + "n", + len(self.args) - expected_nargs)) + args = list(zip(func_type.args, self.args)) + for formal_arg, actual_arg in args[expected_nargs:actual_nargs]: + code.putln("%s.%s = %s;" % ( + self.opt_arg_struct, + func_type.opt_arg_cname(formal_arg.name), + actual_arg.result_as(formal_arg.type))) + exc_checks = [] + if self.type.is_pyobject and self.is_temp: + exc_checks.append("!%s" % self.result()) + elif self.type.is_memoryviewslice: + assert self.is_temp + exc_checks.append(self.type.error_condition(self.result())) + elif func_type.exception_check != '+': + exc_val = func_type.exception_value + exc_check = func_type.exception_check + if exc_val is not None: + exc_checks.append("%s == %s" % (self.result(), func_type.return_type.cast_code(exc_val))) + if exc_check: + if nogil: + if not exc_checks: + perf_hint_entry = getattr(self.function, "entry", None) + PyrexTypes.write_noexcept_performance_hint( + self.pos, code.funcstate.scope, + function_name=perf_hint_entry.name if perf_hint_entry else None, + void_return=self.type.is_void, is_call=True) + code.globalstate.use_utility_code( + UtilityCode.load_cached("ErrOccurredWithGIL", "Exceptions.c")) + exc_checks.append("__Pyx_ErrOccurredWithGIL()") + else: + exc_checks.append("PyErr_Occurred()") + if self.is_temp or exc_checks: + rhs = 
self.c_call_code() + if self.result(): + lhs = "%s = " % self.result() + if self.is_temp and self.type.is_pyobject: + #return_type = self.type # func_type.return_type + #print "SimpleCallNode.generate_result_code: casting", rhs, \ + # "from", return_type, "to pyobject" ### + rhs = typecast(py_object_type, self.type, rhs) + else: + lhs = "" + if func_type.exception_check == '+': + translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs), + self.result() if self.type.is_pyobject else None, + func_type.exception_value, nogil) + else: + if exc_checks: + goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos) + else: + goto_error = "" + code.putln("%s%s; %s" % (lhs, rhs, goto_error)) + if self.type.is_pyobject and self.result(): + self.generate_gotref(code) + if self.has_optional_args: + code.funcstate.release_temp(self.opt_arg_struct) + + +class NumPyMethodCallNode(ExprNode): + # Pythran call to a NumPy function or method. + # + # function_cname string the function/method to call + # arg_tuple TupleNode the arguments as an args tuple + + subexprs = ['arg_tuple'] + is_temp = True + may_return_none = True + + def generate_evaluation_code(self, code): + code.mark_pos(self.pos) + self.allocate_temp_result(code) + + assert self.arg_tuple.mult_factor is None + args = self.arg_tuple.args + for arg in args: + arg.generate_evaluation_code(code) + + code.putln("// function evaluation code for numpy function") + code.putln("__Pyx_call_destructor(%s);" % self.result()) + code.putln("new (&%s) decltype(%s){%s{}(%s)};" % ( + self.result(), + self.result(), + self.function_cname, + ", ".join(a.pythran_result() for a in args))) + + +class PyMethodCallNode(SimpleCallNode): + # Specialised call to a (potential) PyMethodObject with non-constant argument tuple. + # Allows the self argument to be injected directly instead of repacking a tuple for it. + # + # function ExprNode the function/method object to call + # arg_tuple TupleNode the arguments for the args tuple + + subexprs = ['function', 'arg_tuple'] + is_temp = True + + def generate_evaluation_code(self, code): + code.mark_pos(self.pos) + self.allocate_temp_result(code) + + self.function.generate_evaluation_code(code) + assert self.arg_tuple.mult_factor is None + args = self.arg_tuple.args + for arg in args: + arg.generate_evaluation_code(code) + + # make sure function is in temp so that we can replace the reference below if it's a method + reuse_function_temp = self.function.is_temp + if reuse_function_temp: + function = self.function.result() + else: + function = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + self.function.make_owned_reference(code) + code.put("%s = %s; " % (function, self.function.py_result())) + self.function.generate_disposal_code(code) + self.function.free_temps(code) + + self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + code.putln("%s = NULL;" % self_arg) + arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) + code.putln("%s = 0;" % arg_offset_cname) + + def attribute_is_likely_method(attr): + obj = attr.obj + if obj.is_name and obj.entry.is_pyglobal: + return False # more likely to be a function + return True + + if self.function.is_attribute: + likely_method = 'likely' if attribute_is_likely_method(self.function) else 'unlikely' + elif self.function.is_name and self.function.cf_state: + # not an attribute itself, but might have been assigned from one (e.g. 
bound method)
+            for assignment in self.function.cf_state:
+                value = assignment.rhs
+                if value and value.is_attribute and value.obj.type and value.obj.type.is_pyobject:
+                    if attribute_is_likely_method(value):
+                        likely_method = 'likely'
+                        break
+            else:
+                likely_method = 'unlikely'
+        else:
+            likely_method = 'unlikely'
+
+        code.putln("#if CYTHON_UNPACK_METHODS")
+        code.putln("if (%s(PyMethod_Check(%s))) {" % (likely_method, function))
+        code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
+        # the following is always true in Py3 (kept only for safety),
+        # but is false for unbound methods in Py2
+        code.putln("if (likely(%s)) {" % self_arg)
+        code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
+        code.put_incref(self_arg, py_object_type)
+        code.put_incref("function", py_object_type)
+        # free the method object as early as possible to enable reuse from CPython's freelist
+        code.put_decref_set(function, py_object_type, "function")
+        code.putln("%s = 1;" % arg_offset_cname)
+        code.putln("}")
+        code.putln("}")
+        code.putln("#endif")  # CYTHON_UNPACK_METHODS
+        # TODO may need to deal with unused variables in the #else case
+
+        # actually call the function
+        code.globalstate.use_utility_code(
+            UtilityCode.load_cached("PyObjectFastCall", "ObjectHandling.c"))
+
+        code.putln("{")
+        # To avoid passing an out-of-bounds argument pointer in the no-args case,
+        # we need at least two entries, so we pad with NULL and point to that.
+        # See https://github.com/cython/cython/issues/5668
+        code.putln("PyObject *__pyx_callargs[%d] = {%s, %s};" % (
+            (len(args) + 1) if args else 2,
+            self_arg,
+            ', '.join(arg.py_result() for arg in args) if args else "NULL",
+        ))
+        code.putln("%s = __Pyx_PyObject_FastCall(%s, __pyx_callargs+1-%s, %d+%s);" % (
+            self.result(),
+            function,
+            arg_offset_cname,
+            len(args),
+            arg_offset_cname))
+
+        code.put_xdecref_clear(self_arg, py_object_type)
+        code.funcstate.release_temp(self_arg)
+        code.funcstate.release_temp(arg_offset_cname)
+        for arg in args:
+            arg.generate_disposal_code(code)
+            arg.free_temps(code)
+        code.putln(code.error_goto_if_null(self.result(), self.pos))
+        self.generate_gotref(code)
+
+        if reuse_function_temp:
+            self.function.generate_disposal_code(code)
+            self.function.free_temps(code)
+        else:
+            code.put_decref_clear(function, py_object_type)
+            code.funcstate.release_temp(function)
+        code.putln("}")
+
+
+class InlinedDefNodeCallNode(CallNode):
+    #  Inline call to defnode
+    #
+    #  function       PyCFunctionNode
+    #  function_name  NameNode
+    #  args           [ExprNode]
+
+    subexprs = ['args', 'function_name']
+    is_temp = 1
+    type = py_object_type
+    function = None
+    function_name = None
+
+    def can_be_inlined(self):
+        func_type = self.function.def_node
+        if func_type.star_arg or func_type.starstar_arg:
+            return False
+        if len(func_type.args) != len(self.args):
+            return False
+        if func_type.num_kwonly_args:
+            return False  # actually wrong number of arguments
+        return True
+
+    def analyse_types(self, env):
+        self.function_name = self.function_name.analyse_types(env)
+
+        self.args = [ arg.analyse_types(env) for arg in self.args ]
+        func_type = self.function.def_node
+        actual_nargs = len(self.args)
+
+        # Coerce arguments
+        some_args_in_temps = False
+        for i in range(actual_nargs):
+            formal_type = func_type.args[i].type
+            arg = self.args[i].coerce_to(formal_type, env)
+            if arg.is_temp:
+                if i > 0:
+                    # first argument in temp doesn't impact subsequent arguments
+                    some_args_in_temps = True
+            elif arg.type.is_pyobject and not env.nogil:
+                if arg.nonlocally_immutable():
+                    #
plain local variables are ok + pass + else: + # we do not safely own the argument's reference, + # but we must make sure it cannot be collected + # before we return from the function, so we create + # an owned temp reference to it + if i > 0: # first argument doesn't matter + some_args_in_temps = True + arg = arg.coerce_to_temp(env) + self.args[i] = arg + + if some_args_in_temps: + # if some args are temps and others are not, they may get + # constructed in the wrong order (temps first) => make + # sure they are either all temps or all not temps (except + # for the last argument, which is evaluated last in any + # case) + for i in range(actual_nargs-1): + arg = self.args[i] + if arg.nonlocally_immutable(): + # locals, C functions, unassignable types are safe. + pass + elif arg.type.is_cpp_class: + # Assignment has side effects, avoid. + pass + elif env.nogil and arg.type.is_pyobject: + # can't copy a Python reference into a temp in nogil + # env (this is safe: a construction would fail in + # nogil anyway) + pass + else: + #self.args[i] = arg.coerce_to_temp(env) + # instead: issue a warning + if i > 0: + warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0) + break + return self + + def generate_result_code(self, code): + arg_code = [self.function_name.py_result()] + func_type = self.function.def_node + for arg, proto_arg in zip(self.args, func_type.args): + if arg.type.is_pyobject: + arg_code.append(arg.result_as(proto_arg.type)) + else: + arg_code.append(arg.result()) + arg_code = ', '.join(arg_code) + code.putln( + "%s = %s(%s); %s" % ( + self.result(), + self.function.def_node.entry.pyfunc_cname, + arg_code, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class PythonCapiFunctionNode(ExprNode): + subexprs = [] + + def __init__(self, pos, py_name, cname, func_type, utility_code = None): + ExprNode.__init__(self, pos, name=py_name, cname=cname, + type=func_type, utility_code=utility_code) + + def analyse_types(self, env): + return self + + def generate_result_code(self, code): + if self.utility_code: + code.globalstate.use_utility_code(self.utility_code) + + def calculate_result_code(self): + return self.cname + + +class PythonCapiCallNode(SimpleCallNode): + # Python C-API Function call (only created in transforms) + + # By default, we assume that the call never returns None, as this + # is true for most C-API functions in CPython. If this does not + # apply to a call, set the following to True (or None to inherit + # the default behaviour). 
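+    #  (e.g. a C-API function declared to return a dict, such as
+    #  PyDict_Copy(), yields either a dict or NULL on error, never None)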
+ may_return_none = False + + def __init__(self, pos, function_name, func_type, + utility_code = None, py_name=None, **kwargs): + self.type = func_type.return_type + self.result_ctype = self.type + self.function = PythonCapiFunctionNode( + pos, py_name, function_name, func_type, + utility_code = utility_code) + # call this last so that we can override the constructed + # attributes above with explicit keyword arguments if required + SimpleCallNode.__init__(self, pos, **kwargs) + + +class CachedBuiltinMethodCallNode(CallNode): + # Python call to a method of a known Python builtin (only created in transforms) + + subexprs = ['obj', 'args'] + is_temp = True + + def __init__(self, call_node, obj, method_name, args): + super(CachedBuiltinMethodCallNode, self).__init__( + call_node.pos, + obj=obj, method_name=method_name, args=args, + may_return_none=call_node.may_return_none, + type=call_node.type) + + def may_be_none(self): + if self.may_return_none is not None: + return self.may_return_none + return ExprNode.may_be_none(self) + + def generate_result_code(self, code): + type_cname = self.obj.type.cname + obj_cname = self.obj.py_result() + args = [arg.py_result() for arg in self.args] + call_code = code.globalstate.cached_unbound_method_call_code( + obj_cname, type_cname, self.method_name, args) + code.putln("%s = %s; %s" % ( + self.result(), call_code, + code.error_goto_if_null(self.result(), self.pos) + )) + self.generate_gotref(code) + + +class GeneralCallNode(CallNode): + # General Python function call, including keyword, + # * and ** arguments. + # + # function ExprNode + # positional_args ExprNode Tuple of positional arguments + # keyword_args ExprNode or None Dict of keyword arguments + + type = py_object_type + + subexprs = ['function', 'positional_args', 'keyword_args'] + + nogil_check = Node.gil_error + + def compile_time_value(self, denv): + function = self.function.compile_time_value(denv) + positional_args = self.positional_args.compile_time_value(denv) + keyword_args = self.keyword_args.compile_time_value(denv) + try: + return function(*positional_args, **keyword_args) + except Exception as e: + self.compile_time_value_error(e) + + def explicit_args_kwds(self): + if (self.keyword_args and not self.keyword_args.is_dict_literal or + not self.positional_args.is_sequence_constructor): + raise CompileError(self.pos, + 'Compile-time keyword arguments must be explicit.') + return self.positional_args.args, self.keyword_args + + def analyse_types(self, env): + if self.analyse_as_type_constructor(env): + return self + self.function = self.function.analyse_types(env) + if not self.function.type.is_pyobject: + if self.function.type.is_error: + self.type = error_type + return self + if hasattr(self.function, 'entry'): + node = self.map_to_simple_call_node() + if node is not None and node is not self: + return node.analyse_types(env) + elif self.function.entry.as_variable: + self.function = self.function.coerce_to_pyobject(env) + elif node is self: + error(self.pos, + "Non-trivial keyword arguments and starred " + "arguments not allowed in cdef functions.") + else: + # error was already reported + pass + else: + self.function = self.function.coerce_to_pyobject(env) + if self.keyword_args: + self.keyword_args = self.keyword_args.analyse_types(env) + self.positional_args = self.positional_args.analyse_types(env) + self.positional_args = \ + self.positional_args.coerce_to_pyobject(env) + self.set_py_result_type(self.function) + self.is_temp = 1 + return self + + def map_to_simple_call_node(self): 
+ """ + Tries to map keyword arguments to declared positional arguments. + Returns self to try a Python call, None to report an error + or a SimpleCallNode if the mapping succeeds. + """ + if not isinstance(self.positional_args, TupleNode): + # has starred argument + return self + if not self.keyword_args.is_dict_literal: + # keywords come from arbitrary expression => nothing to do here + return self + function = self.function + entry = getattr(function, 'entry', None) + if not entry: + return self + function_type = entry.type + if function_type.is_ptr: + function_type = function_type.base_type + if not function_type.is_cfunction: + return self + + pos_args = self.positional_args.args + kwargs = self.keyword_args + declared_args = function_type.args + if entry.is_cmethod: + declared_args = declared_args[1:] # skip 'self' + + if len(pos_args) > len(declared_args): + error(self.pos, "function call got too many positional arguments, " + "expected %d, got %s" % (len(declared_args), + len(pos_args))) + return None + + matched_args = { + arg.name for arg in declared_args[:len(pos_args)] + if arg.name + } + unmatched_args = declared_args[len(pos_args):] + matched_kwargs_count = 0 + args = list(pos_args) + + # check for duplicate keywords + seen = set(matched_args) + has_errors = False + for arg in kwargs.key_value_pairs: + name = arg.key.value + if name in seen: + error(arg.pos, "argument '%s' passed twice" % name) + has_errors = True + # continue to report more errors if there are any + seen.add(name) + + # match keywords that are passed in order + for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs): + name = arg.key.value + if decl_arg.name == name: + matched_args.add(name) + matched_kwargs_count += 1 + args.append(arg.value) + else: + break + + # match keyword arguments that are passed out-of-order, but keep + # the evaluation of non-simple arguments in order by moving them + # into temps + from .UtilNodes import EvalWithTempExprNode, LetRefNode + temps = [] + if len(kwargs.key_value_pairs) > matched_kwargs_count: + unmatched_args = declared_args[len(args):] + keywords = dict([ (arg.key.value, (i+len(pos_args), arg)) + for i, arg in enumerate(kwargs.key_value_pairs) ]) + first_missing_keyword = None + for decl_arg in unmatched_args: + name = decl_arg.name + if name not in keywords: + # missing keyword argument => either done or error + if not first_missing_keyword: + first_missing_keyword = name + continue + elif first_missing_keyword: + if entry.as_variable: + # we might be able to convert the function to a Python + # object, which then allows full calling semantics + # with default values in gaps - currently, we only + # support optional arguments at the end + return self + # wasn't the last keyword => gaps are not supported + error(self.pos, "C function call is missing " + "argument '%s'" % first_missing_keyword) + return None + pos, arg = keywords[name] + matched_args.add(name) + matched_kwargs_count += 1 + if arg.value.is_simple(): + args.append(arg.value) + else: + temp = LetRefNode(arg.value) + assert temp.is_simple() + args.append(temp) + temps.append((pos, temp)) + + if temps: + # may have to move preceding non-simple args into temps + final_args = [] + new_temps = [] + first_temp_arg = temps[0][-1] + for arg_value in args: + if arg_value is first_temp_arg: + break # done + if arg_value.is_simple(): + final_args.append(arg_value) + else: + temp = LetRefNode(arg_value) + new_temps.append(temp) + final_args.append(temp) + if new_temps: + args = final_args + temps = 
new_temps + [ arg for i,arg in sorted(temps) ] + + # check for unexpected keywords + for arg in kwargs.key_value_pairs: + name = arg.key.value + if name not in matched_args: + has_errors = True + error(arg.pos, + "C function got unexpected keyword argument '%s'" % + name) + + if has_errors: + # error was reported already + return None + + # all keywords mapped to positional arguments + # if we are missing arguments, SimpleCallNode will figure it out + node = SimpleCallNode(self.pos, function=function, args=args) + for temp in temps[::-1]: + node = EvalWithTempExprNode(temp, node) + return node + + def generate_result_code(self, code): + if self.type.is_error: return + if self.keyword_args: + kwargs = self.keyword_args.py_result() + else: + kwargs = 'NULL' + code.globalstate.use_utility_code(UtilityCode.load_cached( + "PyObjectCall", "ObjectHandling.c")) + code.putln( + "%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % ( + self.result(), + self.function.py_result(), + self.positional_args.py_result(), + kwargs, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class AsTupleNode(ExprNode): + # Convert argument to tuple. Used for normalising + # the * argument of a function call. + # + # arg ExprNode + + subexprs = ['arg'] + is_temp = 1 + + def calculate_constant_result(self): + self.constant_result = tuple(self.arg.constant_result) + + def compile_time_value(self, denv): + arg = self.arg.compile_time_value(denv) + try: + return tuple(arg) + except Exception as e: + self.compile_time_value_error(e) + + def analyse_types(self, env): + self.arg = self.arg.analyse_types(env).coerce_to_pyobject(env) + if self.arg.type is tuple_type: + return self.arg.as_none_safe_node("'NoneType' object is not iterable") + self.type = tuple_type + return self + + def may_be_none(self): + return False + + nogil_check = Node.gil_error + gil_message = "Constructing Python tuple" + + def generate_result_code(self, code): + cfunc = "__Pyx_PySequence_Tuple" if self.arg.type in (py_object_type, tuple_type) else "PySequence_Tuple" + code.putln( + "%s = %s(%s); %s" % ( + self.result(), + cfunc, self.arg.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class MergedDictNode(ExprNode): + # Helper class for keyword arguments and other merged dicts. 
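+    #  (e.g. the keyword mappings in a call like "f(x, **a, **b)" are
+    #  merged into a single dict by this node; illustrative names)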
+    #
+    #  keyword_args  [DictNode or other ExprNode]
+
+    subexprs = ['keyword_args']
+    is_temp = 1
+    type = dict_type
+    reject_duplicates = True
+
+    def calculate_constant_result(self):
+        result = {}
+        reject_duplicates = self.reject_duplicates
+        for item in self.keyword_args:
+            if item.is_dict_literal:
+                # process items in order
+                items = ((key.constant_result, value.constant_result)
+                         for key, value in item.key_value_pairs)
+            else:
+                items = item.constant_result.items()
+
+            for key, value in items:
+                if reject_duplicates and key in result:
+                    raise ValueError("duplicate keyword argument found: %s" % key)
+                result[key] = value
+
+        self.constant_result = result
+
+    def compile_time_value(self, denv):
+        result = {}
+        reject_duplicates = self.reject_duplicates
+        for item in self.keyword_args:
+            if item.is_dict_literal:
+                # process items in order
+                items = [(key.compile_time_value(denv), value.compile_time_value(denv))
+                         for key, value in item.key_value_pairs]
+            else:
+                items = item.compile_time_value(denv).items()
+
+            try:
+                for key, value in items:
+                    if reject_duplicates and key in result:
+                        raise ValueError("duplicate keyword argument found: %s" % key)
+                    result[key] = value
+            except Exception as e:
+                self.compile_time_value_error(e)
+        return result
+
+    def type_dependencies(self, env):
+        return ()
+
+    def infer_type(self, env):
+        return dict_type
+
+    def analyse_types(self, env):
+        self.keyword_args = [
+            arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
+                # FIXME: CPython's error message starts with the runtime function name
+                'argument after ** must be a mapping, not NoneType')
+            for arg in self.keyword_args
+        ]
+
+        return self
+
+    def may_be_none(self):
+        return False
+
+    gil_message = "Constructing Python dict"
+
+    def generate_evaluation_code(self, code):
+        code.mark_pos(self.pos)
+        self.allocate_temp_result(code)
+
+        args = iter(self.keyword_args)
+        item = next(args)
+        item.generate_evaluation_code(code)
+        if item.type is not dict_type:
+            # CPython supports calling functions with non-dicts, so do we
+            code.putln('if (likely(PyDict_CheckExact(%s))) {' %
+                       item.py_result())
+
+        if item.is_dict_literal:
+            item.make_owned_reference(code)
+            code.putln("%s = %s;" % (self.result(), item.py_result()))
+            item.generate_post_assignment_code(code)
+        else:
+            code.putln("%s = PyDict_Copy(%s); %s" % (
+                self.result(),
+                item.py_result(),
+                code.error_goto_if_null(self.result(), item.pos)))
+            self.generate_gotref(code)
+            item.generate_disposal_code(code)
+
+        if item.type is not dict_type:
+            code.putln('} else {')
+            code.globalstate.use_utility_code(UtilityCode.load_cached(
+                "PyObjectCallOneArg", "ObjectHandling.c"))
+            code.putln("%s = __Pyx_PyObject_CallOneArg((PyObject*)&PyDict_Type, %s); %s" % (
+                self.result(),
+                item.py_result(),
+                code.error_goto_if_null(self.result(), self.pos)))
+            self.generate_gotref(code)
+            item.generate_disposal_code(code)
+            code.putln('}')
+        item.free_temps(code)
+
+        helpers = set()
+        for item in args:
+            if item.is_dict_literal:
+                # inline update instead of creating an intermediate dict
+                for arg in item.key_value_pairs:
+                    arg.generate_evaluation_code(code)
+                    if self.reject_duplicates:
+                        code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % (
+                            self.result(),
+                            arg.key.py_result()))
+                        helpers.add("RaiseDoubleKeywords")
+                        # FIXME: find out function name at runtime!
+ code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % ( + arg.key.py_result(), + code.error_goto(self.pos))) + code.putln("}") + code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % ( + self.result(), + arg.key.py_result(), + arg.value.py_result())) + arg.generate_disposal_code(code) + arg.free_temps(code) + else: + item.generate_evaluation_code(code) + if self.reject_duplicates: + # merge mapping into kwdict one by one as we need to check for duplicates + helpers.add("MergeKeywords") + code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % ( + self.result(), item.py_result())) + else: + # simple case, just add all entries + helpers.add("RaiseMappingExpected") + code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % ( + self.result(), item.py_result())) + code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) " + "__Pyx_RaiseMappingExpectedError(%s);" % item.py_result()) + code.putln(code.error_goto(item.pos)) + code.putln("}") + item.generate_disposal_code(code) + item.free_temps(code) + + for helper in sorted(helpers): + code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c")) + + def annotate(self, code): + for item in self.keyword_args: + item.annotate(code) + + +class AttributeNode(ExprNode): + # obj.attribute + # + # obj ExprNode + # attribute string + # needs_none_check boolean Used if obj is an extension type. + # If set to True, it is known that the type is not None. + # + # Used internally: + # + # is_py_attr boolean Is a Python getattr operation + # member string C name of struct member + # is_called boolean Function call is being done on result + # entry Entry Symbol table entry of attribute + + is_attribute = 1 + subexprs = ['obj'] + + entry = None + is_called = 0 + needs_none_check = True + is_memslice_transpose = False + is_special_lookup = False + is_py_attr = 0 + + def as_cython_attribute(self): + if (isinstance(self.obj, NameNode) and + self.obj.is_cython_module and not + self.attribute == u"parallel"): + return self.attribute + + cy = self.obj.as_cython_attribute() + if cy: + return "%s.%s" % (cy, self.attribute) + return None + + def coerce_to(self, dst_type, env): + # If coercing to a generic pyobject and this is a cpdef function + # we can create the corresponding attribute + if dst_type is py_object_type: + entry = self.entry + if entry and entry.is_cfunction and entry.as_variable: + # must be a cpdef function + self.is_temp = 1 + self.entry = entry.as_variable + self.analyse_as_python_attribute(env) + return self + elif entry and entry.is_cfunction and self.obj.type is not Builtin.type_type: + # "bound" cdef function. + # This implementation is likely a little inefficient and could be improved. 
+ # Essentially it does: + # __import__("functools").partial(coerce_to_object(self), self.obj) + from .UtilNodes import EvalWithTempExprNode, ResultRefNode + # take self.obj out to a temp because it's used twice + obj_node = ResultRefNode(self.obj, type=self.obj.type) + obj_node.result_ctype = self.obj.result_ctype + self.obj = obj_node + unbound_node = ExprNode.coerce_to(self, dst_type, env) + utility_code=UtilityCode.load_cached( + "PyMethodNew2Arg", "ObjectHandling.c" + ) + func_type = PyrexTypes.CFuncType( + PyrexTypes.py_object_type, [ + PyrexTypes.CFuncTypeArg("func", PyrexTypes.py_object_type, None), + PyrexTypes.CFuncTypeArg("self", PyrexTypes.py_object_type, None) + ], + ) + binding_call = PythonCapiCallNode( + self.pos, + function_name="__Pyx_PyMethod_New2Arg", + func_type=func_type, + args=[unbound_node, obj_node], + utility_code=utility_code, + ) + complete_call = EvalWithTempExprNode(obj_node, binding_call) + return complete_call.analyse_types(env) + return ExprNode.coerce_to(self, dst_type, env) + + def calculate_constant_result(self): + attr = self.attribute + if attr.startswith("__") and attr.endswith("__"): + return + self.constant_result = getattr(self.obj.constant_result, attr) + + def compile_time_value(self, denv): + attr = self.attribute + if attr.startswith("__") and attr.endswith("__"): + error(self.pos, + "Invalid attribute name '%s' in compile-time expression" % attr) + return None + obj = self.obj.compile_time_value(denv) + try: + return getattr(obj, attr) + except Exception as e: + self.compile_time_value_error(e) + + def type_dependencies(self, env): + return self.obj.type_dependencies(env) + + def infer_type(self, env): + # FIXME: this is way too redundant with analyse_types() + node = self.analyse_as_cimported_attribute_node(env, target=False) + if node is not None: + if node.entry.type and node.entry.type.is_cfunction: + # special-case - function converted to pointer + return PyrexTypes.CPtrType(node.entry.type) + else: + return node.entry.type + node = self.analyse_as_type_attribute(env) + if node is not None: + return node.entry.type + obj_type = self.obj.infer_type(env) + self.analyse_attribute(env, obj_type=obj_type) + if obj_type.is_builtin_type and self.type.is_cfunction: + # special case: C-API replacements for C methods of + # builtin types cannot be inferred as C functions as + # that would prevent their use as bound methods + return py_object_type + elif self.entry and self.entry.is_cmethod: + # special case: bound methods should not be inferred + # as their unbound method types + return py_object_type + return self.type + + def analyse_target_declaration(self, env): + self.is_target = True + + def analyse_target_types(self, env): + node = self.analyse_types(env, target = 1) + if node.type.is_const: + error(self.pos, "Assignment to const attribute '%s'" % self.attribute) + if not node.is_lvalue(): + error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type) + return node + + def analyse_types(self, env, target = 0): + if not self.type: + self.type = PyrexTypes.error_type # default value if it isn't analysed successfully + self.initialized_check = env.directives['initializedcheck'] + node = self.analyse_as_cimported_attribute_node(env, target) + if node is None and not target: + node = self.analyse_as_type_attribute(env) + if node is None: + node = self.analyse_as_ordinary_attribute_node(env, target) + assert node is not None + if (node.is_attribute or node.is_name) and node.entry: + node.entry.used = True + if node.is_attribute: + 
node.wrap_obj_in_nonecheck(env)
+        return node
+
+    def analyse_as_cimported_attribute_node(self, env, target):
+        # Try to interpret this as a reference to an imported
+        # C const, type, var or function. If successful, returns
+        # the corresponding NameNode (or this node after reporting
+        # an error); otherwise returns None.
+        module_scope = self.obj.analyse_as_module(env)
+        if module_scope:
+            entry = module_scope.lookup_here(self.attribute)
+            if entry and not entry.known_standard_library_import and (
+                    entry.is_cglobal or entry.is_cfunction
+                    or entry.is_type or entry.is_const):
+                return self.as_name_node(env, entry, target)
+            if self.is_cimported_module_without_shadow(env):
+                # TODO: search for submodule
+                error(self.pos, "cimported module has no attribute '%s'" % self.attribute)
+                return self
+        return None
+
+    def analyse_as_type_attribute(self, env):
+        # Try to interpret this as a reference to an unbound
+        # C method of an extension type or builtin type. If successful,
+        # creates a corresponding NameNode and returns it, otherwise
+        # returns None.
+        if self.obj.is_string_literal:
+            return
+        type = self.obj.analyse_as_type(env)
+        if type:
+            if type.is_extension_type or type.is_builtin_type or type.is_cpp_class:
+                entry = type.scope.lookup_here(self.attribute)
+                if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction):
+                    if type.is_builtin_type:
+                        if not self.is_called:
+                            # must handle this as Python object
+                            return None
+                        ubcm_entry = entry
+                    else:
+                        ubcm_entry = self._create_unbound_cmethod_entry(type, entry, env)
+                        ubcm_entry.overloaded_alternatives = [
+                            self._create_unbound_cmethod_entry(type, overloaded_alternative, env)
+                            for overloaded_alternative in entry.overloaded_alternatives
+                        ]
+                    return self.as_name_node(env, ubcm_entry, target=False)
+            elif type.is_enum or type.is_cpp_enum:
+                if self.attribute in type.values:
+                    for entry in type.entry.enum_values:
+                        if entry.name == self.attribute:
+                            return self.as_name_node(env, entry, target=False)
+                    else:
+                        error(self.pos, "%s not a known value of %s" % (self.attribute, type))
+                else:
+                    error(self.pos, "%s not a known value of %s" % (self.attribute, type))
+        return None
+
+    def _create_unbound_cmethod_entry(self, type, entry, env):
+        # Create a temporary entry describing the unbound C method in `entry`
+        # as an ordinary function.
+        if entry.func_cname and entry.type.op_arg_struct is None:
+            cname = entry.func_cname
+            if entry.type.is_static_method or (
+                    env.parent_scope and env.parent_scope.is_cpp_class_scope):
+                ctype = entry.type
+            elif type.is_cpp_class:
+                error(self.pos, "%s not a static member of %s" % (entry.name, type))
+                ctype = PyrexTypes.error_type
+            else:
+                # Fix self type.
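+                # (e.g. the unbound method's first argument is re-typed
+                # below from its generic declaration to this extension
+                # type, so it can be called like an ordinary C function)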
+ ctype = copy.copy(entry.type) + ctype.args = ctype.args[:] + ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None) + else: + cname = "%s->%s" % (type.vtabptr_cname, entry.cname) + ctype = entry.type + ubcm_entry = Symtab.Entry(entry.name, cname, ctype) + ubcm_entry.is_cfunction = 1 + ubcm_entry.func_cname = entry.func_cname + ubcm_entry.is_unbound_cmethod = 1 + ubcm_entry.scope = entry.scope + return ubcm_entry + + def analyse_as_type(self, env): + module_scope = self.obj.analyse_as_module(env) + if module_scope: + return module_scope.lookup_type(self.attribute) + if not self.obj.is_string_literal: + base_type = self.obj.analyse_as_type(env) + if base_type and getattr(base_type, 'scope', None) is not None: + return base_type.scope.lookup_type(self.attribute) + return None + + def analyse_as_extension_type(self, env): + # Try to interpret this as a reference to an extension type + # in a cimported module. Returns the extension type, or None. + module_scope = self.obj.analyse_as_module(env) + if module_scope: + entry = module_scope.lookup_here(self.attribute) + if entry and entry.is_type: + if entry.type.is_extension_type or entry.type.is_builtin_type: + return entry.type + return None + + def analyse_as_module(self, env): + # Try to interpret this as a reference to a cimported module + # in another cimported module. Returns the module scope, or None. + module_scope = self.obj.analyse_as_module(env) + if module_scope: + entry = module_scope.lookup_here(self.attribute) + if entry and entry.as_module: + return entry.as_module + return None + + def as_name_node(self, env, entry, target): + # Create a corresponding NameNode from this node and complete the + # analyse_types phase. + node = NameNode.from_node(self, name=self.attribute, entry=entry) + if target: + node = node.analyse_target_types(env) + else: + node = node.analyse_rvalue_entry(env) + node.entry.used = 1 + return node + + def analyse_as_ordinary_attribute_node(self, env, target): + self.obj = self.obj.analyse_types(env) + self.analyse_attribute(env) + if self.entry and self.entry.is_cmethod and not self.is_called: +# error(self.pos, "C method can only be called") + pass + ## Reference to C array turns into pointer to first element. + #while self.type.is_array: + # self.type = self.type.element_ptr_type() + if self.is_py_attr: + if not target: + self.is_temp = 1 + self.result_ctype = py_object_type + elif target and self.obj.type.is_builtin_type: + error(self.pos, "Assignment to an immutable object field") + elif self.entry and self.entry.is_cproperty: + if not target: + return SimpleCallNode.for_cproperty(self.pos, self.obj, self.entry).analyse_types(env) + # TODO: implement writable C-properties? + error(self.pos, "Assignment to a read-only property") + #elif self.type.is_memoryviewslice and not target: + # self.is_temp = True + return self + + def analyse_attribute(self, env, obj_type = None): + # Look up attribute and set self.type and self.member. 
+ immutable_obj = obj_type is not None # used during type inference + self.is_py_attr = 0 + self.member = self.attribute + if obj_type is None: + if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr: + self.obj = self.obj.coerce_to_pyobject(env) + obj_type = self.obj.type + else: + if obj_type.is_string or obj_type.is_pyunicode_ptr: + obj_type = py_object_type + if obj_type.is_ptr or obj_type.is_array: + obj_type = obj_type.base_type + self.op = "->" + elif obj_type.is_extension_type or obj_type.is_builtin_type: + self.op = "->" + elif obj_type.is_reference and obj_type.is_fake_reference: + self.op = "->" + else: + self.op = "." + if obj_type.has_attributes: + if obj_type.attributes_known(): + entry = obj_type.scope.lookup_here(self.attribute) + if obj_type.is_memoryviewslice and not entry: + if self.attribute == 'T': + self.is_memslice_transpose = True + self.is_temp = True + self.use_managed_ref = True + self.type = self.obj.type.transpose(self.pos) + return + else: + obj_type.declare_attribute(self.attribute, env, self.pos) + entry = obj_type.scope.lookup_here(self.attribute) + if entry and entry.is_member: + entry = None + else: + error(self.pos, + "Cannot select attribute of incomplete type '%s'" + % obj_type) + self.type = PyrexTypes.error_type + return + self.entry = entry + if entry: + if obj_type.is_extension_type and entry.name == "__weakref__": + error(self.pos, "Illegal use of special attribute __weakref__") + + # def methods need the normal attribute lookup + # because they do not have struct entries + # fused function go through assignment synthesis + # (foo = pycfunction(foo_func_obj)) and need to go through + # regular Python lookup as well + if entry.is_cproperty: + self.type = entry.type + return + elif (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod: + self.type = entry.type + self.member = entry.cname + return + else: + # If it's not a variable or C method, it must be a Python + # method of an extension type, so we treat it like a Python + # attribute. + pass + # If we get here, the base object is not a struct/union/extension + # type, or it is an extension type and the attribute is either not + # declared or is declared as a Python method. Treat it as a Python + # attribute reference. + self.analyse_as_python_attribute(env, obj_type, immutable_obj) + + def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False): + if obj_type is None: + obj_type = self.obj.type + # mangle private '__*' Python attributes used inside of a class + self.attribute = env.mangle_class_private_name(self.attribute) + self.member = self.attribute + self.type = py_object_type + self.is_py_attr = 1 + + if not obj_type.is_pyobject and not obj_type.is_error: + # Expose python methods for immutable objects. 
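+            # (e.g. a lookup like "some_c_double.is_integer()" first
+            # coerces the C value to its Python equivalent so the access
+            # can proceed as a normal Python attribute lookup)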
+ if (obj_type.is_string or obj_type.is_cpp_string + or obj_type.is_buffer or obj_type.is_memoryviewslice + or obj_type.is_numeric + or (obj_type.is_ctuple and obj_type.can_coerce_to_pyobject(env)) + or (obj_type.is_struct and obj_type.can_coerce_to_pyobject(env))): + if not immutable_obj: + self.obj = self.obj.coerce_to_pyobject(env) + elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute) + and self.obj.entry.as_variable + and self.obj.entry.as_variable.type.is_pyobject): + # might be an optimised builtin function => unpack it + if not immutable_obj: + self.obj = self.obj.coerce_to_pyobject(env) + else: + error(self.pos, + "Object of type '%s' has no attribute '%s'" % + (obj_type, self.attribute)) + + def wrap_obj_in_nonecheck(self, env): + if not env.directives['nonecheck']: + return + + msg = None + format_args = () + if (self.obj.type.is_extension_type and self.needs_none_check and not + self.is_py_attr): + msg = "'NoneType' object has no attribute '%{0}s'".format('.30' if len(self.attribute) <= 30 else '') + format_args = (self.attribute,) + elif self.obj.type.is_memoryviewslice: + if self.is_memslice_transpose: + msg = "Cannot transpose None memoryview slice" + else: + entry = self.obj.type.scope.lookup_here(self.attribute) + if entry: + # copy/is_c_contig/shape/strides etc + msg = "Cannot access '%s' attribute of None memoryview slice" + format_args = (entry.name,) + + if msg: + self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError', + format_args=format_args) + + def nogil_check(self, env): + if self.is_py_attr: + self.gil_error() + + gil_message = "Accessing Python attribute" + + def is_cimported_module_without_shadow(self, env): + return self.obj.is_cimported_module_without_shadow(env) + + def is_simple(self): + if self.obj: + return self.result_in_temp() or self.obj.is_simple() + else: + return NameNode.is_simple(self) + + def is_lvalue(self): + if self.obj: + return True + else: + return NameNode.is_lvalue(self) + + def is_ephemeral(self): + if self.obj: + return self.obj.is_ephemeral() + else: + return NameNode.is_ephemeral(self) + + def calculate_result_code(self): + result = self.calculate_access_code() + if self.entry and self.entry.is_cpp_optional and not self.is_target: + result = "(*%s)" % result + return result + + def calculate_access_code(self): + # Does the job of calculate_result_code but doesn't dereference cpp_optionals + # Therefore allowing access to the holder variable + obj = self.obj + obj_code = obj.result_as(obj.type) + #print "...obj_code =", obj_code ### + if self.entry and self.entry.is_cmethod: + if obj.type.is_extension_type and not self.entry.is_builtin_cmethod: + if self.entry.final_func_cname: + return self.entry.final_func_cname + + if self.type.from_fused: + # If the attribute was specialized through indexing, make + # sure to get the right fused name, as our entry was + # replaced by our parent index node + # (AnalyseExpressionsTransform) + self.member = self.entry.cname + + return "((struct %s *)%s%s%s)->%s" % ( + obj.type.vtabstruct_cname, obj_code, self.op, + obj.type.vtabslot_cname, self.member) + elif self.result_is_used: + return self.member + # Generating no code at all for unused access to optimised builtin + # methods fixes the problem that some optimisations only exist as + # macros, i.e. there is no function pointer to them, so we would + # generate invalid C code here. 
+ return + elif obj.type.is_complex: + return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code) + else: + if obj.type.is_builtin_type and self.entry and self.entry.is_variable: + # accessing a field of a builtin type, need to cast better than result_as() does + obj_code = obj.type.cast_code(obj.result(), to_object_struct = True) + return "%s%s%s" % (obj_code, self.op, self.member) + + def generate_result_code(self, code): + if self.is_py_attr: + if self.is_special_lookup: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c")) + lookup_func_name = '__Pyx_PyObject_LookupSpecial' + else: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c")) + lookup_func_name = '__Pyx_PyObject_GetAttrStr' + code.putln( + '%s = %s(%s, %s); %s' % ( + self.result(), + lookup_func_name, + self.obj.py_result(), + code.intern_identifier(self.attribute), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + elif self.type.is_memoryviewslice: + if self.is_memslice_transpose: + # transpose the slice + for access, packing in self.type.axes: + if access == 'ptr': + error(self.pos, "Transposing not supported for slices " + "with indirect dimensions") + return + + code.putln("%s = %s;" % (self.result(), self.obj.result())) + code.put_incref_memoryviewslice(self.result(), self.type, + have_gil=True) + + T = "__pyx_memslice_transpose(&%s)" % self.result() + code.putln(code.error_goto_if_neg(T, self.pos)) + elif self.initialized_check: + code.putln( + 'if (unlikely(!%s.memview)) {' + 'PyErr_SetString(PyExc_AttributeError,' + '"Memoryview is not initialized");' + '%s' + '}' % (self.result(), code.error_goto(self.pos))) + elif self.entry.is_cpp_optional and self.initialized_check: + if self.is_target: + undereferenced_result = self.result() + else: + assert not self.is_temp # calculate_access_code() only makes sense for non-temps + undereferenced_result = self.calculate_access_code() + unbound_check_code = self.type.cpp_optional_check_for_null_code(undereferenced_result) + code.put_error_if_unbound(self.pos, self.entry, unbound_check_code=unbound_check_code) + else: + # result_code contains what is needed, but we may need to insert + # a check and raise an exception + if self.obj.type and self.obj.type.is_extension_type: + pass + elif self.entry and self.entry.is_cmethod: + # C method implemented as function call with utility code + code.globalstate.use_entry_utility_code(self.entry) + + def generate_disposal_code(self, code): + if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose: + # mirror condition for putting the memview incref here: + code.put_xdecref_clear(self.result(), self.type, have_gil=True) + else: + ExprNode.generate_disposal_code(self, code) + + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): + self.obj.generate_evaluation_code(code) + if self.is_py_attr: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c")) + code.put_error_if_neg(self.pos, + '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % ( + self.obj.py_result(), + code.intern_identifier(self.attribute), + rhs.py_result())) + rhs.generate_disposal_code(code) + rhs.free_temps(code) + elif self.obj.type.is_complex: + code.putln("__Pyx_SET_C%s%s(%s, %s);" % ( + self.member.upper(), + self.obj.type.implementation_suffix, + self.obj.result_as(self.obj.type), + 
rhs.result_as(self.ctype()))) + rhs.generate_disposal_code(code) + rhs.free_temps(code) + else: + select_code = self.result() + if self.type.is_pyobject and self.use_managed_ref: + rhs.make_owned_reference(code) + rhs.generate_giveref(code) + code.put_gotref(select_code, self.type) + code.put_decref(select_code, self.ctype()) + elif self.type.is_memoryviewslice: + from . import MemoryView + MemoryView.put_assign_to_memviewslice( + select_code, rhs, rhs.result(), self.type, code) + + if not self.type.is_memoryviewslice: + code.putln( + "%s = %s;" % ( + select_code, + rhs.move_result_rhs_as(self.ctype()))) + #rhs.result())) + rhs.generate_post_assignment_code(code) + rhs.free_temps(code) + self.obj.generate_disposal_code(code) + self.obj.free_temps(code) + + def generate_deletion_code(self, code, ignore_nonexisting=False): + self.obj.generate_evaluation_code(code) + if self.is_py_attr or (self.entry.scope.is_property_scope + and u'__del__' in self.entry.scope.entries): + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c")) + code.put_error_if_neg(self.pos, + '__Pyx_PyObject_DelAttrStr(%s, %s)' % ( + self.obj.py_result(), + code.intern_identifier(self.attribute))) + else: + error(self.pos, "Cannot delete C attribute of extension type") + self.obj.generate_disposal_code(code) + self.obj.free_temps(code) + + def annotate(self, code): + if self.is_py_attr: + style, text = 'py_attr', 'python attribute (%s)' + else: + style, text = 'c_attr', 'c attribute (%s)' + code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute))) + + def get_known_standard_library_import(self): + module_name = self.obj.get_known_standard_library_import() + if module_name: + return StringEncoding.EncodedString("%s.%s" % (module_name, self.attribute)) + return None + + +#------------------------------------------------------------------- +# +# Constructor nodes +# +#------------------------------------------------------------------- + +class StarredUnpackingNode(ExprNode): + # A starred expression like "*a" + # + # This is only allowed in sequence assignment or construction such as + # + # a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4] + # + # and will be special cased during type analysis (or generate an error + # if it's found at unexpected places). + # + # target ExprNode + + subexprs = ['target'] + is_starred = 1 + type = py_object_type + is_temp = 1 + starred_expr_allowed_here = False + + def __init__(self, pos, target): + ExprNode.__init__(self, pos, target=target) + + def analyse_declarations(self, env): + if not self.starred_expr_allowed_here: + error(self.pos, "starred expression is not allowed here") + self.target.analyse_declarations(env) + + def infer_type(self, env): + return self.target.infer_type(env) + + def analyse_types(self, env): + if not self.starred_expr_allowed_here: + error(self.pos, "starred expression is not allowed here") + self.target = self.target.analyse_types(env) + self.type = self.target.type + return self + + def analyse_target_declaration(self, env): + self.target.analyse_target_declaration(env) + + def analyse_target_types(self, env): + self.target = self.target.analyse_target_types(env) + self.type = self.target.type + return self + + def calculate_result_code(self): + return "" + + def generate_result_code(self, code): + pass + + +class SequenceNode(ExprNode): + # Base class for list and tuple constructor nodes. + # Contains common code for performing sequence unpacking. 
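+    # The unpacking machinery below covers assignments such as
+    #     a, b = pair
+    #     a, *rest = items
+    # (parallel and starred assignment, respectively).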
+ # + # args [ExprNode] + # unpacked_items [ExprNode] or None + # coerced_unpacked_items [ExprNode] or None + # mult_factor ExprNode the integer number of content repetitions ([1,2]*3) + + subexprs = ['args', 'mult_factor'] + + is_sequence_constructor = 1 + unpacked_items = None + mult_factor = None + slow = False # trade speed for code size (e.g. use PyTuple_Pack()) + + def compile_time_value_list(self, denv): + return [arg.compile_time_value(denv) for arg in self.args] + + def replace_starred_target_node(self): + # replace a starred node in the targets by the contained expression + self.starred_assignment = False + args = [] + for arg in self.args: + if arg.is_starred: + if self.starred_assignment: + error(arg.pos, "more than 1 starred expression in assignment") + self.starred_assignment = True + arg = arg.target + arg.is_starred = True + args.append(arg) + self.args = args + + def analyse_target_declaration(self, env): + self.replace_starred_target_node() + for arg in self.args: + arg.analyse_target_declaration(env) + + def analyse_types(self, env, skip_children=False): + for i, arg in enumerate(self.args): + if not skip_children: + arg = arg.analyse_types(env) + self.args[i] = arg.coerce_to_pyobject(env) + if self.mult_factor: + mult_factor = self.mult_factor.analyse_types(env) + if not mult_factor.type.is_int: + mult_factor = mult_factor.coerce_to_pyobject(env) + self.mult_factor = mult_factor.coerce_to_simple(env) + self.is_temp = 1 + # not setting self.type here, subtypes do this + return self + + def coerce_to_ctuple(self, dst_type, env): + if self.type == dst_type: + return self + assert not self.mult_factor + if len(self.args) != dst_type.size: + error(self.pos, "trying to coerce sequence to ctuple of wrong length, expected %d, got %d" % ( + dst_type.size, len(self.args))) + coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)] + return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True) + + def _create_merge_node_if_necessary(self, env): + self._flatten_starred_args() + if not any(arg.is_starred for arg in self.args): + return self + # convert into MergedSequenceNode by building partial sequences + args = [] + values = [] + for arg in self.args: + if arg.is_starred: + if values: + args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True)) + values = [] + args.append(arg.target) + else: + values.append(arg) + if values: + args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True)) + node = MergedSequenceNode(self.pos, args, self.type) + if self.mult_factor: + node = binop_node( + self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env), + inplace=True, type=self.type, is_temp=True) + return node + + def _flatten_starred_args(self): + args = [] + for arg in self.args: + if arg.is_starred and arg.target.is_sequence_constructor and not arg.target.mult_factor: + args.extend(arg.target.args) + else: + args.append(arg) + self.args[:] = args + + def may_be_none(self): + return False + + def analyse_target_types(self, env): + if self.mult_factor: + error(self.pos, "can't assign to multiplied sequence") + self.unpacked_items = [] + self.coerced_unpacked_items = [] + self.any_coerced_items = False + for i, arg in enumerate(self.args): + arg = self.args[i] = arg.analyse_target_types(env) + if arg.is_starred: + if not arg.type.assignable_from(list_type): + error(arg.pos, + "starred target must have Python object (list) type") + if arg.type is py_object_type: + arg.type = 
list_type + unpacked_item = PyTempNode(self.pos, env) + coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env) + if unpacked_item is not coerced_unpacked_item: + self.any_coerced_items = True + self.unpacked_items.append(unpacked_item) + self.coerced_unpacked_items.append(coerced_unpacked_item) + self.type = py_object_type + return self + + def generate_result_code(self, code): + self.generate_operation_code(code) + + def generate_sequence_packing_code(self, code, target=None, plain=False): + if target is None: + target = self.result() + size_factor = c_mult = '' + mult_factor = None + + if self.mult_factor and not plain: + mult_factor = self.mult_factor + if mult_factor.type.is_int: + c_mult = mult_factor.result() + if (isinstance(mult_factor.constant_result, _py_int_types) and + mult_factor.constant_result > 0): + size_factor = ' * %s' % mult_factor.constant_result + elif mult_factor.type.signed: + size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult) + else: + size_factor = ' * (%s)' % (c_mult,) + + if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult: + # use PyTuple_Pack() to avoid generating huge amounts of one-time code + code.putln('%s = PyTuple_Pack(%d, %s); %s' % ( + target, + len(self.args), + ', '.join(arg.py_result() for arg in self.args), + code.error_goto_if_null(target, self.pos))) + code.put_gotref(target, py_object_type) + elif self.type.is_ctuple: + for i, arg in enumerate(self.args): + code.putln("%s.f%s = %s;" % ( + target, i, arg.result())) + else: + # build the tuple/list step by step, potentially multiplying it as we go + if self.type is list_type: + create_func, set_item_func = 'PyList_New', '__Pyx_PyList_SET_ITEM' + elif self.type is tuple_type: + create_func, set_item_func = 'PyTuple_New', '__Pyx_PyTuple_SET_ITEM' + else: + raise InternalError("sequence packing for unexpected type %s" % self.type) + arg_count = len(self.args) + code.putln("%s = %s(%s%s); %s" % ( + target, create_func, arg_count, size_factor, + code.error_goto_if_null(target, self.pos))) + code.put_gotref(target, py_object_type) + + if c_mult: + # FIXME: can't use a temp variable here as the code may + # end up in the constant building function. Temps + # currently don't work there. 
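+                # Sketch of the C emitted for a multiplied literal
+                # such as [x, y] * n:
+                #     { Py_ssize_t t; for (t=0; t < n; t++) { ...set items... } }
+                # with the shared quick_temp_cname serving as the counter.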
+ + #counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False) + counter = Naming.quick_temp_cname + code.putln('{ Py_ssize_t %s;' % counter) + if arg_count == 1: + offset = counter + else: + offset = '%s * %s' % (counter, arg_count) + code.putln('for (%s=0; %s < %s; %s++) {' % ( + counter, counter, c_mult, counter + )) + else: + offset = '' + + for i in range(arg_count): + arg = self.args[i] + if c_mult or not arg.result_in_temp(): + code.put_incref(arg.result(), arg.ctype()) + arg.generate_giveref(code) + code.putln("if (%s(%s, %s, %s)) %s;" % ( + set_item_func, + target, + (offset and i) and ('%s + %s' % (offset, i)) or (offset or i), + arg.py_result(), + code.error_goto(self.pos))) + + if c_mult: + code.putln('}') + #code.funcstate.release_temp(counter) + code.putln('}') + + if mult_factor is not None and mult_factor.type.is_pyobject: + code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % ( + Naming.quick_temp_cname, target, mult_factor.py_result(), + code.error_goto_if_null(Naming.quick_temp_cname, self.pos) + )) + code.put_gotref(Naming.quick_temp_cname, py_object_type) + code.put_decref(target, py_object_type) + code.putln('%s = %s;' % (target, Naming.quick_temp_cname)) + code.putln('}') + + def generate_subexpr_disposal_code(self, code): + if self.mult_factor and self.mult_factor.type.is_int: + super(SequenceNode, self).generate_subexpr_disposal_code(code) + elif self.type is tuple_type and (self.is_literal or self.slow): + super(SequenceNode, self).generate_subexpr_disposal_code(code) + else: + # We call generate_post_assignment_code here instead + # of generate_disposal_code, because values were stored + # in the tuple using a reference-stealing operation. + for arg in self.args: + arg.generate_post_assignment_code(code) + # Should NOT call free_temps -- this is invoked by the default + # generate_evaluation_code which will do that. + if self.mult_factor: + self.mult_factor.generate_disposal_code(code) + + def generate_assignment_code(self, rhs, code, overloaded_assignment=False, + exception_check=None, exception_value=None): + if self.starred_assignment: + self.generate_starred_assignment_code(rhs, code) + else: + self.generate_parallel_assignment_code(rhs, code) + + for item in self.unpacked_items: + item.release(code) + rhs.free_temps(code) + + _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType( + PyrexTypes.py_object_type, [ + PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None), + ])) + + def generate_parallel_assignment_code(self, rhs, code): + # Need to work around the fact that generate_evaluation_code + # allocates the temps in a rather hacky way -- the assignment + # is evaluated twice, within each if-block. 
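+        # Overall flow for "a, b, c = rhs" (illustrative):
+        #   1. allocate one temp per target,
+        #   2. unpack rhs into the temps -- fast path for exact
+        #      list/tuple, generic iteration otherwise,
+        #   3. coerce each temp and assign it to its target.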
+ for item in self.unpacked_items: + item.allocate(code) + special_unpack = (rhs.type is py_object_type + or rhs.type in (tuple_type, list_type) + or not rhs.type.is_builtin_type) + long_enough_for_a_loop = len(self.unpacked_items) > 3 + + if special_unpack: + self.generate_special_parallel_unpacking_code( + code, rhs, use_loop=long_enough_for_a_loop) + else: + code.putln("{") + self.generate_generic_parallel_unpacking_code( + code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop) + code.putln("}") + + for value_node in self.coerced_unpacked_items: + value_node.generate_evaluation_code(code) + for i in range(len(self.args)): + self.args[i].generate_assignment_code( + self.coerced_unpacked_items[i], code) + + def generate_special_parallel_unpacking_code(self, code, rhs, use_loop): + sequence_type_test = '1' + none_check = "likely(%s != Py_None)" % rhs.py_result() + if rhs.type is list_type: + sequence_types = ['List'] + if rhs.may_be_none(): + sequence_type_test = none_check + elif rhs.type is tuple_type: + sequence_types = ['Tuple'] + if rhs.may_be_none(): + sequence_type_test = none_check + else: + sequence_types = ['Tuple', 'List'] + tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result() + list_check = 'PyList_CheckExact(%s)' % rhs.py_result() + sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check) + + code.putln("if (%s) {" % sequence_type_test) + code.putln("PyObject* sequence = %s;" % rhs.py_result()) + + # list/tuple => check size + code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);") + code.putln("if (unlikely(size != %d)) {" % len(self.args)) + code.globalstate.use_utility_code( + UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")) + code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % ( + len(self.args), len(self.args))) + code.globalstate.use_utility_code( + UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")) + code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);") + # < 0 => exception + code.putln(code.error_goto(self.pos)) + code.putln("}") + + code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS") + # unpack items from list/tuple in unrolled loop (can't fail) + if len(sequence_types) == 2: + code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0]) + for i, item in enumerate(self.unpacked_items): + code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % ( + item.result(), sequence_types[0], i)) + if len(sequence_types) == 2: + code.putln("} else {") + for i, item in enumerate(self.unpacked_items): + code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % ( + item.result(), sequence_types[1], i)) + code.putln("}") + for item in self.unpacked_items: + code.put_incref(item.result(), item.ctype()) + + code.putln("#else") + # in non-CPython, use the PySequence protocol (which can fail) + if not use_loop: + for i, item in enumerate(self.unpacked_items): + code.putln("%s = PySequence_ITEM(sequence, %d); %s" % ( + item.result(), i, + code.error_goto_if_null(item.result(), self.pos))) + code.put_gotref(item.result(), item.type) + else: + code.putln("{") + code.putln("Py_ssize_t i;") + code.putln("PyObject** temps[%s] = {%s};" % ( + len(self.unpacked_items), + ','.join(['&%s' % item.result() for item in self.unpacked_items]))) + code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items)) + code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % ( + code.error_goto_if_null('item', self.pos))) + code.put_gotref('item', 
py_object_type) + code.putln("*(temps[i]) = item;") + code.putln("}") + code.putln("}") + + code.putln("#endif") + rhs.generate_disposal_code(code) + + if sequence_type_test == '1': + code.putln("}") # all done + elif sequence_type_test == none_check: + # either tuple/list or None => save some code by generating the error directly + code.putln("} else {") + code.globalstate.use_utility_code( + UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c")) + code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos)) + code.putln("}") # all done + else: + code.putln("} else {") # needs iteration fallback code + self.generate_generic_parallel_unpacking_code( + code, rhs, self.unpacked_items, use_loop=use_loop) + code.putln("}") + + def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True): + code.globalstate.use_utility_code( + UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")) + code.globalstate.use_utility_code( + UtilityCode.load_cached("IterFinish", "ObjectHandling.c")) + code.putln("Py_ssize_t index = -1;") # must be at the start of a C block! + + if use_loop: + code.putln("PyObject** temps[%s] = {%s};" % ( + len(self.unpacked_items), + ','.join(['&%s' % item.result() for item in unpacked_items]))) + + iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + code.putln( + "%s = PyObject_GetIter(%s); %s" % ( + iterator_temp, + rhs.py_result(), + code.error_goto_if_null(iterator_temp, self.pos))) + code.put_gotref(iterator_temp, py_object_type) + rhs.generate_disposal_code(code) + + iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False) + code.putln("%s = __Pyx_PyObject_GetIterNextFunc(%s);" % ( + iternext_func, iterator_temp)) + + unpacking_error_label = code.new_label('unpacking_failed') + unpack_code = "%s(%s)" % (iternext_func, iterator_temp) + if use_loop: + code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items)) + code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code) + code.put_goto(unpacking_error_label) + code.put_gotref("item", py_object_type) + code.putln("*(temps[index]) = item;") + code.putln("}") + else: + for i, item in enumerate(unpacked_items): + code.put( + "index = %d; %s = %s; if (unlikely(!%s)) " % ( + i, + item.result(), + unpack_code, + item.result())) + code.put_goto(unpacking_error_label) + item.generate_gotref(code) + + if terminate: + code.globalstate.use_utility_code( + UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c")) + code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % ( + unpack_code, + len(unpacked_items))) + code.putln("%s = NULL;" % iternext_func) + code.put_decref_clear(iterator_temp, py_object_type) + + unpacking_done_label = code.new_label('unpacking_done') + code.put_goto(unpacking_done_label) + + code.put_label(unpacking_error_label) + code.put_decref_clear(iterator_temp, py_object_type) + code.putln("%s = NULL;" % iternext_func) + code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);") + code.putln(code.error_goto(self.pos)) + code.put_label(unpacking_done_label) + + code.funcstate.release_temp(iternext_func) + if terminate: + code.funcstate.release_temp(iterator_temp) + iterator_temp = None + + return iterator_temp + + def generate_starred_assignment_code(self, rhs, code): + for i, arg in enumerate(self.args): + if arg.is_starred: + starred_target = self.unpacked_items[i] + unpacked_fixed_items_left = 
self.unpacked_items[:i] + unpacked_fixed_items_right = self.unpacked_items[i+1:] + break + else: + assert False + + iterator_temp = None + if unpacked_fixed_items_left: + for item in unpacked_fixed_items_left: + item.allocate(code) + code.putln('{') + iterator_temp = self.generate_generic_parallel_unpacking_code( + code, rhs, unpacked_fixed_items_left, + use_loop=True, terminate=False) + for i, item in enumerate(unpacked_fixed_items_left): + value_node = self.coerced_unpacked_items[i] + value_node.generate_evaluation_code(code) + code.putln('}') + + starred_target.allocate(code) + target_list = starred_target.result() + code.putln("%s = %s(%s); %s" % ( + target_list, + "__Pyx_PySequence_ListKeepNew" if ( + not iterator_temp and rhs.is_temp and rhs.type in (py_object_type, list_type)) + else "PySequence_List", + iterator_temp or rhs.py_result(), + code.error_goto_if_null(target_list, self.pos))) + starred_target.generate_gotref(code) + + if iterator_temp: + code.put_decref_clear(iterator_temp, py_object_type) + code.funcstate.release_temp(iterator_temp) + else: + rhs.generate_disposal_code(code) + + if unpacked_fixed_items_right: + code.globalstate.use_utility_code( + UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")) + length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False) + code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list)) + code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right))) + code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % ( + len(unpacked_fixed_items_left), length_temp, + code.error_goto(self.pos))) + code.putln('}') + + for item in unpacked_fixed_items_right[::-1]: + item.allocate(code) + for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1], + self.coerced_unpacked_items[::-1])): + code.putln('#if CYTHON_COMPILING_IN_CPYTHON') + code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % ( + item.py_result(), target_list, length_temp, i+1)) + # resize the list the hard way + code.putln("((PyVarObject*)%s)->ob_size--;" % target_list) + code.putln('#else') + code.putln("%s = PySequence_ITEM(%s, %s-%d); " % ( + item.py_result(), target_list, length_temp, i+1)) + code.putln('#endif') + item.generate_gotref(code) + coerced_arg.generate_evaluation_code(code) + + code.putln('#if !CYTHON_COMPILING_IN_CPYTHON') + sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True) + code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % ( + sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right), + code.error_goto_if_null(sublist_temp, self.pos))) + code.put_gotref(sublist_temp, py_object_type) + code.funcstate.release_temp(length_temp) + code.put_decref(target_list, py_object_type) + code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp)) + code.putln('#else') + code.putln('CYTHON_UNUSED_VAR(%s);' % sublist_temp) + code.funcstate.release_temp(sublist_temp) + code.putln('#endif') + + for i, arg in enumerate(self.args): + arg.generate_assignment_code(self.coerced_unpacked_items[i], code) + + def annotate(self, code): + for arg in self.args: + arg.annotate(code) + if self.unpacked_items: + for arg in self.unpacked_items: + arg.annotate(code) + for arg in self.coerced_unpacked_items: + arg.annotate(code) + + +class TupleNode(SequenceNode): + # Tuple constructor. 
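+    # When no element requires a Python object, the literal may be
+    # typed as a packed C tuple instead of a Python tuple (see the
+    # declare_tuple_type() path in analyse_types below).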
+ + type = tuple_type + is_partly_literal = False + + gil_message = "Constructing Python tuple" + + def infer_type(self, env): + if self.mult_factor or not self.args: + return tuple_type + arg_types = [arg.infer_type(env) for arg in self.args] + if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused + for type in arg_types): + return tuple_type + return env.declare_tuple_type(self.pos, arg_types).type + + def analyse_types(self, env, skip_children=False): + # reset before re-analysing + if self.is_literal: + self.is_literal = False + if self.is_partly_literal: + self.is_partly_literal = False + + if len(self.args) == 0: + self.is_temp = False + self.is_literal = True + return self + + if not skip_children: + for i, arg in enumerate(self.args): + if arg.is_starred: + arg.starred_expr_allowed_here = True + self.args[i] = arg.analyse_types(env) + if (not self.mult_factor and + not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused) + for arg in self.args)): + self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type + self.is_temp = 1 + return self + + node = SequenceNode.analyse_types(self, env, skip_children=True) + node = node._create_merge_node_if_necessary(env) + if not node.is_sequence_constructor: + return node + + if not all(child.is_literal for child in node.args): + return node + if not node.mult_factor or ( + node.mult_factor.is_literal and + isinstance(node.mult_factor.constant_result, _py_int_types)): + node.is_temp = False + node.is_literal = True + else: + if not node.mult_factor.type.is_pyobject and not node.mult_factor.type.is_int: + node.mult_factor = node.mult_factor.coerce_to_pyobject(env) + node.is_temp = True + node.is_partly_literal = True + return node + + def analyse_as_type(self, env): + # ctuple type + if not self.args: + return None + item_types = [arg.analyse_as_type(env) for arg in self.args] + if any(t is None for t in item_types): + return None + entry = env.declare_tuple_type(self.pos, item_types) + return entry.type + + def coerce_to(self, dst_type, env): + if self.type.is_ctuple: + if dst_type.is_ctuple and self.type.size == dst_type.size: + return self.coerce_to_ctuple(dst_type, env) + elif dst_type is tuple_type or dst_type is py_object_type: + coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args] + return TupleNode( + self.pos, + args=coerced_args, + type=tuple_type, + mult_factor=self.mult_factor, + is_temp=1, + ).analyse_types(env, skip_children=True) + else: + return self.coerce_to_pyobject(env).coerce_to(dst_type, env) + elif dst_type.is_ctuple and not self.mult_factor: + return self.coerce_to_ctuple(dst_type, env) + else: + return SequenceNode.coerce_to(self, dst_type, env) + + def as_list(self): + t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor) + if isinstance(self.constant_result, tuple): + t.constant_result = list(self.constant_result) + return t + + def is_simple(self): + # either temp or constant => always simple + return True + + def nonlocally_immutable(self): + # either temp or constant => always safe + return True + + def calculate_result_code(self): + if len(self.args) > 0: + return self.result_code + else: + return Naming.empty_tuple + + def calculate_constant_result(self): + self.constant_result = tuple([ + arg.constant_result for arg in self.args]) + + def compile_time_value(self, denv): + values = self.compile_time_value_list(denv) + try: + return tuple(values) + except Exception as e: + 
self.compile_time_value_error(e) + + def generate_operation_code(self, code): + if len(self.args) == 0: + # result_code is Naming.empty_tuple + return + + if self.is_literal or self.is_partly_literal: + # The "mult_factor" is part of the deduplication if it is also constant, i.e. when + # we deduplicate the multiplied result. Otherwise, only deduplicate the constant part. + dedup_key = make_dedup_key(self.type, [self.mult_factor if self.is_literal else None] + self.args) + tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2, dedup_key=dedup_key) + const_code = code.get_cached_constants_writer(tuple_target) + if const_code is not None: + # constant is not yet initialised + const_code.mark_pos(self.pos) + self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal) + const_code.put_giveref(tuple_target, py_object_type) + if self.is_literal: + self.result_code = tuple_target + elif self.mult_factor.type.is_int: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PySequenceMultiply", "ObjectHandling.c")) + code.putln('%s = __Pyx_PySequence_Multiply(%s, %s); %s' % ( + self.result(), tuple_target, self.mult_factor.result(), + code.error_goto_if_null(self.result(), self.pos) + )) + self.generate_gotref(code) + else: + code.putln('%s = PyNumber_Multiply(%s, %s); %s' % ( + self.result(), tuple_target, self.mult_factor.py_result(), + code.error_goto_if_null(self.result(), self.pos) + )) + self.generate_gotref(code) + else: + self.type.entry.used = True + self.generate_sequence_packing_code(code) + + +class ListNode(SequenceNode): + # List constructor. + + # obj_conversion_errors [PyrexError] used internally + # orignial_args [ExprNode] used internally + + obj_conversion_errors = [] + type = list_type + in_module_scope = False + + gil_message = "Constructing Python list" + + def type_dependencies(self, env): + return () + + def infer_type(self, env): + # TODO: Infer non-object list arrays. 
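+        # For now, a list literal always infers as plain 'list', even
+        # when every element would fit a C array element type.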
+ return list_type + + def analyse_expressions(self, env): + for arg in self.args: + if arg.is_starred: + arg.starred_expr_allowed_here = True + node = SequenceNode.analyse_expressions(self, env) + return node.coerce_to_pyobject(env) + + def analyse_types(self, env): + with local_errors(ignore=True) as errors: + self.original_args = list(self.args) + node = SequenceNode.analyse_types(self, env) + node.obj_conversion_errors = errors + if env.is_module_scope: + self.in_module_scope = True + node = node._create_merge_node_if_necessary(env) + return node + + def coerce_to(self, dst_type, env): + if dst_type.is_pyobject: + for err in self.obj_conversion_errors: + report_error(err) + self.obj_conversion_errors = [] + if not self.type.subtype_of(dst_type): + error(self.pos, "Cannot coerce list to type '%s'" % dst_type) + elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type: + array_length = len(self.args) + if self.mult_factor: + if isinstance(self.mult_factor.constant_result, _py_int_types): + if self.mult_factor.constant_result <= 0: + error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type) + else: + array_length *= self.mult_factor.constant_result + else: + error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type) + base_type = dst_type.base_type + self.type = PyrexTypes.CArrayType(base_type, array_length) + for i in range(len(self.original_args)): + arg = self.args[i] + if isinstance(arg, CoerceToPyTypeNode): + arg = arg.arg + self.args[i] = arg.coerce_to(base_type, env) + elif dst_type.is_cpp_class: + # TODO(robertwb): Avoid object conversion for vector/list/set. + return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env) + elif self.mult_factor: + error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type) + elif dst_type.is_struct: + if len(self.args) > len(dst_type.scope.var_entries): + error(self.pos, "Too many members for '%s'" % dst_type) + else: + if len(self.args) < len(dst_type.scope.var_entries): + warning(self.pos, "Too few members for '%s'" % dst_type, 1) + for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)): + if isinstance(arg, CoerceToPyTypeNode): + arg = arg.arg + self.args[i] = arg.coerce_to(member.type, env) + self.type = dst_type + elif dst_type.is_ctuple: + return self.coerce_to_ctuple(dst_type, env) + else: + self.type = error_type + error(self.pos, "Cannot coerce list to type '%s'" % dst_type) + return self + + def as_list(self): # dummy for compatibility with TupleNode + return self + + def as_tuple(self): + t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor) + if isinstance(self.constant_result, list): + t.constant_result = tuple(self.constant_result) + return t + + def allocate_temp_result(self, code): + if self.type.is_array: + if self.in_module_scope: + self.temp_code = code.funcstate.allocate_temp( + self.type, manage_ref=False, static=True, reusable=False) + else: + # To be valid C++, we must allocate the memory on the stack + # manually and be sure not to reuse it for something else. + # Yes, this means that we leak a temp array variable. 
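+            # (reusable=False below is what prevents the slot from
+            # being handed back out for another temp.)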
+ self.temp_code = code.funcstate.allocate_temp( + self.type, manage_ref=False, reusable=False) + else: + SequenceNode.allocate_temp_result(self, code) + + def calculate_constant_result(self): + if self.mult_factor: + raise ValueError() # may exceed the compile time memory + self.constant_result = [ + arg.constant_result for arg in self.args] + + def compile_time_value(self, denv): + l = self.compile_time_value_list(denv) + if self.mult_factor: + l *= self.mult_factor.compile_time_value(denv) + return l + + def generate_operation_code(self, code): + if self.type.is_pyobject: + for err in self.obj_conversion_errors: + report_error(err) + self.generate_sequence_packing_code(code) + elif self.type.is_array: + if self.mult_factor: + code.putln("{") + code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname) + code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format( + i=Naming.quick_temp_cname, count=self.mult_factor.result())) + offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname) + else: + offset = '' + for i, arg in enumerate(self.args): + if arg.type.is_array: + code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c")) + code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % ( + self.result(), i, offset, + arg.result(), self.result() + )) + else: + code.putln("%s[%s%s] = %s;" % ( + self.result(), + i, + offset, + arg.result())) + if self.mult_factor: + code.putln("}") + code.putln("}") + elif self.type.is_struct: + for arg, member in zip(self.args, self.type.scope.var_entries): + code.putln("%s.%s = %s;" % ( + self.result(), + member.cname, + arg.result())) + else: + raise InternalError("List type never specified") + + +class ComprehensionNode(ScopedExprNode): + # A list/set/dict comprehension + + child_attrs = ["loop"] + + is_temp = True + constant_result = not_a_constant + + def infer_type(self, env): + return self.type + + def analyse_declarations(self, env): + self.append.target = self # this is used in the PyList_Append of the inner loop + self.init_scope(env) + # setup loop scope + if isinstance(self.loop, Nodes._ForInStatNode): + assert isinstance(self.loop.iterator, ScopedExprNode), self.loop.iterator + self.loop.iterator.init_scope(None, env) + else: + assert isinstance(self.loop, Nodes.ForFromStatNode), self.loop + + def analyse_scoped_declarations(self, env): + self.loop.analyse_declarations(env) + + def analyse_types(self, env): + if not self.has_local_scope: + self.loop = self.loop.analyse_expressions(env) + return self + + def analyse_scoped_expressions(self, env): + if self.has_local_scope: + self.loop = self.loop.analyse_expressions(env) + return self + + def may_be_none(self): + return False + + def generate_result_code(self, code): + self.generate_operation_code(code) + + def generate_operation_code(self, code): + if self.type is Builtin.list_type: + create_code = 'PyList_New(0)' + elif self.type is Builtin.set_type: + create_code = 'PySet_New(NULL)' + elif self.type is Builtin.dict_type: + create_code = 'PyDict_New()' + else: + raise InternalError("illegal type for comprehension: %s" % self.type) + code.putln('%s = %s; %s' % ( + self.result(), create_code, + code.error_goto_if_null(self.result(), self.pos))) + + self.generate_gotref(code) + self.loop.generate_execution_code(code) + + def annotate(self, code): + self.loop.annotate(code) + + +class ComprehensionAppendNode(Node): + # Need to be careful to avoid infinite recursion: + # target must not be in child_attrs/subexprs + + child_attrs = ['expr'] + target = None + + 
type = PyrexTypes.c_int_type + + def analyse_expressions(self, env): + self.expr = self.expr.analyse_expressions(env) + if not self.expr.type.is_pyobject: + self.expr = self.expr.coerce_to_pyobject(env) + return self + + def generate_execution_code(self, code): + if self.target.type is list_type: + code.globalstate.use_utility_code( + UtilityCode.load_cached("ListCompAppend", "Optimize.c")) + function = "__Pyx_ListComp_Append" + elif self.target.type is set_type: + function = "PySet_Add" + else: + raise InternalError( + "Invalid type for comprehension node: %s" % self.target.type) + + self.expr.generate_evaluation_code(code) + code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % ( + function, + self.target.result(), + self.expr.result() + ), self.pos)) + self.expr.generate_disposal_code(code) + self.expr.free_temps(code) + + def generate_function_definitions(self, env, code): + self.expr.generate_function_definitions(env, code) + + def annotate(self, code): + self.expr.annotate(code) + +class DictComprehensionAppendNode(ComprehensionAppendNode): + child_attrs = ['key_expr', 'value_expr'] + + def analyse_expressions(self, env): + self.key_expr = self.key_expr.analyse_expressions(env) + if not self.key_expr.type.is_pyobject: + self.key_expr = self.key_expr.coerce_to_pyobject(env) + self.value_expr = self.value_expr.analyse_expressions(env) + if not self.value_expr.type.is_pyobject: + self.value_expr = self.value_expr.coerce_to_pyobject(env) + return self + + def generate_execution_code(self, code): + self.key_expr.generate_evaluation_code(code) + self.value_expr.generate_evaluation_code(code) + code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % ( + self.target.result(), + self.key_expr.result(), + self.value_expr.result() + ), self.pos)) + self.key_expr.generate_disposal_code(code) + self.key_expr.free_temps(code) + self.value_expr.generate_disposal_code(code) + self.value_expr.free_temps(code) + + def generate_function_definitions(self, env, code): + self.key_expr.generate_function_definitions(env, code) + self.value_expr.generate_function_definitions(env, code) + + def annotate(self, code): + self.key_expr.annotate(code) + self.value_expr.annotate(code) + + +class InlinedGeneratorExpressionNode(ExprNode): + # An inlined generator expression for which the result is calculated + # inside of the loop and returned as a single, first and only Generator + # return value. + # This will only be created by transforms when replacing safe builtin + # calls on generator expressions. 
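+    # For instance, a call like "any(x > 0 for x in seq)" may be
+    # replaced by an inlined loop whose result is delivered as the
+    # generator's single return value.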
+ # + # gen GeneratorExpressionNode the generator, not containing any YieldExprNodes + # orig_func String the name of the builtin function this node replaces + # target ExprNode or None a 'target' for a ComprehensionAppend node + + subexprs = ["gen"] + orig_func = None + target = None + is_temp = True + type = py_object_type + + def __init__(self, pos, gen, comprehension_type=None, **kwargs): + gbody = gen.def_node.gbody + gbody.is_inlined = True + if comprehension_type is not None: + assert comprehension_type in (list_type, set_type, dict_type), comprehension_type + gbody.inlined_comprehension_type = comprehension_type + kwargs.update( + target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname), + type=comprehension_type, + ) + super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs) + + def may_be_none(self): + return self.orig_func not in ('any', 'all', 'sorted') + + def infer_type(self, env): + return self.type + + def analyse_types(self, env): + self.gen = self.gen.analyse_expressions(env) + return self + + def generate_result_code(self, code): + code.putln("%s = __Pyx_Generator_Next(%s); %s" % ( + self.result(), self.gen.result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class MergedSequenceNode(ExprNode): + """ + Merge a sequence of iterables into a set/list/tuple. + + The target collection is determined by self.type, which must be set externally. + + args [ExprNode] + """ + subexprs = ['args'] + is_temp = True + gil_message = "Constructing Python collection" + + def __init__(self, pos, args, type): + if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor: + # construct a list directly from the first argument that we can then extend + if args[0].type is not list_type: + args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True, mult_factor=args[0].mult_factor) + ExprNode.__init__(self, pos, args=args, type=type) + + def calculate_constant_result(self): + result = [] + for item in self.args: + if item.is_sequence_constructor and item.mult_factor: + if item.mult_factor.constant_result <= 0: + continue + # otherwise, adding each item once should be enough + if item.is_set_literal or item.is_sequence_constructor: + # process items in order + items = (arg.constant_result for arg in item.args) + else: + items = item.constant_result + result.extend(items) + if self.type is set_type: + result = set(result) + elif self.type is tuple_type: + result = tuple(result) + else: + assert self.type is list_type + self.constant_result = result + + def compile_time_value(self, denv): + result = [] + for item in self.args: + if item.is_sequence_constructor and item.mult_factor: + if item.mult_factor.compile_time_value(denv) <= 0: + continue + if item.is_set_literal or item.is_sequence_constructor: + # process items in order + items = (arg.compile_time_value(denv) for arg in item.args) + else: + items = item.compile_time_value(denv) + result.extend(items) + if self.type is set_type: + try: + result = set(result) + except Exception as e: + self.compile_time_value_error(e) + elif self.type is tuple_type: + result = tuple(result) + else: + assert self.type is list_type + return result + + def type_dependencies(self, env): + return () + + def infer_type(self, env): + return self.type + + def analyse_types(self, env): + args = [ + arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node( + # FIXME: CPython's error message starts with the runtime function name + 'argument after * must be an 
iterable, not NoneType') + for arg in self.args + ] + + if len(args) == 1 and args[0].type is self.type: + # strip this intermediate node and use the bare collection + return args[0] + + assert self.type in (set_type, list_type, tuple_type) + + self.args = args + return self + + def may_be_none(self): + return False + + def generate_evaluation_code(self, code): + code.mark_pos(self.pos) + self.allocate_temp_result(code) + + is_set = self.type is set_type + + args = iter(self.args) + item = next(args) + item.generate_evaluation_code(code) + if (is_set and item.is_set_literal or + not is_set and item.is_sequence_constructor and item.type is list_type): + code.putln("%s = %s;" % (self.result(), item.py_result())) + item.generate_post_assignment_code(code) + else: + code.putln("%s = %s(%s); %s" % ( + self.result(), + 'PySet_New' if is_set + else "__Pyx_PySequence_ListKeepNew" if item.is_temp and item.type in (py_object_type, list_type) + else "PySequence_List", + item.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + item.generate_disposal_code(code) + item.free_temps(code) + + helpers = set() + if is_set: + add_func = "PySet_Add" + extend_func = "__Pyx_PySet_Update" + else: + add_func = "__Pyx_ListComp_Append" + extend_func = "__Pyx_PyList_Extend" + + for item in args: + if (is_set and (item.is_set_literal or item.is_sequence_constructor) or + (item.is_sequence_constructor and not item.mult_factor)): + if not is_set and item.args: + helpers.add(("ListCompAppend", "Optimize.c")) + for arg in item.args: + arg.generate_evaluation_code(code) + code.put_error_if_neg(arg.pos, "%s(%s, %s)" % ( + add_func, + self.result(), + arg.py_result())) + arg.generate_disposal_code(code) + arg.free_temps(code) + continue + + if is_set: + helpers.add(("PySet_Update", "Builtins.c")) + else: + helpers.add(("ListExtend", "Optimize.c")) + + item.generate_evaluation_code(code) + code.put_error_if_neg(item.pos, "%s(%s, %s)" % ( + extend_func, + self.result(), + item.py_result())) + item.generate_disposal_code(code) + item.free_temps(code) + + if self.type is tuple_type: + code.putln("{") + code.putln("PyObject *%s = PyList_AsTuple(%s);" % ( + Naming.quick_temp_cname, + self.result())) + code.put_decref(self.result(), py_object_type) + code.putln("%s = %s; %s" % ( + self.result(), + Naming.quick_temp_cname, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + code.putln("}") + + for helper in sorted(helpers): + code.globalstate.use_utility_code(UtilityCode.load_cached(*helper)) + + def annotate(self, code): + for item in self.args: + item.annotate(code) + + +class SetNode(ExprNode): + """ + Set constructor. 
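+
+    Builds a set literal such as {1, 2, 3}: every argument is coerced
+    to a Python object and added via PySet_Add().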
+ """ + subexprs = ['args'] + type = set_type + is_set_literal = True + gil_message = "Constructing Python set" + + def analyse_types(self, env): + for i in range(len(self.args)): + arg = self.args[i] + arg = arg.analyse_types(env) + self.args[i] = arg.coerce_to_pyobject(env) + self.type = set_type + self.is_temp = 1 + return self + + def may_be_none(self): + return False + + def calculate_constant_result(self): + self.constant_result = {arg.constant_result for arg in self.args} + + def compile_time_value(self, denv): + values = [arg.compile_time_value(denv) for arg in self.args] + try: + return set(values) + except Exception as e: + self.compile_time_value_error(e) + + def generate_evaluation_code(self, code): + for arg in self.args: + arg.generate_evaluation_code(code) + self.allocate_temp_result(code) + code.putln( + "%s = PySet_New(0); %s" % ( + self.result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + for arg in self.args: + code.put_error_if_neg( + self.pos, + "PySet_Add(%s, %s)" % (self.result(), arg.py_result())) + arg.generate_disposal_code(code) + arg.free_temps(code) + + +class DictNode(ExprNode): + # Dictionary constructor. + # + # key_value_pairs [DictItemNode] + # exclude_null_values [boolean] Do not add NULL values to dict + # + # obj_conversion_errors [PyrexError] used internally + + subexprs = ['key_value_pairs'] + is_temp = 1 + exclude_null_values = False + type = dict_type + is_dict_literal = True + reject_duplicates = False + + obj_conversion_errors = [] + + @classmethod + def from_pairs(cls, pos, pairs): + return cls(pos, key_value_pairs=[ + DictItemNode(pos, key=k, value=v) for k, v in pairs]) + + def calculate_constant_result(self): + self.constant_result = dict([ + item.constant_result for item in self.key_value_pairs]) + + def compile_time_value(self, denv): + pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv)) + for item in self.key_value_pairs] + try: + return dict(pairs) + except Exception as e: + self.compile_time_value_error(e) + + def type_dependencies(self, env): + return () + + def infer_type(self, env): + # TODO: Infer struct constructors. 
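+        # Inference alone always yields 'dict' here; coercing a dict
+        # literal to a struct or union value is handled separately in
+        # coerce_to() below.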
+ return dict_type + + def analyse_types(self, env): + with local_errors(ignore=True) as errors: + self.key_value_pairs = [ + item.analyse_types(env) + for item in self.key_value_pairs + ] + self.obj_conversion_errors = errors + return self + + def may_be_none(self): + return False + + def coerce_to(self, dst_type, env): + if dst_type.is_pyobject: + self.release_errors() + if self.type.is_struct_or_union: + if not dict_type.subtype_of(dst_type): + error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type) + return DictNode(self.pos, key_value_pairs=[ + DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env), + value=item.value.coerce_to_pyobject(env)) + for item in self.key_value_pairs]) + if not self.type.subtype_of(dst_type): + error(self.pos, "Cannot interpret dict as type '%s'" % dst_type) + elif dst_type.is_struct_or_union: + self.type = dst_type + if not dst_type.is_struct and len(self.key_value_pairs) != 1: + error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type) + elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries): + warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1) + for item in self.key_value_pairs: + if isinstance(item.key, CoerceToPyTypeNode): + item.key = item.key.arg + if not item.key.is_string_literal: + error(item.key.pos, "Invalid struct field identifier") + item.key = StringNode(item.key.pos, value="") + else: + key = str(item.key.value) # converts string literals to unicode in Py3 + member = dst_type.scope.lookup_here(key) + if not member: + error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key)) + else: + value = item.value + if isinstance(value, CoerceToPyTypeNode): + value = value.arg + item.value = value.coerce_to(member.type, env) + else: + return super(DictNode, self).coerce_to(dst_type, env) + return self + + def release_errors(self): + for err in self.obj_conversion_errors: + report_error(err) + self.obj_conversion_errors = [] + + gil_message = "Constructing Python dict" + + def generate_evaluation_code(self, code): + # Custom method used here because key-value + # pairs are evaluated and used one at a time. 
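+        # Roughly, the C generated for a literal {k1: v1, k2: v2} is:
+        #     d = __Pyx_PyDict_NewPresized(2);
+        #     PyDict_SetItem(d, k1, v1);
+        #     PyDict_SetItem(d, k2, v2);
+        # with optional duplicate-key / NULL-value checks interleaved.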
+ code.mark_pos(self.pos) + self.allocate_temp_result(code) + + is_dict = self.type.is_pyobject + if is_dict: + self.release_errors() + code.putln( + "%s = __Pyx_PyDict_NewPresized(%d); %s" % ( + self.result(), + len(self.key_value_pairs), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + keys_seen = set() + key_type = None + needs_error_helper = False + + for item in self.key_value_pairs: + item.generate_evaluation_code(code) + if is_dict: + if self.exclude_null_values: + code.putln('if (%s) {' % item.value.py_result()) + key = item.key + if self.reject_duplicates: + if keys_seen is not None: + # avoid runtime 'in' checks for literals that we can do at compile time + if not key.is_string_literal: + keys_seen = None + elif key.value in keys_seen: + # FIXME: this could be a compile time error, at least in Cython code + keys_seen = None + elif key_type is not type(key.value): + if key_type is None: + key_type = type(key.value) + keys_seen.add(key.value) + else: + # different types => may not be able to compare at compile time + keys_seen = None + else: + keys_seen.add(key.value) + + if keys_seen is None: + code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % ( + self.result(), key.py_result())) + # currently only used in function calls + needs_error_helper = True + code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % ( + key.py_result(), + code.error_goto(item.pos))) + code.putln("} else {") + + code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % ( + self.result(), + item.key.py_result(), + item.value.py_result())) + if self.reject_duplicates and keys_seen is None: + code.putln('}') + if self.exclude_null_values: + code.putln('}') + else: + if item.value.type.is_array: + code.putln("memcpy(%s.%s, %s, sizeof(%s));" % ( + self.result(), + item.key.value, + item.value.result(), + item.value.result())) + else: + code.putln("%s.%s = %s;" % ( + self.result(), + item.key.value, + item.value.result())) + item.generate_disposal_code(code) + item.free_temps(code) + + if needs_error_helper: + code.globalstate.use_utility_code( + UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c")) + + def annotate(self, code): + for item in self.key_value_pairs: + item.annotate(code) + + def as_python_dict(self): + # returns a dict with constant keys and Node values + # (only works on DictNodes where the keys are ConstNodes or PyConstNode) + return dict([(key.value, value) for key, value in self.key_value_pairs]) + + +class DictItemNode(ExprNode): + # Represents a single item in a DictNode + # + # key ExprNode + # value ExprNode + subexprs = ['key', 'value'] + + nogil_check = None # Parent DictNode takes care of it + + def calculate_constant_result(self): + self.constant_result = ( + self.key.constant_result, self.value.constant_result) + + def analyse_types(self, env): + self.key = self.key.analyse_types(env) + self.value = self.value.analyse_types(env) + self.key = self.key.coerce_to_pyobject(env) + self.value = self.value.coerce_to_pyobject(env) + return self + + def generate_evaluation_code(self, code): + self.key.generate_evaluation_code(code) + self.value.generate_evaluation_code(code) + + def generate_disposal_code(self, code): + self.key.generate_disposal_code(code) + self.value.generate_disposal_code(code) + + def free_temps(self, code): + self.key.free_temps(code) + self.value.free_temps(code) + + def __iter__(self): + return iter([self.key, self.value]) + + +class SortedDictKeysNode(ExprNode): + # build sorted list of dict keys, 
e.g. for dir() + subexprs = ['arg'] + + is_temp = True + + def __init__(self, arg): + ExprNode.__init__(self, arg.pos, arg=arg) + self.type = Builtin.list_type + + def analyse_types(self, env): + arg = self.arg.analyse_types(env) + if arg.type is Builtin.dict_type: + arg = arg.as_none_safe_node( + "'NoneType' object is not iterable") + self.arg = arg + return self + + def may_be_none(self): + return False + + def generate_result_code(self, code): + dict_result = self.arg.py_result() + if self.arg.type is Builtin.dict_type: + code.putln('%s = PyDict_Keys(%s); %s' % ( + self.result(), dict_result, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + else: + # originally used PyMapping_Keys() here, but that may return a tuple + code.globalstate.use_utility_code(UtilityCode.load_cached( + 'PyObjectCallMethod0', 'ObjectHandling.c')) + keys_cname = code.intern_identifier(StringEncoding.EncodedString("keys")) + code.putln('%s = __Pyx_PyObject_CallMethod0(%s, %s); %s' % ( + self.result(), dict_result, keys_cname, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + code.putln("if (unlikely(!PyList_Check(%s))) {" % self.result()) + self.generate_decref_set(code, "PySequence_List(%s)" % self.result()) + code.putln(code.error_goto_if_null(self.result(), self.pos)) + self.generate_gotref(code) + code.putln("}") + code.put_error_if_neg( + self.pos, 'PyList_Sort(%s)' % self.py_result()) + + +class ModuleNameMixin(object): + def get_py_mod_name(self, code): + return code.get_py_string_const( + self.module_name, identifier=True) + + def get_py_qualified_name(self, code): + return code.get_py_string_const( + self.qualname, identifier=True) + + +class ClassNode(ExprNode, ModuleNameMixin): + # Helper class used in the implementation of Python + # class definitions. Constructs a class object given + # a name, tuple of bases and class dictionary. + # + # name EncodedString Name of the class + # class_def_node PyClassDefNode PyClassDefNode defining this class + # doc ExprNode or None Doc string + # module_name EncodedString Name of defining module + + subexprs = ['doc'] + type = py_object_type + is_temp = True + + def analyse_annotations(self, env): + pass + + def infer_type(self, env): + # TODO: could return 'type' in some cases + return py_object_type + + def analyse_types(self, env): + if self.doc: + self.doc = self.doc.analyse_types(env) + self.doc = self.doc.coerce_to_pyobject(env) + env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c")) + return self + + def may_be_none(self): + return True + + gil_message = "Constructing Python class" + + def generate_result_code(self, code): + class_def_node = self.class_def_node + cname = code.intern_identifier(self.name) + + if self.doc: + code.put_error_if_neg(self.pos, + 'PyDict_SetItem(%s, %s, %s)' % ( + class_def_node.dict.py_result(), + code.intern_identifier( + StringEncoding.EncodedString("__doc__")), + self.doc.py_result())) + py_mod_name = self.get_py_mod_name(code) + qualname = self.get_py_qualified_name(code) + code.putln( + '%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % ( + self.result(), + class_def_node.bases.py_result(), + class_def_node.dict.py_result(), + cname, + qualname, + py_mod_name, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class Py3ClassNode(ExprNode): + # Helper class used in the implementation of Python3+ + # class definitions. Constructs a class object given + # a name, tuple of bases and class dictionary. 
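+    # For illustration (a sketch, not part of the original source): for
+    # 'class C(Base): x = 1' this node receives the name 'C', the bases
+    # tuple '(Base,)' and the class namespace dict produced by executing
+    # the class body.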
+ # + # name EncodedString Name of the class + # module_name EncodedString Name of defining module + # class_def_node PyClassDefNode PyClassDefNode defining this class + # calculate_metaclass bool should call CalculateMetaclass() + # allow_py2_metaclass bool should look for Py2 metaclass + # force_type bool always create a "new style" class, even with no bases + + subexprs = [] + type = py_object_type + force_type = False + is_temp = True + + def infer_type(self, env): + # TODO: could return 'type' in some cases + return py_object_type + + def analyse_types(self, env): + return self + + def may_be_none(self): + return True + + gil_message = "Constructing Python class" + + def analyse_annotations(self, env): + from .AutoDocTransforms import AnnotationWriter + position = self.class_def_node.pos + dict_items = [ + DictItemNode( + entry.pos, + key=IdentifierStringNode(entry.pos, value=entry.name), + value=entry.annotation.string + ) + for entry in env.entries.values() if entry.annotation + ] + # Annotations dict shouldn't exist for classes which don't declare any. + if dict_items: + annotations_dict = DictNode(position, key_value_pairs=dict_items) + lhs = NameNode(position, name=StringEncoding.EncodedString(u"__annotations__")) + lhs.entry = env.lookup_here(lhs.name) or env.declare_var(lhs.name, dict_type, position) + node = SingleAssignmentNode(position, lhs=lhs, rhs=annotations_dict) + node.analyse_declarations(env) + self.class_def_node.body.stats.insert(0, node) + + def generate_result_code(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c")) + cname = code.intern_identifier(self.name) + class_def_node = self.class_def_node + mkw = class_def_node.mkw.py_result() if class_def_node.mkw else 'NULL' + if class_def_node.metaclass: + metaclass = class_def_node.metaclass.py_result() + elif self.force_type: + metaclass = "((PyObject*)&PyType_Type)" + else: + metaclass = "((PyObject*)&__Pyx_DefaultClassType)" + code.putln( + '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % ( + self.result(), + metaclass, + cname, + class_def_node.bases.py_result(), + class_def_node.dict.py_result(), + mkw, + self.calculate_metaclass, + self.allow_py2_metaclass, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class PyClassMetaclassNode(ExprNode): + # Helper class holds Python3 metaclass object + # + # class_def_node PyClassDefNode PyClassDefNode defining this class + + subexprs = [] + + def analyse_types(self, env): + self.type = py_object_type + self.is_temp = True + return self + + def may_be_none(self): + return True + + def generate_result_code(self, code): + bases = self.class_def_node.bases + mkw = self.class_def_node.mkw + if mkw: + code.globalstate.use_utility_code( + UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c")) + call = "__Pyx_Py3MetaclassGet(%s, %s)" % ( + bases.result(), + mkw.result()) + else: + code.globalstate.use_utility_code( + UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c")) + call = "__Pyx_CalculateMetaclass(NULL, %s)" % ( + bases.result()) + code.putln( + "%s = %s; %s" % ( + self.result(), call, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class PyClassNamespaceNode(ExprNode, ModuleNameMixin): + # Helper class holds Python3 namespace object + # + # All this are not owned by this node + # class_def_node PyClassDefNode PyClassDefNode defining this class + # doc ExprNode or None Doc string (owned) + + subexprs = 
['doc'] + + def analyse_types(self, env): + if self.doc: + self.doc = self.doc.analyse_types(env).coerce_to_pyobject(env) + self.type = py_object_type + self.is_temp = 1 + return self + + def may_be_none(self): + return True + + def generate_result_code(self, code): + cname = code.intern_identifier(self.name) + py_mod_name = self.get_py_mod_name(code) + qualname = self.get_py_qualified_name(code) + class_def_node = self.class_def_node + null = "(PyObject *) NULL" + doc_code = self.doc.result() if self.doc else null + mkw = class_def_node.mkw.py_result() if class_def_node.mkw else null + metaclass = class_def_node.metaclass.py_result() if class_def_node.metaclass else null + code.putln( + "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % ( + self.result(), + metaclass, + class_def_node.bases.result(), + cname, + qualname, + mkw, + py_mod_name, + doc_code, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class ClassCellInjectorNode(ExprNode): + # Initialize CyFunction.func_classobj + is_temp = True + type = py_object_type + subexprs = [] + is_active = False + + def analyse_expressions(self, env): + return self + + def generate_result_code(self, code): + assert self.is_active + code.putln( + '%s = PyList_New(0); %s' % ( + self.result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + def generate_injection_code(self, code, classobj_cname): + assert self.is_active + code.globalstate.use_utility_code( + UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c")) + code.put_error_if_neg(self.pos, '__Pyx_CyFunction_InitClassCell(%s, %s)' % ( + self.result(), classobj_cname)) + + +class ClassCellNode(ExprNode): + # Class Cell for noargs super() + subexprs = [] + is_temp = True + is_generator = False + type = py_object_type + + def analyse_types(self, env): + return self + + def generate_result_code(self, code): + if not self.is_generator: + code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % ( + self.result(), + Naming.self_cname)) + else: + code.putln('%s = %s->classobj;' % ( + self.result(), Naming.generator_cname)) + code.putln( + 'if (!%s) { PyErr_SetString(PyExc_SystemError, ' + '"super(): empty __class__ cell"); %s }' % ( + self.result(), + code.error_goto(self.pos))) + code.put_incref(self.result(), py_object_type) + + +class PyCFunctionNode(ExprNode, ModuleNameMixin): + # Helper class used in the implementation of Python + # functions. Constructs a PyCFunction object + # from a PyMethodDef struct. 
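+    # Illustrative note (a sketch): a module-level 'def f(a, b=1)'
+    # contributes a PyMethodDef entry; this node turns it into the
+    # Python callable, and in binding mode into a CyFunction that also
+    # carries the defaults, annotations and code object handled below.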
+ # + # pymethdef_cname string PyMethodDef structure + # binding bool + # def_node DefNode the Python function node + # module_name EncodedString Name of defining module + # code_object CodeObjectNode the PyCodeObject creator node + + subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict', + 'annotations_dict'] + + code_object = None + binding = False + def_node = None + defaults = None + defaults_struct = None + defaults_pyobjects = 0 + defaults_tuple = None + defaults_kwdict = None + annotations_dict = None + + type = py_object_type + is_temp = 1 + + specialized_cpdefs = None + is_specialization = False + + @classmethod + def from_defnode(cls, node, binding): + return cls(node.pos, + def_node=node, + pymethdef_cname=node.entry.pymethdef_cname, + binding=binding or node.specialized_cpdefs, + specialized_cpdefs=node.specialized_cpdefs, + code_object=CodeObjectNode(node)) + + def analyse_types(self, env): + if self.binding: + self.analyse_default_args(env) + return self + + def analyse_default_args(self, env): + """ + Handle non-literal function's default arguments. + """ + nonliteral_objects = [] + nonliteral_other = [] + default_args = [] + default_kwargs = [] + annotations = [] + + # For global cpdef functions and def/cpdef methods in cdef classes, we must use global constants + # for default arguments to avoid the dependency on the CyFunction object as 'self' argument + # in the underlying C function. Basically, cpdef functions/methods are static C functions, + # so their optional arguments must be static, too. + # TODO: change CyFunction implementation to pass both function object and owning object for method calls + must_use_constants = env.is_c_class_scope or (self.def_node.is_wrapper and env.is_module_scope) + + for arg in self.def_node.args: + if arg.default: + if not must_use_constants: + if arg.default.is_literal: + arg.default = DefaultLiteralArgNode(arg.pos, arg.default) + if arg.default.type: + arg.default = arg.default.coerce_to(arg.type, env) + else: + arg.is_dynamic = True + if arg.type.is_pyobject: + nonliteral_objects.append(arg) + else: + nonliteral_other.append(arg) + if arg.default.type and arg.default.type.can_coerce_to_pyobject(env): + if arg.kw_only: + default_kwargs.append(arg) + else: + default_args.append(arg) + if arg.annotation: + arg.annotation = arg.annotation.analyse_types(env) + annotations.append((arg.pos, arg.name, arg.annotation.string)) + + for arg in (self.def_node.star_arg, self.def_node.starstar_arg): + if arg and arg.annotation: + arg.annotation = arg.annotation.analyse_types(env) + annotations.append((arg.pos, arg.name, arg.annotation.string)) + + annotation = self.def_node.return_type_annotation + if annotation: + self.def_node.return_type_annotation = annotation.analyse_types(env) + annotations.append((annotation.pos, StringEncoding.EncodedString("return"), + annotation.string)) + + if nonliteral_objects or nonliteral_other: + module_scope = env.global_scope() + cname = module_scope.next_id(Naming.defaults_struct_prefix) + scope = Symtab.StructOrUnionScope(cname) + self.defaults = [] + for arg in nonliteral_objects: + type_ = arg.type + if type_.is_buffer: + type_ = type_.base + entry = scope.declare_var(arg.name, type_, None, + Naming.arg_prefix + arg.name, + allow_pyobject=True) + self.defaults.append((arg, entry)) + for arg in nonliteral_other: + entry = scope.declare_var(arg.name, arg.type, None, + Naming.arg_prefix + arg.name, + allow_pyobject=False, allow_memoryview=True) + self.defaults.append((arg, entry)) + entry = 
module_scope.declare_struct_or_union( + None, 'struct', scope, 1, None, cname=cname) + self.defaults_struct = scope + self.defaults_pyobjects = len(nonliteral_objects) + for arg, entry in self.defaults: + arg.default_value = '%s->%s' % ( + Naming.dynamic_args_cname, entry.cname) + self.def_node.defaults_struct = self.defaults_struct.name + + if default_args or default_kwargs: + if self.defaults_struct is None: + if default_args: + defaults_tuple = TupleNode(self.pos, args=[ + arg.default for arg in default_args]) + self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env) + if default_kwargs: + defaults_kwdict = DictNode(self.pos, key_value_pairs=[ + DictItemNode( + arg.pos, + key=IdentifierStringNode(arg.pos, value=arg.name), + value=arg.default) + for arg in default_kwargs]) + self.defaults_kwdict = defaults_kwdict.analyse_types(env) + elif not self.specialized_cpdefs: + # Fused dispatch functions do not support (dynamic) default arguments, only the specialisations do. + if default_args: + defaults_tuple = DefaultsTupleNode( + self.pos, default_args, self.defaults_struct) + else: + defaults_tuple = NoneNode(self.pos) + if default_kwargs: + defaults_kwdict = DefaultsKwDictNode( + self.pos, default_kwargs, self.defaults_struct) + else: + defaults_kwdict = NoneNode(self.pos) + + defaults_getter = Nodes.DefNode( + self.pos, args=[], star_arg=None, starstar_arg=None, + body=Nodes.ReturnStatNode( + self.pos, return_type=py_object_type, + value=TupleNode( + self.pos, args=[defaults_tuple, defaults_kwdict])), + decorators=None, + name=StringEncoding.EncodedString("__defaults__")) + # defaults getter must never live in class scopes, it's always a module function + module_scope = env.global_scope() + defaults_getter.analyse_declarations(module_scope) + defaults_getter = defaults_getter.analyse_expressions(module_scope) + defaults_getter.body = defaults_getter.body.analyse_expressions( + defaults_getter.local_scope) + defaults_getter.py_wrapper_required = False + defaults_getter.pymethdef_required = False + self.def_node.defaults_getter = defaults_getter + if annotations: + annotations_dict = DictNode(self.pos, key_value_pairs=[ + DictItemNode( + pos, key=IdentifierStringNode(pos, value=name), + value=value) + for pos, name, value in annotations]) + self.annotations_dict = annotations_dict.analyse_types(env) + + def may_be_none(self): + return False + + gil_message = "Constructing Python function" + + def closure_result_code(self): + return "NULL" + + def generate_result_code(self, code): + if self.binding: + self.generate_cyfunction_code(code) + else: + self.generate_pycfunction_code(code) + + def generate_pycfunction_code(self, code): + py_mod_name = self.get_py_mod_name(code) + code.putln( + '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % ( + self.result(), + self.pymethdef_cname, + self.closure_result_code(), + py_mod_name, + code.error_goto_if_null(self.result(), self.pos))) + + self.generate_gotref(code) + + def generate_cyfunction_code(self, code): + if self.specialized_cpdefs: + def_node = self.specialized_cpdefs[0] + else: + def_node = self.def_node + + if self.specialized_cpdefs or self.is_specialization: + code.globalstate.use_utility_code( + UtilityCode.load_cached("FusedFunction", "CythonFunction.c")) + constructor = "__pyx_FusedFunction_New" + else: + code.globalstate.use_utility_code( + UtilityCode.load_cached("CythonFunction", "CythonFunction.c")) + constructor = "__Pyx_CyFunction_New" + + if self.code_object: + code_object_result = self.code_object.py_result() + 
else: + code_object_result = 'NULL' + + flags = [] + if def_node.is_staticmethod: + flags.append('__Pyx_CYFUNCTION_STATICMETHOD') + elif def_node.is_classmethod: + flags.append('__Pyx_CYFUNCTION_CLASSMETHOD') + + if def_node.local_scope.parent_scope.is_c_class_scope and not def_node.entry.is_anonymous: + flags.append('__Pyx_CYFUNCTION_CCLASS') + + if def_node.is_coroutine: + flags.append('__Pyx_CYFUNCTION_COROUTINE') + + if flags: + flags = ' | '.join(flags) + else: + flags = '0' + + code.putln( + '%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % ( + self.result(), + constructor, + self.pymethdef_cname, + flags, + self.get_py_qualified_name(code), + self.closure_result_code(), + self.get_py_mod_name(code), + Naming.moddict_cname, + code_object_result, + code.error_goto_if_null(self.result(), self.pos))) + + self.generate_gotref(code) + + if def_node.requires_classobj: + assert code.pyclass_stack, "pyclass_stack is empty" + class_node = code.pyclass_stack[-1] + code.put_incref(self.py_result(), py_object_type) + code.putln( + 'PyList_Append(%s, %s);' % ( + class_node.class_cell.result(), + self.result())) + self.generate_giveref(code) + + if self.defaults: + code.putln( + 'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % ( + self.result(), self.defaults_struct.name, + self.defaults_pyobjects, code.error_goto(self.pos))) + defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % ( + self.defaults_struct.name, self.result()) + for arg, entry in self.defaults: + arg.generate_assignment_code(code, target='%s->%s' % ( + defaults, entry.cname)) + + if self.defaults_tuple: + code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % ( + self.result(), self.defaults_tuple.py_result())) + if not self.specialized_cpdefs: + # disable introspection functions for fused dispatcher function since the user never sees it + # TODO: this is mostly disabled because the attributes end up pointing to ones belonging + # to the specializations - ideally this would be fixed instead + if self.defaults_kwdict: + code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % ( + self.result(), self.defaults_kwdict.py_result())) + if def_node.defaults_getter: + code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % ( + self.result(), def_node.defaults_getter.entry.pyfunc_cname)) + if self.annotations_dict: + code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % ( + self.result(), self.annotations_dict.py_result())) + + +class InnerFunctionNode(PyCFunctionNode): + # Special PyCFunctionNode that depends on a closure class + + binding = True + needs_closure_code = True + + def closure_result_code(self): + if self.needs_closure_code: + return "((PyObject*)%s)" % Naming.cur_scope_cname + return "NULL" + + +class CodeObjectNode(ExprNode): + # Create a PyCodeObject for a CyFunction instance. 
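+    # Descriptive note: the created code object carries metadata only
+    # (argument names, flags, filename, first line number) for
+    # introspection and tracebacks; its bytecode field is the empty
+    # bytes constant, since execution happens in generated C code.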
+ # + # def_node DefNode the Python function node + # varnames TupleNode a tuple with all local variable names + + subexprs = ['varnames'] + is_temp = False + result_code = None + + def __init__(self, def_node): + ExprNode.__init__(self, def_node.pos, def_node=def_node) + args = list(def_node.args) + # if we have args/kwargs, then the first two in var_entries are those + local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name] + self.varnames = TupleNode( + def_node.pos, + args=[IdentifierStringNode(arg.pos, value=arg.name) + for arg in args + local_vars], + is_temp=0, + is_literal=1) + + def may_be_none(self): + return False + + def calculate_result_code(self, code=None): + if self.result_code is None: + self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2) + return self.result_code + + def generate_result_code(self, code): + if self.result_code is None: + self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2) + + code = code.get_cached_constants_writer(self.result_code) + if code is None: + return # already initialised + code.mark_pos(self.pos) + func = self.def_node + func_name = code.get_py_string_const( + func.name, identifier=True, is_str=False, unicode_value=func.name) + # FIXME: better way to get the module file path at module init time? Encoding to use? + file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8') + file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True) + + # This combination makes CPython create a new dict for "frame.f_locals" (see GH #1836). + flags = ['CO_OPTIMIZED', 'CO_NEWLOCALS'] + + if self.def_node.star_arg: + flags.append('CO_VARARGS') + if self.def_node.starstar_arg: + flags.append('CO_VARKEYWORDS') + if self.def_node.is_asyncgen: + flags.append('CO_ASYNC_GENERATOR') + elif self.def_node.is_coroutine: + flags.append('CO_COROUTINE') + elif self.def_node.is_generator: + flags.append('CO_GENERATOR') + + code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % ( + self.result_code, + len(func.args) - func.num_kwonly_args, # argcount + func.num_posonly_args, # posonlyargcount (Py3.8+ only) + func.num_kwonly_args, # kwonlyargcount (Py3 only) + len(self.varnames.args), # nlocals + '|'.join(flags) or '0', # flags + Naming.empty_bytes, # code + Naming.empty_tuple, # consts + Naming.empty_tuple, # names (FIXME) + self.varnames.result(), # varnames + Naming.empty_tuple, # freevars (FIXME) + Naming.empty_tuple, # cellvars (FIXME) + file_path_const, # filename + func_name, # name + self.pos[1], # firstlineno + Naming.empty_bytes, # lnotab + code.error_goto_if_null(self.result_code, self.pos), + )) + + +class DefaultLiteralArgNode(ExprNode): + # CyFunction's literal argument default value + # + # Evaluate literal only once. 
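+    # Example (a sketch): for 'def f(x=42)', the literal 42 is evaluated
+    # once when the function object is created and reused on every call.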
+
+    subexprs = []
+    is_literal = True
+    is_temp = False
+
+    def __init__(self, pos, arg):
+        super(DefaultLiteralArgNode, self).__init__(pos)
+        self.arg = arg
+        self.constant_result = arg.constant_result
+        self.type = self.arg.type
+        self.evaluated = False
+
+    def analyse_types(self, env):
+        return self
+
+    def generate_result_code(self, code):
+        pass
+
+    def generate_evaluation_code(self, code):
+        if not self.evaluated:
+            self.arg.generate_evaluation_code(code)
+            self.evaluated = True
+
+    def result(self):
+        return self.type.cast_code(self.arg.result())
+
+
+class DefaultNonLiteralArgNode(ExprNode):
+    # CyFunction's non-literal argument default value
+
+    subexprs = []
+
+    def __init__(self, pos, arg, defaults_struct):
+        super(DefaultNonLiteralArgNode, self).__init__(pos)
+        self.arg = arg
+        self.defaults_struct = defaults_struct
+
+    def analyse_types(self, env):
+        self.type = self.arg.type
+        self.is_temp = False
+        return self
+
+    def generate_result_code(self, code):
+        pass
+
+    def result(self):
+        return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
+            self.defaults_struct.name, Naming.self_cname,
+            self.defaults_struct.lookup(self.arg.name).cname)
+
+
+class DefaultsTupleNode(TupleNode):
+    # CyFunction's __defaults__ tuple
+
+    def __init__(self, pos, defaults, defaults_struct):
+        args = []
+        for arg in defaults:
+            if not arg.default.is_literal:
+                arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
+            else:
+                arg = arg.default
+            args.append(arg)
+        super(DefaultsTupleNode, self).__init__(pos, args=args)
+
+    def analyse_types(self, env, skip_children=False):
+        return super(DefaultsTupleNode, self).analyse_types(env, skip_children).coerce_to_pyobject(env)
+
+
+class DefaultsKwDictNode(DictNode):
+    # CyFunction's __kwdefaults__ dict
+
+    def __init__(self, pos, defaults, defaults_struct):
+        items = []
+        for arg in defaults:
+            name = IdentifierStringNode(arg.pos, value=arg.name)
+            if not arg.default.is_literal:
+                arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
+            else:
+                arg = arg.default
+            items.append(DictItemNode(arg.pos, key=name, value=arg))
+        super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
+
+
+class LambdaNode(InnerFunctionNode):
+    # Lambda expression node (only used as a function reference)
+    #
+    #  args          [CArgDeclNode]         formal arguments
+    #  star_arg      PyArgDeclNode or None  * argument
+    #  starstar_arg  PyArgDeclNode or None  ** argument
+    #  lambda_name   string                 a module-globally unique lambda name
+    #  result_expr   ExprNode
+    #  def_node      DefNode                the underlying function 'def' node
+
+    child_attrs = ['def_node']
+
+    name = StringEncoding.EncodedString('<lambda>')
+
+    def analyse_declarations(self, env):
+        if hasattr(self, "lambda_name"):
+            # this if-statement makes it safe to run twice
+            return
+        self.lambda_name = self.def_node.lambda_name = env.next_id('lambda')
+        self.def_node.no_assignment_synthesis = True
+        self.def_node.pymethdef_required = True
+        self.def_node.is_cyfunction = True
+        self.def_node.analyse_declarations(env)
+        self.pymethdef_cname = self.def_node.entry.pymethdef_cname
+        env.add_lambda_def(self.def_node)
+
+    def analyse_types(self, env):
+        self.def_node = self.def_node.analyse_expressions(env)
+        return super(LambdaNode, self).analyse_types(env)
+
+    def generate_result_code(self, code):
+        self.def_node.generate_execution_code(code)
+        super(LambdaNode, self).generate_result_code(code)
+
+
+class GeneratorExpressionNode(LambdaNode):
+    # A generator expression, e.g. (i for i in range(10))
+    #
+    # Result is a generator.
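+    # For illustration (a sketch): '(i * i for i in seq)' compiles to a
+    # hidden def whose body is the for-loop wrapping a YieldExprNode.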
+ # + # loop ForStatNode the for-loop, containing a YieldExprNode + # def_node DefNode the underlying generator 'def' node + # call_parameters [ExprNode] (Internal) parameters passed to the DefNode call + + name = StringEncoding.EncodedString('genexpr') + binding = False + + child_attrs = LambdaNode.child_attrs + ["call_parameters"] + subexprs = LambdaNode.subexprs + ["call_parameters"] + + def __init__(self, pos, *args, **kwds): + super(GeneratorExpressionNode, self).__init__(pos, *args, **kwds) + self.call_parameters = [] + + def analyse_declarations(self, env): + if hasattr(self, "genexpr_name"): + # this if-statement makes it safe to run twice + return + self.genexpr_name = env.next_id('genexpr') + super(GeneratorExpressionNode, self).analyse_declarations(env) + # No pymethdef required + self.def_node.pymethdef_required = False + self.def_node.py_wrapper_required = False + self.def_node.is_cyfunction = False + # Force genexpr signature + self.def_node.entry.signature = TypeSlots.pyfunction_noargs + # setup loop scope + if isinstance(self.loop, Nodes._ForInStatNode): + assert isinstance(self.loop.iterator, ScopedExprNode) + self.loop.iterator.init_scope(None, env) + else: + assert isinstance(self.loop, Nodes.ForFromStatNode) + + def generate_result_code(self, code): + args_to_call = ([self.closure_result_code()] + + [ cp.result() for cp in self.call_parameters ]) + args_to_call = ", ".join(args_to_call) + code.putln( + '%s = %s(%s); %s' % ( + self.result(), + self.def_node.entry.pyfunc_cname, + args_to_call, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class YieldExprNode(ExprNode): + # Yield expression node + # + # arg ExprNode the value to return from the generator + # label_num integer yield label number + # is_yield_from boolean is a YieldFromExprNode to delegate to another generator + + subexprs = ['arg'] + type = py_object_type + label_num = 0 + is_yield_from = False + is_await = False + in_async_gen = False + expr_keyword = 'yield' + + def analyse_types(self, env): + if not self.label_num or (self.is_yield_from and self.in_async_gen): + error(self.pos, "'%s' not supported here" % self.expr_keyword) + self.is_temp = 1 + if self.arg is not None: + self.arg = self.arg.analyse_types(env) + if not self.arg.type.is_pyobject: + self.coerce_yield_argument(env) + return self + + def coerce_yield_argument(self, env): + self.arg = self.arg.coerce_to_pyobject(env) + + def generate_evaluation_code(self, code): + if self.arg: + self.arg.generate_evaluation_code(code) + self.arg.make_owned_reference(code) + code.putln( + "%s = %s;" % ( + Naming.retval_cname, + self.arg.result_as(py_object_type))) + self.arg.generate_post_assignment_code(code) + self.arg.free_temps(code) + else: + code.put_init_to_py_none(Naming.retval_cname, py_object_type) + self.generate_yield_code(code) + + def generate_yield_code(self, code): + """ + Generate the code to return the argument in 'Naming.retval_cname' + and to continue at the yield label. 
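+        Descriptive note: temporaries that are live across the yield are
+        spilled into the generator closure before returning and restored
+        when execution resumes at the yield label.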
+ """ + label_num, label_name = code.new_yield_label( + self.expr_keyword.replace(' ', '_')) + code.use_label(label_name) + + saved = [] + code.funcstate.closure_temps.reset() + for cname, type, manage_ref in code.funcstate.temps_in_use(): + save_cname = code.funcstate.closure_temps.allocate_temp(type) + saved.append((cname, save_cname, type)) + if type.is_cpp_class: + code.globalstate.use_utility_code( + UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp")) + cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % cname + else: + code.put_xgiveref(cname, type) + code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname)) + + code.put_xgiveref(Naming.retval_cname, py_object_type) + profile = code.globalstate.directives['profile'] + linetrace = code.globalstate.directives['linetrace'] + if profile or linetrace: + code.put_trace_return(Naming.retval_cname, + nogil=not code.funcstate.gil_owned) + code.put_finish_refcount_context() + + if code.funcstate.current_except is not None: + # inside of an except block => save away currently handled exception + code.putln("__Pyx_Coroutine_SwapException(%s);" % Naming.generator_cname) + else: + # no exceptions being handled => restore exception state of caller + code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname) + + code.putln("/* return from %sgenerator, %sing value */" % ( + 'async ' if self.in_async_gen else '', + 'await' if self.is_await else 'yield')) + code.putln("%s->resume_label = %d;" % ( + Naming.generator_cname, label_num)) + if self.in_async_gen and not self.is_await: + # __Pyx__PyAsyncGenValueWrapperNew() steals a reference to the return value + code.putln("return __Pyx__PyAsyncGenValueWrapperNew(%s);" % Naming.retval_cname) + else: + code.putln("return %s;" % Naming.retval_cname) + + code.put_label(label_name) + for cname, save_cname, type in saved: + save_cname = "%s->%s" % (Naming.cur_scope_cname, save_cname) + if type.is_cpp_class: + save_cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % save_cname + code.putln('%s = %s;' % (cname, save_cname)) + if type.is_pyobject: + code.putln('%s = 0;' % save_cname) + code.put_xgotref(cname, type) + elif type.is_memoryviewslice: + code.putln('%s.memview = NULL; %s.data = NULL;' % (save_cname, save_cname)) + self.generate_sent_value_handling_code(code, Naming.sent_value_cname) + if self.result_is_used: + self.allocate_temp_result(code) + code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname)) + code.put_incref(self.result(), py_object_type) + + def generate_sent_value_handling_code(self, code, value_cname): + code.putln(code.error_goto_if_null(value_cname, self.pos)) + + +class _YieldDelegationExprNode(YieldExprNode): + def yield_from_func(self, code): + raise NotImplementedError() + + def generate_evaluation_code(self, code, source_cname=None, decref_source=False): + if source_cname is None: + self.arg.generate_evaluation_code(code) + code.putln("%s = %s(%s, %s);" % ( + Naming.retval_cname, + self.yield_from_func(code), + Naming.generator_cname, + self.arg.py_result() if source_cname is None else source_cname)) + if source_cname is None: + self.arg.generate_disposal_code(code) + self.arg.free_temps(code) + elif decref_source: + code.put_decref_clear(source_cname, py_object_type) + code.put_xgotref(Naming.retval_cname, py_object_type) + + code.putln("if (likely(%s)) {" % Naming.retval_cname) + self.generate_yield_code(code) + code.putln("} else {") + # either error or sub-generator has normally terminated: return value => node result + if 
self.result_is_used: + self.fetch_iteration_result(code) + else: + self.handle_iteration_exception(code) + code.putln("}") + + def fetch_iteration_result(self, code): + # YieldExprNode has allocated the result temp for us + code.putln("%s = NULL;" % self.result()) + code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result()) + self.generate_gotref(code) + + def handle_iteration_exception(self, code): + code.putln("PyObject* exc_type = __Pyx_PyErr_CurrentExceptionType();") + code.putln("if (exc_type) {") + code.putln("if (likely(exc_type == PyExc_StopIteration || (exc_type != PyExc_GeneratorExit &&" + " __Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))) PyErr_Clear();") + code.putln("else %s" % code.error_goto(self.pos)) + code.putln("}") + + +class YieldFromExprNode(_YieldDelegationExprNode): + # "yield from GEN" expression + is_yield_from = True + expr_keyword = 'yield from' + + def coerce_yield_argument(self, env): + if not self.arg.type.is_string: + # FIXME: support C arrays and C++ iterators? + error(self.pos, "yielding from non-Python object not supported") + self.arg = self.arg.coerce_to_pyobject(env) + + def yield_from_func(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c")) + return "__Pyx_Generator_Yield_From" + + +class AwaitExprNode(_YieldDelegationExprNode): + # 'await' expression node + # + # arg ExprNode the Awaitable value to await + # label_num integer yield label number + + is_await = True + expr_keyword = 'await' + + def coerce_yield_argument(self, env): + if self.arg is not None: + # FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ? + self.arg = self.arg.coerce_to_pyobject(env) + + def yield_from_func(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c")) + return "__Pyx_Coroutine_Yield_From" + + +class AwaitIterNextExprNode(AwaitExprNode): + # 'await' expression node as part of 'async for' iteration + # + # Breaks out of loop on StopAsyncIteration exception. 
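+    #
+    # Roughly implements the implicit 'await __anext__()' step of a
+    # sketch like 'async for item in it: ...', where StopAsyncIteration
+    # ends the loop instead of propagating as an error.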
+ + def _generate_break(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c")) + code.putln("PyObject* exc_type = __Pyx_PyErr_CurrentExceptionType();") + code.putln("if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || (" + " exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit &&" + " __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {") + code.putln("PyErr_Clear();") + code.putln("break;") + code.putln("}") + + def fetch_iteration_result(self, code): + assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop" + self._generate_break(code) + super(AwaitIterNextExprNode, self).fetch_iteration_result(code) + + def generate_sent_value_handling_code(self, code, value_cname): + assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop" + code.putln("if (unlikely(!%s)) {" % value_cname) + self._generate_break(code) + # all non-break exceptions are errors, as in parent class + code.putln(code.error_goto(self.pos)) + code.putln("}") + + +class GlobalsExprNode(AtomicExprNode): + type = dict_type + is_temp = 1 + + def analyse_types(self, env): + env.use_utility_code(Builtin.globals_utility_code) + return self + + gil_message = "Constructing globals dict" + + def may_be_none(self): + return False + + def generate_result_code(self, code): + code.putln('%s = __Pyx_Globals(); %s' % ( + self.result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + +class LocalsDictItemNode(DictItemNode): + def analyse_types(self, env): + self.key = self.key.analyse_types(env) + self.value = self.value.analyse_types(env) + self.key = self.key.coerce_to_pyobject(env) + if self.value.type.can_coerce_to_pyobject(env): + self.value = self.value.coerce_to_pyobject(env) + else: + self.value = None + return self + + +class FuncLocalsExprNode(DictNode): + def __init__(self, pos, env): + local_vars = sorted([ + entry.name for entry in env.entries.values() if entry.name]) + items = [LocalsDictItemNode( + pos, key=IdentifierStringNode(pos, value=var), + value=NameNode(pos, name=var, allow_null=True)) + for var in local_vars] + DictNode.__init__(self, pos, key_value_pairs=items, + exclude_null_values=True) + + def analyse_types(self, env): + node = super(FuncLocalsExprNode, self).analyse_types(env) + node.key_value_pairs = [ i for i in node.key_value_pairs + if i.value is not None ] + return node + + +class PyClassLocalsExprNode(AtomicExprNode): + def __init__(self, pos, pyclass_dict): + AtomicExprNode.__init__(self, pos) + self.pyclass_dict = pyclass_dict + + def analyse_types(self, env): + self.type = self.pyclass_dict.type + self.is_temp = False + return self + + def may_be_none(self): + return False + + def result(self): + return self.pyclass_dict.result() + + def generate_result_code(self, code): + pass + + +def LocalsExprNode(pos, scope_node, env): + if env.is_module_scope: + return GlobalsExprNode(pos) + if env.is_py_class_scope: + return PyClassLocalsExprNode(pos, scope_node.dict) + return FuncLocalsExprNode(pos, env) + + +#------------------------------------------------------------------- +# +# Unary operator nodes +# +#------------------------------------------------------------------- + +compile_time_unary_operators = { + 'not': operator.not_, + '~': operator.inv, + '-': operator.neg, + '+': operator.pos, +} + +class UnopNode(ExprNode): + # operator string + # operand ExprNode + # + # Processing during analyse_expressions phase: + 
# + # analyse_c_operation + # Called when the operand is not a pyobject. + # - Check operand type and coerce if needed. + # - Determine result type and result code fragment. + # - Allocate temporary for result if needed. + + subexprs = ['operand'] + infix = True + is_inc_dec_op = False + + def calculate_constant_result(self): + func = compile_time_unary_operators[self.operator] + self.constant_result = func(self.operand.constant_result) + + def compile_time_value(self, denv): + func = compile_time_unary_operators.get(self.operator) + if not func: + error(self.pos, + "Unary '%s' not supported in compile-time expression" + % self.operator) + operand = self.operand.compile_time_value(denv) + try: + return func(operand) + except Exception as e: + self.compile_time_value_error(e) + + def infer_type(self, env): + operand_type = self.operand.infer_type(env) + if operand_type.is_cpp_class or operand_type.is_ptr: + cpp_type = operand_type.find_cpp_operation_type(self.operator) + if cpp_type is not None: + return cpp_type + return self.infer_unop_type(env, operand_type) + + def infer_unop_type(self, env, operand_type): + if operand_type.is_pyobject: + return py_object_type + else: + return operand_type + + def may_be_none(self): + if self.operand.type and self.operand.type.is_builtin_type: + if self.operand.type is not type_type: + return False + return ExprNode.may_be_none(self) + + def analyse_types(self, env): + self.operand = self.operand.analyse_types(env) + if self.is_pythran_operation(env): + self.type = PythranExpr(pythran_unaryop_type(self.operator, self.operand.type)) + self.is_temp = 1 + elif self.is_py_operation(): + self.coerce_operand_to_pyobject(env) + self.type = py_object_type + self.is_temp = 1 + elif self.is_cpp_operation(): + self.analyse_cpp_operation(env) + else: + self.analyse_c_operation(env) + return self + + def check_const(self): + return self.operand.check_const() + + def is_py_operation(self): + return self.operand.type.is_pyobject or self.operand.type.is_ctuple + + def is_pythran_operation(self, env): + np_pythran = has_np_pythran(env) + op_type = self.operand.type + return np_pythran and (op_type.is_buffer or op_type.is_pythran_expr) + + def nogil_check(self, env): + if self.is_py_operation(): + self.gil_error() + + def is_cpp_operation(self): + type = self.operand.type + return type.is_cpp_class + + def coerce_operand_to_pyobject(self, env): + self.operand = self.operand.coerce_to_pyobject(env) + + def generate_result_code(self, code): + if self.type.is_pythran_expr: + code.putln("// Pythran unaryop") + code.putln("__Pyx_call_destructor(%s);" % self.result()) + code.putln("new (&%s) decltype(%s){%s%s};" % ( + self.result(), + self.result(), + self.operator, + self.operand.pythran_result())) + elif self.operand.type.is_pyobject: + self.generate_py_operation_code(code) + elif self.is_temp: + if self.is_cpp_operation() and self.exception_check == '+': + translate_cpp_exception(code, self.pos, + "%s = %s %s;" % (self.result(), self.operator, self.operand.result()), + self.result() if self.type.is_pyobject else None, + self.exception_value, self.in_nogil_context) + else: + code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result())) + + def generate_py_operation_code(self, code): + function = self.py_operation_function(code) + code.putln( + "%s = %s(%s); %s" % ( + self.result(), + function, + self.operand.py_result(), + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + + def type_error(self): + if not 
self.operand.type.is_error: + error(self.pos, "Invalid operand type for '%s' (%s)" % + (self.operator, self.operand.type)) + self.type = PyrexTypes.error_type + + def analyse_cpp_operation(self, env, overload_check=True): + operand_types = [self.operand.type] + if self.is_inc_dec_op and not self.is_prefix: + operand_types.append(PyrexTypes.c_int_type) + entry = env.lookup_operator_for_types(self.pos, self.operator, operand_types) + if overload_check and not entry: + self.type_error() + return + if entry: + self.exception_check = entry.type.exception_check + self.exception_value = entry.type.exception_value + if self.exception_check == '+': + self.is_temp = True + if needs_cpp_exception_conversion(self): + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + else: + self.exception_check = '' + self.exception_value = '' + if self.is_inc_dec_op and not self.is_prefix: + cpp_type = self.operand.type.find_cpp_operation_type( + self.operator, operand_type=PyrexTypes.c_int_type + ) + else: + cpp_type = self.operand.type.find_cpp_operation_type(self.operator) + if overload_check and cpp_type is None: + error(self.pos, "'%s' operator not defined for %s" % ( + self.operator, type)) + self.type_error() + return + self.type = cpp_type + + +class NotNode(UnopNode): + # 'not' operator + # + # operand ExprNode + operator = '!' + + type = PyrexTypes.c_bint_type + + def calculate_constant_result(self): + self.constant_result = not self.operand.constant_result + + def compile_time_value(self, denv): + operand = self.operand.compile_time_value(denv) + try: + return not operand + except Exception as e: + self.compile_time_value_error(e) + + def infer_unop_type(self, env, operand_type): + return PyrexTypes.c_bint_type + + def analyse_types(self, env): + self.operand = self.operand.analyse_types(env) + operand_type = self.operand.type + if operand_type.is_cpp_class: + self.analyse_cpp_operation(env) + else: + self.operand = self.operand.coerce_to_boolean(env) + return self + + def calculate_result_code(self): + return "(!%s)" % self.operand.result() + + +class UnaryPlusNode(UnopNode): + # unary '+' operator + + operator = '+' + + def analyse_c_operation(self, env): + self.type = PyrexTypes.widest_numeric_type( + self.operand.type, PyrexTypes.c_int_type) + + def py_operation_function(self, code): + return "PyNumber_Positive" + + def calculate_result_code(self): + if self.is_cpp_operation(): + return "(+%s)" % self.operand.result() + else: + return self.operand.result() + + +class UnaryMinusNode(UnopNode): + # unary '-' operator + + operator = '-' + + def analyse_c_operation(self, env): + if self.operand.type.is_numeric: + self.type = PyrexTypes.widest_numeric_type( + self.operand.type, PyrexTypes.c_int_type) + elif self.operand.type.is_enum: + self.type = PyrexTypes.c_int_type + else: + self.type_error() + if self.type.is_complex: + self.infix = False + + def py_operation_function(self, code): + return "PyNumber_Negative" + + def calculate_result_code(self): + if self.infix: + return "(-%s)" % self.operand.result() + else: + return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result()) + + def get_constant_c_result_code(self): + value = self.operand.get_constant_c_result_code() + if value: + return "(-%s)" % value + +class TildeNode(UnopNode): + # unary '~' operator + + def analyse_c_operation(self, env): + if self.operand.type.is_int: + self.type = PyrexTypes.widest_numeric_type( + self.operand.type, PyrexTypes.c_int_type) + elif self.operand.type.is_enum: + 
self.type = PyrexTypes.c_int_type + else: + self.type_error() + + def py_operation_function(self, code): + return "PyNumber_Invert" + + def calculate_result_code(self): + return "(~%s)" % self.operand.result() + + +class CUnopNode(UnopNode): + + def is_py_operation(self): + return False + +class DereferenceNode(CUnopNode): + # unary * operator + + operator = '*' + + def infer_unop_type(self, env, operand_type): + if operand_type.is_ptr: + return operand_type.base_type + else: + return PyrexTypes.error_type + + def analyse_c_operation(self, env): + if self.operand.type.is_ptr: + if env.is_cpp: + self.type = PyrexTypes.CReferenceType(self.operand.type.base_type) + else: + self.type = self.operand.type.base_type + else: + self.type_error() + + def calculate_result_code(self): + return "(*%s)" % self.operand.result() + + +class DecrementIncrementNode(CUnopNode): + # unary ++/-- operator + is_inc_dec_op = True + + def type_error(self): + if not self.operand.type.is_error: + if self.is_prefix: + error(self.pos, "No match for 'operator%s' (operand type is '%s')" % + (self.operator, self.operand.type)) + else: + error(self.pos, "No 'operator%s(int)' declared for postfix '%s' (operand type is '%s')" % + (self.operator, self.operator, self.operand.type)) + self.type = PyrexTypes.error_type + + def analyse_c_operation(self, env): + if self.operand.type.is_numeric: + self.type = PyrexTypes.widest_numeric_type( + self.operand.type, PyrexTypes.c_int_type) + elif self.operand.type.is_ptr: + self.type = self.operand.type + else: + self.type_error() + + def calculate_result_code(self): + if self.is_prefix: + return "(%s%s)" % (self.operator, self.operand.result()) + else: + return "(%s%s)" % (self.operand.result(), self.operator) + +def inc_dec_constructor(is_prefix, operator): + return lambda pos, **kwds: DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds) + + +class AmpersandNode(CUnopNode): + # The C address-of operator. 
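+    # For example (a sketch): with 'cdef int x', the expression '&x'
+    # yields a value of C type 'int *'.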
+    #
+    #  operand  ExprNode
+    operator = '&'
+
+    def infer_unop_type(self, env, operand_type):
+        return PyrexTypes.c_ptr_type(operand_type)
+
+    def analyse_types(self, env):
+        self.operand = self.operand.analyse_types(env)
+        argtype = self.operand.type
+        if argtype.is_cpp_class:
+            self.analyse_cpp_operation(env, overload_check=False)
+        if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
+            if argtype.is_memoryviewslice:
+                self.error("Cannot take address of memoryview slice")
+            else:
+                self.error("Taking address of non-lvalue (type %s)" % argtype)
+            return self
+        if argtype.is_pyobject:
+            self.error("Cannot take address of Python %s" % (
+                "variable '%s'" % self.operand.name if self.operand.is_name else
+                "object attribute '%s'" % self.operand.attribute if self.operand.is_attribute else
+                "object"))
+            return self
+        if not argtype.is_cpp_class or not self.type:
+            self.type = PyrexTypes.c_ptr_type(argtype)
+        return self
+
+    def check_const(self):
+        return self.operand.check_const_addr()
+
+    def error(self, mess):
+        error(self.pos, mess)
+        self.type = PyrexTypes.error_type
+        self.result_code = "<error>"
+
+    def calculate_result_code(self):
+        return "(&%s)" % self.operand.result()
+
+    def generate_result_code(self, code):
+        if (self.operand.type.is_cpp_class and self.exception_check == '+'):
+            translate_cpp_exception(code, self.pos,
+                "%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
+                self.result() if self.type.is_pyobject else None,
+                self.exception_value, self.in_nogil_context)
+
+
+unop_node_classes = {
+    "+": UnaryPlusNode,
+    "-": UnaryMinusNode,
+    "~": TildeNode,
+}
+
+def unop_node(pos, operator, operand):
+    # Construct unop node of the appropriate class for
+    # the given operator.
+    if isinstance(operand, IntNode) and operator == '-':
+        return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
+                       longness=operand.longness, unsigned=operand.unsigned)
+    elif isinstance(operand, UnopNode) and operand.operator == operator in '+-':
+        warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
+    return unop_node_classes[operator](pos,
+        operator = operator,
+        operand = operand)
+
+
+class TypecastNode(ExprNode):
+    #  C type cast
+    #
+    #  operand      ExprNode
+    #  base_type    CBaseTypeNode
+    #  declarator   CDeclaratorNode
+    #  typecheck    boolean
+    #
+    #  If used from a transform, the attribute "type" may be set
+    #  directly, leaving base_type and declarator as None.
+
+    subexprs = ['operand']
+    base_type = declarator = type = None
+
+    def type_dependencies(self, env):
+        return ()
+
+    def infer_type(self, env):
+        if self.type is None:
+            base_type = self.base_type.analyse(env)
+            _, self.type = self.declarator.analyse(base_type, env)
+        return self.type
+
+    def analyse_types(self, env):
+        if self.type is None:
+            base_type = self.base_type.analyse(env)
+            _, self.type = self.declarator.analyse(base_type, env)
+        if self.operand.has_constant_result():
+            # Must be done after self.type is resolved.
+ self.calculate_constant_result() + if self.type.is_cfunction: + error(self.pos, + "Cannot cast to a function type") + self.type = PyrexTypes.error_type + self.operand = self.operand.analyse_types(env) + if self.type is PyrexTypes.c_bint_type: + # short circuit this to a coercion + return self.operand.coerce_to_boolean(env) + to_py = self.type.is_pyobject + from_py = self.operand.type.is_pyobject + if from_py and not to_py and self.operand.is_ephemeral(): + if not self.type.is_numeric and not self.type.is_cpp_class: + error(self.pos, "Casting temporary Python object to non-numeric non-Python type") + if to_py and not from_py: + if self.type is bytes_type and self.operand.type.is_int: + return CoerceIntToBytesNode(self.operand, env) + elif self.operand.type.can_coerce_to_pyobject(env): + self.result_ctype = py_object_type + self.operand = self.operand.coerce_to(self.type, env) + else: + if self.operand.type.is_ptr: + if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct): + error(self.pos, "Python objects cannot be cast from pointers of primitive types") + else: + # Should this be an error? + warning(self.pos, "No conversion from %s to %s, python object pointer used." % ( + self.operand.type, self.type)) + self.operand = self.operand.coerce_to_simple(env) + elif from_py and not to_py: + if self.type.create_from_py_utility_code(env): + self.operand = self.operand.coerce_to(self.type, env) + elif self.type.is_ptr: + if not (self.type.base_type.is_void or self.type.base_type.is_struct): + error(self.pos, "Python objects cannot be cast to pointers of primitive types") + else: + warning(self.pos, "No conversion from %s to %s, python object pointer used." % ( + self.type, self.operand.type)) + elif from_py and to_py: + if self.typecheck: + self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True) + elif isinstance(self.operand, SliceIndexNode): + # This cast can influence the created type of string slices. 
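+                # e.g. (a sketch) casting a char* slice like
+                # <bytes>c_string[1:3] lets the slice produce a bytes
+                # object directly.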
+                self.operand = self.operand.coerce_to(self.type, env)
+        elif self.type.is_complex and self.operand.type.is_complex:
+            self.operand = self.operand.coerce_to_simple(env)
+        elif self.operand.type.is_fused:
+            self.operand = self.operand.coerce_to(self.type, env)
+            #self.type = self.operand.type
+        if self.type.is_ptr and self.type.base_type.is_cfunction and self.type.base_type.nogil:
+            op_type = self.operand.type
+            if op_type.is_ptr:
+                op_type = op_type.base_type
+            if op_type.is_cfunction and not op_type.nogil:
+                warning(self.pos,
+                        "Casting a GIL-requiring function into a nogil function circumvents GIL validation", 1)
+        return self
+
+    def is_simple(self):
+        # either temp or a C cast => no side effects other than the operand's
+        return self.operand.is_simple()
+
+    def is_ephemeral(self):
+        # either temp or a C cast => no side effects other than the operand's
+        return self.operand.is_ephemeral()
+
+    def nonlocally_immutable(self):
+        return self.is_temp or self.operand.nonlocally_immutable()
+
+    def nogil_check(self, env):
+        if self.type and self.type.is_pyobject and self.is_temp:
+            self.gil_error()
+
+    def check_const(self):
+        return self.operand.check_const()
+
+    def calculate_constant_result(self):
+        self.constant_result = self.calculate_result_code(self.operand.constant_result)
+
+    def calculate_result_code(self, operand_result = None):
+        if operand_result is None:
+            operand_result = self.operand.result()
+        if self.type.is_complex:
+            operand_result = self.operand.result()
+            if self.operand.type.is_complex:
+                real_part = self.type.real_type.cast_code(
+                    self.operand.type.real_code(operand_result))
+                imag_part = self.type.real_type.cast_code(
+                    self.operand.type.imag_code(operand_result))
+            else:
+                real_part = self.type.real_type.cast_code(operand_result)
+                imag_part = "0"
+            return "%s(%s, %s)" % (
+                self.type.from_parts,
+                real_part,
+                imag_part)
+        else:
+            return self.type.cast_code(operand_result)
+
+    def get_constant_c_result_code(self):
+        operand_result = self.operand.get_constant_c_result_code()
+        if operand_result:
+            return self.type.cast_code(operand_result)
+
+    def result_as(self, type):
+        if self.type.is_pyobject and not self.is_temp:
+            # Optimise away some unnecessary casting
+            return self.operand.result_as(type)
+        else:
+            return ExprNode.result_as(self, type)
+
+    def generate_result_code(self, code):
+        if self.is_temp:
+            code.putln(
+                "%s = (PyObject *)%s;" % (
+                    self.result(),
+                    self.operand.result()))
+            code.put_incref(self.result(), self.ctype())
+
+
+ERR_START = "Start may not be given"
+ERR_NOT_STOP = "Stop must be provided to indicate shape"
+ERR_STEPS = ("Strides may only be given to indicate contiguity. "
+             "Consider slicing it after conversion")
+ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
+ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
+
+
+class CythonArrayNode(ExprNode):
+    """
+    Used when a pointer of base_type is cast to a memoryviewslice with that
+    base type. i.e.
+
+        <int[:M:1, :N]> p
+
+    creates a fortran-contiguous cython.array.
+
+    We leave the type set to object so coercions to object are more efficient
+    and less work. Acquiring a memoryviewslice from this will be just as
+    efficient. ExprNode.coerce_to() will do the additional typecheck on
+    self.compile_time_type
+
+    This also handles <int[:, :]> my_c_array
+
+
+    operand             ExprNode                 the thing we're casting
+    base_type_node      MemoryViewSliceTypeNode  the cast expression node
+    """
+
+    subexprs = ['operand', 'shapes']
+
+    shapes = None
+    is_temp = True
+    mode = "c"
+    array_dtype = None
+
+    shape_type = PyrexTypes.c_py_ssize_t_type
+
+    def analyse_types(self, env):
+        from . import MemoryView
+
+        self.operand = self.operand.analyse_types(env)
+        if self.array_dtype:
+            array_dtype = self.array_dtype
+        else:
+            array_dtype = self.base_type_node.base_type_node.analyse(env)
+        axes = self.base_type_node.axes
+
+        self.type = error_type
+        self.shapes = []
+        ndim = len(axes)
+
+        # Base type of the pointer or C array we are converting
+        base_type = self.operand.type
+
+        if not self.operand.type.is_ptr and not self.operand.type.is_array:
+            error(self.operand.pos, ERR_NOT_POINTER)
+            return self
+
+        # Dimension sizes of C array
+        array_dimension_sizes = []
+        if base_type.is_array:
+            while base_type.is_array:
+                array_dimension_sizes.append(base_type.size)
+                base_type = base_type.base_type
+        elif base_type.is_ptr:
+            base_type = base_type.base_type
+        else:
+            error(self.pos, "unexpected base type %s found" % base_type)
+            return self
+
+        if not (base_type.same_as(array_dtype) or base_type.is_void):
+            error(self.operand.pos, ERR_BASE_TYPE)
+            return self
+        elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
+            error(self.operand.pos,
+                  "Expected %d dimensions, array has %d dimensions" %
+                  (ndim, len(array_dimension_sizes)))
+            return self
+
+        # Verify the start, stop and step values
+        # In case of a C array, use the size of C array in each dimension to
+        # get an automatic cast
+        for axis_no, axis in enumerate(axes):
+            if not axis.start.is_none:
+                error(axis.start.pos, ERR_START)
+                return self
+
+            if axis.stop.is_none:
+                if array_dimension_sizes:
+                    dimsize = array_dimension_sizes[axis_no]
+                    axis.stop = IntNode(self.pos, value=str(dimsize),
+                                        constant_result=dimsize,
+                                        type=PyrexTypes.c_int_type)
+                else:
+                    error(axis.pos, ERR_NOT_STOP)
+                    return self
+
+            axis.stop = axis.stop.analyse_types(env)
+            shape = axis.stop.coerce_to(self.shape_type, env)
+            if not shape.is_literal:
+                shape.coerce_to_temp(env)
+
+            self.shapes.append(shape)
+
+            first_or_last = axis_no in (0, ndim - 1)
+            if not axis.step.is_none and first_or_last:
+                # '1' in the first or last dimension denotes F or C contiguity
+                axis.step = axis.step.analyse_types(env)
+                if (not axis.step.type.is_int and axis.step.is_literal and not
+                        axis.step.type.is_error):
+                    error(axis.step.pos, "Expected an integer literal")
+                    return self
+
+                if axis.step.compile_time_value(env) != 1:
+                    error(axis.step.pos, ERR_STEPS)
+                    return self
+
+                if axis_no == 0:
+                    self.mode = "fortran"
+
+            elif not axis.step.is_none and not first_or_last:
+                # step provided in some other dimension
+                error(axis.step.pos, ERR_STEPS)
+                return self
+
+        if not self.operand.is_name:
+            self.operand = self.operand.coerce_to_temp(env)
+
+        axes = [('direct', 'follow')] * len(axes)
+        if self.mode == "fortran":
+            axes[0] = ('direct', 'contig')
+        else:
+            axes[-1] = ('direct', 'contig')
+
+        self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
+        self.coercion_type.validate_memslice_dtype(self.pos)
+        self.type = self.get_cython_array_type(env)
+        MemoryView.use_cython_array_utility_code(env)
+        env.use_utility_code(MemoryView.typeinfo_to_format_code)
+        return self
+
+    def allocate_temp_result(self, code):
+        if self.temp_code:
raise RuntimeError("temp allocated multiple times") + + self.temp_code = code.funcstate.allocate_temp(self.type, True) + + def infer_type(self, env): + return self.get_cython_array_type(env) + + def get_cython_array_type(self, env): + cython_scope = env.global_scope().context.cython_scope + cython_scope.load_cythonscope() + return cython_scope.viewscope.lookup("array").type + + def generate_result_code(self, code): + from . import Buffer + + shapes = [self.shape_type.cast_code(shape.result()) + for shape in self.shapes] + dtype = self.coercion_type.dtype + + shapes_temp = code.funcstate.allocate_temp(py_object_type, True) + format_temp = code.funcstate.allocate_temp(py_object_type, True) + + itemsize = "sizeof(%s)" % dtype.empty_declaration_code() + type_info = Buffer.get_type_information_cname(code, dtype) + + if self.operand.type.is_ptr: + code.putln("if (!%s) {" % self.operand.result()) + code.putln( 'PyErr_SetString(PyExc_ValueError,' + '"Cannot create cython.array from NULL pointer");') + code.putln(code.error_goto(self.operand.pos)) + code.putln("}") + + code.putln("%s = __pyx_format_from_typeinfo(&%s); %s" % ( + format_temp, + type_info, + code.error_goto_if_null(format_temp, self.pos), + )) + code.put_gotref(format_temp, py_object_type) + + buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes) + code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s); %s' % ( + shapes_temp, + buildvalue_fmt, + ", ".join(shapes), + code.error_goto_if_null(shapes_temp, self.pos), + )) + code.put_gotref(shapes_temp, py_object_type) + + code.putln('%s = __pyx_array_new(%s, %s, PyBytes_AS_STRING(%s), (char *) "%s", (char *) %s); %s' % ( + self.result(), + shapes_temp, itemsize, format_temp, self.mode, self.operand.result(), + code.error_goto_if_null(self.result(), self.pos), + )) + self.generate_gotref(code) + + def dispose(temp): + code.put_decref_clear(temp, py_object_type) + code.funcstate.release_temp(temp) + + dispose(shapes_temp) + dispose(format_temp) + + @classmethod + def from_carray(cls, src_node, env): + """ + Given a C array type, return a CythonArrayNode + """ + pos = src_node.pos + base_type = src_node.type + + none_node = NoneNode(pos) + axes = [] + + while base_type.is_array: + axes.append(SliceNode(pos, start=none_node, stop=none_node, + step=none_node)) + base_type = base_type.base_type + axes[-1].step = IntNode(pos, value="1", is_c_literal=True) + + memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes, + base_type_node=base_type) + result = CythonArrayNode(pos, base_type_node=memslicenode, + operand=src_node, array_dtype=base_type) + result = result.analyse_types(env) + return result + +class SizeofNode(ExprNode): + # Abstract base class for sizeof(x) expression nodes. 
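+    #
+    # (Added, illustrative note; example names are hypothetical.) In user
+    # code, ``sizeof(double)`` is parsed as a SizeofTypeNode and
+    # ``sizeof(some_var)`` as a SizeofVarNode; both evaluate to a C value
+    # of type size_t, e.g.:
+    #
+    #     cdef size_t n = sizeof(double)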
+ + type = PyrexTypes.c_size_t_type + + def check_const(self): + return True + + def generate_result_code(self, code): + pass + + +class SizeofTypeNode(SizeofNode): + # C sizeof function applied to a type + # + # base_type CBaseTypeNode + # declarator CDeclaratorNode + + subexprs = [] + arg_type = None + + def analyse_types(self, env): + # we may have incorrectly interpreted a dotted name as a type rather than an attribute + # this could be better handled by more uniformly treating types as runtime-available objects + if 0 and self.base_type.module_path: + path = self.base_type.module_path + obj = env.lookup(path[0]) + if obj.as_module is None: + operand = NameNode(pos=self.pos, name=path[0]) + for attr in path[1:]: + operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr) + operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name) + node = SizeofVarNode(self.pos, operand=operand).analyse_types(env) + return node + if self.arg_type is None: + base_type = self.base_type.analyse(env) + _, arg_type = self.declarator.analyse(base_type, env) + self.arg_type = arg_type + self.check_type() + return self + + def check_type(self): + arg_type = self.arg_type + if not arg_type: + return + if arg_type.is_pyobject and not arg_type.is_extension_type: + error(self.pos, "Cannot take sizeof Python object") + elif arg_type.is_void: + error(self.pos, "Cannot take sizeof void") + elif not arg_type.is_complete(): + error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type) + + def calculate_result_code(self): + if self.arg_type.is_extension_type: + # the size of the pointer is boring + # we want the size of the actual struct + arg_code = self.arg_type.declaration_code("", deref=1) + else: + arg_code = self.arg_type.empty_declaration_code() + return "(sizeof(%s))" % arg_code + + +class SizeofVarNode(SizeofNode): + # C sizeof function applied to a variable + # + # operand ExprNode + + subexprs = ['operand'] + + def analyse_types(self, env): + # We may actually be looking at a type rather than a variable... + # If we are, traditional analysis would fail... 
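+        # (Added, illustrative) e.g. ``sizeof(foo)`` where ``foo`` could name
+        # either a ctypedef or a variable: analyse_as_type() below tries the
+        # type reading first and, if it succeeds, this node rewrites itself
+        # into a SizeofTypeNode.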
+ operand_as_type = self.operand.analyse_as_type(env) + if operand_as_type: + self.arg_type = operand_as_type + if self.arg_type.is_fused: + try: + self.arg_type = self.arg_type.specialize(env.fused_to_specific) + except CannotSpecialize: + error(self.operand.pos, + "Type cannot be specialized since it is not a fused argument to this function") + self.__class__ = SizeofTypeNode + self.check_type() + else: + self.operand = self.operand.analyse_types(env) + return self + + def calculate_result_code(self): + return "(sizeof(%s))" % self.operand.result() + + def generate_result_code(self, code): + pass + + +class TypeidNode(ExprNode): + # C++ typeid operator applied to a type or variable + # + # operand ExprNode + # arg_type ExprNode + # is_variable boolean + + subexprs = ['operand'] + + arg_type = None + is_variable = None + is_temp = 1 + + def get_type_info_type(self, env): + env_module = env + while not env_module.is_module_scope: + env_module = env_module.outer_scope + typeinfo_module = env_module.find_module('libcpp.typeinfo', self.pos) + typeinfo_entry = typeinfo_module.lookup('type_info') + return PyrexTypes.CFakeReferenceType(PyrexTypes.c_const_type(typeinfo_entry.type)) + + cpp_message = 'typeid operator' + + def analyse_types(self, env): + if not self.type: + self.type = PyrexTypes.error_type # default value if it isn't analysed successfully + self.cpp_check(env) + type_info = self.get_type_info_type(env) + if not type_info: + self.error("The 'libcpp.typeinfo' module must be cimported to use the typeid() operator") + return self + if self.operand is None: + return self # already analysed, no need to repeat + self.type = type_info + as_type = self.operand.analyse_as_specialized_type(env) + if as_type: + self.arg_type = as_type + self.is_type = True + self.operand = None # nothing further uses self.operand - will only cause problems if its used in code generation + else: + self.arg_type = self.operand.analyse_types(env) + self.is_type = False + self.operand = None # nothing further uses self.operand - will only cause problems if its used in code generation + if self.arg_type.type.is_pyobject: + self.error("Cannot use typeid on a Python object") + return self + elif self.arg_type.type.is_void: + self.error("Cannot use typeid on void") + return self + elif not self.arg_type.type.is_complete(): + self.error("Cannot use typeid on incomplete type '%s'" % self.arg_type.type) + return self + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + return self + + def error(self, mess): + error(self.pos, mess) + self.type = PyrexTypes.error_type + self.result_code = "" + + def check_const(self): + return True + + def calculate_result_code(self): + return self.temp_code + + def generate_result_code(self, code): + if self.is_type: + arg_code = self.arg_type.empty_declaration_code() + else: + arg_code = self.arg_type.result() + translate_cpp_exception(code, self.pos, + "%s = typeid(%s);" % (self.temp_code, arg_code), + None, None, self.in_nogil_context) + +class TypeofNode(ExprNode): + # Compile-time type of an expression, as a string. + # + # operand ExprNode + # literal StringNode # internal + + literal = None + type = py_object_type + + subexprs = ['literal'] # 'operand' will be ignored after type analysis! 
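+
+    # (Added, illustrative) what this node implements in user code,
+    # assuming a hypothetical local ``x``:
+    #
+    #     cdef list x = []
+    #     print(cython.typeof(x))    # -> "list object", known at compile time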
+ + def analyse_types(self, env): + self.operand = self.operand.analyse_types(env) + value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name()) + literal = StringNode(self.pos, value=value) + literal = literal.analyse_types(env) + self.literal = literal.coerce_to_pyobject(env) + return self + + def analyse_as_type(self, env): + self.operand = self.operand.analyse_types(env) + return self.operand.type + + def may_be_none(self): + return False + + def generate_evaluation_code(self, code): + self.literal.generate_evaluation_code(code) + + def calculate_result_code(self): + return self.literal.calculate_result_code() + +#------------------------------------------------------------------- +# +# Binary operator nodes +# +#------------------------------------------------------------------- + +try: + matmul_operator = operator.matmul +except AttributeError: + def matmul_operator(a, b): + try: + func = a.__matmul__ + except AttributeError: + func = b.__rmatmul__ + return func(a, b) + +compile_time_binary_operators = { + '<': operator.lt, + '<=': operator.le, + '==': operator.eq, + '!=': operator.ne, + '>=': operator.ge, + '>': operator.gt, + 'is': operator.is_, + 'is_not': operator.is_not, + '+': operator.add, + '&': operator.and_, + '/': operator.truediv, + '//': operator.floordiv, + '<<': operator.lshift, + '%': operator.mod, + '*': operator.mul, + '|': operator.or_, + '**': operator.pow, + '>>': operator.rshift, + '-': operator.sub, + '^': operator.xor, + '@': matmul_operator, + 'in': lambda x, seq: x in seq, + 'not_in': lambda x, seq: x not in seq, +} + +def get_compile_time_binop(node): + func = compile_time_binary_operators.get(node.operator) + if not func: + error(node.pos, + "Binary '%s' not supported in compile-time expression" + % node.operator) + return func + + +class BinopNode(ExprNode): + # operator string + # operand1 ExprNode + # operand2 ExprNode + # + # Processing during analyse_expressions phase: + # + # analyse_c_operation + # Called when neither operand is a pyobject. + # - Check operand types and coerce if needed. + # - Determine result type and result code fragment. + # - Allocate temporary for result if needed. 
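+    #
+    # (Added, illustrative) dispatch in analyse_operation() for ``a + b``:
+    #   C int + C int       -> plain C infix code via analyse_c_operation()
+    #   any Python operand  -> PyNumber_Add() etc. via py_operation_function()
+    #   C++ class operand   -> operator lookup in analyse_cpp_operation()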
+ + subexprs = ['operand1', 'operand2'] + inplace = False + + def calculate_constant_result(self): + func = compile_time_binary_operators[self.operator] + self.constant_result = func( + self.operand1.constant_result, + self.operand2.constant_result) + + def compile_time_value(self, denv): + func = get_compile_time_binop(self) + operand1 = self.operand1.compile_time_value(denv) + operand2 = self.operand2.compile_time_value(denv) + try: + return func(operand1, operand2) + except Exception as e: + self.compile_time_value_error(e) + + def infer_type(self, env): + return self.result_type(self.operand1.infer_type(env), + self.operand2.infer_type(env), env) + + def analyse_types(self, env): + self.operand1 = self.operand1.analyse_types(env) + self.operand2 = self.operand2.analyse_types(env) + self.analyse_operation(env) + return self + + def analyse_operation(self, env): + if self.is_pythran_operation(env): + self.type = self.result_type(self.operand1.type, + self.operand2.type, env) + assert self.type.is_pythran_expr + self.is_temp = 1 + elif self.is_py_operation(): + self.coerce_operands_to_pyobjects(env) + self.type = self.result_type(self.operand1.type, + self.operand2.type, env) + assert self.type.is_pyobject + self.is_temp = 1 + elif self.is_cpp_operation(): + self.analyse_cpp_operation(env) + else: + self.analyse_c_operation(env) + + def is_py_operation(self): + return self.is_py_operation_types(self.operand1.type, self.operand2.type) + + def is_py_operation_types(self, type1, type2): + return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple + + def is_pythran_operation(self, env): + return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env) + + def is_pythran_operation_types(self, type1, type2, env): + # Support only expr op supported_type, or supported_type op expr + return has_np_pythran(env) and \ + (is_pythran_supported_operation_type(type1) and is_pythran_supported_operation_type(type2)) and \ + (is_pythran_expr(type1) or is_pythran_expr(type2)) + + def is_cpp_operation(self): + return (self.operand1.type.is_cpp_class + or self.operand2.type.is_cpp_class) + + def analyse_cpp_operation(self, env): + entry = env.lookup_operator(self.operator, [self.operand1, self.operand2]) + if not entry: + self.type_error() + return + func_type = entry.type + self.exception_check = func_type.exception_check + self.exception_value = func_type.exception_value + if self.exception_check == '+': + # Used by NumBinopNodes to break up expressions involving multiple + # operators so that exceptions can be handled properly. 
+ self.is_temp = 1 + if needs_cpp_exception_conversion(self): + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + if func_type.is_ptr: + func_type = func_type.base_type + if len(func_type.args) == 1: + self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env) + else: + self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env) + self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env) + self.type = func_type.return_type + + def result_type(self, type1, type2, env): + if self.is_pythran_operation_types(type1, type2, env): + return PythranExpr(pythran_binop_type(self.operator, type1, type2)) + if self.is_py_operation_types(type1, type2): + if type2.is_string: + type2 = Builtin.bytes_type + elif type2.is_pyunicode_ptr: + type2 = Builtin.unicode_type + if type1.is_string: + type1 = Builtin.bytes_type + elif type1.is_pyunicode_ptr: + type1 = Builtin.unicode_type + if type1.is_builtin_type or type2.is_builtin_type: + if type1 is type2 and self.operator in '**%+|&^': + # FIXME: at least these operators should be safe - others? + return type1 + result_type = self.infer_builtin_types_operation(type1, type2) + if result_type is not None: + return result_type + return py_object_type + elif type1.is_error or type2.is_error: + return PyrexTypes.error_type + else: + return self.compute_c_result_type(type1, type2) + + def infer_builtin_types_operation(self, type1, type2): + return None + + def nogil_check(self, env): + if self.is_py_operation(): + self.gil_error() + + def coerce_operands_to_pyobjects(self, env): + self.operand1 = self.operand1.coerce_to_pyobject(env) + self.operand2 = self.operand2.coerce_to_pyobject(env) + + def check_const(self): + return self.operand1.check_const() and self.operand2.check_const() + + def is_ephemeral(self): + return (super(BinopNode, self).is_ephemeral() or + self.operand1.is_ephemeral() or self.operand2.is_ephemeral()) + + def generate_result_code(self, code): + type1 = self.operand1.type + type2 = self.operand2.type + if self.type.is_pythran_expr: + code.putln("// Pythran binop") + code.putln("__Pyx_call_destructor(%s);" % self.result()) + if self.operator == '**': + code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::power{}(%s, %s)};" % ( + self.result(), + self.result(), + self.operand1.pythran_result(), + self.operand2.pythran_result())) + else: + code.putln("new (&%s) decltype(%s){%s %s %s};" % ( + self.result(), + self.result(), + self.operand1.pythran_result(), + self.operator, + self.operand2.pythran_result())) + elif type1.is_pyobject or type2.is_pyobject: + function = self.py_operation_function(code) + extra_args = ", Py_None" if self.operator == '**' else "" + op1_result = self.operand1.py_result() if type1.is_pyobject else self.operand1.result() + op2_result = self.operand2.py_result() if type2.is_pyobject else self.operand2.result() + code.putln( + "%s = %s(%s, %s%s); %s" % ( + self.result(), + function, + op1_result, + op2_result, + extra_args, + code.error_goto_if_null(self.result(), self.pos))) + self.generate_gotref(code) + elif self.is_temp: + # C++ overloaded operators with exception values are currently all + # handled through temporaries. 
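+            # (Added note) e.g. for a C++ ``operator+`` declared ``except +``,
+            # the assignment below is wrapped by translate_cpp_exception() so
+            # that a thrown C++ exception becomes a Python error.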
+ if self.is_cpp_operation() and self.exception_check == '+': + translate_cpp_exception(code, self.pos, + "%s = %s;" % (self.result(), self.calculate_result_code()), + self.result() if self.type.is_pyobject else None, + self.exception_value, self.in_nogil_context) + else: + code.putln("%s = %s;" % (self.result(), self.calculate_result_code())) + + def type_error(self): + if not (self.operand1.type.is_error + or self.operand2.type.is_error): + error(self.pos, "Invalid operand types for '%s' (%s; %s)" % + (self.operator, self.operand1.type, + self.operand2.type)) + self.type = PyrexTypes.error_type + + +class CBinopNode(BinopNode): + + def analyse_types(self, env): + node = BinopNode.analyse_types(self, env) + if node.is_py_operation(): + node.type = PyrexTypes.error_type + return node + + def py_operation_function(self, code): + return "" + + def calculate_result_code(self): + return "(%s %s %s)" % ( + self.operand1.result(), + self.operator, + self.operand2.result()) + + def compute_c_result_type(self, type1, type2): + cpp_type = None + if type1.is_cpp_class or type1.is_ptr: + cpp_type = type1.find_cpp_operation_type(self.operator, type2) + if cpp_type is None and (type2.is_cpp_class or type2.is_ptr): + cpp_type = type2.find_cpp_operation_type(self.operator, type1) + # FIXME: do we need to handle other cases here? + return cpp_type + + +def c_binop_constructor(operator): + def make_binop_node(pos, **operands): + return CBinopNode(pos, operator=operator, **operands) + return make_binop_node + +class NumBinopNode(BinopNode): + # Binary operation taking numeric arguments. + + infix = True + overflow_check = False + overflow_bit_node = None + + def analyse_c_operation(self, env): + type1 = self.operand1.type + type2 = self.operand2.type + self.type = self.compute_c_result_type(type1, type2) + if not self.type: + self.type_error() + return + if self.type.is_complex: + self.infix = False + if (self.type.is_int + and env.directives['overflowcheck'] + and self.operator in self.overflow_op_names): + if (self.operator in ('+', '*') + and self.operand1.has_constant_result() + and not self.operand2.has_constant_result()): + self.operand1, self.operand2 = self.operand2, self.operand1 + self.overflow_check = True + self.overflow_fold = env.directives['overflowcheck.fold'] + self.func = self.type.overflow_check_binop( + self.overflow_op_names[self.operator], + env, + const_rhs = self.operand2.has_constant_result()) + self.is_temp = True + if not self.infix or (type1.is_numeric and type2.is_numeric): + self.operand1 = self.operand1.coerce_to(self.type, env) + self.operand2 = self.operand2.coerce_to(self.type, env) + + def compute_c_result_type(self, type1, type2): + if self.c_types_okay(type1, type2): + widest_type = PyrexTypes.widest_numeric_type(type1, type2) + if widest_type is PyrexTypes.c_bint_type: + if self.operator not in '|^&': + # False + False == 0 # not False! + widest_type = PyrexTypes.c_int_type + else: + widest_type = PyrexTypes.widest_numeric_type( + widest_type, PyrexTypes.c_int_type) + return widest_type + else: + return None + + def may_be_none(self): + if self.type and self.type.is_builtin_type: + # if we know the result type, we know the operation, so it can't be None + return False + type1 = self.operand1.type + type2 = self.operand2.type + if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type: + # XXX: I can't think of any case where a binary operation + # on builtin types evaluates to None - add a special case + # here if there is one. 
+ return False + return super(NumBinopNode, self).may_be_none() + + def get_constant_c_result_code(self): + value1 = self.operand1.get_constant_c_result_code() + value2 = self.operand2.get_constant_c_result_code() + if value1 and value2: + return "(%s %s %s)" % (value1, self.operator, value2) + else: + return None + + def c_types_okay(self, type1, type2): + #print "NumBinopNode.c_types_okay:", type1, type2 ### + return (type1.is_numeric or type1.is_enum) \ + and (type2.is_numeric or type2.is_enum) + + def generate_evaluation_code(self, code): + if self.overflow_check: + self.overflow_bit_node = self + self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False) + code.putln("%s = 0;" % self.overflow_bit) + super(NumBinopNode, self).generate_evaluation_code(code) + if self.overflow_check: + code.putln("if (unlikely(%s)) {" % self.overflow_bit) + code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");') + code.putln(code.error_goto(self.pos)) + code.putln("}") + code.funcstate.release_temp(self.overflow_bit) + + def calculate_result_code(self): + if self.overflow_bit_node is not None: + return "%s(%s, %s, &%s)" % ( + self.func, + self.operand1.result(), + self.operand2.result(), + self.overflow_bit_node.overflow_bit) + elif self.type.is_cpp_class or self.infix: + if is_pythran_expr(self.type): + result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result() + else: + result1, result2 = self.operand1.result(), self.operand2.result() + return "(%s %s %s)" % (result1, self.operator, result2) + else: + func = self.type.binary_op(self.operator) + if func is None: + error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type)) + return "%s(%s, %s)" % ( + func, + self.operand1.result(), + self.operand2.result()) + + def is_py_operation_types(self, type1, type2): + return (type1.is_unicode_char or + type2.is_unicode_char or + BinopNode.is_py_operation_types(self, type1, type2)) + + def py_operation_function(self, code): + function_name = self.py_functions[self.operator] + if self.inplace: + function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace') + return function_name + + py_functions = { + "|": "PyNumber_Or", + "^": "PyNumber_Xor", + "&": "PyNumber_And", + "<<": "PyNumber_Lshift", + ">>": "PyNumber_Rshift", + "+": "PyNumber_Add", + "-": "PyNumber_Subtract", + "*": "PyNumber_Multiply", + "@": "__Pyx_PyNumber_MatrixMultiply", + "/": "__Pyx_PyNumber_Divide", + "//": "PyNumber_FloorDivide", + "%": "PyNumber_Remainder", + "**": "PyNumber_Power", + } + + overflow_op_names = { + "+": "add", + "-": "sub", + "*": "mul", + "<<": "lshift", + } + + +class IntBinopNode(NumBinopNode): + # Binary operation taking integer arguments. + + def c_types_okay(self, type1, type2): + #print "IntBinopNode.c_types_okay:", type1, type2 ### + return (type1.is_int or type1.is_enum) \ + and (type2.is_int or type2.is_enum) + + +class AddNode(NumBinopNode): + # '+' operator. 
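+    #
+    # (Added, illustrative) examples of '+' specialisation:
+    #   ptr + int          -> pointer arithmetic, keeps the pointer type
+    #   bytes + bytes      -> inferred as bytes (infer_builtin_types_operation)
+    #   unicode + unicode  -> optimised concat helpers from ObjectHandling.c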
+ + def is_py_operation_types(self, type1, type2): + if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr: + return 1 + else: + return NumBinopNode.is_py_operation_types(self, type1, type2) + + def infer_builtin_types_operation(self, type1, type2): + # b'abc' + 'abc' raises an exception in Py3, + # so we can safely infer the Py2 type for bytes here + string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type) + if type1 in string_types and type2 in string_types: + return string_types[max(string_types.index(type1), + string_types.index(type2))] + return None + + def compute_c_result_type(self, type1, type2): + #print "AddNode.compute_c_result_type:", type1, self.operator, type2 ### + if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum): + return type1 + elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum): + return type2 + else: + return NumBinopNode.compute_c_result_type( + self, type1, type2) + + def py_operation_function(self, code): + type1, type2 = self.operand1.type, self.operand2.type + func = None + if type1 is unicode_type or type2 is unicode_type: + if type1 in (unicode_type, str_type) and type2 in (unicode_type, str_type): + is_unicode_concat = True + elif isinstance(self.operand1, FormattedValueNode) or isinstance(self.operand2, FormattedValueNode): + # Assume that even if we don't know the second type, it's going to be a string. + is_unicode_concat = True + else: + # Operation depends on the second type. + is_unicode_concat = False + + if is_unicode_concat: + if self.inplace or self.operand1.is_temp: + code.globalstate.use_utility_code( + UtilityCode.load_cached("UnicodeConcatInPlace", "ObjectHandling.c")) + func = '__Pyx_PyUnicode_Concat' + elif type1 is str_type and type2 is str_type: + code.globalstate.use_utility_code( + UtilityCode.load_cached("StrConcatInPlace", "ObjectHandling.c")) + func = '__Pyx_PyStr_Concat' + + if func: + # any necessary utility code will be got by "NumberAdd" in generate_evaluation_code + if self.inplace or self.operand1.is_temp: + func += 'InPlace' # upper case to indicate unintuitive macro + if self.operand1.may_be_none() or self.operand2.may_be_none(): + func += 'Safe' + return func + + return super(AddNode, self).py_operation_function(code) + + +class SubNode(NumBinopNode): + # '-' operator. + + def compute_c_result_type(self, type1, type2): + if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum): + return type1 + elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array): + return PyrexTypes.c_ptrdiff_t_type + else: + return NumBinopNode.compute_c_result_type( + self, type1, type2) + + +class MulNode(NumBinopNode): + # '*' operator. + is_sequence_mul = False + + def analyse_types(self, env): + self.operand1 = self.operand1.analyse_types(env) + self.operand2 = self.operand2.analyse_types(env) + self.is_sequence_mul = self.calculate_is_sequence_mul() + + # TODO: we could also optimise the case of "[...] * 2 * n", i.e. 
with an existing 'mult_factor' + if self.is_sequence_mul: + operand1 = self.operand1 + operand2 = self.operand2 + if operand1.is_sequence_constructor and operand1.mult_factor is None: + return self.analyse_sequence_mul(env, operand1, operand2) + elif operand2.is_sequence_constructor and operand2.mult_factor is None: + return self.analyse_sequence_mul(env, operand2, operand1) + + self.analyse_operation(env) + return self + + @staticmethod + def is_builtin_seqmul_type(type): + return type.is_builtin_type and type in builtin_sequence_types and type is not memoryview_type + + def calculate_is_sequence_mul(self): + type1 = self.operand1.type + type2 = self.operand2.type + if type1 is long_type or type1.is_int: + # normalise to (X * int) + type1, type2 = type2, type1 + if type2 is long_type or type2.is_int: + if type1.is_string or type1.is_ctuple: + return True + if self.is_builtin_seqmul_type(type1): + return True + return False + + def analyse_sequence_mul(self, env, seq, mult): + assert seq.mult_factor is None + seq = seq.coerce_to_pyobject(env) + seq.mult_factor = mult + return seq.analyse_types(env) + + def coerce_operands_to_pyobjects(self, env): + if self.is_sequence_mul: + # Keep operands as they are, but ctuples must become Python tuples to multiply them. + if self.operand1.type.is_ctuple: + self.operand1 = self.operand1.coerce_to_pyobject(env) + elif self.operand2.type.is_ctuple: + self.operand2 = self.operand2.coerce_to_pyobject(env) + return + super(MulNode, self).coerce_operands_to_pyobjects(env) + + def is_py_operation_types(self, type1, type2): + return self.is_sequence_mul or super(MulNode, self).is_py_operation_types(type1, type2) + + def py_operation_function(self, code): + if self.is_sequence_mul: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PySequenceMultiply", "ObjectHandling.c")) + return "__Pyx_PySequence_Multiply" if self.operand1.type.is_pyobject else "__Pyx_PySequence_Multiply_Left" + return super(MulNode, self).py_operation_function(code) + + def infer_builtin_types_operation(self, type1, type2): + # let's assume that whatever builtin type you multiply a builtin sequence type with + # will either return a sequence of the same type or fail with an exception + if type1.is_builtin_type and type2.is_builtin_type: + if self.is_builtin_seqmul_type(type1): + return type1 + if self.is_builtin_seqmul_type(type2): + return type2 + # multiplication of containers/numbers with an integer value + # always (?) returns the same type + if type1.is_int: + return type2 + if type2.is_int: + return type1 + return None + + +class MatMultNode(NumBinopNode): + # '@' operator. + + def is_py_operation_types(self, type1, type2): + return True + + def generate_evaluation_code(self, code): + code.globalstate.use_utility_code(UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c")) + super(MatMultNode, self).generate_evaluation_code(code) + + +class DivNode(NumBinopNode): + # '/' or '//' operator. 
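+    #
+    # (Added, illustrative) with the default ``cdivision=False`` directive,
+    # C integer '/' and '//' follow Python semantics: ``-7 // 2 == -4`` and
+    # a zero divisor raises ZeroDivisionError.  With ``cdivision=True`` the
+    # raw C behaviour applies: truncation (``-7 / 2 == -3``) and no zero
+    # check.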
+ + cdivision = None + truedivision = None # == "unknown" if operator == '/' + ctruedivision = False + cdivision_warnings = False + zerodivision_check = None + + def find_compile_time_binary_operator(self, op1, op2): + func = compile_time_binary_operators[self.operator] + if self.operator == '/' and self.truedivision is None: + # => true div for floats, floor div for integers + if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types): + func = compile_time_binary_operators['//'] + return func + + def calculate_constant_result(self): + op1 = self.operand1.constant_result + op2 = self.operand2.constant_result + func = self.find_compile_time_binary_operator(op1, op2) + self.constant_result = func( + self.operand1.constant_result, + self.operand2.constant_result) + + def compile_time_value(self, denv): + operand1 = self.operand1.compile_time_value(denv) + operand2 = self.operand2.compile_time_value(denv) + try: + func = self.find_compile_time_binary_operator( + operand1, operand2) + return func(operand1, operand2) + except Exception as e: + self.compile_time_value_error(e) + + def _check_truedivision(self, env): + if self.cdivision or env.directives['cdivision']: + self.ctruedivision = False + else: + self.ctruedivision = self.truedivision + + def infer_type(self, env): + self._check_truedivision(env) + return self.result_type( + self.operand1.infer_type(env), + self.operand2.infer_type(env), env) + + def analyse_operation(self, env): + self._check_truedivision(env) + NumBinopNode.analyse_operation(self, env) + if self.is_cpp_operation(): + self.cdivision = True + if not self.type.is_pyobject: + self.zerodivision_check = ( + self.cdivision is None and not env.directives['cdivision'] + and (not self.operand2.has_constant_result() or + self.operand2.constant_result == 0)) + if self.zerodivision_check or env.directives['cdivision_warnings']: + # Need to check ahead of time to warn or raise zero division error + self.operand1 = self.operand1.coerce_to_simple(env) + self.operand2 = self.operand2.coerce_to_simple(env) + + def compute_c_result_type(self, type1, type2): + if self.operator == '/' and self.ctruedivision and not type1.is_cpp_class and not type2.is_cpp_class: + if not type1.is_float and not type2.is_float: + widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type) + widest_type = PyrexTypes.widest_numeric_type(type2, widest_type) + return widest_type + return NumBinopNode.compute_c_result_type(self, type1, type2) + + def zero_division_message(self): + if self.type.is_int: + return "integer division or modulo by zero" + else: + return "float division" + + def generate_evaluation_code(self, code): + if not self.type.is_pyobject and not self.type.is_complex: + if self.cdivision is None: + self.cdivision = ( + code.globalstate.directives['cdivision'] + or self.type.is_float + or ((self.type.is_numeric or self.type.is_enum) and not self.type.signed) + ) + if not self.cdivision: + code.globalstate.use_utility_code( + UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type)) + NumBinopNode.generate_evaluation_code(self, code) + self.generate_div_warning_code(code) + + def generate_div_warning_code(self, code): + in_nogil = self.in_nogil_context + if not self.type.is_pyobject: + if self.zerodivision_check: + if not self.infix: + zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result()) + else: + zero_test = "%s == 0" % self.operand2.result() + code.putln("if (unlikely(%s)) {" % zero_test) + if in_nogil: + code.put_ensure_gil() + 
code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message()) + if in_nogil: + code.put_release_ensured_gil() + code.putln(code.error_goto(self.pos)) + code.putln("}") + if self.type.is_int and self.type.signed and self.operator != '%': + code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c")) + if self.operand2.type.signed == 2: + # explicitly signed, no runtime check needed + minus1_check = 'unlikely(%s == -1)' % self.operand2.result() + else: + type_of_op2 = self.operand2.type.empty_declaration_code() + minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % ( + type_of_op2, self.operand2.result(), type_of_op2) + code.putln("else if (sizeof(%s) == sizeof(long) && %s " + " && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(%s))) {" % ( + self.type.empty_declaration_code(), + minus1_check, + self.operand1.result())) + if in_nogil: + code.put_ensure_gil() + code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");') + if in_nogil: + code.put_release_ensured_gil() + code.putln(code.error_goto(self.pos)) + code.putln("}") + if code.globalstate.directives['cdivision_warnings'] and self.operator != '/': + code.globalstate.use_utility_code( + UtilityCode.load_cached("CDivisionWarning", "CMath.c")) + code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % ( + self.operand1.result(), + self.operand2.result())) + warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % { + 'FILENAME': Naming.filename_cname, + 'LINENO': Naming.lineno_cname, + } + + if in_nogil: + result_code = 'result' + code.putln("int %s;" % result_code) + code.put_ensure_gil() + code.putln(code.set_error_info(self.pos, used=True)) + code.putln("%s = %s;" % (result_code, warning_code)) + code.put_release_ensured_gil() + else: + result_code = warning_code + code.putln(code.set_error_info(self.pos, used=True)) + + code.put("if (unlikely(%s)) " % result_code) + code.put_goto(code.error_label) + code.putln("}") + + def calculate_result_code(self): + if self.type.is_complex or self.is_cpp_operation(): + return NumBinopNode.calculate_result_code(self) + elif self.type.is_float and self.operator == '//': + return "floor(%s / %s)" % ( + self.operand1.result(), + self.operand2.result()) + elif self.truedivision or self.cdivision: + op1 = self.operand1.result() + op2 = self.operand2.result() + if self.truedivision: + if self.type != self.operand1.type: + op1 = self.type.cast_code(op1) + if self.type != self.operand2.type: + op2 = self.type.cast_code(op2) + return "(%s / %s)" % (op1, op2) + else: + return "__Pyx_div_%s(%s, %s)" % ( + self.type.specialization_name(), + self.operand1.result(), + self.operand2.result()) + + +_find_formatting_types = re.compile( + br"%" + br"(?:%|" # %% + br"(?:\([^)]+\))?" # %(name) + br"[-+#,0-9 ]*([a-z])" # %.2f etc. + br")").findall + +# These format conversion types can never trigger a Unicode string conversion in Py2. +_safe_bytes_formats = frozenset({ + # Excludes 's' and 'r', which can generate non-bytes strings. + b'd', b'i', b'o', b'u', b'x', b'X', b'e', b'E', b'f', b'F', b'g', b'G', b'c', b'b', b'a', +}) + + +class ModNode(DivNode): + # '%' operator. + + def is_py_operation_types(self, type1, type2): + return (type1.is_string + or type2.is_string + or NumBinopNode.is_py_operation_types(self, type1, type2)) + + def infer_builtin_types_operation(self, type1, type2): + # b'%s' % xyz raises an exception in Py3<3.5, so it's safe to infer the type for Py2 and later Py3's. 
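+        # (Added, illustrative) outcomes of the branches below:
+        #   u'%s' % x            -> unicode (a literal LHS cannot be None)
+        #   b'%d' % x            -> bytes ('d' is in _safe_bytes_formats)
+        #   b'%s' % x  (literal) -> basestring, since '%s' may produce a str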
+        if type1 is unicode_type:
+            # None + xyz  may be implemented by RHS
+            if type2.is_builtin_type or not self.operand1.may_be_none():
+                return type1
+        elif type1 in (bytes_type, str_type, basestring_type):
+            if type2 is unicode_type:
+                return type2
+            elif type2.is_numeric:
+                return type1
+            elif self.operand1.is_string_literal:
+                if type1 is str_type or type1 is bytes_type:
+                    if set(_find_formatting_types(self.operand1.value)) <= _safe_bytes_formats:
+                        return type1
+                return basestring_type
+            elif type1 is bytes_type and not type2.is_builtin_type:
+                return None   # RHS might implement '%' operator differently in Py3
+            else:
+                return basestring_type  # either str or unicode, can't tell
+        return None
+
+    def zero_division_message(self):
+        if self.type.is_int:
+            return "integer division or modulo by zero"
+        else:
+            return "float divmod()"
+
+    def analyse_operation(self, env):
+        DivNode.analyse_operation(self, env)
+        if not self.type.is_pyobject:
+            if self.cdivision is None:
+                self.cdivision = env.directives['cdivision'] or not self.type.signed
+            if not self.cdivision and not self.type.is_int and not self.type.is_float:
+                error(self.pos, "mod operator not supported for type '%s'" % self.type)
+
+    def generate_evaluation_code(self, code):
+        if not self.type.is_pyobject and not self.cdivision:
+            if self.type.is_int:
+                code.globalstate.use_utility_code(
+                    UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type))
+            else:  # float
+                code.globalstate.use_utility_code(
+                    UtilityCode.load_cached("ModFloat", "CMath.c").specialize(
+                        self.type, math_h_modifier=self.type.math_h_modifier))
+        # NOTE: skipping over DivNode here
+        NumBinopNode.generate_evaluation_code(self, code)
+        self.generate_div_warning_code(code)
+
+    def calculate_result_code(self):
+        if self.cdivision:
+            if self.type.is_float:
+                return "fmod%s(%s, %s)" % (
+                    self.type.math_h_modifier,
+                    self.operand1.result(),
+                    self.operand2.result())
+            else:
+                return "(%s %% %s)" % (
+                    self.operand1.result(),
+                    self.operand2.result())
+        else:
+            return "__Pyx_mod_%s(%s, %s)" % (
+                self.type.specialization_name(),
+                self.operand1.result(),
+                self.operand2.result())
+
+    def py_operation_function(self, code):
+        type1, type2 = self.operand1.type, self.operand2.type
+        # ("..." % x)  must call "x.__rmod__()" for string subtypes.
+        if type1 is unicode_type:
+            if self.operand1.may_be_none() or (
+                    type2.is_extension_type and type2.subtype_of(type1) or
+                    type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
+                return '__Pyx_PyUnicode_FormatSafe'
+            else:
+                return 'PyUnicode_Format'
+        elif type1 is str_type:
+            if self.operand1.may_be_none() or (
+                    type2.is_extension_type and type2.subtype_of(type1) or
+                    type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
+                return '__Pyx_PyString_FormatSafe'
+            else:
+                return '__Pyx_PyString_Format'
+        return super(ModNode, self).py_operation_function(code)
+
+
+class PowNode(NumBinopNode):
+    #  '**' operator.
+
+    is_cpow = None
+    type_was_inferred = False  # was the result type affected by cpow==False?
+    # Intended to allow it to be changed if the node is coerced.
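+
+    # (Added, illustrative) the ``cpow`` directive selects the semantics for
+    # C operands: with cpow=True, ``2 ** -1`` uses C integer pow and yields 0;
+    # with cpow=False (the Cython 3 default) the result type is widened so
+    # that ``2 ** -1 == 0.5``.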
+ + def _check_cpow(self, env): + if self.is_cpow is not None: + return # already set + self.is_cpow = env.directives['cpow'] + + def infer_type(self, env): + self._check_cpow(env) + return super(PowNode, self).infer_type(env) + + def analyse_types(self, env): + self._check_cpow(env) + return super(PowNode, self).analyse_types(env) + + def analyse_c_operation(self, env): + NumBinopNode.analyse_c_operation(self, env) + if self.type.is_complex: + if self.type.real_type.is_float: + self.operand1 = self.operand1.coerce_to(self.type, env) + self.operand2 = self.operand2.coerce_to(self.type, env) + self.pow_func = self.type.binary_op('**') + else: + error(self.pos, "complex int powers not supported") + self.pow_func = "" + elif self.type.is_float: + self.pow_func = "pow" + self.type.math_h_modifier + elif self.type.is_int: + self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_') + env.use_utility_code( + UtilityCode.load_cached("IntPow", "CMath.c").specialize( + func_name=self.pow_func, + type=self.type.empty_declaration_code(), + signed=self.type.signed and 1 or 0)) + elif not self.type.is_error: + error(self.pos, "got unexpected types for C power operator: %s, %s" % + (self.operand1.type, self.operand2.type)) + + def compute_c_result_type(self, type1, type2): + from numbers import Real + c_result_type = None + op1_is_definitely_positive = ( + self.operand1.has_constant_result() + and self.operand1.constant_result >= 0 + ) or ( + type1.is_int and type1.signed == 0 # definitely unsigned + ) + type2_is_int = type2.is_int or ( + self.operand2.has_constant_result() and + isinstance(self.operand2.constant_result, Real) and + int(self.operand2.constant_result) == self.operand2.constant_result + ) + needs_widening = False + if self.is_cpow: + c_result_type = super(PowNode, self).compute_c_result_type(type1, type2) + if not self.operand2.has_constant_result(): + needs_widening = ( + isinstance(self.operand2.constant_result, _py_int_types) and self.operand2.constant_result < 0 + ) + elif op1_is_definitely_positive or type2_is_int: # cpow==False + # if type2 is an integer then we can't end up going from real to complex + c_result_type = super(PowNode, self).compute_c_result_type(type1, type2) + if not self.operand2.has_constant_result(): + needs_widening = type2.is_int and type2.signed + if needs_widening: + self.type_was_inferred = True + else: + needs_widening = ( + isinstance(self.operand2.constant_result, _py_int_types) and self.operand2.constant_result < 0 + ) + elif self.c_types_okay(type1, type2): + # Allowable result types are double or complex double. + # Return the special "soft complex" type to store it as a + # complex number but with specialized coercions to Python + c_result_type = PyrexTypes.soft_complex_type + self.type_was_inferred = True + if needs_widening: + c_result_type = PyrexTypes.widest_numeric_type(c_result_type, PyrexTypes.c_double_type) + return c_result_type + + def calculate_result_code(self): + # Work around MSVC overloading ambiguity. 
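+        # (Added note) MSVC treats mixed-type calls such as ``pow(double, int)``
+        # as ambiguous between its overloads, so both operands are explicitly
+        # cast to the result type before calling self.pow_func.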
+ def typecast(operand): + if self.type == operand.type: + return operand.result() + else: + return self.type.cast_code(operand.result()) + return "%s(%s, %s)" % ( + self.pow_func, + typecast(self.operand1), + typecast(self.operand2)) + + def py_operation_function(self, code): + if (self.type.is_pyobject and + self.operand1.constant_result == 2 and + isinstance(self.operand1.constant_result, _py_int_types) and + self.operand2.type is py_object_type): + code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c')) + if self.inplace: + return '__Pyx_PyNumber_InPlacePowerOf2' + else: + return '__Pyx_PyNumber_PowerOf2' + return super(PowNode, self).py_operation_function(code) + + def coerce_to(self, dst_type, env): + if dst_type == self.type: + return self + if (self.is_cpow is None and self.type_was_inferred and + (dst_type.is_float or dst_type.is_int)): + # if we're trying to coerce this directly to a C float or int + # then fall back to the cpow == True behaviour since this is + # almost certainly the user intent. + # However, ensure that the operand types are suitable C types + if self.type is PyrexTypes.soft_complex_type: + def check_types(operand, recurse=True): + if operand.type.is_float or operand.type.is_int: + return True, operand + if recurse and isinstance(operand, CoerceToComplexNode): + return check_types(operand.arg, recurse=False), operand.arg + return False, None + msg_detail = "a non-complex C numeric type" + elif dst_type.is_int: + def check_types(operand): + if operand.type.is_int: + return True, operand + else: + # int, int doesn't seem to involve coercion nodes + return False, None + msg_detail = "an integer C numeric type" + else: + def check_types(operand): + return False, None + check_op1, op1 = check_types(self.operand1) + check_op2, op2 = check_types(self.operand2) + if check_op1 and check_op2: + warning(self.pos, "Treating '**' as if 'cython.cpow(True)' since it " + "is directly assigned to a %s. " + "This is likely to be fragile and we recommend setting " + "'cython.cpow' explicitly." % msg_detail) + self.is_cpow = True + self.operand1 = op1 + self.operand2 = op2 + result = self.analyse_types(env) + if result.type != dst_type: + result = result.coerce_to(dst_type, env) + return result + return super(PowNode, self).coerce_to(dst_type, env) + + +class BoolBinopNode(ExprNode): + """ + Short-circuiting boolean operation. + + Note that this node provides the same code generation method as + BoolBinopResultNode to simplify expression nesting. 
+ + operator string "and"/"or" + operand1 BoolBinopNode/BoolBinopResultNode left operand + operand2 BoolBinopNode/BoolBinopResultNode right operand + """ + subexprs = ['operand1', 'operand2'] + is_temp = True + operator = None + operand1 = None + operand2 = None + + def infer_type(self, env): + type1 = self.operand1.infer_type(env) + type2 = self.operand2.infer_type(env) + return PyrexTypes.independent_spanning_type(type1, type2) + + def may_be_none(self): + if self.operator == 'or': + return self.operand2.may_be_none() + else: + return self.operand1.may_be_none() or self.operand2.may_be_none() + + def calculate_constant_result(self): + operand1 = self.operand1.constant_result + operand2 = self.operand2.constant_result + if self.operator == 'and': + self.constant_result = operand1 and operand2 + else: + self.constant_result = operand1 or operand2 + + def compile_time_value(self, denv): + operand1 = self.operand1.compile_time_value(denv) + operand2 = self.operand2.compile_time_value(denv) + if self.operator == 'and': + return operand1 and operand2 + else: + return operand1 or operand2 + + def is_ephemeral(self): + return self.operand1.is_ephemeral() or self.operand2.is_ephemeral() + + def analyse_types(self, env): + # Note: we do not do any coercion here as we most likely do not know the final type anyway. + # We even accept to set self.type to ErrorType if both operands do not have a spanning type. + # The coercion to the final type and to a "simple" value is left to coerce_to(). + operand1 = self.operand1.analyse_types(env) + operand2 = self.operand2.analyse_types(env) + self.type = PyrexTypes.independent_spanning_type( + operand1.type, operand2.type) + self.operand1 = self._wrap_operand(operand1, env) + self.operand2 = self._wrap_operand(operand2, env) + return self + + def _wrap_operand(self, operand, env): + if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)): + operand = BoolBinopResultNode(operand, self.type, env) + return operand + + def wrap_operands(self, env): + """ + Must get called by transforms that want to create a correct BoolBinopNode + after the type analysis phase. 
+ """ + self.operand1 = self._wrap_operand(self.operand1, env) + self.operand2 = self._wrap_operand(self.operand2, env) + + def coerce_to_boolean(self, env): + return self.coerce_to(PyrexTypes.c_bint_type, env) + + def coerce_to(self, dst_type, env): + operand1 = self.operand1.coerce_to(dst_type, env) + operand2 = self.operand2.coerce_to(dst_type, env) + return BoolBinopNode.from_node( + self, type=dst_type, + operator=self.operator, + operand1=operand1, operand2=operand2) + + def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through): + code.mark_pos(self.pos) + + outer_labels = (and_label, or_label) + if self.operator == 'and': + my_label = and_label = code.new_label('next_and') + else: + my_label = or_label = code.new_label('next_or') + self.operand1.generate_bool_evaluation_code( + code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label) + + and_label, or_label = outer_labels + + code.put_label(my_label) + self.operand2.generate_bool_evaluation_code( + code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through) + + def generate_evaluation_code(self, code): + self.allocate_temp_result(code) + result_type = PyrexTypes.py_object_type if self.type.is_pyobject else self.type + or_label = and_label = None + end_label = code.new_label('bool_binop_done') + self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label) + code.put_label(end_label) + + gil_message = "Truth-testing Python object" + + def check_const(self): + return self.operand1.check_const() and self.operand2.check_const() + + def generate_subexpr_disposal_code(self, code): + pass # nothing to do here, all done in generate_evaluation_code() + + def free_subexpr_temps(self, code): + pass # nothing to do here, all done in generate_evaluation_code() + + def generate_operand1_test(self, code): + # Generate code to test the truth of the first operand. + if self.type.is_pyobject: + test_result = code.funcstate.allocate_temp( + PyrexTypes.c_bint_type, manage_ref=False) + code.putln( + "%s = __Pyx_PyObject_IsTrue(%s); %s" % ( + test_result, + self.operand1.py_result(), + code.error_goto_if_neg(test_result, self.pos))) + else: + test_result = self.operand1.result() + return (test_result, self.type.is_pyobject) + + +class BoolBinopResultNode(ExprNode): + """ + Intermediate result of a short-circuiting and/or expression. + Tests the result for 'truthiness' and takes care of coercing the final result + of the overall expression to the target type. + + Note that this node provides the same code generation method as + BoolBinopNode to simplify expression nesting. 
+ + arg ExprNode the argument to test + value ExprNode the coerced result value node + """ + + subexprs = ['arg', 'value'] + is_temp = True + arg = None + value = None + + def __init__(self, arg, result_type, env): + # using 'arg' multiple times, so it must be a simple/temp value + arg = arg.coerce_to_simple(env) + # wrap in ProxyNode, in case a transform wants to replace self.arg later + arg = ProxyNode(arg) + super(BoolBinopResultNode, self).__init__( + arg.pos, arg=arg, type=result_type, + value=CloneNode(arg).coerce_to(result_type, env)) + + def coerce_to_boolean(self, env): + return self.coerce_to(PyrexTypes.c_bint_type, env) + + def coerce_to(self, dst_type, env): + # unwrap, coerce, rewrap + arg = self.arg.arg + if dst_type is PyrexTypes.c_bint_type: + arg = arg.coerce_to_boolean(env) + # TODO: unwrap more coercion nodes? + return BoolBinopResultNode(arg, dst_type, env) + + def nogil_check(self, env): + # let's leave all errors to BoolBinopNode + pass + + def generate_operand_test(self, code): + # Generate code to test the truth of the first operand. + if self.arg.type.is_pyobject: + test_result = code.funcstate.allocate_temp( + PyrexTypes.c_bint_type, manage_ref=False) + code.putln( + "%s = __Pyx_PyObject_IsTrue(%s); %s" % ( + test_result, + self.arg.py_result(), + code.error_goto_if_neg(test_result, self.pos))) + else: + test_result = self.arg.result() + return (test_result, self.arg.type.is_pyobject) + + def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through): + code.mark_pos(self.pos) + + # x => x + # x and ... or ... => next 'and' / 'or' + # False ... or x => next 'or' + # True and x => next 'and' + # True or x => True (operand) + + self.arg.generate_evaluation_code(code) + if and_label or or_label: + test_result, uses_temp = self.generate_operand_test(code) + if uses_temp and (and_label and or_label): + # cannot become final result => free early + # disposal: uses_temp and (and_label and or_label) + self.arg.generate_disposal_code(code) + sense = '!' if or_label else '' + code.putln("if (%s%s) {" % (sense, test_result)) + if uses_temp: + code.funcstate.release_temp(test_result) + if not uses_temp or not (and_label and or_label): + # disposal: (not uses_temp) or {not (and_label and or_label) [if]} + self.arg.generate_disposal_code(code) + + if or_label and or_label != fall_through: + # value is false => short-circuit to next 'or' + code.put_goto(or_label) + if and_label: + # value is true => go to next 'and' + if or_label: + code.putln("} else {") + if not uses_temp: + # disposal: (not uses_temp) and {(and_label and or_label) [else]} + self.arg.generate_disposal_code(code) + if and_label != fall_through: + code.put_goto(and_label) + + if not and_label or not or_label: + # if no next 'and' or 'or', we provide the result + if and_label or or_label: + code.putln("} else {") + self.value.generate_evaluation_code(code) + self.value.make_owned_reference(code) + code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type))) + self.value.generate_post_assignment_code(code) + # disposal: {not (and_label and or_label) [else]} + self.arg.generate_disposal_code(code) + self.value.free_temps(code) + if end_label != fall_through: + code.put_goto(end_label) + + if and_label or or_label: + code.putln("}") + self.arg.free_temps(code) + + def analyse_types(self, env): + return self + + +class CondExprNode(ExprNode): + # Short-circuiting conditional expression. 
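+    #
+    # (Added, illustrative) for ``x = a if flag else b`` only the selected
+    # branch is evaluated; the node's type is the independent spanning type
+    # of both branches (see analyse_result_type()).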
+ # + # test ExprNode + # true_val ExprNode + # false_val ExprNode + + true_val = None + false_val = None + is_temp = True + + subexprs = ['test', 'true_val', 'false_val'] + + def type_dependencies(self, env): + return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env) + + def infer_type(self, env): + return PyrexTypes.independent_spanning_type( + self.true_val.infer_type(env), + self.false_val.infer_type(env)) + + def calculate_constant_result(self): + if self.test.constant_result: + self.constant_result = self.true_val.constant_result + else: + self.constant_result = self.false_val.constant_result + + def is_ephemeral(self): + return self.true_val.is_ephemeral() or self.false_val.is_ephemeral() + + def analyse_types(self, env): + self.test = self.test.analyse_temp_boolean_expression(env) + self.true_val = self.true_val.analyse_types(env) + self.false_val = self.false_val.analyse_types(env) + return self.analyse_result_type(env) + + def analyse_result_type(self, env): + true_val_type = self.true_val.type + false_val_type = self.false_val.type + self.type = PyrexTypes.independent_spanning_type(true_val_type, false_val_type) + + if self.type.is_reference: + self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type) + if self.type.is_pyobject: + self.result_ctype = py_object_type + elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral(): + error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression") + + if true_val_type.is_pyobject or false_val_type.is_pyobject or self.type.is_pyobject: + if true_val_type != self.type: + self.true_val = self.true_val.coerce_to(self.type, env) + if false_val_type != self.type: + self.false_val = self.false_val.coerce_to(self.type, env) + + if self.type.is_error: + self.type_error() + return self + + def coerce_to_integer(self, env): + if not self.true_val.type.is_int: + self.true_val = self.true_val.coerce_to_integer(env) + if not self.false_val.type.is_int: + self.false_val = self.false_val.coerce_to_integer(env) + self.result_ctype = None + out = self.analyse_result_type(env) + if not out.type.is_int: + # fall back to ordinary coercion since we haven't ended as the correct type + if out is self: + out = super(CondExprNode, out).coerce_to_integer(env) + else: + # I believe `analyse_result_type` always returns a CondExprNode but + # handle the opposite case just in case + out = out.coerce_to_integer(env) + return out + + def coerce_to(self, dst_type, env): + if self.true_val.type != dst_type: + self.true_val = self.true_val.coerce_to(dst_type, env) + if self.false_val.type != dst_type: + self.false_val = self.false_val.coerce_to(dst_type, env) + self.result_ctype = None + out = self.analyse_result_type(env) + if out.type != dst_type: + # fall back to ordinary coercion since we haven't ended as the correct type + if out is self: + out = super(CondExprNode, out).coerce_to(dst_type, env) + else: + # I believe `analyse_result_type` always returns a CondExprNode but + # handle the opposite case just in case + out = out.coerce_to(dst_type, env) + return out + + def type_error(self): + if not (self.true_val.type.is_error or self.false_val.type.is_error): + error(self.pos, "Incompatible types in conditional expression (%s; %s)" % + (self.true_val.type, self.false_val.type)) + self.type = PyrexTypes.error_type + + def check_const(self): + return (self.test.check_const() + and self.true_val.check_const() + and self.false_val.check_const()) + + def generate_evaluation_code(self, code): + 
# Because subexprs may not be evaluated we can use a more optimal + # subexpr allocation strategy than the default, so override evaluation_code. + + code.mark_pos(self.pos) + self.allocate_temp_result(code) + self.test.generate_evaluation_code(code) + code.putln("if (%s) {" % self.test.result()) + self.eval_and_get(code, self.true_val) + code.putln("} else {") + self.eval_and_get(code, self.false_val) + code.putln("}") + self.test.generate_disposal_code(code) + self.test.free_temps(code) + + def eval_and_get(self, code, expr): + expr.generate_evaluation_code(code) + if self.type.is_memoryviewslice: + expr.make_owned_memoryviewslice(code) + else: + expr.make_owned_reference(code) + code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype()))) + expr.generate_post_assignment_code(code) + expr.free_temps(code) + + def generate_subexpr_disposal_code(self, code): + pass # done explicitly above (cleanup must separately happen within the if/else blocks) + + def free_subexpr_temps(self, code): + pass # done explicitly above (cleanup must separately happen within the if/else blocks) + + +richcmp_constants = { + "<" : "Py_LT", + "<=": "Py_LE", + "==": "Py_EQ", + "!=": "Py_NE", + "<>": "Py_NE", + ">" : "Py_GT", + ">=": "Py_GE", + # the following are faked by special compare functions + "in" : "Py_EQ", + "not_in": "Py_NE", +} + +class CmpNode(object): + # Mixin class containing code common to PrimaryCmpNodes + # and CascadedCmpNodes. + + special_bool_cmp_function = None + special_bool_cmp_utility_code = None + special_bool_extra_args = [] + + def infer_type(self, env): + # TODO: Actually implement this (after merging with -unstable). + return py_object_type + + def calculate_cascaded_constant_result(self, operand1_result): + func = compile_time_binary_operators[self.operator] + operand2_result = self.operand2.constant_result + if (isinstance(operand1_result, any_string_type) and + isinstance(operand2_result, any_string_type) and + type(operand1_result) != type(operand2_result)): + # string comparison of different types isn't portable + return + + if self.operator in ('in', 'not_in'): + if isinstance(self.operand2, (ListNode, TupleNode, SetNode)): + if not self.operand2.args: + self.constant_result = self.operator == 'not_in' + return + elif isinstance(self.operand2, ListNode) and not self.cascade: + # tuples are more efficient to store than lists + self.operand2 = self.operand2.as_tuple() + elif isinstance(self.operand2, DictNode): + if not self.operand2.key_value_pairs: + self.constant_result = self.operator == 'not_in' + return + + self.constant_result = func(operand1_result, operand2_result) + + def cascaded_compile_time_value(self, operand1, denv): + func = get_compile_time_binop(self) + operand2 = self.operand2.compile_time_value(denv) + try: + result = func(operand1, operand2) + except Exception as e: + self.compile_time_value_error(e) + result = None + if result: + cascade = self.cascade + if cascade: + result = result and cascade.cascaded_compile_time_value(operand2, denv) + return result + + def is_cpp_comparison(self): + return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class + + def find_common_int_type(self, env, op, operand1, operand2): + # type1 != type2 and at least one of the types is not a C int + type1 = operand1.type + type2 = operand2.type + type1_can_be_int = False + type2_can_be_int = False + + if operand1.is_string_literal and operand1.can_coerce_to_char_literal(): + type1_can_be_int = True + if operand2.is_string_literal and 
operand2.can_coerce_to_char_literal(): + type2_can_be_int = True + + if type1.is_int: + if type2_can_be_int: + return type1 + elif type2.is_int: + if type1_can_be_int: + return type2 + elif type1_can_be_int: + if type2_can_be_int: + if Builtin.unicode_type in (type1, type2): + return PyrexTypes.c_py_ucs4_type + else: + return PyrexTypes.c_uchar_type + + return None + + def find_common_type(self, env, op, operand1, common_type=None): + operand2 = self.operand2 + type1 = operand1.type + type2 = operand2.type + + new_common_type = None + + # catch general errors + if (type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or + type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type))): + error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3") + new_common_type = error_type + + # try to use numeric comparisons where possible + elif type1.is_complex or type2.is_complex: + if (op not in ('==', '!=') + and (type1.is_complex or type1.is_numeric) + and (type2.is_complex or type2.is_numeric)): + error(self.pos, "complex types are unordered") + new_common_type = error_type + elif type1.is_pyobject: + new_common_type = Builtin.complex_type if type1.subtype_of(Builtin.complex_type) else py_object_type + elif type2.is_pyobject: + new_common_type = Builtin.complex_type if type2.subtype_of(Builtin.complex_type) else py_object_type + else: + new_common_type = PyrexTypes.widest_numeric_type(type1, type2) + elif type1.is_numeric and type2.is_numeric: + new_common_type = PyrexTypes.widest_numeric_type(type1, type2) + elif common_type is None or not common_type.is_pyobject: + new_common_type = self.find_common_int_type(env, op, operand1, operand2) + + if new_common_type is None: + # fall back to generic type compatibility tests + if type1.is_ctuple or type2.is_ctuple: + new_common_type = py_object_type + elif type1 == type2: + new_common_type = type1 + elif type1.is_pyobject or type2.is_pyobject: + if type2.is_numeric or type2.is_string: + if operand2.check_for_coercion_error(type1, env): + new_common_type = error_type + else: + new_common_type = py_object_type + elif type1.is_numeric or type1.is_string: + if operand1.check_for_coercion_error(type2, env): + new_common_type = error_type + else: + new_common_type = py_object_type + elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2): + new_common_type = py_object_type + else: + # one Python type and one non-Python type, not assignable + self.invalid_types_error(operand1, op, operand2) + new_common_type = error_type + elif type1.assignable_from(type2): + new_common_type = type1 + elif type2.assignable_from(type1): + new_common_type = type2 + else: + # C types that we couldn't handle up to here are an error + self.invalid_types_error(operand1, op, operand2) + new_common_type = error_type + + if new_common_type.is_string and (isinstance(operand1, BytesNode) or + isinstance(operand2, BytesNode)): + # special case when comparing char* to bytes literal: must + # compare string values! 
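+            # Illustrative example: for `cdef char* s` compared against the
+            # literal b"abc", a plain pointer comparison would compare
+            # addresses, so the common type is forced to bytes to get a
+            # value comparison instead.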
+ new_common_type = bytes_type + + # recursively merge types + if common_type is None or new_common_type.is_error: + common_type = new_common_type + else: + # we could do a lot better by splitting the comparison + # into a non-Python part and a Python part, but this is + # safer for now + common_type = PyrexTypes.spanning_type(common_type, new_common_type) + + if self.cascade: + common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type) + + return common_type + + def invalid_types_error(self, operand1, op, operand2): + error(self.pos, "Invalid types for '%s' (%s, %s)" % + (op, operand1.type, operand2.type)) + + def is_python_comparison(self): + return (not self.is_ptr_contains() + and not self.is_c_string_contains() + and (self.has_python_operands() + or (self.cascade and self.cascade.is_python_comparison()) + or self.operator in ('in', 'not_in'))) + + def coerce_operands_to(self, dst_type, env): + operand2 = self.operand2 + if operand2.type != dst_type: + self.operand2 = operand2.coerce_to(dst_type, env) + if self.cascade: + self.cascade.coerce_operands_to(dst_type, env) + + def is_python_result(self): + return ((self.has_python_operands() and + self.special_bool_cmp_function is None and + self.operator not in ('is', 'is_not', 'in', 'not_in') and + not self.is_c_string_contains() and + not self.is_ptr_contains()) + or (self.cascade and self.cascade.is_python_result())) + + def is_c_string_contains(self): + return self.operator in ('in', 'not_in') and \ + ((self.operand1.type.is_int + and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or + (self.operand1.type.is_unicode_char + and self.operand2.type is unicode_type)) + + def is_ptr_contains(self): + if self.operator in ('in', 'not_in'): + container_type = self.operand2.type + return (container_type.is_ptr or container_type.is_array) \ + and not container_type.is_string + + def find_special_bool_compare_function(self, env, operand1, result_is_bool=False): + # note: currently operand1 must get coerced to a Python object if we succeed here! 
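+        # Illustrative note: this hook lets `a == b` on known builtin types
+        # bypass PyObject_RichCompare(); e.g. two unicode operands dispatch
+        # to __Pyx_PyUnicode_Equals(a, b, Py_EQ) via the branches below.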
+ if self.operator in ('==', '!='): + type1, type2 = operand1.type, self.operand2.type + if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type): + if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type: + self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c") + self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals" + return True + elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type: + self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c") + self.special_bool_cmp_function = "__Pyx_PyBytes_Equals" + return True + elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type: + self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c") + self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals" + return True + elif type1 is Builtin.str_type or type2 is Builtin.str_type: + self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c") + self.special_bool_cmp_function = "__Pyx_PyString_Equals" + return True + elif result_is_bool: + from .Optimize import optimise_numeric_binop + result = optimise_numeric_binop( + "Eq" if self.operator == "==" else "Ne", + self, + PyrexTypes.c_bint_type, + operand1, + self.operand2 + ) + if result: + (self.special_bool_cmp_function, + self.special_bool_cmp_utility_code, + self.special_bool_extra_args, + _) = result + return True + elif self.operator in ('in', 'not_in'): + if self.operand2.type is Builtin.dict_type: + self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable") + self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c") + self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF" + return True + elif self.operand2.type is Builtin.set_type: + self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable") + self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySetContains", "ObjectHandling.c") + self.special_bool_cmp_function = "__Pyx_PySet_ContainsTF" + return True + elif self.operand2.type is Builtin.unicode_type: + self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable") + self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c") + self.special_bool_cmp_function = "__Pyx_PyUnicode_ContainsTF" + return True + else: + if not self.operand2.type.is_pyobject: + self.operand2 = self.operand2.coerce_to_pyobject(env) + self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c") + self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF" + return True + return False + + def generate_operation_code(self, code, result_code, + operand1, op, operand2): + if self.type.is_pyobject: + error_clause = code.error_goto_if_null + got_ref = "__Pyx_XGOTREF(%s); " % result_code + if self.special_bool_cmp_function: + code.globalstate.use_utility_code( + UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c")) + coerce_result = "__Pyx_PyBoolOrNull_FromLong" + else: + coerce_result = "__Pyx_PyBool_FromLong" + else: + error_clause = code.error_goto_if_neg + got_ref = "" + coerce_result = "" + + if self.special_bool_cmp_function: + if operand1.type.is_pyobject: + result1 = operand1.py_result() + else: + result1 = operand1.result() + if operand2.type.is_pyobject: + result2 = operand2.py_result() + else: + result2 = operand2.result() + 
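+            # Illustrative note: result1/result2 now hold the C expressions of
+            # both operands; the putln() below emits the specialised helper
+            # call, e.g. __Pyx_PyDict_ContainsTF(key, dict, Py_EQ) for
+            # `key in some_dict`, instead of a generic rich comparison.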
special_bool_extra_args_result = ", ".join([ + extra_arg.result() for extra_arg in self.special_bool_extra_args + ]) + if self.special_bool_cmp_utility_code: + code.globalstate.use_utility_code(self.special_bool_cmp_utility_code) + code.putln( + "%s = %s(%s(%s, %s, %s)); %s%s" % ( + result_code, + coerce_result, + self.special_bool_cmp_function, + result1, result2, + special_bool_extra_args_result if self.special_bool_extra_args else richcmp_constants[op], + got_ref, + error_clause(result_code, self.pos))) + + elif operand1.type.is_pyobject and op not in ('is', 'is_not'): + assert op not in ('in', 'not_in'), op + assert self.type.is_pyobject or self.type is PyrexTypes.c_bint_type + code.putln("%s = PyObject_RichCompare%s(%s, %s, %s); %s%s" % ( + result_code, + "" if self.type.is_pyobject else "Bool", + operand1.py_result(), + operand2.py_result(), + richcmp_constants[op], + got_ref, + error_clause(result_code, self.pos))) + + elif operand1.type.is_complex: + code.putln("%s = %s(%s%s(%s, %s));" % ( + result_code, + coerce_result, + op == "!=" and "!" or "", + operand1.type.unary_op('eq'), + operand1.result(), + operand2.result())) + + else: + type1 = operand1.type + type2 = operand2.type + if (type1.is_extension_type or type2.is_extension_type) \ + and not type1.same_as(type2): + common_type = py_object_type + elif type1.is_numeric: + common_type = PyrexTypes.widest_numeric_type(type1, type2) + else: + common_type = type1 + code1 = operand1.result_as(common_type) + code2 = operand2.result_as(common_type) + statement = "%s = %s(%s %s %s);" % ( + result_code, + coerce_result, + code1, + self.c_operator(op), + code2) + if self.is_cpp_comparison() and self.exception_check == '+': + translate_cpp_exception( + code, + self.pos, + statement, + result_code if self.type.is_pyobject else None, + self.exception_value, + self.in_nogil_context) + else: + code.putln(statement) + + def c_operator(self, op): + if op == 'is': + return "==" + elif op == 'is_not': + return "!=" + else: + return op + +class PrimaryCmpNode(ExprNode, CmpNode): + # Non-cascaded comparison or first comparison of + # a cascaded sequence. + # + # operator string + # operand1 ExprNode + # operand2 ExprNode + # cascade CascadedCmpNode + + # We don't use the subexprs mechanism, because + # things here are too complicated for it to handle. + # Instead, we override all the framework methods + # which use it. + + child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade', + 'special_bool_extra_args'] + + cascade = None + coerced_operand2 = None + is_memslice_nonecheck = False + + def infer_type(self, env): + type1 = self.operand1.infer_type(env) + type2 = self.operand2.infer_type(env) + + if is_pythran_expr(type1) or is_pythran_expr(type2): + if is_pythran_supported_type(type1) and is_pythran_supported_type(type2): + return PythranExpr(pythran_binop_type(self.operator, type1, type2)) + + # TODO: implement this for other types. 
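+        # Illustrative note: apart from the Pythran fast path above, the
+        # result of a comparison is conservatively inferred as a Python
+        # object here, e.g. `x = (a < b)` infers `x` as object, not bint.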
+ return py_object_type + + def type_dependencies(self, env): + return () + + def calculate_constant_result(self): + assert not self.cascade + self.calculate_cascaded_constant_result(self.operand1.constant_result) + + def compile_time_value(self, denv): + operand1 = self.operand1.compile_time_value(denv) + return self.cascaded_compile_time_value(operand1, denv) + + def unify_cascade_type(self): + cdr = self.cascade + while cdr: + cdr.type = self.type + cdr = cdr.cascade + + def analyse_types(self, env): + self.operand1 = self.operand1.analyse_types(env) + self.operand2 = self.operand2.analyse_types(env) + if self.is_cpp_comparison(): + self.analyse_cpp_comparison(env) + if self.cascade: + error(self.pos, "Cascading comparison not yet supported for cpp types.") + return self + + type1 = self.operand1.type + type2 = self.operand2.type + if is_pythran_expr(type1) or is_pythran_expr(type2): + if is_pythran_supported_type(type1) and is_pythran_supported_type(type2): + self.type = PythranExpr(pythran_binop_type(self.operator, type1, type2)) + self.is_pycmp = False + return self + + if self.analyse_memoryviewslice_comparison(env): + return self + + if self.cascade: + self.cascade = self.cascade.analyse_types(env) + + if self.operator in ('in', 'not_in'): + if self.is_c_string_contains(): + self.is_pycmp = False + common_type = None + if self.cascade: + error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.") + return self + if self.operand2.type is unicode_type: + env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c")) + else: + if self.operand1.type is PyrexTypes.c_uchar_type: + self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env) + if self.operand2.type is not bytes_type: + self.operand2 = self.operand2.coerce_to(bytes_type, env) + env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c")) + self.operand2 = self.operand2.as_none_safe_node( + "argument of type 'NoneType' is not iterable") + elif self.is_ptr_contains(): + if self.cascade: + error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.") + self.type = PyrexTypes.c_bint_type + # Will be transformed by IterationTransform + return self + elif self.find_special_bool_compare_function(env, self.operand1): + if not self.operand1.type.is_pyobject: + self.operand1 = self.operand1.coerce_to_pyobject(env) + common_type = None # if coercion needed, the method call above has already done it + self.is_pycmp = False # result is bint + else: + common_type = py_object_type + self.is_pycmp = True + elif self.find_special_bool_compare_function(env, self.operand1): + if not self.operand1.type.is_pyobject: + self.operand1 = self.operand1.coerce_to_pyobject(env) + common_type = None # if coercion needed, the method call above has already done it + self.is_pycmp = False # result is bint + else: + common_type = self.find_common_type(env, self.operator, self.operand1) + self.is_pycmp = common_type.is_pyobject + + if common_type is not None and not common_type.is_error: + if self.operand1.type != common_type: + self.operand1 = self.operand1.coerce_to(common_type, env) + self.coerce_operands_to(common_type, env) + + if self.cascade: + self.operand2 = self.operand2.coerce_to_simple(env) + self.cascade.coerce_cascaded_operands_to_temp(env) + operand2 = self.cascade.optimise_comparison(self.operand2, env) + if operand2 is not self.operand2: + self.coerced_operand2 = operand2 + if self.is_python_result(): + self.type = PyrexTypes.py_object_type + else: + 
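+            # Illustrative note: purely C-level comparisons (e.g. between two
+            # `cdef int` operands) produce a C bint result; only comparisons
+            # with Python semantics take the py_object_type branch above.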
self.type = PyrexTypes.c_bint_type + self.unify_cascade_type() + if self.is_pycmp or self.cascade or self.special_bool_cmp_function: + # 1) owned reference, 2) reused value, 3) potential function error return value + self.is_temp = 1 + return self + + def analyse_cpp_comparison(self, env): + type1 = self.operand1.type + type2 = self.operand2.type + self.is_pycmp = False + entry = env.lookup_operator(self.operator, [self.operand1, self.operand2]) + if entry is None: + error(self.pos, "Invalid types for '%s' (%s, %s)" % + (self.operator, type1, type2)) + self.type = PyrexTypes.error_type + self.result_code = "" + return + func_type = entry.type + if func_type.is_ptr: + func_type = func_type.base_type + self.exception_check = func_type.exception_check + self.exception_value = func_type.exception_value + if self.exception_check == '+': + self.is_temp = True + if needs_cpp_exception_conversion(self): + env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp")) + if len(func_type.args) == 1: + self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env) + else: + self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env) + self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env) + self.type = func_type.return_type + + def analyse_memoryviewslice_comparison(self, env): + have_none = self.operand1.is_none or self.operand2.is_none + have_slice = (self.operand1.type.is_memoryviewslice or + self.operand2.type.is_memoryviewslice) + ops = ('==', '!=', 'is', 'is_not') + if have_slice and have_none and self.operator in ops: + self.is_pycmp = False + self.type = PyrexTypes.c_bint_type + self.is_memslice_nonecheck = True + return True + + return False + + def coerce_to_boolean(self, env): + if self.is_pycmp: + # coercing to bool => may allow for more efficient comparison code + if self.find_special_bool_compare_function( + env, self.operand1, result_is_bool=True): + self.is_pycmp = False + self.type = PyrexTypes.c_bint_type + self.is_temp = 1 + if self.cascade: + operand2 = self.cascade.optimise_comparison( + self.operand2, env, result_is_bool=True) + if operand2 is not self.operand2: + self.coerced_operand2 = operand2 + self.unify_cascade_type() + return self + # TODO: check if we can optimise parts of the cascade here + return ExprNode.coerce_to_boolean(self, env) + + def has_python_operands(self): + return (self.operand1.type.is_pyobject + or self.operand2.type.is_pyobject) + + def check_const(self): + if self.cascade: + self.not_const() + return False + else: + return self.operand1.check_const() and self.operand2.check_const() + + def calculate_result_code(self): + operand1, operand2 = self.operand1, self.operand2 + if operand1.type.is_complex: + if self.operator == "!=": + negation = "!" + else: + negation = "" + return "(%s%s(%s, %s))" % ( + negation, + operand1.type.binary_op('=='), + operand1.result(), + operand2.result()) + elif self.is_c_string_contains(): + if operand2.type is unicode_type: + method = "__Pyx_UnicodeContainsUCS4" + else: + method = "__Pyx_BytesContains" + if self.operator == "not_in": + negation = "!" 
+ else: + negation = "" + return "(%s%s(%s, %s))" % ( + negation, + method, + operand2.result(), + operand1.result()) + else: + if is_pythran_expr(self.type): + result1, result2 = operand1.pythran_result(), operand2.pythran_result() + else: + result1, result2 = operand1.result(), operand2.result() + if self.is_memslice_nonecheck: + if operand1.type.is_memoryviewslice: + result1 = "((PyObject *) %s.memview)" % result1 + else: + result2 = "((PyObject *) %s.memview)" % result2 + + return "(%s %s %s)" % ( + result1, + self.c_operator(self.operator), + result2) + + def generate_evaluation_code(self, code): + self.operand1.generate_evaluation_code(code) + self.operand2.generate_evaluation_code(code) + for extra_arg in self.special_bool_extra_args: + extra_arg.generate_evaluation_code(code) + if self.is_temp: + self.allocate_temp_result(code) + self.generate_operation_code(code, self.result(), + self.operand1, self.operator, self.operand2) + if self.cascade: + self.cascade.generate_evaluation_code( + code, self.result(), self.coerced_operand2 or self.operand2, + needs_evaluation=self.coerced_operand2 is not None) + self.operand1.generate_disposal_code(code) + self.operand1.free_temps(code) + self.operand2.generate_disposal_code(code) + self.operand2.free_temps(code) + + def generate_subexpr_disposal_code(self, code): + # If this is called, it is a non-cascaded cmp, + # so only need to dispose of the two main operands. + self.operand1.generate_disposal_code(code) + self.operand2.generate_disposal_code(code) + + def free_subexpr_temps(self, code): + # If this is called, it is a non-cascaded cmp, + # so only need to dispose of the two main operands. + self.operand1.free_temps(code) + self.operand2.free_temps(code) + + def annotate(self, code): + self.operand1.annotate(code) + self.operand2.annotate(code) + if self.cascade: + self.cascade.annotate(code) + + +class CascadedCmpNode(Node, CmpNode): + # A CascadedCmpNode is not a complete expression node. It + # hangs off the side of another comparison node, shares + # its left operand with that node, and shares its result + # with the PrimaryCmpNode at the head of the chain. + # + # operator string + # operand2 ExprNode + # cascade CascadedCmpNode + + child_attrs = ['operand2', 'coerced_operand2', 'cascade', + 'special_bool_extra_args'] + + cascade = None + coerced_operand2 = None + constant_result = constant_value_not_set # FIXME: where to calculate this? + + def infer_type(self, env): + # TODO: Actually implement this (after merging with -unstable). + return py_object_type + + def type_dependencies(self, env): + return () + + def has_constant_result(self): + return self.constant_result is not constant_value_not_set and \ + self.constant_result is not not_a_constant + + def analyse_types(self, env): + self.operand2 = self.operand2.analyse_types(env) + if self.cascade: + self.cascade = self.cascade.analyse_types(env) + return self + + def has_python_operands(self): + return self.operand2.type.is_pyobject + + def is_cpp_comparison(self): + # cascaded comparisons aren't currently implemented for c++ classes. 
+ return False + + def optimise_comparison(self, operand1, env, result_is_bool=False): + if self.find_special_bool_compare_function(env, operand1, result_is_bool): + self.is_pycmp = False + self.type = PyrexTypes.c_bint_type + if not operand1.type.is_pyobject: + operand1 = operand1.coerce_to_pyobject(env) + if self.cascade: + operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool) + if operand2 is not self.operand2: + self.coerced_operand2 = operand2 + return operand1 + + def coerce_operands_to_pyobjects(self, env): + self.operand2 = self.operand2.coerce_to_pyobject(env) + if self.operand2.type is dict_type and self.operator in ('in', 'not_in'): + self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable") + if self.cascade: + self.cascade.coerce_operands_to_pyobjects(env) + + def coerce_cascaded_operands_to_temp(self, env): + if self.cascade: + #self.operand2 = self.operand2.coerce_to_temp(env) #CTT + self.operand2 = self.operand2.coerce_to_simple(env) + self.cascade.coerce_cascaded_operands_to_temp(env) + + def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False): + if self.type.is_pyobject: + code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result) + code.put_decref(result, self.type) + else: + code.putln("if (%s) {" % result) + if needs_evaluation: + operand1.generate_evaluation_code(code) + self.operand2.generate_evaluation_code(code) + for extra_arg in self.special_bool_extra_args: + extra_arg.generate_evaluation_code(code) + self.generate_operation_code(code, result, + operand1, self.operator, self.operand2) + if self.cascade: + self.cascade.generate_evaluation_code( + code, result, self.coerced_operand2 or self.operand2, + needs_evaluation=self.coerced_operand2 is not None) + if needs_evaluation: + operand1.generate_disposal_code(code) + operand1.free_temps(code) + # Cascaded cmp result is always temp + self.operand2.generate_disposal_code(code) + self.operand2.free_temps(code) + code.putln("}") + + def annotate(self, code): + self.operand2.annotate(code) + if self.cascade: + self.cascade.annotate(code) + + +binop_node_classes = { + "or": BoolBinopNode, + "and": BoolBinopNode, + "|": IntBinopNode, + "^": IntBinopNode, + "&": IntBinopNode, + "<<": IntBinopNode, + ">>": IntBinopNode, + "+": AddNode, + "-": SubNode, + "*": MulNode, + "@": MatMultNode, + "/": DivNode, + "//": DivNode, + "%": ModNode, + "**": PowNode, +} + + +def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs): + # Construct binop node of appropriate class for + # given operator. + return binop_node_classes[operator]( + pos, + operator=operator, + operand1=operand1, + operand2=operand2, + inplace=inplace, + **kwargs) + + +#------------------------------------------------------------------- +# +# Coercion nodes +# +# Coercion nodes are special in that they are created during +# the analyse_types phase of parse tree processing. +# Their __init__ methods consequently incorporate some aspects +# of that phase. +# +#------------------------------------------------------------------- + +class CoercionNode(ExprNode): + # Abstract base class for coercion nodes. 
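+    #  Illustrative example: coercion nodes are wrapped around existing nodes
+    #  during analyse_types(); conceptually, `cdef int i = some_py_obj`
+    #  rewrites the RHS as CoerceFromPyTypeNode(c_int_type, rhs, env).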
+ # + # arg ExprNode node being coerced + + subexprs = ['arg'] + constant_result = not_a_constant + + def __init__(self, arg): + super(CoercionNode, self).__init__(arg.pos) + self.arg = arg + if debug_coercion: + print("%s Coercing %s" % (self, self.arg)) + + def calculate_constant_result(self): + # constant folding can break type coercion, so this is disabled + pass + + def annotate(self, code): + self.arg.annotate(code) + if self.arg.type != self.type: + file, line, col = self.pos + code.annotate((file, line, col-1), AnnotationItem( + style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type))) + + def analyse_types(self, env): + return self + + +class CoerceToMemViewSliceNode(CoercionNode): + """ + Coerce an object to a memoryview slice. This holds a new reference in + a managed temp. + """ + + def __init__(self, arg, dst_type, env): + assert dst_type.is_memoryviewslice + assert not arg.type.is_memoryviewslice + CoercionNode.__init__(self, arg) + self.type = dst_type + self.is_temp = 1 + self.use_managed_ref = True + self.arg = arg + self.type.create_from_py_utility_code(env) + + def generate_result_code(self, code): + code.putln(self.type.from_py_call_code( + self.arg.py_result(), + self.result(), + self.pos, + code + )) + + +class CastNode(CoercionNode): + # Wrap a node in a C type cast. + + def __init__(self, arg, new_type): + CoercionNode.__init__(self, arg) + self.type = new_type + + def may_be_none(self): + return self.arg.may_be_none() + + def calculate_result_code(self): + return self.arg.result_as(self.type) + + def generate_result_code(self, code): + self.arg.generate_result_code(code) + + +class PyTypeTestNode(CoercionNode): + # This node is used to check that a generic Python + # object is an instance of a particular extension type. + # This node borrows the result of its argument node. + + exact_builtin_type = True + + def __init__(self, arg, dst_type, env, notnone=False): + # The arg is known to be a Python object, and + # the dst_type is known to be an extension type. 
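+        # Illustrative note: this emits the runtime check used when, e.g.,
+        # a `def f(list l)` argument receives an arbitrary object; the
+        # generated C tests the type (and optionally None) before use.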
+ assert dst_type.is_extension_type or dst_type.is_builtin_type, \ + "PyTypeTest for %s against non extension type %s" % (arg.type, dst_type) + CoercionNode.__init__(self, arg) + self.type = dst_type + self.result_ctype = arg.ctype() + self.notnone = notnone + + nogil_check = Node.gil_error + gil_message = "Python type test" + + def analyse_types(self, env): + return self + + def may_be_none(self): + if self.notnone: + return False + return self.arg.may_be_none() + + def is_simple(self): + return self.arg.is_simple() + + def result_in_temp(self): + return self.arg.result_in_temp() + + def is_ephemeral(self): + return self.arg.is_ephemeral() + + def nonlocally_immutable(self): + return self.arg.nonlocally_immutable() + + def reanalyse(self): + if self.type != self.arg.type or not self.arg.is_temp: + return self + if not self.type.typeobj_is_available(): + return self + if self.arg.may_be_none() and self.notnone: + return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name) + return self.arg + + def calculate_constant_result(self): + # FIXME + pass + + def calculate_result_code(self): + return self.arg.result() + + def generate_result_code(self, code): + if self.type.typeobj_is_available(): + if self.type.is_builtin_type: + type_test = self.type.type_test_code( + self.arg.py_result(), + self.notnone, exact=self.exact_builtin_type) + code.globalstate.use_utility_code(UtilityCode.load_cached( + "RaiseUnexpectedTypeError", "ObjectHandling.c")) + else: + type_test = self.type.type_test_code( + self.arg.py_result(), self.notnone) + code.globalstate.use_utility_code( + UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c")) + code.putln("if (!(%s)) %s" % ( + type_test, code.error_goto(self.pos))) + else: + error(self.pos, "Cannot test type of extern C class " + "without type object name specification") + + def generate_post_assignment_code(self, code): + self.arg.generate_post_assignment_code(code) + + def allocate_temp_result(self, code): + pass + + def release_temp_result(self, code): + pass + + def free_temps(self, code): + self.arg.free_temps(code) + + def free_subexpr_temps(self, code): + self.arg.free_subexpr_temps(code) + + +class NoneCheckNode(CoercionNode): + # This node is used to check that a Python object is not None and + # raises an appropriate exception (as specified by the creating + # transform). 
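+    #  Illustrative example: for a `cdef list l` that may be None, a call
+    #  like `l.append(x)` is guarded by code of the shape
+    #      if (unlikely(l == Py_None)) { PyErr_...; goto error; }
+    #  as emitted by put_nonecheck() below.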
+ + is_nonecheck = True + + def __init__(self, arg, exception_type_cname, exception_message, + exception_format_args=()): + CoercionNode.__init__(self, arg) + self.type = arg.type + self.result_ctype = arg.ctype() + self.exception_type_cname = exception_type_cname + self.exception_message = exception_message + self.exception_format_args = tuple(exception_format_args or ()) + + nogil_check = None # this node only guards an operation that would fail already + + def analyse_types(self, env): + return self + + def may_be_none(self): + return False + + def is_simple(self): + return self.arg.is_simple() + + def result_in_temp(self): + return self.arg.result_in_temp() + + def nonlocally_immutable(self): + return self.arg.nonlocally_immutable() + + def calculate_result_code(self): + return self.arg.result() + + def condition(self): + if self.type.is_pyobject: + return self.arg.py_result() + elif self.type.is_memoryviewslice: + return "((PyObject *) %s.memview)" % self.arg.result() + else: + raise Exception("unsupported type") + + @classmethod + def generate(cls, arg, code, exception_message, + exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False): + node = cls(arg, exception_type_cname, exception_message, exception_format_args) + node.in_nogil_context = in_nogil_context + node.put_nonecheck(code) + + @classmethod + def generate_if_needed(cls, arg, code, exception_message, + exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False): + if arg.may_be_none(): + cls.generate(arg, code, exception_message, exception_type_cname, exception_format_args, in_nogil_context) + + def put_nonecheck(self, code): + code.putln( + "if (unlikely(%s == Py_None)) {" % self.condition()) + + if self.in_nogil_context: + code.put_ensure_gil() + + escape = StringEncoding.escape_byte_string + if self.exception_format_args: + code.putln('PyErr_Format(%s, "%s", %s);' % ( + self.exception_type_cname, + StringEncoding.escape_byte_string( + self.exception_message.encode('UTF-8')), + ', '.join([ '"%s"' % escape(str(arg).encode('UTF-8')) + for arg in self.exception_format_args ]))) + else: + code.putln('PyErr_SetString(%s, "%s");' % ( + self.exception_type_cname, + escape(self.exception_message.encode('UTF-8')))) + + if self.in_nogil_context: + code.put_release_ensured_gil() + + code.putln(code.error_goto(self.pos)) + code.putln("}") + + def generate_result_code(self, code): + self.put_nonecheck(code) + + def generate_post_assignment_code(self, code): + self.arg.generate_post_assignment_code(code) + + def free_temps(self, code): + self.arg.free_temps(code) + + +class CoerceToPyTypeNode(CoercionNode): + # This node is used to convert a C data type + # to a Python object. 
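+    #  Illustrative example: wrapping a C double for use as a Python object
+    #  emits a conversion such as PyFloat_FromDouble(value), with the result
+    #  held in a reference-counted temporary (is_temp = 1 below).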
+ + type = py_object_type + target_type = py_object_type + is_temp = 1 + + def __init__(self, arg, env, type=py_object_type): + if not arg.type.create_to_py_utility_code(env): + error(arg.pos, "Cannot convert '%s' to Python object" % arg.type) + elif arg.type.is_complex: + # special case: complex coercion is so complex that it + # uses a macro ("__pyx_PyComplex_FromComplex()"), for + # which the argument must be simple + arg = arg.coerce_to_simple(env) + CoercionNode.__init__(self, arg) + if type is py_object_type: + # be specific about some known types + if arg.type.is_string or arg.type.is_cpp_string: + self.type = default_str_type(env) + elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char: + self.type = unicode_type + elif arg.type.is_complex: + self.type = Builtin.complex_type + self.target_type = self.type + elif arg.type.is_string or arg.type.is_cpp_string: + if (type not in (bytes_type, bytearray_type) + and not env.directives['c_string_encoding']): + error(arg.pos, + "default encoding required for conversion from '%s' to '%s'" % + (arg.type, type)) + self.type = self.target_type = type + else: + # FIXME: check that the target type and the resulting type are compatible + self.target_type = type + + gil_message = "Converting to Python object" + + def may_be_none(self): + # FIXME: is this always safe? + return False + + def coerce_to_boolean(self, env): + arg_type = self.arg.type + if (arg_type == PyrexTypes.c_bint_type or + (arg_type.is_pyobject and arg_type.name == 'bool')): + return self.arg.coerce_to_temp(env) + else: + return CoerceToBooleanNode(self, env) + + def coerce_to_integer(self, env): + # If not already some C integer type, coerce to longint. + if self.arg.type.is_int: + return self.arg + else: + return self.arg.coerce_to(PyrexTypes.c_long_type, env) + + def analyse_types(self, env): + # The arg is always already analysed + return self + + def generate_result_code(self, code): + code.putln('%s; %s' % ( + self.arg.type.to_py_call_code( + self.arg.result(), + self.result(), + self.target_type), + code.error_goto_if_null(self.result(), self.pos))) + + self.generate_gotref(code) + + +class CoerceIntToBytesNode(CoerceToPyTypeNode): + # This node is used to convert a C int type to a Python bytes + # object. + + is_temp = 1 + + def __init__(self, arg, env): + arg = arg.coerce_to_simple(env) + CoercionNode.__init__(self, arg) + self.type = Builtin.bytes_type + + def generate_result_code(self, code): + arg = self.arg + arg_result = arg.result() + if arg.type not in (PyrexTypes.c_char_type, + PyrexTypes.c_uchar_type, + PyrexTypes.c_schar_type): + if arg.type.signed: + code.putln("if ((%s < 0) || (%s > 255)) {" % ( + arg_result, arg_result)) + else: + code.putln("if (%s > 255) {" % arg_result) + code.putln('PyErr_SetString(PyExc_OverflowError, ' + '"value too large to pack into a byte"); %s' % ( + code.error_goto(self.pos))) + code.putln('}') + temp = None + if arg.type is not PyrexTypes.c_char_type: + temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False) + code.putln("%s = (char)%s;" % (temp, arg_result)) + arg_result = temp + code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % ( + self.result(), + arg_result, + code.error_goto_if_null(self.result(), self.pos))) + if temp is not None: + code.funcstate.release_temp(temp) + self.generate_gotref(code) + + +class CoerceFromPyTypeNode(CoercionNode): + # This node is used to convert a Python object + # to a C data type. 
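+    #  Illustrative example: the reverse of CoerceToPyTypeNode; `cdef int i = obj`
+    #  emits something like `i = __Pyx_PyInt_As_int(obj);` followed by an
+    #  error check, via from_py_call_code() below.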
+
+    # Allow 'None' to map to a different C value independent of the coercion, e.g. to 'NULL' or '0'.
+    special_none_cvalue = None
+
+    def __init__(self, result_type, arg, env):
+        CoercionNode.__init__(self, arg)
+        self.type = result_type
+        self.is_temp = 1
+        if not result_type.create_from_py_utility_code(env):
+            error(arg.pos,
+                  "Cannot convert Python object to '%s'" % result_type)
+        if self.type.is_string or self.type.is_pyunicode_ptr:
+            if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
+                warning(arg.pos,
+                        "Obtaining '%s' from externally modifiable global Python value" % result_type,
+                        level=1)
+            if self.type.is_pyunicode_ptr:
+                warning(arg.pos,
+                        "Py_UNICODE* has been removed in Python 3.12. This conversion to a "
+                        "Py_UNICODE* will no longer compile in the latest Python versions. "
+                        "Use Python C API functions like PyUnicode_AsWideCharString if you "
+                        "need to obtain a wchar_t* on Windows (and free the string manually after use).",
+                        level=1)
+
+    def analyse_types(self, env):
+        # The arg is always already analysed
+        return self
+
+    def is_ephemeral(self):
+        return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()
+
+    def generate_result_code(self, code):
+        from_py_function = None
+        # for certain source types, we can do better than the generic coercion
+        if self.type.is_string and self.arg.type is bytes_type:
+            if self.type.from_py_function.startswith('__Pyx_PyObject_As'):
+                from_py_function = '__Pyx_PyBytes' + self.type.from_py_function[len('__Pyx_PyObject'):]
+                NoneCheckNode.generate_if_needed(self.arg, code, "expected bytes, NoneType found")
+
+        code.putln(self.type.from_py_call_code(
+            self.arg.py_result(), self.result(), self.pos, code,
+            from_py_function=from_py_function,
+            special_none_cvalue=self.special_none_cvalue,
+        ))
+        if self.type.is_pyobject:
+            self.generate_gotref(code)
+
+    def nogil_check(self, env):
+        error(self.pos, "Coercion from Python not allowed without the GIL")
+
+
+class CoerceToBooleanNode(CoercionNode):
+    #  This node is used when a result needs to be used
+    #  in a boolean context.
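+    #  Illustrative note: for builtin containers the truth test is specialised
+    #  below, e.g. `if some_list:` becomes `PyList_GET_SIZE(...) != 0` instead
+    #  of a generic __Pyx_PyObject_IsTrue() call.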
+ + type = PyrexTypes.c_bint_type + + _special_builtins = { + Builtin.list_type: 'PyList_GET_SIZE', + Builtin.tuple_type: 'PyTuple_GET_SIZE', + Builtin.set_type: 'PySet_GET_SIZE', + Builtin.frozenset_type: 'PySet_GET_SIZE', + Builtin.bytes_type: 'PyBytes_GET_SIZE', + Builtin.bytearray_type: 'PyByteArray_GET_SIZE', + Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE', + } + + def __init__(self, arg, env): + CoercionNode.__init__(self, arg) + if arg.type.is_pyobject: + self.is_temp = 1 + + def nogil_check(self, env): + if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None: + self.gil_error() + + gil_message = "Truth-testing Python object" + + def check_const(self): + if self.is_temp: + self.not_const() + return False + return self.arg.check_const() + + def calculate_result_code(self): + return "(%s != 0)" % self.arg.result() + + def generate_result_code(self, code): + if not self.is_temp: + return + test_func = self._special_builtins.get(self.arg.type) + if test_func is not None: + checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else [] + checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result())) + code.putln("%s = %s;" % (self.result(), '&&'.join(checks))) + else: + code.putln( + "%s = __Pyx_PyObject_IsTrue(%s); %s" % ( + self.result(), + self.arg.py_result(), + code.error_goto_if_neg(self.result(), self.pos))) + + def analyse_types(self, env): + return self + + +class CoerceToComplexNode(CoercionNode): + + def __init__(self, arg, dst_type, env): + if arg.type.is_complex: + arg = arg.coerce_to_simple(env) + self.type = dst_type + CoercionNode.__init__(self, arg) + dst_type.create_declaration_utility_code(env) + + def calculate_result_code(self): + if self.arg.type.is_complex: + real_part = self.arg.type.real_code(self.arg.result()) + imag_part = self.arg.type.imag_code(self.arg.result()) + else: + real_part = self.arg.result() + imag_part = "0" + return "%s(%s, %s)" % ( + self.type.from_parts, + real_part, + imag_part) + + def generate_result_code(self, code): + pass + + def analyse_types(self, env): + return self + + +def coerce_from_soft_complex(arg, dst_type, env): + from .UtilNodes import HasGilNode + cfunc_type = PyrexTypes.CFuncType( + PyrexTypes.c_double_type, + [ PyrexTypes.CFuncTypeArg("value", PyrexTypes.soft_complex_type, None), + PyrexTypes.CFuncTypeArg("have_gil", PyrexTypes.c_bint_type, None) ], + exception_value="-1", + exception_check=True, + nogil=True # We can acquire the GIL internally on failure + ) + call = PythonCapiCallNode( + arg.pos, + "__Pyx_SoftComplexToDouble", + cfunc_type, + utility_code = UtilityCode.load_cached("SoftComplexToDouble", "Complex.c"), + args = [arg, HasGilNode(arg.pos)], + ) + call = call.analyse_types(env) + if call.type != dst_type: + call = call.coerce_to(dst_type, env) + return call + + +class CoerceToTempNode(CoercionNode): + # This node is used to force the result of another node + # to be stored in a temporary. It is only used if the + # argument node's result is not already in a temporary. 
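+    #  Illustrative note: e.g. a value that must survive multiple uses is
+    #  copied once into a temp (__pyx_t_N) with an owned reference, as
+    #  generate_result_code() below shows.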
+ + def __init__(self, arg, env): + CoercionNode.__init__(self, arg) + self.type = self.arg.type.as_argument_type() + self.constant_result = self.arg.constant_result + self.is_temp = 1 + if self.type.is_pyobject: + self.result_ctype = py_object_type + + gil_message = "Creating temporary Python reference" + + def analyse_types(self, env): + # The arg is always already analysed + return self + + def may_be_none(self): + return self.arg.may_be_none() + + def coerce_to_boolean(self, env): + self.arg = self.arg.coerce_to_boolean(env) + if self.arg.is_simple(): + return self.arg + self.type = self.arg.type + self.result_ctype = self.type + return self + + def generate_result_code(self, code): + #self.arg.generate_evaluation_code(code) # Already done + # by generic generate_subexpr_evaluation_code! + code.putln("%s = %s;" % ( + self.result(), self.arg.result_as(self.ctype()))) + if self.use_managed_ref: + if not self.type.is_memoryviewslice: + code.put_incref(self.result(), self.ctype()) + else: + code.put_incref_memoryviewslice(self.result(), self.type, + have_gil=not self.in_nogil_context) + + +class ProxyNode(CoercionNode): + """ + A node that should not be replaced by transforms or other means, + and hence can be useful to wrap the argument to a clone node + + MyNode -> ProxyNode -> ArgNode + CloneNode -^ + """ + + nogil_check = None + + def __init__(self, arg): + super(ProxyNode, self).__init__(arg) + self.constant_result = arg.constant_result + self.update_type_and_entry() + + def analyse_types(self, env): + self.arg = self.arg.analyse_expressions(env) + self.update_type_and_entry() + return self + + def infer_type(self, env): + return self.arg.infer_type(env) + + def update_type_and_entry(self): + type = getattr(self.arg, 'type', None) + if type: + self.type = type + self.result_ctype = self.arg.result_ctype + arg_entry = getattr(self.arg, 'entry', None) + if arg_entry: + self.entry = arg_entry + + def generate_result_code(self, code): + self.arg.generate_result_code(code) + + def result(self): + return self.arg.result() + + def is_simple(self): + return self.arg.is_simple() + + def may_be_none(self): + return self.arg.may_be_none() + + def generate_evaluation_code(self, code): + self.arg.generate_evaluation_code(code) + + def generate_disposal_code(self, code): + self.arg.generate_disposal_code(code) + + def free_temps(self, code): + self.arg.free_temps(code) + +class CloneNode(CoercionNode): + # This node is employed when the result of another node needs + # to be used multiple times. The argument node's result must + # be in a temporary. This node "borrows" the result from the + # argument node, and does not generate any evaluation or + # disposal code for it. The original owner of the argument + # node is responsible for doing those things. 
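+    #  Illustrative example: assignment expressions reuse a value this way;
+    #  in `(x := f())` the RHS temp is evaluated once and CloneNode hands the
+    #  same temp to both the assignment and the surrounding expression.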
+ + subexprs = [] # Arg is not considered a subexpr + nogil_check = None + + def __init__(self, arg): + CoercionNode.__init__(self, arg) + self.constant_result = arg.constant_result + type = getattr(arg, 'type', None) + if type: + self.type = type + self.result_ctype = arg.result_ctype + arg_entry = getattr(arg, 'entry', None) + if arg_entry: + self.entry = arg_entry + + def result(self): + return self.arg.result() + + def may_be_none(self): + return self.arg.may_be_none() + + def type_dependencies(self, env): + return self.arg.type_dependencies(env) + + def infer_type(self, env): + return self.arg.infer_type(env) + + def analyse_types(self, env): + self.type = self.arg.type + self.result_ctype = self.arg.result_ctype + self.is_temp = 1 + arg_entry = getattr(self.arg, 'entry', None) + if arg_entry: + self.entry = arg_entry + return self + + def coerce_to(self, dest_type, env): + if self.arg.is_literal: + return self.arg.coerce_to(dest_type, env) + return super(CloneNode, self).coerce_to(dest_type, env) + + def is_simple(self): + return True # result is always in a temp (or a name) + + def generate_evaluation_code(self, code): + pass + + def generate_result_code(self, code): + pass + + def generate_disposal_code(self, code): + pass + + def generate_post_assignment_code(self, code): + # if we're assigning from a CloneNode then it's "giveref"ed away, so it does + # need a matching incref (ideally this should happen before the assignment though) + if self.is_temp: # should usually be true + code.put_incref(self.result(), self.ctype()) + + def free_temps(self, code): + pass + + +class CppOptionalTempCoercion(CoercionNode): + """ + Used only in CoerceCppTemps - handles cases the temp is actually a OptionalCppClassType (and thus needs dereferencing when on the rhs) + """ + is_temp = False + + @property + def type(self): + return self.arg.type + + def calculate_result_code(self): + return "(*%s)" % self.arg.result() + + def generate_result_code(self, code): + pass + + def _make_move_result_rhs(self, result, optional=False): + # this wouldn't normally get moved (because it isn't a temp), but force it to be because it + # is a thin wrapper around a temp + return super(CppOptionalTempCoercion, self)._make_move_result_rhs(result, optional=False) + + +class CMethodSelfCloneNode(CloneNode): + # Special CloneNode for the self argument of builtin C methods + # that accepts subtypes of the builtin type. This is safe only + # for 'final' subtypes, as subtypes of the declared type may + # override the C method. 
+
+    def coerce_to(self, dst_type, env):
+        if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
+            return self
+        return CloneNode.coerce_to(self, dst_type, env)
+
+
+class ModuleRefNode(ExprNode):
+    #  Simply returns the module object
+
+    type = py_object_type
+    is_temp = False
+    subexprs = []
+
+    def analyse_types(self, env):
+        return self
+
+    def may_be_none(self):
+        return False
+
+    def calculate_result_code(self):
+        return Naming.module_cname
+
+    def generate_result_code(self, code):
+        pass
+
+class DocstringRefNode(ExprNode):
+    #  Extracts the docstring of the body element
+
+    subexprs = ['body']
+    type = py_object_type
+    is_temp = True
+
+    def __init__(self, pos, body):
+        ExprNode.__init__(self, pos)
+        assert body.type.is_pyobject
+        self.body = body
+
+    def analyse_types(self, env):
+        return self
+
+    def generate_result_code(self, code):
+        code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
+            self.result(), self.body.result(),
+            code.intern_identifier(StringEncoding.EncodedString("__doc__")),
+            code.error_goto_if_null(self.result(), self.pos)))
+        self.generate_gotref(code)
+
+class AnnotationNode(ExprNode):
+    # Deals with the two possible uses of an annotation.
+    # 1. The post PEP-563 use where an annotation is stored
+    #    as a string
+    # 2. The Cython use where the annotation can indicate an
+    #    object type
+    #
+    # Doesn't handle the pre PEP-563 version where the
+    # annotation is evaluated into a Python object.
+
+    subexprs = []
+
+    # 'untyped' is set for fused specializations:
+    # Once a fused function has been created we don't want
+    # annotations to override an already set type.
+    untyped = False
+
+    def __init__(self, pos, expr, string=None):
+        """string is expected to already be a StringNode or None"""
+        ExprNode.__init__(self, pos)
+        if string is None:
+            # import doesn't work at top of file?
+ from .AutoDocTransforms import AnnotationWriter + string = StringEncoding.EncodedString( + AnnotationWriter(description="annotation").write(expr)) + string = StringNode(pos, unicode_value=string, value=string.as_utf8_string()) + self.string = string + self.expr = expr + + def analyse_types(self, env): + return self # nothing needs doing + + def analyse_as_type(self, env): + # for compatibility when used as a return_type_node, have this interface too + return self.analyse_type_annotation(env)[1] + + def _warn_on_unknown_annotation(self, env, annotation): + """Method checks for cases when user should be warned that annotation contains unknown types.""" + if isinstance(annotation, SliceIndexNode): + annotation = annotation.base + if annotation.is_name: + # Validate annotation in form `var: type` + if not env.lookup(annotation.name): + warning(annotation.pos, + "Unknown type declaration '%s' in annotation, ignoring" % self.string.value, level=1) + elif annotation.is_attribute and annotation.obj.is_name: + # Validate annotation in form `var: module.type` + if not env.lookup(annotation.obj.name): + # `module` is undeclared + warning(annotation.pos, + "Unknown type declaration '%s' in annotation, ignoring" % self.string.value, level=1) + elif annotation.obj.is_cython_module: + # `module` is cython + module_scope = annotation.obj.analyse_as_module(env) + if module_scope and not module_scope.lookup_type(annotation.attribute): + error(annotation.pos, + "Unknown type declaration '%s' in annotation" % self.string.value) + else: + module_scope = annotation.obj.analyse_as_module(env) + if module_scope and module_scope.pxd_file_loaded: + warning(annotation.pos, + "Unknown type declaration '%s' in annotation, ignoring" % self.string.value, level=1) + else: + warning(annotation.pos, "Unknown type declaration in annotation, ignoring") + + def analyse_type_annotation(self, env, assigned_value=None): + if self.untyped: + # Already applied as a fused type, not re-evaluating it here. + return [], None + annotation = self.expr + explicit_pytype = explicit_ctype = False + if annotation.is_dict_literal: + warning(annotation.pos, + "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.", level=1) + for name, value in annotation.key_value_pairs: + if not name.is_string_literal: + continue + if name.value in ('type', b'type'): + explicit_pytype = True + if not explicit_ctype: + annotation = value + elif name.value in ('ctype', b'ctype'): + explicit_ctype = True + annotation = value + if explicit_pytype and explicit_ctype: + warning(annotation.pos, "Duplicate type declarations found in signature annotation", level=1) + elif isinstance(annotation, TupleNode): + warning(annotation.pos, + "Tuples cannot be declared as simple tuples of types. Use 'tuple[type1, type2, ...]'.", level=1) + return [], None + + with env.new_c_type_context(in_c_type_context=explicit_ctype): + arg_type = annotation.analyse_as_type(env) + + if arg_type is None: + self._warn_on_unknown_annotation(env, annotation) + return [], arg_type + + if annotation.is_string_literal: + warning(annotation.pos, + "Strings should no longer be used for type declarations. Use 'cython.int' etc. 
directly.", + level=1) + if explicit_pytype and not explicit_ctype and not (arg_type.is_pyobject or arg_type.equivalent_type): + warning(annotation.pos, + "Python type declaration in signature annotation does not refer to a Python type") + if arg_type.is_complex: + # creating utility code needs to be special-cased for complex types + arg_type.create_declaration_utility_code(env) + + # Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]" + modifiers = annotation.analyse_pytyping_modifiers(env) if annotation.is_subscript else [] + + return modifiers, arg_type + + +class AssignmentExpressionNode(ExprNode): + """ + Also known as a named expression or the walrus operator + + Arguments + lhs - NameNode - not stored directly as an attribute of the node + rhs - ExprNode + + Attributes + rhs - ExprNode + assignment - SingleAssignmentNode + """ + # subexprs and child_attrs are intentionally different here, because the assignment is not an expression + subexprs = ["rhs"] + child_attrs = ["rhs", "assignment"] # This order is important for control-flow (i.e. xdecref) to be right + + is_temp = False + assignment = None + clone_node = None + + def __init__(self, pos, lhs, rhs, **kwds): + super(AssignmentExpressionNode, self).__init__(pos, **kwds) + self.rhs = ProxyNode(rhs) + assign_expr_rhs = CloneNode(self.rhs) + self.assignment = SingleAssignmentNode( + pos, lhs=lhs, rhs=assign_expr_rhs, is_assignment_expression=True) + + @property + def type(self): + return self.rhs.type + + @property + def target_name(self): + return self.assignment.lhs.name + + def infer_type(self, env): + return self.rhs.infer_type(env) + + def analyse_declarations(self, env): + self.assignment.analyse_declarations(env) + + def analyse_types(self, env): + # we're trying to generate code that looks roughly like: + # __pyx_t_1 = rhs + # lhs = __pyx_t_1 + # __pyx_t_1 + # (plus any reference counting that's needed) + + self.rhs = self.rhs.analyse_types(env) + if not self.rhs.arg.is_temp: + if not self.rhs.arg.is_literal: + # for anything but the simplest cases (where it can be used directly) + # we convert rhs to a temp, because CloneNode requires arg to be a temp + self.rhs.arg = self.rhs.arg.coerce_to_temp(env) + else: + # For literals we can optimize by just using the literal twice + # + # We aren't including `self.rhs.is_name` in this optimization + # because that goes wrong for assignment expressions run in + # parallel. e.g. 
`(a := b) + (b := a + c)`) + # This is a special case of https://github.com/cython/cython/issues/4146 + # TODO - once that's fixed general revisit this code and possibly + # use coerce_to_simple + self.assignment.rhs = copy.copy(self.rhs) + + # TODO - there's a missed optimization in the code generation stage + # for self.rhs.arg.is_temp: an incref/decref pair can be removed + # (but needs a general mechanism to do that) + self.assignment = self.assignment.analyse_types(env) + return self + + def coerce_to(self, dst_type, env): + if dst_type == self.assignment.rhs.type: + # in this quite common case (for example, when both lhs, and self are being coerced to Python) + # we can optimize the coercion out by sharing it between + # this and the assignment + old_rhs_arg = self.rhs.arg + if isinstance(old_rhs_arg, CoerceToTempNode): + old_rhs_arg = old_rhs_arg.arg + rhs_arg = old_rhs_arg.coerce_to(dst_type, env) + if rhs_arg is not old_rhs_arg: + self.rhs.arg = rhs_arg + self.rhs.update_type_and_entry() + # clean up the old coercion node that the assignment has likely generated + if (isinstance(self.assignment.rhs, CoercionNode) + and not isinstance(self.assignment.rhs, CloneNode)): + self.assignment.rhs = self.assignment.rhs.arg + self.assignment.rhs.type = self.assignment.rhs.arg.type + return self + return super(AssignmentExpressionNode, self).coerce_to(dst_type, env) + + def calculate_result_code(self): + return self.rhs.result() + + def generate_result_code(self, code): + # we have to do this manually because it isn't a subexpression + self.assignment.generate_execution_code(code) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..ecd02d788a4ef207aea16b6d8304cb63c161597d Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.cp39-win_amd64.pyd differ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.pxd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.pxd new file mode 100644 index 0000000000000000000000000000000000000000..5338d4fe490aacf8bc3b781ffa82451f1548404e --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.pxd @@ -0,0 +1,111 @@ +# cython: language_level=3 + +cimport cython + +from .Visitor cimport CythonTransform, TreeVisitor + +cdef class ControlBlock: + cdef public set children + cdef public set parents + cdef public set positions + cdef public list stats + cdef public dict gen + cdef public set bounded + + # Big integer bitsets + cdef public object i_input + cdef public object i_output + cdef public object i_gen + cdef public object i_kill + cdef public object i_state + + cpdef bint empty(self) + cpdef detach(self) + cpdef add_child(self, block) + +cdef class ExitBlock(ControlBlock): + cpdef bint empty(self) + +cdef class NameAssignment: + cdef public bint is_arg + cdef public bint is_deletion + cdef public object lhs + cdef public object rhs + cdef public object entry + cdef public object pos + cdef public set refs + cdef public object bit + cdef public object inferred_type + cdef public object rhs_scope + +cdef class AssignmentList: + cdef public object bit + cdef public object mask + cdef public list stats + +cdef class AssignmentCollector(TreeVisitor): + cdef list assignments + +@cython.final +cdef class ControlFlow: + cdef public set blocks + cdef 
public set entries + cdef public list loops + cdef public list exceptions + + cdef public ControlBlock entry_point + cdef public ExitBlock exit_point + cdef public ControlBlock block + + cdef public dict assmts + + cdef public Py_ssize_t in_try_block + + cpdef newblock(self, ControlBlock parent=*) + cpdef nextblock(self, ControlBlock parent=*) + cpdef bint is_tracked(self, entry) + cpdef bint is_statically_assigned(self, entry) + cpdef mark_position(self, node) + cpdef mark_assignment(self, lhs, rhs, entry, rhs_scope=*) + cpdef mark_argument(self, lhs, rhs, entry) + cpdef mark_deletion(self, node, entry) + cpdef mark_reference(self, node, entry) + + @cython.locals(block=ControlBlock, parent=ControlBlock, unreachable=set) + cpdef normalize(self) + + @cython.locals(bit=object, assmts=AssignmentList, block=ControlBlock) + cpdef initialize(self) + + @cython.locals(assmts=AssignmentList, assmt=NameAssignment) + cpdef set map_one(self, istate, entry) + + @cython.locals(block=ControlBlock, parent=ControlBlock) + cdef reaching_definitions(self) + +cdef class Uninitialized: + pass + +cdef class Unknown: + pass + +cdef class MessageCollection: + cdef set messages + +@cython.locals(dirty=bint, block=ControlBlock, parent=ControlBlock, + assmt=NameAssignment) +cdef check_definitions(ControlFlow flow, dict compiler_directives) + +@cython.final +cdef class ControlFlowAnalysis(CythonTransform): + cdef object gv_ctx + cdef object constant_folder + cdef set reductions + cdef list stack # a stack of (env, flow) tuples + cdef object env + cdef ControlFlow flow + cdef object object_expr + cdef bint in_inplace_assignment + + cpdef mark_assignment(self, lhs, rhs=*, rhs_scope=*) + cpdef mark_position(self, node) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.py new file mode 100644 index 0000000000000000000000000000000000000000..c8575435738eada568f58c894ced50e4afccb449 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.py @@ -0,0 +1,1383 @@ +# cython: language_level=3str +# cython: auto_pickle=True + +from __future__ import absolute_import + +import cython +cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object, Builtin=object, + Options=object, TreeVisitor=object, CythonTransform=object, + InternalError=object, error=object, warning=object, + fake_rhs_expr=object, TypedExprNode=object) + +from . import Builtin +from . import ExprNodes +from . import Nodes +from . import Options +from . import PyrexTypes + +from .Visitor import TreeVisitor, CythonTransform +from .Errors import error, warning, InternalError + + +class TypedExprNode(ExprNodes.ExprNode): + # Used for declaring assignments of a specified type without a known entry. + def __init__(self, type, may_be_none=None, pos=None): + super(TypedExprNode, self).__init__(pos) + self.type = type + self._may_be_none = may_be_none + + def may_be_none(self): + return self._may_be_none != False + +# Fake rhs to silence "unused variable" warning +fake_rhs_expr = TypedExprNode(PyrexTypes.unspecified_type) + + +class ControlBlock(object): + """Control flow graph node. Sequence of assignments and name references. 
+ + children set of children nodes + parents set of parent nodes + positions set of position markers + + stats list of block statements + gen dict of assignments generated by this block + bounded set of entries that are definitely bounded in this block + + Example: + + a = 1 + b = a + c # 'c' is already bounded or exception here + + stats = [Assignment(a), NameReference(a), NameReference(c), + Assignment(b)] + gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)} + bounded = {Entry(a), Entry(c)} + + """ + + def __init__(self): + self.children = set() + self.parents = set() + self.positions = set() + + self.stats = [] + self.gen = {} + self.bounded = set() + + self.i_input = 0 + self.i_output = 0 + self.i_gen = 0 + self.i_kill = 0 + self.i_state = 0 + + def empty(self): + return (not self.stats and not self.positions) + + def detach(self): + """Detach block from parents and children.""" + for child in self.children: + child.parents.remove(self) + for parent in self.parents: + parent.children.remove(self) + self.parents.clear() + self.children.clear() + + def add_child(self, block): + self.children.add(block) + block.parents.add(self) + + +class ExitBlock(ControlBlock): + """Non-empty exit point block.""" + + def empty(self): + return False + + +class AssignmentList(object): + def __init__(self): + self.stats = [] + + +class ControlFlow(object): + """Control-flow graph. + + entry_point ControlBlock entry point for this graph + exit_point ControlBlock normal exit point + block ControlBlock current block + blocks set children nodes + entries set tracked entries + loops list stack for loop descriptors + exceptions list stack for exception descriptors + in_try_block int track if we're in a try...except or try...finally block + """ + + def __init__(self): + self.blocks = set() + self.entries = set() + self.loops = [] + self.exceptions = [] + + self.entry_point = ControlBlock() + self.exit_point = ExitBlock() + self.blocks.add(self.exit_point) + self.block = self.entry_point + self.in_try_block = 0 + + def newblock(self, parent=None): + """Create floating block linked to `parent` if given. + + NOTE: Block is NOT added to self.blocks + """ + block = ControlBlock() + self.blocks.add(block) + if parent: + parent.add_child(block) + return block + + def nextblock(self, parent=None): + """Create block children block linked to current or `parent` if given. 
+ + NOTE: Block is added to self.blocks + """ + block = ControlBlock() + self.blocks.add(block) + if parent: + parent.add_child(block) + elif self.block: + self.block.add_child(block) + self.block = block + return self.block + + def is_tracked(self, entry): + if entry.is_anonymous: + return False + return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or + entry.from_closure or entry.in_closure or + entry.error_on_uninitialized) + + def is_statically_assigned(self, entry): + if (entry.is_local and entry.is_variable and + (entry.type.is_struct_or_union or + entry.type.is_complex or + entry.type.is_array or + (entry.type.is_cpp_class and not entry.is_cpp_optional))): + # stack allocated structured variable => never uninitialised + return True + return False + + def mark_position(self, node): + """Mark position, will be used to draw graph nodes.""" + if self.block: + self.block.positions.add(node.pos[:2]) + + def mark_assignment(self, lhs, rhs, entry, rhs_scope=None): + if self.block and self.is_tracked(entry): + assignment = NameAssignment(lhs, rhs, entry, rhs_scope=rhs_scope) + self.block.stats.append(assignment) + self.block.gen[entry] = assignment + self.entries.add(entry) + + def mark_argument(self, lhs, rhs, entry): + if self.block and self.is_tracked(entry): + assignment = Argument(lhs, rhs, entry) + self.block.stats.append(assignment) + self.block.gen[entry] = assignment + self.entries.add(entry) + + def mark_deletion(self, node, entry): + if self.block and self.is_tracked(entry): + assignment = NameDeletion(node, entry) + self.block.stats.append(assignment) + self.block.gen[entry] = Uninitialized + self.entries.add(entry) + + def mark_reference(self, node, entry): + if self.block and self.is_tracked(entry): + self.block.stats.append(NameReference(node, entry)) + ## XXX: We don't track expression evaluation order so we can't use + ## XXX: successful reference as initialization sign. 
+ ## # Local variable is definitely bound after this reference + ## if not node.allow_null: + ## self.block.bounded.add(entry) + self.entries.add(entry) + + def normalize(self): + """Delete unreachable and orphan blocks.""" + queue = {self.entry_point} + visited = set() + while queue: + root = queue.pop() + visited.add(root) + for child in root.children: + if child not in visited: + queue.add(child) + unreachable = self.blocks - visited + for block in unreachable: + block.detach() + visited.remove(self.entry_point) + for block in visited: + if block.empty(): + for parent in block.parents: # Re-parent + for child in block.children: + parent.add_child(child) + block.detach() + unreachable.add(block) + self.blocks -= unreachable + + def initialize(self): + """Set initial state, map assignments to bits.""" + self.assmts = {} + + bit = 1 + for entry in self.entries: + assmts = AssignmentList() + assmts.mask = assmts.bit = bit + self.assmts[entry] = assmts + bit <<= 1 + + for block in self.blocks: + for stat in block.stats: + if isinstance(stat, NameAssignment): + stat.bit = bit + assmts = self.assmts[stat.entry] + assmts.stats.append(stat) + assmts.mask |= bit + bit <<= 1 + + for block in self.blocks: + for entry, stat in block.gen.items(): + assmts = self.assmts[entry] + if stat is Uninitialized: + block.i_gen |= assmts.bit + else: + block.i_gen |= stat.bit + block.i_kill |= assmts.mask + block.i_output = block.i_gen + for entry in block.bounded: + block.i_kill |= self.assmts[entry].bit + + for assmts in self.assmts.values(): + self.entry_point.i_gen |= assmts.bit + self.entry_point.i_output = self.entry_point.i_gen + + def map_one(self, istate, entry): + ret = set() + assmts = self.assmts[entry] + if istate & assmts.bit: + if self.is_statically_assigned(entry): + ret.add(StaticAssignment(entry)) + elif entry.from_closure: + ret.add(Unknown) + else: + ret.add(Uninitialized) + for assmt in assmts.stats: + if istate & assmt.bit: + ret.add(assmt) + return ret + + def reaching_definitions(self): + """Per-block reaching definitions analysis.""" + dirty = True + while dirty: + dirty = False + for block in self.blocks: + i_input = 0 + for parent in block.parents: + i_input |= parent.i_output + i_output = (i_input & ~block.i_kill) | block.i_gen + if i_output != block.i_output: + dirty = True + block.i_input = i_input + block.i_output = i_output + + +class LoopDescr(object): + def __init__(self, next_block, loop_block): + self.next_block = next_block + self.loop_block = loop_block + self.exceptions = [] + + +class ExceptionDescr(object): + """Exception handling helper. + + entry_point ControlBlock Exception handling entry point + finally_enter ControlBlock Normal finally clause entry point + finally_exit ControlBlock Normal finally clause exit point + """ + + def __init__(self, entry_point, finally_enter=None, finally_exit=None): + self.entry_point = entry_point + self.finally_enter = finally_enter + self.finally_exit = finally_exit + + +class NameAssignment(object): + def __init__(self, lhs, rhs, entry, rhs_scope=None): + if lhs.cf_state is None: + lhs.cf_state = set() + self.lhs = lhs + self.rhs = rhs + self.entry = entry + self.pos = lhs.pos + self.refs = set() + self.is_arg = False + self.is_deletion = False + self.inferred_type = None + # For generator expression targets, the rhs can have a different scope than the lhs. 
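+ # (e.g. in a generator expression "(x*x for x in seq)", the iterable "seq" is evaluated in the enclosing scope, while the target "x" belongs to the generator's own scope)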
+ self.rhs_scope = rhs_scope + + def __repr__(self): + return '%s(entry=%r)' % (self.__class__.__name__, self.entry) + + def infer_type(self): + self.inferred_type = self.rhs.infer_type(self.rhs_scope or self.entry.scope) + return self.inferred_type + + def type_dependencies(self): + return self.rhs.type_dependencies(self.rhs_scope or self.entry.scope) + + @property + def type(self): + if not self.entry.type.is_unspecified: + return self.entry.type + return self.inferred_type + + +class StaticAssignment(NameAssignment): + """Initialised at declaration time, e.g. stack allocation.""" + def __init__(self, entry): + if not entry.type.is_pyobject: + may_be_none = False + else: + may_be_none = None # unknown + lhs = TypedExprNode( + entry.type, may_be_none=may_be_none, pos=entry.pos) + super(StaticAssignment, self).__init__(lhs, lhs, entry) + + def infer_type(self): + return self.entry.type + + def type_dependencies(self): + return () + + +class Argument(NameAssignment): + def __init__(self, lhs, rhs, entry): + NameAssignment.__init__(self, lhs, rhs, entry) + self.is_arg = True + + +class NameDeletion(NameAssignment): + def __init__(self, lhs, entry): + NameAssignment.__init__(self, lhs, lhs, entry) + self.is_deletion = True + + def infer_type(self): + inferred_type = self.rhs.infer_type(self.entry.scope) + if (not inferred_type.is_pyobject + and inferred_type.can_coerce_to_pyobject(self.entry.scope)): + return PyrexTypes.py_object_type + self.inferred_type = inferred_type + return inferred_type + + +class Uninitialized(object): + """Definitely not initialised yet.""" + + +class Unknown(object): + """Coming from outer closure, might be initialised or not.""" + + +class NameReference(object): + def __init__(self, node, entry): + if node.cf_state is None: + node.cf_state = set() + self.node = node + self.entry = entry + self.pos = node.pos + + def __repr__(self): + return '%s(entry=%r)' % (self.__class__.__name__, self.entry) + + +class ControlFlowState(list): + # Keeps track of Node's entry assignments + # + # cf_is_null [boolean] It is uninitialized + # cf_maybe_null [boolean] May be uninitialized + # is_single [boolean] Has only one assignment at this point + + cf_maybe_null = False + cf_is_null = False + is_single = False + + def __init__(self, state): + if Uninitialized in state: + state.discard(Uninitialized) + self.cf_maybe_null = True + if not state: + self.cf_is_null = True + elif Unknown in state: + state.discard(Unknown) + self.cf_maybe_null = True + else: + if len(state) == 1: + self.is_single = True + # XXX: Remove fake_rhs_expr + super(ControlFlowState, self).__init__( + [i for i in state if i.rhs is not fake_rhs_expr]) + + def one(self): + return self[0] + + +class GVContext(object): + """Graphviz subgraph object.""" + + def __init__(self): + self.blockids = {} + self.nextid = 0 + self.children = [] + self.sources = {} + + def add(self, child): + self.children.append(child) + + def nodeid(self, block): + if block not in self.blockids: + self.blockids[block] = 'block%d' % self.nextid + self.nextid += 1 + return self.blockids[block] + + def extract_sources(self, block): + if not block.positions: + return '' + start = min(block.positions) + stop = max(block.positions) + srcdescr = start[0] + if srcdescr not in self.sources: + self.sources[srcdescr] = list(srcdescr.get_lines()) + lines = self.sources[srcdescr] + return '\\n'.join([l.strip() for l in lines[start[1] - 1:stop[1]]]) + + def render(self, fp, name, annotate_defs=False): + """Render graphviz dot graph""" + fp.write('digraph 
%s {\n' % name) + fp.write(' node [shape=box];\n') + for child in self.children: + child.render(fp, self, annotate_defs) + fp.write('}\n') + + def escape(self, text): + return text.replace('"', '\\"').replace('\n', '\\n') + + +class GV(object): + """Graphviz DOT renderer.""" + + def __init__(self, name, flow): + self.name = name + self.flow = flow + + def render(self, fp, ctx, annotate_defs=False): + fp.write(' subgraph %s {\n' % self.name) + for block in self.flow.blocks: + label = ctx.extract_sources(block) + if annotate_defs: + for stat in block.stats: + if isinstance(stat, NameAssignment): + label += '\n %s [%s %s]' % ( + stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1]) + elif isinstance(stat, NameReference): + if stat.entry: + label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1]) + if not label: + label = 'empty' + pid = ctx.nodeid(block) + fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label))) + for block in self.flow.blocks: + pid = ctx.nodeid(block) + for child in block.children: + fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child))) + fp.write(' }\n') + + +class MessageCollection(object): + """Collect error/warnings messages first then sort""" + def __init__(self): + self.messages = set() + + def error(self, pos, message): + self.messages.add((pos, True, message)) + + def warning(self, pos, message): + self.messages.add((pos, False, message)) + + def report(self): + for pos, is_error, message in sorted(self.messages): + if is_error: + error(pos, message) + else: + warning(pos, message, 2) + + +def check_definitions(flow, compiler_directives): + flow.initialize() + flow.reaching_definitions() + + # Track down state + assignments = set() + # Node to entry map + references = {} + assmt_nodes = set() + + for block in flow.blocks: + i_state = block.i_input + for stat in block.stats: + i_assmts = flow.assmts[stat.entry] + state = flow.map_one(i_state, stat.entry) + if isinstance(stat, NameAssignment): + stat.lhs.cf_state.update(state) + assmt_nodes.add(stat.lhs) + i_state = i_state & ~i_assmts.mask + if stat.is_deletion: + i_state |= i_assmts.bit + else: + i_state |= stat.bit + assignments.add(stat) + if stat.rhs is not fake_rhs_expr: + stat.entry.cf_assignments.append(stat) + elif isinstance(stat, NameReference): + references[stat.node] = stat.entry + stat.entry.cf_references.append(stat) + stat.node.cf_state.update(state) + ## if not stat.node.allow_null: + ## i_state &= ~i_assmts.bit + ## # after successful read, the state is known to be initialised + state.discard(Uninitialized) + state.discard(Unknown) + for assmt in state: + assmt.refs.add(stat) + + # Check variable usage + warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized'] + warn_unused_result = compiler_directives['warn.unused_result'] + warn_unused = compiler_directives['warn.unused'] + warn_unused_arg = compiler_directives['warn.unused_arg'] + + messages = MessageCollection() + + # assignment hints + for node in assmt_nodes: + if Uninitialized in node.cf_state: + node.cf_maybe_null = True + if len(node.cf_state) == 1: + node.cf_is_null = True + else: + node.cf_is_null = False + elif Unknown in node.cf_state: + node.cf_maybe_null = True + else: + node.cf_is_null = False + node.cf_maybe_null = False + + # Find uninitialized references and cf-hints + for node, entry in references.items(): + if Uninitialized in node.cf_state: + node.cf_maybe_null = True + if (not entry.from_closure and len(node.cf_state) == 1 + and entry.name not in 
entry.scope.scope_predefined_names): + node.cf_is_null = True + if (node.allow_null or entry.from_closure + or entry.is_pyclass_attr or entry.type.is_error): + pass # Can be uninitialized here + elif node.cf_is_null and not entry.in_closure: + if entry.error_on_uninitialized or ( + Options.error_on_uninitialized and ( + entry.type.is_pyobject or entry.type.is_unspecified)): + messages.error( + node.pos, + "local variable '%s' referenced before assignment" + % entry.name) + else: + messages.warning( + node.pos, + "local variable '%s' referenced before assignment" + % entry.name) + elif warn_maybe_uninitialized: + msg = "local variable '%s' might be referenced before assignment" % entry.name + if entry.in_closure: + msg += " (maybe initialized inside a closure)" + messages.warning( + node.pos, + msg) + elif Unknown in node.cf_state: + # TODO: better cross-closure analysis to know when inner functions + # are being called before a variable is being set, and when + # a variable is known to be set before even defining the + # inner function, etc. + node.cf_maybe_null = True + else: + node.cf_is_null = False + node.cf_maybe_null = False + + # Unused result + for assmt in assignments: + if (not assmt.refs and not assmt.entry.is_pyclass_attr + and not assmt.entry.in_closure): + if assmt.entry.cf_references and warn_unused_result: + if assmt.is_arg: + messages.warning(assmt.pos, "Unused argument value '%s'" % + assmt.entry.name) + else: + messages.warning(assmt.pos, "Unused result in '%s'" % + assmt.entry.name) + assmt.lhs.cf_used = False + + # Unused entries + for entry in flow.entries: + if (not entry.cf_references + and not entry.is_pyclass_attr): + if entry.name != '_' and not entry.name.startswith('unused'): + # '_' is often used for unused variables, e.g. 
in loops + if entry.is_arg: + if warn_unused_arg: + messages.warning(entry.pos, "Unused argument '%s'" % + entry.name) + else: + if warn_unused: + messages.warning(entry.pos, "Unused entry '%s'" % + entry.name) + entry.cf_used = False + + messages.report() + + for node in assmt_nodes: + node.cf_state = ControlFlowState(node.cf_state) + for node in references: + node.cf_state = ControlFlowState(node.cf_state) + + +class AssignmentCollector(TreeVisitor): + def __init__(self): + super(AssignmentCollector, self).__init__() + self.assignments = [] + + def visit_Node(self): + self._visitchildren(self, None, None) + + def visit_SingleAssignmentNode(self, node): + self.assignments.append((node.lhs, node.rhs)) + + def visit_CascadedAssignmentNode(self, node): + for lhs in node.lhs_list: + self.assignments.append((lhs, node.rhs)) + + +class ControlFlowAnalysis(CythonTransform): + + def find_in_stack(self, env): + if env == self.env: + return self.flow + for e, flow in reversed(self.stack): + if e is env: + return flow + assert False + + def visit_ModuleNode(self, node): + dot_output = self.current_directives['control_flow.dot_output'] + self.gv_ctx = GVContext() if dot_output else None + + from .Optimize import ConstantFolding + self.constant_folder = ConstantFolding() + + # Set of NameNode reductions + self.reductions = set() + + self.in_inplace_assignment = False + self.env = node.scope + self.flow = ControlFlow() + self.stack = [] # a stack of (env, flow) tuples + self.object_expr = TypedExprNode(PyrexTypes.py_object_type, may_be_none=True) + self.visitchildren(node) + + check_definitions(self.flow, self.current_directives) + + if dot_output: + annotate_defs = self.current_directives['control_flow.dot_annotate_defs'] + with open(dot_output, 'wt') as fp: + self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs) + return node + + def visit_FuncDefNode(self, node): + for arg in node.args: + if arg.default: + self.visitchildren(arg) + self.visitchildren(node, ('decorators',)) + self.stack.append((self.env, self.flow)) + self.env = node.local_scope + self.flow = ControlFlow() + + # Collect all entries + for entry in node.local_scope.entries.values(): + if self.flow.is_tracked(entry): + self.flow.entries.add(entry) + + self.mark_position(node) + # Function body block + self.flow.nextblock() + + for arg in node.args: + self._visit(arg) + if node.star_arg: + self.flow.mark_argument(node.star_arg, + TypedExprNode(Builtin.tuple_type, + may_be_none=False), + node.star_arg.entry) + if node.starstar_arg: + self.flow.mark_argument(node.starstar_arg, + TypedExprNode(Builtin.dict_type, + may_be_none=False), + node.starstar_arg.entry) + self._visit(node.body) + # Workaround for generators + if node.is_generator: + self._visit(node.gbody.body) + + # Exit point + if self.flow.block: + self.flow.block.add_child(self.flow.exit_point) + + # Cleanup graph + self.flow.normalize() + check_definitions(self.flow, self.current_directives) + self.flow.blocks.add(self.flow.entry_point) + + if self.gv_ctx is not None: + self.gv_ctx.add(GV(node.local_scope.name, self.flow)) + + self.env, self.flow = self.stack.pop() + return node + + def visit_DefNode(self, node): + node.used = True + return self.visit_FuncDefNode(node) + + def visit_GeneratorBodyDefNode(self, node): + return node + + def visit_CTypeDefNode(self, node): + return node + + def mark_assignment(self, lhs, rhs=None, rhs_scope=None): + if not self.flow.block: + return + if self.flow.exceptions: + exc_descr = self.flow.exceptions[-1] + 
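# the assignment itself may raise, so while inside a try block, link the current block to the innermost exception handler's entry point (and again below, after the assignment) +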
self.flow.block.add_child(exc_descr.entry_point) + self.flow.nextblock() + + if not rhs: + rhs = self.object_expr + if lhs.is_name: + if lhs.entry is not None: + entry = lhs.entry + else: + entry = self.env.lookup(lhs.name) + if entry is None: # TODO: This shouldn't happen... + return + self.flow.mark_assignment(lhs, rhs, entry, rhs_scope=rhs_scope) + elif lhs.is_sequence_constructor: + for i, arg in enumerate(lhs.args): + if arg.is_starred: + # "a, *b = x" assigns a list to "b" + item_node = TypedExprNode(Builtin.list_type, may_be_none=False, pos=arg.pos) + elif rhs is self.object_expr: + item_node = rhs + else: + item_node = rhs.inferable_item_node(i) + self.mark_assignment(arg, item_node) + else: + self._visit(lhs) + + if self.flow.exceptions: + exc_descr = self.flow.exceptions[-1] + self.flow.block.add_child(exc_descr.entry_point) + self.flow.nextblock() + + def mark_position(self, node): + """Mark position if DOT output is enabled.""" + if self.current_directives['control_flow.dot_output']: + self.flow.mark_position(node) + + def visit_FromImportStatNode(self, node): + for name, target in node.items: + if name != "*": + self.mark_assignment(target) + self.visitchildren(node) + return node + + def visit_AssignmentNode(self, node): + raise InternalError("Unhandled assignment node %s" % type(node)) + + def visit_SingleAssignmentNode(self, node): + self._visit(node.rhs) + self.mark_assignment(node.lhs, node.rhs) + return node + + def visit_CascadedAssignmentNode(self, node): + self._visit(node.rhs) + for lhs in node.lhs_list: + self.mark_assignment(lhs, node.rhs) + return node + + def visit_ParallelAssignmentNode(self, node): + collector = AssignmentCollector() + collector.visitchildren(node) + for lhs, rhs in collector.assignments: + self._visit(rhs) + for lhs, rhs in collector.assignments: + self.mark_assignment(lhs, rhs) + return node + + def visit_InPlaceAssignmentNode(self, node): + self.in_inplace_assignment = True + self.visitchildren(node) + self.in_inplace_assignment = False + self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node())) + return node + + def visit_DelStatNode(self, node): + for arg in node.args: + if arg.is_name: + entry = arg.entry or self.env.lookup(arg.name) + if entry.in_closure or entry.from_closure: + error(arg.pos, + "can not delete variable '%s' " + "referenced in nested scope" % entry.name) + if not node.ignore_nonexisting: + self._visit(arg) # mark reference + self.flow.mark_deletion(arg, entry) + else: + self._visit(arg) + return node + + def visit_CArgDeclNode(self, node): + entry = self.env.lookup(node.name) + if entry: + may_be_none = not node.not_none + self.flow.mark_argument( + node, TypedExprNode(entry.type, may_be_none), entry) + return node + + def visit_NameNode(self, node): + if self.flow.block: + entry = node.entry or self.env.lookup(node.name) + if entry: + self.flow.mark_reference(node, entry) + + if entry in self.reductions and not self.in_inplace_assignment: + error(node.pos, + "Cannot read reduction variable in loop body") + + return node + + def visit_StatListNode(self, node): + if self.flow.block: + for stat in node.stats: + self._visit(stat) + if not self.flow.block: + stat.is_terminator = True + break + return node + + def visit_Node(self, node): + self.visitchildren(node) + self.mark_position(node) + return node + + def visit_SizeofVarNode(self, node): + return node + + def visit_TypeidNode(self, node): + return node + + def visit_IfStatNode(self, node): + next_block = self.flow.newblock() + parent = 
self.flow.block + # If clauses + for clause in node.if_clauses: + parent = self.flow.nextblock(parent) + self._visit(clause.condition) + self.flow.nextblock() + self._visit(clause.body) + if self.flow.block: + self.flow.block.add_child(next_block) + # Else clause + if node.else_clause: + self.flow.nextblock(parent=parent) + self._visit(node.else_clause) + if self.flow.block: + self.flow.block.add_child(next_block) + else: + parent.add_child(next_block) + + if next_block.parents: + self.flow.block = next_block + else: + self.flow.block = None + return node + + def visit_AssertStatNode(self, node): + """Essentially an if-condition that wraps a RaiseStatNode. + """ + self.mark_position(node) + next_block = self.flow.newblock() + parent = self.flow.block + # failure case + parent = self.flow.nextblock(parent) + self._visit(node.condition) + self.flow.nextblock() + self._visit(node.exception) + if self.flow.block: + self.flow.block.add_child(next_block) + parent.add_child(next_block) + if next_block.parents: + self.flow.block = next_block + else: + self.flow.block = None + return node + + def visit_WhileStatNode(self, node): + condition_block = self.flow.nextblock() + next_block = self.flow.newblock() + # Condition block + self.flow.loops.append(LoopDescr(next_block, condition_block)) + if node.condition: + self._visit(node.condition) + # Body block + self.flow.nextblock() + self._visit(node.body) + self.flow.loops.pop() + # Loop it + if self.flow.block: + self.flow.block.add_child(condition_block) + self.flow.block.add_child(next_block) + # Else clause + if node.else_clause: + self.flow.nextblock(parent=condition_block) + self._visit(node.else_clause) + if self.flow.block: + self.flow.block.add_child(next_block) + else: + condition_block.add_child(next_block) + + if next_block.parents: + self.flow.block = next_block + else: + self.flow.block = None + return node + + def mark_forloop_target(self, node): + # TODO: Remove redundancy with range optimization... 
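+ # The code below special-cases reversed(), enumerate() and range()/xrange() so that the loop target can be marked with the actual bound expressions instead of a generic item assignment.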
+ is_special = False + sequence = node.iterator.sequence + target = node.target + env = node.iterator.expr_scope or self.env + if isinstance(sequence, ExprNodes.SimpleCallNode): + function = sequence.function + if sequence.self is None and function.is_name: + entry = env.lookup(function.name) + if not entry or entry.is_builtin: + if function.name == 'reversed' and len(sequence.args) == 1: + sequence = sequence.args[0] + elif function.name == 'enumerate' and len(sequence.args) == 1: + if target.is_sequence_constructor and len(target.args) == 2: + iterator = sequence.args[0] + if iterator.is_name: + iterator_type = iterator.infer_type(env) + if iterator_type.is_builtin_type: + # assume that builtin types have a length within Py_ssize_t + self.mark_assignment( + target.args[0], + ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX', + type=PyrexTypes.c_py_ssize_t_type), + rhs_scope=node.iterator.expr_scope) + target = target.args[1] + sequence = sequence.args[0] + if isinstance(sequence, ExprNodes.SimpleCallNode): + function = sequence.function + if sequence.self is None and function.is_name: + entry = env.lookup(function.name) + if not entry or entry.is_builtin: + if function.name in ('range', 'xrange'): + is_special = True + for arg in sequence.args[:2]: + self.mark_assignment(target, arg, rhs_scope=node.iterator.expr_scope) + if len(sequence.args) > 2: + self.mark_assignment(target, self.constant_folder( + ExprNodes.binop_node(node.pos, + '+', + sequence.args[0], + sequence.args[2])), + rhs_scope=node.iterator.expr_scope) + + if not is_special: + # A for-loop basically translates to subsequent calls to + # __getitem__(), so using an IndexNode here allows us to + # naturally infer the base type of pointers, C arrays, + # Python strings, etc., while correctly falling back to an + # object type when the base type cannot be handled. 
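+ # (e.g. iterating over a C array of double infers "double" for the target, while a generic Python iterable falls back to "object")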
+ + self.mark_assignment(target, node.item, rhs_scope=node.iterator.expr_scope) + + def visit_AsyncForStatNode(self, node): + return self.visit_ForInStatNode(node) + + def visit_ForInStatNode(self, node): + condition_block = self.flow.nextblock() + next_block = self.flow.newblock() + # Condition with iterator + self.flow.loops.append(LoopDescr(next_block, condition_block)) + self._visit(node.iterator) + # Target assignment + self.flow.nextblock() + + if isinstance(node, Nodes.ForInStatNode): + self.mark_forloop_target(node) + elif isinstance(node, Nodes.AsyncForStatNode): + # not entirely correct, but good enough for now + self.mark_assignment(node.target, node.item) + else: # Parallel + self.mark_assignment(node.target) + + # Body block + if isinstance(node, Nodes.ParallelRangeNode): + # In case of an invalid + self._delete_privates(node, exclude=node.target.entry) + + self.flow.nextblock() + self._visit(node.body) + self.flow.loops.pop() + + # Loop it + if self.flow.block: + self.flow.block.add_child(condition_block) + # Else clause + if node.else_clause: + self.flow.nextblock(parent=condition_block) + self._visit(node.else_clause) + if self.flow.block: + self.flow.block.add_child(next_block) + else: + condition_block.add_child(next_block) + + if next_block.parents: + self.flow.block = next_block + else: + self.flow.block = None + return node + + def _delete_privates(self, node, exclude=None): + for private_node in node.assigned_nodes: + if not exclude or private_node.entry is not exclude: + self.flow.mark_deletion(private_node, private_node.entry) + + def visit_ParallelRangeNode(self, node): + reductions = self.reductions + + # if node.target is None or not a NameNode, an error will have + # been previously issued + if hasattr(node.target, 'entry'): + self.reductions = set(reductions) + + for private_node in node.assigned_nodes: + private_node.entry.error_on_uninitialized = True + pos, reduction = node.assignments[private_node.entry] + if reduction: + self.reductions.add(private_node.entry) + + node = self.visit_ForInStatNode(node) + + self.reductions = reductions + return node + + def visit_ParallelWithBlockNode(self, node): + for private_node in node.assigned_nodes: + private_node.entry.error_on_uninitialized = True + + self._delete_privates(node) + self.visitchildren(node) + self._delete_privates(node) + + return node + + def visit_ForFromStatNode(self, node): + condition_block = self.flow.nextblock() + next_block = self.flow.newblock() + # Condition with iterator + self.flow.loops.append(LoopDescr(next_block, condition_block)) + self._visit(node.bound1) + self._visit(node.bound2) + if node.step is not None: + self._visit(node.step) + # Target assignment + self.flow.nextblock() + self.mark_assignment(node.target, node.bound1) + if node.step is not None: + self.mark_assignment(node.target, self.constant_folder( + ExprNodes.binop_node(node.pos, '+', node.bound1, node.step))) + # Body block + self.flow.nextblock() + self._visit(node.body) + self.flow.loops.pop() + # Loop it + if self.flow.block: + self.flow.block.add_child(condition_block) + # Else clause + if node.else_clause: + self.flow.nextblock(parent=condition_block) + self._visit(node.else_clause) + if self.flow.block: + self.flow.block.add_child(next_block) + else: + condition_block.add_child(next_block) + + if next_block.parents: + self.flow.block = next_block + else: + self.flow.block = None + return node + + def visit_LoopNode(self, node): + raise InternalError("Generic loops are not supported") + + def 
visit_WithTargetAssignmentStatNode(self, node): + self.mark_assignment(node.lhs, node.with_node.enter_call) + return node + + def visit_WithStatNode(self, node): + self._visit(node.manager) + self._visit(node.enter_call) + self._visit(node.body) + return node + + def visit_TryExceptStatNode(self, node): + # After exception handling + next_block = self.flow.newblock() + # Body block + self.flow.newblock() + # Exception entry point + entry_point = self.flow.newblock() + self.flow.exceptions.append(ExceptionDescr(entry_point)) + self.flow.nextblock() + ## XXX: links to exception handling point should be added by + ## XXX: children nodes + self.flow.block.add_child(entry_point) + self.flow.nextblock() + self.flow.in_try_block += 1 + self._visit(node.body) + self.flow.in_try_block -= 1 + self.flow.exceptions.pop() + + # After exception + if self.flow.block: + if node.else_clause: + self.flow.nextblock() + self._visit(node.else_clause) + if self.flow.block: + self.flow.block.add_child(next_block) + + for clause in node.except_clauses: + self.flow.block = entry_point + if clause.pattern: + for pattern in clause.pattern: + self._visit(pattern) + else: + # TODO: handle * pattern + pass + entry_point = self.flow.newblock(parent=self.flow.block) + self.flow.nextblock() + if clause.target: + self.mark_assignment(clause.target) + self._visit(clause.body) + if self.flow.block: + self.flow.block.add_child(next_block) + + if self.flow.exceptions: + entry_point.add_child(self.flow.exceptions[-1].entry_point) + + if next_block.parents: + self.flow.block = next_block + else: + self.flow.block = None + return node + + def visit_TryFinallyStatNode(self, node): + body_block = self.flow.nextblock() + + # Exception entry point + entry_point = self.flow.newblock() + self.flow.block = entry_point + self._visit(node.finally_except_clause) + + if self.flow.block and self.flow.exceptions: + self.flow.block.add_child(self.flow.exceptions[-1].entry_point) + + # Normal execution + finally_enter = self.flow.newblock() + self.flow.block = finally_enter + self._visit(node.finally_clause) + finally_exit = self.flow.block + + descr = ExceptionDescr(entry_point, finally_enter, finally_exit) + self.flow.exceptions.append(descr) + if self.flow.loops: + self.flow.loops[-1].exceptions.append(descr) + self.flow.block = body_block + body_block.add_child(entry_point) + self.flow.nextblock() + self.flow.in_try_block += 1 + self._visit(node.body) + self.flow.in_try_block -= 1 + self.flow.exceptions.pop() + if self.flow.loops: + self.flow.loops[-1].exceptions.pop() + + if self.flow.block: + self.flow.block.add_child(finally_enter) + if finally_exit: + self.flow.block = self.flow.nextblock(parent=finally_exit) + else: + self.flow.block = None + return node + + def visit_RaiseStatNode(self, node): + self.mark_position(node) + self.visitchildren(node) + if self.flow.exceptions: + self.flow.block.add_child(self.flow.exceptions[-1].entry_point) + self.flow.block = None + if self.flow.in_try_block: + node.in_try_block = True + return node + + def visit_ReraiseStatNode(self, node): + self.mark_position(node) + if self.flow.exceptions: + self.flow.block.add_child(self.flow.exceptions[-1].entry_point) + self.flow.block = None + return node + + def visit_ReturnStatNode(self, node): + self.mark_position(node) + self.visitchildren(node) + + outer_exception_handlers = iter(self.flow.exceptions[::-1]) + for handler in outer_exception_handlers: + if handler.finally_enter: + self.flow.block.add_child(handler.finally_enter) + if handler.finally_exit: + 
# 'return' goes to function exit, or to the next outer 'finally' clause + exit_point = self.flow.exit_point + for next_handler in outer_exception_handlers: + if next_handler.finally_enter: + exit_point = next_handler.finally_enter + break + handler.finally_exit.add_child(exit_point) + break + else: + if self.flow.block: + self.flow.block.add_child(self.flow.exit_point) + self.flow.block = None + return node + + def visit_BreakStatNode(self, node): + if not self.flow.loops: + #error(node.pos, "break statement not inside loop") + return node + loop = self.flow.loops[-1] + self.mark_position(node) + for exception in loop.exceptions[::-1]: + if exception.finally_enter: + self.flow.block.add_child(exception.finally_enter) + if exception.finally_exit: + exception.finally_exit.add_child(loop.next_block) + break + else: + self.flow.block.add_child(loop.next_block) + self.flow.block = None + return node + + def visit_ContinueStatNode(self, node): + if not self.flow.loops: + #error(node.pos, "continue statement not inside loop") + return node + loop = self.flow.loops[-1] + self.mark_position(node) + for exception in loop.exceptions[::-1]: + if exception.finally_enter: + self.flow.block.add_child(exception.finally_enter) + if exception.finally_exit: + exception.finally_exit.add_child(loop.loop_block) + break + else: + self.flow.block.add_child(loop.loop_block) + self.flow.block = None + return node + + def visit_ComprehensionNode(self, node): + if node.expr_scope: + self.stack.append((self.env, self.flow)) + self.env = node.expr_scope + # Skip append node here + self._visit(node.loop) + if node.expr_scope: + self.env, _ = self.stack.pop() + return node + + def visit_ScopedExprNode(self, node): + # currently this is written to deal with these two types + # (with comprehensions covered in their own function) + assert isinstance(node, (ExprNodes.IteratorNode, ExprNodes.AsyncIteratorNode)), node + if node.expr_scope: + self.stack.append((self.env, self.flow)) + self.flow = self.find_in_stack(node.expr_scope) + self.env = node.expr_scope + self.visitchildren(node) + if node.expr_scope: + self.env, self.flow = self.stack.pop() + return node + + def visit_PyClassDefNode(self, node): + self.visitchildren(node, ('dict', 'metaclass', + 'mkw', 'bases', 'class_result')) + self.flow.mark_assignment(node.target, node.classobj, + self.env.lookup(node.target.name)) + self.stack.append((self.env, self.flow)) + self.env = node.scope + self.flow.nextblock() + if node.doc_node: + self.flow.mark_assignment(node.doc_node, fake_rhs_expr, node.doc_node.entry) + self.visitchildren(node, ('body',)) + self.flow.nextblock() + self.env, _ = self.stack.pop() + return node + + def visit_CClassDefNode(self, node): + # just make sure the node's scope is findable in case there is a list comprehension in it + self.stack.append((node.scope, self.flow)) + self.visitchildren(node) + self.stack.pop() + return node + + def visit_AmpersandNode(self, node): + if node.operand.is_name: + # Fake assignment to silence warning + self.mark_assignment(node.operand, fake_rhs_expr) + self.visitchildren(node) + return node diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..644cd2206e7a2b1a61f7b861b43ac5850ded231a Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.cp39-win_amd64.pyd differ diff --git
a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.py new file mode 100644 index 0000000000000000000000000000000000000000..2ce4142f1cb4ebed9a7e5fc7bc52e200efc9006a --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.py @@ -0,0 +1,1015 @@ +from __future__ import absolute_import + +import copy + +from . import (ExprNodes, PyrexTypes, MemoryView, + ParseTreeTransforms, StringEncoding, Errors, + Naming) +from .ExprNodes import CloneNode, ProxyNode, TupleNode +from .Nodes import FuncDefNode, CFuncDefNode, StatListNode, DefNode +from ..Utils import OrderedSet +from .Errors import error, CannotSpecialize + + +class FusedCFuncDefNode(StatListNode): + """ + This node replaces a function with fused arguments. It deep-copies the + function for every permutation of fused types, and allocates a new local + scope for it. It keeps track of the original function in self.node, and + the entry of the original function in the symbol table is given the + 'fused_cfunction' attribute which points back to us. + Then when a function lookup occurs (to e.g. call it), the call can be + dispatched to the right function. + + node FuncDefNode the original function + nodes [FuncDefNode] list of copies of node with different specific types + py_func DefNode the fused python function subscriptable from + Python space + __signatures__ A DictNode mapping signature specialization strings + to PyCFunction nodes + resulting_fused_function PyCFunction for the fused DefNode that delegates + to specializations + fused_func_assignment Assignment of the fused function to the function name + defaults_tuple TupleNode of defaults (letting PyCFunctionNode build + defaults would result in many different tuples) + specialized_pycfuncs List of synthesized pycfunction nodes for the + specializations + code_object CodeObjectNode shared by all specializations and the + fused function + + fused_compound_types All fused (compound) types (e.g. floating[:]) + """ + + __signatures__ = None + resulting_fused_function = None + fused_func_assignment = None + defaults_tuple = None + decorators = None + + child_attrs = StatListNode.child_attrs + [ + '__signatures__', 'resulting_fused_function', 'fused_func_assignment'] + + def __init__(self, node, env): + super(FusedCFuncDefNode, self).__init__(node.pos) + + self.nodes = [] + self.node = node + + is_def = isinstance(self.node, DefNode) + if is_def: + # self.node.decorators = [] + self.copy_def(env) + else: + self.copy_cdef(env) + + # Perform some sanity checks. If anything fails, it's a bug + for n in self.nodes: + assert not n.entry.type.is_fused + assert not n.local_scope.return_type.is_fused + if node.return_type.is_fused: + assert not n.return_type.is_fused + + if not is_def and n.cfunc_declarator.optional_arg_count: + assert n.type.op_arg_struct + + node.entry.fused_cfunction = self + # Copy the nodes as AnalyseDeclarationsTransform will prepend + # self.py_func to self.stats, as we only want specialized + # CFuncDefNodes in self.nodes + self.stats = self.nodes[:] + + def copy_def(self, env): + """ + Create a copy of the original def or lambda function for specialized + versions. 
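+ For example, with a hypothetical fused type declared as cython.fused_type(int, double), the function is copied twice below: once specialized for int and once for double.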
+ """ + fused_compound_types = PyrexTypes.unique( + [arg.type for arg in self.node.args if arg.type.is_fused]) + fused_types = self._get_fused_base_types(fused_compound_types) + permutations = PyrexTypes.get_all_specialized_permutations(fused_types) + + self.fused_compound_types = fused_compound_types + + if self.node.entry in env.pyfunc_entries: + env.pyfunc_entries.remove(self.node.entry) + + for cname, fused_to_specific in permutations: + copied_node = copy.deepcopy(self.node) + # keep signature object identity for special casing in DefNode.analyse_declarations() + copied_node.entry.signature = self.node.entry.signature + + self._specialize_function_args(copied_node.args, fused_to_specific) + copied_node.return_type = self.node.return_type.specialize( + fused_to_specific) + + copied_node.analyse_declarations(env) + # copied_node.is_staticmethod = self.node.is_staticmethod + # copied_node.is_classmethod = self.node.is_classmethod + self.create_new_local_scope(copied_node, env, fused_to_specific) + self.specialize_copied_def(copied_node, cname, self.node.entry, + fused_to_specific, fused_compound_types) + + PyrexTypes.specialize_entry(copied_node.entry, cname) + copied_node.entry.used = True + env.entries[copied_node.entry.name] = copied_node.entry + + if not self.replace_fused_typechecks(copied_node): + break + + self.orig_py_func = self.node + self.py_func = self.make_fused_cpdef(self.node, env, is_def=True) + + def copy_cdef(self, env): + """ + Create a copy of the original c(p)def function for all specialized + versions. + """ + permutations = self.node.type.get_all_specialized_permutations() + # print 'Node %s has %d specializations:' % (self.node.entry.name, + # len(permutations)) + # import pprint; pprint.pprint([d for cname, d in permutations]) + + # Prevent copying of the python function + self.orig_py_func = orig_py_func = self.node.py_func + self.node.py_func = None + if orig_py_func: + env.pyfunc_entries.remove(orig_py_func.entry) + + fused_types = self.node.type.get_fused_types() + self.fused_compound_types = fused_types + + new_cfunc_entries = [] + for cname, fused_to_specific in permutations: + copied_node = copy.deepcopy(self.node) + + # Make the types in our CFuncType specific. + try: + type = copied_node.type.specialize(fused_to_specific) + except CannotSpecialize: + # unlike for the argument types, specializing the return type can fail + error(copied_node.pos, "Return type is a fused type that cannot " + "be determined from the function arguments") + self.py_func = None # this is just to let the compiler exit gracefully + return + entry = copied_node.entry + type.specialize_entry(entry, cname) + + # Reuse existing Entries (e.g. from .pxd files). 
+ for i, orig_entry in enumerate(env.cfunc_entries): + if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type): + copied_node.entry = env.cfunc_entries[i] + if not copied_node.entry.func_cname: + copied_node.entry.func_cname = entry.func_cname + entry = copied_node.entry + type = entry.type + break + else: + new_cfunc_entries.append(entry) + + copied_node.type = type + entry.type, type.entry = type, entry + + entry.used = (entry.used or + self.node.entry.defined_in_pxd or + env.is_c_class_scope or + entry.is_cmethod) + + if self.node.cfunc_declarator.optional_arg_count: + self.node.cfunc_declarator.declare_optional_arg_struct( + type, env, fused_cname=cname) + + copied_node.return_type = type.return_type + self.create_new_local_scope(copied_node, env, fused_to_specific) + + # Make the argument types in the CFuncDeclarator specific + self._specialize_function_args(copied_node.cfunc_declarator.args, + fused_to_specific) + + # If a cpdef, declare all specialized cpdefs (this + # also calls analyse_declarations) + copied_node.declare_cpdef_wrapper(env) + if copied_node.py_func: + env.pyfunc_entries.remove(copied_node.py_func.entry) + + self.specialize_copied_def( + copied_node.py_func, cname, self.node.entry.as_variable, + fused_to_specific, fused_types) + + if not self.replace_fused_typechecks(copied_node): + break + + # replace old entry with new entries + if self.node.entry in env.cfunc_entries: + cindex = env.cfunc_entries.index(self.node.entry) + env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries + else: + env.cfunc_entries.extend(new_cfunc_entries) + + if orig_py_func: + self.py_func = self.make_fused_cpdef(orig_py_func, env, + is_def=False) + else: + self.py_func = orig_py_func + + def _get_fused_base_types(self, fused_compound_types): + """ + Get a list of unique basic fused types, from a list of + (possibly) compound fused types. + """ + base_types = [] + seen = set() + for fused_type in fused_compound_types: + fused_type.get_fused_types(result=base_types, seen=seen) + return base_types + + def _specialize_function_args(self, args, fused_to_specific): + for arg in args: + if arg.type.is_fused: + arg.type = arg.type.specialize(fused_to_specific) + if arg.type.is_memoryviewslice: + arg.type.validate_memslice_dtype(arg.pos) + if arg.annotation: + # TODO might be nice if annotations were specialized instead? + # (Or might be hard to do reliably) + arg.annotation.untyped = True + + def create_new_local_scope(self, node, env, f2s): + """ + Create a new local scope for the copied node and append it to + self.nodes. A new local scope is needed because the arguments with the + fused types are already in the local scope, and we need the specialized + entries created after analyse_declarations on each specialized version + of the (CFunc)DefNode. 
+ f2s is a dict mapping each fused type to its specialized version + """ + node.create_local_scope(env) + node.local_scope.fused_to_specific = f2s + + # This is copied from the original function, set it to false to + # stop recursion + node.has_fused_arguments = False + self.nodes.append(node) + + def specialize_copied_def(self, node, cname, py_entry, f2s, fused_compound_types): + """Specialize the copy of a DefNode given the copied node, + the specialization cname and the original DefNode entry""" + fused_types = self._get_fused_base_types(fused_compound_types) + type_strings = [ + PyrexTypes.specialization_signature_string(fused_type, f2s) + for fused_type in fused_types + ] + + node.specialized_signature_string = '|'.join(type_strings) + + node.entry.pymethdef_cname = PyrexTypes.get_fused_cname( + cname, node.entry.pymethdef_cname) + node.entry.doc = py_entry.doc + node.entry.doc_cname = py_entry.doc_cname + + def replace_fused_typechecks(self, copied_node): + """ + Branch-prune fused type checks like + + if fused_t is int: + ... + + Returns whether an error was issued and whether we should stop in + in order to prevent a flood of errors. + """ + num_errors = Errors.get_errors_count() + transform = ParseTreeTransforms.ReplaceFusedTypeChecks( + copied_node.local_scope) + transform(copied_node) + + if Errors.get_errors_count() > num_errors: + return False + + return True + + def _fused_instance_checks(self, normal_types, pyx_code, env): + """ + Generate Cython code for instance checks, matching an object to + specialized types. + """ + for specialized_type in normal_types: + # all_numeric = all_numeric and specialized_type.is_numeric + py_type_name = specialized_type.py_type_name() + if py_type_name == 'int': + # Support Python 2 long + py_type_name = '(int, long)' + pyx_code.context.update( + py_type_name=py_type_name, + specialized_type_name=specialized_type.specialization_string, + ) + pyx_code.put_chunk( + u""" + if isinstance(arg, {{py_type_name}}): + dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'; break + """) + + def _dtype_name(self, dtype): + name = str(dtype).replace('_', '__').replace(' ', '_') + if dtype.is_typedef: + name = Naming.fused_dtype_prefix + name + return name + + def _dtype_type(self, dtype): + if dtype.is_typedef: + return self._dtype_name(dtype) + return str(dtype) + + def _sizeof_dtype(self, dtype): + if dtype.is_pyobject: + return 'sizeof(void *)' + else: + return "sizeof(%s)" % self._dtype_type(dtype) + + def _buffer_check_numpy_dtype_setup_cases(self, pyx_code): + "Setup some common cases to match dtypes against specializations" + with pyx_code.indenter("if kind in u'iu':"): + pyx_code.putln("pass") + pyx_code.named_insertion_point("dtype_int") + + with pyx_code.indenter("elif kind == u'f':"): + pyx_code.putln("pass") + pyx_code.named_insertion_point("dtype_float") + + with pyx_code.indenter("elif kind == u'c':"): + pyx_code.putln("pass") + pyx_code.named_insertion_point("dtype_complex") + + with pyx_code.indenter("elif kind == u'O':"): + pyx_code.putln("pass") + pyx_code.named_insertion_point("dtype_object") + + match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'" + no_match = "dest_sig[{{dest_sig_idx}}] = None" + def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types): + """ + Match a numpy dtype object to the individual specializations. 
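+ The kind codes used here follow numpy's dtype.kind convention: 'i'/'u' for signed/unsigned integers, 'f' for floats, 'c' for complex and 'O' for objects.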
+ """ + self._buffer_check_numpy_dtype_setup_cases(pyx_code) + + for specialized_type in pythran_types+specialized_buffer_types: + final_type = specialized_type + if specialized_type.is_pythran_expr: + specialized_type = specialized_type.org_buffer + dtype = specialized_type.dtype + pyx_code.context.update( + itemsize_match=self._sizeof_dtype(dtype) + " == itemsize", + signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype), + dtype=dtype, + specialized_type_name=final_type.specialization_string) + + dtypes = [ + (dtype.is_int, pyx_code.dtype_int), + (dtype.is_float, pyx_code.dtype_float), + (dtype.is_complex, pyx_code.dtype_complex) + ] + + for dtype_category, codewriter in dtypes: + if not dtype_category: + continue + cond = '{{itemsize_match}} and (arg.ndim) == %d' % ( + specialized_type.ndim,) + if dtype.is_int: + cond += ' and {{signed_match}}' + + if final_type.is_pythran_expr: + cond += ' and arg_is_pythran_compatible' + + with codewriter.indenter("if %s:" % cond): + #codewriter.putln("print 'buffer match found based on numpy dtype'") + codewriter.putln(self.match) + codewriter.putln("break") + + def _buffer_parse_format_string_check(self, pyx_code, decl_code, + specialized_type, env): + """ + For each specialized type, try to coerce the object to a memoryview + slice of that type. This means obtaining a buffer and parsing the + format string. + TODO: separate buffer acquisition from format parsing + """ + dtype = specialized_type.dtype + if specialized_type.is_buffer: + axes = [('direct', 'strided')] * specialized_type.ndim + else: + axes = specialized_type.axes + + memslice_type = PyrexTypes.MemoryViewSliceType(dtype, axes) + memslice_type.create_from_py_utility_code(env) + pyx_code.context.update( + coerce_from_py_func=memslice_type.from_py_function, + dtype=dtype) + decl_code.putln( + "{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)") + + pyx_code.context.update( + specialized_type_name=specialized_type.specialization_string, + sizeof_dtype=self._sizeof_dtype(dtype), + ndim_dtype=specialized_type.ndim, + dtype_is_struct_obj=int(dtype.is_struct or dtype.is_pyobject)) + + # use the memoryview object to check itemsize and ndim. + # In principle it could check more, but these are the easiest to do quickly + pyx_code.put_chunk( + u""" + # try {{dtype}} + if (((itemsize == -1 and arg_as_memoryview.itemsize == {{sizeof_dtype}}) + or itemsize == {{sizeof_dtype}}) + and arg_as_memoryview.ndim == {{ndim_dtype}}): + {{if dtype_is_struct_obj}} + if __PYX_IS_PYPY2: + # I wasn't able to diagnose why, but PyPy2 fails to convert a + # memoryview to a Cython memoryview in this case + memslice = {{coerce_from_py_func}}(arg, 0) + else: + {{else}} + if True: + {{endif}} + memslice = {{coerce_from_py_func}}(arg_as_memoryview, 0) + if memslice.memview: + __PYX_XCLEAR_MEMVIEW(&memslice, 1) + # print 'found a match for the buffer through format parsing' + %s + break + else: + __pyx_PyErr_Clear() + """ % self.match) + + def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, accept_none, env): + """ + Generate Cython code to match objects to buffer specializations. + First try to get a numpy dtype object and match it against the individual + specializations. If that fails, try naively to coerce the object + to each specialization, which obtains the buffer each time and tries + to match the format string. 
+ """ + # The first thing to find a match in this loop breaks out of the loop + pyx_code.put_chunk( + u""" + """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u""" + if ndarray is not None: + if isinstance(arg, ndarray): + dtype = arg.dtype + """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u""" + elif __pyx_memoryview_check(arg): + arg_base = arg.base + if isinstance(arg_base, ndarray): + dtype = arg_base.dtype + else: + dtype = None + else: + dtype = None + + itemsize = -1 + if dtype is not None: + itemsize = dtype.itemsize + kind = ord(dtype.kind) + dtype_signed = kind == u'i' + """) + pyx_code.indent(2) + if pythran_types: + pyx_code.put_chunk( + u""" + # Pythran only supports the endianness of the current compiler + byteorder = dtype.byteorder + if byteorder == "<" and not __Pyx_Is_Little_Endian(): + arg_is_pythran_compatible = False + elif byteorder == ">" and __Pyx_Is_Little_Endian(): + arg_is_pythran_compatible = False + if arg_is_pythran_compatible: + cur_stride = itemsize + shape = arg.shape + strides = arg.strides + for i in range(arg.ndim-1, -1, -1): + if (strides[i]) != cur_stride: + arg_is_pythran_compatible = False + break + cur_stride *= shape[i] + else: + arg_is_pythran_compatible = not (arg.flags.f_contiguous and (arg.ndim) > 1) + """) + pyx_code.named_insertion_point("numpy_dtype_checks") + self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types) + pyx_code.dedent(2) + + if accept_none: + # If None is acceptable, then Cython <3.0 matched None with the + # first type. This behaviour isn't ideal, but keep it for backwards + # compatibility. Better behaviour would be to see if subsequent + # arguments give a stronger match. + pyx_code.context.update( + specialized_type_name=buffer_types[0].specialization_string + ) + pyx_code.put_chunk( + """ + if arg is None: + %s + break + """ % self.match) + + # creating a Cython memoryview from a Python memoryview avoids the + # need to get the buffer multiple times, and we can + # also use it to check itemsizes etc + pyx_code.put_chunk( + """ + try: + arg_as_memoryview = memoryview(arg) + except (ValueError, TypeError): + pass + """) + with pyx_code.indenter("else:"): + for specialized_type in buffer_types: + self._buffer_parse_format_string_check( + pyx_code, decl_code, specialized_type, env) + + def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types): + """ + If we have any buffer specializations, write out some variable + declarations and imports. 
+ """ + decl_code.put_chunk( + u""" + ctypedef struct {{memviewslice_cname}}: + void *memview + + void __PYX_XCLEAR_MEMVIEW({{memviewslice_cname}} *, int have_gil) + bint __pyx_memoryview_check(object) + bint __PYX_IS_PYPY2 "(CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION == 2)" + """) + + pyx_code.local_variable_declarations.put_chunk( + u""" + cdef {{memviewslice_cname}} memslice + cdef Py_ssize_t itemsize + cdef bint dtype_signed + cdef Py_UCS4 kind + + itemsize = -1 + """) + + if pythran_types: + pyx_code.local_variable_declarations.put_chunk(u""" + cdef bint arg_is_pythran_compatible + cdef Py_ssize_t cur_stride + """) + + pyx_code.imports.put_chunk( + u""" + cdef type ndarray + ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable() + """) + + pyx_code.imports.put_chunk( + u""" + cdef memoryview arg_as_memoryview + """ + ) + + seen_typedefs = set() + seen_int_dtypes = set() + for buffer_type in all_buffer_types: + dtype = buffer_type.dtype + dtype_name = self._dtype_name(dtype) + if dtype.is_typedef: + if dtype_name not in seen_typedefs: + seen_typedefs.add(dtype_name) + decl_code.putln( + 'ctypedef %s %s "%s"' % (dtype.resolve(), dtype_name, + dtype.empty_declaration_code())) + + if buffer_type.dtype.is_int: + if str(dtype) not in seen_int_dtypes: + seen_int_dtypes.add(str(dtype)) + pyx_code.context.update(dtype_name=dtype_name, + dtype_type=self._dtype_type(dtype)) + pyx_code.local_variable_declarations.put_chunk( + u""" + cdef bint {{dtype_name}}_is_signed + {{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0) + """) + + def _split_fused_types(self, arg): + """ + Specialize fused types and split into normal types and buffer types. + """ + specialized_types = PyrexTypes.get_specialized_types(arg.type) + + # Prefer long over int, etc by sorting (see type classes in PyrexTypes.py) + specialized_types.sort() + + seen_py_type_names = set() + normal_types, buffer_types, pythran_types = [], [], [] + has_object_fallback = False + for specialized_type in specialized_types: + py_type_name = specialized_type.py_type_name() + if py_type_name: + if py_type_name in seen_py_type_names: + continue + seen_py_type_names.add(py_type_name) + if py_type_name == 'object': + has_object_fallback = True + else: + normal_types.append(specialized_type) + elif specialized_type.is_pythran_expr: + pythran_types.append(specialized_type) + elif specialized_type.is_buffer or specialized_type.is_memoryviewslice: + buffer_types.append(specialized_type) + + return normal_types, buffer_types, pythran_types, has_object_fallback + + def _unpack_argument(self, pyx_code): + pyx_code.put_chunk( + u""" + # PROCESSING ARGUMENT {{arg_tuple_idx}} + if {{arg_tuple_idx}} < len(args): + arg = (args)[{{arg_tuple_idx}}] + elif kwargs is not None and '{{arg.name}}' in kwargs: + arg = (kwargs)['{{arg.name}}'] + else: + {{if arg.default}} + arg = (defaults)[{{default_idx}}] + {{else}} + {{if arg_tuple_idx < min_positional_args}} + raise TypeError("Expected at least %d argument%s, got %d" % ( + {{min_positional_args}}, {{'"s"' if min_positional_args != 1 else '""'}}, len(args))) + {{else}} + raise TypeError("Missing keyword-only argument: '%s'" % "{{arg.default}}") + {{endif}} + {{endif}} + """) + + def _fused_signature_index(self, pyx_code): + """ + Generate Cython code for constructing a persistent nested dictionary index of + fused type specialization signatures. 
+ """ + pyx_code.put_chunk( + u""" + if not _fused_sigindex: + for sig in signatures: + sigindex_node = _fused_sigindex + *sig_series, last_type = sig.strip('()').split('|') + for sig_type in sig_series: + if sig_type not in sigindex_node: + sigindex_node[sig_type] = sigindex_node = {} + else: + sigindex_node = sigindex_node[sig_type] + sigindex_node[last_type] = sig + """ + ) + + def make_fused_cpdef(self, orig_py_func, env, is_def): + """ + This creates the function that is indexable from Python and does + runtime dispatch based on the argument types. The function gets the + arg tuple and kwargs dict (or None) and the defaults tuple + as arguments from the Binding Fused Function's tp_call. + """ + from . import TreeFragment, Code, UtilityCode + + fused_types = self._get_fused_base_types([ + arg.type for arg in self.node.args if arg.type.is_fused]) + + context = { + 'memviewslice_cname': MemoryView.memviewslice_cname, + 'func_args': self.node.args, + 'n_fused': len(fused_types), + 'min_positional_args': + self.node.num_required_args - self.node.num_required_kw_args + if is_def else + sum(1 for arg in self.node.args if arg.default is None), + 'name': orig_py_func.entry.name, + } + + pyx_code = Code.PyxCodeWriter(context=context) + decl_code = Code.PyxCodeWriter(context=context) + decl_code.put_chunk( + u""" + cdef extern from *: + void __pyx_PyErr_Clear "PyErr_Clear" () + type __Pyx_ImportNumPyArrayTypeIfAvailable() + int __Pyx_Is_Little_Endian() + """) + decl_code.indent() + + pyx_code.put_chunk( + u""" + def __pyx_fused_cpdef(signatures, args, kwargs, defaults, _fused_sigindex={}): + # FIXME: use a typed signature - currently fails badly because + # default arguments inherit the types we specify here! + + cdef list search_list + cdef dict sigindex_node + + dest_sig = [None] * {{n_fused}} + + if kwargs is not None and not kwargs: + kwargs = None + + cdef Py_ssize_t i + + # instance check body + """) + + pyx_code.indent() # indent following code to function body + pyx_code.named_insertion_point("imports") + pyx_code.named_insertion_point("func_defs") + pyx_code.named_insertion_point("local_variable_declarations") + + fused_index = 0 + default_idx = 0 + all_buffer_types = OrderedSet() + seen_fused_types = set() + for i, arg in enumerate(self.node.args): + if arg.type.is_fused: + arg_fused_types = arg.type.get_fused_types() + if len(arg_fused_types) > 1: + raise NotImplementedError("Determination of more than one fused base " + "type per argument is not implemented.") + fused_type = arg_fused_types[0] + + if arg.type.is_fused and fused_type not in seen_fused_types: + seen_fused_types.add(fused_type) + + context.update( + arg_tuple_idx=i, + arg=arg, + dest_sig_idx=fused_index, + default_idx=default_idx, + ) + + normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg) + self._unpack_argument(pyx_code) + + # 'unrolled' loop, first match breaks out of it + with pyx_code.indenter("while 1:"): + if normal_types: + self._fused_instance_checks(normal_types, pyx_code, env) + if buffer_types or pythran_types: + env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c")) + self._buffer_checks( + buffer_types, pythran_types, pyx_code, decl_code, + arg.accept_none, env) + if has_object_fallback: + pyx_code.context.update(specialized_type_name='object') + pyx_code.putln(self.match) + else: + pyx_code.putln(self.no_match) + pyx_code.putln("break") + + fused_index += 1 + all_buffer_types.update(buffer_types) + 
all_buffer_types.update(ty.org_buffer for ty in pythran_types) + + if arg.default: + default_idx += 1 + + if all_buffer_types: + self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types) + env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c")) + env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c")) + + self._fused_signature_index(pyx_code) + + pyx_code.put_chunk( + u""" + sigindex_matches = [] + sigindex_candidates = [_fused_sigindex] + + for dst_type in dest_sig: + found_matches = [] + found_candidates = [] + # Make two separate lists: one for signature sub-trees + # with at least one definite match, and another for + # signature sub-trees with only ambiguous matches + # (where `dest_sig[i] is None`). + if dst_type is None: + for sn in sigindex_matches: + found_matches.extend((<dict> sn).values()) + for sn in sigindex_candidates: + found_candidates.extend((<dict> sn).values()) + else: + for search_list in (sigindex_matches, sigindex_candidates): + for sn in search_list: + type_match = (<dict> sn).get(dst_type) + if type_match is not None: + found_matches.append(type_match) + sigindex_matches = found_matches + sigindex_candidates = found_candidates + if not (found_matches or found_candidates): + break + + candidates = sigindex_matches + + if not candidates: + raise TypeError("No matching signature found") + elif len(candidates) > 1: + raise TypeError("Function call with ambiguous argument types") + else: + return (<dict>signatures)[candidates[0]] + """) + + fragment_code = pyx_code.getvalue() + # print decl_code.getvalue() + # print fragment_code + from .Optimize import ConstantFolding + fragment = TreeFragment.TreeFragment( + fragment_code, level='module', pipeline=[ConstantFolding()]) + ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root) + UtilityCode.declare_declarations_in_scope( + decl_code.getvalue(), env.global_scope()) + ast.scope = env + # FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self' + ast.analyse_declarations(env) + py_func = ast.stats[-1] # the DefNode + self.fragment_scope = ast.scope + + if isinstance(self.node, DefNode): + py_func.specialized_cpdefs = self.nodes[:] + else: + py_func.specialized_cpdefs = [n.py_func for n in self.nodes] + + return py_func + + def update_fused_defnode_entry(self, env): + copy_attributes = ( + 'name', 'pos', 'cname', 'func_cname', 'pyfunc_cname', + 'pymethdef_cname', 'doc', 'doc_cname', 'is_member', + 'scope' + ) + + entry = self.py_func.entry + + for attr in copy_attributes: + setattr(entry, attr, + getattr(self.orig_py_func.entry, attr)) + + self.py_func.name = self.orig_py_func.name + self.py_func.doc = self.orig_py_func.doc + + env.entries.pop('__pyx_fused_cpdef', None) + if isinstance(self.node, DefNode): + env.entries[entry.name] = entry + else: + env.entries[entry.name].as_variable = entry + + env.pyfunc_entries.append(entry) + + self.py_func.entry.fused_cfunction = self + for node in self.nodes: + if isinstance(self.node, DefNode): + node.fused_py_func = self.py_func + else: + node.py_func.fused_py_func = self.py_func + node.entry.as_variable = entry + + self.synthesize_defnodes() + self.stats.append(self.__signatures__) + + def analyse_expressions(self, env): + """ + Analyse the expressions.
Take care to only evaluate default arguments + once and clone the result for all specializations + """ + for fused_compound_type in self.fused_compound_types: + for fused_type in fused_compound_type.get_fused_types(): + for specialization_type in fused_type.types: + if specialization_type.is_complex: + specialization_type.create_declaration_utility_code(env) + + if self.py_func: + self.__signatures__ = self.__signatures__.analyse_expressions(env) + self.py_func = self.py_func.analyse_expressions(env) + self.resulting_fused_function = self.resulting_fused_function.analyse_expressions(env) + self.fused_func_assignment = self.fused_func_assignment.analyse_expressions(env) + + self.defaults = defaults = [] + + for arg in self.node.args: + if arg.default: + arg.default = arg.default.analyse_expressions(env) + if arg.default.is_literal: + defaults.append(copy.copy(arg.default)) + else: + # coerce the argument to temp since CloneNode really requires a temp + defaults.append(ProxyNode(arg.default.coerce_to_temp(env))) + else: + defaults.append(None) + + for i, stat in enumerate(self.stats): + stat = self.stats[i] = stat.analyse_expressions(env) + if isinstance(stat, FuncDefNode) and stat is not self.py_func: + # the dispatcher specifically doesn't want its defaults overriding + for arg, default in zip(stat.args, defaults): + if default is not None: + if default.is_literal: + arg.default = default.coerce_to(arg.type, env) + else: + arg.default = CloneNode(default).analyse_expressions(env).coerce_to(arg.type, env) + + if self.py_func: + args = [CloneNode(default) for default in defaults if default] + self.defaults_tuple = TupleNode(self.pos, args=args) + self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env) + self.defaults_tuple = ProxyNode(self.defaults_tuple) + self.code_object = ProxyNode(self.specialized_pycfuncs[0].code_object) + + fused_func = self.resulting_fused_function.arg + fused_func.defaults_tuple = CloneNode(self.defaults_tuple) + fused_func.code_object = CloneNode(self.code_object) + + for i, pycfunc in enumerate(self.specialized_pycfuncs): + pycfunc.code_object = CloneNode(self.code_object) + pycfunc = self.specialized_pycfuncs[i] = pycfunc.analyse_types(env) + pycfunc.defaults_tuple = CloneNode(self.defaults_tuple) + return self + + def synthesize_defnodes(self): + """ + Create the __signatures__ dict of PyCFunctionNode specializations. + """ + if isinstance(self.nodes[0], CFuncDefNode): + nodes = [node.py_func for node in self.nodes] + else: + nodes = self.nodes + + # For the moment, fused functions do not support METH_FASTCALL + for node in nodes: + node.entry.signature.use_fastcall = False + + signatures = [StringEncoding.EncodedString(node.specialized_signature_string) + for node in nodes] + keys = [ExprNodes.StringNode(node.pos, value=sig) + for node, sig in zip(nodes, signatures)] + values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True) + for node in nodes] + + self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values)) + + self.specialized_pycfuncs = values + for pycfuncnode in values: + pycfuncnode.is_specialization = True + + def generate_function_definitions(self, env, code): + if self.py_func: + self.py_func.pymethdef_required = True + self.fused_func_assignment.generate_function_definitions(env, code) + + from . 
import Options + for stat in self.stats: + if isinstance(stat, FuncDefNode) and ( + stat.entry.used or + (Options.cimport_from_pyx and not stat.entry.visibility == 'extern')): + code.mark_pos(stat.pos) + stat.generate_function_definitions(env, code) + + def generate_execution_code(self, code): + # Note: all def function specializations are wrapped in PyCFunction + # nodes in the self.__signatures__ dictnode. + for default in self.defaults: + if default is not None: + default.generate_evaluation_code(code) + + if self.py_func: + self.defaults_tuple.generate_evaluation_code(code) + self.code_object.generate_evaluation_code(code) + + for stat in self.stats: + code.mark_pos(stat.pos) + if isinstance(stat, ExprNodes.ExprNode): + stat.generate_evaluation_code(code) + else: + stat.generate_execution_code(code) + + if self.__signatures__: + self.resulting_fused_function.generate_evaluation_code(code) + + code.putln( + "((__pyx_FusedFunctionObject *) %s)->__signatures__ = %s;" % + (self.resulting_fused_function.result(), + self.__signatures__.result())) + self.__signatures__.generate_giveref(code) + self.__signatures__.generate_post_assignment_code(code) + self.__signatures__.free_temps(code) + + self.fused_func_assignment.generate_execution_code(code) + + # Dispose of results + self.resulting_fused_function.generate_disposal_code(code) + self.resulting_fused_function.free_temps(code) + self.defaults_tuple.generate_disposal_code(code) + self.defaults_tuple.free_temps(code) + self.code_object.generate_disposal_code(code) + self.code_object.free_temps(code) + + for default in self.defaults: + if default is not None: + default.generate_disposal_code(code) + default.free_temps(code) + + def annotate(self, code): + for stat in self.stats: + stat.annotate(code) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Future.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Future.py new file mode 100644 index 0000000000000000000000000000000000000000..8de10c0cb583f206808269ce6dbcf7fcb59c39b4 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Future.py @@ -0,0 +1,16 @@ +def _get_feature(name): + import __future__ + # fall back to a unique fake object for earlier Python versions or Python 3 + return getattr(__future__, name, object()) + +unicode_literals = _get_feature("unicode_literals") +with_statement = _get_feature("with_statement") # dummy +division = _get_feature("division") +print_function = _get_feature("print_function") +absolute_import = _get_feature("absolute_import") +nested_scopes = _get_feature("nested_scopes") # dummy +generators = _get_feature("generators") # dummy +generator_stop = _get_feature("generator_stop") +annotations = _get_feature("annotations") + +del _get_feature diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Interpreter.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Interpreter.py new file mode 100644 index 0000000000000000000000000000000000000000..244397264f76784e90cced119bb18eb57149f9e4 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Interpreter.py @@ -0,0 +1,64 @@ +""" +This module deals with interpreting the parse tree as Python +would have done, in the compiler. + +For now this only covers parse tree to value conversion of +compile-time values.
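+ + For example, a parse-tree node for the constant expression 2 * 3 is + reduced to the plain Python value 6 via its compile_time_value() method.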
+""" + +from __future__ import absolute_import + +from .Nodes import * +from .ExprNodes import * +from .Errors import CompileError + + +class EmptyScope(object): + def lookup(self, name): + return None + +empty_scope = EmptyScope() + +def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()): + """ + Tries to interpret a list of compile time option nodes. + The result will be a tuple (optlist, optdict) but where + all expression nodes have been interpreted. The result is + in the form of tuples (value, pos). + + optlist is a list of nodes, while optdict is a DictNode (the + result optdict is a dict) + + If type_env is set, all type nodes will be analysed and the resulting + type set. Otherwise only interpretateable ExprNodes + are allowed, other nodes raises errors. + + A CompileError will be raised if there are problems. + """ + + def interpret(node, ix): + if ix in type_args: + if type_env: + type = node.analyse_as_type(type_env) + if not type: + raise CompileError(node.pos, "Invalid type.") + return (type, node.pos) + else: + raise CompileError(node.pos, "Type not allowed here.") + else: + if (sys.version_info[0] >=3 and + isinstance(node, StringNode) and + node.unicode_value is not None): + return (node.unicode_value, node.pos) + return (node.compile_time_value(empty_scope), node.pos) + + if optlist: + optlist = [interpret(x, ix) for ix, x in enumerate(optlist)] + if optdict: + assert isinstance(optdict, DictNode) + new_optdict = {} + for item in optdict.key_value_pairs: + new_key, dummy = interpret(item.key, None) + new_optdict[new_key] = interpret(item.value, item.key.value) + optdict = new_optdict + return (optlist, new_optdict) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Lexicon.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Lexicon.py new file mode 100644 index 0000000000000000000000000000000000000000..0820c2397e1fb566756460d99be54ce377c0c9e4 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Lexicon.py @@ -0,0 +1,342 @@ +# -*- coding: utf-8 -*- +# cython: language_level=3, py2_import=True +# +# Cython Scanner - Lexical Definitions +# + +from __future__ import absolute_import, unicode_literals + +raw_prefixes = "rR" +bytes_prefixes = "bB" +string_prefixes = "fFuU" + bytes_prefixes +char_prefixes = "cC" +any_string_prefix = raw_prefixes + string_prefixes + char_prefixes +IDENT = 'IDENT' + + +def make_lexicon(): + from ..Plex import \ + Str, Any, AnyBut, AnyChar, Rep, Rep1, Opt, Bol, Eol, Eof, \ + TEXT, IGNORE, Method, State, Lexicon, Range + + nonzero_digit = Any("123456789") + digit = Any("0123456789") + bindigit = Any("01") + octdigit = Any("01234567") + hexdigit = Any("0123456789ABCDEFabcdef") + indentation = Bol + Rep(Any(" \t")) + + # The list of valid unicode identifier characters are pretty slow to generate at runtime, + # and require Python3, so are just included directly here + # (via the generated code block at the bottom of the file) + unicode_start_character = (Any(unicode_start_ch_any) | Range(unicode_start_ch_range)) + unicode_continuation_character = ( + unicode_start_character | + Any(unicode_continuation_ch_any) | Range(unicode_continuation_ch_range)) + + def underscore_digits(d): + return Rep1(d) + Rep(Str("_") + Rep1(d)) + + def prefixed_digits(prefix, digits): + return prefix + Opt(Str("_")) + underscore_digits(digits) + + decimal = underscore_digits(digit) + dot = Str(".") + exponent = Any("Ee") + Opt(Any("+-")) + decimal + decimal_fract = (decimal + dot + Opt(decimal)) | (dot + 
decimal) + + #name = letter + Rep(letter | digit) + name = unicode_start_character + Rep(unicode_continuation_character) + intconst = (prefixed_digits(nonzero_digit, digit) | # decimal literals with underscores must not start with '0' + (Str("0") + (prefixed_digits(Any("Xx"), hexdigit) | + prefixed_digits(Any("Oo"), octdigit) | + prefixed_digits(Any("Bb"), bindigit) )) | + underscore_digits(Str('0')) # 0_0_0_0... is allowed as a decimal literal + | Rep1(digit) # FIXME: remove these Py2 style decimal/octal literals (PY_VERSION_HEX < 3) + ) + intsuffix = (Opt(Any("Uu")) + Opt(Any("Ll")) + Opt(Any("Ll"))) | (Opt(Any("Ll")) + Opt(Any("Ll")) + Opt(Any("Uu"))) + intliteral = intconst + intsuffix + fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent) + imagconst = (intconst | fltconst) + Any("jJ") + + # invalid combinations of prefixes are caught in p_string_literal + beginstring = Opt(Rep(Any(string_prefixes + raw_prefixes)) | + Any(char_prefixes) + ) + (Str("'") | Str('"') | Str("'''") | Str('"""')) + two_oct = octdigit + octdigit + three_oct = octdigit + octdigit + octdigit + two_hex = hexdigit + hexdigit + four_hex = two_hex + two_hex + escapeseq = Str("\\") + (two_oct | three_oct | + Str('N{') + Rep(AnyBut('}')) + Str('}') | + Str('u') + four_hex | Str('x') + two_hex | + Str('U') + four_hex + four_hex | AnyChar) + + bra = Any("([{") + ket = Any(")]}") + ellipsis = Str("...") + punct = Any(":,;+-*/|&<>=.%`~^?!@") + diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//", + "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=", + "<<=", ">>=", "**=", "//=", "->", "@=", "&&", "||", ':=') + spaces = Rep1(Any(" \t\f")) + escaped_newline = Str("\\\n") + lineterm = Eol + Opt(Str("\n")) + + comment = Str("#") + Rep(AnyBut("\n")) + + return Lexicon([ + (name, Method('normalize_ident')), + (intliteral, Method('strip_underscores', symbol='INT')), + (fltconst, Method('strip_underscores', symbol='FLOAT')), + (imagconst, Method('strip_underscores', symbol='IMAG')), + (ellipsis | punct | diphthong, TEXT), + + (bra, Method('open_bracket_action')), + (ket, Method('close_bracket_action')), + (lineterm, Method('newline_action')), + + (beginstring, Method('begin_string_action')), + + (comment, IGNORE), + (spaces, IGNORE), + (escaped_newline, IGNORE), + + State('INDENT', [ + (comment + lineterm, Method('commentline')), + (Opt(spaces) + Opt(comment) + lineterm, IGNORE), + (indentation, Method('indentation_action')), + (Eof, Method('eof_action')) + ]), + + State('SQ_STRING', [ + (escapeseq, 'ESCAPE'), + (Rep1(AnyBut("'\"\n\\")), 'CHARS'), + (Str('"'), 'CHARS'), + (Str("\n"), Method('unclosed_string_action')), + (Str("'"), Method('end_string_action')), + (Eof, 'EOF') + ]), + + State('DQ_STRING', [ + (escapeseq, 'ESCAPE'), + (Rep1(AnyBut('"\n\\')), 'CHARS'), + (Str("'"), 'CHARS'), + (Str("\n"), Method('unclosed_string_action')), + (Str('"'), Method('end_string_action')), + (Eof, 'EOF') + ]), + + State('TSQ_STRING', [ + (escapeseq, 'ESCAPE'), + (Rep1(AnyBut("'\"\n\\")), 'CHARS'), + (Any("'\""), 'CHARS'), + (Str("\n"), 'NEWLINE'), + (Str("'''"), Method('end_string_action')), + (Eof, 'EOF') + ]), + + State('TDQ_STRING', [ + (escapeseq, 'ESCAPE'), + (Rep1(AnyBut('"\'\n\\')), 'CHARS'), + (Any("'\""), 'CHARS'), + (Str("\n"), 'NEWLINE'), + (Str('"""'), Method('end_string_action')), + (Eof, 'EOF') + ]), + + (Eof, Method('eof_action')) + ], + + # FIXME: Plex 1.9 needs different args here from Plex 1.1.4 + #debug_flags = scanner_debug_flags, + #debug_file = scanner_dump_file + ) + + +# BEGIN GENERATED CODE 
+# Generated with 'cython-generate-lexicon.py' from: +# cpython 3.12.0a7+ (heads/master:4cd1cc843a, Apr 11 2023, 10:32:26) [GCC 11.3.0] + +unicode_start_ch_any = ( + u"\u005f\u00aa\u00b5\u00ba\u02ec\u02ee\u037f\u0386\u038c\u0559\u06d5" + u"\u06ff\u0710\u07b1\u07fa\u081a\u0824\u0828\u093d\u0950\u09b2\u09bd" + u"\u09ce\u09fc\u0a5e\u0abd\u0ad0\u0af9\u0b3d\u0b71\u0b83\u0b9c\u0bd0" + u"\u0c3d\u0c5d\u0c80\u0cbd\u0d3d\u0d4e\u0dbd\u0e32\u0e84\u0ea5\u0eb2" + u"\u0ebd\u0ec6\u0f00\u103f\u1061\u108e\u10c7\u10cd\u1258\u12c0\u17d7" + u"\u17dc\u18aa\u1aa7\u1cfa\u1f59\u1f5b\u1f5d\u1fbe\u2071\u207f\u2102" + u"\u2107\u2115\u2124\u2126\u2128\u214e\u2d27\u2d2d\u2d6f\ua7d3\ua8fb" + u"\ua9cf\uaa7a\uaab1\uaac0\uaac2\ufb1d\ufb3e\ufe71\ufe73\ufe77\ufe79" + u"\ufe7b\ufe7d\U00010808\U0001083c\U00010a00\U00010f27\U00011075\U00011144\U00011147\U00011176\U000111da" + u"\U000111dc\U00011288\U0001133d\U00011350\U000114c7\U00011644\U000116b8\U00011909\U0001193f\U00011941\U000119e1" + u"\U000119e3\U00011a00\U00011a3a\U00011a50\U00011a9d\U00011c40\U00011d46\U00011d98\U00011f02\U00011fb0\U00016f50" + u"\U00016fe3\U0001b132\U0001b155\U0001d4a2\U0001d4bb\U0001d546\U0001e14e\U0001e94b\U0001ee24\U0001ee27\U0001ee39" + u"\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f" + u"\U0001ee64\U0001ee7e" +) +unicode_start_ch_range = ( + u"\u0041\u005a\u0061\u007a\u00c0\u00d6\u00d8\u00f6\u00f8\u02c1\u02c6" + u"\u02d1\u02e0\u02e4\u0370\u0374\u0376\u0377\u037b\u037d\u0388\u038a" + u"\u038e\u03a1\u03a3\u03f5\u03f7\u0481\u048a\u052f\u0531\u0556\u0560" + u"\u0588\u05d0\u05ea\u05ef\u05f2\u0620\u064a\u066e\u066f\u0671\u06d3" + u"\u06e5\u06e6\u06ee\u06ef\u06fa\u06fc\u0712\u072f\u074d\u07a5\u07ca" + u"\u07ea\u07f4\u07f5\u0800\u0815\u0840\u0858\u0860\u086a\u0870\u0887" + u"\u0889\u088e\u08a0\u08c9\u0904\u0939\u0958\u0961\u0971\u0980\u0985" + u"\u098c\u098f\u0990\u0993\u09a8\u09aa\u09b0\u09b6\u09b9\u09dc\u09dd" + u"\u09df\u09e1\u09f0\u09f1\u0a05\u0a0a\u0a0f\u0a10\u0a13\u0a28\u0a2a" + u"\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59\u0a5c\u0a72\u0a74" + u"\u0a85\u0a8d\u0a8f\u0a91\u0a93\u0aa8\u0aaa\u0ab0\u0ab2\u0ab3\u0ab5" + u"\u0ab9\u0ae0\u0ae1\u0b05\u0b0c\u0b0f\u0b10\u0b13\u0b28\u0b2a\u0b30" + u"\u0b32\u0b33\u0b35\u0b39\u0b5c\u0b5d\u0b5f\u0b61\u0b85\u0b8a\u0b8e" + u"\u0b90\u0b92\u0b95\u0b99\u0b9a\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8\u0baa" + u"\u0bae\u0bb9\u0c05\u0c0c\u0c0e\u0c10\u0c12\u0c28\u0c2a\u0c39\u0c58" + u"\u0c5a\u0c60\u0c61\u0c85\u0c8c\u0c8e\u0c90\u0c92\u0ca8\u0caa\u0cb3" + u"\u0cb5\u0cb9\u0cdd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d04\u0d0c\u0d0e" + u"\u0d10\u0d12\u0d3a\u0d54\u0d56\u0d5f\u0d61\u0d7a\u0d7f\u0d85\u0d96" + u"\u0d9a\u0db1\u0db3\u0dbb\u0dc0\u0dc6\u0e01\u0e30\u0e40\u0e46\u0e81" + u"\u0e82\u0e86\u0e8a\u0e8c\u0ea3\u0ea7\u0eb0\u0ec0\u0ec4\u0edc\u0edf" + u"\u0f40\u0f47\u0f49\u0f6c\u0f88\u0f8c\u1000\u102a\u1050\u1055\u105a" + u"\u105d\u1065\u1066\u106e\u1070\u1075\u1081\u10a0\u10c5\u10d0\u10fa" + u"\u10fc\u1248\u124a\u124d\u1250\u1256\u125a\u125d\u1260\u1288\u128a" + u"\u128d\u1290\u12b0\u12b2\u12b5\u12b8\u12be\u12c2\u12c5\u12c8\u12d6" + u"\u12d8\u1310\u1312\u1315\u1318\u135a\u1380\u138f\u13a0\u13f5\u13f8" + u"\u13fd\u1401\u166c\u166f\u167f\u1681\u169a\u16a0\u16ea\u16ee\u16f8" + u"\u1700\u1711\u171f\u1731\u1740\u1751\u1760\u176c\u176e\u1770\u1780" + u"\u17b3\u1820\u1878\u1880\u18a8\u18b0\u18f5\u1900\u191e\u1950\u196d" + u"\u1970\u1974\u1980\u19ab\u19b0\u19c9\u1a00\u1a16\u1a20\u1a54\u1b05" + u"\u1b33\u1b45\u1b4c\u1b83\u1ba0\u1bae\u1baf\u1bba\u1be5\u1c00\u1c23" + 
u"\u1c4d\u1c4f\u1c5a\u1c7d\u1c80\u1c88\u1c90\u1cba\u1cbd\u1cbf\u1ce9" + u"\u1cec\u1cee\u1cf3\u1cf5\u1cf6\u1d00\u1dbf\u1e00\u1f15\u1f18\u1f1d" + u"\u1f20\u1f45\u1f48\u1f4d\u1f50\u1f57\u1f5f\u1f7d\u1f80\u1fb4\u1fb6" + u"\u1fbc\u1fc2\u1fc4\u1fc6\u1fcc\u1fd0\u1fd3\u1fd6\u1fdb\u1fe0\u1fec" + u"\u1ff2\u1ff4\u1ff6\u1ffc\u2090\u209c\u210a\u2113\u2118\u211d\u212a" + u"\u2139\u213c\u213f\u2145\u2149\u2160\u2188\u2c00\u2ce4\u2ceb\u2cee" + u"\u2cf2\u2cf3\u2d00\u2d25\u2d30\u2d67\u2d80\u2d96\u2da0\u2da6\u2da8" + u"\u2dae\u2db0\u2db6\u2db8\u2dbe\u2dc0\u2dc6\u2dc8\u2dce\u2dd0\u2dd6" + u"\u2dd8\u2dde\u3005\u3007\u3021\u3029\u3031\u3035\u3038\u303c\u3041" + u"\u3096\u309d\u309f\u30a1\u30fa\u30fc\u30ff\u3105\u312f\u3131\u318e" + u"\u31a0\u31bf\u31f0\u31ff\u3400\u4dbf\u4e00\ua48c\ua4d0\ua4fd\ua500" + u"\ua60c\ua610\ua61f\ua62a\ua62b\ua640\ua66e\ua67f\ua69d\ua6a0\ua6ef" + u"\ua717\ua71f\ua722\ua788\ua78b\ua7ca\ua7d0\ua7d1\ua7d5\ua7d9\ua7f2" + u"\ua801\ua803\ua805\ua807\ua80a\ua80c\ua822\ua840\ua873\ua882\ua8b3" + u"\ua8f2\ua8f7\ua8fd\ua8fe\ua90a\ua925\ua930\ua946\ua960\ua97c\ua984" + u"\ua9b2\ua9e0\ua9e4\ua9e6\ua9ef\ua9fa\ua9fe\uaa00\uaa28\uaa40\uaa42" + u"\uaa44\uaa4b\uaa60\uaa76\uaa7e\uaaaf\uaab5\uaab6\uaab9\uaabd\uaadb" + u"\uaadd\uaae0\uaaea\uaaf2\uaaf4\uab01\uab06\uab09\uab0e\uab11\uab16" + u"\uab20\uab26\uab28\uab2e\uab30\uab5a\uab5c\uab69\uab70\uabe2\uac00" + u"\ud7a3\ud7b0\ud7c6\ud7cb\ud7fb\uf900\ufa6d\ufa70\ufad9\ufb00\ufb06" + u"\ufb13\ufb17\ufb1f\ufb28\ufb2a\ufb36\ufb38\ufb3c\ufb40\ufb41\ufb43" + u"\ufb44\ufb46\ufbb1\ufbd3\ufc5d\ufc64\ufd3d\ufd50\ufd8f\ufd92\ufdc7" + u"\ufdf0\ufdf9\ufe7f\ufefc\uff21\uff3a\uff41\uff5a\uff66\uff9d\uffa0" + u"\uffbe\uffc2\uffc7\uffca\uffcf\uffd2\uffd7\uffda\uffdc\U00010000\U0001000b" + u"\U0001000d\U00010026\U00010028\U0001003a\U0001003c\U0001003d\U0001003f\U0001004d\U00010050\U0001005d\U00010080" + u"\U000100fa\U00010140\U00010174\U00010280\U0001029c\U000102a0\U000102d0\U00010300\U0001031f\U0001032d\U0001034a" + u"\U00010350\U00010375\U00010380\U0001039d\U000103a0\U000103c3\U000103c8\U000103cf\U000103d1\U000103d5\U00010400" + u"\U0001049d\U000104b0\U000104d3\U000104d8\U000104fb\U00010500\U00010527\U00010530\U00010563\U00010570\U0001057a" + u"\U0001057c\U0001058a\U0001058c\U00010592\U00010594\U00010595\U00010597\U000105a1\U000105a3\U000105b1\U000105b3" + u"\U000105b9\U000105bb\U000105bc\U00010600\U00010736\U00010740\U00010755\U00010760\U00010767\U00010780\U00010785" + u"\U00010787\U000107b0\U000107b2\U000107ba\U00010800\U00010805\U0001080a\U00010835\U00010837\U00010838\U0001083f" + u"\U00010855\U00010860\U00010876\U00010880\U0001089e\U000108e0\U000108f2\U000108f4\U000108f5\U00010900\U00010915" + u"\U00010920\U00010939\U00010980\U000109b7\U000109be\U000109bf\U00010a10\U00010a13\U00010a15\U00010a17\U00010a19" + u"\U00010a35\U00010a60\U00010a7c\U00010a80\U00010a9c\U00010ac0\U00010ac7\U00010ac9\U00010ae4\U00010b00\U00010b35" + u"\U00010b40\U00010b55\U00010b60\U00010b72\U00010b80\U00010b91\U00010c00\U00010c48\U00010c80\U00010cb2\U00010cc0" + u"\U00010cf2\U00010d00\U00010d23\U00010e80\U00010ea9\U00010eb0\U00010eb1\U00010f00\U00010f1c\U00010f30\U00010f45" + u"\U00010f70\U00010f81\U00010fb0\U00010fc4\U00010fe0\U00010ff6\U00011003\U00011037\U00011071\U00011072\U00011083" + u"\U000110af\U000110d0\U000110e8\U00011103\U00011126\U00011150\U00011172\U00011183\U000111b2\U000111c1\U000111c4" + u"\U00011200\U00011211\U00011213\U0001122b\U0001123f\U00011240\U00011280\U00011286\U0001128a\U0001128d\U0001128f" + 
u"\U0001129d\U0001129f\U000112a8\U000112b0\U000112de\U00011305\U0001130c\U0001130f\U00011310\U00011313\U00011328" + u"\U0001132a\U00011330\U00011332\U00011333\U00011335\U00011339\U0001135d\U00011361\U00011400\U00011434\U00011447" + u"\U0001144a\U0001145f\U00011461\U00011480\U000114af\U000114c4\U000114c5\U00011580\U000115ae\U000115d8\U000115db" + u"\U00011600\U0001162f\U00011680\U000116aa\U00011700\U0001171a\U00011740\U00011746\U00011800\U0001182b\U000118a0" + u"\U000118df\U000118ff\U00011906\U0001190c\U00011913\U00011915\U00011916\U00011918\U0001192f\U000119a0\U000119a7" + u"\U000119aa\U000119d0\U00011a0b\U00011a32\U00011a5c\U00011a89\U00011ab0\U00011af8\U00011c00\U00011c08\U00011c0a" + u"\U00011c2e\U00011c72\U00011c8f\U00011d00\U00011d06\U00011d08\U00011d09\U00011d0b\U00011d30\U00011d60\U00011d65" + u"\U00011d67\U00011d68\U00011d6a\U00011d89\U00011ee0\U00011ef2\U00011f04\U00011f10\U00011f12\U00011f33\U00012000" + u"\U00012399\U00012400\U0001246e\U00012480\U00012543\U00012f90\U00012ff0\U00013000\U0001342f\U00013441\U00013446" + u"\U00014400\U00014646\U00016800\U00016a38\U00016a40\U00016a5e\U00016a70\U00016abe\U00016ad0\U00016aed\U00016b00" + u"\U00016b2f\U00016b40\U00016b43\U00016b63\U00016b77\U00016b7d\U00016b8f\U00016e40\U00016e7f\U00016f00\U00016f4a" + u"\U00016f93\U00016f9f\U00016fe0\U00016fe1\U00017000\U000187f7\U00018800\U00018cd5\U00018d00\U00018d08\U0001aff0" + u"\U0001aff3\U0001aff5\U0001affb\U0001affd\U0001affe\U0001b000\U0001b122\U0001b150\U0001b152\U0001b164\U0001b167" + u"\U0001b170\U0001b2fb\U0001bc00\U0001bc6a\U0001bc70\U0001bc7c\U0001bc80\U0001bc88\U0001bc90\U0001bc99\U0001d400" + u"\U0001d454\U0001d456\U0001d49c\U0001d49e\U0001d49f\U0001d4a5\U0001d4a6\U0001d4a9\U0001d4ac\U0001d4ae\U0001d4b9" + u"\U0001d4bd\U0001d4c3\U0001d4c5\U0001d505\U0001d507\U0001d50a\U0001d50d\U0001d514\U0001d516\U0001d51c\U0001d51e" + u"\U0001d539\U0001d53b\U0001d53e\U0001d540\U0001d544\U0001d54a\U0001d550\U0001d552\U0001d6a5\U0001d6a8\U0001d6c0" + u"\U0001d6c2\U0001d6da\U0001d6dc\U0001d6fa\U0001d6fc\U0001d714\U0001d716\U0001d734\U0001d736\U0001d74e\U0001d750" + u"\U0001d76e\U0001d770\U0001d788\U0001d78a\U0001d7a8\U0001d7aa\U0001d7c2\U0001d7c4\U0001d7cb\U0001df00\U0001df1e" + u"\U0001df25\U0001df2a\U0001e030\U0001e06d\U0001e100\U0001e12c\U0001e137\U0001e13d\U0001e290\U0001e2ad\U0001e2c0" + u"\U0001e2eb\U0001e4d0\U0001e4eb\U0001e7e0\U0001e7e6\U0001e7e8\U0001e7eb\U0001e7ed\U0001e7ee\U0001e7f0\U0001e7fe" + u"\U0001e800\U0001e8c4\U0001e900\U0001e943\U0001ee00\U0001ee03\U0001ee05\U0001ee1f\U0001ee21\U0001ee22\U0001ee29" + u"\U0001ee32\U0001ee34\U0001ee37\U0001ee4d\U0001ee4f\U0001ee51\U0001ee52\U0001ee61\U0001ee62\U0001ee67\U0001ee6a" + u"\U0001ee6c\U0001ee72\U0001ee74\U0001ee77\U0001ee79\U0001ee7c\U0001ee80\U0001ee89\U0001ee8b\U0001ee9b\U0001eea1" + u"\U0001eea3\U0001eea5\U0001eea9\U0001eeab\U0001eebb\U00020000\U0002a6df\U0002a700\U0002b739\U0002b740\U0002b81d" + u"\U0002b820\U0002cea1\U0002ceb0\U0002ebe0\U0002f800\U0002fa1d\U00030000\U0003134a" +) +unicode_continuation_ch_any = ( + u"\u00b7\u0387\u05bf\u05c7\u0670\u0711\u07fd\u09bc\u09d7\u09fe\u0a3c" + u"\u0a51\u0a75\u0abc\u0b3c\u0b82\u0bd7\u0c3c\u0cbc\u0cf3\u0d57\u0dca" + u"\u0dd6\u0e31\u0eb1\u0f35\u0f37\u0f39\u0fc6\u17dd\u18a9\u1ced\u1cf4" + u"\u2054\u20e1\u2d7f\ua66f\ua802\ua806\ua80b\ua82c\ua9e5\uaa43\uaab0" + u"\uaac1\ufb1e\uff3f\U000101fd\U000102e0\U00010a3f\U000110c2\U00011173\U0001123e\U00011241\U00011357" + u"\U0001145e\U00011940\U000119e4\U00011a47\U00011d3a\U00011d47\U00011f03\U00013440\U00016f4f\U00016fe4\U0001da75" + 
u"\U0001da84\U0001e08f\U0001e2ae" +) +unicode_continuation_ch_range = ( + u"\u0030\u0039\u0300\u036f\u0483\u0487\u0591\u05bd\u05c1\u05c2\u05c4" + u"\u05c5\u0610\u061a\u064b\u0669\u06d6\u06dc\u06df\u06e4\u06e7\u06e8" + u"\u06ea\u06ed\u06f0\u06f9\u0730\u074a\u07a6\u07b0\u07c0\u07c9\u07eb" + u"\u07f3\u0816\u0819\u081b\u0823\u0825\u0827\u0829\u082d\u0859\u085b" + u"\u0898\u089f\u08ca\u08e1\u08e3\u0903\u093a\u093c\u093e\u094f\u0951" + u"\u0957\u0962\u0963\u0966\u096f\u0981\u0983\u09be\u09c4\u09c7\u09c8" + u"\u09cb\u09cd\u09e2\u09e3\u09e6\u09ef\u0a01\u0a03\u0a3e\u0a42\u0a47" + u"\u0a48\u0a4b\u0a4d\u0a66\u0a71\u0a81\u0a83\u0abe\u0ac5\u0ac7\u0ac9" + u"\u0acb\u0acd\u0ae2\u0ae3\u0ae6\u0aef\u0afa\u0aff\u0b01\u0b03\u0b3e" + u"\u0b44\u0b47\u0b48\u0b4b\u0b4d\u0b55\u0b57\u0b62\u0b63\u0b66\u0b6f" + u"\u0bbe\u0bc2\u0bc6\u0bc8\u0bca\u0bcd\u0be6\u0bef\u0c00\u0c04\u0c3e" + u"\u0c44\u0c46\u0c48\u0c4a\u0c4d\u0c55\u0c56\u0c62\u0c63\u0c66\u0c6f" + u"\u0c81\u0c83\u0cbe\u0cc4\u0cc6\u0cc8\u0cca\u0ccd\u0cd5\u0cd6\u0ce2" + u"\u0ce3\u0ce6\u0cef\u0d00\u0d03\u0d3b\u0d3c\u0d3e\u0d44\u0d46\u0d48" + u"\u0d4a\u0d4d\u0d62\u0d63\u0d66\u0d6f\u0d81\u0d83\u0dcf\u0dd4\u0dd8" + u"\u0ddf\u0de6\u0def\u0df2\u0df3\u0e33\u0e3a\u0e47\u0e4e\u0e50\u0e59" + u"\u0eb3\u0ebc\u0ec8\u0ece\u0ed0\u0ed9\u0f18\u0f19\u0f20\u0f29\u0f3e" + u"\u0f3f\u0f71\u0f84\u0f86\u0f87\u0f8d\u0f97\u0f99\u0fbc\u102b\u103e" + u"\u1040\u1049\u1056\u1059\u105e\u1060\u1062\u1064\u1067\u106d\u1071" + u"\u1074\u1082\u108d\u108f\u109d\u135d\u135f\u1369\u1371\u1712\u1715" + u"\u1732\u1734\u1752\u1753\u1772\u1773\u17b4\u17d3\u17e0\u17e9\u180b" + u"\u180d\u180f\u1819\u1920\u192b\u1930\u193b\u1946\u194f\u19d0\u19da" + u"\u1a17\u1a1b\u1a55\u1a5e\u1a60\u1a7c\u1a7f\u1a89\u1a90\u1a99\u1ab0" + u"\u1abd\u1abf\u1ace\u1b00\u1b04\u1b34\u1b44\u1b50\u1b59\u1b6b\u1b73" + u"\u1b80\u1b82\u1ba1\u1bad\u1bb0\u1bb9\u1be6\u1bf3\u1c24\u1c37\u1c40" + u"\u1c49\u1c50\u1c59\u1cd0\u1cd2\u1cd4\u1ce8\u1cf7\u1cf9\u1dc0\u1dff" + u"\u203f\u2040\u20d0\u20dc\u20e5\u20f0\u2cef\u2cf1\u2de0\u2dff\u302a" + u"\u302f\u3099\u309a\ua620\ua629\ua674\ua67d\ua69e\ua69f\ua6f0\ua6f1" + u"\ua823\ua827\ua880\ua881\ua8b4\ua8c5\ua8d0\ua8d9\ua8e0\ua8f1\ua8ff" + u"\ua909\ua926\ua92d\ua947\ua953\ua980\ua983\ua9b3\ua9c0\ua9d0\ua9d9" + u"\ua9f0\ua9f9\uaa29\uaa36\uaa4c\uaa4d\uaa50\uaa59\uaa7b\uaa7d\uaab2" + u"\uaab4\uaab7\uaab8\uaabe\uaabf\uaaeb\uaaef\uaaf5\uaaf6\uabe3\uabea" + u"\uabec\uabed\uabf0\uabf9\ufe00\ufe0f\ufe20\ufe2f\ufe33\ufe34\ufe4d" + u"\ufe4f\uff10\uff19\uff9e\uff9f\U00010376\U0001037a\U000104a0\U000104a9\U00010a01\U00010a03" + u"\U00010a05\U00010a06\U00010a0c\U00010a0f\U00010a38\U00010a3a\U00010ae5\U00010ae6\U00010d24\U00010d27\U00010d30" + u"\U00010d39\U00010eab\U00010eac\U00010efd\U00010eff\U00010f46\U00010f50\U00010f82\U00010f85\U00011000\U00011002" + u"\U00011038\U00011046\U00011066\U00011070\U00011073\U00011074\U0001107f\U00011082\U000110b0\U000110ba\U000110f0" + u"\U000110f9\U00011100\U00011102\U00011127\U00011134\U00011136\U0001113f\U00011145\U00011146\U00011180\U00011182" + u"\U000111b3\U000111c0\U000111c9\U000111cc\U000111ce\U000111d9\U0001122c\U00011237\U000112df\U000112ea\U000112f0" + u"\U000112f9\U00011300\U00011303\U0001133b\U0001133c\U0001133e\U00011344\U00011347\U00011348\U0001134b\U0001134d" + u"\U00011362\U00011363\U00011366\U0001136c\U00011370\U00011374\U00011435\U00011446\U00011450\U00011459\U000114b0" + u"\U000114c3\U000114d0\U000114d9\U000115af\U000115b5\U000115b8\U000115c0\U000115dc\U000115dd\U00011630\U00011640" + 
u"\U00011650\U00011659\U000116ab\U000116b7\U000116c0\U000116c9\U0001171d\U0001172b\U00011730\U00011739\U0001182c" + u"\U0001183a\U000118e0\U000118e9\U00011930\U00011935\U00011937\U00011938\U0001193b\U0001193e\U00011942\U00011943" + u"\U00011950\U00011959\U000119d1\U000119d7\U000119da\U000119e0\U00011a01\U00011a0a\U00011a33\U00011a39\U00011a3b" + u"\U00011a3e\U00011a51\U00011a5b\U00011a8a\U00011a99\U00011c2f\U00011c36\U00011c38\U00011c3f\U00011c50\U00011c59" + u"\U00011c92\U00011ca7\U00011ca9\U00011cb6\U00011d31\U00011d36\U00011d3c\U00011d3d\U00011d3f\U00011d45\U00011d50" + u"\U00011d59\U00011d8a\U00011d8e\U00011d90\U00011d91\U00011d93\U00011d97\U00011da0\U00011da9\U00011ef3\U00011ef6" + u"\U00011f00\U00011f01\U00011f34\U00011f3a\U00011f3e\U00011f42\U00011f50\U00011f59\U00013447\U00013455\U00016a60" + u"\U00016a69\U00016ac0\U00016ac9\U00016af0\U00016af4\U00016b30\U00016b36\U00016b50\U00016b59\U00016f51\U00016f87" + u"\U00016f8f\U00016f92\U00016ff0\U00016ff1\U0001bc9d\U0001bc9e\U0001cf00\U0001cf2d\U0001cf30\U0001cf46\U0001d165" + u"\U0001d169\U0001d16d\U0001d172\U0001d17b\U0001d182\U0001d185\U0001d18b\U0001d1aa\U0001d1ad\U0001d242\U0001d244" + u"\U0001d7ce\U0001d7ff\U0001da00\U0001da36\U0001da3b\U0001da6c\U0001da9b\U0001da9f\U0001daa1\U0001daaf\U0001e000" + u"\U0001e006\U0001e008\U0001e018\U0001e01b\U0001e021\U0001e023\U0001e024\U0001e026\U0001e02a\U0001e130\U0001e136" + u"\U0001e140\U0001e149\U0001e2ec\U0001e2f9\U0001e4ec\U0001e4f9\U0001e8d0\U0001e8d6\U0001e944\U0001e94a\U0001e950" + u"\U0001e959\U0001fbf0\U0001fbf9" +) + +# END GENERATED CODE diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Main.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Main.py new file mode 100644 index 0000000000000000000000000000000000000000..80946c0776719637d247baaa87a7af9c988f0f2f --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Main.py @@ -0,0 +1,789 @@ +# +# Cython Top Level +# + +from __future__ import absolute_import, print_function + +import os +import re +import sys +import io + +if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 3): + sys.stderr.write("Sorry, Cython requires Python 2.7 or 3.3+, found %d.%d\n" % tuple(sys.version_info[:2])) + sys.exit(1) + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + +# Do not import Parsing here, import it when needed, because Parsing imports +# Nodes, which globally needs debug command line options initialized to set a +# conditional metaclass. These options are processed by CmdLine called from +# main() in this file. +# import Parsing +from . import Errors +from .StringEncoding import EncodedString +from .Scanning import PyrexScanner, FileSourceDescriptor +from .Errors import PyrexError, CompileError, error, warning +from .Symtab import ModuleScope +from .. import Utils +from . 
import Options +from .Options import CompilationOptions, default_options +from .CmdLine import parse_command_line +from .Lexicon import (unicode_start_ch_any, unicode_continuation_ch_any, + unicode_start_ch_range, unicode_continuation_ch_range) + + +def _make_range_re(chrs): + out = [] + for i in range(0, len(chrs), 2): + out.append(u"{0}-{1}".format(chrs[i], chrs[i+1])) + return u"".join(out) + +# py2 version looked like r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$" +module_name_pattern = u"[{0}{1}][{0}{2}{1}{3}]*".format( + unicode_start_ch_any, _make_range_re(unicode_start_ch_range), + unicode_continuation_ch_any, + _make_range_re(unicode_continuation_ch_range)) +module_name_pattern = re.compile(u"{0}(\\.{0})*$".format(module_name_pattern)) + + +standard_include_path = os.path.abspath( + os.path.join(os.path.dirname(os.path.dirname(__file__)), 'Includes')) + + +class Context(object): + # This class encapsulates the context needed for compiling + # one or more Cython implementation files along with their + # associated and imported declaration files. It includes + # the root of the module import namespace and the list + # of directories to search for include files. + # + # modules {string : ModuleScope} + # include_directories [string] + # future_directives [object] + # language_level int currently 2 or 3 for Python 2/3 + + cython_scope = None + language_level = None # warn when not set but default to Py2 + + def __init__(self, include_directories, compiler_directives, cpp=False, + language_level=None, options=None): + # cython_scope is a hack, set to False by subclasses, in order to break + # an infinite loop. + # Better code organization would fix it. + + from . import Builtin, CythonScope + self.modules = {"__builtin__" : Builtin.builtin_scope} + self.cython_scope = CythonScope.create_cython_scope(self) + self.modules["cython"] = self.cython_scope + self.include_directories = include_directories + self.future_directives = set() + self.compiler_directives = compiler_directives + self.cpp = cpp + self.options = options + + self.pxds = {} # full name -> node tree + self._interned = {} # (type(value), value, *key_args) -> interned_value + + if language_level is not None: + self.set_language_level(language_level) + + self.legacy_implicit_noexcept = self.compiler_directives.get('legacy_implicit_noexcept', False) + + self.gdb_debug_outputwriter = None + + @classmethod + def from_options(cls, options): + return cls(options.include_path, options.compiler_directives, + options.cplus, options.language_level, options=options) + + def set_language_level(self, level): + from .Future import print_function, unicode_literals, absolute_import, division, generator_stop + future_directives = set() + if level == '3str': + level = 3 + else: + level = int(level) + if level >= 3: + future_directives.add(unicode_literals) + if level >= 3: + future_directives.update([print_function, absolute_import, division, generator_stop]) + self.language_level = level + self.future_directives = future_directives + if level >= 3: + self.modules['builtins'] = self.modules['__builtin__'] + + def intern_ustring(self, value, encoding=None): + key = (EncodedString, value, encoding) + try: + return self._interned[key] + except KeyError: + pass + value = EncodedString(value) + if encoding: + value.encoding = encoding + self._interned[key] = value + return value + + # pipeline creation functions can now be found in Pipeline.py + + def process_pxd(self, source_desc, scope, module_name): + from . 
+ import Pipeline + if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx': + source = CompilationSource(source_desc, module_name, os.getcwd()) + result_sink = create_default_resultobj(source, self.options) + pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink) + result = Pipeline.run_pipeline(pipeline, source) + else: + pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name) + result = Pipeline.run_pipeline(pipeline, source_desc) + return result + + def nonfatal_error(self, exc): + return Errors.report_error(exc) + + def _split_qualified_name(self, qualified_name, relative_import=False): + # Splits qualified_name into parts in the form of 2-tuples: (PART_NAME, IS_PACKAGE). + qualified_name_parts = qualified_name.split('.') + last_part = qualified_name_parts.pop() + qualified_name_parts = [(p, True) for p in qualified_name_parts] + if last_part != '__init__': + # If the last part is __init__, it is omitted. Otherwise, we need to check whether we can find + # an __init__.pyx/__init__.py file to determine whether the last part is a package or not. + is_package = False + for suffix in ('.py', '.pyx'): + path = self.search_include_directories( + qualified_name, suffix=suffix, source_pos=None, source_file_path=None, sys_path=not relative_import) + if path: + is_package = self._is_init_file(path) + break + + qualified_name_parts.append((last_part, is_package)) + return qualified_name_parts + + @staticmethod + def _is_init_file(path): + return os.path.basename(path) in ('__init__.pyx', '__init__.py', '__init__.pxd') if path else False + + @staticmethod + def _check_pxd_filename(pos, pxd_pathname, qualified_name): + if not pxd_pathname: + return + pxd_filename = os.path.basename(pxd_pathname) + if '.' in qualified_name and qualified_name == os.path.splitext(pxd_filename)[0]: + warning(pos, "Dotted filenames ('%s') are deprecated." + " Please use the normal Python package directory layout." % pxd_filename, level=1) + + def find_module(self, module_name, from_module=None, pos=None, need_pxd=1, + absolute_fallback=True, relative_import=False): + # Finds and returns the module scope corresponding to + # the given relative or absolute module name. If this + # is the first time the module has been requested, finds + # the corresponding .pxd file and processes it. + # If from_module is not None, it must be a module scope, + # and the module will first be searched for relative to + # that module, provided its name is not a dotted name. + debug_find_module = 0 + if debug_find_module: + print("Context.find_module: module_name = %s, from_module = %s, pos = %s, need_pxd = %s" % ( + module_name, from_module, pos, need_pxd)) + + scope = None + pxd_pathname = None + if from_module: + if module_name: + # from .module import ... + qualified_name = from_module.qualify_name(module_name) + else: + # from . import ...
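+ # e.g. "from . import utils" inside package 'pkg' arrives here with an + # empty module_name, so the lookup resolves against 'pkg' itself.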
+ qualified_name = from_module.qualified_name + scope = from_module + from_module = None + else: + qualified_name = module_name + + if not module_name_pattern.match(qualified_name): + raise CompileError(pos or (module_name, 0, 0), + u"'%s' is not a valid module name" % module_name) + + if from_module: + if debug_find_module: + print("...trying relative import") + scope = from_module.lookup_submodule(module_name) + if not scope: + pxd_pathname = self.find_pxd_file(qualified_name, pos, sys_path=not relative_import) + self._check_pxd_filename(pos, pxd_pathname, qualified_name) + if pxd_pathname: + is_package = self._is_init_file(pxd_pathname) + scope = from_module.find_submodule(module_name, as_package=is_package) + if not scope: + if debug_find_module: + print("...trying absolute import") + if absolute_fallback: + qualified_name = module_name + scope = self + for name, is_package in self._split_qualified_name(qualified_name, relative_import=relative_import): + scope = scope.find_submodule(name, as_package=is_package) + if debug_find_module: + print("...scope = %s" % scope) + if not scope.pxd_file_loaded: + if debug_find_module: + print("...pxd not loaded") + if not pxd_pathname: + if debug_find_module: + print("...looking for pxd file") + # Only look in sys.path if we are explicitly looking + # for a .pxd file. + pxd_pathname = self.find_pxd_file(qualified_name, pos, sys_path=need_pxd and not relative_import) + self._check_pxd_filename(pos, pxd_pathname, qualified_name) + if debug_find_module: + print("......found %s" % pxd_pathname) + if not pxd_pathname and need_pxd: + # Set pxd_file_loaded such that we don't need to + # look for the non-existing pxd file next time. + scope.pxd_file_loaded = True + package_pathname = self.search_include_directories( + qualified_name, suffix=".py", source_pos=pos, sys_path=not relative_import) + if package_pathname and package_pathname.endswith(Utils.PACKAGE_FILES): + pass + else: + error(pos, "'%s.pxd' not found" % qualified_name.replace('.', os.sep)) + if pxd_pathname: + scope.pxd_file_loaded = True + try: + if debug_find_module: + print("Context.find_module: Parsing %s" % pxd_pathname) + rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1] + if not pxd_pathname.endswith(rel_path): + rel_path = pxd_pathname # safety measure to prevent printing incorrect paths + source_desc = FileSourceDescriptor(pxd_pathname, rel_path) + err, result = self.process_pxd(source_desc, scope, qualified_name) + if err: + raise err + (pxd_codenodes, pxd_scope) = result + self.pxds[module_name] = (pxd_codenodes, pxd_scope) + except CompileError: + pass + return scope + + def find_pxd_file(self, qualified_name, pos=None, sys_path=True, source_file_path=None): + # Search include path (and sys.path if sys_path is True) for + # the .pxd file corresponding to the given fully-qualified + # module name. + # Will find either a dotted filename or a file in a + # package directory. If a source file position is given, + # the directory containing the source file is searched first + # for a dotted filename, and its containing package root + # directory is searched first for a non-dotted filename. 
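+ # Illustrative example (hypothetical module name): for 'pkg.mod', the + # dotted filename 'pkg.mod.pxd' is tried near the source file before + # 'pkg/mod.pxd' is searched for under the include directories.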
+ pxd = self.search_include_directories( + qualified_name, suffix=".pxd", source_pos=pos, sys_path=sys_path, source_file_path=source_file_path) + if pxd is None and Options.cimport_from_pyx: + return self.find_pyx_file(qualified_name, pos, sys_path=sys_path) + return pxd + + def find_pyx_file(self, qualified_name, pos=None, sys_path=True, source_file_path=None): + # Search include path for the .pyx file corresponding to the + # given fully-qualified module name, as for find_pxd_file(). + return self.search_include_directories( + qualified_name, suffix=".pyx", source_pos=pos, sys_path=sys_path, source_file_path=source_file_path) + + def find_include_file(self, filename, pos=None, source_file_path=None): + # Search list of include directories for filename. + # Reports an error and returns None if not found. + path = self.search_include_directories( + filename, source_pos=pos, include=True, source_file_path=source_file_path) + if not path: + error(pos, "'%s' not found" % filename) + return path + + def search_include_directories(self, qualified_name, + suffix=None, source_pos=None, include=False, sys_path=False, source_file_path=None): + include_dirs = self.include_directories + if sys_path: + include_dirs = include_dirs + sys.path + # include_dirs must be hashable for caching in @cached_function + include_dirs = tuple(include_dirs + [standard_include_path]) + return search_include_directories( + include_dirs, qualified_name, suffix or "", source_pos, include, source_file_path) + + def find_root_package_dir(self, file_path): + return Utils.find_root_package_dir(file_path) + + def check_package_dir(self, dir, package_names): + return Utils.check_package_dir(dir, tuple(package_names)) + + def c_file_out_of_date(self, source_path, output_path): + if not os.path.exists(output_path): + return 1 + c_time = Utils.modification_time(output_path) + if Utils.file_newer_than(source_path, c_time): + return 1 + pxd_path = Utils.replace_suffix(source_path, ".pxd") + if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time): + return 1 + for kind, name in self.read_dependency_file(source_path): + if kind == "cimport": + dep_path = self.find_pxd_file(name, source_file_path=source_path) + elif kind == "include": + dep_path = self.search_include_directories(name, source_file_path=source_path) + else: + continue + if dep_path and Utils.file_newer_than(dep_path, c_time): + return 1 + return 0 + + def find_cimported_module_names(self, source_path): + return [ name for kind, name in self.read_dependency_file(source_path) + if kind == "cimport" ] + + def is_package_dir(self, dir_path): + return Utils.is_package_dir(dir_path) + + def read_dependency_file(self, source_path): + dep_path = Utils.replace_suffix(source_path, ".dep") + if os.path.exists(dep_path): + with open(dep_path, "rU") as f: + chunks = [ line.split(" ", 1) + for line in (l.strip() for l in f) + if " " in line ] + return chunks + else: + return () + + def lookup_submodule(self, name): + # Look up a top-level module. Returns None if not found. + return self.modules.get(name, None) + + def find_submodule(self, name, as_package=False): + # Find a top-level module, creating a new one if needed. 
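+ # e.g. find_submodule('numpy') returns the cached scope if 'numpy' was + # seen before, and otherwise creates and registers a fresh ModuleScope.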
+ scope = self.lookup_submodule(name) + if not scope: + scope = ModuleScope(name, + parent_module = None, context = self, is_package=as_package) + self.modules[name] = scope + return scope + + def parse(self, source_desc, scope, pxd, full_module_name): + if not isinstance(source_desc, FileSourceDescriptor): + raise RuntimeError("Only file sources for code supported") + source_filename = source_desc.filename + scope.cpp = self.cpp + # Parse the given source file and return a parse tree. + num_errors = Errors.get_errors_count() + try: + with Utils.open_source_file(source_filename) as f: + from . import Parsing + s = PyrexScanner(f, source_desc, source_encoding = f.encoding, + scope = scope, context = self) + tree = Parsing.p_module(s, pxd, full_module_name) + if self.options.formal_grammar: + try: + from ..Parser import ConcreteSyntaxTree + except ImportError: + raise RuntimeError( + "Formal grammar can only be used with compiled Cython with an available pgen.") + ConcreteSyntaxTree.p_module(source_filename) + except UnicodeDecodeError as e: + #import traceback + #traceback.print_exc() + raise self._report_decode_error(source_desc, e) + + if Errors.get_errors_count() > num_errors: + raise CompileError() + return tree + + def _report_decode_error(self, source_desc, exc): + msg = exc.args[-1] + position = exc.args[2] + encoding = exc.args[0] + + line = 1 + column = idx = 0 + with io.open(source_desc.filename, "r", encoding='iso8859-1', newline='') as f: + for line, data in enumerate(f, 1): + idx += len(data) + if idx >= position: + column = position - (idx - len(data)) + 1 + break + + return error((source_desc, line, column), + "Decoding error, missing or incorrect coding= " + "at top of source (cannot decode with encoding %r: %s)" % (encoding, msg)) + + def extract_module_name(self, path, options): + # Find fully_qualified module name from the full pathname + # of a source file. + dir, filename = os.path.split(path) + module_name, _ = os.path.splitext(filename) + if "." 
in module_name: + return module_name + names = [module_name] + while self.is_package_dir(dir): + parent, package_name = os.path.split(dir) + if parent == dir: + break + names.append(package_name) + dir = parent + names.reverse() + return ".".join(names) + + def setup_errors(self, options, result): + Errors.init_thread() + if options.use_listing_file: + path = result.listing_file = Utils.replace_suffix(result.main_source_file, ".lis") + else: + path = None + Errors.open_listing_file(path=path, echo_to_stderr=options.errors_to_stderr) + + def teardown_errors(self, err, options, result): + source_desc = result.compilation_source.source_desc + if not isinstance(source_desc, FileSourceDescriptor): + raise RuntimeError("Only file sources for code supported") + Errors.close_listing_file() + result.num_errors = Errors.get_errors_count() + if result.num_errors > 0: + err = True + if err and result.c_file: + try: + Utils.castrate_file(result.c_file, os.stat(source_desc.filename)) + except EnvironmentError: + pass + result.c_file = None + + +def get_output_filename(source_filename, cwd, options): + if options.cplus: + c_suffix = ".cpp" + else: + c_suffix = ".c" + suggested_file_name = Utils.replace_suffix(source_filename, c_suffix) + if options.output_file: + out_path = os.path.join(cwd, options.output_file) + if os.path.isdir(out_path): + return os.path.join(out_path, os.path.basename(suggested_file_name)) + else: + return out_path + else: + return suggested_file_name + + +def create_default_resultobj(compilation_source, options): + result = CompilationResult() + result.main_source_file = compilation_source.source_desc.filename + result.compilation_source = compilation_source + source_desc = compilation_source.source_desc + result.c_file = get_output_filename(source_desc.filename, + compilation_source.cwd, options) + result.embedded_metadata = options.embedded_metadata + return result + + +def run_pipeline(source, options, full_module_name=None, context=None): + from . import Pipeline + + # ensure that the inputs are unicode (for Python 2) + if sys.version_info[0] == 2: + source = Utils.decode_filename(source) + if full_module_name: + full_module_name = Utils.decode_filename(full_module_name) + + source_ext = os.path.splitext(source)[1] + options.configure_language_defaults(source_ext[1:]) # py/pyx + if context is None: + context = Context.from_options(options) + + # Set up source object + cwd = os.getcwd() + abs_path = os.path.abspath(source) + full_module_name = full_module_name or context.extract_module_name(source, options) + full_module_name = EncodedString(full_module_name) + + Utils.raise_error_if_module_name_forbidden(full_module_name) + + if options.relative_path_in_code_position_comments: + rel_path = full_module_name.replace('.', os.sep) + source_ext + if not abs_path.endswith(rel_path): + rel_path = source # safety measure to prevent printing incorrect paths + else: + rel_path = abs_path + source_desc = FileSourceDescriptor(abs_path, rel_path) + source = CompilationSource(source_desc, full_module_name, cwd) + + # Set up result object + result = create_default_resultobj(source, options) + + if options.annotate is None: + # By default, decide based on whether an html file already exists. 
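+ # i.e. if an earlier run left a Cython-generated HTML file next to the + # .c file, annotation stays enabled so the HTML is kept up to date.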
+ html_filename = os.path.splitext(result.c_file)[0] + ".html" + if os.path.exists(html_filename): + with io.open(html_filename, "r", encoding="UTF-8") as html_file: + if u'<!-- Generated by Cython' in html_file.read(100): + options.annotate = True + file.write(" %s --> State %d\n" % (key, state['number'])) + for key in ('bol', 'eol', 'eof', 'else'): + state = special_to_state.get(key, None) + if state: + file.write(" %s --> State %d\n" % (key, state['number'])) + + @cython.locals(char_list=list, i=cython.Py_ssize_t, n=cython.Py_ssize_t, c1=cython.long, c2=cython.long) + def chars_to_ranges(self, char_list): + char_list.sort() + i = 0 + n = len(char_list) + result = [] + while i < n: + c1 = ord(char_list[i]) + c2 = c1 + i += 1 + while i < n and ord(char_list[i]) == c2 + 1: + i += 1 + c2 += 1 + result.append((chr(c1), chr(c2))) + return tuple(result) + + def ranges_to_string(self, range_list): + return ','.join(map(self.range_to_string, range_list)) + + def range_to_string(self, range_tuple): + (c1, c2) = range_tuple + if c1 == c2: + return repr(c1) + else: + return "%s..%s" % (repr(c1), repr(c2)) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Regexps.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Regexps.py new file mode 100644 index 0000000000000000000000000000000000000000..99d8c994a55cbf8b0882e89f6d75cadc8dd82358 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Regexps.py @@ -0,0 +1,540 @@ +""" +Python Lexical Analyser + +Regular Expressions +""" +from __future__ import absolute_import + +import types + +from . import Errors + +maxint = 2**31-1 # sentinel value + +# +# Constants +# + +BOL = 'bol' +EOL = 'eol' +EOF = 'eof' + +nl_code = ord('\n') + + +# +# Helper functions +# + +def chars_to_ranges(s): + """ + Return a list of character codes consisting of pairs + [code1a, code1b, code2a, code2b,...] which cover all + the characters in |s|. + """ + char_list = list(s) + char_list.sort() + i = 0 + n = len(char_list) + result = [] + while i < n: + code1 = ord(char_list[i]) + code2 = code1 + 1 + i += 1 + while i < n and code2 >= ord(char_list[i]): + code2 += 1 + i += 1 + result.append(code1) + result.append(code2) + return result + + +def uppercase_range(code1, code2): + """ + If the range of characters from code1 to code2-1 includes any + lower case letters, return the corresponding upper case range. + """ + code3 = max(code1, ord('a')) + code4 = min(code2, ord('z') + 1) + if code3 < code4: + d = ord('A') - ord('a') + return (code3 + d, code4 + d) + else: + return None + + +def lowercase_range(code1, code2): + """ + If the range of characters from code1 to code2-1 includes any + upper case letters, return the corresponding lower case range. + """ + code3 = max(code1, ord('A')) + code4 = min(code2, ord('Z') + 1) + if code3 < code4: + d = ord('a') - ord('A') + return (code3 + d, code4 + d) + else: + return None + + +def CodeRanges(code_list): + """ + Given a list of codes as returned by chars_to_ranges, return + an RE which will match a character in any of the ranges. + """ + re_list = [CodeRange(code_list[i], code_list[i + 1]) for i in range(0, len(code_list), 2)] + return Alt(*re_list) + + +def CodeRange(code1, code2): + """ + CodeRange(code1, code2) is an RE which matches any character + with a code |c| in the range |code1| <= |c| < |code2|. + """ + if code1 <= nl_code < code2: + return Alt(RawCodeRange(code1, nl_code), + RawNewline, + RawCodeRange(nl_code + 1, code2)) + else: + return RawCodeRange(code1, code2) + + +# +# Abstract classes +# + +class RE(object): + """RE is the base class for regular expression constructors.
+ The following operators are defined on REs: + + re1 + re2 is an RE which matches |re1| followed by |re2| + re1 | re2 is an RE which matches either |re1| or |re2| + """ + + nullable = 1 # True if this RE can match 0 input symbols + match_nl = 1 # True if this RE can match a string ending with '\n' + str = None # Set to a string to override the class's __str__ result + + def build_machine(self, machine, initial_state, final_state, + match_bol, nocase): + """ + This method should add states to |machine| to implement this + RE, starting at |initial_state| and ending at |final_state|. + If |match_bol| is true, the RE must be able to match at the + beginning of a line. If nocase is true, upper and lower case + letters should be treated as equivalent. + """ + raise NotImplementedError("%s.build_machine not implemented" % + self.__class__.__name__) + + def build_opt(self, m, initial_state, c): + """ + Given a state |s| of machine |m|, return a new state + reachable from |s| on character |c| or epsilon. + """ + s = m.new_state() + initial_state.link_to(s) + initial_state.add_transition(c, s) + return s + + def __add__(self, other): + return Seq(self, other) + + def __or__(self, other): + return Alt(self, other) + + def __str__(self): + if self.str: + return self.str + else: + return self.calc_str() + + def check_re(self, num, value): + if not isinstance(value, RE): + self.wrong_type(num, value, "Plex.RE instance") + + def check_string(self, num, value): + if type(value) != type(''): + self.wrong_type(num, value, "string") + + def check_char(self, num, value): + self.check_string(num, value) + if len(value) != 1: + raise Errors.PlexValueError("Invalid value for argument %d of Plex.%s." + "Expected a string of length 1, got: %s" % ( + num, self.__class__.__name__, repr(value))) + + def wrong_type(self, num, value, expected): + if type(value) == types.InstanceType: + got = "%s.%s instance" % ( + value.__class__.__module__, value.__class__.__name__) + else: + got = type(value).__name__ + raise Errors.PlexTypeError("Invalid type for argument %d of Plex.%s " + "(expected %s, got %s" % ( + num, self.__class__.__name__, expected, got)) + +# +# Primitive RE constructors +# ------------------------- +# +# These are the basic REs from which all others are built. +# + + +def Char(c): + """ + Char(c) is an RE which matches the character |c|. + """ + if len(c) == 1: + result = CodeRange(ord(c), ord(c) + 1) + else: + result = SpecialSymbol(c) + result.str = "Char(%s)" % repr(c) + return result + + +class RawCodeRange(RE): + """ + RawCodeRange(code1, code2) is a low-level RE which matches any character + with a code |c| in the range |code1| <= |c| < |code2|, where the range + does not include newline. For internal use only. 
+ """ + nullable = 0 + match_nl = 0 + range = None # (code, code) + uppercase_range = None # (code, code) or None + lowercase_range = None # (code, code) or None + + def __init__(self, code1, code2): + self.range = (code1, code2) + self.uppercase_range = uppercase_range(code1, code2) + self.lowercase_range = lowercase_range(code1, code2) + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + if match_bol: + initial_state = self.build_opt(m, initial_state, BOL) + initial_state.add_transition(self.range, final_state) + if nocase: + if self.uppercase_range: + initial_state.add_transition(self.uppercase_range, final_state) + if self.lowercase_range: + initial_state.add_transition(self.lowercase_range, final_state) + + def calc_str(self): + return "CodeRange(%d,%d)" % (self.code1, self.code2) + + +class _RawNewline(RE): + """ + RawNewline is a low-level RE which matches a newline character. + For internal use only. + """ + nullable = 0 + match_nl = 1 + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + if match_bol: + initial_state = self.build_opt(m, initial_state, BOL) + s = self.build_opt(m, initial_state, EOL) + s.add_transition((nl_code, nl_code + 1), final_state) + + +RawNewline = _RawNewline() + + +class SpecialSymbol(RE): + """ + SpecialSymbol(sym) is an RE which matches the special input + symbol |sym|, which is one of BOL, EOL or EOF. + """ + nullable = 0 + match_nl = 0 + sym = None + + def __init__(self, sym): + self.sym = sym + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + # Sequences 'bol bol' and 'bol eof' are impossible, so only need + # to allow for bol if sym is eol + if match_bol and self.sym == EOL: + initial_state = self.build_opt(m, initial_state, BOL) + initial_state.add_transition(self.sym, final_state) + + +class Seq(RE): + """Seq(re1, re2, re3...) is an RE which matches |re1| followed by + |re2| followed by |re3|...""" + + def __init__(self, *re_list): + nullable = 1 + for i, re in enumerate(re_list): + self.check_re(i, re) + nullable = nullable and re.nullable + self.re_list = re_list + self.nullable = nullable + i = len(re_list) + match_nl = 0 + while i: + i -= 1 + re = re_list[i] + if re.match_nl: + match_nl = 1 + break + if not re.nullable: + break + self.match_nl = match_nl + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + re_list = self.re_list + if len(re_list) == 0: + initial_state.link_to(final_state) + else: + s1 = initial_state + n = len(re_list) + for i, re in enumerate(re_list): + if i < n - 1: + s2 = m.new_state() + else: + s2 = final_state + re.build_machine(m, s1, s2, match_bol, nocase) + s1 = s2 + match_bol = re.match_nl or (match_bol and re.nullable) + + def calc_str(self): + return "Seq(%s)" % ','.join(map(str, self.re_list)) + + +class Alt(RE): + """Alt(re1, re2, re3...) 
is an RE which matches either |re1| or + |re2| or |re3|...""" + + def __init__(self, *re_list): + self.re_list = re_list + nullable = 0 + match_nl = 0 + nullable_res = [] + non_nullable_res = [] + i = 1 + for re in re_list: + self.check_re(i, re) + if re.nullable: + nullable_res.append(re) + nullable = 1 + else: + non_nullable_res.append(re) + if re.match_nl: + match_nl = 1 + i += 1 + self.nullable_res = nullable_res + self.non_nullable_res = non_nullable_res + self.nullable = nullable + self.match_nl = match_nl + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + for re in self.nullable_res: + re.build_machine(m, initial_state, final_state, match_bol, nocase) + if self.non_nullable_res: + if match_bol: + initial_state = self.build_opt(m, initial_state, BOL) + for re in self.non_nullable_res: + re.build_machine(m, initial_state, final_state, 0, nocase) + + def calc_str(self): + return "Alt(%s)" % ','.join(map(str, self.re_list)) + + +class Rep1(RE): + """Rep1(re) is an RE which matches one or more repetitions of |re|.""" + + def __init__(self, re): + self.check_re(1, re) + self.re = re + self.nullable = re.nullable + self.match_nl = re.match_nl + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + s1 = m.new_state() + s2 = m.new_state() + initial_state.link_to(s1) + self.re.build_machine(m, s1, s2, match_bol or self.re.match_nl, nocase) + s2.link_to(s1) + s2.link_to(final_state) + + def calc_str(self): + return "Rep1(%s)" % self.re + + +class SwitchCase(RE): + """ + SwitchCase(re, nocase) is an RE which matches the same strings as RE, + but treating upper and lower case letters according to |nocase|. If + |nocase| is true, case is ignored, otherwise it is not. + """ + re = None + nocase = None + + def __init__(self, re, nocase): + self.re = re + self.nocase = nocase + self.nullable = re.nullable + self.match_nl = re.match_nl + + def build_machine(self, m, initial_state, final_state, match_bol, nocase): + self.re.build_machine(m, initial_state, final_state, match_bol, + self.nocase) + + def calc_str(self): + if self.nocase: + name = "NoCase" + else: + name = "Case" + return "%s(%s)" % (name, self.re) + + +# +# Composite RE constructors +# ------------------------- +# +# These REs are defined in terms of the primitive REs. +# + +Empty = Seq() +Empty.__doc__ = \ + """ + Empty is an RE which matches the empty string. + """ +Empty.str = "Empty" + + +def Str1(s): + """ + Str1(s) is an RE which matches the literal string |s|. + """ + result = Seq(*tuple(map(Char, s))) + result.str = "Str(%s)" % repr(s) + return result + + +def Str(*strs): + """ + Str(s) is an RE which matches the literal string |s|. + Str(s1, s2, s3, ...) is an RE which matches any of |s1| or |s2| or |s3|... + """ + if len(strs) == 1: + return Str1(strs[0]) + else: + result = Alt(*tuple(map(Str1, strs))) + result.str = "Str(%s)" % ','.join(map(repr, strs)) + return result + + +def Any(s): + """ + Any(s) is an RE which matches any character in the string |s|. + """ + result = CodeRanges(chars_to_ranges(s)) + result.str = "Any(%s)" % repr(s) + return result + + +def AnyBut(s): + """ + AnyBut(s) is an RE which matches any character (including + newline) which is not in the string |s|. 
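+
+    For example, AnyBut(" \t\n") matches any single character that is
+    not a space, tab or newline.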
+ """ + ranges = chars_to_ranges(s) + ranges.insert(0, -maxint) + ranges.append(maxint) + result = CodeRanges(ranges) + result.str = "AnyBut(%s)" % repr(s) + return result + + +AnyChar = AnyBut("") +AnyChar.__doc__ = \ + """ + AnyChar is an RE which matches any single character (including a newline). + """ +AnyChar.str = "AnyChar" + + +def Range(s1, s2=None): + """ + Range(c1, c2) is an RE which matches any single character in the range + |c1| to |c2| inclusive. + Range(s) where |s| is a string of even length is an RE which matches + any single character in the ranges |s[0]| to |s[1]|, |s[2]| to |s[3]|,... + """ + if s2: + result = CodeRange(ord(s1), ord(s2) + 1) + result.str = "Range(%s,%s)" % (s1, s2) + else: + ranges = [] + for i in range(0, len(s1), 2): + ranges.append(CodeRange(ord(s1[i]), ord(s1[i + 1]) + 1)) + result = Alt(*ranges) + result.str = "Range(%s)" % repr(s1) + return result + + +def Opt(re): + """ + Opt(re) is an RE which matches either |re| or the empty string. + """ + result = Alt(re, Empty) + result.str = "Opt(%s)" % re + return result + + +def Rep(re): + """ + Rep(re) is an RE which matches zero or more repetitions of |re|. + """ + result = Opt(Rep1(re)) + result.str = "Rep(%s)" % re + return result + + +def NoCase(re): + """ + NoCase(re) is an RE which matches the same strings as RE, but treating + upper and lower case letters as equivalent. + """ + return SwitchCase(re, nocase=1) + + +def Case(re): + """ + Case(re) is an RE which matches the same strings as RE, but treating + upper and lower case letters as distinct, i.e. it cancels the effect + of any enclosing NoCase(). + """ + return SwitchCase(re, nocase=0) + + +# +# RE Constants +# + +Bol = Char(BOL) +Bol.__doc__ = \ + """ + Bol is an RE which matches the beginning of a line. + """ +Bol.str = "Bol" + +Eol = Char(EOL) +Eol.__doc__ = \ + """ + Eol is an RE which matches the end of a line. + """ +Eol.str = "Eol" + +Eof = Char(EOF) +Eof.__doc__ = \ + """ + Eof is an RE which matches the end of the file. + """ +Eof.str = "Eof" diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..40b3b4e84e0b69d2dd1f015d495d5f152554dc6b Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.cp39-win_amd64.pyd differ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.pxd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.pxd new file mode 100644 index 0000000000000000000000000000000000000000..664b1a6f0cefed86772cccb26f5ad7e0ef85b2d0 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.pxd @@ -0,0 +1,48 @@ +from __future__ import absolute_import + +import cython + +from Cython.Plex.Actions cimport Action + +cdef class Scanner: + + cdef public lexicon + cdef public stream + cdef public name + cdef public unicode buffer + cdef public Py_ssize_t buf_start_pos + cdef public Py_ssize_t next_pos + cdef public Py_ssize_t cur_pos + cdef public Py_ssize_t cur_line + cdef public Py_ssize_t cur_line_start + cdef public Py_ssize_t start_pos + cdef tuple current_scanner_position_tuple + cdef public tuple last_token_position_tuple + cdef public text + cdef public initial_state # int? 
+ cdef public state_name + cdef public list queue + cdef public bint trace + cdef public cur_char + cdef public long input_state + + cdef public level + + @cython.locals(input_state=long) + cdef inline next_char(self) + @cython.locals(action=Action) + cpdef tuple read(self) + cdef inline unread(self, token, value, position) + cdef inline get_current_scan_pos(self) + cdef inline tuple scan_a_token(self) + ##cdef tuple position(self) # used frequently by Parsing.py + + @cython.final + @cython.locals(cur_pos=Py_ssize_t, cur_line=Py_ssize_t, cur_line_start=Py_ssize_t, + input_state=long, next_pos=Py_ssize_t, state=dict, + buf_start_pos=Py_ssize_t, buf_len=Py_ssize_t, buf_index=Py_ssize_t, + trace=bint, discard=Py_ssize_t, data=unicode, buffer=unicode) + cdef run_machine_inlined(self) + + cdef inline begin(self, state) + cdef inline produce(self, value, text = *) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.py new file mode 100644 index 0000000000000000000000000000000000000000..ad85f4465ee6b5e6f0d23fc077c6d7a0eef712f4 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.py @@ -0,0 +1,359 @@ +# cython: language_level=3str +# cython: auto_pickle=False +""" +Python Lexical Analyser + +Scanning an input stream +""" +from __future__ import absolute_import + +import cython + +cython.declare(BOL=object, EOL=object, EOF=object, NOT_FOUND=object) # noqa:E402 + +from . import Errors +from .Regexps import BOL, EOL, EOF + +NOT_FOUND = object() + + +class Scanner(object): + """ + A Scanner is used to read tokens from a stream of characters + using the token set specified by a Plex.Lexicon. + + Constructor: + + Scanner(lexicon, stream, name = '') + + See the docstring of the __init__ method for details. + + Methods: + + See the docstrings of the individual methods for more + information. + + read() --> (value, text) + Reads the next lexical token from the stream. + + position() --> (name, line, col) + Returns the position of the last token read using the + read() method. + + begin(state_name) + Causes scanner to change state. + + produce(value [, text]) + Causes return of a token value to the caller of the + Scanner. 
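+
+    A minimal usage sketch (illustrative only; the file name and token
+    names here are arbitrary, not part of the Plex API):
+
+        from Cython.Plex import Lexicon, Scanner, Range, Rep1, Any, IGNORE
+
+        lexicon = Lexicon([
+            (Rep1(Range("AZaz")), 'ident'),
+            (Rep1(Range("09")),   'int'),
+            (Any(" \t\n"),        IGNORE),
+        ])
+        scanner = Scanner(lexicon, open('example.txt'), 'example.txt')
+        value, text = scanner.read()   # e.g. ('ident', 'abc')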
+ + """ + + # lexicon = None # Lexicon + # stream = None # file-like object + # name = '' + # buffer = '' + # + # These positions are used by the scanner to track its internal state: + # buf_start_pos = 0 # position in input of start of buffer + # next_pos = 0 # position in input of next char to read + # cur_pos = 0 # position in input of current char + # cur_line = 1 # line number of current char + # cur_line_start = 0 # position in input of start of current line + # start_pos = 0 # position in input of start of token + # current_scanner_position_tuple = ("", 0, 0) + # tuple of filename, line number and position in line, really mainly for error reporting + # + # These positions are used to track what was read from the queue + # (which may differ from the internal state when tokens are replaced onto the queue) + # last_token_position_tuple = ("", 0, 0) # tuple of filename, line number and position in line + + # text = None # text of last token read + # initial_state = None # Node + # state_name = '' # Name of initial state + # queue = None # list of tokens and positions to be returned + # trace = 0 + + def __init__(self, lexicon, stream, name='', initial_pos=None): + """ + Scanner(lexicon, stream, name = '') + + |lexicon| is a Plex.Lexicon instance specifying the lexical tokens + to be recognised. + + |stream| can be a file object or anything which implements a + compatible read() method. + + |name| is optional, and may be the name of the file being + scanned or any other identifying string. + """ + self.trace = 0 + + self.buffer = u'' + self.buf_start_pos = 0 + self.next_pos = 0 + self.cur_pos = 0 + self.cur_line = 1 + self.start_pos = 0 + self.current_scanner_position_tuple = ("", 0, 0) + self.last_token_position_tuple = ("", 0, 0) + self.text = None + self.state_name = None + + self.lexicon = lexicon + self.stream = stream + self.name = name + self.queue = [] + self.initial_state = None + self.begin('') + self.next_pos = 0 + self.cur_pos = 0 + self.cur_line_start = 0 + self.cur_char = BOL + self.input_state = 1 + if initial_pos is not None: + self.cur_line, self.cur_line_start = initial_pos[1], -initial_pos[2] + + def read(self): + """ + Read the next lexical token from the stream and return a + tuple (value, text), where |value| is the value associated with + the token as specified by the Lexicon, and |text| is the actual + string read from the stream. Returns (None, '') on end of file. + """ + queue = self.queue + while not queue: + self.text, action = self.scan_a_token() + if action is None: + self.produce(None) + self.eof() + else: + value = action.perform(self, self.text) + if value is not None: + self.produce(value) + result, self.last_token_position_tuple = queue[0] + del queue[0] + return result + + def unread(self, token, value, position): + self.queue.insert(0, ((token, value), position)) + + def get_current_scan_pos(self): + # distinct from the position of the last token due to the queue + return self.current_scanner_position_tuple + + def scan_a_token(self): + """ + Read the next input sequence recognised by the machine + and return (text, action). Returns ('', None) on end of + file. 
+ """ + self.start_pos = self.cur_pos + self.current_scanner_position_tuple = ( + self.name, self.cur_line, self.cur_pos - self.cur_line_start + ) + action = self.run_machine_inlined() + if action is not None: + if self.trace: + print("Scanner: read: Performing %s %d:%d" % ( + action, self.start_pos, self.cur_pos)) + text = self.buffer[ + self.start_pos - self.buf_start_pos: + self.cur_pos - self.buf_start_pos] + return (text, action) + else: + if self.cur_pos == self.start_pos: + if self.cur_char is EOL: + self.next_char() + if self.cur_char is None or self.cur_char is EOF: + return (u'', None) + raise Errors.UnrecognizedInput(self, self.state_name) + + def run_machine_inlined(self): + """ + Inlined version of run_machine for speed. + """ + state = self.initial_state + cur_pos = self.cur_pos + cur_line = self.cur_line + cur_line_start = self.cur_line_start + cur_char = self.cur_char + input_state = self.input_state + next_pos = self.next_pos + buffer = self.buffer + buf_start_pos = self.buf_start_pos + buf_len = len(buffer) + b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \ + None, 0, 0, 0, u'', 0, 0 + + trace = self.trace + while 1: + if trace: + print("State %d, %d/%d:%s -->" % ( + state['number'], input_state, cur_pos, repr(cur_char))) + + # Begin inlined self.save_for_backup() + action = state['action'] + if action is not None: + b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \ + action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos + # End inlined self.save_for_backup() + + c = cur_char + new_state = state.get(c, NOT_FOUND) + if new_state is NOT_FOUND: + new_state = c and state.get('else') + + if new_state: + if trace: + print("State %d" % new_state['number']) + state = new_state + # Begin inlined: self.next_char() + if input_state == 1: + cur_pos = next_pos + # Begin inlined: c = self.read_char() + buf_index = next_pos - buf_start_pos + if buf_index < buf_len: + c = buffer[buf_index] + next_pos += 1 + else: + discard = self.start_pos - buf_start_pos + data = self.stream.read(0x1000) + buffer = self.buffer[discard:] + data + self.buffer = buffer + buf_start_pos += discard + self.buf_start_pos = buf_start_pos + buf_len = len(buffer) + buf_index -= discard + if data: + c = buffer[buf_index] + next_pos += 1 + else: + c = u'' + # End inlined: c = self.read_char() + if c == u'\n': + cur_char = EOL + input_state = 2 + elif not c: + cur_char = EOL + input_state = 4 + else: + cur_char = c + elif input_state == 2: # after EoL (1) -> BoL (3) + cur_char = u'\n' + input_state = 3 + elif input_state == 3: # start new code line + cur_line += 1 + cur_line_start = cur_pos = next_pos + cur_char = BOL + input_state = 1 + elif input_state == 4: # after final line (1) -> EoF (5) + cur_char = EOF + input_state = 5 + else: # input_state == 5 (EoF) + cur_char = u'' + # End inlined self.next_char() + else: # not new_state + if trace: + print("blocked") + # Begin inlined: action = self.back_up() + if b_action is not None: + (action, cur_pos, cur_line, cur_line_start, + cur_char, input_state, next_pos) = \ + (b_action, b_cur_pos, b_cur_line, b_cur_line_start, + b_cur_char, b_input_state, b_next_pos) + else: + action = None + break # while 1 + # End inlined: action = self.back_up() + + self.cur_pos = cur_pos + self.cur_line = cur_line + self.cur_line_start = cur_line_start + self.cur_char = cur_char + self.input_state = input_state + self.next_pos = next_pos + if trace: + if action is not None: + 
print("Doing %s" % action) + return action + + def next_char(self): + input_state = self.input_state + if self.trace: + print("Scanner: next: %s [%d] %d" % (" " * 20, input_state, self.cur_pos)) + if input_state == 1: + self.cur_pos = self.next_pos + c = self.read_char() + if c == u'\n': + self.cur_char = EOL + self.input_state = 2 + elif not c: + self.cur_char = EOL + self.input_state = 4 + else: + self.cur_char = c + elif input_state == 2: + self.cur_char = u'\n' + self.input_state = 3 + elif input_state == 3: + self.cur_line += 1 + self.cur_line_start = self.cur_pos = self.next_pos + self.cur_char = BOL + self.input_state = 1 + elif input_state == 4: + self.cur_char = EOF + self.input_state = 5 + else: # input_state = 5 + self.cur_char = u'' + if self.trace: + print("--> [%d] %d %r" % (input_state, self.cur_pos, self.cur_char)) + + def position(self): + """ + Return a tuple (name, line, col) representing the location of + the last token read using the read() method. |name| is the + name that was provided to the Scanner constructor; |line| + is the line number in the stream (1-based); |col| is the + position within the line of the first character of the token + (0-based). + """ + return self.last_token_position_tuple + + def get_position(self): + """ + Python accessible wrapper around position(), only for error reporting. + """ + return self.position() + + def begin(self, state_name): + """Set the current state of the scanner to the named state.""" + self.initial_state = ( + self.lexicon.get_initial_state(state_name)) + self.state_name = state_name + + def produce(self, value, text=None): + """ + Called from an action procedure, causes |value| to be returned + as the token value from read(). If |text| is supplied, it is + returned in place of the scanned text. + + produce() can be called more than once during a single call to an action + procedure, in which case the tokens are queued up and returned one + at a time by subsequent calls to read(), until the queue is empty, + whereupon scanning resumes. + """ + if text is None: + text = self.text + self.queue.append(((value, text), self.current_scanner_position_tuple)) + + def eof(self): + """ + Override this method if you want something to be done at + end of file. 
+ """ + pass + + @property + def start_line(self): + return self.last_token_position_tuple[1] diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..90baab58f487161f688de72da07760370aef3217 Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.cp39-win_amd64.pyd differ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.pxd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.pxd new file mode 100644 index 0000000000000000000000000000000000000000..53dd4d58ea9e89b19c02dae4f7ece76a8083156b --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.pxd @@ -0,0 +1,22 @@ +cimport cython + +cdef long maxint + +@cython.final +cdef class TransitionMap: + cdef list map + cdef dict special + + @cython.locals(i=cython.Py_ssize_t, j=cython.Py_ssize_t) + cpdef add(self, event, new_state) + + @cython.locals(i=cython.Py_ssize_t, j=cython.Py_ssize_t) + cpdef add_set(self, event, new_set) + + @cython.locals(i=cython.Py_ssize_t, n=cython.Py_ssize_t, else_set=cython.bint) + cpdef iteritems(self) + + @cython.locals(map=list, lo=cython.Py_ssize_t, mid=cython.Py_ssize_t, hi=cython.Py_ssize_t) + cdef split(self, long code) + + cdef get_special(self, event) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.py new file mode 100644 index 0000000000000000000000000000000000000000..f58dd538e26cea710a86c7ab1983650d0b4d15e5 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.py @@ -0,0 +1,234 @@ +# cython: auto_pickle=False +""" +Plex - Transition Maps + +This version represents state sets directly as dicts for speed. +""" + +maxint = 2**31-1 # sentinel value + + +class TransitionMap(object): + """ + A TransitionMap maps an input event to a set of states. + An input event is one of: a range of character codes, + the empty string (representing an epsilon move), or one + of the special symbols BOL, EOL, EOF. + + For characters, this implementation compactly represents + the map by means of a list: + + [code_0, states_0, code_1, states_1, code_2, states_2, + ..., code_n-1, states_n-1, code_n] + + where |code_i| is a character code, and |states_i| is a + set of states corresponding to characters with codes |c| + in the range |code_i| <= |c| <= |code_i+1|. + + The following invariants hold: + n >= 1 + code_0 == -maxint + code_n == maxint + code_i < code_i+1 for i in 0..n-1 + states_0 == states_n-1 + + Mappings for the special events '', BOL, EOL, EOF are + kept separately in a dictionary. + """ + + def __init__(self, map=None, special=None): + if not map: + map = [-maxint, {}, maxint] + if not special: + special = {} + self.map = map # The list of codes and states + self.special = special # Mapping for special events + + def add(self, event, new_state): + """ + Add transition to |new_state| on |event|. + """ + if type(event) is tuple: + code0, code1 = event + i = self.split(code0) + j = self.split(code1) + map = self.map + while i < j: + map[i + 1][new_state] = 1 + i += 2 + else: + self.get_special(event)[new_state] = 1 + + def add_set(self, event, new_set): + """ + Add transitions to the states in |new_set| on |event|. 
+ """ + if type(event) is tuple: + code0, code1 = event + i = self.split(code0) + j = self.split(code1) + map = self.map + while i < j: + map[i + 1].update(new_set) + i += 2 + else: + self.get_special(event).update(new_set) + + def get_epsilon(self): + """ + Return the mapping for epsilon, or None. + """ + return self.special.get('') + + def iteritems(self): + """ + Return the mapping as an iterable of ((code1, code2), state_set) and + (special_event, state_set) pairs. + """ + result = [] + map = self.map + else_set = map[1] + i = 0 + n = len(map) - 1 + code0 = map[0] + while i < n: + set = map[i + 1] + code1 = map[i + 2] + if set or else_set: + result.append(((code0, code1), set)) + code0 = code1 + i += 2 + for event, set in self.special.items(): + if set: + result.append((event, set)) + return iter(result) + + items = iteritems + + # ------------------- Private methods -------------------- + + def split(self, code): + """ + Search the list for the position of the split point for |code|, + inserting a new split point if necessary. Returns index |i| such + that |code| == |map[i]|. + """ + # We use a funky variation on binary search. + map = self.map + hi = len(map) - 1 + # Special case: code == map[-1] + if code == maxint: + return hi + + # General case + lo = 0 + # loop invariant: map[lo] <= code < map[hi] and hi - lo >= 2 + while hi - lo >= 4: + # Find midpoint truncated to even index + mid = ((lo + hi) // 2) & ~1 + if code < map[mid]: + hi = mid + else: + lo = mid + # map[lo] <= code < map[hi] and hi - lo == 2 + if map[lo] == code: + return lo + else: + map[hi:hi] = [code, map[hi - 1].copy()] + return hi + + def get_special(self, event): + """ + Get state set for special event, adding a new entry if necessary. + """ + special = self.special + set = special.get(event, None) + if not set: + set = {} + special[event] = set + return set + + # --------------------- Conversion methods ----------------------- + + def __str__(self): + map_strs = [] + map = self.map + n = len(map) + i = 0 + while i < n: + code = map[i] + if code == -maxint: + code_str = "-inf" + elif code == maxint: + code_str = "inf" + else: + code_str = str(code) + map_strs.append(code_str) + i += 1 + if i < n: + map_strs.append(state_set_str(map[i])) + i += 1 + special_strs = {} + for event, set in self.special.items(): + special_strs[event] = state_set_str(set) + return "[%s]+%s" % ( + ','.join(map_strs), + special_strs + ) + + # --------------------- Debugging methods ----------------------- + + def check(self): + """Check data structure integrity.""" + if not self.map[-3] < self.map[-1]: + print(self) + assert 0 + + def dump(self, file): + map = self.map + i = 0 + n = len(map) - 1 + while i < n: + self.dump_range(map[i], map[i + 2], map[i + 1], file) + i += 2 + for event, set in self.special.items(): + if set: + if not event: + event = 'empty' + self.dump_trans(event, set, file) + + def dump_range(self, code0, code1, set, file): + if set: + if code0 == -maxint: + if code1 == maxint: + k = "any" + else: + k = "< %s" % self.dump_char(code1) + elif code1 == maxint: + k = "> %s" % self.dump_char(code0 - 1) + elif code0 == code1 - 1: + k = self.dump_char(code0) + else: + k = "%s..%s" % (self.dump_char(code0), + self.dump_char(code1 - 1)) + self.dump_trans(k, set, file) + + def dump_char(self, code): + if 0 <= code <= 255: + return repr(chr(code)) + else: + return "chr(%d)" % code + + def dump_trans(self, key, set, file): + file.write(" %s --> %s\n" % (key, self.dump_set(set))) + + def dump_set(self, set): + return 
state_set_str(set) + + +# +# State set manipulation functions +# + +def state_set_str(set): + return "[%s]" % ','.join(["S%d" % state.number for state in set]) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..83bb9239abec1bc1da2f9aa85a266d73e264f6a1 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/__init__.py @@ -0,0 +1,35 @@ +""" +Python Lexical Analyser + +The Plex module provides lexical analysers with similar capabilities +to GNU Flex. The following classes and functions are exported; +see the attached docstrings for more information. + + Scanner For scanning a character stream under the + direction of a Lexicon. + + Lexicon For constructing a lexical definition + to be used by a Scanner. + + Str, Any, AnyBut, AnyChar, Seq, Alt, Opt, Rep, Rep1, + Bol, Eol, Eof, Empty + + Regular expression constructors, for building pattern + definitions for a Lexicon. + + State For defining scanner states when creating a + Lexicon. + + TEXT, IGNORE, Begin + + Actions for associating with patterns when + creating a Lexicon. +""" +# flake8: noqa:F401 +from __future__ import absolute_import + +from .Actions import TEXT, IGNORE, Begin, Method +from .Lexicons import Lexicon, State +from .Regexps import RE, Seq, Alt, Rep1, Empty, Str, Any, AnyBut, AnyChar, Range +from .Regexps import Opt, Rep, Bol, Eol, Eof, Case, NoCase +from .Scanners import Scanner diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa81adaff68e06d8e915a6afa375f62f7e5a8fad --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/__init__.py @@ -0,0 +1 @@ +# empty file diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..252498e90d52f3103e6db62ea70058c850d0ae80 Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.cp39-win_amd64.pyd differ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.pyx b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.pyx new file mode 100644 index 0000000000000000000000000000000000000000..bc72f62c6af52a55913af48edd7e13a37ded3f15 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.pyx @@ -0,0 +1,192 @@ +# cython: language_level=3, auto_pickle=False + +from cpython.ref cimport PyObject, Py_INCREF, Py_CLEAR, Py_XDECREF, Py_XINCREF +from cpython.exc cimport PyErr_Fetch, PyErr_Restore +from cpython.pystate cimport PyThreadState_Get + +cimport cython + +loglevel = 0 +reflog = [] + +cdef log(level, action, obj, lineno): + if reflog is None: + # can happen during finalisation + return + if loglevel >= level: + reflog.append((lineno, action, id(obj))) + +LOG_NONE, LOG_ALL = range(2) + +@cython.final +cdef class Context(object): + cdef readonly object name, filename + cdef readonly dict refs + cdef readonly list errors + cdef readonly Py_ssize_t start + + def __cinit__(self, name, line=0, filename=None): + self.name = name + self.start = line + self.filename = filename + self.refs = {} # id -> (count, [lineno]) + self.errors = [] + + cdef regref(self, obj, Py_ssize_t 
lineno, bint is_null): + log(LOG_ALL, u'regref', u"" if is_null else obj, lineno) + if is_null: + self.errors.append(f"NULL argument on line {lineno}") + return + id_ = id(obj) + count, linenumbers = self.refs.get(id_, (0, [])) + self.refs[id_] = (count + 1, linenumbers) + linenumbers.append(lineno) + + cdef bint delref(self, obj, Py_ssize_t lineno, bint is_null) except -1: + # returns whether it is ok to do the decref operation + log(LOG_ALL, u'delref', u"" if is_null else obj, lineno) + if is_null: + self.errors.append(f"NULL argument on line {lineno}") + return False + id_ = id(obj) + count, linenumbers = self.refs.get(id_, (0, [])) + if count == 0: + self.errors.append(f"Too many decrefs on line {lineno}, reference acquired on lines {linenumbers!r}") + return False + if count == 1: + del self.refs[id_] + else: + self.refs[id_] = (count - 1, linenumbers) + return True + + cdef end(self): + if self.refs: + msg = u"References leaked:" + for count, linenos in self.refs.itervalues(): + msg += f"\n ({count}) acquired on lines: {u', '.join([f'{x}' for x in linenos])}" + self.errors.append(msg) + return u"\n".join([f'REFNANNY: {error}' for error in self.errors]) if self.errors else None + + +cdef void report_unraisable(filename, Py_ssize_t lineno, object e=None): + try: + if e is None: + import sys + e = sys.exc_info()[1] + print(f"refnanny raised an exception from {filename}:{lineno}: {e}") + finally: + return # We absolutely cannot exit with an exception + + +# All Python operations must happen after any existing +# exception has been fetched, in case we are called from +# exception-handling code. + +cdef PyObject* SetupContext(char* funcname, Py_ssize_t lineno, char* filename) except NULL: + if Context is None: + # Context may be None during finalize phase. + # In that case, we don't want to be doing anything fancy + # like caching and resetting exceptions. 
+        return NULL
+    cdef (PyObject*) type = NULL, value = NULL, tb = NULL, result = NULL
+    PyThreadState_Get()  # Check that we hold the GIL
+    PyErr_Fetch(&type, &value, &tb)
+    try:
+        ctx = Context(funcname, lineno, filename)
+        Py_INCREF(ctx)
+        result = <PyObject*>ctx
+    except Exception as e:
+        report_unraisable(filename, lineno, e)
+    PyErr_Restore(type, value, tb)
+    return result
+
+cdef void GOTREF(PyObject* ctx, PyObject* p_obj, Py_ssize_t lineno):
+    if ctx == NULL: return
+    cdef (PyObject*) type = NULL, value = NULL, tb = NULL
+    PyErr_Fetch(&type, &value, &tb)
+    try:
+        (<Context>ctx).regref(
+            <object>p_obj if p_obj is not NULL else None,
+            lineno,
+            is_null=p_obj is NULL,
+        )
+    except:
+        report_unraisable((<Context>ctx).filename, lineno=(<Context>ctx).start)
+    finally:
+        PyErr_Restore(type, value, tb)
+        return  # swallow any exceptions
+
+cdef bint GIVEREF_and_report(PyObject* ctx, PyObject* p_obj, Py_ssize_t lineno):
+    if ctx == NULL: return 1
+    cdef (PyObject*) type = NULL, value = NULL, tb = NULL
+    cdef bint decref_ok = False
+    PyErr_Fetch(&type, &value, &tb)
+    try:
+        decref_ok = (<Context>ctx).delref(
+            <object>p_obj if p_obj is not NULL else None,
+            lineno,
+            is_null=p_obj is NULL,
+        )
+    except:
+        report_unraisable((<Context>ctx).filename, lineno=(<Context>ctx).start)
+    finally:
+        PyErr_Restore(type, value, tb)
+        return decref_ok  # swallow any exceptions
+
+cdef void GIVEREF(PyObject* ctx, PyObject* p_obj, Py_ssize_t lineno):
+    GIVEREF_and_report(ctx, p_obj, lineno)
+
+cdef void INCREF(PyObject* ctx, PyObject* obj, Py_ssize_t lineno):
+    Py_XINCREF(obj)
+    PyThreadState_Get()  # Check that we hold the GIL
+    GOTREF(ctx, obj, lineno)
+
+cdef void DECREF(PyObject* ctx, PyObject* obj, Py_ssize_t lineno):
+    if GIVEREF_and_report(ctx, obj, lineno):
+        Py_XDECREF(obj)
+    PyThreadState_Get()  # Check that we hold the GIL
+
+cdef void FinishContext(PyObject** ctx):
+    if ctx == NULL or ctx[0] == NULL: return
+    cdef (PyObject*) type = NULL, value = NULL, tb = NULL
+    cdef object errors = None
+    cdef Context context
+    PyThreadState_Get()  # Check that we hold the GIL
+    PyErr_Fetch(&type, &value, &tb)
+    try:
+        context = <Context>ctx[0]
+        errors = context.end()
+        if errors:
+            print(f"{context.filename.decode('latin1')}: {context.name.decode('latin1')}()")
+            print(errors)
+        context = None
+    except:
+        report_unraisable(
+            context.filename if context is not None else None,
+            lineno=context.start if context is not None else 0,
+        )
+    finally:
+        Py_CLEAR(ctx[0])
+        PyErr_Restore(type, value, tb)
+        return  # swallow any exceptions
+
+ctypedef struct RefNannyAPIStruct:
+    void (*INCREF)(PyObject*, PyObject*, Py_ssize_t)
+    void (*DECREF)(PyObject*, PyObject*, Py_ssize_t)
+    void (*GOTREF)(PyObject*, PyObject*, Py_ssize_t)
+    void (*GIVEREF)(PyObject*, PyObject*, Py_ssize_t)
+    PyObject* (*SetupContext)(char*, Py_ssize_t, char*) except NULL
+    void (*FinishContext)(PyObject**)
+
+cdef RefNannyAPIStruct api
+api.INCREF = INCREF
+api.DECREF = DECREF
+api.GOTREF = GOTREF
+api.GIVEREF = GIVEREF
+api.SetupContext = SetupContext
+api.FinishContext = FinishContext
+
+cdef extern from "Python.h":
+    object PyLong_FromVoidPtr(void*)
+
+RefNannyAPI = PyLong_FromVoidPtr(<void*>&api)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Shadow.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Shadow.py
new file mode 100644
index 0000000000000000000000000000000000000000..c474813c7ec75cc0b4770b485140c42e233f00f2
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Shadow.py
@@ -0,0 +1,609 @@
+# cython.* namespace for pure mode.
+from __future__ import absolute_import + +# Possible version formats: "3.1.0", "3.1.0a1", "3.1.0a1.dev0" +__version__ = "3.0.8" + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + + +# BEGIN shameless copy from Cython/minivect/minitypes.py + +class _ArrayType(object): + + is_array = True + subtypes = ['dtype'] + + def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False, + inner_contig=False, broadcasting=None): + self.dtype = dtype + self.ndim = ndim + self.is_c_contig = is_c_contig + self.is_f_contig = is_f_contig + self.inner_contig = inner_contig or is_c_contig or is_f_contig + self.broadcasting = broadcasting + + def __repr__(self): + axes = [":"] * self.ndim + if self.is_c_contig: + axes[-1] = "::1" + elif self.is_f_contig: + axes[0] = "::1" + + return "%s[%s]" % (self.dtype, ", ".join(axes)) + + +def index_type(base_type, item): + """ + Support array type creation by slicing, e.g. double[:, :] specifies + a 2D strided array of doubles. The syntax is the same as for + Cython memoryviews. + """ + class InvalidTypeSpecification(Exception): + pass + + def verify_slice(s): + if s.start or s.stop or s.step not in (None, 1): + raise InvalidTypeSpecification( + "Only a step of 1 may be provided to indicate C or " + "Fortran contiguity") + + if isinstance(item, tuple): + step_idx = None + for idx, s in enumerate(item): + verify_slice(s) + if s.step and (step_idx or idx not in (0, len(item) - 1)): + raise InvalidTypeSpecification( + "Step may only be provided once, and only in the " + "first or last dimension.") + + if s.step == 1: + step_idx = idx + + return _ArrayType(base_type, len(item), + is_c_contig=step_idx == len(item) - 1, + is_f_contig=step_idx == 0) + elif isinstance(item, slice): + verify_slice(item) + return _ArrayType(base_type, 1, is_c_contig=bool(item.step)) + else: + # int[8] etc. + assert int(item) == item # array size must be a plain integer + return array(base_type, item) + +# END shameless copy + + +compiled = False + +_Unspecified = object() + +# Function decorators + +def _empty_decorator(x): + return x + +def locals(**arg_types): + return _empty_decorator + +def test_assert_path_exists(*paths): + return _empty_decorator + +def test_fail_if_path_exists(*paths): + return _empty_decorator + +class _EmptyDecoratorAndManager(object): + def __call__(self, x): + return x + def __enter__(self): + pass + def __exit__(self, exc_type, exc_value, traceback): + pass + +class _Optimization(object): + pass + +cclass = ccall = cfunc = _EmptyDecoratorAndManager() + +annotation_typing = returns = wraparound = boundscheck = initializedcheck = \ + nonecheck = embedsignature = cdivision = cdivision_warnings = \ + always_allow_keywords = profile = linetrace = infer_types = \ + unraisable_tracebacks = freelist = auto_pickle = cpow = trashcan = \ + auto_cpdef = c_api_binop_methods = \ + allow_none_for_extension_args = callspec = show_performance_hints = \ + cpp_locals = py2_import = iterable_coroutine = remove_unreachable = \ + lambda _: _EmptyDecoratorAndManager() + +# Note that fast_getattr is untested and undocumented! 
+fast_getattr = lambda _: _EmptyDecoratorAndManager() + +exceptval = lambda _=None, check=True: _EmptyDecoratorAndManager() + +overflowcheck = lambda _: _EmptyDecoratorAndManager() +optimize = _Optimization() + + +embedsignature.format = overflowcheck.fold = optimize.use_switch = \ + optimize.unpack_method_calls = lambda arg: _EmptyDecoratorAndManager() + +final = internal = type_version_tag = no_gc_clear = no_gc = total_ordering = \ + ufunc = _empty_decorator + +binding = lambda _: _empty_decorator + +class warn: + undeclared = unreachable = maybe_uninitialized = unused = \ + unused_arg = unused_result = \ + lambda _: _EmptyDecoratorAndManager() + + +_cython_inline = None +def inline(f, *args, **kwds): + if isinstance(f, basestring): + global _cython_inline + if _cython_inline is None: + from Cython.Build.Inline import cython_inline as _cython_inline + return _cython_inline(f, *args, **kwds) + else: + assert len(args) == len(kwds) == 0 + return f + + +def compile(f): + from Cython.Build.Inline import RuntimeCompiledFunction + return RuntimeCompiledFunction(f) + + +# Special functions + +def cdiv(a, b): + if a < 0: + a = -a + b = -b + if b < 0: + return (a + b + 1) // b + return a // b + +def cmod(a, b): + r = a % b + if (a * b) < 0 and r: + r -= b + return r + + +# Emulated language constructs + +def cast(t, *args, **kwargs): + kwargs.pop('typecheck', None) + assert not kwargs + + if isinstance(t, typedef): + return t(*args) + elif isinstance(t, type): # Doesn't work with old-style classes of Python 2.x + if len(args) != 1 or not (args[0] is None or isinstance(args[0], t)): + return t(*args) + + return args[0] + +def sizeof(arg): + return 1 + +def typeof(arg): + return arg.__class__.__name__ + # return type(arg) + +def address(arg): + return pointer(type(arg))([arg]) + +def _is_value_type(t): + if isinstance(t, typedef): + return _is_value_type(t._basetype) + + return isinstance(t, type) and issubclass(t, (StructType, UnionType, ArrayType)) + +def declare(t=None, value=_Unspecified, **kwds): + if value is not _Unspecified: + return cast(t, value) + elif _is_value_type(t): + return t() + else: + return None + +class _nogil(object): + """Support for 'with nogil' statement and @nogil decorator. + """ + def __call__(self, x): + if callable(x): + # Used as function decorator => return the function unchanged. + return x + # Used as conditional context manager or to create an "@nogil(True/False)" decorator => keep going. + return self + + def __enter__(self): + pass + def __exit__(self, exc_class, exc, tb): + return exc_class is None + +nogil = _nogil() +gil = _nogil() +with_gil = _nogil() # Actually not a context manager, but compilation will give the right error. 
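+
+# Illustrative sketch: because these objects are no-ops under plain
+# CPython, a function written for compiled pure mode also runs unchanged
+# as ordinary Python (the function below is an example, not part of this
+# module):
+#
+#     @cython.nogil
+#     @cython.cfunc
+#     def _add(a: cython.int, b: cython.int) -> cython.int:
+#         return a + b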
+del _nogil + + +# Emulated types + +class CythonMetaType(type): + + def __getitem__(type, ix): + return array(type, ix) + +CythonTypeObject = CythonMetaType('CythonTypeObject', (object,), {}) + +class CythonType(CythonTypeObject): + + def _pointer(self, n=1): + for i in range(n): + self = pointer(self) + return self + +class PointerType(CythonType): + + def __init__(self, value=None): + if isinstance(value, (ArrayType, PointerType)): + self._items = [cast(self._basetype, a) for a in value._items] + elif isinstance(value, list): + self._items = [cast(self._basetype, a) for a in value] + elif value is None or value == 0: + self._items = [] + else: + raise ValueError + + def __getitem__(self, ix): + if ix < 0: + raise IndexError("negative indexing not allowed in C") + return self._items[ix] + + def __setitem__(self, ix, value): + if ix < 0: + raise IndexError("negative indexing not allowed in C") + self._items[ix] = cast(self._basetype, value) + + def __eq__(self, value): + if value is None and not self._items: + return True + elif type(self) != type(value): + return False + else: + return not self._items and not value._items + + def __repr__(self): + return "%s *" % (self._basetype,) + +class ArrayType(PointerType): + + def __init__(self, value=None): + if value is None: + self._items = [None] * self._n + else: + super(ArrayType, self).__init__(value) + + +class StructType(CythonType): + + def __init__(self, *posargs, **data): + if not (posargs or data): + return + if posargs and data: + raise ValueError('Cannot accept both positional and keyword arguments.') + + # Allow 'cast_from' as single positional or keyword argument. + if data and len(data) == 1 and 'cast_from' in data: + cast_from = data.pop('cast_from') + elif len(posargs) == 1 and type(posargs[0]) is type(self): + cast_from, posargs = posargs[0], () + elif posargs: + for key, arg in zip(self._members, posargs): + setattr(self, key, arg) + return + else: + for key, value in data.items(): + if key not in self._members: + raise ValueError("Invalid struct attribute for %s: %s" % ( + self.__class__.__name__, key)) + setattr(self, key, value) + return + + # do cast + if data: + raise ValueError('Cannot accept keyword arguments when casting.') + if type(cast_from) is not type(self): + raise ValueError('Cannot cast from %s' % cast_from) + for key, value in cast_from.__dict__.items(): + setattr(self, key, value) + + def __setattr__(self, key, value): + if key in self._members: + self.__dict__[key] = cast(self._members[key], value) + else: + raise AttributeError("Struct has no member '%s'" % key) + + +class UnionType(CythonType): + + def __init__(self, cast_from=_Unspecified, **data): + if cast_from is not _Unspecified: + # do type cast + if len(data) > 0: + raise ValueError('Cannot accept keyword arguments when casting.') + if isinstance(cast_from, dict): + datadict = cast_from + elif type(cast_from) is type(self): + datadict = cast_from.__dict__ + else: + raise ValueError('Cannot cast from %s' % cast_from) + else: + datadict = data + if len(datadict) > 1: + raise AttributeError("Union can only store one field at a time.") + for key, value in datadict.items(): + setattr(self, key, value) + + def __setattr__(self, key, value): + if key == '__dict__': + CythonType.__setattr__(self, key, value) + elif key in self._members: + self.__dict__ = {key: cast(self._members[key], value)} + else: + raise AttributeError("Union has no member '%s'" % key) + +def pointer(basetype): + class PointerInstance(PointerType): + _basetype = basetype + return 
PointerInstance + +def array(basetype, n): + class ArrayInstance(ArrayType): + _basetype = basetype + _n = n + return ArrayInstance + +def struct(**members): + class StructInstance(StructType): + _members = members + for key in members: + setattr(StructInstance, key, None) + return StructInstance + +def union(**members): + class UnionInstance(UnionType): + _members = members + for key in members: + setattr(UnionInstance, key, None) + return UnionInstance + +class typedef(CythonType): + + def __init__(self, type, name=None): + self._basetype = type + self.name = name + + def __call__(self, *arg): + value = cast(self._basetype, *arg) + return value + + def __repr__(self): + return self.name or str(self._basetype) + + __getitem__ = index_type + +class _FusedType(CythonType): + __getitem__ = index_type + + +def fused_type(*args): + if not args: + raise TypeError("Expected at least one type as argument") + + # Find the numeric type with biggest rank if all types are numeric + rank = -1 + for type in args: + if type not in (py_int, py_long, py_float, py_complex): + break + + if type_ordering.index(type) > rank: + result_type = type + else: + return result_type + + # Not a simple numeric type, return a fused type instance. The result + # isn't really meant to be used, as we can't keep track of the context in + # pure-mode. Casting won't do anything in this case. + return _FusedType() + + +def _specialized_from_args(signatures, args, kwargs): + "Perhaps this should be implemented in a TreeFragment in Cython code" + raise Exception("yet to be implemented") + + +py_int = typedef(int, "int") +try: + py_long = typedef(long, "long") +except NameError: # Py3 + py_long = typedef(int, "long") +py_float = typedef(float, "float") +py_complex = typedef(complex, "double complex") + + +# Predefined types + +int_types = [ + 'char', + 'short', + 'Py_UNICODE', + 'int', + 'Py_UCS4', + 'long', + 'longlong', + 'Py_hash_t', + 'Py_ssize_t', + 'size_t', + 'ssize_t', + 'ptrdiff_t', +] +float_types = [ + 'longdouble', + 'double', + 'float', +] +complex_types = [ + 'longdoublecomplex', + 'doublecomplex', + 'floatcomplex', + 'complex', +] +other_types = [ + 'bint', + 'void', + 'Py_tss_t', +] + +to_repr = { + 'longlong': 'long long', + 'longdouble': 'long double', + 'longdoublecomplex': 'long double complex', + 'doublecomplex': 'double complex', + 'floatcomplex': 'float complex', +}.get + +gs = globals() + +# note: cannot simply name the unicode type here as 2to3 gets in the way and replaces it by str +try: + import __builtin__ as builtins +except ImportError: # Py3 + import builtins + +gs['unicode'] = typedef(getattr(builtins, 'unicode', str), 'unicode') +del builtins + +for name in int_types: + reprname = to_repr(name, name) + gs[name] = typedef(py_int, reprname) + if name not in ('Py_UNICODE', 'Py_UCS4', 'Py_hash_t', 'ptrdiff_t') and not name.endswith('size_t'): + gs['u'+name] = typedef(py_int, "unsigned " + reprname) + gs['s'+name] = typedef(py_int, "signed " + reprname) + +for name in float_types: + gs[name] = typedef(py_float, to_repr(name, name)) + +for name in complex_types: + gs[name] = typedef(py_complex, to_repr(name, name)) + +bint = typedef(bool, "bint") +void = typedef(None, "void") +Py_tss_t = typedef(None, "Py_tss_t") + +for t in int_types: + for i in range(1, 4): + gs["%s_%s" % ('p'*i, t)] = gs[t]._pointer(i) + if 'u'+t in gs: + gs["%s_u%s" % ('p'*i, t)] = gs['u'+t]._pointer(i) + gs["%s_s%s" % ('p'*i, t)] = gs['s'+t]._pointer(i) + +for t in float_types + complex_types + other_types: + for i in range(1, 4): 
+ gs["%s_%s" % ('p'*i, t)] = gs[t]._pointer(i) + +del t, i + +NULL = gs['p_void'](0) + +# looks like 'gs' has some users out there by now... +#del gs + +integral = floating = numeric = _FusedType() + +type_ordering = [py_int, py_long, py_float, py_complex] + +class CythonDotParallel(object): + """ + The cython.parallel module. + """ + + __all__ = ['parallel', 'prange', 'threadid'] + + def parallel(self, num_threads=None): + return nogil + + def prange(self, start=0, stop=None, step=1, nogil=False, schedule=None, chunksize=None, num_threads=None): + if stop is None: + stop = start + start = 0 + return range(start, stop, step) + + def threadid(self): + return 0 + + # def threadsavailable(self): + # return 1 + +class CythonDotImportedFromElsewhere(object): + """ + cython.dataclasses just shadows the standard library modules of the same name + """ + def __init__(self, module): + self.__path__ = [] + self.__file__ = None + self.__name__ = module + self.__package__ = module + + def __getattr__(self, attr): + # we typically only expect this to be called once + from importlib import import_module + import sys + try: + mod = import_module(self.__name__) + except ImportError: + # but if they don't exist (Python is not sufficiently up-to-date) then + # you can't use them + raise AttributeError("%s: the standard library module %s is not available" % + (attr, self.__name__)) + sys.modules['cython.%s' % self.__name__] = mod + return getattr(mod, attr) + +class CythonCImports(object): + """ + Simplistic module mock to make cimports sort-of work in Python code. + """ + def __init__(self, module): + self.__path__ = [] + self.__file__ = None + self.__name__ = module + self.__package__ = module + + def __getattr__(self, item): + if item.startswith('__') and item.endswith('__'): + raise AttributeError(item) + try: + return __import__(item) + except ImportError: + import sys + ex = AttributeError(item) + if sys.version_info >= (3, 0): + ex.__cause__ = None + raise ex + + +import math, sys +sys.modules['cython.parallel'] = CythonDotParallel() +sys.modules['cython.cimports'] = CythonCImports('cython.cimports') +sys.modules['cython.cimports.libc'] = CythonCImports('cython.cimports.libc') +sys.modules['cython.cimports.libc.math'] = math +# In pure Python mode @cython.dataclasses.dataclass and dataclass field should just +# shadow the standard library ones (if they are available) +dataclasses = sys.modules['cython.dataclasses'] = CythonDotImportedFromElsewhere('dataclasses') +del math, sys diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/StringIOTree.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/StringIOTree.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..7295e860ec07151dfa4a82dc3db1ff3a4a0adf8c Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/StringIOTree.cp39-win_amd64.pyd differ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/StringIOTree.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/StringIOTree.py new file mode 100644 index 0000000000000000000000000000000000000000..798009758b9d54e81a84fd2a8dd365853919e96e --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/StringIOTree.py @@ -0,0 +1,174 @@ +# cython: auto_pickle=False + +r""" +Implements a buffer with insertion points. When you know you need to +"get back" to a place and write more later, simply call insertion_point() +at that spot and get a new StringIOTree object that is "left behind". 
+ +EXAMPLE: + +>>> a = StringIOTree() +>>> _= a.write('first\n') +>>> b = a.insertion_point() +>>> _= a.write('third\n') +>>> _= b.write('second\n') +>>> a.getvalue().split() +['first', 'second', 'third'] + +>>> c = b.insertion_point() +>>> d = c.insertion_point() +>>> _= d.write('alpha\n') +>>> _= b.write('gamma\n') +>>> _= c.write('beta\n') +>>> b.getvalue().split() +['second', 'alpha', 'beta', 'gamma'] + +>>> try: from cStringIO import StringIO +... except ImportError: from io import StringIO + +>>> i = StringIOTree() +>>> d.insert(i) +>>> _= i.write('inserted\n') +>>> out = StringIO() +>>> a.copyto(out) +>>> out.getvalue().split() +['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third'] +""" + +from __future__ import absolute_import #, unicode_literals + +try: + # Prefer cStringIO since io.StringIO() does not support writing 'str' in Py2. + from cStringIO import StringIO +except ImportError: + from io import StringIO + + +class StringIOTree(object): + """ + See module docs. + """ + + def __init__(self, stream=None): + self.prepended_children = [] + if stream is None: + stream = StringIO() + self.stream = stream + self.write = stream.write + self.markers = [] + + def empty(self): + if self.stream.tell(): + return False + return all([child.empty() for child in self.prepended_children]) if self.prepended_children else True + + def getvalue(self): + content = [] + self._collect_in(content) + return "".join(content) + + def _collect_in(self, target_list): + for x in self.prepended_children: + x._collect_in(target_list) + stream_content = self.stream.getvalue() + if stream_content: + target_list.append(stream_content) + + def copyto(self, target): + """Potentially cheaper than getvalue as no string concatenation + needs to happen.""" + for child in self.prepended_children: + child.copyto(target) + stream_content = self.stream.getvalue() + if stream_content: + target.write(stream_content) + + def commit(self): + # Save what we have written until now so that the buffer + # itself is empty -- this makes it ready for insertion + if self.stream.tell(): + self.prepended_children.append(StringIOTree(self.stream)) + self.prepended_children[-1].markers = self.markers + self.markers = [] + self.stream = StringIO() + self.write = self.stream.write + + def reset(self): + self.prepended_children = [] + self.markers = [] + self.stream = StringIO() + self.write = self.stream.write + + def insert(self, iotree): + """ + Insert a StringIOTree (and all of its contents) at this location. + Further writing to self appears after what is inserted. + """ + self.commit() + self.prepended_children.append(iotree) + + def insertion_point(self): + """ + Returns a new StringIOTree, which is left behind at the current position + (it what is written to the result will appear right before whatever is + next written to self). + + Calling getvalue() or copyto() on the result will only return the + contents written to it. + """ + # Save what we have written until now + # This is so that getvalue on the result doesn't include it. + self.commit() + # Construct the new forked object to return + other = StringIOTree() + self.prepended_children.append(other) + return other + + def allmarkers(self): + children = self.prepended_children + return [m for c in children for m in c.allmarkers()] + self.markers + + """ + # Print the result of allmarkers in a nice human-readable form. Use it only for debugging. + # Prints e.g. 
+ # /path/to/source.pyx: + # cython line 2 maps to 3299-3343 + # cython line 4 maps to 2236-2245 2306 3188-3201 + # /path/to/othersource.pyx: + # cython line 3 maps to 1234-1270 + # ... + # Note: In the example above, 3343 maps to line 2, 3344 does not. + def print_hr_allmarkers(self): + from collections import defaultdict + markers = self.allmarkers() + totmap = defaultdict(lambda: defaultdict(list)) + for c_lineno, (cython_desc, cython_lineno) in enumerate(markers): + if cython_lineno > 0 and cython_desc.filename is not None: + totmap[cython_desc.filename][cython_lineno].append(c_lineno + 1) + reprstr = "" + if totmap == 0: + reprstr += "allmarkers is empty\n" + try: + sorted(totmap.items()) + except: + print(totmap) + print(totmap.items()) + for cython_path, filemap in sorted(totmap.items()): + reprstr += cython_path + ":\n" + for cython_lineno, c_linenos in sorted(filemap.items()): + reprstr += "\tcython line " + str(cython_lineno) + " maps to " + i = 0 + while i < len(c_linenos): + reprstr += str(c_linenos[i]) + flag = False + while i+1 < len(c_linenos) and c_linenos[i+1] == c_linenos[i]+1: + i += 1 + flag = True + if flag: + reprstr += "-" + str(c_linenos[i]) + " " + i += 1 + reprstr += "\n" + + import sys + sys.stdout.write(reprstr) + """ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..41a0ce3d0efa247760db266bace8e34a4b5dd9fa --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/__init__.py @@ -0,0 +1,4 @@ +# The original Tempita implements all of its templating code here. +# Moved it to _tempita.py to make the compilation portable. + +from ._tempita import * diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/_looper.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/_looper.py new file mode 100644 index 0000000000000000000000000000000000000000..4010988300ffd12da5dd136f1f173d584261191e --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/_looper.py @@ -0,0 +1,163 @@ +""" +Helper for looping over sequences, particular in templates. + +Often in a loop in a template it's handy to know what's next up, +previously up, if this is the first or last item in the sequence, etc. +These can be awkward to manage in a normal Python loop, but using the +looper you can get a better sense of the context. Use like:: + + >>> for loop, item in looper(['a', 'b', 'c']): + ... print loop.number, item + ... if not loop.last: + ... print '---' + 1 a + --- + 2 b + --- + 3 c + +""" + +import sys +from Cython.Tempita.compat3 import basestring_ + +__all__ = ['looper'] + + +class looper(object): + """ + Helper for looping (particularly in templates) + + Use this like:: + + for loop, item in looper(seq): + if loop.first: + ... 
+ """ + + def __init__(self, seq): + self.seq = seq + + def __iter__(self): + return looper_iter(self.seq) + + def __repr__(self): + return '<%s for %r>' % ( + self.__class__.__name__, self.seq) + + +class looper_iter(object): + + def __init__(self, seq): + self.seq = list(seq) + self.pos = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.pos >= len(self.seq): + raise StopIteration + result = loop_pos(self.seq, self.pos), self.seq[self.pos] + self.pos += 1 + return result + + if sys.version < "3": + next = __next__ + + +class loop_pos(object): + + def __init__(self, seq, pos): + self.seq = seq + self.pos = pos + + def __repr__(self): + return '' % ( + self.seq[self.pos], self.pos) + + def index(self): + return self.pos + index = property(index) + + def number(self): + return self.pos + 1 + number = property(number) + + def item(self): + return self.seq[self.pos] + item = property(item) + + def __next__(self): + try: + return self.seq[self.pos + 1] + except IndexError: + return None + __next__ = property(__next__) + + if sys.version < "3": + next = __next__ + + def previous(self): + if self.pos == 0: + return None + return self.seq[self.pos - 1] + previous = property(previous) + + def odd(self): + return not self.pos % 2 + odd = property(odd) + + def even(self): + return self.pos % 2 + even = property(even) + + def first(self): + return self.pos == 0 + first = property(first) + + def last(self): + return self.pos == len(self.seq) - 1 + last = property(last) + + def length(self): + return len(self.seq) + length = property(length) + + def first_group(self, getter=None): + """ + Returns true if this item is the start of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. + """ + if self.first: + return True + return self._compare_group(self.item, self.previous, getter) + + def last_group(self, getter=None): + """ + Returns true if this item is the end of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. 
+ """ + if self.last: + return True + return self._compare_group(self.item, self.__next__, getter) + + def _compare_group(self, item, other, getter): + if getter is None: + return item != other + elif (isinstance(getter, basestring_) + and getter.startswith('.')): + getter = getter[1:] + if getter.endswith('()'): + getter = getter[:-2] + return getattr(item, getter)() != getattr(other, getter)() + else: + return getattr(item, getter) != getattr(other, getter) + elif hasattr(getter, '__call__'): + return getter(item) != getter(other) + else: + return item[getter] != other[getter] diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/_tempita.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/_tempita.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..931ccc35f6b2af8e36cead6d32663bdcfbd209c4 Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/_tempita.cp39-win_amd64.pyd differ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/_tempita.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/_tempita.py new file mode 100644 index 0000000000000000000000000000000000000000..284bb497987f625889a8506a2a515fec95c18f76 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/_tempita.py @@ -0,0 +1,1091 @@ +# cython: language_level=3str + +""" +A small templating language + +This implements a small templating language. This language implements +if/elif/else, for/continue/break, expressions, and blocks of Python +code. The syntax is:: + + {{any expression (function calls etc)}} + {{any expression | filter}} + {{for x in y}}...{{endfor}} + {{if x}}x{{elif y}}y{{else}}z{{endif}} + {{py:x=1}} + {{py: + def foo(bar): + return 'baz' + }} + {{default var = default_value}} + {{# comment}} + +You use this with the ``Template`` class or the ``sub`` shortcut. +The ``Template`` class takes the template string and the name of +the template (for errors) and a default namespace. Then (like +``string.Template``) you can call the ``tmpl.substitute(**kw)`` +method to make a substitution (or ``tmpl.substitute(a_dict)``). + +``sub(content, **kw)`` substitutes the template immediately. You +can use ``__name='tmpl.html'`` to set the name of the template. + +If there are syntax errors ``TemplateError`` will be raised. 
+""" + +from __future__ import absolute_import + +import re +import sys +import os +import tokenize +from io import StringIO + +from ._looper import looper +from .compat3 import bytes, unicode_, basestring_, next, is_unicode, coerce_text + +__all__ = ['TemplateError', 'Template', 'sub', 'bunch'] + +in_re = re.compile(r'\s+in\s+') +var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) + + +class TemplateError(Exception): + """Exception raised while parsing a template + """ + + def __init__(self, message, position, name=None): + Exception.__init__(self, message) + self.position = position + self.name = name + + def __str__(self): + msg = ' '.join(self.args) + if self.position: + msg = '%s at line %s column %s' % ( + msg, self.position[0], self.position[1]) + if self.name: + msg += ' in %s' % self.name + return msg + + +class _TemplateContinue(Exception): + pass + + +class _TemplateBreak(Exception): + pass + + +def get_file_template(name, from_template): + path = os.path.join(os.path.dirname(from_template.name), name) + return from_template.__class__.from_filename( + path, namespace=from_template.namespace, + get_template=from_template.get_template) + + +class Template(object): + + default_namespace = { + 'start_braces': '{{', + 'end_braces': '}}', + 'looper': looper, + } + + default_encoding = 'utf8' + default_inherit = None + + def __init__(self, content, name=None, namespace=None, stacklevel=None, + get_template=None, default_inherit=None, line_offset=0, + delimiters=None, delimeters=None): + self.content = content + + # set delimiters + if delimeters: + import warnings + warnings.warn( + "'delimeters' kwarg is being deprecated in favor of correctly" + " spelled 'delimiters'. Please adjust your code.", + DeprecationWarning + ) + if delimiters is None: + delimiters = delimeters + if delimiters is None: + delimiters = (self.default_namespace['start_braces'], + self.default_namespace['end_braces']) + else: + #assert len(delimiters) == 2 and all([isinstance(delimiter, basestring) + # for delimiter in delimiters]) + self.default_namespace = self.__class__.default_namespace.copy() + self.default_namespace['start_braces'] = delimiters[0] + self.default_namespace['end_braces'] = delimiters[1] + self.delimiters = self.delimeters = delimiters # Keep a legacy read-only copy, but don't use it. 
+ + self._unicode = is_unicode(content) + if name is None and stacklevel is not None: + try: + caller = sys._getframe(stacklevel) + except ValueError: + pass + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__file__' in globals: + name = globals['__file__'] + if name.endswith('.pyc') or name.endswith('.pyo'): + name = name[:-1] + elif '__name__' in globals: + name = globals['__name__'] + else: + name = '' + if lineno: + name += ':%s' % lineno + self.name = name + self._parsed = parse(content, name=name, line_offset=line_offset, delimiters=self.delimiters) + if namespace is None: + namespace = {} + self.namespace = namespace + self.get_template = get_template + if default_inherit is not None: + self.default_inherit = default_inherit + + def from_filename(cls, filename, namespace=None, encoding=None, + default_inherit=None, get_template=get_file_template): + with open(filename, 'rb') as f: + c = f.read() + if encoding: + c = c.decode(encoding) + return cls(content=c, name=filename, namespace=namespace, + default_inherit=default_inherit, get_template=get_template) + + from_filename = classmethod(from_filename) + + def __repr__(self): + return '<%s %s name=%r>' % ( + self.__class__.__name__, + hex(id(self))[2:], self.name) + + def substitute(self, *args, **kw): + if args: + if kw: + raise TypeError( + "You can only give positional *or* keyword arguments") + if len(args) > 1: + raise TypeError( + "You can only give one positional argument") + if not hasattr(args[0], 'items'): + raise TypeError( + "If you pass in a single argument, you must pass in a dictionary-like object (with a .items() method); you gave %r" + % (args[0],)) + kw = args[0] + ns = kw + ns['__template_name__'] = self.name + if self.namespace: + ns.update(self.namespace) + result, defs, inherit = self._interpret(ns) + if not inherit: + inherit = self.default_inherit + if inherit: + result = self._interpret_inherit(result, defs, inherit, ns) + return result + + def _interpret(self, ns): + __traceback_hide__ = True + parts = [] + defs = {} + self._interpret_codes(self._parsed, ns, out=parts, defs=defs) + if '__inherit__' in defs: + inherit = defs.pop('__inherit__') + else: + inherit = None + return ''.join(parts), defs, inherit + + def _interpret_inherit(self, body, defs, inherit_template, ns): + __traceback_hide__ = True + if not self.get_template: + raise TemplateError( + 'You cannot use inheritance without passing in get_template', + position=None, name=self.name) + templ = self.get_template(inherit_template, self) + self_ = TemplateObject(self.name) + for name, value in defs.items(): + setattr(self_, name, value) + self_.body = body + ns = ns.copy() + ns['self'] = self_ + return templ.substitute(ns) + + def _interpret_codes(self, codes, ns, out, defs): + __traceback_hide__ = True + for item in codes: + if isinstance(item, basestring_): + out.append(item) + else: + self._interpret_code(item, ns, out, defs) + + def _interpret_code(self, code, ns, out, defs): + __traceback_hide__ = True + name, pos = code[0], code[1] + if name == 'py': + self._exec(code[2], ns, pos) + elif name == 'continue': + raise _TemplateContinue() + elif name == 'break': + raise _TemplateBreak() + elif name == 'for': + vars, expr, content = code[2], code[3], code[4] + expr = self._eval(expr, ns, pos) + self._interpret_for(vars, expr, content, ns, out, defs) + elif name == 'cond': + parts = code[2:] + self._interpret_if(parts, ns, out, defs) + elif name == 'expr': + parts = code[2].split('|') + base = self._eval(parts[0], ns, pos) + for 
part in parts[1:]: + func = self._eval(part, ns, pos) + base = func(base) + out.append(self._repr(base, pos)) + elif name == 'default': + var, expr = code[2], code[3] + if var not in ns: + result = self._eval(expr, ns, pos) + ns[var] = result + elif name == 'inherit': + expr = code[2] + value = self._eval(expr, ns, pos) + defs['__inherit__'] = value + elif name == 'def': + name = code[2] + signature = code[3] + parts = code[4] + ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns, + pos=pos) + elif name == 'comment': + return + else: + assert 0, "Unknown code: %r" % name + + def _interpret_for(self, vars, expr, content, ns, out, defs): + __traceback_hide__ = True + for item in expr: + if len(vars) == 1: + ns[vars[0]] = item + else: + if len(vars) != len(item): + raise ValueError( + 'Need %i items to unpack (got %i items)' + % (len(vars), len(item))) + for name, value in zip(vars, item): + ns[name] = value + try: + self._interpret_codes(content, ns, out, defs) + except _TemplateContinue: + continue + except _TemplateBreak: + break + + def _interpret_if(self, parts, ns, out, defs): + __traceback_hide__ = True + # @@: if/else/else gets through + for part in parts: + assert not isinstance(part, basestring_) + name, pos = part[0], part[1] + if name == 'else': + result = True + else: + result = self._eval(part[2], ns, pos) + if result: + self._interpret_codes(part[3], ns, out, defs) + break + + def _eval(self, code, ns, pos): + __traceback_hide__ = True + try: + try: + value = eval(code, self.default_namespace, ns) + except SyntaxError as e: + raise SyntaxError( + 'invalid syntax in expression: %s' % code) + return value + except Exception as e: + if getattr(e, 'args', None): + arg0 = e.args[0] + else: + arg0 = coerce_text(e) + e.args = (self._add_line_info(arg0, pos),) + raise + + def _exec(self, code, ns, pos): + __traceback_hide__ = True + try: + exec(code, self.default_namespace, ns) + except Exception as e: + if e.args: + e.args = (self._add_line_info(e.args[0], pos),) + else: + e.args = (self._add_line_info(None, pos),) + raise + + def _repr(self, value, pos): + __traceback_hide__ = True + try: + if value is None: + return '' + if self._unicode: + try: + value = unicode_(value) + except UnicodeDecodeError: + value = bytes(value) + else: + if not isinstance(value, basestring_): + value = coerce_text(value) + if (is_unicode(value) + and self.default_encoding): + value = value.encode(self.default_encoding) + except Exception as e: + e.args = (self._add_line_info(e.args[0], pos),) + raise + else: + if self._unicode and isinstance(value, bytes): + if not self.default_encoding: + raise UnicodeDecodeError( + 'Cannot decode bytes value %r into unicode ' + '(no default_encoding provided)' % value) + try: + value = value.decode(self.default_encoding) + except UnicodeDecodeError as e: + raise UnicodeDecodeError( + e.encoding, + e.object, + e.start, + e.end, + e.reason + ' in string %r' % value) + elif not self._unicode and is_unicode(value): + if not self.default_encoding: + raise UnicodeEncodeError( + 'Cannot encode unicode value %r into bytes ' + '(no default_encoding provided)' % value) + value = value.encode(self.default_encoding) + return value + + def _add_line_info(self, msg, pos): + msg = "%s at line %s column %s" % ( + msg, pos[0], pos[1]) + if self.name: + msg += " in file %s" % self.name + return msg + + +def sub(content, delimiters=None, **kw): + name = kw.get('__name') + delimeters = kw.pop('delimeters') if 'delimeters' in kw else None # for legacy code + tmpl = 
Template(content, name=name, delimiters=delimiters, delimeters=delimeters) + return tmpl.substitute(kw) + + +def paste_script_template_renderer(content, vars, filename=None): + tmpl = Template(content, name=filename) + return tmpl.substitute(vars) + + +class bunch(dict): + + def __init__(self, **kw): + for name, value in kw.items(): + setattr(self, name, value) + + def __setattr__(self, name, value): + self[name] = value + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __getitem__(self, key): + if 'default' in self: + try: + return dict.__getitem__(self, key) + except KeyError: + return dict.__getitem__(self, 'default') + else: + return dict.__getitem__(self, key) + + def __repr__(self): + return '<%s %s>' % ( + self.__class__.__name__, + ' '.join(['%s=%r' % (k, v) for k, v in sorted(self.items())])) + + +class TemplateDef(object): + def __init__(self, template, func_name, func_signature, + body, ns, pos, bound_self=None): + self._template = template + self._func_name = func_name + self._func_signature = func_signature + self._body = body + self._ns = ns + self._pos = pos + self._bound_self = bound_self + + def __repr__(self): + return '' % ( + self._func_name, self._func_signature, + self._template.name, self._pos) + + def __str__(self): + return self() + + def __call__(self, *args, **kw): + values = self._parse_signature(args, kw) + ns = self._ns.copy() + ns.update(values) + if self._bound_self is not None: + ns['self'] = self._bound_self + out = [] + subdefs = {} + self._template._interpret_codes(self._body, ns, out, subdefs) + return ''.join(out) + + def __get__(self, obj, type=None): + if obj is None: + return self + return self.__class__( + self._template, self._func_name, self._func_signature, + self._body, self._ns, self._pos, bound_self=obj) + + def _parse_signature(self, args, kw): + values = {} + sig_args, var_args, var_kw, defaults = self._func_signature + extra_kw = {} + for name, value in kw.items(): + if not var_kw and name not in sig_args: + raise TypeError( + 'Unexpected argument %s' % name) + if name in sig_args: + values[sig_args] = value + else: + extra_kw[name] = value + args = list(args) + sig_args = list(sig_args) + while args: + while sig_args and sig_args[0] in values: + sig_args.pop(0) + if sig_args: + name = sig_args.pop(0) + values[name] = args.pop(0) + elif var_args: + values[var_args] = tuple(args) + break + else: + raise TypeError( + 'Extra position arguments: %s' + % ', '.join([repr(v) for v in args])) + for name, value_expr in defaults.items(): + if name not in values: + values[name] = self._template._eval( + value_expr, self._ns, self._pos) + for name in sig_args: + if name not in values: + raise TypeError( + 'Missing argument: %s' % name) + if var_kw: + values[var_kw] = extra_kw + return values + + +class TemplateObject(object): + + def __init__(self, name): + self.__name = name + self.get = TemplateObjectGetter(self) + + def __repr__(self): + return '<%s %s>' % (self.__class__.__name__, self.__name) + + +class TemplateObjectGetter(object): + + def __init__(self, template_obj): + self.__template_obj = template_obj + + def __getattr__(self, attr): + return getattr(self.__template_obj, attr, Empty) + + def __repr__(self): + return '<%s around %r>' % (self.__class__.__name__, self.__template_obj) + + +class _Empty(object): + def __call__(self, *args, **kw): + return self + + def __str__(self): + return '' + + def __repr__(self): + return 'Empty' + + def __unicode__(self): + return u'' + + def 
__iter__(self): + return iter(()) + + def __bool__(self): + return False + + if sys.version < "3": + __nonzero__ = __bool__ + +Empty = _Empty() +del _Empty + +############################################################ +## Lexing and Parsing +############################################################ + + +def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): + """ + Lex a string into chunks: + + >>> lex('hey') + ['hey'] + >>> lex('hey {{you}}') + ['hey ', ('you', (1, 7))] + >>> lex('hey {{') + Traceback (most recent call last): + ... + TemplateError: No }} to finish last expression at line 1 column 7 + >>> lex('hey }}') + Traceback (most recent call last): + ... + TemplateError: }} outside expression at line 1 column 7 + >>> lex('hey {{ {{') + Traceback (most recent call last): + ... + TemplateError: {{ inside expression at line 1 column 10 + + """ + if delimiters is None: + delimiters = ( Template.default_namespace['start_braces'], + Template.default_namespace['end_braces'] ) + in_expr = False + chunks = [] + last = 0 + last_pos = (line_offset + 1, 1) + + token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]), + re.escape(delimiters[1]))) + for match in token_re.finditer(s): + expr = match.group(0) + pos = find_position(s, match.end(), last, last_pos) + if expr == delimiters[0] and in_expr: + raise TemplateError('%s inside expression' % delimiters[0], + position=pos, + name=name) + elif expr == delimiters[1] and not in_expr: + raise TemplateError('%s outside expression' % delimiters[1], + position=pos, + name=name) + if expr == delimiters[0]: + part = s[last:match.start()] + if part: + chunks.append(part) + in_expr = True + else: + chunks.append((s[last:match.start()], last_pos)) + in_expr = False + last = match.end() + last_pos = pos + if in_expr: + raise TemplateError('No %s to finish last expression' % delimiters[1], + name=name, position=last_pos) + part = s[last:] + if part: + chunks.append(part) + if trim_whitespace: + chunks = trim_lex(chunks) + return chunks + +statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)') +single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break'] +trail_whitespace_re = re.compile(r'\n\r?[\t ]*$') +lead_whitespace_re = re.compile(r'^[\t ]*\n') + + +def trim_lex(tokens): + r""" + Takes a lexed set of tokens, and removes whitespace when there is + a directive on a line by itself: + + >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) + >>> tokens + [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] + >>> trim_lex(tokens) + [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] + """ + last_trim = None + for i, current in enumerate(tokens): + if isinstance(current, basestring_): + # we don't trim this + continue + item = current[0] + if not statement_re.search(item) and item not in single_statements: + continue + if not i: + prev = '' + else: + prev = tokens[i - 1] + if i + 1 >= len(tokens): + next_chunk = '' + else: + next_chunk = tokens[i + 1] + if (not isinstance(next_chunk, basestring_) + or not isinstance(prev, basestring_)): + continue + prev_ok = not prev or trail_whitespace_re.search(prev) + if i == 1 and not prev.strip(): + prev_ok = True + if last_trim is not None and last_trim + 2 == i and not prev.strip(): + prev_ok = 'last' + if (prev_ok + and (not next_chunk or lead_whitespace_re.search(next_chunk) + or (i == len(tokens) - 2 and not next_chunk.strip()))): + if prev: + if ((i == 1 and not prev.strip()) + or prev_ok == 'last'): + tokens[i - 1] = '' + else: + 
m = trail_whitespace_re.search(prev) + # +1 to leave the leading \n on: + prev = prev[:m.start() + 1] + tokens[i - 1] = prev + if next_chunk: + last_trim = i + if i == len(tokens) - 2 and not next_chunk.strip(): + tokens[i + 1] = '' + else: + m = lead_whitespace_re.search(next_chunk) + next_chunk = next_chunk[m.end():] + tokens[i + 1] = next_chunk + return tokens + + +def find_position(string, index, last_index, last_pos): + """Given a string and index, return (line, column)""" + lines = string.count('\n', last_index, index) + if lines > 0: + column = index - string.rfind('\n', last_index, index) + else: + column = last_pos[1] + (index - last_index) + return (last_pos[0] + lines, column) + + +def parse(s, name=None, line_offset=0, delimiters=None): + r""" + Parses a string into a kind of AST + + >>> parse('{{x}}') + [('expr', (1, 3), 'x')] + >>> parse('foo') + ['foo'] + >>> parse('{{if x}}test{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] + >>> parse('series->{{for x in y}}x={{x}}{{endfor}}') + ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] + >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') + [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] + >>> parse('{{py:x=1}}') + [('py', (1, 3), 'x=1')] + >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] + + Some exceptions:: + + >>> parse('{{continue}}') + Traceback (most recent call last): + ... + TemplateError: continue outside of for loop at line 1 column 3 + >>> parse('{{if x}}foo') + Traceback (most recent call last): + ... + TemplateError: No {{endif}} at line 1 column 3 + >>> parse('{{else}}') + Traceback (most recent call last): + ... + TemplateError: else outside of an if block at line 1 column 3 + >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Unexpected endif at line 1 column 25 + >>> parse('{{if}}{{endif}}') + Traceback (most recent call last): + ... + TemplateError: if with no expression at line 1 column 3 + >>> parse('{{for x y}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 + >>> parse('{{py:x=1\ny=2}}') + Traceback (most recent call last): + ... 
+ TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 + """ + if delimiters is None: + delimiters = ( Template.default_namespace['start_braces'], + Template.default_namespace['end_braces'] ) + tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters) + result = [] + while tokens: + next_chunk, tokens = parse_expr(tokens, name) + result.append(next_chunk) + return result + + +def parse_expr(tokens, name, context=()): + if isinstance(tokens[0], basestring_): + return tokens[0], tokens[1:] + expr, pos = tokens[0] + expr = expr.strip() + if expr.startswith('py:'): + expr = expr[3:].lstrip(' \t') + if expr.startswith('\n') or expr.startswith('\r'): + expr = expr.lstrip('\r\n') + if '\r' in expr: + expr = expr.replace('\r\n', '\n') + expr = expr.replace('\r', '') + expr += '\n' + else: + if '\n' in expr: + raise TemplateError( + 'Multi-line py blocks must start with a newline', + position=pos, name=name) + return ('py', pos, expr), tokens[1:] + elif expr in ('continue', 'break'): + if 'for' not in context: + raise TemplateError( + 'continue outside of for loop', + position=pos, name=name) + return (expr, pos), tokens[1:] + elif expr.startswith('if '): + return parse_cond(tokens, name, context) + elif (expr.startswith('elif ') + or expr == 'else'): + raise TemplateError( + '%s outside of an if block' % expr.split()[0], + position=pos, name=name) + elif expr in ('if', 'elif', 'for'): + raise TemplateError( + '%s with no expression' % expr, + position=pos, name=name) + elif expr in ('endif', 'endfor', 'enddef'): + raise TemplateError( + 'Unexpected %s' % expr, + position=pos, name=name) + elif expr.startswith('for '): + return parse_for(tokens, name, context) + elif expr.startswith('default '): + return parse_default(tokens, name, context) + elif expr.startswith('inherit '): + return parse_inherit(tokens, name, context) + elif expr.startswith('def '): + return parse_def(tokens, name, context) + elif expr.startswith('#'): + return ('comment', pos, tokens[0][0]), tokens[1:] + return ('expr', pos, tokens[0][0]), tokens[1:] + + +def parse_cond(tokens, name, context): + start = tokens[0][1] + pieces = [] + context = context + ('if',) + while 1: + if not tokens: + raise TemplateError( + 'Missing {{endif}}', + position=start, name=name) + if (isinstance(tokens[0], tuple) + and tokens[0][0] == 'endif'): + return ('cond', start) + tuple(pieces), tokens[1:] + next_chunk, tokens = parse_one_cond(tokens, name, context) + pieces.append(next_chunk) + + +def parse_one_cond(tokens, name, context): + (first, pos), tokens = tokens[0], tokens[1:] + content = [] + if first.endswith(':'): + first = first[:-1] + if first.startswith('if '): + part = ('if', pos, first[3:].lstrip(), content) + elif first.startswith('elif '): + part = ('elif', pos, first[5:].lstrip(), content) + elif first == 'else': + part = ('else', pos, None, content) + else: + assert 0, "Unexpected token %r at %s" % (first, pos) + while 1: + if not tokens: + raise TemplateError( + 'No {{endif}}', + position=pos, name=name) + if (isinstance(tokens[0], tuple) + and (tokens[0][0] == 'endif' + or tokens[0][0].startswith('elif ') + or tokens[0][0] == 'else')): + return part, tokens + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_for(tokens, name, context): + first, pos = tokens[0] + tokens = tokens[1:] + context = ('for',) + context + content = [] + assert first.startswith('for '), first + if first.endswith(':'): + first = first[:-1] + first = first[3:].strip() 
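+    # 'first' is now the loop header without the 'for ' prefix, e.g. "x, y in z";
+    # the standalone 'in' found below splits the target list from the iterable.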
+ match = in_re.search(first) + if not match: + raise TemplateError( + 'Bad for (no "in") in %r' % first, + position=pos, name=name) + vars = first[:match.start()] + if '(' in vars: + raise TemplateError( + 'You cannot have () in the variable section of a for loop (%r)' + % vars, position=pos, name=name) + vars = tuple([ + v.strip() for v in first[:match.start()].split(',') + if v.strip()]) + expr = first[match.end():] + while 1: + if not tokens: + raise TemplateError( + 'No {{endfor}}', + position=pos, name=name) + if (isinstance(tokens[0], tuple) + and tokens[0][0] == 'endfor'): + return ('for', pos, vars, expr, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_default(tokens, name, context): + first, pos = tokens[0] + assert first.startswith('default ') + first = first.split(None, 1)[1] + parts = first.split('=', 1) + if len(parts) == 1: + raise TemplateError( + "Expression must be {{default var=value}}; no = found in %r" % first, + position=pos, name=name) + var = parts[0].strip() + if ',' in var: + raise TemplateError( + "{{default x, y = ...}} is not supported", + position=pos, name=name) + if not var_re.search(var): + raise TemplateError( + "Not a valid variable name for {{default}}: %r" + % var, position=pos, name=name) + expr = parts[1].strip() + return ('default', pos, var, expr), tokens[1:] + + +def parse_inherit(tokens, name, context): + first, pos = tokens[0] + assert first.startswith('inherit ') + expr = first.split(None, 1)[1] + return ('inherit', pos, expr), tokens[1:] + + +def parse_def(tokens, name, context): + first, start = tokens[0] + tokens = tokens[1:] + assert first.startswith('def ') + first = first.split(None, 1)[1] + if first.endswith(':'): + first = first[:-1] + if '(' not in first: + func_name = first + sig = ((), None, None, {}) + elif not first.endswith(')'): + raise TemplateError("Function definition doesn't end with ): %s" % first, + position=start, name=name) + else: + first = first[:-1] + func_name, sig_text = first.split('(', 1) + sig = parse_signature(sig_text, name, start) + context = context + ('def',) + content = [] + while 1: + if not tokens: + raise TemplateError( + 'Missing {{enddef}}', + position=start, name=name) + if (isinstance(tokens[0], tuple) + and tokens[0][0] == 'enddef'): + return ('def', start, func_name, sig, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_signature(sig_text, name, pos): + tokens = tokenize.generate_tokens(StringIO(sig_text).readline) + sig_args = [] + var_arg = None + var_kw = None + defaults = {} + + def get_token(pos=False): + try: + tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens) + except StopIteration: + return tokenize.ENDMARKER, '' + if pos: + return tok_type, tok_string, (srow, scol), (erow, ecol) + else: + return tok_type, tok_string + while 1: + var_arg_type = None + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER: + break + if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'): + var_arg_type = tok_string + tok_type, tok_string = get_token() + if tok_type != tokenize.NAME: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + var_name = tok_string + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','): + if var_arg_type == '*': + var_arg = var_name + elif var_arg_type == '**': + var_kw = var_name + 
else: + sig_args.append(var_name) + if tok_type == tokenize.ENDMARKER: + break + continue + if var_arg_type is not None: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + if tok_type == tokenize.OP and tok_string == '=': + nest_type = None + unnest_type = None + nest_count = 0 + start_pos = end_pos = None + parts = [] + while 1: + tok_type, tok_string, s, e = get_token(True) + if start_pos is None: + start_pos = s + end_pos = e + if tok_type == tokenize.ENDMARKER and nest_count: + raise TemplateError('Invalid signature: (%s)' % sig_text, + position=pos, name=name) + if (not nest_count and + (tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))): + default_expr = isolate_expression(sig_text, start_pos, end_pos) + defaults[var_name] = default_expr + sig_args.append(var_name) + break + parts.append((tok_type, tok_string)) + if nest_count and tok_type == tokenize.OP and tok_string == nest_type: + nest_count += 1 + elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type: + nest_count -= 1 + if not nest_count: + nest_type = unnest_type = None + elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'): + nest_type = tok_string + nest_count = 1 + unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type] + return sig_args, var_arg, var_kw, defaults + + +def isolate_expression(string, start_pos, end_pos): + srow, scol = start_pos + srow -= 1 + erow, ecol = end_pos + erow -= 1 + lines = string.splitlines(True) + if srow == erow: + return lines[srow][scol:ecol] + parts = [lines[srow][scol:]] + parts.extend(lines[srow+1:erow]) + if erow < len(lines): + # It'll sometimes give (end_row_past_finish, 0) + parts.append(lines[erow][:ecol]) + return ''.join(parts) + +_fill_command_usage = """\ +%prog [OPTIONS] TEMPLATE arg=value + +Use py:arg=value to set a Python value; otherwise all values are +strings. 
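+
+Example: %prog template.txt greeting=hello py:count=3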
+""" + + +def fill_command(args=None): + import sys + import optparse + import pkg_resources + import os + if args is None: + args = sys.argv[1:] + dist = pkg_resources.get_distribution('Paste') + parser = optparse.OptionParser( + version=coerce_text(dist), + usage=_fill_command_usage) + parser.add_option( + '-o', '--output', + dest='output', + metavar="FILENAME", + help="File to write output to (default stdout)") + parser.add_option( + '--env', + dest='use_env', + action='store_true', + help="Put the environment in as top-level variables") + options, args = parser.parse_args(args) + if len(args) < 1: + print('You must give a template filename') + sys.exit(2) + template_name = args[0] + args = args[1:] + vars = {} + if options.use_env: + vars.update(os.environ) + for value in args: + if '=' not in value: + print('Bad argument: %r' % value) + sys.exit(2) + name, value = value.split('=', 1) + if name.startswith('py:'): + name = name[:3] + value = eval(value) + vars[name] = value + if template_name == '-': + template_content = sys.stdin.read() + template_name = '' + else: + with open(template_name, 'rb') as f: + template_content = f.read() + template = Template(template_content, name=template_name) + result = template.substitute(vars) + if options.output: + with open(options.output, 'wb') as f: + f.write(result) + else: + sys.stdout.write(result) + +if __name__ == '__main__': + fill_command() diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/compat3.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/compat3.py new file mode 100644 index 0000000000000000000000000000000000000000..9905530757ae803920bb0ae6a76415db3e900863 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tempita/compat3.py @@ -0,0 +1,47 @@ +import sys + +__all__ = ['b', 'basestring_', 'bytes', 'unicode_', 'next', 'is_unicode'] + +if sys.version < "3": + b = bytes = str + basestring_ = basestring + unicode_ = unicode +else: + + def b(s): + if isinstance(s, str): + return s.encode('latin1') + return bytes(s) + basestring_ = (bytes, str) + bytes = bytes + unicode_ = str +text = str + +if sys.version < "3": + + def next(obj): + return obj.next() +else: + next = next + +if sys.version < "3": + + def is_unicode(obj): + return isinstance(obj, unicode) +else: + + def is_unicode(obj): + return isinstance(obj, str) + + +def coerce_text(v): + if not isinstance(v, basestring_): + if sys.version < "3": + attr = '__unicode__' + else: + attr = '__str__' + if hasattr(v, attr): + return unicode(v) + else: + return bytes(v) + return v diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/TestUtils.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/TestUtils.py new file mode 100644 index 0000000000000000000000000000000000000000..ed0c19793bcda7aa41e15d084b505fdb96223674 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/TestUtils.py @@ -0,0 +1,398 @@ +from __future__ import absolute_import + +import os +import re +import unittest +import shlex +import sys +import tempfile +import textwrap +from io import open +from functools import partial + +from .Compiler import Errors +from .CodeWriter import CodeWriter +from .Compiler.TreeFragment import TreeFragment, strip_common_indent +from .Compiler.Visitor import TreeVisitor, VisitorTransform +from .Compiler import TreePath + + +class NodeTypeWriter(TreeVisitor): + def __init__(self): + super(NodeTypeWriter, self).__init__() + self._indents = 0 + self.result = [] + + def visit_Node(self, node): + if not self.access_path: + name = u"(root)" + else: 
+ tip = self.access_path[-1] + if tip[2] is not None: + name = u"%s[%d]" % tip[1:3] + else: + name = tip[1] + + self.result.append(u" " * self._indents + + u"%s: %s" % (name, node.__class__.__name__)) + self._indents += 1 + self.visitchildren(node) + self._indents -= 1 + + +def treetypes(root): + """Returns a string representing the tree by class names. + There's a leading and trailing whitespace so that it can be + compared by simple string comparison while still making test + cases look ok.""" + w = NodeTypeWriter() + w.visit(root) + return u"\n".join([u""] + w.result + [u""]) + + +class CythonTest(unittest.TestCase): + + def setUp(self): + Errors.init_thread() + + def tearDown(self): + Errors.init_thread() + + def assertLines(self, expected, result): + "Checks that the given strings or lists of strings are equal line by line" + if not isinstance(expected, list): + expected = expected.split(u"\n") + if not isinstance(result, list): + result = result.split(u"\n") + for idx, (expected_line, result_line) in enumerate(zip(expected, result)): + self.assertEqual(expected_line, result_line, + "Line %d:\nExp: %s\nGot: %s" % (idx, expected_line, result_line)) + self.assertEqual(len(expected), len(result), + "Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(expected), u"\n".join(result))) + + def codeToLines(self, tree): + writer = CodeWriter() + writer.write(tree) + return writer.result.lines + + def codeToString(self, tree): + return "\n".join(self.codeToLines(tree)) + + def assertCode(self, expected, result_tree): + result_lines = self.codeToLines(result_tree) + + expected_lines = strip_common_indent(expected.split("\n")) + + for idx, (line, expected_line) in enumerate(zip(result_lines, expected_lines)): + self.assertEqual(expected_line, line, + "Line %d:\nGot: %s\nExp: %s" % (idx, line, expected_line)) + self.assertEqual(len(result_lines), len(expected_lines), + "Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(result_lines), expected)) + + def assertNodeExists(self, path, result_tree): + self.assertNotEqual(TreePath.find_first(result_tree, path), None, + "Path '%s' not found in result tree" % path) + + def fragment(self, code, pxds=None, pipeline=None): + "Simply create a tree fragment using the name of the test-case in parse errors." + if pxds is None: + pxds = {} + if pipeline is None: + pipeline = [] + name = self.id() + if name.startswith("__main__."): + name = name[len("__main__."):] + name = name.replace(".", "_") + return TreeFragment(code, name, pxds, pipeline=pipeline) + + def treetypes(self, root): + return treetypes(root) + + def should_fail(self, func, exc_type=Exception): + """Calls "func" and fails if it doesn't raise the right exception + (any exception by default). Also returns the exception in question. + """ + try: + func() + self.fail("Expected an exception of type %r" % exc_type) + except exc_type as e: + self.assertTrue(isinstance(e, exc_type)) + return e + + def should_not_fail(self, func): + """Calls func and succeeds if and only if no exception is raised + (i.e. converts exception raising into a failed testcase). Returns + the return value of func.""" + try: + return func() + except Exception as exc: + self.fail(str(exc)) + + +class TransformTest(CythonTest): + """ + Utility base class for transform unit tests. 
It is based around constructing + test trees (either explicitly or by parsing a Cython code string); running + the transform, serialize it using a customized Cython serializer (with + special markup for nodes that cannot be represented in Cython), + and do a string-comparison line-by-line of the result. + + To create a test case: + - Call run_pipeline. The pipeline should at least contain the transform you + are testing; pyx should be either a string (passed to the parser to + create a post-parse tree) or a node representing input to pipeline. + The result will be a transformed result. + + - Check that the tree is correct. If wanted, assertCode can be used, which + takes a code string as expected, and a ModuleNode in result_tree + (it serializes the ModuleNode to a string and compares line-by-line). + + All code strings are first stripped for whitespace lines and then common + indentation. + + Plans: One could have a pxd dictionary parameter to run_pipeline. + """ + + def run_pipeline(self, pipeline, pyx, pxds=None): + if pxds is None: + pxds = {} + tree = self.fragment(pyx, pxds).root + # Run pipeline + for T in pipeline: + tree = T(tree) + return tree + + +# For the test C code validation, we have to take care that the test directives (and thus +# the match strings) do not just appear in (multiline) C code comments containing the original +# Cython source code. Thus, we discard the comments before matching. +# This seems a prime case for re.VERBOSE, but it seems to match some of the whitespace. +_strip_c_comments = partial(re.compile( + re.sub(r'\s+', '', r''' + /[*] ( + (?: [^*\n] | [*][^/] )* + [\n] + (?: [^*] | [*][^/] )* + ) [*]/ + ''') +).sub, '') + +_strip_cython_code_from_html = partial(re.compile( + re.sub(r'\s\s+', '', r''' + (?: +
+        <pre class=["'][^"']*cython\s+line[^"']*["']\s*>
+        (?:[^<]|<(?!/pre))+
+        </pre>
+ )|(?: + ]*> + (?:[^<]|<(?!/style))+ + + ) + ''') +).sub, '') + + +def _parse_pattern(pattern): + start = end = None + if pattern.startswith('/'): + start, pattern = re.split(r"(?= os.path.getmtime(file_path): + write_file(file_path, content, dedent=dedent, encoding=encoding) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestCodeWriter.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestCodeWriter.py new file mode 100644 index 0000000000000000000000000000000000000000..c3026cb1db335de366117c2afeb339a6d2c92e48 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestCodeWriter.py @@ -0,0 +1,128 @@ +from Cython.TestUtils import CythonTest + +class TestCodeWriter(CythonTest): + # CythonTest uses the CodeWriter heavily, so do some checking by + # roundtripping Cython code through the test framework. + + # Note that this test is dependent upon the normal Cython parser + # to generate the input trees to the CodeWriter. This save *a lot* + # of time; better to spend that time writing other tests than perfecting + # this one... + + # Whitespace is very significant in this process: + # - always newline on new block (!) + # - indent 4 spaces + # - 1 space around every operator + + def t(self, codestr): + self.assertCode(codestr, self.fragment(codestr).root) + + def test_print(self): + self.t(u""" + print(x + y ** 2) + print(x, y, z) + print(x + y, x + y * z, x * (y + z)) + """) + + def test_if(self): + self.t(u"if x:\n pass") + + def test_ifelifelse(self): + self.t(u""" + if x: + pass + elif y: + pass + elif z + 34 ** 34 - 2: + pass + else: + pass + """) + + def test_def(self): + self.t(u""" + def f(x, y, z): + pass + def f(x = 34, y = 54, z): + pass + """) + + def test_cdef(self): + self.t(u""" + cdef f(x, y, z): + pass + cdef public void (x = 34, y = 54, z): + pass + cdef f(int *x, void *y, Value *z): + pass + cdef f(int **x, void **y, Value **z): + pass + cdef inline f(int &x, Value &z): + pass + """) + + def test_longness_and_signedness(self): + self.t(u"def f(unsigned long long long long long int y):\n pass") + + def test_signed_short(self): + self.t(u"def f(signed short int y):\n pass") + + def test_typed_args(self): + self.t(u"def f(int x, unsigned long int y):\n pass") + + def test_cdef_var(self): + self.t(u""" + cdef int hello + cdef int hello = 4, x = 3, y, z + """) + + def test_for_loop(self): + self.t(u""" + for x, y, z in f(g(h(34) * 2) + 23): + print(x, y, z) + else: + print(43) + """) + self.t(u""" + for abc in (1, 2, 3): + print(x, y, z) + else: + print(43) + """) + + def test_while_loop(self): + self.t(u""" + while True: + while True: + while True: + continue + """) + + def test_inplace_assignment(self): + self.t(u"x += 43") + + def test_cascaded_assignment(self): + self.t(u"x = y = z = abc = 43") + + def test_attribute(self): + self.t(u"a.x") + + def test_return_none(self): + self.t(u""" + def f(x, y, z): + return + cdef f(x, y, z): + return + def f(x, y, z): + return None + cdef f(x, y, z): + return None + def f(x, y, z): + return 1234 + cdef f(x, y, z): + return 1234 + """) + +if __name__ == "__main__": + import unittest + unittest.main() diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestCythonUtils.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestCythonUtils.py new file mode 100644 index 0000000000000000000000000000000000000000..4c06cbe1c3ce417a6d07953c6f5c357e7613de71 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestCythonUtils.py @@ -0,0 +1,205 @@ +import sys +import 
unittest +try: + from StringIO import StringIO +except ImportError: + from io import StringIO # doesn't accept 'str' in Py2 + +from Cython.Utils import ( + _CACHE_NAME_PATTERN, _build_cache_name, _find_cache_attributes, + build_hex_version, cached_method, clear_method_caches, try_finally_contextmanager, + print_version, normalise_float_repr, +) + +METHOD_NAME = "cached_next" +CACHE_NAME = _build_cache_name(METHOD_NAME) +NAMES = CACHE_NAME, METHOD_NAME + +class Cached(object): + @cached_method + def cached_next(self, x): + return next(x) + + +class TestCythonUtils(unittest.TestCase): + def test_build_hex_version(self): + self.assertEqual('0x001D00A1', build_hex_version('0.29a1')) + self.assertEqual('0x001D03C4', build_hex_version('0.29.3rc4')) + self.assertEqual('0x001D00F0', build_hex_version('0.29')) + self.assertEqual('0x040000F0', build_hex_version('4.0')) + + ############################## Cached Methods ############################## + + def test_cache_method_name(self): + method_name = "foo" + cache_name = _build_cache_name(method_name) + match = _CACHE_NAME_PATTERN.match(cache_name) + + self.assertIsNot(match, None) + self.assertEqual(match.group(1), method_name) + + def test_requirements_for_Cached(self): + obj = Cached() + + self.assertFalse(hasattr(obj, CACHE_NAME)) + self.assertTrue(hasattr(obj, METHOD_NAME)) + self.set_of_names_equal(obj, set()) + + def set_of_names_equal(self, obj, value): + self.assertEqual(set(_find_cache_attributes(obj)), value) + + def test_find_cache_attributes(self): + obj = Cached() + method_name = "bar" + cache_name = _build_cache_name(method_name) + + setattr(obj, CACHE_NAME, {}) + setattr(obj, cache_name, {}) + + self.assertFalse(hasattr(obj, method_name)) + self.set_of_names_equal(obj, {NAMES, (cache_name, method_name)}) + + def test_cached_method(self): + obj = Cached() + value = iter(range(3)) # iter for Py2 + cache = {(value,): 0} + + # cache args + self.assertEqual(obj.cached_next(value), 0) + self.set_of_names_equal(obj, {NAMES}) + self.assertEqual(getattr(obj, CACHE_NAME), cache) + + # use cache + self.assertEqual(obj.cached_next(value), 0) + self.set_of_names_equal(obj, {NAMES}) + self.assertEqual(getattr(obj, CACHE_NAME), cache) + + def test_clear_method_caches(self): + obj = Cached() + value = iter(range(3)) # iter for Py2 + cache = {(value,): 1} + + obj.cached_next(value) # cache args + + clear_method_caches(obj) + self.set_of_names_equal(obj, set()) + + self.assertEqual(obj.cached_next(value), 1) + self.set_of_names_equal(obj, {NAMES}) + self.assertEqual(getattr(obj, CACHE_NAME), cache) + + def test_clear_method_caches_with_missing_method(self): + obj = Cached() + method_name = "bar" + cache_name = _build_cache_name(method_name) + names = cache_name, method_name + + setattr(obj, cache_name, object()) + + self.assertFalse(hasattr(obj, method_name)) + self.set_of_names_equal(obj, {names}) + + clear_method_caches(obj) + self.set_of_names_equal(obj, {names}) + + def test_try_finally_contextmanager(self): + states = [] + @try_finally_contextmanager + def gen(*args, **kwargs): + states.append("enter") + yield (args, kwargs) + states.append("exit") + + with gen(1, 2, 3, x=4) as call_args: + assert states == ["enter"] + self.assertEqual(call_args, ((1, 2, 3), {'x': 4})) + assert states == ["enter", "exit"] + + class MyException(RuntimeError): + pass + + del states[:] + with self.assertRaises(MyException): + with gen(1, 2, y=4) as call_args: + assert states == ["enter"] + self.assertEqual(call_args, ((1, 2), {'y': 4})) + raise MyException("FAIL 
INSIDE") + assert states == ["enter", "exit"] + + del states[:] + with self.assertRaises(StopIteration): + with gen(1, 2, y=4) as call_args: + assert states == ["enter"] + self.assertEqual(call_args, ((1, 2), {'y': 4})) + raise StopIteration("STOP") + assert states == ["enter", "exit"] + + def test_print_version(self): + orig_stderr = sys.stderr + orig_stdout = sys.stdout + stderr = sys.stderr = StringIO() + stdout = sys.stdout = StringIO() + try: + print_version() + finally: + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + stdout = stdout.getvalue() + stderr = stderr.getvalue() + + from .. import __version__ as version + self.assertIn(version, stdout) + if stderr: # Depends on os.fstat(1/2). + self.assertIn(version, stderr) + + def test_print_version_stdouterr(self): + orig_stderr = sys.stderr + orig_stdout = sys.stdout + stdout = sys.stdout = sys.stderr = StringIO() # same! + try: + print_version() + finally: + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + stdout = stdout.getvalue() + + from .. import __version__ as version + self.assertIn(version, stdout) + self.assertEqual(stdout.count(version), 1) + + def test_normalise_float_repr(self): + examples = [ + ('.0', '.0'), + ('.000000', '.0'), + ('.1', '.1'), + ('1.', '1.'), + ('1.0', '1.'), + ('1.000000000000000000000', '1.'), + ('00000000000000000000001.000000000000000000000', '1.'), + ('12345.0025', '12345.0025'), + ('1E5', '100000.'), + ('.1E-5', '.000001'), + ('1.1E-5', '.000011'), + ('12.3E-5', '.000123'), + ('.1E10', '1000000000.'), + ('1.1E10', '11000000000.'), + ('123.4E10', '1234000000000.'), + ('123.456E0', '123.456'), + ('123.456E-1', '12.3456'), + ('123.456E-2', '1.23456'), + ('123.456E1', '1234.56'), + ('123.456E2', '12345.6'), + ('2.1E80', '210000000000000000000000000000000000000000000000000000000000000000000000000000000.'), + ] + + for float_str, norm_str in examples: + self.assertEqual(float(float_str), float(norm_str)) # safety check for test data + + result = normalise_float_repr(float_str) + self.assertEqual(float(float_str), float(result)) + self.assertEqual( + result, norm_str, + "normalise_float_repr(%r) == %r != %r (%.330f)" % (float_str, result, norm_str, float(float_str)) + ) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestJediTyper.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestJediTyper.py new file mode 100644 index 0000000000000000000000000000000000000000..ede99b3a8a96caa4c4d9a2bb76c5e41a00ad0984 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestJediTyper.py @@ -0,0 +1,225 @@ +# -*- coding: utf-8 -*- +# tag: jedi + +from __future__ import absolute_import + +import sys +import os.path + +from textwrap import dedent +from contextlib import contextmanager +from tempfile import NamedTemporaryFile + +from Cython.Compiler.ParseTreeTransforms import NormalizeTree, InterpretCompilerDirectives +from Cython.Compiler import Main, Symtab, Visitor, Options +from Cython.TestUtils import TransformTest + +TOOLS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'Tools')) + + +@contextmanager +def _tempfile(code): + code = dedent(code) + if not isinstance(code, bytes): + code = code.encode('utf8') + + with NamedTemporaryFile(suffix='.py') as f: + f.write(code) + f.seek(0) + yield f + + +def _test_typing(code, inject=False): + sys.path.insert(0, TOOLS_DIR) + try: + import jedityper + finally: + sys.path.remove(TOOLS_DIR) + lines = [] + with _tempfile(code) as f: + types = jedityper.analyse(f.name) + if inject: + lines = 
jedityper.inject_types(f.name, types) + return types, lines + + +class DeclarationsFinder(Visitor.VisitorTransform): + directives = None + + visit_Node = Visitor.VisitorTransform.recurse_to_children + + def visit_CompilerDirectivesNode(self, node): + if not self.directives: + self.directives = [] + self.directives.append(node) + self.visitchildren(node) + return node + + +class TestJediTyper(TransformTest): + def _test(self, code): + return _test_typing(code)[0] + + def test_typing_global_int_loop(self): + code = '''\ + for i in range(10): + a = i + 1 + ''' + types = self._test(code) + self.assertIn((None, (1, 0)), types) + variables = types.pop((None, (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables) + + def test_typing_function_int_loop(self): + code = '''\ + def func(x): + for i in range(x): + a = i + 1 + return a + ''' + types = self._test(code) + self.assertIn(('func', (1, 0)), types) + variables = types.pop(('func', (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables) + + def test_conflicting_types_in_function(self): + code = '''\ + def func(a, b): + print(a) + a = 1 + b += a + a = 'abc' + return a, str(b) + + print(func(1.5, 2)) + ''' + types = self._test(code) + self.assertIn(('func', (1, 0)), types) + variables = types.pop(('func', (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['float', 'int', 'str']), 'b': set(['int'])}, variables) + + def _test_typing_function_char_loop(self): + code = '''\ + def func(x): + l = [] + for c in x: + l.append(c) + return l + + print(func('abcdefg')) + ''' + types = self._test(code) + self.assertIn(('func', (1, 0)), types) + variables = types.pop(('func', (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables) + + def test_typing_global_list(self): + code = '''\ + a = [x for x in range(10)] + b = list(range(10)) + c = a + b + d = [0]*10 + ''' + types = self._test(code) + self.assertIn((None, (1, 0)), types) + variables = types.pop((None, (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['list']), 'b': set(['list']), 'c': set(['list']), 'd': set(['list'])}, variables) + + def test_typing_function_list(self): + code = '''\ + def func(x): + a = [[], []] + b = [0]* 10 + a + c = a[0] + + print(func([0]*100)) + ''' + types = self._test(code) + self.assertIn(('func', (1, 0)), types) + variables = types.pop(('func', (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['list']), 'b': set(['list']), 'c': set(['list']), 'x': set(['list'])}, variables) + + def test_typing_global_dict(self): + code = '''\ + a = dict() + b = {i: i**2 for i in range(10)} + c = a + ''' + types = self._test(code) + self.assertIn((None, (1, 0)), types) + variables = types.pop((None, (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['dict']), 'b': set(['dict']), 'c': set(['dict'])}, variables) + + def test_typing_function_dict(self): + code = '''\ + def func(x): + a = dict() + b = {i: i**2 for i in range(10)} + c = x + + print(func({1:2, 'x':7})) + ''' + types = self._test(code) + self.assertIn(('func', (1, 0)), types) + variables = types.pop(('func', (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['dict']), 'b': set(['dict']), 'c': set(['dict']), 'x': set(['dict'])}, variables) + + + def test_typing_global_set(self): + code = '''\ + a = set() + # b = {i for i in range(10)} # jedi does not support set comprehension yet + c = a + d = {1,2,3} + e = a | b + 
''' + types = self._test(code) + self.assertIn((None, (1, 0)), types) + variables = types.pop((None, (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['set']), 'c': set(['set']), 'd': set(['set']), 'e': set(['set'])}, variables) + + def test_typing_function_set(self): + code = '''\ + def func(x): + a = set() + # b = {i for i in range(10)} # jedi does not support set comprehension yet + c = a + d = a | b + + print(func({1,2,3})) + ''' + types = self._test(code) + self.assertIn(('func', (1, 0)), types) + variables = types.pop(('func', (1, 0))) + self.assertFalse(types) + self.assertEqual({'a': set(['set']), 'c': set(['set']), 'd': set(['set']), 'x': set(['set'])}, variables) + + +class TestTypeInjection(TestJediTyper): + """ + Subtype of TestJediTyper that additionally tests type injection and compilation. + """ + def setUp(self): + super(TestTypeInjection, self).setUp() + compilation_options = Options.CompilationOptions(Options.default_options) + ctx = Main.Context.from_options(compilation_options) + transform = InterpretCompilerDirectives(ctx, ctx.compiler_directives) + transform.module_scope = Symtab.ModuleScope('__main__', None, ctx) + self.declarations_finder = DeclarationsFinder() + self.pipeline = [NormalizeTree(None), transform, self.declarations_finder] + + def _test(self, code): + types, lines = _test_typing(code, inject=True) + tree = self.run_pipeline(self.pipeline, ''.join(lines)) + directives = self.declarations_finder.directives + # TODO: validate directives + return types diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestShadow.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestShadow.py new file mode 100644 index 0000000000000000000000000000000000000000..83abdcec2bf23f6fe8cb77e5ed30be3359cc25a3 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestShadow.py @@ -0,0 +1,79 @@ +import unittest + +from Cython import Shadow +from Cython.Compiler import Options, CythonScope, PyrexTypes, Errors + +class TestShadow(unittest.TestCase): + def test_all_types_in_shadow(self): + cython_scope = CythonScope.create_cython_scope(None) + # Not doing load_cythonscope at this stage because it requires a proper context and + # Errors.py to be set up + + missing_types = [] + for key in cython_scope.entries.keys(): + if key.startswith('__') and key.endswith('__'): + continue + if key in ('PyTypeObject', 'PyObject_TypeCheck'): + # These are declared in Shadow.py for reasons that look to + # be an implementation detail, but it isn't our intention for + # users to access them from Pure Python mode. 
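+                # Skipping them keeps this check focused on the names that
+                # are actually meant to be used from Pure Python mode.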
+ continue + if not hasattr(Shadow, key): + missing_types.append(key) + self.assertEqual(missing_types, []) + + def test_int_types_in_shadow(self): + missing_types = [] + for int_name in Shadow.int_types: + for sign in ['', 'u', 's']: + name = sign + int_name + + if sign and ( + int_name in ['Py_UNICODE', 'Py_UCS4', 'Py_ssize_t', + 'ssize_t', 'ptrdiff_t', 'Py_hash_t'] or + name == "usize_t"): + # size_t is special-cased here a little since ssize_t legitimate + # but usize_t isn't + self.assertNotIn(name, dir(Shadow)) + self.assertNotIn('p_' + name, dir(Shadow)) + continue + + if not hasattr(Shadow, name): + missing_types.append(name) + + for ptr in range(1, 4): + ptr_name = 'p' * ptr + '_' + name + if not hasattr(Shadow, ptr_name): + missing_types.append(ptr_name) + self.assertEqual(missing_types, []) + + def test_most_types(self): + # TODO it's unfortunately hard to get a definite list of types to confirm that they're + # present (because they're obtained by on-the-fly string parsing in `cython_scope.lookup_type`) + + cython_scope = CythonScope.create_cython_scope(None) + # Set up just enough of "Context" and "Errors" that CythonScope.lookup_type can fail + class Context: + cpp = False + language_level = 3 + future_directives = [] + cython_scope.context = Context + Errors.init_thread() + + missing_types = [] + missing_lookups = [] + for (signed, longness, name), type_ in PyrexTypes.modifiers_and_name_to_type.items(): + if name == 'object': + continue # This probably shouldn't be in Shadow + if not hasattr(Shadow, name): + missing_types.append(name) + if not cython_scope.lookup_type(name): + missing_lookups.append(name) + for ptr in range(1, 4): + ptr_name = 'p' * ptr + '_' + name + if not hasattr(Shadow, ptr_name): + missing_types.append(ptr_name) + if not cython_scope.lookup_type(ptr_name): + missing_lookups.append(ptr_name) + self.assertEqual(missing_types, []) + self.assertEqual(missing_lookups, []) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestStringIOTree.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestStringIOTree.py new file mode 100644 index 0000000000000000000000000000000000000000..a15f2cd88d706fb11d8438e3fe7a63af23b08dcf --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestStringIOTree.py @@ -0,0 +1,67 @@ +import unittest + +from Cython import StringIOTree as stringtree + +code = """ +cdef int spam # line 1 + +cdef ham(): + a = 1 + b = 2 + c = 3 + d = 4 + +def eggs(): + pass + +cpdef bacon(): + print spam + print 'scotch' + print 'tea?' + print 'or coffee?' 
# line 16 +""" + +linemap = dict(enumerate(code.splitlines())) + +class TestStringIOTree(unittest.TestCase): + + def setUp(self): + self.tree = stringtree.StringIOTree() + + def test_markers(self): + assert not self.tree.allmarkers() + + def test_insertion(self): + self.write_lines((1, 2, 3)) + line_4_to_6_insertion_point = self.tree.insertion_point() + self.write_lines((7, 8)) + line_9_to_13_insertion_point = self.tree.insertion_point() + self.write_lines((14, 15, 16)) + + line_4_insertion_point = line_4_to_6_insertion_point.insertion_point() + self.write_lines((5, 6), tree=line_4_to_6_insertion_point) + + line_9_to_12_insertion_point = ( + line_9_to_13_insertion_point.insertion_point()) + self.write_line(13, tree=line_9_to_13_insertion_point) + + self.write_line(4, tree=line_4_insertion_point) + self.write_line(9, tree=line_9_to_12_insertion_point) + line_10_insertion_point = line_9_to_12_insertion_point.insertion_point() + self.write_line(11, tree=line_9_to_12_insertion_point) + self.write_line(10, tree=line_10_insertion_point) + self.write_line(12, tree=line_9_to_12_insertion_point) + + self.assertEqual(self.tree.allmarkers(), list(range(1, 17))) + self.assertEqual(code.strip(), self.tree.getvalue().strip()) + + + def write_lines(self, linenos, tree=None): + for lineno in linenos: + self.write_line(lineno, tree=tree) + + def write_line(self, lineno, tree=None): + if tree is None: + tree = self.tree + tree.markers.append(lineno) + tree.write(linemap[lineno] + '\n') diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestTestUtils.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestTestUtils.py new file mode 100644 index 0000000000000000000000000000000000000000..e8329188b31246424e7a15229894b7c0cd68a81b --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/TestTestUtils.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- + +import os.path +import unittest +import tempfile +import textwrap +import shutil + +from ..TestUtils import write_file, write_newer_file, _parse_pattern + + +class TestTestUtils(unittest.TestCase): + def setUp(self): + super(TestTestUtils, self).setUp() + self.temp_dir = tempfile.mkdtemp() + + def tearDown(self): + if self.temp_dir and os.path.isdir(self.temp_dir): + shutil.rmtree(self.temp_dir) + super(TestTestUtils, self).tearDown() + + def _test_path(self, filename): + return os.path.join(self.temp_dir, filename) + + def _test_write_file(self, content, expected, **kwargs): + file_path = self._test_path("abcfile") + write_file(file_path, content, **kwargs) + assert os.path.isfile(file_path) + + with open(file_path, 'rb') as f: + found = f.read() + assert found == expected, (repr(expected), repr(found)) + + def test_write_file_text(self): + text = u"abcüöä" + self._test_write_file(text, text.encode('utf8')) + + def test_write_file_dedent(self): + text = u""" + A horse is a horse, + of course, of course, + And no one can talk to a horse + of course + """ + self._test_write_file(text, textwrap.dedent(text).encode('utf8'), dedent=True) + + def test_write_file_bytes(self): + self._test_write_file(b"ab\0c", b"ab\0c") + + def test_write_newer_file(self): + file_path_1 = self._test_path("abcfile1.txt") + file_path_2 = self._test_path("abcfile2.txt") + write_file(file_path_1, "abc") + assert os.path.isfile(file_path_1) + write_newer_file(file_path_2, file_path_1, "xyz") + assert os.path.isfile(file_path_2) + assert os.path.getmtime(file_path_2) > os.path.getmtime(file_path_1) + + def test_write_newer_file_same(self): + file_path = 
self._test_path("abcfile.txt") + write_file(file_path, "abc") + mtime = os.path.getmtime(file_path) + write_newer_file(file_path, file_path, "xyz") + assert os.path.getmtime(file_path) > mtime + + def test_write_newer_file_fresh(self): + file_path = self._test_path("abcfile.txt") + assert not os.path.exists(file_path) + write_newer_file(file_path, file_path, "xyz") + assert os.path.isfile(file_path) + + def test_parse_pattern(self): + self.assertEqual( + _parse_pattern("pattern"), + (None, None, 'pattern') + ) + self.assertEqual( + _parse_pattern("/start/:pattern"), + ('start', None, 'pattern') + ) + self.assertEqual( + _parse_pattern(":/end/ pattern"), + (None, 'end', 'pattern') + ) + self.assertEqual( + _parse_pattern("/start/:/end/ pattern"), + ('start', 'end', 'pattern') + ) + self.assertEqual( + _parse_pattern("/start/:/end/pattern"), + ('start', 'end', 'pattern') + ) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa81adaff68e06d8e915a6afa375f62f7e5a8fad --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/__init__.py @@ -0,0 +1 @@ +# empty file diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/xmlrunner.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/xmlrunner.py new file mode 100644 index 0000000000000000000000000000000000000000..eeeb4939439ac208e516fc2afe3e9a70a50cad94 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Tests/xmlrunner.py @@ -0,0 +1,395 @@ +# -*- coding: utf-8 -*- + +"""unittest-xml-reporting is a PyUnit-based TestRunner that can export test +results to XML files that can be consumed by a wide range of tools, such as +build systems, IDEs and Continuous Integration servers. + +This module provides the XMLTestRunner class, which is heavily based on the +default TextTestRunner. This makes the XMLTestRunner very simple to use. + +The script below, adapted from the unittest documentation, shows how to use +XMLTestRunner in a very simple way. In fact, the only difference between this +script and the original one is the last line: + +import random +import unittest +import xmlrunner + +class TestSequenceFunctions(unittest.TestCase): + def setUp(self): + self.seq = range(10) + + def test_shuffle(self): + # make sure the shuffled sequence does not lose any elements + random.shuffle(self.seq) + self.seq.sort() + self.assertEqual(self.seq, range(10)) + + def test_choice(self): + element = random.choice(self.seq) + self.assertTrue(element in self.seq) + + def test_sample(self): + self.assertRaises(ValueError, random.sample, self.seq, 20) + for element in random.sample(self.seq, 5): + self.assertTrue(element in self.seq) + +if __name__ == '__main__': + unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports')) +""" + +from __future__ import absolute_import + +import os +import sys +import time +from unittest import TestResult, TextTestResult, TextTestRunner +import xml.dom.minidom +try: + from StringIO import StringIO +except ImportError: + from io import StringIO # doesn't accept 'str' in Py2 + + +class XMLDocument(xml.dom.minidom.Document): + def createCDATAOrText(self, data): + if ']]>' in data: + return self.createTextNode(data) + return self.createCDATASection(data) + + +class _TestInfo(object): + """This class is used to keep useful information about the execution of a + test method. 
+ """ + + # Possible test outcomes + (SUCCESS, FAILURE, ERROR) = range(3) + + def __init__(self, test_result, test_method, outcome=SUCCESS, err=None): + "Create a new instance of _TestInfo." + self.test_result = test_result + self.test_method = test_method + self.outcome = outcome + self.err = err + self.stdout = test_result.stdout and test_result.stdout.getvalue().strip() or '' + self.stderr = test_result.stdout and test_result.stderr.getvalue().strip() or '' + + def get_elapsed_time(self): + """Return the time that shows how long the test method took to + execute. + """ + return self.test_result.stop_time - self.test_result.start_time + + def get_description(self): + "Return a text representation of the test method." + return self.test_result.getDescription(self.test_method) + + def get_error_info(self): + """Return a text representation of an exception thrown by a test + method. + """ + if not self.err: + return '' + return self.test_result._exc_info_to_string( + self.err, self.test_method) + + +class _XMLTestResult(TextTestResult): + """A test result class that can express test results in a XML report. + + Used by XMLTestRunner. + """ + def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1, + elapsed_times=True): + "Create a new instance of _XMLTestResult." + TextTestResult.__init__(self, stream, descriptions, verbosity) + self.successes = [] + self.callback = None + self.elapsed_times = elapsed_times + self.output_patched = False + + def _prepare_callback(self, test_info, target_list, verbose_str, short_str): + """Append a _TestInfo to the given target list and sets a callback + method to be called by stopTest method. + """ + target_list.append(test_info) + def callback(): + """This callback prints the test method outcome to the stream, + as well as the elapsed time. + """ + + # Ignore the elapsed times for a more reliable unit testing + if not self.elapsed_times: + self.start_time = self.stop_time = 0 + + if self.showAll: + self.stream.writeln('(%.3fs) %s' % + (test_info.get_elapsed_time(), verbose_str)) + elif self.dots: + self.stream.write(short_str) + self.callback = callback + + def _patch_standard_output(self): + """Replace the stdout and stderr streams with string-based streams + in order to capture the tests' output. + """ + if not self.output_patched: + (self.old_stdout, self.old_stderr) = (sys.stdout, sys.stderr) + self.output_patched = True + (sys.stdout, sys.stderr) = (self.stdout, self.stderr) = \ + (StringIO(), StringIO()) + + def _restore_standard_output(self): + "Restore the stdout and stderr streams." + (sys.stdout, sys.stderr) = (self.old_stdout, self.old_stderr) + self.output_patched = False + + def startTest(self, test): + "Called before execute each test method." + self._patch_standard_output() + self.start_time = time.time() + TestResult.startTest(self, test) + + if self.showAll: + self.stream.write(' ' + self.getDescription(test)) + self.stream.write(" ... ") + + def stopTest(self, test): + "Called after execute each test method." + self._restore_standard_output() + TextTestResult.stopTest(self, test) + self.stop_time = time.time() + + if self.callback and callable(self.callback): + self.callback() + self.callback = None + + def addSuccess(self, test): + "Called when a test executes successfully." + self._prepare_callback(_TestInfo(self, test), + self.successes, 'OK', '.') + + def addFailure(self, test, err): + "Called when a test method fails." 
+ self._prepare_callback(_TestInfo(self, test, _TestInfo.FAILURE, err), + self.failures, 'FAIL', 'F') + + def addError(self, test, err): + "Called when a test method raises an error." + self._prepare_callback(_TestInfo(self, test, _TestInfo.ERROR, err), + self.errors, 'ERROR', 'E') + + def printErrorList(self, flavour, errors): + "Write some information about the FAIL or ERROR to the stream." + for test_info in errors: + if isinstance(test_info, tuple): + test_info, exc_info = test_info + + try: + t = test_info.get_elapsed_time() + except AttributeError: + t = 0 + try: + descr = test_info.get_description() + except AttributeError: + try: + descr = test_info.getDescription() + except AttributeError: + descr = str(test_info) + try: + err_info = test_info.get_error_info() + except AttributeError: + err_info = str(test_info) + + self.stream.writeln(self.separator1) + self.stream.writeln('%s [%.3fs]: %s' % (flavour, t, descr)) + self.stream.writeln(self.separator2) + self.stream.writeln('%s' % err_info) + + def _get_info_by_testcase(self): + """This method organizes test results by TestCase module. This + information is used during the report generation, where a XML report + will be generated for each TestCase. + """ + tests_by_testcase = {} + + for tests in (self.successes, self.failures, self.errors): + for test_info in tests: + if not isinstance(test_info, _TestInfo): + print("Unexpected test result type: %r" % (test_info,)) + continue + testcase = type(test_info.test_method) + + # Ignore module name if it is '__main__' + module = testcase.__module__ + '.' + if module == '__main__.': + module = '' + testcase_name = module + testcase.__name__ + + if testcase_name not in tests_by_testcase: + tests_by_testcase[testcase_name] = [] + tests_by_testcase[testcase_name].append(test_info) + + return tests_by_testcase + + def _report_testsuite(suite_name, tests, xml_document): + "Appends the testsuite section to the XML document." + testsuite = xml_document.createElement('testsuite') + xml_document.appendChild(testsuite) + + testsuite.setAttribute('name', str(suite_name)) + testsuite.setAttribute('tests', str(len(tests))) + + testsuite.setAttribute('time', '%.3f' % + sum([e.get_elapsed_time() for e in tests])) + + failures = len([1 for e in tests if e.outcome == _TestInfo.FAILURE]) + testsuite.setAttribute('failures', str(failures)) + + errors = len([1 for e in tests if e.outcome == _TestInfo.ERROR]) + testsuite.setAttribute('errors', str(errors)) + + return testsuite + + _report_testsuite = staticmethod(_report_testsuite) + + def _report_testcase(suite_name, test_result, xml_testsuite, xml_document): + "Appends a testcase section to the XML document." 
+ testcase = xml_document.createElement('testcase') + xml_testsuite.appendChild(testcase) + + testcase.setAttribute('classname', str(suite_name)) + testcase.setAttribute('name', test_result.test_method.shortDescription() + or getattr(test_result.test_method, '_testMethodName', + str(test_result.test_method))) + testcase.setAttribute('time', '%.3f' % test_result.get_elapsed_time()) + + if (test_result.outcome != _TestInfo.SUCCESS): + elem_name = ('failure', 'error')[test_result.outcome-1] + failure = xml_document.createElement(elem_name) + testcase.appendChild(failure) + + failure.setAttribute('type', str(test_result.err[0].__name__)) + failure.setAttribute('message', str(test_result.err[1])) + + error_info = test_result.get_error_info() + failureText = xml_document.createCDATAOrText(error_info) + failure.appendChild(failureText) + + _report_testcase = staticmethod(_report_testcase) + + def _report_output(test_runner, xml_testsuite, xml_document, stdout, stderr): + "Appends the system-out and system-err sections to the XML document." + systemout = xml_document.createElement('system-out') + xml_testsuite.appendChild(systemout) + + systemout_text = xml_document.createCDATAOrText(stdout) + systemout.appendChild(systemout_text) + + systemerr = xml_document.createElement('system-err') + xml_testsuite.appendChild(systemerr) + + systemerr_text = xml_document.createCDATAOrText(stderr) + systemerr.appendChild(systemerr_text) + + _report_output = staticmethod(_report_output) + + def generate_reports(self, test_runner): + "Generates the XML reports to a given XMLTestRunner object." + all_results = self._get_info_by_testcase() + + if isinstance(test_runner.output, str) and not os.path.exists(test_runner.output): + os.makedirs(test_runner.output) + + for suite, tests in all_results.items(): + doc = XMLDocument() + + # Build the XML file + testsuite = _XMLTestResult._report_testsuite(suite, tests, doc) + stdout, stderr = [], [] + for test in tests: + _XMLTestResult._report_testcase(suite, test, testsuite, doc) + if test.stdout: + stdout.extend(['*****************', test.get_description(), test.stdout]) + if test.stderr: + stderr.extend(['*****************', test.get_description(), test.stderr]) + _XMLTestResult._report_output(test_runner, testsuite, doc, + '\n'.join(stdout), '\n'.join(stderr)) + xml_content = doc.toprettyxml(indent='\t') + + if type(test_runner.output) is str: + report_file = open('%s%sTEST-%s.xml' % + (test_runner.output, os.sep, suite), 'w') + try: + report_file.write(xml_content) + finally: + report_file.close() + else: + # Assume that test_runner.output is a stream + test_runner.output.write(xml_content) + + +class XMLTestRunner(TextTestRunner): + """A test runner class that outputs the results in JUnit like XML files. + """ + def __init__(self, output='.', stream=None, descriptions=True, verbose=False, elapsed_times=True): + "Create a new instance of XMLTestRunner." + if stream is None: + stream = sys.stderr + verbosity = (1, 2)[verbose] + TextTestRunner.__init__(self, stream, descriptions, verbosity) + self.output = output + self.elapsed_times = elapsed_times + + def _make_result(self): + """Create the TestResult object which will be used to store + information about the executed tests. + """ + return _XMLTestResult(self.stream, self.descriptions, + self.verbosity, self.elapsed_times) + + def run(self, test): + "Run the given test case or test suite." 
+ # Prepare the test execution + result = self._make_result() + + # Print a nice header + self.stream.writeln() + self.stream.writeln('Running tests...') + self.stream.writeln(result.separator2) + + # Execute tests + start_time = time.time() + test(result) + stop_time = time.time() + time_taken = stop_time - start_time + + # Generate reports + self.stream.writeln() + self.stream.writeln('Generating XML reports...') + result.generate_reports(self) + + # Print results + result.printErrors() + self.stream.writeln(result.separator2) + run = result.testsRun + self.stream.writeln("Ran %d test%s in %.3fs" % + (run, run != 1 and "s" or "", time_taken)) + self.stream.writeln() + + # Error traces + if not result.wasSuccessful(): + self.stream.write("FAILED (") + failed, errored = (len(result.failures), len(result.errors)) + if failed: + self.stream.write("failures=%d" % failed) + if errored: + if failed: + self.stream.write(", ") + self.stream.write("errors=%d" % errored) + self.stream.writeln(")") + else: + self.stream.writeln("OK") + + return result diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/AsyncGen.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/AsyncGen.c new file mode 100644 index 0000000000000000000000000000000000000000..9bffedcfa9c96e7b905c6f4769b8de420a14bd21 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/AsyncGen.c @@ -0,0 +1,1305 @@ +// This is copied from genobject.c in CPython 3.6. +// Try to keep it in sync by doing this from time to time: +// sed -e 's|__pyx_||ig' Cython/Utility/AsyncGen.c | diff -udw - cpython/Objects/genobject.c | less + +//////////////////// AsyncGenerator.proto //////////////////// +//@requires: Coroutine.c::Coroutine + +#define __Pyx_AsyncGen_USED +typedef struct { + __pyx_CoroutineObject coro; + PyObject *ag_finalizer; + int ag_hooks_inited; + int ag_closed; + int ag_running_async; +} __pyx_PyAsyncGenObject; + +static PyTypeObject *__pyx__PyAsyncGenWrappedValueType = 0; +static PyTypeObject *__pyx__PyAsyncGenASendType = 0; +static PyTypeObject *__pyx__PyAsyncGenAThrowType = 0; +static PyTypeObject *__pyx_AsyncGenType = 0; + +#define __Pyx_AsyncGen_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_AsyncGenType) +#define __pyx_PyAsyncGenASend_CheckExact(o) \ + __Pyx_IS_TYPE(o, __pyx__PyAsyncGenASendType) +#define __pyx_PyAsyncGenAThrow_CheckExact(o) \ + __Pyx_IS_TYPE(o, __pyx__PyAsyncGenAThrowType) + +static PyObject *__Pyx_async_gen_anext(PyObject *o); +static CYTHON_INLINE PyObject *__Pyx_async_gen_asend_iternext(PyObject *o); +static PyObject *__Pyx_async_gen_asend_send(PyObject *o, PyObject *arg); +static PyObject *__Pyx_async_gen_asend_close(PyObject *o, PyObject *args); +static PyObject *__Pyx_async_gen_athrow_close(PyObject *o, PyObject *args); + +static PyObject *__Pyx__PyAsyncGenValueWrapperNew(PyObject *val); + + +static __pyx_CoroutineObject *__Pyx_AsyncGen_New( + __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name) { + __pyx_PyAsyncGenObject *gen = PyObject_GC_New(__pyx_PyAsyncGenObject, __pyx_AsyncGenType); + if (unlikely(!gen)) + return NULL; + gen->ag_finalizer = NULL; + gen->ag_closed = 0; + gen->ag_hooks_inited = 0; + gen->ag_running_async = 0; + return __Pyx__Coroutine_NewInit((__pyx_CoroutineObject*)gen, body, code, closure, name, qualname, module_name); +} + +static int __pyx_AsyncGen_init(PyObject *module); +static void __Pyx_PyAsyncGen_Fini(void); + +//////////////////// AsyncGenerator.cleanup //////////////////// + 
+__Pyx_PyAsyncGen_Fini(); + +//////////////////// AsyncGeneratorInitFinalizer //////////////////// + +// this is separated out because it needs more adaptation + +#if PY_VERSION_HEX < 0x030600B0 +static int __Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o) { +#if 0 + // TODO: implement finalizer support in older Python versions + PyThreadState *tstate; + PyObject *finalizer; + PyObject *firstiter; +#endif + + if (likely(o->ag_hooks_inited)) { + return 0; + } + + o->ag_hooks_inited = 1; + +#if 0 + tstate = __Pyx_PyThreadState_Current; + + finalizer = tstate->async_gen_finalizer; + if (finalizer) { + Py_INCREF(finalizer); + o->ag_finalizer = finalizer; + } + + firstiter = tstate->async_gen_firstiter; + if (firstiter) { + PyObject *res; + + Py_INCREF(firstiter); + res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o); + Py_DECREF(firstiter); + if (res == NULL) { + return 1; + } + Py_DECREF(res); + } +#endif + + return 0; +} +#endif + + +//////////////////// AsyncGenerator //////////////////// +//@requires: AsyncGeneratorInitFinalizer +//@requires: Coroutine.c::Coroutine +//@requires: Coroutine.c::ReturnWithStopIteration +//@requires: ObjectHandling.c::PyObjectCall2Args +//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict + +PyDoc_STRVAR(__Pyx_async_gen_send_doc, +"send(arg) -> send 'arg' into generator,\n\ +return next yielded value or raise StopIteration."); + +PyDoc_STRVAR(__Pyx_async_gen_close_doc, +"close() -> raise GeneratorExit inside generator."); + +PyDoc_STRVAR(__Pyx_async_gen_throw_doc, +"throw(typ[,val[,tb]]) -> raise exception in generator,\n\ +return next yielded value or raise StopIteration."); + +PyDoc_STRVAR(__Pyx_async_gen_await_doc, +"__await__() -> return a representation that can be passed into the 'await' expression."); + +// COPY STARTS HERE: + +static PyObject *__Pyx_async_gen_asend_new(__pyx_PyAsyncGenObject *, PyObject *); +static PyObject *__Pyx_async_gen_athrow_new(__pyx_PyAsyncGenObject *, PyObject *); + +static const char *__Pyx_NON_INIT_CORO_MSG = "can't send non-None value to a just-started coroutine"; +static const char *__Pyx_ASYNC_GEN_IGNORED_EXIT_MSG = "async generator ignored GeneratorExit"; +static const char *__Pyx_ASYNC_GEN_CANNOT_REUSE_SEND_MSG = "cannot reuse already awaited __anext__()/asend()"; +static const char *__Pyx_ASYNC_GEN_CANNOT_REUSE_CLOSE_MSG = "cannot reuse already awaited aclose()/athrow()"; + +typedef enum { + __PYX_AWAITABLE_STATE_INIT, /* new awaitable, has not yet been iterated */ + __PYX_AWAITABLE_STATE_ITER, /* being iterated */ + __PYX_AWAITABLE_STATE_CLOSED, /* closed */ +} __pyx_AwaitableState; + +typedef struct { + PyObject_HEAD + __pyx_PyAsyncGenObject *ags_gen; + + /* Can be NULL, when in the __anext__() mode (equivalent of "asend(None)") */ + PyObject *ags_sendval; + + __pyx_AwaitableState ags_state; +} __pyx_PyAsyncGenASend; + + +typedef struct { + PyObject_HEAD + __pyx_PyAsyncGenObject *agt_gen; + + /* Can be NULL, when in the "aclose()" mode (equivalent of "athrow(GeneratorExit)") */ + PyObject *agt_args; + + __pyx_AwaitableState agt_state; +} __pyx_PyAsyncGenAThrow; + + +typedef struct { + PyObject_HEAD + PyObject *agw_val; +} __pyx__PyAsyncGenWrappedValue; + + +#ifndef _PyAsyncGen_MAXFREELIST +#define _PyAsyncGen_MAXFREELIST 80 +#endif + +// Freelists boost performance 6-10%; they also reduce memory +// fragmentation, as _PyAsyncGenWrappedValue and PyAsyncGenASend +// are short-living objects that are instantiated for every +// __anext__ call. 
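+// The freelists are LIFO stacks: the dealloc functions push objects back
+// (up to _PyAsyncGen_MAXFREELIST of them) and the *_new functions pop from
+// here before falling back to PyObject_GC_New().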
+ +static __pyx__PyAsyncGenWrappedValue *__Pyx_ag_value_freelist[_PyAsyncGen_MAXFREELIST]; +static int __Pyx_ag_value_freelist_free = 0; + +static __pyx_PyAsyncGenASend *__Pyx_ag_asend_freelist[_PyAsyncGen_MAXFREELIST]; +static int __Pyx_ag_asend_freelist_free = 0; + +#define __pyx__PyAsyncGenWrappedValue_CheckExact(o) \ + __Pyx_IS_TYPE(o, __pyx__PyAsyncGenWrappedValueType) + + +static int +__Pyx_async_gen_traverse(__pyx_PyAsyncGenObject *gen, visitproc visit, void *arg) +{ + Py_VISIT(gen->ag_finalizer); + return __Pyx_Coroutine_traverse((__pyx_CoroutineObject*)gen, visit, arg); +} + + +static PyObject * +__Pyx_async_gen_repr(__pyx_CoroutineObject *o) +{ + // avoid NULL pointer dereference for qualname during garbage collection + return PyUnicode_FromFormat("", + o->gi_qualname ? o->gi_qualname : Py_None, o); +} + + +#if PY_VERSION_HEX >= 0x030600B0 +static int +__Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o) +{ +#if !CYTHON_COMPILING_IN_PYPY + PyThreadState *tstate; +#endif + PyObject *finalizer; + PyObject *firstiter; + + if (o->ag_hooks_inited) { + return 0; + } + + o->ag_hooks_inited = 1; + +#if CYTHON_COMPILING_IN_PYPY + finalizer = _PyEval_GetAsyncGenFinalizer(); +#else + tstate = __Pyx_PyThreadState_Current; + finalizer = tstate->async_gen_finalizer; +#endif + if (finalizer) { + Py_INCREF(finalizer); + o->ag_finalizer = finalizer; + } + +#if CYTHON_COMPILING_IN_PYPY + firstiter = _PyEval_GetAsyncGenFirstiter(); +#else + firstiter = tstate->async_gen_firstiter; +#endif + if (firstiter) { + PyObject *res; +#if CYTHON_UNPACK_METHODS + PyObject *self; +#endif + + Py_INCREF(firstiter); + // at least asyncio stores methods here => optimise the call +#if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(firstiter)) && likely((self = PyMethod_GET_SELF(firstiter)) != NULL)) { + PyObject *function = PyMethod_GET_FUNCTION(firstiter); + res = __Pyx_PyObject_Call2Args(function, self, (PyObject*)o); + } else +#endif + res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o); + + Py_DECREF(firstiter); + if (unlikely(res == NULL)) { + return 1; + } + Py_DECREF(res); + } + + return 0; +} +#endif + + +static PyObject * +__Pyx_async_gen_anext(PyObject *g) +{ + __pyx_PyAsyncGenObject *o = (__pyx_PyAsyncGenObject*) g; + if (unlikely(__Pyx_async_gen_init_hooks(o))) { + return NULL; + } + return __Pyx_async_gen_asend_new(o, NULL); +} + +static PyObject * +__Pyx_async_gen_anext_method(PyObject *g, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + return __Pyx_async_gen_anext(g); +} + + +static PyObject * +__Pyx_async_gen_asend(__pyx_PyAsyncGenObject *o, PyObject *arg) +{ + if (unlikely(__Pyx_async_gen_init_hooks(o))) { + return NULL; + } + return __Pyx_async_gen_asend_new(o, arg); +} + + +static PyObject * +__Pyx_async_gen_aclose(__pyx_PyAsyncGenObject *o, PyObject *arg) +{ + CYTHON_UNUSED_VAR(arg); + if (unlikely(__Pyx_async_gen_init_hooks(o))) { + return NULL; + } + return __Pyx_async_gen_athrow_new(o, NULL); +} + + +static PyObject * +__Pyx_async_gen_athrow(__pyx_PyAsyncGenObject *o, PyObject *args) +{ + if (unlikely(__Pyx_async_gen_init_hooks(o))) { + return NULL; + } + return __Pyx_async_gen_athrow_new(o, args); +} + + +static PyObject * +__Pyx_async_gen_self_method(PyObject *g, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + return __Pyx_NewRef(g); +} + + +static PyGetSetDef __Pyx_async_gen_getsetlist[] = { + {(char*) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, + (char*) PyDoc_STR("name of the async generator"), 0}, + {(char*) "__qualname__", 
(getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, + (char*) PyDoc_STR("qualified name of the async generator"), 0}, + //REMOVED: {(char*) "ag_await", (getter)coro_get_cr_await, NULL, + //REMOVED: (char*) PyDoc_STR("object being awaited on, or None")}, + {0, 0, 0, 0, 0} /* Sentinel */ +}; + +static PyMemberDef __Pyx_async_gen_memberlist[] = { + //REMOVED: {(char*) "ag_frame", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_frame), READONLY}, + {(char*) "ag_running", T_BOOL, offsetof(__pyx_PyAsyncGenObject, ag_running_async), READONLY, NULL}, + //REMOVED: {(char*) "ag_code", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_code), READONLY}, + //ADDED: "ag_await" + {(char*) "ag_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, + (char*) PyDoc_STR("object being awaited on, or None")}, + {(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), 0, 0}, +#if CYTHON_USE_TYPE_SPECS + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CoroutineObject, gi_weakreflist), READONLY, 0}, +#endif + {0, 0, 0, 0, 0} /* Sentinel */ +}; + +PyDoc_STRVAR(__Pyx_async_aclose_doc, +"aclose() -> raise GeneratorExit inside generator."); + +PyDoc_STRVAR(__Pyx_async_asend_doc, +"asend(v) -> send 'v' in generator."); + +PyDoc_STRVAR(__Pyx_async_athrow_doc, +"athrow(typ[,val[,tb]]) -> raise exception in generator."); + +PyDoc_STRVAR(__Pyx_async_aiter_doc, +"__aiter__(v) -> return an asynchronous iterator."); + +PyDoc_STRVAR(__Pyx_async_anext_doc, +"__anext__(v) -> continue asynchronous iteration and return the next element."); + +static PyMethodDef __Pyx_async_gen_methods[] = { + {"asend", (PyCFunction)__Pyx_async_gen_asend, METH_O, __Pyx_async_asend_doc}, + {"athrow",(PyCFunction)__Pyx_async_gen_athrow, METH_VARARGS, __Pyx_async_athrow_doc}, + {"aclose", (PyCFunction)__Pyx_async_gen_aclose, METH_NOARGS, __Pyx_async_aclose_doc}, + {"__aiter__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_aiter_doc}, + {"__anext__", (PyCFunction)__Pyx_async_gen_anext_method, METH_NOARGS, __Pyx_async_anext_doc}, + {0, 0, 0, 0} /* Sentinel */ +}; + + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_AsyncGenType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_Coroutine_dealloc}, + {Py_am_aiter, (void *)PyObject_SelfIter}, + {Py_am_anext, (void *)__Pyx_async_gen_anext}, + {Py_tp_repr, (void *)__Pyx_async_gen_repr}, + {Py_tp_traverse, (void *)__Pyx_async_gen_traverse}, + {Py_tp_methods, (void *)__Pyx_async_gen_methods}, + {Py_tp_members, (void *)__Pyx_async_gen_memberlist}, + {Py_tp_getset, (void *)__Pyx_async_gen_getsetlist}, +#if CYTHON_USE_TP_FINALIZE + {Py_tp_finalize, (void *)__Pyx_Coroutine_del}, +#endif + {0, 0}, +}; + +static PyType_Spec __pyx_AsyncGenType_spec = { + __PYX_TYPE_MODULE_PREFIX "async_generator", + sizeof(__pyx_PyAsyncGenObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + __pyx_AsyncGenType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +#if CYTHON_USE_ASYNC_SLOTS +static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_as_async = { + 0, /* am_await */ + PyObject_SelfIter, /* am_aiter */ + (unaryfunc)__Pyx_async_gen_anext, /* am_anext */ +#if PY_VERSION_HEX >= 0x030A00A3 + 0, /*am_send*/ +#endif +}; +#endif + +static PyTypeObject __pyx_AsyncGenType_type = { + PyVarObject_HEAD_INIT(0, 0) + "async_generator", /* tp_name */ + sizeof(__pyx_PyAsyncGenObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor)__Pyx_Coroutine_dealloc, /* tp_dealloc */ + 0, /* tp_vectorcall_offset */ + 
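+    // The remaining slots are positional, so the #if blocks towards the end
+    // of this initializer track slots added or removed across CPython versions.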
0, /* tp_getattr */ + 0, /* tp_setattr */ +#if CYTHON_USE_ASYNC_SLOTS + &__Pyx_async_gen_as_async, /* tp_as_async */ +#else + 0, /*tp_reserved*/ +#endif + (reprfunc)__Pyx_async_gen_repr, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | + Py_TPFLAGS_HAVE_FINALIZE, /* tp_flags */ + 0, /* tp_doc */ + (traverseproc)__Pyx_async_gen_traverse, /* tp_traverse */ + 0, /* tp_clear */ +#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 + // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare + __Pyx_Coroutine_compare, /*tp_richcompare*/ +#else + 0, /*tp_richcompare*/ +#endif + offsetof(__pyx_CoroutineObject, gi_weakreflist), /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + __Pyx_async_gen_methods, /* tp_methods */ + __Pyx_async_gen_memberlist, /* tp_members */ + __Pyx_async_gen_getsetlist, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ +#if CYTHON_USE_TP_FINALIZE + 0, /*tp_del*/ +#else + __Pyx_Coroutine_del, /*tp_del*/ +#endif + 0, /* tp_version_tag */ +#if CYTHON_USE_TP_FINALIZE + __Pyx_Coroutine_del, /* tp_finalize */ +#elif PY_VERSION_HEX >= 0x030400a1 + 0, /* tp_finalize */ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + + +static int +__Pyx_PyAsyncGen_ClearFreeLists(void) +{ + int ret = __Pyx_ag_value_freelist_free + __Pyx_ag_asend_freelist_free; + + while (__Pyx_ag_value_freelist_free) { + __pyx__PyAsyncGenWrappedValue *o; + o = __Pyx_ag_value_freelist[--__Pyx_ag_value_freelist_free]; + assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o)); + __Pyx_PyHeapTypeObject_GC_Del(o); + } + + while (__Pyx_ag_asend_freelist_free) { + __pyx_PyAsyncGenASend *o; + o = __Pyx_ag_asend_freelist[--__Pyx_ag_asend_freelist_free]; + assert(__Pyx_IS_TYPE(o, __pyx__PyAsyncGenASendType)); + __Pyx_PyHeapTypeObject_GC_Del(o); + } + + return ret; +} + +static void +__Pyx_PyAsyncGen_Fini(void) +{ + __Pyx_PyAsyncGen_ClearFreeLists(); +} + + +static PyObject * +__Pyx_async_gen_unwrap_value(__pyx_PyAsyncGenObject *gen, PyObject *result) +{ + if (result == NULL) { + PyObject *exc_type = PyErr_Occurred(); + if (!exc_type) { + PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); + gen->ag_closed = 1; + } else if (__Pyx_PyErr_GivenExceptionMatches2(exc_type, __Pyx_PyExc_StopAsyncIteration, PyExc_GeneratorExit)) { + gen->ag_closed = 1; + } + + gen->ag_running_async = 0; + return NULL; + } + + if (__pyx__PyAsyncGenWrappedValue_CheckExact(result)) { + /* async yield */ + __Pyx_ReturnWithStopIteration(((__pyx__PyAsyncGenWrappedValue*)result)->agw_val); + Py_DECREF(result); + gen->ag_running_async = 0; + return NULL; + } + + return result; +} + + +/* ---------- Async Generator ASend 
Awaitable ------------ */ + + +static void +__Pyx_async_gen_asend_dealloc(__pyx_PyAsyncGenASend *o) +{ + PyObject_GC_UnTrack((PyObject *)o); + Py_CLEAR(o->ags_gen); + Py_CLEAR(o->ags_sendval); + if (likely(__Pyx_ag_asend_freelist_free < _PyAsyncGen_MAXFREELIST)) { + assert(__pyx_PyAsyncGenASend_CheckExact(o)); + __Pyx_ag_asend_freelist[__Pyx_ag_asend_freelist_free++] = o; + } else { + __Pyx_PyHeapTypeObject_GC_Del(o); + } +} + +static int +__Pyx_async_gen_asend_traverse(__pyx_PyAsyncGenASend *o, visitproc visit, void *arg) +{ + Py_VISIT(o->ags_gen); + Py_VISIT(o->ags_sendval); + return 0; +} + + +static PyObject * +__Pyx_async_gen_asend_send(PyObject *g, PyObject *arg) +{ + __pyx_PyAsyncGenASend *o = (__pyx_PyAsyncGenASend*) g; + PyObject *result; + + if (unlikely(o->ags_state == __PYX_AWAITABLE_STATE_CLOSED)) { + PyErr_SetString(PyExc_RuntimeError, __Pyx_ASYNC_GEN_CANNOT_REUSE_SEND_MSG); + return NULL; + } + + if (o->ags_state == __PYX_AWAITABLE_STATE_INIT) { + if (unlikely(o->ags_gen->ag_running_async)) { + PyErr_SetString( + PyExc_RuntimeError, + "anext(): asynchronous generator is already running"); + return NULL; + } + + if (arg == NULL || arg == Py_None) { + arg = o->ags_sendval ? o->ags_sendval : Py_None; + } + o->ags_state = __PYX_AWAITABLE_STATE_ITER; + } + + o->ags_gen->ag_running_async = 1; + result = __Pyx_Coroutine_Send((PyObject*)o->ags_gen, arg); + result = __Pyx_async_gen_unwrap_value(o->ags_gen, result); + + if (result == NULL) { + o->ags_state = __PYX_AWAITABLE_STATE_CLOSED; + } + + return result; +} + + +static CYTHON_INLINE PyObject * +__Pyx_async_gen_asend_iternext(PyObject *o) +{ + return __Pyx_async_gen_asend_send(o, Py_None); +} + + +static PyObject * +__Pyx_async_gen_asend_throw(__pyx_PyAsyncGenASend *o, PyObject *args) +{ + PyObject *result; + + if (unlikely(o->ags_state == __PYX_AWAITABLE_STATE_CLOSED)) { + PyErr_SetString(PyExc_RuntimeError, __Pyx_ASYNC_GEN_CANNOT_REUSE_SEND_MSG); + return NULL; + } + + result = __Pyx_Coroutine_Throw((PyObject*)o->ags_gen, args); + result = __Pyx_async_gen_unwrap_value(o->ags_gen, result); + + if (result == NULL) { + o->ags_state = __PYX_AWAITABLE_STATE_CLOSED; + } + + return result; +} + + +static PyObject * +__Pyx_async_gen_asend_close(PyObject *g, PyObject *args) +{ + __pyx_PyAsyncGenASend *o = (__pyx_PyAsyncGenASend*) g; + CYTHON_UNUSED_VAR(args); + o->ags_state = __PYX_AWAITABLE_STATE_CLOSED; + Py_RETURN_NONE; +} + + +static PyMethodDef __Pyx_async_gen_asend_methods[] = { + {"send", (PyCFunction)__Pyx_async_gen_asend_send, METH_O, __Pyx_async_gen_send_doc}, + {"throw", (PyCFunction)__Pyx_async_gen_asend_throw, METH_VARARGS, __Pyx_async_gen_throw_doc}, + {"close", (PyCFunction)__Pyx_async_gen_asend_close, METH_NOARGS, __Pyx_async_gen_close_doc}, + {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc}, + {0, 0, 0, 0} /* Sentinel */ +}; + + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx__PyAsyncGenASendType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_async_gen_asend_dealloc}, + {Py_am_await, (void *)PyObject_SelfIter}, + {Py_tp_traverse, (void *)__Pyx_async_gen_asend_traverse}, + {Py_tp_methods, (void *)__Pyx_async_gen_asend_methods}, + {Py_tp_iter, (void *)PyObject_SelfIter}, + {Py_tp_iternext, (void *)__Pyx_async_gen_asend_iternext}, + {0, 0}, +}; + +static PyType_Spec __pyx__PyAsyncGenASendType_spec = { + __PYX_TYPE_MODULE_PREFIX "async_generator_asend", + sizeof(__pyx_PyAsyncGenASend), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 
__pyx__PyAsyncGenASendType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +#if CYTHON_USE_ASYNC_SLOTS +static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_asend_as_async = { + PyObject_SelfIter, /* am_await */ + 0, /* am_aiter */ + 0, /* am_anext */ +#if PY_VERSION_HEX >= 0x030A00A3 + 0, /*am_send*/ +#endif +}; +#endif + +static PyTypeObject __pyx__PyAsyncGenASendType_type = { + PyVarObject_HEAD_INIT(0, 0) + "async_generator_asend", /* tp_name */ + sizeof(__pyx_PyAsyncGenASend), /* tp_basicsize */ + 0, /* tp_itemsize */ + /* methods */ + (destructor)__Pyx_async_gen_asend_dealloc, /* tp_dealloc */ + 0, /* tp_vectorcall_offset */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if CYTHON_USE_ASYNC_SLOTS + &__Pyx_async_gen_asend_as_async, /* tp_as_async */ +#else + 0, /*tp_reserved*/ +#endif + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */ + 0, /* tp_doc */ + (traverseproc)__Pyx_async_gen_asend_traverse, /* tp_traverse */ + 0, /* tp_clear */ +#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 + // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare + __Pyx_Coroutine_compare, /*tp_richcompare*/ +#else + 0, /*tp_richcompare*/ +#endif + 0, /* tp_weaklistoffset */ + PyObject_SelfIter, /* tp_iter */ + (iternextfunc)__Pyx_async_gen_asend_iternext, /* tp_iternext */ + __Pyx_async_gen_asend_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ + 0, /* tp_version_tag */ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /* tp_finalize */ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + + +static PyObject * +__Pyx_async_gen_asend_new(__pyx_PyAsyncGenObject *gen, PyObject *sendval) +{ + __pyx_PyAsyncGenASend *o; + if (likely(__Pyx_ag_asend_freelist_free)) { + __Pyx_ag_asend_freelist_free--; + o = __Pyx_ag_asend_freelist[__Pyx_ag_asend_freelist_free]; + _Py_NewReference((PyObject *)o); + } else { + o = PyObject_GC_New(__pyx_PyAsyncGenASend, __pyx__PyAsyncGenASendType); + if (unlikely(o == NULL)) { + return NULL; + } + } + + Py_INCREF(gen); + o->ags_gen = gen; + + Py_XINCREF(sendval); + o->ags_sendval = sendval; + + o->ags_state = __PYX_AWAITABLE_STATE_INIT; + + PyObject_GC_Track((PyObject*)o); + return (PyObject*)o; +} + + +/* ---------- Async Generator Value Wrapper ------------ */ + + +static void +__Pyx_async_gen_wrapped_val_dealloc(__pyx__PyAsyncGenWrappedValue *o) +{ + PyObject_GC_UnTrack((PyObject *)o); + Py_CLEAR(o->agw_val); + if (likely(__Pyx_ag_value_freelist_free < _PyAsyncGen_MAXFREELIST)) { + assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o)); + 
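+        // recycle through the freelist instead of freeing the GC object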
__Pyx_ag_value_freelist[__Pyx_ag_value_freelist_free++] = o; + } else { + __Pyx_PyHeapTypeObject_GC_Del(o); + } +} + + +static int +__Pyx_async_gen_wrapped_val_traverse(__pyx__PyAsyncGenWrappedValue *o, + visitproc visit, void *arg) +{ + Py_VISIT(o->agw_val); + return 0; +} + + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx__PyAsyncGenWrappedValueType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_async_gen_wrapped_val_dealloc}, + {Py_tp_traverse, (void *)__Pyx_async_gen_wrapped_val_traverse}, + {0, 0}, +}; + +static PyType_Spec __pyx__PyAsyncGenWrappedValueType_spec = { + __PYX_TYPE_MODULE_PREFIX "async_generator_wrapped_value", + sizeof(__pyx__PyAsyncGenWrappedValue), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + __pyx__PyAsyncGenWrappedValueType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +static PyTypeObject __pyx__PyAsyncGenWrappedValueType_type = { + PyVarObject_HEAD_INIT(0, 0) + "async_generator_wrapped_value", /* tp_name */ + sizeof(__pyx__PyAsyncGenWrappedValue), /* tp_basicsize */ + 0, /* tp_itemsize */ + /* methods */ + (destructor)__Pyx_async_gen_wrapped_val_dealloc, /* tp_dealloc */ + 0, /* tp_vectorcall_offset */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_as_async */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */ + 0, /* tp_doc */ + (traverseproc)__Pyx_async_gen_wrapped_val_traverse, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ + 0, /* tp_version_tag */ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /* tp_finalize */ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + + +static PyObject * +__Pyx__PyAsyncGenValueWrapperNew(PyObject *val) +{ + // NOTE: steals a reference to val ! + __pyx__PyAsyncGenWrappedValue *o; + assert(val); + + if (likely(__Pyx_ag_value_freelist_free)) { + __Pyx_ag_value_freelist_free--; + o = __Pyx_ag_value_freelist[__Pyx_ag_value_freelist_free]; + assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o)); + _Py_NewReference((PyObject*)o); + } else { + o = PyObject_GC_New(__pyx__PyAsyncGenWrappedValue, __pyx__PyAsyncGenWrappedValueType); + if (unlikely(!o)) { + Py_DECREF(val); + return NULL; + } + } + o->agw_val = val; + // no Py_INCREF(val) - steals reference! 
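+    // i.e. the caller transfers ownership of 'val' and must not DECREF it
+    // after a successful call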
+ PyObject_GC_Track((PyObject*)o); + return (PyObject*)o; +} + + +/* ---------- Async Generator AThrow awaitable ------------ */ + + +static void +__Pyx_async_gen_athrow_dealloc(__pyx_PyAsyncGenAThrow *o) +{ + PyObject_GC_UnTrack((PyObject *)o); + Py_CLEAR(o->agt_gen); + Py_CLEAR(o->agt_args); + __Pyx_PyHeapTypeObject_GC_Del(o); +} + + +static int +__Pyx_async_gen_athrow_traverse(__pyx_PyAsyncGenAThrow *o, visitproc visit, void *arg) +{ + Py_VISIT(o->agt_gen); + Py_VISIT(o->agt_args); + return 0; +} + + +static PyObject * +__Pyx_async_gen_athrow_send(__pyx_PyAsyncGenAThrow *o, PyObject *arg) +{ + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*)o->agt_gen; + PyObject *retval, *exc_type; + + if (unlikely(o->agt_state == __PYX_AWAITABLE_STATE_CLOSED)) { + PyErr_SetString(PyExc_RuntimeError, __Pyx_ASYNC_GEN_CANNOT_REUSE_CLOSE_MSG); + return NULL; + } + + if (unlikely(gen->resume_label == -1)) { + // already run past the end + o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; + PyErr_SetNone(PyExc_StopIteration); + return NULL; + } + + if (o->agt_state == __PYX_AWAITABLE_STATE_INIT) { + if (unlikely(o->agt_gen->ag_running_async)) { + o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; + if (o->agt_args == NULL) { + PyErr_SetString( + PyExc_RuntimeError, + "aclose(): asynchronous generator is already running"); + } else { + PyErr_SetString( + PyExc_RuntimeError, + "athrow(): asynchronous generator is already running"); + } + return NULL; + } + + if (unlikely(o->agt_gen->ag_closed)) { + o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; + PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); + return NULL; + } + + if (unlikely(arg != Py_None)) { + PyErr_SetString(PyExc_RuntimeError, __Pyx_NON_INIT_CORO_MSG); + return NULL; + } + + o->agt_state = __PYX_AWAITABLE_STATE_ITER; + o->agt_gen->ag_running_async = 1; + + if (o->agt_args == NULL) { + /* aclose() mode */ + o->agt_gen->ag_closed = 1; + + retval = __Pyx__Coroutine_Throw((PyObject*)gen, + /* Do not close generator when PyExc_GeneratorExit is passed */ + PyExc_GeneratorExit, NULL, NULL, NULL, 0); + + if (retval && __pyx__PyAsyncGenWrappedValue_CheckExact(retval)) { + Py_DECREF(retval); + goto yield_close; + } + } else { + PyObject *typ; + PyObject *tb = NULL; + PyObject *val = NULL; + + if (unlikely(!PyArg_UnpackTuple(o->agt_args, "athrow", 1, 3, &typ, &val, &tb))) { + return NULL; + } + + retval = __Pyx__Coroutine_Throw((PyObject*)gen, + /* Do not close generator when PyExc_GeneratorExit is passed */ + typ, val, tb, o->agt_args, 0); + retval = __Pyx_async_gen_unwrap_value(o->agt_gen, retval); + } + if (retval == NULL) { + goto check_error; + } + return retval; + } + + assert (o->agt_state == __PYX_AWAITABLE_STATE_ITER); + + retval = __Pyx_Coroutine_Send((PyObject *)gen, arg); + if (o->agt_args) { + return __Pyx_async_gen_unwrap_value(o->agt_gen, retval); + } else { + /* aclose() mode */ + if (retval) { + if (unlikely(__pyx__PyAsyncGenWrappedValue_CheckExact(retval))) { + Py_DECREF(retval); + goto yield_close; + } + else { + return retval; + } + } + else { + goto check_error; + } + } + +yield_close: + o->agt_gen->ag_running_async = 0; + o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; + PyErr_SetString( + PyExc_RuntimeError, __Pyx_ASYNC_GEN_IGNORED_EXIT_MSG); + return NULL; + +check_error: + o->agt_gen->ag_running_async = 0; + o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; + exc_type = PyErr_Occurred(); + if (__Pyx_PyErr_GivenExceptionMatches2(exc_type, __Pyx_PyExc_StopAsyncIteration, PyExc_GeneratorExit)) { + if (o->agt_args == NULL) { + // when aclose() is called we 
don't want to propagate + // StopAsyncIteration or GeneratorExit; just raise + // StopIteration, signalling that this 'aclose()' await + // is done. + PyErr_Clear(); + PyErr_SetNone(PyExc_StopIteration); + } + } + return NULL; +} + + +static PyObject * +__Pyx_async_gen_athrow_throw(__pyx_PyAsyncGenAThrow *o, PyObject *args) +{ + PyObject *retval; + + if (unlikely(o->agt_state == __PYX_AWAITABLE_STATE_CLOSED)) { + PyErr_SetString(PyExc_RuntimeError, __Pyx_ASYNC_GEN_CANNOT_REUSE_CLOSE_MSG); + return NULL; + } + + retval = __Pyx_Coroutine_Throw((PyObject*)o->agt_gen, args); + if (o->agt_args) { + return __Pyx_async_gen_unwrap_value(o->agt_gen, retval); + } else { + // aclose() mode + PyObject *exc_type; + if (unlikely(retval && __pyx__PyAsyncGenWrappedValue_CheckExact(retval))) { + o->agt_gen->ag_running_async = 0; + o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; + Py_DECREF(retval); + PyErr_SetString(PyExc_RuntimeError, __Pyx_ASYNC_GEN_IGNORED_EXIT_MSG); + return NULL; + } + exc_type = PyErr_Occurred(); + if (__Pyx_PyErr_GivenExceptionMatches2(exc_type, __Pyx_PyExc_StopAsyncIteration, PyExc_GeneratorExit)) { + // when aclose() is called we don't want to propagate + // StopAsyncIteration or GeneratorExit; just raise + // StopIteration, signalling that this 'aclose()' await + // is done. + PyErr_Clear(); + PyErr_SetNone(PyExc_StopIteration); + } + return retval; + } +} + + +static PyObject * +__Pyx_async_gen_athrow_iternext(__pyx_PyAsyncGenAThrow *o) +{ + return __Pyx_async_gen_athrow_send(o, Py_None); +} + + +static PyObject * +__Pyx_async_gen_athrow_close(PyObject *g, PyObject *args) +{ + __pyx_PyAsyncGenAThrow *o = (__pyx_PyAsyncGenAThrow*) g; + CYTHON_UNUSED_VAR(args); + o->agt_state = __PYX_AWAITABLE_STATE_CLOSED; + Py_RETURN_NONE; +} + + +static PyMethodDef __Pyx_async_gen_athrow_methods[] = { + {"send", (PyCFunction)__Pyx_async_gen_athrow_send, METH_O, __Pyx_async_gen_send_doc}, + {"throw", (PyCFunction)__Pyx_async_gen_athrow_throw, METH_VARARGS, __Pyx_async_gen_throw_doc}, + {"close", (PyCFunction)__Pyx_async_gen_athrow_close, METH_NOARGS, __Pyx_async_gen_close_doc}, + {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc}, + {0, 0, 0, 0} /* Sentinel */ +}; + + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx__PyAsyncGenAThrowType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_async_gen_athrow_dealloc}, + {Py_am_await, (void *)PyObject_SelfIter}, + {Py_tp_traverse, (void *)__Pyx_async_gen_athrow_traverse}, + {Py_tp_iter, (void *)PyObject_SelfIter}, + {Py_tp_iternext, (void *)__Pyx_async_gen_athrow_iternext}, + {Py_tp_methods, (void *)__Pyx_async_gen_athrow_methods}, + {Py_tp_getattro, (void *)__Pyx_PyObject_GenericGetAttrNoDict}, + {0, 0}, +}; + +static PyType_Spec __pyx__PyAsyncGenAThrowType_spec = { + __PYX_TYPE_MODULE_PREFIX "async_generator_athrow", + sizeof(__pyx_PyAsyncGenAThrow), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + __pyx__PyAsyncGenAThrowType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +#if CYTHON_USE_ASYNC_SLOTS +static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_athrow_as_async = { + PyObject_SelfIter, /* am_await */ + 0, /* am_aiter */ + 0, /* am_anext */ +#if PY_VERSION_HEX >= 0x030A00A3 + 0, /*am_send*/ +#endif +}; +#endif + +static PyTypeObject __pyx__PyAsyncGenAThrowType_type = { + PyVarObject_HEAD_INIT(0, 0) + "async_generator_athrow", /* tp_name */ + sizeof(__pyx_PyAsyncGenAThrow), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor)__Pyx_async_gen_athrow_dealloc, /* tp_dealloc */ + 0, /* 
tp_vectorcall_offset */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ +#if CYTHON_USE_ASYNC_SLOTS + &__Pyx_async_gen_athrow_as_async, /* tp_as_async */ +#else + 0, /*tp_reserved*/ +#endif + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */ + 0, /* tp_doc */ + (traverseproc)__Pyx_async_gen_athrow_traverse, /* tp_traverse */ + 0, /* tp_clear */ +#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 + // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare + __Pyx_Coroutine_compare, /*tp_richcompare*/ +#else + 0, /*tp_richcompare*/ +#endif + 0, /* tp_weaklistoffset */ + PyObject_SelfIter, /* tp_iter */ + (iternextfunc)__Pyx_async_gen_athrow_iternext, /* tp_iternext */ + __Pyx_async_gen_athrow_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ + 0, /* tp_version_tag */ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /* tp_finalize */ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + + +static PyObject * +__Pyx_async_gen_athrow_new(__pyx_PyAsyncGenObject *gen, PyObject *args) +{ + __pyx_PyAsyncGenAThrow *o; + o = PyObject_GC_New(__pyx_PyAsyncGenAThrow, __pyx__PyAsyncGenAThrowType); + if (unlikely(o == NULL)) { + return NULL; + } + o->agt_gen = gen; + o->agt_args = args; + o->agt_state = __PYX_AWAITABLE_STATE_INIT; + Py_INCREF(gen); + Py_XINCREF(args); + PyObject_GC_Track((PyObject*)o); + return (PyObject*)o; +} + + +/* ---------- global type sharing ------------ */ + +static int __pyx_AsyncGen_init(PyObject *module) { +#if CYTHON_USE_TYPE_SPECS + __pyx_AsyncGenType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_AsyncGenType_spec, NULL); +#else + CYTHON_MAYBE_UNUSED_VAR(module); + // on Windows, C-API functions can't be used in slots statically + __pyx_AsyncGenType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + __pyx_AsyncGenType = __Pyx_FetchCommonType(&__pyx_AsyncGenType_type); +#endif + if (unlikely(!__pyx_AsyncGenType)) + return -1; + +#if CYTHON_USE_TYPE_SPECS + __pyx__PyAsyncGenAThrowType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx__PyAsyncGenAThrowType_spec, NULL); +#else + __pyx__PyAsyncGenAThrowType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + __pyx__PyAsyncGenAThrowType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenAThrowType_type); +#endif + if (unlikely(!__pyx__PyAsyncGenAThrowType)) + return -1; + +#if CYTHON_USE_TYPE_SPECS + __pyx__PyAsyncGenWrappedValueType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx__PyAsyncGenWrappedValueType_spec, NULL); +#else + __pyx__PyAsyncGenWrappedValueType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + 
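+    // as above: tp_getattro is filled in at runtime because C-API functions
+    // cannot be used in static slot initializers on Windows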
__pyx__PyAsyncGenWrappedValueType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenWrappedValueType_type); +#endif + if (unlikely(!__pyx__PyAsyncGenWrappedValueType)) + return -1; + +#if CYTHON_USE_TYPE_SPECS + __pyx__PyAsyncGenASendType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx__PyAsyncGenASendType_spec, NULL); +#else + __pyx__PyAsyncGenASendType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + __pyx__PyAsyncGenASendType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenASendType_type); +#endif + if (unlikely(!__pyx__PyAsyncGenASendType)) + return -1; + + return 0; +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Buffer.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Buffer.c new file mode 100644 index 0000000000000000000000000000000000000000..5289a6b5591dd0a8da05b0c4bd329b452a898e58 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Buffer.c @@ -0,0 +1,933 @@ +/////////////// BufferStructDeclare.proto /////////////// + +/* structs for buffer access */ + +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; + +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; + +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[{{max_dims}}]; +} __Pyx_LocalBuf_ND; + +/////////////// BufferIndexError.proto /////////////// +static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/ + +/////////////// BufferIndexError /////////////// +static void __Pyx_RaiseBufferIndexError(int axis) { + PyErr_Format(PyExc_IndexError, + "Out of bounds on buffer access (axis %d)", axis); +} + +/////////////// BufferIndexErrorNogil.proto /////////////// +//@requires: BufferIndexError + +static void __Pyx_RaiseBufferIndexErrorNogil(int axis); /*proto*/ + +/////////////// BufferIndexErrorNogil /////////////// +static void __Pyx_RaiseBufferIndexErrorNogil(int axis) { + #ifdef WITH_THREAD + PyGILState_STATE gilstate = PyGILState_Ensure(); + #endif + __Pyx_RaiseBufferIndexError(axis); + #ifdef WITH_THREAD + PyGILState_Release(gilstate); + #endif +} + +/////////////// BufferFallbackError.proto /////////////// +static void __Pyx_RaiseBufferFallbackError(void); /*proto*/ + +/////////////// BufferFallbackError /////////////// +static void __Pyx_RaiseBufferFallbackError(void) { + PyErr_SetString(PyExc_ValueError, + "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); +} + +/////////////// BufferFormatStructs.proto /////////////// +//@proto_block: utility_code_proto_before_types + +/* Run-time type information about structs used with buffers */ +struct __Pyx_StructField_; + +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) + +typedef struct { + const char* name; /* for error messages only */ + struct __Pyx_StructField_* fields; + size_t size; /* sizeof(type) */ + size_t arraysize[8]; /* length of array in each dimension */ + int ndim; + char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ + char is_unsigned; + int flags; +} __Pyx_TypeInfo; + +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} __Pyx_StructField; + +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; + +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char 
is_valid_array; +} __Pyx_BufFmt_Context; + + +/////////////// GetAndReleaseBuffer.proto /////////////// + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + +/////////////// GetAndReleaseBuffer /////////////// + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + __Pyx_TypeName obj_type_name; + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + + {{for type_ptr, getbuffer, releasebuffer in types}} + {{if getbuffer}} + if (__Pyx_TypeCheck(obj, {{type_ptr}})) return {{getbuffer}}(obj, view, flags); + {{endif}} + {{endfor}} + + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'" __Pyx_FMT_TYPENAME "' does not have the buffer interface", + obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return -1; +} + +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + + if ((0)) {} + {{for type_ptr, getbuffer, releasebuffer in types}} + {{if releasebuffer}} + else if (__Pyx_TypeCheck(obj, {{type_ptr}})) {{releasebuffer}}(obj, view); + {{endif}} + {{endfor}} + + view->obj = NULL; + Py_DECREF(obj); +} + +#endif /* PY_MAJOR_VERSION < 3 */ + + +/////////////// BufferGetAndValidate.proto /////////////// + +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack) \ + ((obj == Py_None || obj == NULL) ? \ + (__Pyx_ZeroBuffer(buf), 0) : \ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) + +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);/*proto*/ + +static Py_ssize_t __Pyx_minusones[] = { {{ ", ".join(["-1"] * max_dims) }} }; +static Py_ssize_t __Pyx_zeros[] = { {{ ", ".join(["0"] * max_dims) }} }; + + +/////////////// BufferGetAndValidate /////////////// +//@requires: BufferFormatCheck + +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; + __Pyx_ReleaseBuffer(info); +} + +static void __Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} + +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + // From this point on, we have acquired the buffer and must release it on errors. 
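+ // Hypothetical call site (a sketch only; __Pyx_TypeInfo_double stands in for a generated type-info table): an assignment like "cdef object[double, ndim=2] x = obj" would reach this function roughly as + // __Pyx__GetBufferAndValidate(&pybuf, obj, &__Pyx_TypeInfo_double, PyBUF_FORMAT|PyBUF_STRIDES, 2, 0, stack); + // the checks below then reject exporters with the wrong ndim, format or itemsize.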
+ if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((size_t)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + + +/////////////// BufferFormatCheck.proto /////////////// + +// Buffer format string checking +// +// Buffer type checking. Utility code for checking that acquired +// buffers match our assumptions. We only need to check ndim and +// the format string; the access mode/flags is checked by the +// exporter. See: +// +// https://docs.python.org/3/library/struct.html +// https://www.python.org/dev/peps/pep-3118/#additions-to-the-struct-string-syntax +// +// The alignment code is copied from _struct.c in Python. + +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); /*proto*/ + +/////////////// BufferFormatCheck /////////////// +//@requires: ModuleSetupCode.c::IsLittleEndian +//@requires: BufferFormatStructs + +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} + +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t <= '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} + +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) /* First char was not a digit */ + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} + + +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} + +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case '?': return "'bool'"; + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': 
return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ? "'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparsable format string"; + } +} + +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} + +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} + +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif + +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { + CYTHON_UNUSED_VAR(is_complex); + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} + +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably be the same as above, + but we don't have any guarantees. 
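+   (Illustration only; the values are ABI-dependent: on a typical LP64 system + sizeof(__Pyx_pad_double) is 16, so the computed padding for 'd' is 16 - 8 = 8, + while a 32-bit x86 ABI with 4-byte double alignment gives 12 - 8 = 4.)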
+ */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif + +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, int is_complex) { + CYTHON_UNUSED_VAR(is_complex); + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} + +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} + + +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} + +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + + /* printf("processing... 
%s\n", ctx->head->field->type->name); */ + + if (ctx->enc_type == 0) return 0; + + /* Validate array size */ + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + + /* handle strings ('s' and 'p') */ + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + /* special case -- treat as struct rather than complex number */ + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + /* special case -- chars don't care about sign */ + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + + --ctx->enc_count; /* Consume from buffer string */ + + /* Done checking, move to next field, pushing or popping struct stack if needed */ + while (1) { + if (field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; /* breaks both loops as ctx->enc_count == 0 */ + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; /* empty struct */ + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; 
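+    /* Worked example (assumed dtype, for illustration): checking a struct + {int a; int b; double c} against "@2id" on a typical LP64 ABI, the "2i" chunk + enters here with enc_count == 2 and matches fields a and b at offsets 0 and 4; + the 'd' chunk then matches c at the 8-aligned fmt_offset 8. */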
+} + +// Parse an array in the format string (e.g. (1,2,3)) +// Return 0 on success, -1 on error +static int +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number, ndim; + + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return -1; + } + + /* Process the previous element */ + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return -1; + + // store ndim now, as field advanced by __Pyx_BufFmt_ProcessTypeChunk call + ndim = ctx->head->field->type->ndim; + + /* Parse all numbers in the format string */ + while (*ts && *ts != ')') { + // ignore space characters (not using isspace() due to C/C++ problem on MacOS-X) + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; /* not a 'break' in the loop */ + } + + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return -1; + + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + return -1; + } + + if (*ts != ',' && *ts != ')') { + PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + return -1; + } + + if (*ts == ',') ts++; + i++; + } + + if (i != ndim) { + PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + return -1; + } + + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return -1; + } + + ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return 0; +} + +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + + while (1) { + /* puts(ts); */ + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': /* substruct */ + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; /* Erase processed last struct element */ + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': /* end of substruct; either repeat or move on */ + { + size_t alignment = 
ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; /* Erase processed last struct element */ + if (alignment && ctx->fmt_offset % alignment) { + /* Pad struct on size of the first member */ + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + CYTHON_FALLTHROUGH; + case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && + (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { + /* Continue pooling same type */ + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + CYTHON_FALLTHROUGH; + case 's': + /* 's' or new type (cannot be added to current pool) */ + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (__pyx_buffmt_parse_array(ctx, &ts) < 0) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/////////////// TypeInfoCompare.proto /////////////// +static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); + +/////////////// TypeInfoCompare /////////////// +//@requires: BufferFormatStructs + +// See if two dtypes are equal +static int +__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) +{ + int i; + + if (!a || !b) + return 0; + + if (a == b) + return 1; + + if (a->size != b->size || a->typegroup != b->typegroup || + a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { + if (a->typegroup == 'H' || b->typegroup == 'H') { + /* Special case for chars */ + return a->size == b->size; + } else { + return 0; + } + } + + if (a->ndim) { + /* Verify multidimensional C arrays */ + for (i = 0; i < a->ndim; i++) + if (a->arraysize[i] != b->arraysize[i]) + return 0; + } + + if (a->typegroup == 'S') { + /* Check for packed struct */ + if (a->flags != b->flags) + return 0; + + /* compare all struct fields */ + if (a->fields || b->fields) { + /* Check if both have fields */ + if (!(a->fields && b->fields)) + return 0; + + /* compare */ + for (i = 0; a->fields[i].type && b->fields[i].type; i++) { + __Pyx_StructField *field_a = a->fields + i; + __Pyx_StructField *field_b = b->fields + i; + + if (field_a->offset != field_b->offset || + !__pyx_typeinfo_cmp(field_a->type, field_b->type)) + return 0; + } + + /* If all fields are processed, we have a match */ + return !a->fields[i].type && !b->fields[i].type; + } + } + + return 1; +} + + +/////////////// TypeInfoToFormat.proto /////////////// +struct __pyx_typeinfo_string { + char string[3]; +}; +static struct __pyx_typeinfo_string __Pyx_TypeInfoToFormat(__Pyx_TypeInfo *type); + +/////////////// TypeInfoToFormat /////////////// +//@requires: BufferFormatStructs + +// See 
also MemoryView.pyx:BufferFormatFromTypeInfo + +static struct __pyx_typeinfo_string __Pyx_TypeInfoToFormat(__Pyx_TypeInfo *type) { + struct __pyx_typeinfo_string result = { {0} }; + char *buf = (char *) result.string; + size_t size = type->size; + + switch (type->typegroup) { + case 'H': + *buf = 'c'; + break; + case 'I': + case 'U': + if (size == 1) + *buf = (type->is_unsigned) ? 'B' : 'b'; + else if (size == 2) + *buf = (type->is_unsigned) ? 'H' : 'h'; + else if (size == 4) + *buf = (type->is_unsigned) ? 'I' : 'i'; + else if (size == 8) + *buf = (type->is_unsigned) ? 'Q' : 'q'; + break; + case 'P': + *buf = 'P'; + break; + case 'C': + { + __Pyx_TypeInfo complex_type = *type; + complex_type.typegroup = 'R'; + complex_type.size /= 2; + + *buf++ = 'Z'; + *buf = __Pyx_TypeInfoToFormat(&complex_type).string[0]; + break; + } + case 'R': + if (size == 4) + *buf = 'f'; + else if (size == 8) + *buf = 'd'; + else + *buf = 'g'; + break; + } + + return result; +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Builtins.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Builtins.c new file mode 100644 index 0000000000000000000000000000000000000000..87609b5f13ad15e633b38c0c40109c5484cd7e29 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Builtins.c @@ -0,0 +1,615 @@ +/* + * Special implementations of built-in functions and methods. + * + * Optional optimisations for builtins are in Optimize.c. + * + * General object operations and protocols are in ObjectHandling.c. + */ + +//////////////////// Globals.proto //////////////////// + +static PyObject* __Pyx_Globals(void); /*proto*/ + +//////////////////// Globals //////////////////// +//@substitute: naming +//@requires: ObjectHandling.c::GetAttr + +// This is a stub implementation until we have something more complete. +// Currently, we only handle the most common case of a read-only dict +// of Python names. Supporting cdef names in the module and write +// access requires a rewrite as a dedicated class. 
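+// (Sketch of the limitation, inferred from the code below rather than a documented +// guarantee: the returned mapping is the module dict itself, so module-level +// "cdef" names neither appear in it nor can be rebound through it -- hence the +// read-only caveat above.)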
+ +static PyObject* __Pyx_Globals(void) { + return __Pyx_NewRef($moddict_cname); +} + +//////////////////// PyExecGlobals.proto //////////////////// + +static PyObject* __Pyx_PyExecGlobals(PyObject*); + +//////////////////// PyExecGlobals //////////////////// +//@substitute: naming +//@requires: PyExec + +static PyObject* __Pyx_PyExecGlobals(PyObject* code) { + return __Pyx_PyExec2(code, $moddict_cname); +} + +//////////////////// PyExec.proto //////////////////// + +static PyObject* __Pyx_PyExec3(PyObject*, PyObject*, PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyExec2(PyObject*, PyObject*); + +//////////////////// PyExec //////////////////// +//@substitute: naming + +static CYTHON_INLINE PyObject* __Pyx_PyExec2(PyObject* o, PyObject* globals) { + return __Pyx_PyExec3(o, globals, NULL); +} + +static PyObject* __Pyx_PyExec3(PyObject* o, PyObject* globals, PyObject* locals) { + PyObject* result; + PyObject* s = 0; + char *code = 0; + + if (!globals || globals == Py_None) { + globals = $moddict_cname; + } else if (unlikely(!PyDict_Check(globals))) { + __Pyx_TypeName globals_type_name = + __Pyx_PyType_GetName(Py_TYPE(globals)); + PyErr_Format(PyExc_TypeError, + "exec() arg 2 must be a dict, not " __Pyx_FMT_TYPENAME, + globals_type_name); + __Pyx_DECREF_TypeName(globals_type_name); + goto bad; + } + if (!locals || locals == Py_None) { + locals = globals; + } + + if (__Pyx_PyDict_GetItemStr(globals, PYIDENT("__builtins__")) == NULL) { + if (unlikely(PyDict_SetItem(globals, PYIDENT("__builtins__"), PyEval_GetBuiltins()) < 0)) + goto bad; + } + + if (PyCode_Check(o)) { + if (unlikely(__Pyx_PyCode_HasFreeVars((PyCodeObject *)o))) { + PyErr_SetString(PyExc_TypeError, + "code object passed to exec() may not contain free variables"); + goto bad; + } + #if PY_VERSION_HEX < 0x030200B1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) + result = PyEval_EvalCode((PyCodeObject *)o, globals, locals); + #else + result = PyEval_EvalCode(o, globals, locals); + #endif + } else { + PyCompilerFlags cf; + cf.cf_flags = 0; +#if PY_VERSION_HEX >= 0x030800A3 + cf.cf_feature_version = PY_MINOR_VERSION; +#endif + if (PyUnicode_Check(o)) { + cf.cf_flags = PyCF_SOURCE_IS_UTF8; + s = PyUnicode_AsUTF8String(o); + if (unlikely(!s)) goto bad; + o = s; + #if PY_MAJOR_VERSION >= 3 + } else if (unlikely(!PyBytes_Check(o))) { + #else + } else if (unlikely(!PyString_Check(o))) { + #endif + __Pyx_TypeName o_type_name = __Pyx_PyType_GetName(Py_TYPE(o)); + PyErr_Format(PyExc_TypeError, + "exec: arg 1 must be string, bytes or code object, got " __Pyx_FMT_TYPENAME, + o_type_name); + __Pyx_DECREF_TypeName(o_type_name); + goto bad; + } + #if PY_MAJOR_VERSION >= 3 + code = PyBytes_AS_STRING(o); + #else + code = PyString_AS_STRING(o); + #endif + if (PyEval_MergeCompilerFlags(&cf)) { + result = PyRun_StringFlags(code, Py_file_input, globals, locals, &cf); + } else { + result = PyRun_String(code, Py_file_input, globals, locals); + } + Py_XDECREF(s); + } + + return result; +bad: + Py_XDECREF(s); + return 0; +} + +//////////////////// GetAttr3.proto //////////////////// + +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /*proto*/ + +//////////////////// GetAttr3 //////////////////// +//@requires: ObjectHandling.c::PyObjectGetAttrStr +//@requires: Exceptions.c::PyThreadStateGet +//@requires: Exceptions.c::PyErrFetchRestore +//@requires: Exceptions.c::PyErrExceptionMatches + +#if __PYX_LIMITED_VERSION_HEX < 0x030d00A1 +static PyObject *__Pyx_GetAttr3Default(PyObject *d) { + 
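+    // Behavioural note with a hypothetical example: __Pyx_GetAttr3(o, n, d) mirrors + // getattr(o, n, d) -- only AttributeError is swallowed here, so a property that + // raises, say, TypeError still propagates instead of yielding the default.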
__Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(d); + return d; +} +#endif + +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { + PyObject *r; +#if __PYX_LIMITED_VERSION_HEX >= 0x030d00A1 + int res = PyObject_GetOptionalAttr(o, n, &r); + // On failure (res == -1), r is set to NULL. + return (res != 0) ? r : __Pyx_NewRef(d); +#else + #if CYTHON_USE_TYPE_SLOTS + if (likely(PyString_Check(n))) { + r = __Pyx_PyObject_GetAttrStrNoError(o, n); + if (unlikely(!r) && likely(!PyErr_Occurred())) { + r = __Pyx_NewRef(d); + } + return r; + } + #endif + r = PyObject_GetAttr(o, n); + return (likely(r)) ? r : __Pyx_GetAttr3Default(d); +#endif +} + +//////////////////// HasAttr.proto //////////////////// + +#if __PYX_LIMITED_VERSION_HEX >= 0x030d00A1 +#define __Pyx_HasAttr(o, n) PyObject_HasAttrWithError(o, n) +#else +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /*proto*/ +#endif + +//////////////////// HasAttr //////////////////// +//@requires: ObjectHandling.c::GetAttr + +#if __PYX_LIMITED_VERSION_HEX < 0x030d00A1 +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { + PyObject *r; + if (unlikely(!__Pyx_PyBaseString_Check(n))) { + PyErr_SetString(PyExc_TypeError, + "hasattr(): attribute name must be string"); + return -1; + } + r = __Pyx_GetAttr(o, n); + if (!r) { + PyErr_Clear(); + return 0; + } else { + Py_DECREF(r); + return 1; + } +} +#endif + +//////////////////// Intern.proto //////////////////// + +static PyObject* __Pyx_Intern(PyObject* s); /* proto */ + +//////////////////// Intern //////////////////// +//@requires: ObjectHandling.c::RaiseUnexpectedTypeError + +static PyObject* __Pyx_Intern(PyObject* s) { + if (unlikely(!PyString_CheckExact(s))) { + __Pyx_RaiseUnexpectedTypeError("str", s); + return NULL; + } + Py_INCREF(s); + #if PY_MAJOR_VERSION >= 3 + PyUnicode_InternInPlace(&s); + #else + PyString_InternInPlace(&s); + #endif + return s; +} + +//////////////////// abs_longlong.proto //////////////////// + +static CYTHON_INLINE PY_LONG_LONG __Pyx_abs_longlong(PY_LONG_LONG x) { +#if defined (__cplusplus) && __cplusplus >= 201103L + return std::abs(x); +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + return llabs(x); +#elif defined (_MSC_VER) + // abs() is defined for long, but 64-bits type on MSVC is long long. + // Use MS-specific _abs64() instead, which returns the original (negative) value for abs(-MAX-1) + return _abs64(x); +#elif defined (__GNUC__) + // gcc or clang on 64 bit windows. + return __builtin_llabs(x); +#else + if (sizeof(PY_LONG_LONG) <= sizeof(Py_ssize_t)) + return __Pyx_sst_abs(x); + return (x<0) ? -x : x; +#endif +} + + +//////////////////// py_abs.proto //////////////////// + +#if CYTHON_USE_PYLONG_INTERNALS +static PyObject *__Pyx_PyLong_AbsNeg(PyObject *num);/*proto*/ + +#define __Pyx_PyNumber_Absolute(x) \ + ((likely(PyLong_CheckExact(x))) ? \ + (likely(__Pyx_PyLong_IsNonNeg(x)) ? 
(Py_INCREF(x), (x)) : __Pyx_PyLong_AbsNeg(x)) : \ + PyNumber_Absolute(x)) + +#else +#define __Pyx_PyNumber_Absolute(x) PyNumber_Absolute(x) +#endif + +//////////////////// py_abs //////////////////// + +#if CYTHON_USE_PYLONG_INTERNALS +static PyObject *__Pyx_PyLong_AbsNeg(PyObject *n) { +#if PY_VERSION_HEX >= 0x030C00A7 + if (likely(__Pyx_PyLong_IsCompact(n))) { + return PyLong_FromSize_t(__Pyx_PyLong_CompactValueUnsigned(n)); + } +#else + if (likely(Py_SIZE(n) == -1)) { + // digits are unsigned + return PyLong_FromUnsignedLong(__Pyx_PyLong_Digits(n)[0]); + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + { + PyObject *copy = _PyLong_Copy((PyLongObject*)n); + if (likely(copy)) { + #if PY_VERSION_HEX >= 0x030C00A7 + // clear the sign bits to set the sign from SIGN_NEGATIVE (2) to positive (0) + ((PyLongObject*)copy)->long_value.lv_tag = ((PyLongObject*)copy)->long_value.lv_tag & ~_PyLong_SIGN_MASK; + #else + // negate the size to swap the sign + __Pyx_SET_SIZE(copy, -Py_SIZE(copy)); + #endif + } + return copy; + } +#else + return PyNumber_Negative(n); +#endif +} +#endif + + +//////////////////// pow2.proto //////////////////// + +#define __Pyx_PyNumber_Power2(a, b) PyNumber_Power(a, b, Py_None) + + +//////////////////// int_pyucs4.proto //////////////////// + +static CYTHON_INLINE int __Pyx_int_from_UCS4(Py_UCS4 uchar); + +//////////////////// int_pyucs4 //////////////////// + +static int __Pyx_int_from_UCS4(Py_UCS4 uchar) { + int digit = Py_UNICODE_TODIGIT(uchar); + if (unlikely(digit < 0)) { + PyErr_Format(PyExc_ValueError, + "invalid literal for int() with base 10: '%c'", + (int) uchar); + return -1; + } + return digit; +} + + +//////////////////// float_pyucs4.proto //////////////////// + +static CYTHON_INLINE double __Pyx_double_from_UCS4(Py_UCS4 uchar); + +//////////////////// float_pyucs4 //////////////////// + +static double __Pyx_double_from_UCS4(Py_UCS4 uchar) { + double digit = Py_UNICODE_TONUMERIC(uchar); + if (unlikely(digit < 0.0)) { + PyErr_Format(PyExc_ValueError, + "could not convert string to float: '%c'", + (int) uchar); + return -1.0; + } + return digit; +} + + +//////////////////// object_ord.proto //////////////////// +//@requires: TypeConversion.c::UnicodeAsUCS4 + +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyObject_Ord(c) \ + (likely(PyUnicode_Check(c)) ? 
(long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c)) +#else +#define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c) +#endif +static long __Pyx__PyObject_Ord(PyObject* c); /*proto*/ + +//////////////////// object_ord //////////////////// + +static long __Pyx__PyObject_Ord(PyObject* c) { + Py_ssize_t size; + if (PyBytes_Check(c)) { + size = PyBytes_GET_SIZE(c); + if (likely(size == 1)) { + return (unsigned char) PyBytes_AS_STRING(c)[0]; + } +#if PY_MAJOR_VERSION < 3 + } else if (PyUnicode_Check(c)) { + return (long)__Pyx_PyUnicode_AsPy_UCS4(c); +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + } else if (PyByteArray_Check(c)) { + size = PyByteArray_GET_SIZE(c); + if (likely(size == 1)) { + return (unsigned char) PyByteArray_AS_STRING(c)[0]; + } +#endif + } else { + // FIXME: support character buffers - but CPython doesn't support them either + __Pyx_TypeName c_type_name = __Pyx_PyType_GetName(Py_TYPE(c)); + PyErr_Format(PyExc_TypeError, + "ord() expected string of length 1, but " __Pyx_FMT_TYPENAME " found", + c_type_name); + __Pyx_DECREF_TypeName(c_type_name); + return (long)(Py_UCS4)-1; + } + PyErr_Format(PyExc_TypeError, + "ord() expected a character, but string of length %zd found", size); + return (long)(Py_UCS4)-1; +} + + +//////////////////// py_dict_keys.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d); /*proto*/ + +//////////////////// py_dict_keys //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); + else + return PyDict_Keys(d); +} + +//////////////////// py_dict_values.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d); /*proto*/ + +//////////////////// py_dict_values //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); + else + return PyDict_Values(d); +} + +//////////////////// py_dict_items.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d); /*proto*/ + +//////////////////// py_dict_items //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); + else + return PyDict_Items(d); +} + +//////////////////// py_dict_iterkeys.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d); /*proto*/ + +//////////////////// py_dict_iterkeys //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "iterkeys", d); +} + +//////////////////// py_dict_itervalues.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterValues(PyObject* d); /*proto*/ + +//////////////////// py_dict_itervalues //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterValues(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "itervalues", d); +} + +//////////////////// py_dict_iteritems.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d); /*proto*/ + +//////////////////// py_dict_iteritems 
//////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "iteritems", d); +} + +//////////////////// py_dict_viewkeys.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d); /*proto*/ + +//////////////////// py_dict_viewkeys //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "viewkeys", d); +} + +//////////////////// py_dict_viewvalues.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d); /*proto*/ + +//////////////////// py_dict_viewvalues //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "values", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "viewvalues", d); +} + +//////////////////// py_dict_viewitems.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d); /*proto*/ + +//////////////////// py_dict_viewitems //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d) { + if (PY_MAJOR_VERSION >= 3) + return CALL_UNBOUND_METHOD(PyDict_Type, "items", d); + else + return CALL_UNBOUND_METHOD(PyDict_Type, "viewitems", d); +} + + +//////////////////// pyfrozenset_new.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it); + +//////////////////// pyfrozenset_new //////////////////// +//@substitute: naming + +static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it) { + if (it) { + PyObject* result; +#if CYTHON_COMPILING_IN_PYPY + // PyPy currently lacks PyFrozenSet_CheckExact() and PyFrozenSet_New() + PyObject* args; + args = PyTuple_Pack(1, it); + if (unlikely(!args)) + return NULL; + result = PyObject_Call((PyObject*)&PyFrozenSet_Type, args, NULL); + Py_DECREF(args); + return result; +#else + if (PyFrozenSet_CheckExact(it)) { + Py_INCREF(it); + return it; + } + result = PyFrozenSet_New(it); + if (unlikely(!result)) + return NULL; + if ((PY_VERSION_HEX >= 0x031000A1) || likely(PySet_GET_SIZE(result))) + return result; + // empty frozenset is a singleton (on Python <3.10) + // seems wasteful, but CPython does the same + Py_DECREF(result); +#endif + } +#if CYTHON_USE_TYPE_SLOTS + return PyFrozenSet_Type.tp_new(&PyFrozenSet_Type, $empty_tuple, NULL); +#else + return PyObject_Call((PyObject*)&PyFrozenSet_Type, $empty_tuple, NULL); +#endif +} + + +//////////////////// PySet_Update.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it); /*proto*/ + +//////////////////// PySet_Update //////////////////// + +static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it) { + PyObject *retval; + #if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY + if (PyAnySet_Check(it)) { + if (PySet_GET_SIZE(it) == 0) + return 0; + // fast and safe case: CPython will update our result set and return it + retval = PySet_Type.tp_as_number->nb_inplace_or(set, it); + if (likely(retval == set)) { + Py_DECREF(retval); + return 0; + } + if (unlikely(!retval)) + return -1; + // unusual result, fall through to set.update() call below + Py_DECREF(retval); + } + #endif + retval = 
CALL_UNBOUND_METHOD(PySet_Type, "update", set, it); + if (unlikely(!retval)) return -1; + Py_DECREF(retval); + return 0; +} + +///////////////// memoryview_get_from_buffer.proto //////////////////// + +// buffer is in limited api from Py3.11 +#if !CYTHON_COMPILING_IN_LIMITED_API || CYTHON_LIMITED_API >= 0x030b0000 +#define __Pyx_PyMemoryView_Get_{{name}}(o) PyMemoryView_GET_BUFFER(o)->{{name}} +#else +{{py: +out_types = dict( + ndim='int', readonly='int', + len='Py_ssize_t', itemsize='Py_ssize_t') +}} // can't get format like this unfortunately. It's unicode via getattr +{{py: out_type = out_types[name]}} +static {{out_type}} __Pyx_PyMemoryView_Get_{{name}}(PyObject *obj); /* proto */ +#endif + +////////////// memoryview_get_from_buffer ///////////////////////// + +#if !CYTHON_COMPILING_IN_LIMITED_API || CYTHON_LIMITED_API >= 0x030b0000 +#else +{{py: +out_types = dict( + ndim='int', readonly='int', + len='Py_ssize_t', itemsize='Py_ssize_t') +}} +{{py: out_type = out_types[name]}} +static {{out_type}} __Pyx_PyMemoryView_Get_{{name}}(PyObject *obj) { + {{out_type}} result; + PyObject *attr = PyObject_GetAttr(obj, PYIDENT("{{name}}")); + if (!attr) { + goto bad; + } +{{if out_type == 'int'}} + // I'm not worrying about overflow here because + // ultimately it comes from a C struct that's an int + result = PyLong_AsLong(attr); +{{elif out_type == 'Py_ssize_t'}} + result = PyLong_AsSsize_t(attr); +{{endif}} + Py_DECREF(attr); + return result; + + bad: + Py_XDECREF(attr); + return -1; +} +#endif diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CConvert.pyx b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CConvert.pyx new file mode 100644 index 0000000000000000000000000000000000000000..4ae66162ee5052e18334f60703a3e79a0aee9f6b --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CConvert.pyx @@ -0,0 +1,134 @@ +#################### FromPyStructUtility #################### + +cdef extern from *: + ctypedef struct PyTypeObject: + char* tp_name + PyTypeObject *Py_TYPE(obj) + bint PyMapping_Check(obj) + object PyErr_Format(exc, const char *format, ...) + int __Pyx_RaiseUnexpectedTypeError(const char *expected, object obj) except 0 + +@cname("{{funcname}}") +cdef {{struct_type}} {{funcname}}(obj) except *: + cdef {{struct_type}} result + if not PyMapping_Check(obj): + __Pyx_RaiseUnexpectedTypeError(b"a mapping", obj) + + {{for member in var_entries:}} + try: + value = obj['{{member.name}}'] + except KeyError: + raise ValueError("No value specified for struct attribute '{{member.name}}'") + result.{{member.name}} = value + {{endfor}} + return result + + +#################### FromPyUnionUtility #################### + +cdef extern from *: + ctypedef struct PyTypeObject: + char* tp_name + PyTypeObject *Py_TYPE(obj) + bint PyMapping_Check(obj) + object PyErr_Format(exc, const char *format, ...) 
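+    # (Illustrative sketch, assuming a hypothetical "cdef union U: int n; double x": + # passing {'n': 3} fills the matching member below, while {'n': 3, 'x': 1.0} + # raises the "More than one union attribute passed" ValueError.)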
+ int __Pyx_RaiseUnexpectedTypeError(const char *expected, object obj) except 0 + +@cname("{{funcname}}") +cdef {{struct_type}} {{funcname}}(obj) except *: + cdef {{struct_type}} result + cdef Py_ssize_t length + if not PyMapping_Check(obj): + __Pyx_RaiseUnexpectedTypeError(b"a mapping", obj) + + last_found = None + length = len(obj) + if length: + {{for member in var_entries:}} + if '{{member.name}}' in obj: + if last_found is not None: + raise ValueError("More than one union attribute passed: '%s' and '%s'" % (last_found, '{{member.name}}')) + last_found = '{{member.name}}' + result.{{member.cname}} = obj['{{member.name}}'] + length -= 1 + if not length: + return result + {{endfor}} + if last_found is None: + raise ValueError("No value specified for any of the union attributes (%s)" % + '{{", ".join(member.name for member in var_entries)}}') + return result + + +#################### cfunc.to_py #################### + +@cname("{{cname}}") +cdef object {{cname}}({{return_type.ctype}} (*f)({{ ', '.join(arg.type_cname for arg in args) }}) {{except_clause}}): + def wrap({{ ', '.join('{arg.ctype} {arg.name}'.format(arg=arg) for arg in args) }}): + """wrap({{', '.join(('{arg.name}: {arg.type_displayname}'.format(arg=arg) if arg.type_displayname else arg.name) for arg in args)}}){{if return_type.type_displayname}} -> {{return_type.type_displayname}}{{endif}}""" + {{'' if return_type.type.is_void else 'return '}}f({{ ', '.join(arg.name for arg in args) }}) + return wrap + + +#################### carray.from_py #################### + +cdef extern from *: + object PyErr_Format(exc, const char *format, ...) + +@cname("{{cname}}") +cdef int {{cname}}(object o, {{base_type}} *v, Py_ssize_t length) except -1: + cdef Py_ssize_t i = length + try: + i = len(o) + except (TypeError, OverflowError): + pass + if i == length: + for i, item in enumerate(o): + if i >= length: + break + v[i] = item + else: + i += 1 # convert index to length + if i == length: + return 0 + + PyErr_Format( + IndexError, + ("too many values found during array assignment, expected %zd" + if i >= length else + "not enough values found during array assignment, expected %zd, got %zd"), + length, i) + + +#################### carray.to_py #################### + +cdef extern from *: + void Py_INCREF(object o) + tuple PyTuple_New(Py_ssize_t size) + list PyList_New(Py_ssize_t size) + void PyTuple_SET_ITEM(object p, Py_ssize_t pos, object o) + void PyList_SET_ITEM(object p, Py_ssize_t pos, object o) + + +@cname("{{cname}}") +cdef inline list {{cname}}({{base_type}} *v, Py_ssize_t length): + cdef size_t i + cdef object value + l = PyList_New(length) + for i in range(length): + value = v[i] + Py_INCREF(value) + PyList_SET_ITEM(l, i, value) + return l + + +@cname("{{to_tuple_cname}}") +cdef inline tuple {{to_tuple_cname}}({{base_type}} *v, Py_ssize_t length): + cdef size_t i + cdef object value + t = PyTuple_New(length) + for i in range(length): + value = v[i] + Py_INCREF(value) + PyTuple_SET_ITEM(t, i, value) + return t diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CMath.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CMath.c new file mode 100644 index 0000000000000000000000000000000000000000..2cd22231384c75624cbcbb526c77b582b6c194dc --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CMath.c @@ -0,0 +1,95 @@ + +/////////////// CDivisionWarning.proto /////////////// + +static int __Pyx_cdivision_warning(const char *, int); /* proto */ + +/////////////// CDivisionWarning /////////////// + 
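+// Assumed call-site sketch (the generated division code, not this file, decides +// when to warn): under the "cdivision_warnings" directive, an integer division +// like -7 / 2 reports here because C truncates it to -3 while Python floors it +// to -4.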
+static int __Pyx_cdivision_warning(const char *filename, int lineno) { +#if CYTHON_COMPILING_IN_PYPY + // avoid compiler warnings + filename++; lineno++; + return PyErr_Warn(PyExc_RuntimeWarning, + "division with oppositely signed operands, C and Python semantics differ"); +#else + return PyErr_WarnExplicit(PyExc_RuntimeWarning, + "division with oppositely signed operands, C and Python semantics differ", + filename, + lineno, + __Pyx_MODULE_NAME, + NULL); +#endif +} + + +/////////////// DivInt.proto /////////////// + +static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */ + +/////////////// DivInt /////////////// + +static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) { + %(type)s q = a / b; + %(type)s r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + + +/////////////// ModInt.proto /////////////// + +static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */ + +/////////////// ModInt /////////////// + +static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) { + %(type)s r = a %% b; + r += ((r != 0) & ((r ^ b) < 0)) * b; + return r; +} + + +/////////////// ModFloat.proto /////////////// + +static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */ + +/////////////// ModFloat /////////////// + +static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) { + %(type)s r = fmod%(math_h_modifier)s(a, b); + r += ((r != 0) & ((r < 0) ^ (b < 0))) * b; + return r; +} + + +/////////////// IntPow.proto /////////////// + +static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */ + +/////////////// IntPow /////////////// + +static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) { + %(type)s t = b; + switch (e) { + case 3: + t *= b; + CYTHON_FALLTHROUGH; + case 2: + t *= b; + CYTHON_FALLTHROUGH; + case 1: + return t; + case 0: + return 1; + } + #if %(signed)s + if (unlikely(e<0)) return 0; + #endif + t = 1; + while (likely(e)) { + t *= (b * (e&1)) | ((~e)&1); /* 1 or b */ + b *= b; + e >>= 1; + } + return t; +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CommonStructures.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CommonStructures.c new file mode 100644 index 0000000000000000000000000000000000000000..35875d0ec2473634905d067c875d59091550d4dd --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CommonStructures.c @@ -0,0 +1,139 @@ +/////////////// FetchSharedCythonModule.proto /////// + +static PyObject *__Pyx_FetchSharedCythonABIModule(void); + +/////////////// FetchSharedCythonModule //////////// + +static PyObject *__Pyx_FetchSharedCythonABIModule(void) { + return __Pyx_PyImport_AddModuleRef((char*) __PYX_ABI_MODULE_NAME); +} + +/////////////// FetchCommonType.proto /////////////// + +#if !CYTHON_USE_TYPE_SPECS +static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); +#else +static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases); +#endif + +/////////////// FetchCommonType /////////////// +//@requires:ExtensionTypes.c::FixUpExtensionType +//@requires: FetchSharedCythonModule +//@requires:StringTools.c::IncludeStringH + +static int __Pyx_VerifyCachedType(PyObject *cached_type, + const char *name, + Py_ssize_t basicsize, + Py_ssize_t expected_basicsize) { + if (!PyType_Check(cached_type)) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s is not a type object", name); + return -1; + 
} + if (basicsize != expected_basicsize) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s has the wrong size, try recompiling", + name); + return -1; + } + return 0; +} + +#if !CYTHON_USE_TYPE_SPECS +static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { + PyObject* abi_module; + const char* object_name; + PyTypeObject *cached_type = NULL; + + abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!abi_module) return NULL; + // get the final part of the object name (after the last dot) + object_name = strrchr(type->tp_name, '.'); + object_name = object_name ? object_name+1 : type->tp_name; + cached_type = (PyTypeObject*) PyObject_GetAttrString(abi_module, object_name); + if (cached_type) { + if (__Pyx_VerifyCachedType( + (PyObject *)cached_type, + object_name, + cached_type->tp_basicsize, + type->tp_basicsize) < 0) { + goto bad; + } + goto done; + } + + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + if (PyType_Ready(type) < 0) goto bad; + if (PyObject_SetAttrString(abi_module, object_name, (PyObject *)type) < 0) + goto bad; + Py_INCREF(type); + cached_type = type; + +done: + Py_DECREF(abi_module); + // NOTE: always returns owned reference, or NULL on error + return cached_type; + +bad: + Py_XDECREF(cached_type); + cached_type = NULL; + goto done; +} +#else + +static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases) { + PyObject *abi_module, *cached_type = NULL; + // get the final part of the object name (after the last dot) + const char* object_name = strrchr(spec->name, '.'); + object_name = object_name ? object_name+1 : spec->name; + + abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!abi_module) return NULL; + + cached_type = PyObject_GetAttrString(abi_module, object_name); + if (cached_type) { + Py_ssize_t basicsize; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_basicsize; + py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__"); + if (unlikely(!py_basicsize)) goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; +#else + basicsize = likely(PyType_Check(cached_type)) ? ((PyTypeObject*) cached_type)->tp_basicsize : -1; +#endif + if (__Pyx_VerifyCachedType( + cached_type, + object_name, + basicsize, + spec->basicsize) < 0) { + goto bad; + } + goto done; + } + + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + // We pass the ABI module reference to avoid keeping the user module alive by foreign type usages. 
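+    // (Sketch of the sharing contract: the first module to get here publishes the + // type on the shared ABI module; later modules find it via the GetAttrString + // lookup above and reuse it, so the type is process-wide rather than per-module.)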
+    CYTHON_UNUSED_VAR(module);
+    cached_type = __Pyx_PyType_FromModuleAndSpec(abi_module, spec, bases);
+    if (unlikely(!cached_type)) goto bad;
+    if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad;
+    if (PyObject_SetAttrString(abi_module, object_name, cached_type) < 0) goto bad;
+
+done:
+    Py_DECREF(abi_module);
+    // NOTE: always returns owned reference, or NULL on error
+    assert(cached_type == NULL || PyType_Check(cached_type));
+    return (PyTypeObject *) cached_type;
+
+bad:
+    Py_XDECREF(cached_type);
+    cached_type = NULL;
+    goto done;
+}
+#endif
+
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Complex.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Complex.c
new file mode 100644
index 0000000000000000000000000000000000000000..c95511dcf3c8e7e4c707c6ba30cb5a3d651301b9
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Complex.c
@@ -0,0 +1,366 @@
+/////////////// Header.proto ///////////////
+//@proto_block: h_code
+
+#if !defined(CYTHON_CCOMPLEX)
+  #if defined(__cplusplus)
+    #define CYTHON_CCOMPLEX 1
+  #elif (defined(_Complex_I) && !defined(_MSC_VER)) || ((defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_COMPLEX__) && !defined(_MSC_VER))
+    // should exist since C99, but only C11 defines a test to detect it.
+    // MSVC defines "_Complex_I" but not "_Complex". See https://github.com/cython/cython/issues/5512
+    #define CYTHON_CCOMPLEX 1
+  #else
+    #define CYTHON_CCOMPLEX 0
+  #endif
+#endif
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    #include <complex>
+  #else
+    #include <complex.h>
+  #endif
+#endif
+
+#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
+  #undef _Complex_I
+  #define _Complex_I 1.0fj
+#endif
+
+/////////////// RealImag.proto ///////////////
+
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    #define __Pyx_CREAL(z) ((z).real())
+    #define __Pyx_CIMAG(z) ((z).imag())
+  #else
+    #define __Pyx_CREAL(z) (__real__(z))
+    #define __Pyx_CIMAG(z) (__imag__(z))
+  #endif
+#else
+    #define __Pyx_CREAL(z) ((z).real)
+    #define __Pyx_CIMAG(z) ((z).imag)
+#endif
+
+#if defined(__cplusplus) && CYTHON_CCOMPLEX \
+        && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
+    #define __Pyx_SET_CREAL(z,x) ((z).real(x))
+    #define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
+#else
+    #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
+    #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
+#endif
+
+/////////////// RealImag_Cy.proto ///////////////
+
+// Alternative version of RealImag.proto for the case where
+// we definitely want to force the use of the Cython utility
+// code version of complex, because integer complex types
+// simply aren't covered by the C or C++ standards
+// (although in practice they will probably work in C++).
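+// Illustrative sketch (not part of the upstream file): with the struct-based
+// fallback, the *_Cy macros expand to plain member access, e.g. for a
+// hypothetical integer complex type __pyx_t_long_complex:
+//
+//     __pyx_t_long_complex z;
+//     __Pyx_SET_CREAL_Cy(z, 1);        /* z.real = 1 */
+//     __Pyx_SET_CIMAG_Cy(z, 2);        /* z.imag = 2 */
+//     long re = __Pyx_CREAL_Cy(z);     /* re == 1 */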
+
+#define __Pyx_CREAL_Cy(z) ((z).real)
+#define __Pyx_CIMAG_Cy(z) ((z).imag)
+#define __Pyx_SET_CREAL_Cy(z,x) __Pyx_CREAL_Cy(z) = (x)
+#define __Pyx_SET_CIMAG_Cy(z,y) __Pyx_CIMAG_Cy(z) = (y)
+
+/////////////// RealImag_CyTypedef.proto //////////
+//@requires: RealImag
+//@requires: RealImag_Cy
+
+#if __cplusplus
+// C++ is fine with complexes based on typedefs because the template sees through them
+#define __Pyx_CREAL_CyTypedef(z) __Pyx_CREAL(z)
+#define __Pyx_CIMAG_CyTypedef(z) __Pyx_CIMAG(z)
+#define __Pyx_SET_CREAL_CyTypedef(z,x) __Pyx_SET_CREAL(z,x)
+#define __Pyx_SET_CIMAG_CyTypedef(z,x) __Pyx_SET_CIMAG(z,x)
+#else
+// C isn't
+#define __Pyx_CREAL_CyTypedef(z) __Pyx_CREAL_Cy(z)
+#define __Pyx_CIMAG_CyTypedef(z) __Pyx_CIMAG_Cy(z)
+#define __Pyx_SET_CREAL_CyTypedef(z,x) __Pyx_SET_CREAL_Cy(z,x)
+#define __Pyx_SET_CIMAG_CyTypedef(z,x) __Pyx_SET_CIMAG_Cy(z,x)
+#endif
+
+/////////////// Declarations.proto ///////////////
+//@proto_block: complex_type_declarations
+
+#if CYTHON_CCOMPLEX && ({{is_float}}) && (!{{is_extern_float_typedef}} || __cplusplus)
+  #ifdef __cplusplus
+    typedef ::std::complex< {{real_type}} > {{type_name}};
+  #else
+    typedef {{real_type}} _Complex {{type_name}};
+  #endif
+#else
+    typedef struct { {{real_type}} real, imag; } {{type_name}};
+#endif
+
+static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}}, {{real_type}});
+
+/////////////// Declarations ///////////////
+
+#if CYTHON_CCOMPLEX && ({{is_float}}) && (!{{is_extern_float_typedef}} || __cplusplus)
+  #ifdef __cplusplus
+    static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) {
+      return ::std::complex< {{real_type}} >(x, y);
+    }
+  #else
+    static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) {
+      return x + y*({{type}})_Complex_I;
+    }
+  #endif
+#else
+    static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) {
+      {{type}} z;
+      z.real = x;
+      z.imag = y;
+      return z;
+    }
+#endif
+
+
+/////////////// ToPy.proto ///////////////
+
+{{py: func_suffix = "_CyTypedef" if is_extern_float_typedef else ("" if is_float else "_Cy")}}
+#define __pyx_PyComplex_FromComplex{{func_suffix}}(z) \
+        PyComplex_FromDoubles((double)__Pyx_CREAL{{func_suffix}}(z), \
+                              (double)__Pyx_CIMAG{{func_suffix}}(z))
+
+/////////////// FromPy.proto ///////////////
+
+static {{type}} __Pyx_PyComplex_As_{{type_name}}(PyObject*);
+
+/////////////// FromPy ///////////////
+
+static {{type}} __Pyx_PyComplex_As_{{type_name}}(PyObject* o) {
+    Py_complex cval;
+#if !CYTHON_COMPILING_IN_PYPY
+    if (PyComplex_CheckExact(o))
+        cval = ((PyComplexObject *)o)->cval;
+    else
+#endif
+        cval = PyComplex_AsCComplex(o);
+    return {{type_name}}_from_parts(
+        ({{real_type}})cval.real,
+        ({{real_type}})cval.imag);
+}
+
+
+/////////////// Arithmetic.proto ///////////////
+
+#if CYTHON_CCOMPLEX && ({{is_float}}) && (!{{is_extern_float_typedef}} || __cplusplus)
+    #define __Pyx_c_eq{{func_suffix}}(a, b) ((a)==(b))
+    #define __Pyx_c_sum{{func_suffix}}(a, b) ((a)+(b))
+    #define __Pyx_c_diff{{func_suffix}}(a, b) ((a)-(b))
+    #define __Pyx_c_prod{{func_suffix}}(a, b) ((a)*(b))
+    #define __Pyx_c_quot{{func_suffix}}(a, b) ((a)/(b))
+    #define __Pyx_c_neg{{func_suffix}}(a) (-(a))
+  #ifdef __cplusplus
+    #define __Pyx_c_is_zero{{func_suffix}}(z) ((z)==({{real_type}})0)
+    #define __Pyx_c_conj{{func_suffix}}(z) (::std::conj(z))
+    #if {{is_float}}
+        #define __Pyx_c_abs{{func_suffix}}(z) (::std::abs(z))
+        #define __Pyx_c_pow{{func_suffix}}(a, b) (::std::pow(a, b))
+    #endif
+  #else
+    #define 
__Pyx_c_is_zero{{func_suffix}}(z) ((z)==0) + #define __Pyx_c_conj{{func_suffix}}(z) (conj{{m}}(z)) + #if {{is_float}} + #define __Pyx_c_abs{{func_suffix}}(z) (cabs{{m}}(z)) + #define __Pyx_c_pow{{func_suffix}}(a, b) (cpow{{m}}(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_sum{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_diff{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_prod{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}}, {{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_neg{{func_suffix}}({{type}}); + static CYTHON_INLINE int __Pyx_c_is_zero{{func_suffix}}({{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_conj{{func_suffix}}({{type}}); + #if {{is_float}} + static CYTHON_INLINE {{real_type}} __Pyx_c_abs{{func_suffix}}({{type}}); + static CYTHON_INLINE {{type}} __Pyx_c_pow{{func_suffix}}({{type}}, {{type}}); + #endif +#endif + +/////////////// Arithmetic /////////////// + +#if CYTHON_CCOMPLEX && ({{is_float}}) && (!{{is_extern_float_typedef}} || __cplusplus) +#else + static CYTHON_INLINE int __Pyx_c_eq{{func_suffix}}({{type}} a, {{type}} b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE {{type}} __Pyx_c_sum{{func_suffix}}({{type}} a, {{type}} b) { + {{type}} z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE {{type}} __Pyx_c_diff{{func_suffix}}({{type}} a, {{type}} b) { + {{type}} z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE {{type}} __Pyx_c_prod{{func_suffix}}({{type}} a, {{type}} b) { + {{type}} z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + + #if {{is_float}} + static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}} a, {{type}} b) { + if (b.imag == 0) { + return {{type_name}}_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs{{m}}(b.real) >= fabs{{m}}(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return {{type_name}}_from_parts(a.real / b.real, a.imag / b.imag); + } else { + {{real_type}} r = b.imag / b.real; + {{real_type}} s = ({{real_type}})(1.0) / (b.real + b.imag * r); + return {{type_name}}_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + {{real_type}} r = b.real / b.imag; + {{real_type}} s = ({{real_type}})(1.0) / (b.imag + b.real * r); + return {{type_name}}_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}} a, {{type}} b) { + if (b.imag == 0) { + return {{type_name}}_from_parts(a.real / b.real, a.imag / b.real); + } else { + {{real_type}} denom = b.real * b.real + b.imag * b.imag; + return {{type_name}}_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + + static CYTHON_INLINE {{type}} __Pyx_c_neg{{func_suffix}}({{type}} a) { + {{type}} z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero{{func_suffix}}({{type}} a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE {{type}} __Pyx_c_conj{{func_suffix}}({{type}} a) { + {{type}} z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if {{is_float}} + static CYTHON_INLINE {{real_type}} 
__Pyx_c_abs{{func_suffix}}({{type}} z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt{{m}}(z.real*z.real + z.imag*z.imag); + #else + return hypot{{m}}(z.real, z.imag); + #endif + } + static CYTHON_INLINE {{type}} __Pyx_c_pow{{func_suffix}}({{type}} a, {{type}} b) { + {{type}} z; + {{real_type}} r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + {{real_type}} denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod{{func_suffix}}(a, a); + case 3: + z = __Pyx_c_prod{{func_suffix}}(a, a); + return __Pyx_c_prod{{func_suffix}}(z, a); + case 4: + z = __Pyx_c_prod{{func_suffix}}(a, a); + return __Pyx_c_prod{{func_suffix}}(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if ((b.imag == 0) && (a.real >= 0)) { + z.real = pow{{m}}(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2{{m}}(0.0, -1.0); + } + } else { + r = __Pyx_c_abs{{func_suffix}}(a); + theta = atan2{{m}}(a.imag, a.real); + } + lnr = log{{m}}(r); + z_r = exp{{m}}(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos{{m}}(z_theta); + z.imag = z_r * sin{{m}}(z_theta); + return z; + } + #endif +#endif + +/////////////// SoftComplexToDouble.proto ////////////////// + +static double __Pyx_SoftComplexToDouble(__pyx_t_double_complex value, int have_gil); /* proto */ + +/////////////// SoftComplexToDouble ////////////////// +//@requires: RealImag + +static double __Pyx_SoftComplexToDouble(__pyx_t_double_complex value, int have_gil) { + // This isn't an absolutely perfect match for the Python behaviour: + // In Python the type would be determined right after the number is + // created (usually '**'), while here it's determined when coerced + // to a PyObject, which may be a few operations later. 
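+    // Illustrative example (not part of the upstream file): in Python,
+    //     (-1.0) ** 0.5  ==  (6.123233995736766e-17+1j)
+    // i.e. '**' can silently produce a complex value, so a non-zero imaginary
+    // component must raise here instead of being dropped when narrowing to double.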
+ if (unlikely(__Pyx_CIMAG(value))) { + PyGILState_STATE gilstate; + if (!have_gil) + gilstate = PyGILState_Ensure(); + PyErr_SetString(PyExc_TypeError, + "Cannot convert 'complex' with non-zero imaginary component to 'double' " + "(this most likely comes from the '**' operator; " + "use 'cython.cpow(True)' to return 'nan' instead of a " + "complex number)."); + if (!have_gil) + PyGILState_Release(gilstate); + return -1.; + } + return __Pyx_CREAL(value); +} + +///////// SoftComplexToPy.proto /////////////////////// + +static PyObject *__pyx_Py_FromSoftComplex(__pyx_t_double_complex value); /* proto */ + +//////// SoftComplexToPy //////////////// +//@requires: RealImag + +static PyObject *__pyx_Py_FromSoftComplex(__pyx_t_double_complex value) { + if (__Pyx_CIMAG(value)) { + return PyComplex_FromDoubles(__Pyx_CREAL(value), __Pyx_CIMAG(value)); + } else { + return PyFloat_FromDouble(__Pyx_CREAL(value)); + } +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Coroutine.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Coroutine.c new file mode 100644 index 0000000000000000000000000000000000000000..688a62d8efb61094d1249dd1dbd18a8e20a8bfe3 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Coroutine.c @@ -0,0 +1,2623 @@ +//////////////////// GeneratorYieldFrom.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source); + +//////////////////// GeneratorYieldFrom //////////////////// +//@requires: Generator + +#if CYTHON_USE_TYPE_SLOTS +static void __Pyx_PyIter_CheckErrorAndDecref(PyObject *source) { + __Pyx_TypeName source_type_name = __Pyx_PyType_GetName(Py_TYPE(source)); + PyErr_Format(PyExc_TypeError, + "iter() returned non-iterator of type '" __Pyx_FMT_TYPENAME "'", source_type_name); + __Pyx_DECREF_TypeName(source_type_name); + Py_DECREF(source); +} +#endif + +static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) { + PyObject *source_gen, *retval; +#ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(source)) { + // TODO: this should only happen for types.coroutine()ed generators, but we can't determine that here + Py_INCREF(source); + source_gen = source; + retval = __Pyx_Generator_Next(source); + } else +#endif + { +#if CYTHON_USE_TYPE_SLOTS + if (likely(Py_TYPE(source)->tp_iter)) { + source_gen = Py_TYPE(source)->tp_iter(source); + if (unlikely(!source_gen)) + return NULL; + if (unlikely(!PyIter_Check(source_gen))) { + __Pyx_PyIter_CheckErrorAndDecref(source_gen); + return NULL; + } + } else + // CPython also allows non-iterable sequences to be iterated over +#endif + { + source_gen = PyObject_GetIter(source); + if (unlikely(!source_gen)) + return NULL; + } + // source_gen is now the iterator, make the first next() call + retval = __Pyx_PyObject_GetIterNextFunc(source_gen)(source_gen); + } + if (likely(retval)) { + gen->yieldfrom = source_gen; + return retval; + } + Py_DECREF(source_gen); + return NULL; +} + + +//////////////////// CoroutineYieldFrom.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source); + +//////////////////// CoroutineYieldFrom //////////////////// +//@requires: Coroutine +//@requires: GetAwaitIter + +static PyObject* __Pyx__Coroutine_Yield_From_Generic(__pyx_CoroutineObject *gen, PyObject *source) { + PyObject *retval; + PyObject *source_gen = __Pyx__Coroutine_GetAwaitableIter(source); + if (unlikely(!source_gen)) { 
+ return NULL; + } + // source_gen is now the iterator, make the first next() call + if (__Pyx_Coroutine_Check(source_gen)) { + retval = __Pyx_Generator_Next(source_gen); + } else { + retval = __Pyx_PyObject_GetIterNextFunc(source_gen)(source_gen); + } + if (retval) { + gen->yieldfrom = source_gen; + return retval; + } + Py_DECREF(source_gen); + return NULL; +} + +static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) { + PyObject *retval; + if (__Pyx_Coroutine_Check(source)) { + if (unlikely(((__pyx_CoroutineObject*)source)->yieldfrom)) { + PyErr_SetString( + PyExc_RuntimeError, + "coroutine is being awaited already"); + return NULL; + } + retval = __Pyx_Generator_Next(source); +#ifdef __Pyx_AsyncGen_USED + // inlined "__pyx_PyAsyncGenASend" handling to avoid the series of generic calls + } else if (__pyx_PyAsyncGenASend_CheckExact(source)) { + retval = __Pyx_async_gen_asend_iternext(source); +#endif + } else { + return __Pyx__Coroutine_Yield_From_Generic(gen, source); + } + if (retval) { + Py_INCREF(source); + gen->yieldfrom = source; + } + return retval; +} + + +//////////////////// GetAwaitIter.proto //////////////////// + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ +static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *o); /*proto*/ + +//////////////////// GetAwaitIter //////////////////// +//@requires: ObjectHandling.c::PyObjectGetMethod +//@requires: ObjectHandling.c::PyObjectCallNoArg +//@requires: ObjectHandling.c::PyObjectCallOneArg + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o) { +#ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(o)) { + return __Pyx_NewRef(o); + } +#endif + return __Pyx__Coroutine_GetAwaitableIter(o); +} + + +static void __Pyx_Coroutine_AwaitableIterError(PyObject *source) { +#if PY_VERSION_HEX >= 0x030600B3 && PY_VERSION_HEX < 0x030d0000 || defined(_PyErr_FormatFromCause) + __Pyx_TypeName source_type_name = __Pyx_PyType_GetName(Py_TYPE(source)); + _PyErr_FormatFromCause(PyExc_TypeError, + "'async for' received an invalid object from __anext__: " __Pyx_FMT_TYPENAME, source_type_name); + __Pyx_DECREF_TypeName(source_type_name); +#elif PY_MAJOR_VERSION >= 3 + PyObject *exc, *val, *val2, *tb; + __Pyx_TypeName source_type_name = __Pyx_PyType_GetName(Py_TYPE(source)); + assert(PyErr_Occurred()); + PyErr_Fetch(&exc, &val, &tb); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + assert(!PyErr_Occurred()); + PyErr_Format(PyExc_TypeError, + "'async for' received an invalid object from __anext__: " __Pyx_FMT_TYPENAME, source_type_name); + __Pyx_DECREF_TypeName(source_type_name); + + PyErr_Fetch(&exc, &val2, &tb); + PyErr_NormalizeException(&exc, &val2, &tb); + Py_INCREF(val); + PyException_SetCause(val2, val); + PyException_SetContext(val2, val); + PyErr_Restore(exc, val2, tb); +#else + // since Py2 does not have exception chaining, it's better to avoid shadowing exceptions there + CYTHON_UNUSED_VAR(source); +#endif +} + +// adapted from genobject.c in Py3.5 +static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *obj) { + PyObject *res; +#if CYTHON_USE_ASYNC_SLOTS + __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); + if (likely(am && am->am_await)) { + res = (*am->am_await)(obj); + } else +#endif +#if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) + if (PyCoro_CheckExact(obj)) { + return __Pyx_NewRef(obj); + } else 
+#endif +#if CYTHON_COMPILING_IN_CPYTHON && defined(CO_ITERABLE_COROUTINE) +#if PY_VERSION_HEX >= 0x030C00A6 + if (PyGen_CheckExact(obj) && (PyGen_GetCode((PyGenObject*)obj)->co_flags & CO_ITERABLE_COROUTINE)) { +#else + if (PyGen_CheckExact(obj) && ((PyGenObject*)obj)->gi_code && ((PyCodeObject *)((PyGenObject*)obj)->gi_code)->co_flags & CO_ITERABLE_COROUTINE) { +#endif + // Python generator marked with "@types.coroutine" decorator + return __Pyx_NewRef(obj); + } else +#endif + { + PyObject *method = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, PYIDENT("__await__"), &method); + if (likely(is_method)) { + res = __Pyx_PyObject_CallOneArg(method, obj); + } else if (likely(method)) { + res = __Pyx_PyObject_CallNoArg(method); + } else + goto slot_error; + Py_DECREF(method); + } + if (unlikely(!res)) { + // surprisingly, CPython replaces the exception here... + __Pyx_Coroutine_AwaitableIterError(obj); + goto bad; + } + if (unlikely(!PyIter_Check(res))) { + __Pyx_TypeName res_type_name = __Pyx_PyType_GetName(Py_TYPE(res)); + PyErr_Format(PyExc_TypeError, + "__await__() returned non-iterator of type '" __Pyx_FMT_TYPENAME "'", res_type_name); + __Pyx_DECREF_TypeName(res_type_name); + Py_CLEAR(res); + } else { + int is_coroutine = 0; + #ifdef __Pyx_Coroutine_USED + is_coroutine |= __Pyx_Coroutine_Check(res); + #endif + #if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact) + is_coroutine |= PyCoro_CheckExact(res); + #endif + if (unlikely(is_coroutine)) { + /* __await__ must return an *iterator*, not + a coroutine or another awaitable (see PEP 492) */ + PyErr_SetString(PyExc_TypeError, + "__await__() returned a coroutine"); + Py_CLEAR(res); + } + } + return res; +slot_error: + { + __Pyx_TypeName obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "object " __Pyx_FMT_TYPENAME " can't be used in 'await' expression", obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + } +bad: + return NULL; +} + + +//////////////////// AsyncIter.proto //////////////////// + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *o); /*proto*/ +static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *o); /*proto*/ + +//////////////////// AsyncIter //////////////////// +//@requires: GetAwaitIter +//@requires: ObjectHandling.c::PyObjectCallMethod0 + +static PyObject *__Pyx_Coroutine_GetAsyncIter_Generic(PyObject *obj) { + __Pyx_TypeName obj_type_name; +#if PY_VERSION_HEX < 0x030500B1 + { + PyObject *iter = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__aiter__")); + if (likely(iter)) + return iter; + // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__aiter__' + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) + return NULL; + } +#else + // avoid C warning about 'unused function' + (void)&__Pyx_PyObject_CallMethod0; +#endif + + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'async for' requires an object with __aiter__ method, got " __Pyx_FMT_TYPENAME, obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return NULL; +} + + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *obj) { +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(obj)) { + return __Pyx_NewRef(obj); + } +#endif +#if CYTHON_USE_ASYNC_SLOTS + { + __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); + if (likely(am && am->am_aiter)) { + return (*am->am_aiter)(obj); + } + } +#endif + return __Pyx_Coroutine_GetAsyncIter_Generic(obj); +} + + +static 
PyObject *__Pyx__Coroutine_AsyncIterNext(PyObject *obj) { +#if PY_VERSION_HEX < 0x030500B1 + { + PyObject *value = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__anext__")); + if (likely(value)) + return value; + } + // FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__anext__' + if (PyErr_ExceptionMatches(PyExc_AttributeError)) +#endif + { + __Pyx_TypeName obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'async for' requires an object with __anext__ method, got " __Pyx_FMT_TYPENAME, obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + } + return NULL; +} + + +static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *obj) { +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(obj)) { + return __Pyx_async_gen_anext(obj); + } +#endif +#if CYTHON_USE_ASYNC_SLOTS + { + __Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj); + if (likely(am && am->am_anext)) { + return (*am->am_anext)(obj); + } + } +#endif + return __Pyx__Coroutine_AsyncIterNext(obj); +} + + +//////////////////// pep479.proto //////////////////// + +static void __Pyx_Generator_Replace_StopIteration(int in_async_gen); /*proto*/ + +//////////////////// pep479 //////////////////// +//@requires: Exceptions.c::GetException + +static void __Pyx_Generator_Replace_StopIteration(int in_async_gen) { + PyObject *exc, *val, *tb, *cur_exc; + __Pyx_PyThreadState_declare + #ifdef __Pyx_StopAsyncIteration_USED + int is_async_stopiteration = 0; + #endif + CYTHON_MAYBE_UNUSED_VAR(in_async_gen); + + cur_exc = PyErr_Occurred(); + if (likely(!__Pyx_PyErr_GivenExceptionMatches(cur_exc, PyExc_StopIteration))) { + #ifdef __Pyx_StopAsyncIteration_USED + if (in_async_gen && unlikely(__Pyx_PyErr_GivenExceptionMatches(cur_exc, __Pyx_PyExc_StopAsyncIteration))) { + is_async_stopiteration = 1; + } else + #endif + return; + } + + __Pyx_PyThreadState_assign + // Chain exceptions by moving Stop(Async)Iteration to exc_info before creating the RuntimeError. + // In Py2.x, no chaining happens, but the exception still stays visible in exc_info. + __Pyx_GetException(&exc, &val, &tb); + Py_XDECREF(exc); + Py_XDECREF(val); + Py_XDECREF(tb); + PyErr_SetString(PyExc_RuntimeError, + #ifdef __Pyx_StopAsyncIteration_USED + is_async_stopiteration ? "async generator raised StopAsyncIteration" : + in_async_gen ? "async generator raised StopIteration" : + #endif + "generator raised StopIteration"); +} + + +//////////////////// CoroutineBase.proto //////////////////// +//@substitute: naming + +struct __pyx_CoroutineObject; +typedef PyObject *(*__pyx_coroutine_body_t)(struct __pyx_CoroutineObject *, PyThreadState *, PyObject *); + +#if CYTHON_USE_EXC_INFO_STACK +// See https://bugs.python.org/issue25612 +#define __Pyx_ExcInfoStruct _PyErr_StackItem +#else +// Minimal replacement struct for Py<3.7, without the Py3.7 exception state stack. 
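+// Illustrative note (not part of the upstream file): the three fields mirror
+// the (type, value, traceback) triple of sys.exc_info(); Py3.7+ keeps the same
+// data in the _PyErr_StackItem aliased above (reduced to exc_value on 3.11+).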
+typedef struct { + PyObject *exc_type; + PyObject *exc_value; + PyObject *exc_traceback; +} __Pyx_ExcInfoStruct; +#endif + +typedef struct __pyx_CoroutineObject { + PyObject_HEAD + __pyx_coroutine_body_t body; + PyObject *closure; + __Pyx_ExcInfoStruct gi_exc_state; + PyObject *gi_weakreflist; + PyObject *classobj; + PyObject *yieldfrom; + PyObject *gi_name; + PyObject *gi_qualname; + PyObject *gi_modulename; + PyObject *gi_code; + PyObject *gi_frame; + int resume_label; + // using T_BOOL for property below requires char value + char is_running; +} __pyx_CoroutineObject; + +static __pyx_CoroutineObject *__Pyx__Coroutine_New( + PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/ + +static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( + __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/ + +static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self); +static int __Pyx_Coroutine_clear(PyObject *self); /*proto*/ +static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value); /*proto*/ +static PyObject *__Pyx_Coroutine_Close(PyObject *self); /*proto*/ +static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args); /*proto*/ + +// macros for exception state swapping instead of inline functions to make use of the local thread state context +#if CYTHON_USE_EXC_INFO_STACK +#define __Pyx_Coroutine_SwapException(self) +#define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state) +#else +#define __Pyx_Coroutine_SwapException(self) { \ + __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback); \ + __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state); \ + } +#define __Pyx_Coroutine_ResetAndClearException(self) { \ + __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback); \ + (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL; \ + } +#endif + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyGen_FetchStopIterationValue(pvalue) \ + __Pyx_PyGen__FetchStopIterationValue($local_tstate_cname, pvalue) +#else +#define __Pyx_PyGen_FetchStopIterationValue(pvalue) \ + __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue) +#endif +static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue); /*proto*/ +static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state); /*proto*/ + + +//////////////////// Coroutine.proto //////////////////// + +#define __Pyx_Coroutine_USED +#define __Pyx_Coroutine_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CoroutineType) +// __Pyx_Coroutine_Check(obj): see override for IterableCoroutine below +#define __Pyx_Coroutine_Check(obj) __Pyx_Coroutine_CheckExact(obj) +#define __Pyx_CoroutineAwait_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CoroutineAwaitType) + +#define __Pyx_Coroutine_New(body, code, closure, name, qualname, module_name) \ + __Pyx__Coroutine_New(__pyx_CoroutineType, body, code, closure, name, qualname, module_name) + +static int __pyx_Coroutine_init(PyObject *module); /*proto*/ +static PyObject *__Pyx__Coroutine_await(PyObject *coroutine); /*proto*/ + +typedef struct { + PyObject_HEAD + PyObject *coroutine; +} 
__pyx_CoroutineAwaitObject;
+
+static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, PyObject *arg); /*proto*/
+static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args); /*proto*/
+
+
+//////////////////// Generator.proto ////////////////////
+
+#define __Pyx_Generator_USED
+#define __Pyx_Generator_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_GeneratorType)
+
+#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name) \
+    __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name)
+
+static PyObject *__Pyx_Generator_Next(PyObject *self);
+static int __pyx_Generator_init(PyObject *module); /*proto*/
+
+
+//////////////////// AsyncGen ////////////////////
+//@requires: AsyncGen.c::AsyncGenerator
+// -> empty, only delegates to separate file
+
+
+//////////////////// CoroutineBase ////////////////////
+//@substitute: naming
+//@requires: Exceptions.c::PyErrFetchRestore
+//@requires: Exceptions.c::PyThreadStateGet
+//@requires: Exceptions.c::SwapException
+//@requires: Exceptions.c::RaiseException
+//@requires: Exceptions.c::SaveResetException
+//@requires: ObjectHandling.c::PyObjectCallMethod1
+//@requires: ObjectHandling.c::PyObjectCallNoArg
+//@requires: ObjectHandling.c::PyObjectCallOneArg
+//@requires: ObjectHandling.c::PyObjectFastCall
+//@requires: ObjectHandling.c::PyObjectGetAttrStr
+//@requires: ObjectHandling.c::PyObjectGetAttrStrNoError
+//@requires: CommonStructures.c::FetchCommonType
+//@requires: ModuleSetupCode.c::IncludeStructmemberH
+
+#include <frameobject.h>
+#if PY_VERSION_HEX >= 0x030b00a6
+  #ifndef Py_BUILD_CORE
+    #define Py_BUILD_CORE 1
+  #endif
+  #include "internal/pycore_frame.h"
+#endif
+
+#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom)
+
+// If StopIteration exception is set, fetches its 'value'
+// attribute if any, otherwise sets pvalue to None.
+//
+// Returns 0 if no exception or StopIteration is set.
+// If any other exception is set, returns -1 and leaves
+// pvalue unchanged.
+static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *$local_tstate_cname, PyObject **pvalue) {
+    PyObject *et, *ev, *tb;
+    PyObject *value = NULL;
+    CYTHON_UNUSED_VAR($local_tstate_cname);
+
+    __Pyx_ErrFetch(&et, &ev, &tb);
+
+    if (!et) {
+        Py_XDECREF(tb);
+        Py_XDECREF(ev);
+        Py_INCREF(Py_None);
+        *pvalue = Py_None;
+        return 0;
+    }
+
+    // most common case: plain StopIteration without or with separate argument
+    if (likely(et == PyExc_StopIteration)) {
+        if (!ev) {
+            Py_INCREF(Py_None);
+            value = Py_None;
+        }
+#if PY_VERSION_HEX >= 0x030300A0
+        else if (likely(__Pyx_IS_TYPE(ev, (PyTypeObject*)PyExc_StopIteration))) {
+            value = ((PyStopIterationObject *)ev)->value;
+            Py_INCREF(value);
+            Py_DECREF(ev);
+        }
+#endif
+        // PyErr_SetObject() and friends put the value directly into ev
+        else if (unlikely(PyTuple_Check(ev))) {
+            // if it's a tuple, it is interpreted as separate constructor arguments (surprise!)
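+            // Illustrative example (not part of the upstream file):
+            //     StopIteration(1, 2).value == 1     # first constructor argument
+            // whereas PyErr_SetObject(PyExc_StopIteration, some_tuple) stores the
+            // bare tuple in "ev", so its first item has to be unpacked here.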
+ if (PyTuple_GET_SIZE(ev) >= 1) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + value = PyTuple_GET_ITEM(ev, 0); + Py_INCREF(value); +#else + value = PySequence_ITEM(ev, 0); +#endif + } else { + Py_INCREF(Py_None); + value = Py_None; + } + Py_DECREF(ev); + } + else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) { + // 'steal' reference to ev + value = ev; + } + if (likely(value)) { + Py_XDECREF(tb); + Py_DECREF(et); + *pvalue = value; + return 0; + } + } else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) { + __Pyx_ErrRestore(et, ev, tb); + return -1; + } + + // otherwise: normalise and check what that gives us + PyErr_NormalizeException(&et, &ev, &tb); + if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) { + // looks like normalisation failed - raise the new exception + __Pyx_ErrRestore(et, ev, tb); + return -1; + } + Py_XDECREF(tb); + Py_DECREF(et); +#if PY_VERSION_HEX >= 0x030300A0 + value = ((PyStopIterationObject *)ev)->value; + Py_INCREF(value); + Py_DECREF(ev); +#else + { + PyObject* args = __Pyx_PyObject_GetAttrStr(ev, PYIDENT("args")); + Py_DECREF(ev); + if (likely(args)) { + value = PySequence_GetItem(args, 0); + Py_DECREF(args); + } + if (unlikely(!value)) { + __Pyx_ErrRestore(NULL, NULL, NULL); + Py_INCREF(Py_None); + value = Py_None; + } + } +#endif + *pvalue = value; + return 0; +} + +static CYTHON_INLINE +void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) { +#if PY_VERSION_HEX >= 0x030B00a4 + Py_CLEAR(exc_state->exc_value); +#else + PyObject *t, *v, *tb; + t = exc_state->exc_type; + v = exc_state->exc_value; + tb = exc_state->exc_traceback; + + exc_state->exc_type = NULL; + exc_state->exc_value = NULL; + exc_state->exc_traceback = NULL; + + Py_XDECREF(t); + Py_XDECREF(v); + Py_XDECREF(tb); +#endif +} + +#define __Pyx_Coroutine_AlreadyRunningError(gen) (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL) +static void __Pyx__Coroutine_AlreadyRunningError(__pyx_CoroutineObject *gen) { + const char *msg; + CYTHON_MAYBE_UNUSED_VAR(gen); + if ((0)) { + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_Coroutine_Check((PyObject*)gen)) { + msg = "coroutine already executing"; + #endif + #ifdef __Pyx_AsyncGen_USED + } else if (__Pyx_AsyncGen_CheckExact((PyObject*)gen)) { + msg = "async generator already executing"; + #endif + } else { + msg = "generator already executing"; + } + PyErr_SetString(PyExc_ValueError, msg); +} + +#define __Pyx_Coroutine_NotStartedError(gen) (__Pyx__Coroutine_NotStartedError(gen), (PyObject*)NULL) +static void __Pyx__Coroutine_NotStartedError(PyObject *gen) { + const char *msg; + CYTHON_MAYBE_UNUSED_VAR(gen); + if ((0)) { + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_Coroutine_Check(gen)) { + msg = "can't send non-None value to a just-started coroutine"; + #endif + #ifdef __Pyx_AsyncGen_USED + } else if (__Pyx_AsyncGen_CheckExact(gen)) { + msg = "can't send non-None value to a just-started async generator"; + #endif + } else { + msg = "can't send non-None value to a just-started generator"; + } + PyErr_SetString(PyExc_TypeError, msg); +} + +#define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing) (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL) +static void __Pyx__Coroutine_AlreadyTerminatedError(PyObject *gen, PyObject *value, int closing) { + CYTHON_MAYBE_UNUSED_VAR(gen); + CYTHON_MAYBE_UNUSED_VAR(closing); + #ifdef __Pyx_Coroutine_USED + if (!closing && __Pyx_Coroutine_Check(gen)) { + // `self` is an exhausted 
coroutine: raise an error, + // except when called from gen_close(), which should + // always be a silent method. + PyErr_SetString(PyExc_RuntimeError, "cannot reuse already awaited coroutine"); + } else + #endif + if (value) { + // `gen` is an exhausted generator: + // only set exception if called from send(). + #ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(gen)) + PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); + else + #endif + PyErr_SetNone(PyExc_StopIteration); + } +} + +static +PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, int closing) { + __Pyx_PyThreadState_declare + PyThreadState *tstate; + __Pyx_ExcInfoStruct *exc_state; + PyObject *retval; + + assert(!self->is_running); + + if (unlikely(self->resume_label == 0)) { + if (unlikely(value && value != Py_None)) { + return __Pyx_Coroutine_NotStartedError((PyObject*)self); + } + } + + if (unlikely(self->resume_label == -1)) { + return __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing); + } + +#if CYTHON_FAST_THREAD_STATE + __Pyx_PyThreadState_assign + tstate = $local_tstate_cname; +#else + tstate = __Pyx_PyThreadState_Current; +#endif + + // Traceback/Frame rules pre-Py3.7: + // - on entry, save external exception state in self->gi_exc_state, restore it on exit + // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else + // - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame + // - on exit, clear "f_back" of internal exception traceback + // - do not touch external frames and tracebacks + + // Traceback/Frame rules for Py3.7+ (CYTHON_USE_EXC_INFO_STACK): + // - on entry, push internal exception state in self->gi_exc_state on the exception stack + // - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else + // - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame + // - on exit, clear "f_back" of internal exception traceback + // - do not touch external frames and tracebacks + + exc_state = &self->gi_exc_state; + if (exc_state->exc_value) { + #if CYTHON_COMPILING_IN_PYPY + // FIXME: what to do in PyPy? + #else + // Generators always return to their most recent caller, not + // necessarily their creator. + PyObject *exc_tb; + #if PY_VERSION_HEX >= 0x030B00a4 && !CYTHON_COMPILING_IN_CPYTHON + // owned reference! + exc_tb = PyException_GetTraceback(exc_state->exc_value); + #elif PY_VERSION_HEX >= 0x030B00a4 + exc_tb = ((PyBaseExceptionObject*) exc_state->exc_value)->traceback; + #else + exc_tb = exc_state->exc_traceback; + #endif + if (exc_tb) { + PyTracebackObject *tb = (PyTracebackObject *) exc_tb; + PyFrameObject *f = tb->tb_frame; + + assert(f->f_back == NULL); + #if PY_VERSION_HEX >= 0x030B00A1 + // PyThreadState_GetFrame returns NULL if there isn't a current frame + // which is a valid state so no need to check + f->f_back = PyThreadState_GetFrame(tstate); + #else + Py_XINCREF(tstate->frame); + f->f_back = tstate->frame; + #endif + #if PY_VERSION_HEX >= 0x030B00a4 && !CYTHON_COMPILING_IN_CPYTHON + Py_DECREF(exc_tb); + #endif + } + #endif + } + +#if CYTHON_USE_EXC_INFO_STACK + // See https://bugs.python.org/issue25612 + exc_state->previous_item = tstate->exc_info; + tstate->exc_info = exc_state; +#else + if (exc_state->exc_type) { + // We were in an except handler when we left, + // restore the exception state which was put aside. 
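+        // Illustrative example (not part of the upstream file) of what this
+        // swap preserves, in Python terms:
+        //     def g():
+        //         try:
+        //             raise ValueError()
+        //         except ValueError:
+        //             yield                      # suspends inside the handler
+        //             print(sys.exc_info()[0])   # must still be ValueError here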
+ __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); + // self->exc_* now holds the exception state of the caller + } else { + // save away the exception state of the caller + __Pyx_Coroutine_ExceptionClear(exc_state); + __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); + } +#endif + + self->is_running = 1; + retval = self->body(self, tstate, value); + self->is_running = 0; + +#if CYTHON_USE_EXC_INFO_STACK + // See https://bugs.python.org/issue25612 + exc_state = &self->gi_exc_state; + tstate->exc_info = exc_state->previous_item; + exc_state->previous_item = NULL; + // Cut off the exception frame chain so that we can reconnect it on re-entry above. + __Pyx_Coroutine_ResetFrameBackpointer(exc_state); +#endif + + return retval; +} + +static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) { + // Don't keep the reference to f_back any longer than necessary. It + // may keep a chain of frames alive or it could create a reference + // cycle. +#if CYTHON_COMPILING_IN_PYPY + // FIXME: what to do in PyPy? + CYTHON_UNUSED_VAR(exc_state); +#else + PyObject *exc_tb; + + #if PY_VERSION_HEX >= 0x030B00a4 + if (!exc_state->exc_value) return; + // owned reference! + exc_tb = PyException_GetTraceback(exc_state->exc_value); + #else + exc_tb = exc_state->exc_traceback; + #endif + + if (likely(exc_tb)) { + PyTracebackObject *tb = (PyTracebackObject *) exc_tb; + PyFrameObject *f = tb->tb_frame; + Py_CLEAR(f->f_back); + #if PY_VERSION_HEX >= 0x030B00a4 + Py_DECREF(exc_tb); + #endif + } +#endif +} + +static CYTHON_INLINE +PyObject *__Pyx_Coroutine_MethodReturn(PyObject* gen, PyObject *retval) { + CYTHON_MAYBE_UNUSED_VAR(gen); + if (unlikely(!retval)) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (!__Pyx_PyErr_Occurred()) { + // method call must not terminate with NULL without setting an exception + PyObject *exc = PyExc_StopIteration; + #ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(gen)) + exc = __Pyx_PyExc_StopAsyncIteration; + #endif + __Pyx_PyErr_SetNone(exc); + } + } + return retval; +} + +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) +static CYTHON_INLINE +PyObject *__Pyx_PyGen_Send(PyGenObject *gen, PyObject *arg) { +#if PY_VERSION_HEX <= 0x030A00A1 + return _PyGen_Send(gen, arg); +#else + PyObject *result; + // PyIter_Send() asserts non-NULL arg + if (PyIter_Send((PyObject*)gen, arg ? 
arg : Py_None, &result) == PYGEN_RETURN) { + if (PyAsyncGen_CheckExact(gen)) { + assert(result == Py_None); + PyErr_SetNone(PyExc_StopAsyncIteration); + } + else if (result == Py_None) { + PyErr_SetNone(PyExc_StopIteration); + } + else { +#if PY_VERSION_HEX < 0x030d00A1 + _PyGen_SetStopIterationValue(result); +#else + if (!PyTuple_Check(result) && !PyExceptionInstance_Check(result)) { + // delay instantiation if possible + PyErr_SetObject(PyExc_StopIteration, result); + } else { + PyObject *exc = __Pyx_PyObject_CallOneArg(PyExc_StopIteration, result); + if (likely(exc != NULL)) { + PyErr_SetObject(PyExc_StopIteration, exc); + Py_DECREF(exc); + } + } +#endif + } + Py_DECREF(result); + result = NULL; + } + return result; +#endif +} +#endif + +static CYTHON_INLINE +PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) { + PyObject *ret; + PyObject *val = NULL; + __Pyx_Coroutine_Undelegate(gen); + __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val); + // val == NULL on failure => pass on exception + ret = __Pyx_Coroutine_SendEx(gen, val, 0); + Py_XDECREF(val); + return ret; +} + +static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) { + PyObject *retval; + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; + PyObject *yf = gen->yieldfrom; + if (unlikely(gen->is_running)) + return __Pyx_Coroutine_AlreadyRunningError(gen); + if (yf) { + PyObject *ret; + // FIXME: does this really need an INCREF() ? + //Py_INCREF(yf); + gen->is_running = 1; + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + ret = __Pyx_Coroutine_Send(yf, value); + } else + #endif + #ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(yf)) { + ret = __Pyx_Coroutine_Send(yf, value); + } else + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_PyAsyncGenASend_CheckExact(yf)) { + ret = __Pyx_async_gen_asend_send(yf, value); + } else + #endif + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) + // _PyGen_Send() is not exported before Py3.6 + if (PyGen_CheckExact(yf)) { + ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); + } else + #endif + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03050000 && defined(PyCoro_CheckExact) && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) + // _PyGen_Send() is not exported before Py3.6 + if (PyCoro_CheckExact(yf)) { + ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); + } else + #endif + { + if (value == Py_None) + ret = __Pyx_PyObject_GetIterNextFunc(yf)(yf); + else + ret = __Pyx_PyObject_CallMethod1(yf, PYIDENT("send"), value); + } + gen->is_running = 0; + //Py_DECREF(yf); + if (likely(ret)) { + return ret; + } + retval = __Pyx_Coroutine_FinishDelegation(gen); + } else { + retval = __Pyx_Coroutine_SendEx(gen, value, 0); + } + return __Pyx_Coroutine_MethodReturn(self, retval); +} + +// This helper function is used by gen_close and gen_throw to +// close a subiterator being delegated to by yield-from. 
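+// Illustrative example (not part of the upstream file), in Python terms:
+//     def inner(): yield 1
+//     def outer(): yield from inner()
+//     g = outer(); next(g); g.close()
+// PEP 380 requires close() on "outer" to close the delegated "inner" first,
+// which is what the type-specific dispatch below implements.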
+static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) { + PyObject *retval = NULL; + int err = 0; + + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + retval = __Pyx_Coroutine_Close(yf); + if (!retval) + return -1; + } else + #endif + #ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(yf)) { + retval = __Pyx_Coroutine_Close(yf); + if (!retval) + return -1; + } else + if (__Pyx_CoroutineAwait_CheckExact(yf)) { + retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL); + if (!retval) + return -1; + } else + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_PyAsyncGenASend_CheckExact(yf)) { + retval = __Pyx_async_gen_asend_close(yf, NULL); + // cannot fail + } else + if (__pyx_PyAsyncGenAThrow_CheckExact(yf)) { + retval = __Pyx_async_gen_athrow_close(yf, NULL); + // cannot fail + } else + #endif + { + PyObject *meth; + gen->is_running = 1; + meth = __Pyx_PyObject_GetAttrStrNoError(yf, PYIDENT("close")); + if (unlikely(!meth)) { + if (unlikely(PyErr_Occurred())) { + PyErr_WriteUnraisable(yf); + } + } else { + retval = __Pyx_PyObject_CallNoArg(meth); + Py_DECREF(meth); + if (unlikely(!retval)) + err = -1; + } + gen->is_running = 0; + } + Py_XDECREF(retval); + return err; +} + +static PyObject *__Pyx_Generator_Next(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; + PyObject *yf = gen->yieldfrom; + if (unlikely(gen->is_running)) + return __Pyx_Coroutine_AlreadyRunningError(gen); + if (yf) { + PyObject *ret; + // FIXME: does this really need an INCREF() ? + //Py_INCREF(yf); + // YieldFrom code ensures that yf is an iterator + gen->is_running = 1; + #ifdef __Pyx_Generator_USED + if (__Pyx_Generator_CheckExact(yf)) { + ret = __Pyx_Generator_Next(yf); + } else + #endif + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) + // _PyGen_Send() is not exported before Py3.6 + if (PyGen_CheckExact(yf)) { + ret = __Pyx_PyGen_Send((PyGenObject*)yf, NULL); + } else + #endif + #ifdef __Pyx_Coroutine_USED + if (__Pyx_Coroutine_Check(yf)) { + ret = __Pyx_Coroutine_Send(yf, Py_None); + } else + #endif + ret = __Pyx_PyObject_GetIterNextFunc(yf)(yf); + gen->is_running = 0; + //Py_DECREF(yf); + if (likely(ret)) { + return ret; + } + return __Pyx_Coroutine_FinishDelegation(gen); + } + return __Pyx_Coroutine_SendEx(gen, Py_None, 0); +} + +static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + return __Pyx_Coroutine_Close(self); +} + +static PyObject *__Pyx_Coroutine_Close(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + PyObject *retval, *raised_exception; + PyObject *yf = gen->yieldfrom; + int err = 0; + + if (unlikely(gen->is_running)) + return __Pyx_Coroutine_AlreadyRunningError(gen); + + if (yf) { + Py_INCREF(yf); + err = __Pyx_Coroutine_CloseIter(gen, yf); + __Pyx_Coroutine_Undelegate(gen); + Py_DECREF(yf); + } + if (err == 0) + PyErr_SetNone(PyExc_GeneratorExit); + retval = __Pyx_Coroutine_SendEx(gen, NULL, 1); + if (unlikely(retval)) { + const char *msg; + Py_DECREF(retval); + if ((0)) { + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_Coroutine_Check(self)) { + msg = "coroutine ignored GeneratorExit"; + #endif + #ifdef __Pyx_AsyncGen_USED + } else if (__Pyx_AsyncGen_CheckExact(self)) { +#if PY_VERSION_HEX < 0x03060000 + msg = "async generator ignored GeneratorExit - might require Python 3.6+ finalisation (PEP 525)"; +#else + msg = "async generator ignored 
GeneratorExit"; +#endif + #endif + } else { + msg = "generator ignored GeneratorExit"; + } + PyErr_SetString(PyExc_RuntimeError, msg); + return NULL; + } + raised_exception = PyErr_Occurred(); + if (likely(!raised_exception || __Pyx_PyErr_GivenExceptionMatches2(raised_exception, PyExc_GeneratorExit, PyExc_StopIteration))) { + // ignore these errors + if (raised_exception) PyErr_Clear(); + Py_INCREF(Py_None); + return Py_None; + } + return NULL; +} + +static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb, + PyObject *args, int close_on_genexit) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + PyObject *yf = gen->yieldfrom; + + if (unlikely(gen->is_running)) + return __Pyx_Coroutine_AlreadyRunningError(gen); + + if (yf) { + PyObject *ret; + Py_INCREF(yf); + if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && close_on_genexit) { + // Asynchronous generators *should not* be closed right away. + // We have to allow some awaits to work it through, hence the + // `close_on_genexit` parameter here. + int err = __Pyx_Coroutine_CloseIter(gen, yf); + Py_DECREF(yf); + __Pyx_Coroutine_Undelegate(gen); + if (err < 0) + return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); + goto throw_here; + } + gen->is_running = 1; + if (0 + #ifdef __Pyx_Generator_USED + || __Pyx_Generator_CheckExact(yf) + #endif + #ifdef __Pyx_Coroutine_USED + || __Pyx_Coroutine_Check(yf) + #endif + ) { + ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit); + #ifdef __Pyx_Coroutine_USED + } else if (__Pyx_CoroutineAwait_CheckExact(yf)) { + ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit); + #endif + } else { + PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(yf, PYIDENT("throw")); + if (unlikely(!meth)) { + Py_DECREF(yf); + if (unlikely(PyErr_Occurred())) { + gen->is_running = 0; + return NULL; + } + __Pyx_Coroutine_Undelegate(gen); + gen->is_running = 0; + goto throw_here; + } + if (likely(args)) { + ret = __Pyx_PyObject_Call(meth, args, NULL); + } else { + // "tb" or even "val" might be NULL, but that also correctly terminates the argument list + PyObject *cargs[4] = {NULL, typ, val, tb}; + ret = __Pyx_PyObject_FastCall(meth, cargs+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); + } + Py_DECREF(meth); + } + gen->is_running = 0; + Py_DECREF(yf); + if (!ret) { + ret = __Pyx_Coroutine_FinishDelegation(gen); + } + return __Pyx_Coroutine_MethodReturn(self, ret); + } +throw_here: + __Pyx_Raise(typ, val, tb, NULL); + return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); +} + +static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) { + PyObject *typ; + PyObject *val = NULL; + PyObject *tb = NULL; + + if (unlikely(!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb))) + return NULL; + + return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1); +} + +static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) { +#if PY_VERSION_HEX >= 0x030B00a4 + Py_VISIT(exc_state->exc_value); +#else + Py_VISIT(exc_state->exc_type); + Py_VISIT(exc_state->exc_value); + Py_VISIT(exc_state->exc_traceback); +#endif + return 0; +} + +static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) { + Py_VISIT(gen->closure); + Py_VISIT(gen->classobj); + Py_VISIT(gen->yieldfrom); + return 
__Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg); +} + +static int __Pyx_Coroutine_clear(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + + Py_CLEAR(gen->closure); + Py_CLEAR(gen->classobj); + Py_CLEAR(gen->yieldfrom); + __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state); +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(self)) { + Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer); + } +#endif + Py_CLEAR(gen->gi_code); + Py_CLEAR(gen->gi_frame); + Py_CLEAR(gen->gi_name); + Py_CLEAR(gen->gi_qualname); + Py_CLEAR(gen->gi_modulename); + return 0; +} + +static void __Pyx_Coroutine_dealloc(PyObject *self) { + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + + PyObject_GC_UnTrack(gen); + if (gen->gi_weakreflist != NULL) + PyObject_ClearWeakRefs(self); + + if (gen->resume_label >= 0) { + // Generator is paused or unstarted, so we need to close + PyObject_GC_Track(self); +#if PY_VERSION_HEX >= 0x030400a1 && CYTHON_USE_TP_FINALIZE + if (unlikely(PyObject_CallFinalizerFromDealloc(self))) +#else + Py_TYPE(gen)->tp_del(self); + if (unlikely(Py_REFCNT(self) > 0)) +#endif + { + // resurrected. :( + return; + } + PyObject_GC_UnTrack(self); + } + +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(self)) { + /* We have to handle this case for asynchronous generators + right here, because this code has to be between UNTRACK + and GC_Del. */ + Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer); + } +#endif + __Pyx_Coroutine_clear(self); + __Pyx_PyHeapTypeObject_GC_Del(gen); +} + +static void __Pyx_Coroutine_del(PyObject *self) { + PyObject *error_type, *error_value, *error_traceback; + __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; + __Pyx_PyThreadState_declare + + if (gen->resume_label < 0) { + // already terminated => nothing to clean up + return; + } + +#if !CYTHON_USE_TP_FINALIZE + // Temporarily resurrect the object. + assert(self->ob_refcnt == 0); + __Pyx_SET_REFCNT(self, 1); +#endif + + __Pyx_PyThreadState_assign + + // Save the current exception, if any. + __Pyx_ErrFetch(&error_type, &error_value, &error_traceback); + +#ifdef __Pyx_AsyncGen_USED + if (__Pyx_AsyncGen_CheckExact(self)) { + __pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self; + PyObject *finalizer = agen->ag_finalizer; + if (finalizer && !agen->ag_closed) { + PyObject *res = __Pyx_PyObject_CallOneArg(finalizer, self); + if (unlikely(!res)) { + PyErr_WriteUnraisable(self); + } else { + Py_DECREF(res); + } + // Restore the saved exception. 
+ __Pyx_ErrRestore(error_type, error_value, error_traceback); + return; + } + } +#endif + + if (unlikely(gen->resume_label == 0 && !error_value)) { +#ifdef __Pyx_Coroutine_USED +#ifdef __Pyx_Generator_USED + // only warn about (async) coroutines + if (!__Pyx_Generator_CheckExact(self)) +#endif + { + // untrack dead object as we are executing Python code (which might trigger GC) + PyObject_GC_UnTrack(self); +#if PY_MAJOR_VERSION >= 3 /* PY_VERSION_HEX >= 0x03030000*/ || defined(PyErr_WarnFormat) + if (unlikely(PyErr_WarnFormat(PyExc_RuntimeWarning, 1, "coroutine '%.50S' was never awaited", gen->gi_qualname) < 0)) + PyErr_WriteUnraisable(self); +#else + {PyObject *msg; + char *cmsg; + #if CYTHON_COMPILING_IN_PYPY + msg = NULL; + cmsg = (char*) "coroutine was never awaited"; + #else + char *cname; + PyObject *qualname; + qualname = gen->gi_qualname; + cname = PyString_AS_STRING(qualname); + msg = PyString_FromFormat("coroutine '%.50s' was never awaited", cname); + + if (unlikely(!msg)) { + PyErr_Clear(); + cmsg = (char*) "coroutine was never awaited"; + } else { + cmsg = PyString_AS_STRING(msg); + } + #endif + if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, cmsg, 1) < 0)) + PyErr_WriteUnraisable(self); + Py_XDECREF(msg);} +#endif + PyObject_GC_Track(self); + } +#endif /*__Pyx_Coroutine_USED*/ + } else { + PyObject *res = __Pyx_Coroutine_Close(self); + if (unlikely(!res)) { + if (PyErr_Occurred()) + PyErr_WriteUnraisable(self); + } else { + Py_DECREF(res); + } + } + + // Restore the saved exception. + __Pyx_ErrRestore(error_type, error_value, error_traceback); + +#if !CYTHON_USE_TP_FINALIZE + // Undo the temporary resurrection; can't use DECREF here, it would + // cause a recursive call. + assert(Py_REFCNT(self) > 0); + if (likely(--self->ob_refcnt == 0)) { + // this is the normal path out + return; + } + + // close() resurrected it! Make it look like the original Py_DECREF + // never happened. + { + Py_ssize_t refcnt = Py_REFCNT(self); + _Py_NewReference(self); + __Pyx_SET_REFCNT(self, refcnt); + } +#if CYTHON_COMPILING_IN_CPYTHON + assert(PyType_IS_GC(Py_TYPE(self)) && + _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED); + + // If Py_REF_DEBUG, _Py_NewReference bumped _Py_RefTotal, so + // we need to undo that. + _Py_DEC_REFTOTAL; +#endif + // If Py_TRACE_REFS, _Py_NewReference re-added self to the object + // chain, so no more to do there. + // If COUNT_ALLOCS, the original decref bumped tp_frees, and + // _Py_NewReference bumped tp_allocs: both of those need to be + // undone. 
+#ifdef COUNT_ALLOCS + --Py_TYPE(self)->tp_frees; + --Py_TYPE(self)->tp_allocs; +#endif +#endif +} + +static PyObject * +__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, void *context) +{ + PyObject *name = self->gi_name; + CYTHON_UNUSED_VAR(context); + // avoid NULL pointer dereference during garbage collection + if (unlikely(!name)) name = Py_None; + Py_INCREF(name); + return name; +} + +static int +__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__name__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(self->gi_name, value); + return 0; +} + +static PyObject * +__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, void *context) +{ + PyObject *name = self->gi_qualname; + CYTHON_UNUSED_VAR(context); + // avoid NULL pointer dereference during garbage collection + if (unlikely(!name)) name = Py_None; + Py_INCREF(name); + return name; +} + +static int +__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__qualname__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(self->gi_qualname, value); + return 0; +} + +static PyObject * +__Pyx_Coroutine_get_frame(__pyx_CoroutineObject *self, void *context) +{ + PyObject *frame = self->gi_frame; + CYTHON_UNUSED_VAR(context); + if (!frame) { + if (unlikely(!self->gi_code)) { + // Avoid doing something stupid, e.g. during garbage collection. 
+ Py_RETURN_NONE; + } + frame = (PyObject *) PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + (PyCodeObject*) self->gi_code, /*PyCodeObject *code,*/ + $moddict_cname, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (unlikely(!frame)) + return NULL; + // keep the frame cached once it's created + self->gi_frame = frame; + } + Py_INCREF(frame); + return frame; +} + +static __pyx_CoroutineObject *__Pyx__Coroutine_New( + PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name) { + __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type); + if (unlikely(!gen)) + return NULL; + return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name); +} + +static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( + __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, + PyObject *name, PyObject *qualname, PyObject *module_name) { + gen->body = body; + gen->closure = closure; + Py_XINCREF(closure); + gen->is_running = 0; + gen->resume_label = 0; + gen->classobj = NULL; + gen->yieldfrom = NULL; + #if PY_VERSION_HEX >= 0x030B00a4 + gen->gi_exc_state.exc_value = NULL; + #else + gen->gi_exc_state.exc_type = NULL; + gen->gi_exc_state.exc_value = NULL; + gen->gi_exc_state.exc_traceback = NULL; + #endif +#if CYTHON_USE_EXC_INFO_STACK + gen->gi_exc_state.previous_item = NULL; +#endif + gen->gi_weakreflist = NULL; + Py_XINCREF(qualname); + gen->gi_qualname = qualname; + Py_XINCREF(name); + gen->gi_name = name; + Py_XINCREF(module_name); + gen->gi_modulename = module_name; + Py_XINCREF(code); + gen->gi_code = code; + gen->gi_frame = NULL; + + PyObject_GC_Track(gen); + return gen; +} + + +//////////////////// Coroutine //////////////////// +//@requires: CoroutineBase +//@requires: PatchGeneratorABC +//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict + +static void __Pyx_CoroutineAwait_dealloc(PyObject *self) { + PyObject_GC_UnTrack(self); + Py_CLEAR(((__pyx_CoroutineAwaitObject*)self)->coroutine); + __Pyx_PyHeapTypeObject_GC_Del(self); +} + +static int __Pyx_CoroutineAwait_traverse(__pyx_CoroutineAwaitObject *self, visitproc visit, void *arg) { + Py_VISIT(self->coroutine); + return 0; +} + +static int __Pyx_CoroutineAwait_clear(__pyx_CoroutineAwaitObject *self) { + Py_CLEAR(self->coroutine); + return 0; +} + +static PyObject *__Pyx_CoroutineAwait_Next(__pyx_CoroutineAwaitObject *self) { + return __Pyx_Generator_Next(self->coroutine); +} + +static PyObject *__Pyx_CoroutineAwait_Send(__pyx_CoroutineAwaitObject *self, PyObject *value) { + return __Pyx_Coroutine_Send(self->coroutine, value); +} + +static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args) { + return __Pyx_Coroutine_Throw(self->coroutine, args); +} + +static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + return __Pyx_Coroutine_Close(self->coroutine); +} + +static PyObject *__Pyx_CoroutineAwait_self(PyObject *self) { + Py_INCREF(self); + return self; +} + +#if !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_CoroutineAwait_no_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) { + CYTHON_UNUSED_VAR(type); + CYTHON_UNUSED_VAR(args); + CYTHON_UNUSED_VAR(kwargs); + PyErr_SetString(PyExc_TypeError, "cannot instantiate type, use 'await coroutine' instead"); + return NULL; +} +#endif + +// In earlier versions of Python an object with no __dict__ and not 
__slots__ is assumed +// to be pickleable by default. Coroutine-wrappers have significant state so shouldn't be. +// Therefore provide a default implementation. +// Something similar applies to heaptypes (i.e. with type_specs) with protocols 0 and 1 +// even in more recent versions. +// We are applying this to all Python versions (hence the commented out version guard) +// to make the behaviour explicit. +// #if PY_VERSION_HEX < 0x03060000 || CYTHON_USE_TYPE_SPECS +static PyObject *__Pyx_CoroutineAwait_reduce_ex(__pyx_CoroutineAwaitObject *self, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + PyErr_Format(PyExc_TypeError, "cannot pickle '%.200s' object", + Py_TYPE(self)->tp_name); + return NULL; +} +// #endif + +static PyMethodDef __pyx_CoroutineAwait_methods[] = { + {"send", (PyCFunction) __Pyx_CoroutineAwait_Send, METH_O, + (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next yielded value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_CoroutineAwait_Throw, METH_VARARGS, + (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next yielded value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_CoroutineAwait_Close, METH_NOARGS, + (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, +// only needed with type-specs or version<3.6, but included in all versions for clarity +// #if PY_VERSION_HEX < 0x03060000 || CYTHON_USE_TYPE_SPECS + {"__reduce_ex__", (PyCFunction) __Pyx_CoroutineAwait_reduce_ex, METH_O, 0}, + {"__reduce__", (PyCFunction) __Pyx_CoroutineAwait_reduce_ex, METH_NOARGS, 0}, +// #endif + {0, 0, 0, 0} +}; + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_CoroutineAwaitType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_CoroutineAwait_dealloc}, + {Py_tp_traverse, (void *)__Pyx_CoroutineAwait_traverse}, + {Py_tp_clear, (void *)__Pyx_CoroutineAwait_clear}, +#if !CYTHON_COMPILING_IN_PYPY + {Py_tp_new, (void *)__Pyx_CoroutineAwait_no_new}, +#endif + {Py_tp_methods, (void *)__pyx_CoroutineAwait_methods}, + {Py_tp_iter, (void *)__Pyx_CoroutineAwait_self}, + {Py_tp_iternext, (void *)__Pyx_CoroutineAwait_Next}, + {0, 0}, +}; + +static PyType_Spec __pyx_CoroutineAwaitType_spec = { + __PYX_TYPE_MODULE_PREFIX "coroutine_wrapper", + sizeof(__pyx_CoroutineAwaitObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + __pyx_CoroutineAwaitType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +static PyTypeObject __pyx_CoroutineAwaitType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "coroutine_wrapper", /*tp_name*/ + sizeof(__pyx_CoroutineAwaitObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_CoroutineAwait_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_as_async resp. 
tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + PyDoc_STR("A wrapper object implementing __await__ for coroutines."), /*tp_doc*/ + (traverseproc) __Pyx_CoroutineAwait_traverse, /*tp_traverse*/ + (inquiry) __Pyx_CoroutineAwait_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + __Pyx_CoroutineAwait_self, /*tp_iter*/ + (iternextfunc) __Pyx_CoroutineAwait_Next, /*tp_iternext*/ + __pyx_CoroutineAwait_methods, /*tp_methods*/ + 0 , /*tp_members*/ + 0 , /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ +#if !CYTHON_COMPILING_IN_PYPY + __Pyx_CoroutineAwait_no_new, /*tp_new*/ +#else + 0, /*tp_new*/ +#endif + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + +#if PY_VERSION_HEX < 0x030500B1 || defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS +static CYTHON_INLINE PyObject *__Pyx__Coroutine_await(PyObject *coroutine) { + __pyx_CoroutineAwaitObject *await = PyObject_GC_New(__pyx_CoroutineAwaitObject, __pyx_CoroutineAwaitType); + if (unlikely(!await)) return NULL; + Py_INCREF(coroutine); + await->coroutine = coroutine; + PyObject_GC_Track(await); + return (PyObject*)await; +} +#endif + +#if PY_VERSION_HEX < 0x030500B1 +static PyObject *__Pyx_Coroutine_await_method(PyObject *coroutine, PyObject *arg) { + CYTHON_UNUSED_VAR(arg); + return __Pyx__Coroutine_await(coroutine); +} +#endif + +#if defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS +static PyObject *__Pyx_Coroutine_await(PyObject *coroutine) { + if (unlikely(!coroutine || !__Pyx_Coroutine_Check(coroutine))) { + PyErr_SetString(PyExc_TypeError, "invalid input, expected coroutine"); + return NULL; + } + return __Pyx__Coroutine_await(coroutine); +} +#endif + +#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 +static PyObject *__Pyx_Coroutine_compare(PyObject *obj, PyObject *other, int op) { + PyObject* result; + switch (op) { + case Py_EQ: result = (other == obj) ? Py_True : Py_False; break; + case Py_NE: result = (other != obj) ? 
Py_True : Py_False; break; + default: + result = Py_NotImplemented; + } + Py_INCREF(result); + return result; +} +#endif + +static PyMethodDef __pyx_Coroutine_methods[] = { + {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, + (char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next iterated value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, + (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next iterated value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, + (char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")}, +#if PY_VERSION_HEX < 0x030500B1 + {"__await__", (PyCFunction) __Pyx_Coroutine_await_method, METH_NOARGS, + (char*) PyDoc_STR("__await__() -> return an iterator to be used in await expression.")}, +#endif + {0, 0, 0, 0} +}; + +static PyMemberDef __pyx_Coroutine_memberlist[] = { + {(char *) "cr_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, + {(char*) "cr_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, + (char*) PyDoc_STR("object being awaited, or None")}, + {(char*) "cr_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, + {(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), 0, 0}, +#if CYTHON_USE_TYPE_SPECS + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CoroutineObject, gi_weakreflist), READONLY, 0}, +#endif + {0, 0, 0, 0, 0} +}; + +static PyGetSetDef __pyx_Coroutine_getsets[] = { + {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, + (char*) PyDoc_STR("name of the coroutine"), 0}, + {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, + (char*) PyDoc_STR("qualified name of the coroutine"), 0}, + {(char *) "cr_frame", (getter)__Pyx_Coroutine_get_frame, NULL, + (char*) PyDoc_STR("Frame of the coroutine"), 0}, + {0, 0, 0, 0, 0} +}; + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_CoroutineType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_Coroutine_dealloc}, + {Py_am_await, (void *)&__Pyx_Coroutine_await}, + {Py_tp_traverse, (void *)__Pyx_Coroutine_traverse}, + {Py_tp_methods, (void *)__pyx_Coroutine_methods}, + {Py_tp_members, (void *)__pyx_Coroutine_memberlist}, + {Py_tp_getset, (void *)__pyx_Coroutine_getsets}, + {Py_tp_getattro, (void *) __Pyx_PyObject_GenericGetAttrNoDict}, +#if CYTHON_USE_TP_FINALIZE + {Py_tp_finalize, (void *)__Pyx_Coroutine_del}, +#endif + {0, 0}, +}; + +static PyType_Spec __pyx_CoroutineType_spec = { + __PYX_TYPE_MODULE_PREFIX "coroutine", + sizeof(__pyx_CoroutineObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + __pyx_CoroutineType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +#if CYTHON_USE_ASYNC_SLOTS +static __Pyx_PyAsyncMethodsStruct __pyx_Coroutine_as_async = { + __Pyx_Coroutine_await, /*am_await*/ + 0, /*am_aiter*/ + 0, /*am_anext*/ +#if PY_VERSION_HEX >= 0x030A00A3 + 0, /*am_send*/ +#endif +}; +#endif + +static PyTypeObject __pyx_CoroutineType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "coroutine", /*tp_name*/ + sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ +#if CYTHON_USE_ASYNC_SLOTS + &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! 
*/ +#else + 0, /*tp_reserved*/ +#endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + 0, /*tp_doc*/ + (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ + 0, /*tp_clear*/ +#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 + // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare + __Pyx_Coroutine_compare, /*tp_richcompare*/ +#else + 0, /*tp_richcompare*/ +#endif + offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ + // no tp_iter() as iterator is only available through __await__() + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_Coroutine_methods, /*tp_methods*/ + __pyx_Coroutine_memberlist, /*tp_members*/ + __pyx_Coroutine_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ +#if CYTHON_USE_TP_FINALIZE + 0, /*tp_del*/ +#else + __Pyx_Coroutine_del, /*tp_del*/ +#endif + 0, /*tp_version_tag*/ +#if CYTHON_USE_TP_FINALIZE + __Pyx_Coroutine_del, /*tp_finalize*/ +#elif PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + +static int __pyx_Coroutine_init(PyObject *module) { + CYTHON_MAYBE_UNUSED_VAR(module); + // on Windows, C-API functions can't be used in slots statically +#if CYTHON_USE_TYPE_SPECS + __pyx_CoroutineType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CoroutineType_spec, NULL); +#else + __pyx_CoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + __pyx_CoroutineType = __Pyx_FetchCommonType(&__pyx_CoroutineType_type); +#endif + if (unlikely(!__pyx_CoroutineType)) + return -1; + +#ifdef __Pyx_IterableCoroutine_USED + if (unlikely(__pyx_IterableCoroutine_init(module) == -1)) + return -1; +#endif + +#if CYTHON_USE_TYPE_SPECS + __pyx_CoroutineAwaitType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CoroutineAwaitType_spec, NULL); +#else + __pyx_CoroutineAwaitType = __Pyx_FetchCommonType(&__pyx_CoroutineAwaitType_type); +#endif + if (unlikely(!__pyx_CoroutineAwaitType)) + return -1; + return 0; +} + + +//////////////////// IterableCoroutine.proto //////////////////// + +#define __Pyx_IterableCoroutine_USED + +#undef __Pyx_Coroutine_Check +#define __Pyx_Coroutine_Check(obj) (__Pyx_Coroutine_CheckExact(obj) || __Pyx_IS_TYPE(obj, __pyx_IterableCoroutineType)) + +#define __Pyx_IterableCoroutine_New(body, code, closure, name, qualname, module_name) \ + __Pyx__Coroutine_New(__pyx_IterableCoroutineType, body, code, closure, name, qualname, module_name) + +static int __pyx_IterableCoroutine_init(PyObject *module);/*proto*/ + + +//////////////////// IterableCoroutine //////////////////// +//@requires: Coroutine +//@requires: CommonStructures.c::FetchCommonType + +#if 
CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_IterableCoroutineType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_Coroutine_dealloc}, + {Py_am_await, (void *)&__Pyx_Coroutine_await}, + {Py_tp_traverse, (void *)__Pyx_Coroutine_traverse}, + {Py_tp_iter, (void *)__Pyx_Coroutine_await}, + {Py_tp_iternext, (void *)__Pyx_Generator_Next}, + {Py_tp_methods, (void *)__pyx_Coroutine_methods}, + {Py_tp_members, (void *)__pyx_Coroutine_memberlist}, + {Py_tp_getset, (void *)__pyx_Coroutine_getsets}, + {Py_tp_getattro, (void *) __Pyx_PyObject_GenericGetAttrNoDict}, +#if CYTHON_USE_TP_FINALIZE + {Py_tp_finalize, (void *)__Pyx_Coroutine_del}, +#endif + {0, 0}, +}; + +static PyType_Spec __pyx_IterableCoroutineType_spec = { + __PYX_TYPE_MODULE_PREFIX "iterable_coroutine", + sizeof(__pyx_CoroutineObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + __pyx_IterableCoroutineType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +static PyTypeObject __pyx_IterableCoroutineType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "iterable_coroutine", /*tp_name*/ + sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ +#if CYTHON_USE_ASYNC_SLOTS + &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! */ +#else + 0, /*tp_reserved*/ +#endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + 0, /*tp_doc*/ + (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ + 0, /*tp_clear*/ +#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1 + // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare + __Pyx_Coroutine_compare, /*tp_richcompare*/ +#else + 0, /*tp_richcompare*/ +#endif + offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ + // enable iteration for legacy support of asyncio yield-from protocol + __Pyx_Coroutine_await, /*tp_iter*/ + (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/ + __pyx_Coroutine_methods, /*tp_methods*/ + __pyx_Coroutine_memberlist, /*tp_members*/ + __pyx_Coroutine_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_del*/ +#else + __Pyx_Coroutine_del, /*tp_del*/ +#endif + 0, /*tp_version_tag*/ +#if PY_VERSION_HEX >= 0x030400a1 && !CYTHON_COMPILING_IN_PYPY + __Pyx_Coroutine_del, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + + +static int __pyx_IterableCoroutine_init(PyObject *module) { +#if CYTHON_USE_TYPE_SPECS + __pyx_IterableCoroutineType = __Pyx_FetchCommonTypeFromSpec(module, 
&__pyx_IterableCoroutineType_spec, NULL); +#else + CYTHON_UNUSED_VAR(module); + __pyx_IterableCoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + __pyx_IterableCoroutineType = __Pyx_FetchCommonType(&__pyx_IterableCoroutineType_type); +#endif + if (unlikely(!__pyx_IterableCoroutineType)) + return -1; + return 0; +} + + +//////////////////// Generator //////////////////// +//@requires: CoroutineBase +//@requires: PatchGeneratorABC +//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict + +static PyMethodDef __pyx_Generator_methods[] = { + {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, + (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")}, + {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, + (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")}, + {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, + (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")}, + {0, 0, 0, 0} +}; + +static PyMemberDef __pyx_Generator_memberlist[] = { + {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, + {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, + (char*) PyDoc_STR("object being iterated by 'yield from', or None")}, + {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, + {(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), 0, 0}, +#if CYTHON_USE_TYPE_SPECS + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CoroutineObject, gi_weakreflist), READONLY, 0}, +#endif + {0, 0, 0, 0, 0} +}; + +static PyGetSetDef __pyx_Generator_getsets[] = { + {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, + (char*) PyDoc_STR("name of the generator"), 0}, + {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, + (char*) PyDoc_STR("qualified name of the generator"), 0}, + {(char *) "gi_frame", (getter)__Pyx_Coroutine_get_frame, NULL, + (char*) PyDoc_STR("Frame of the generator"), 0}, + {0, 0, 0, 0, 0} +}; + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_GeneratorType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_Coroutine_dealloc}, + {Py_tp_traverse, (void *)__Pyx_Coroutine_traverse}, + {Py_tp_iter, (void *)PyObject_SelfIter}, + {Py_tp_iternext, (void *)__Pyx_Generator_Next}, + {Py_tp_methods, (void *)__pyx_Generator_methods}, + {Py_tp_members, (void *)__pyx_Generator_memberlist}, + {Py_tp_getset, (void *)__pyx_Generator_getsets}, + {Py_tp_getattro, (void *) __Pyx_PyObject_GenericGetAttrNoDict}, +#if CYTHON_USE_TP_FINALIZE + {Py_tp_finalize, (void *)__Pyx_Coroutine_del}, +#endif + {0, 0}, +}; + +static PyType_Spec __pyx_GeneratorType_spec = { + __PYX_TYPE_MODULE_PREFIX "generator", + sizeof(__pyx_CoroutineObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + __pyx_GeneratorType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +static PyTypeObject __pyx_GeneratorType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "generator", /*tp_name*/ + sizeof(__pyx_CoroutineObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_as_async*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, 
/*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/ + 0, /*tp_doc*/ + (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/ + __pyx_Generator_methods, /*tp_methods*/ + __pyx_Generator_memberlist, /*tp_members*/ + __pyx_Generator_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ +#if CYTHON_USE_TP_FINALIZE + 0, /*tp_del*/ +#else + __Pyx_Coroutine_del, /*tp_del*/ +#endif + 0, /*tp_version_tag*/ +#if CYTHON_USE_TP_FINALIZE + __Pyx_Coroutine_del, /*tp_finalize*/ +#elif PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + +static int __pyx_Generator_init(PyObject *module) { +#if CYTHON_USE_TYPE_SPECS + __pyx_GeneratorType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_GeneratorType_spec, NULL); +#else + CYTHON_UNUSED_VAR(module); + // on Windows, C-API functions can't be used in slots statically + __pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; + __pyx_GeneratorType_type.tp_iter = PyObject_SelfIter; + __pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type); +#endif + if (unlikely(!__pyx_GeneratorType)) { + return -1; + } + return 0; +} + + +/////////////// ReturnWithStopIteration.proto /////////////// + +#define __Pyx_ReturnWithStopIteration(value) \ + if (value == Py_None) PyErr_SetNone(PyExc_StopIteration); else __Pyx__ReturnWithStopIteration(value) +static void __Pyx__ReturnWithStopIteration(PyObject* value); /*proto*/ + +/////////////// ReturnWithStopIteration /////////////// +//@requires: Exceptions.c::PyErrFetchRestore +//@requires: Exceptions.c::PyThreadStateGet +//@substitute: naming + +// 1) Instantiating an exception just to pass back a value is costly. +// 2) CPython 3.12 cannot separate exception type and value +// 3) Passing a tuple as value into PyErr_SetObject() passes its items on as arguments. +// 4) Passing an exception as value will interpret it as an exception on unpacking and raise it (or unpack its value). +// 5) If there is currently an exception being handled, we need to chain it. 
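+//
+// For illustration, the standard CPython semantics being implemented here
+// (hypothetical user code, not part of this utility):
+//
+//     def gen():
+//         yield 1
+//         return 42          # the return value travels on StopIteration
+//
+//     g = gen()
+//     next(g)                # -> 1
+//     try:
+//         next(g)
+//     except StopIteration as exc:
+//         assert exc.value == 42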
+ +static void __Pyx__ReturnWithStopIteration(PyObject* value) { + PyObject *exc, *args; +#if CYTHON_COMPILING_IN_CPYTHON + __Pyx_PyThreadState_declare + if (PY_VERSION_HEX >= 0x030C00A6 + || unlikely(PyTuple_Check(value) || PyExceptionInstance_Check(value))) { + args = PyTuple_New(1); + if (unlikely(!args)) return; + Py_INCREF(value); + PyTuple_SET_ITEM(args, 0, value); + exc = PyType_Type.tp_call(PyExc_StopIteration, args, NULL); + Py_DECREF(args); + if (!exc) return; + } else { + // it's safe to avoid instantiating the exception + Py_INCREF(value); + exc = value; + } + #if CYTHON_FAST_THREAD_STATE + __Pyx_PyThreadState_assign + #if CYTHON_USE_EXC_INFO_STACK + if (!$local_tstate_cname->exc_info->exc_value) + #else + if (!$local_tstate_cname->exc_type) + #endif + { + // no chaining needed => avoid the overhead in PyErr_SetObject() + Py_INCREF(PyExc_StopIteration); + __Pyx_ErrRestore(PyExc_StopIteration, exc, NULL); + return; + } + #endif +#else + args = PyTuple_Pack(1, value); + if (unlikely(!args)) return; + exc = PyObject_Call(PyExc_StopIteration, args, NULL); + Py_DECREF(args); + if (unlikely(!exc)) return; +#endif + PyErr_SetObject(PyExc_StopIteration, exc); + Py_DECREF(exc); +} + + +//////////////////// PatchModuleWithCoroutine.proto //////////////////// + +static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code); /*proto*/ + +//////////////////// PatchModuleWithCoroutine //////////////////// +//@substitute: naming + +static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) { +#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + int result; + PyObject *globals, *result_obj; + globals = PyDict_New(); if (unlikely(!globals)) goto ignore; + result = PyDict_SetItemString(globals, "_cython_coroutine_type", + #ifdef __Pyx_Coroutine_USED + (PyObject*)__pyx_CoroutineType); + #else + Py_None); + #endif + if (unlikely(result < 0)) goto ignore; + result = PyDict_SetItemString(globals, "_cython_generator_type", + #ifdef __Pyx_Generator_USED + (PyObject*)__pyx_GeneratorType); + #else + Py_None); + #endif + if (unlikely(result < 0)) goto ignore; + if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore; + if (unlikely(PyDict_SetItemString(globals, "__builtins__", $builtins_cname) < 0)) goto ignore; + result_obj = PyRun_String(py_code, Py_file_input, globals, globals); + if (unlikely(!result_obj)) goto ignore; + Py_DECREF(result_obj); + Py_DECREF(globals); + return module; + +ignore: + Py_XDECREF(globals); + PyErr_WriteUnraisable(module); + if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) { + Py_DECREF(module); + module = NULL; + } +#else + // avoid "unused" warning + py_code++; +#endif + return module; +} + + +//////////////////// PatchGeneratorABC.proto //////////////////// + +// register with Generator/Coroutine ABCs in 'collections.abc' +// see https://bugs.python.org/issue24018 +static int __Pyx_patch_abc(void); /*proto*/ + +//////////////////// PatchGeneratorABC //////////////////// +//@requires: PatchModuleWithCoroutine + +#ifndef CYTHON_REGISTER_ABCS +#define CYTHON_REGISTER_ABCS 1 +#endif + +#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) +static PyObject* __Pyx_patch_abc_module(PyObject *module); /*proto*/ +static PyObject* __Pyx_patch_abc_module(PyObject *module) { + module = __Pyx_Coroutine_patch_module( + module, CSTRING("""\ +if _cython_generator_type is not None: + try: Generator = _module.Generator + 
except AttributeError: pass + else: Generator.register(_cython_generator_type) +if _cython_coroutine_type is not None: + try: Coroutine = _module.Coroutine + except AttributeError: pass + else: Coroutine.register(_cython_coroutine_type) +""") + ); + return module; +} +#endif + +static int __Pyx_patch_abc(void) { +#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + static int abc_patched = 0; + if (CYTHON_REGISTER_ABCS && !abc_patched) { + PyObject *module; + module = PyImport_ImportModule((PY_MAJOR_VERSION >= 3) ? "collections.abc" : "collections"); + if (unlikely(!module)) { + PyErr_WriteUnraisable(NULL); + if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, + ((PY_MAJOR_VERSION >= 3) ? + "Cython module failed to register with collections.abc module" : + "Cython module failed to register with collections module"), 1) < 0)) { + return -1; + } + } else { + module = __Pyx_patch_abc_module(module); + abc_patched = 1; + if (unlikely(!module)) + return -1; + Py_DECREF(module); + } + // also register with "backports_abc" module if available, just in case + module = PyImport_ImportModule("backports_abc"); + if (module) { + module = __Pyx_patch_abc_module(module); + Py_XDECREF(module); + } + if (!module) { + PyErr_Clear(); + } + } +#else + // avoid "unused" warning for __Pyx_Coroutine_patch_module() + if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL); +#endif + return 0; +} + + +//////////////////// PatchAsyncIO.proto //////////////////// + +// run after importing "asyncio" to patch Cython generator support into it +static PyObject* __Pyx_patch_asyncio(PyObject* module); /*proto*/ + +//////////////////// PatchAsyncIO //////////////////// +//@requires: ImportExport.c::Import +//@requires: PatchModuleWithCoroutine +//@requires: PatchInspect + +static PyObject* __Pyx_patch_asyncio(PyObject* module) { +#if PY_VERSION_HEX < 0x030500B2 && \ + (defined(__Pyx_Coroutine_USED) || defined(__Pyx_Generator_USED)) && \ + (!defined(CYTHON_PATCH_ASYNCIO) || CYTHON_PATCH_ASYNCIO) + PyObject *patch_module = NULL; + static int asyncio_patched = 0; + if (unlikely((!asyncio_patched) && module)) { + PyObject *package; + package = __Pyx_Import(PYIDENT("asyncio.coroutines"), NULL, 0); + if (package) { + patch_module = __Pyx_Coroutine_patch_module( + PyObject_GetAttrString(package, "coroutines"), CSTRING("""\ +try: + coro_types = _module._COROUTINE_TYPES +except AttributeError: pass +else: + if _cython_coroutine_type is not None and _cython_coroutine_type not in coro_types: + coro_types = tuple(coro_types) + (_cython_coroutine_type,) + if _cython_generator_type is not None and _cython_generator_type not in coro_types: + coro_types = tuple(coro_types) + (_cython_generator_type,) +_module._COROUTINE_TYPES = coro_types +""") + ); + } else { + PyErr_Clear(); +// Always enable fallback: even if we compile against 3.4.2, we might be running on 3.4.1 at some point. 
+//#if PY_VERSION_HEX < 0x03040200 + // Py3.4.1 used to have asyncio.tasks instead of asyncio.coroutines + package = __Pyx_Import(PYIDENT("asyncio.tasks"), NULL, 0); + if (unlikely(!package)) goto asyncio_done; + patch_module = __Pyx_Coroutine_patch_module( + PyObject_GetAttrString(package, "tasks"), CSTRING("""\ +if hasattr(_module, 'iscoroutine'): + old_types = getattr(_module.iscoroutine, '_cython_coroutine_types', None) + if old_types is None or not isinstance(old_types, set): + old_types = set() + def cy_wrap(orig_func, type=type, cython_coroutine_types=old_types): + def cy_iscoroutine(obj): return type(obj) in cython_coroutine_types or orig_func(obj) + cy_iscoroutine._cython_coroutine_types = cython_coroutine_types + return cy_iscoroutine + _module.iscoroutine = cy_wrap(_module.iscoroutine) + if _cython_coroutine_type is not None: + old_types.add(_cython_coroutine_type) + if _cython_generator_type is not None: + old_types.add(_cython_generator_type) +""") + ); +//#endif +// Py < 0x03040200 + } + Py_DECREF(package); + if (unlikely(!patch_module)) goto ignore; +//#if PY_VERSION_HEX < 0x03040200 +asyncio_done: + PyErr_Clear(); +//#endif + asyncio_patched = 1; +#ifdef __Pyx_Generator_USED + // now patch inspect.isgenerator() by looking up the imported module in the patched asyncio module + { + PyObject *inspect_module; + if (patch_module) { + inspect_module = PyObject_GetAttr(patch_module, PYIDENT("inspect")); + Py_DECREF(patch_module); + } else { + inspect_module = __Pyx_Import(PYIDENT("inspect"), NULL, 0); + } + if (unlikely(!inspect_module)) goto ignore; + inspect_module = __Pyx_patch_inspect(inspect_module); + if (unlikely(!inspect_module)) { + Py_DECREF(module); + module = NULL; + } + Py_XDECREF(inspect_module); + } +#else + // avoid "unused" warning for __Pyx_patch_inspect() + if ((0)) return __Pyx_patch_inspect(module); +#endif + } + return module; +ignore: + PyErr_WriteUnraisable(module); + if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch asyncio package with custom generator type", 1) < 0)) { + Py_DECREF(module); + module = NULL; + } +#else + // avoid "unused" warning for __Pyx_Coroutine_patch_module() + if ((0)) return __Pyx_patch_inspect(__Pyx_Coroutine_patch_module(module, NULL)); +#endif + return module; +} + + +//////////////////// PatchInspect.proto //////////////////// + +// run after importing "inspect" to patch Cython generator support into it +static PyObject* __Pyx_patch_inspect(PyObject* module); /*proto*/ + +//////////////////// PatchInspect //////////////////// +//@requires: PatchModuleWithCoroutine + +static PyObject* __Pyx_patch_inspect(PyObject* module) { +#if defined(__Pyx_Generator_USED) && (!defined(CYTHON_PATCH_INSPECT) || CYTHON_PATCH_INSPECT) + static int inspect_patched = 0; + if (unlikely((!inspect_patched) && module)) { + module = __Pyx_Coroutine_patch_module( + module, CSTRING("""\ +old_types = getattr(_module.isgenerator, '_cython_generator_types', None) +if old_types is None or not isinstance(old_types, set): + old_types = set() + def cy_wrap(orig_func, type=type, cython_generator_types=old_types): + def cy_isgenerator(obj): return type(obj) in cython_generator_types or orig_func(obj) + cy_isgenerator._cython_generator_types = cython_generator_types + return cy_isgenerator + _module.isgenerator = cy_wrap(_module.isgenerator) +old_types.add(_cython_generator_type) +""") + ); + inspect_patched = 1; + } +#else + // avoid "unused" warning for __Pyx_Coroutine_patch_module() + if ((0)) return 
__Pyx_Coroutine_patch_module(module, NULL); +#endif + return module; +} + + +//////////////////// StopAsyncIteration.proto //////////////////// + +#define __Pyx_StopAsyncIteration_USED +static PyObject *__Pyx_PyExc_StopAsyncIteration; +static int __pyx_StopAsyncIteration_init(PyObject *module); /*proto*/ + +//////////////////// StopAsyncIteration //////////////////// + +#if PY_VERSION_HEX < 0x030500B1 +#if CYTHON_USE_TYPE_SPECS +#error Using async coroutines with type specs requires Python 3.5 or later. +#else + +static PyTypeObject __Pyx__PyExc_StopAsyncIteration_type = { + PyVarObject_HEAD_INIT(0, 0) + "StopAsyncIteration", /*tp_name*/ + sizeof(PyBaseExceptionObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + 0, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare / reserved*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + PyDoc_STR("Signal the end from iterator.__anext__()."), /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM+0 >= 0x06000000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif +#endif + +static int __pyx_StopAsyncIteration_init(PyObject *module) { + CYTHON_UNUSED_VAR(module); +#if PY_VERSION_HEX >= 0x030500B1 + __Pyx_PyExc_StopAsyncIteration = PyExc_StopAsyncIteration; +#else + PyObject *builtins = PyEval_GetBuiltins(); + if (likely(builtins)) { + PyObject *exc = PyMapping_GetItemString(builtins, (char*) "StopAsyncIteration"); + if (exc) { + __Pyx_PyExc_StopAsyncIteration = exc; + return 0; + } + } + PyErr_Clear(); + + __Pyx__PyExc_StopAsyncIteration_type.tp_traverse = ((PyTypeObject*)PyExc_BaseException)->tp_traverse; + __Pyx__PyExc_StopAsyncIteration_type.tp_clear = ((PyTypeObject*)PyExc_BaseException)->tp_clear; + __Pyx__PyExc_StopAsyncIteration_type.tp_dictoffset = ((PyTypeObject*)PyExc_BaseException)->tp_dictoffset; + __Pyx__PyExc_StopAsyncIteration_type.tp_base = (PyTypeObject*)PyExc_Exception; + + __Pyx_PyExc_StopAsyncIteration = (PyObject*) __Pyx_FetchCommonType(&__Pyx__PyExc_StopAsyncIteration_type); + if (unlikely(!__Pyx_PyExc_StopAsyncIteration)) + return -1; + if (likely(builtins) && unlikely(PyMapping_SetItemString(builtins, (char*) "StopAsyncIteration", __Pyx_PyExc_StopAsyncIteration) < 0)) + return -1; +#endif + return 0; +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CpdefEnums.pyx b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CpdefEnums.pyx new file mode 100644 index 0000000000000000000000000000000000000000..39377b30ccedaf6c57e3162b4af8a944fdd50270 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CpdefEnums.pyx @@ -0,0 +1,164 @@ +#################### EnumBase #################### + +cimport cython + +cdef extern from *: + int PY_VERSION_HEX + +cdef object 
__Pyx_OrderedDict + +if PY_VERSION_HEX >= 0x03060000: + __Pyx_OrderedDict = dict +else: + from collections import OrderedDict as __Pyx_OrderedDict + +@cython.internal +cdef class __Pyx_EnumMeta(type): + def __init__(cls, name, parents, dct): + type.__init__(cls, name, parents, dct) + cls.__members__ = __Pyx_OrderedDict() + def __iter__(cls): + return iter(cls.__members__.values()) + def __getitem__(cls, name): + return cls.__members__[name] + +# @cython.internal +cdef object __Pyx_EnumBase +class __Pyx_EnumBase(int, metaclass=__Pyx_EnumMeta): + def __new__(cls, value, name=None): + for v in cls: + if v == value: + return v + if name is None: + raise ValueError("Unknown enum value: '%s'" % value) + res = int.__new__(cls, value) + res.name = name + setattr(cls, name, res) + cls.__members__[name] = res + return res + def __repr__(self): + return "<%s.%s: %d>" % (self.__class__.__name__, self.name, self) + def __str__(self): + return "%s.%s" % (self.__class__.__name__, self.name) + +if PY_VERSION_HEX >= 0x03040000: + from enum import IntEnum as __Pyx_EnumBase + +cdef object __Pyx_FlagBase +class __Pyx_FlagBase(int, metaclass=__Pyx_EnumMeta): + def __new__(cls, value, name=None): + for v in cls: + if v == value: + return v + res = int.__new__(cls, value) + if name is None: + # some bitwise combination, no validation here + res.name = "" + else: + res.name = name + setattr(cls, name, res) + cls.__members__[name] = res + return res + def __repr__(self): + return "<%s.%s: %d>" % (self.__class__.__name__, self.name, self) + def __str__(self): + return "%s.%s" % (self.__class__.__name__, self.name) + +if PY_VERSION_HEX >= 0x03060000: + from enum import IntFlag as __Pyx_FlagBase + +#################### EnumType #################### +#@requires: EnumBase + +cdef extern from *: + object {{enum_to_pyint_func}}({{name}} value) + +cdef dict __Pyx_globals = globals() +if PY_VERSION_HEX >= 0x03060000: + # create new IntFlag() - the assumption is that C enums are sufficiently commonly + # used as flags that this is the most appropriate base class + {{name}} = __Pyx_FlagBase('{{name}}', [ + {{for item in items}} + ('{{item}}', {{enum_to_pyint_func}}({{item}})), + {{endfor}} + # Try to look up the module name dynamically if possible + ], module=__Pyx_globals.get("__module__", '{{static_modname}}')) + + if PY_VERSION_HEX >= 0x030B0000: + # Python 3.11 starts making the behaviour of flags stricter + # (only including powers of 2 when iterating). Since we're using + # "flag" because C enums *might* be used as flags, not because + # we want strict flag behaviour, manually undo some of this. 
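+        # For illustration: given members A=1, B=2 and C=3, strict Python 3.11
+        # flag iteration yields only A and B, because C == A|B is a composite
+        # value rather than a power of two; resetting _member_names_ below
+        # keeps C listed among the members as well.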
+ {{name}}._member_names_ = list({{name}}.__members__) + + {{if enum_doc is not None}} + {{name}}.__doc__ = {{ repr(enum_doc) }} + {{endif}} + + {{for item in items}} + __Pyx_globals['{{item}}'] = {{name}}.{{item}} + {{endfor}} +else: + class {{name}}(__Pyx_FlagBase): + {{ repr(enum_doc) if enum_doc is not None else 'pass' }} + {{for item in items}} + __Pyx_globals['{{item}}'] = {{name}}({{enum_to_pyint_func}}({{item}}), '{{item}}') + {{endfor}} + +#################### CppScopedEnumType #################### +#@requires: EnumBase +cdef dict __Pyx_globals = globals() + +if PY_VERSION_HEX >= 0x03040000: + __Pyx_globals["{{name}}"] = __Pyx_EnumBase('{{name}}', [ + {{for item in items}} + ('{{item}}', <{{underlying_type}}>({{name}}.{{item}})), + {{endfor}} + ], module=__Pyx_globals.get("__module__", '{{static_modname}}')) +else: + __Pyx_globals["{{name}}"] = type('{{name}}', (__Pyx_EnumBase,), {}) + {{for item in items}} + __Pyx_globals["{{name}}"](<{{underlying_type}}>({{name}}.{{item}}), '{{item}}') + {{endfor}} + +{{if enum_doc is not None}} +__Pyx_globals["{{name}}"].__doc__ = {{ repr(enum_doc) }} +{{endif}} + + +#################### EnumTypeToPy #################### + +@cname("{{funcname}}") +cdef {{funcname}}({{name}} c_val): + cdef object __pyx_enum + # There's a complication here: the Python enum wrapping is only generated + # for enums defined in the same module that they're used in. Therefore, if + # the enum was cimported from a different module, we try to import it. + # If that fails we return an int equivalent as the next best option. +{{if module_name}} + try: + from {{module_name}} import {{name}} as __pyx_enum + except ImportError: + import warnings + warnings.warn( + f"enum class {{name}} not importable from {{module_name}}. " + "You are probably using a cpdef enum declared in a .pxd file that " + "does not have a .py or .pyx file.") + return <{{underlying_type}}>c_val +{{else}} + __pyx_enum = {{name}} +{{endif}} + # TODO - Cython only manages to optimize C enums to a switch currently + if 0: + pass +{{for item in items}} + elif c_val == {{name}}.{{item}}: + return __pyx_enum.{{item}} +{{endfor}} + else: + underlying_c_val = <{{underlying_type}}>c_val +{{if is_flag}} + return __pyx_enum(underlying_c_val) +{{else}} + raise ValueError(f"{underlying_c_val} is not a valid {{name}}") +{{endif}} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CppConvert.pyx b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CppConvert.pyx new file mode 100644 index 0000000000000000000000000000000000000000..0e7cf4e2dc40e6eb7c21848b862a12116a35c82f --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CppConvert.pyx @@ -0,0 +1,273 @@ +# TODO: Figure out how many of the pass-by-value copies the compiler can eliminate. 
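+#
+# For illustration, hypothetical user code that would trigger two of the
+# conversions defined below (vector.from_py on the way in, vector.to_py on
+# the way out):
+#
+#     from libcpp.vector cimport vector
+#
+#     def roundtrip(values):
+#         cdef vector[int] v = values   # instantiates vector.from_py
+#         return v                      # instantiates vector.to_py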
+
+
+#################### string.from_py ####################
+
+cdef extern from *:
+    cdef cppclass string "{{type}}":
+        string() except +
+        string(char* c_str, size_t size) except +
+    cdef const char* __Pyx_PyObject_AsStringAndSize(object, Py_ssize_t*) except NULL
+
+@cname("{{cname}}")
+cdef string {{cname}}(object o) except *:
+    cdef Py_ssize_t length = 0
+    cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length)
+    return string(data, length)
+
+
+#################### string.to_py ####################
+
+#cimport cython
+#from libcpp.string cimport string
+cdef extern from *:
+    cdef cppclass string "{{type}}":
+        char* data()
+        size_t size()
+
+{{for py_type in ['PyObject', 'PyUnicode', 'PyStr', 'PyBytes', 'PyByteArray']}}
+cdef extern from *:
+    cdef object __Pyx_{{py_type}}_FromStringAndSize(const char*, size_t)
+
+@cname("{{cname.replace("PyObject", py_type, 1)}}")
+cdef inline object {{cname.replace("PyObject", py_type, 1)}}(const string& s):
+    return __Pyx_{{py_type}}_FromStringAndSize(s.data(), s.size())
+{{endfor}}
+
+
+#################### vector.from_py ####################
+
+cdef extern from *:
+    cdef cppclass vector "std::vector" [T]:
+        void push_back(T&) except +
+
+@cname("{{cname}}")
+cdef vector[X] {{cname}}(object o) except *:
+    cdef vector[X] v
+    for item in o:
+        v.push_back(item)
+    return v
+
+
+#################### vector.to_py ####################
+
+cdef extern from *:
+    cdef cppclass vector "std::vector" [T]:
+        size_t size()
+        T& operator[](size_t)
+
+cdef extern from "Python.h":
+    void Py_INCREF(object)
+    list PyList_New(Py_ssize_t size)
+    void PyList_SET_ITEM(object list, Py_ssize_t i, object o)
+    const Py_ssize_t PY_SSIZE_T_MAX
+
+@cname("{{cname}}")
+cdef object {{cname}}(const vector[X]& v):
+    if v.size() > <size_t> PY_SSIZE_T_MAX:
+        raise MemoryError()
+    v_size_signed = <Py_ssize_t> v.size()
+
+    o = PyList_New(v_size_signed)
+
+    cdef Py_ssize_t i
+    cdef object item
+
+    for i in range(v_size_signed):
+        item = v[i]
+        Py_INCREF(item)
+        PyList_SET_ITEM(o, i, item)
+
+    return o
+
+#################### list.from_py ####################
+
+cdef extern from *:
+    cdef cppclass cpp_list "std::list" [T]:
+        void push_back(T&) except +
+
+@cname("{{cname}}")
+cdef cpp_list[X] {{cname}}(object o) except *:
+    cdef cpp_list[X] l
+    for item in o:
+        l.push_back(item)
+    return l
+
+
+#################### list.to_py ####################
+
+cimport cython
+
+cdef extern from *:
+    cdef cppclass cpp_list "std::list" [T]:
+        cppclass const_iterator:
+            T& operator*()
+            const_iterator operator++()
+            bint operator!=(const_iterator)
+        const_iterator begin()
+        const_iterator end()
+        size_t size()
+
+cdef extern from "Python.h":
+    void Py_INCREF(object)
+    list PyList_New(Py_ssize_t size)
+    void PyList_SET_ITEM(object list, Py_ssize_t i, object o)
+    cdef Py_ssize_t PY_SSIZE_T_MAX
+
+@cname("{{cname}}")
+cdef object {{cname}}(const cpp_list[X]& v):
+    if v.size() > <size_t> PY_SSIZE_T_MAX:
+        raise MemoryError()
+
+    o = PyList_New(<Py_ssize_t> v.size())
+
+    cdef object item
+    cdef Py_ssize_t i = 0
+    cdef cpp_list[X].const_iterator iter = v.begin()
+
+    while iter != v.end():
+        item = cython.operator.dereference(iter)
+        Py_INCREF(item)
+        PyList_SET_ITEM(o, i, item)
+        cython.operator.preincrement(iter)
+        i += 1
+
+    return o
+
+
+#################### set.from_py ####################
+
+cdef extern from *:
+    cdef cppclass set "std::{{maybe_unordered}}set" [T]:
+        void insert(T&) except +
+
+@cname("{{cname}}")
+cdef set[X] {{cname}}(object o) except *:
+    cdef set[X] s
+    for item in o:
+        s.insert(item)
+    return s
+
+
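+# For illustration, the set conversions directly above and below work the same
+# way (hypothetical user code):
+#
+#     from libcpp.set cimport set as cpp_set
+#
+#     def unique(values):
+#         cdef cpp_set[int] s = values   # set.from_py
+#         return s                       # set.to_py -> Python set
+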
+#################### set.to_py ####################
+
+cimport cython
+
+cdef extern from *:
+    cdef cppclass cpp_set "std::{{maybe_unordered}}set" [T]:
+        cppclass const_iterator:
+            T& operator*()
+            const_iterator operator++()
+            bint operator!=(const_iterator)
+        const_iterator begin()
+        const_iterator end()
+
+@cname("{{cname}}")
+cdef object {{cname}}(const cpp_set[X]& s):
+    return {v for v in s}
+
+#################### pair.from_py ####################
+
+cdef extern from *:
+    cdef cppclass pair "std::pair" [T, U]:
+        pair() except +
+        pair(T&, U&) except +
+
+@cname("{{cname}}")
+cdef pair[X,Y] {{cname}}(object o) except *:
+    x, y = o
+    return pair[X,Y](x, y)
+
+
+#################### pair.to_py ####################
+
+cdef extern from *:
+    cdef cppclass pair "std::pair" [T, U]:
+        T first
+        U second
+
+@cname("{{cname}}")
+cdef object {{cname}}(const pair[X,Y]& p):
+    return p.first, p.second
+
+
+#################### map.from_py ####################
+
+cdef extern from *:
+    cdef cppclass pair "std::pair" [T, U]:
+        pair(T&, U&) except +
+    cdef cppclass map "std::{{maybe_unordered}}map" [T, U]:
+        void insert(pair[T, U]&) except +
+    cdef cppclass vector "std::vector" [T]:
+        pass
+    int PY_MAJOR_VERSION
+
+
+@cname("{{cname}}")
+cdef map[X,Y] {{cname}}(object o) except *:
+    cdef map[X,Y] m
+    if PY_MAJOR_VERSION < 3:
+        for key, value in o.iteritems():
+            m.insert(pair[X,Y](key, value))
+    else:
+        for key, value in o.items():
+            m.insert(pair[X,Y](key, value))
+    return m
+
+
+#################### map.to_py ####################
+# TODO: Work out const so that this can take a const
+# reference rather than pass by value.
+
+cimport cython
+
+cdef extern from *:
+    cdef cppclass map "std::{{maybe_unordered}}map" [T, U]:
+        cppclass value_type:
+            T first
+            U second
+        cppclass const_iterator:
+            value_type& operator*()
+            const_iterator operator++()
+            bint operator!=(const_iterator)
+        const_iterator begin()
+        const_iterator end()
+
+@cname("{{cname}}")
+cdef object {{cname}}(const map[X,Y]& s):
+    o = {}
+    cdef const map[X,Y].value_type *key_value
+    cdef map[X,Y].const_iterator iter = s.begin()
+    while iter != s.end():
+        key_value = &cython.operator.dereference(iter)
+        o[key_value.first] = key_value.second
+        cython.operator.preincrement(iter)
+    return o
+
+
+#################### complex.from_py ####################
+
+cdef extern from *:
+    cdef cppclass std_complex "std::complex" [T]:
+        std_complex()
+        std_complex(T, T) except +
+
+@cname("{{cname}}")
+cdef std_complex[X] {{cname}}(object o) except *:
+    cdef double complex z = o
+    return std_complex[X](z.real, z.imag)
+
+
+#################### complex.to_py ####################
+
+cdef extern from *:
+    cdef cppclass std_complex "std::complex" [T]:
+        X real()
+        X imag()
+
+@cname("{{cname}}")
+cdef object {{cname}}(const std_complex[X]& z):
+    cdef double complex tmp
+    tmp.real = z.real()
+    tmp.imag = z.imag()
+    return tmp
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CppSupport.cpp b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CppSupport.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..03f37a22ba4804d2632de9f97b8a30af11deffdc
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CppSupport.cpp
@@ -0,0 +1,133 @@
+/////////////// CppExceptionConversion.proto ///////////////
+
+#ifndef __Pyx_CppExn2PyErr
+#include <new>
+#include <typeinfo>
+#include <stdexcept>
+#include <ios>
+
+static void __Pyx_CppExn2PyErr() {
+  // Catch a handful of different errors here and turn them into the
+  // equivalent Python errors.
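+  // For context: this handler is what Cython calls from the catch(...) wrapper
+  // it generates around calls to C++ functions declared with `except +`;
+  // declaring `except +SomeError` instead raises that fixed exception type.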
+  try {
+    if (PyErr_Occurred())
+      ; // let the latest Python exn pass through and ignore the current one
+    else
+      throw;
+  } catch (const std::bad_alloc& exn) {
+    PyErr_SetString(PyExc_MemoryError, exn.what());
+  } catch (const std::bad_cast& exn) {
+    PyErr_SetString(PyExc_TypeError, exn.what());
+  } catch (const std::bad_typeid& exn) {
+    PyErr_SetString(PyExc_TypeError, exn.what());
+  } catch (const std::domain_error& exn) {
+    PyErr_SetString(PyExc_ValueError, exn.what());
+  } catch (const std::invalid_argument& exn) {
+    PyErr_SetString(PyExc_ValueError, exn.what());
+  } catch (const std::ios_base::failure& exn) {
+    // Unfortunately, in standard C++ we have no way of distinguishing EOF
+    // from other errors here; be careful with the exception mask
+    PyErr_SetString(PyExc_IOError, exn.what());
+  } catch (const std::out_of_range& exn) {
+    // Change out_of_range to IndexError
+    PyErr_SetString(PyExc_IndexError, exn.what());
+  } catch (const std::overflow_error& exn) {
+    PyErr_SetString(PyExc_OverflowError, exn.what());
+  } catch (const std::range_error& exn) {
+    PyErr_SetString(PyExc_ArithmeticError, exn.what());
+  } catch (const std::underflow_error& exn) {
+    PyErr_SetString(PyExc_ArithmeticError, exn.what());
+  } catch (const std::exception& exn) {
+    PyErr_SetString(PyExc_RuntimeError, exn.what());
+  }
+  catch (...)
+  {
+    PyErr_SetString(PyExc_RuntimeError, "Unknown exception");
+  }
+}
+#endif
+
+/////////////// PythranConversion.proto ///////////////
+
+template <class T>
+auto __Pyx_pythran_to_python(T &&value) -> decltype(to_python(
+    typename pythonic::returnable<typename std::remove_cv<typename std::remove_reference<T>::type>::type>::type{std::forward<T>(value)}))
+{
+  using returnable_type = typename pythonic::returnable<typename std::remove_cv<typename std::remove_reference<T>::type>::type>::type;
+  return to_python(returnable_type{std::forward<T>(value)});
+}
+
+#define __Pyx_PythranShapeAccessor(x) (pythonic::builtins::getattr(pythonic::types::attr::SHAPE{}, x))
+
+////////////// MoveIfSupported.proto //////////////////
+
+#if CYTHON_USE_CPP_STD_MOVE
+  #include <utility>
+  #define __PYX_STD_MOVE_IF_SUPPORTED(x) std::move(x)
+#else
+  #define __PYX_STD_MOVE_IF_SUPPORTED(x) x
+#endif
+
+////////////// EnumClassDecl.proto //////////////////
+//@proto_block: utility_code_proto_before_types
+
+#if defined (_MSC_VER)
+  #if _MSC_VER >= 1910
+    #define __PYX_ENUM_CLASS_DECL enum
+  #else
+    #define __PYX_ENUM_CLASS_DECL
+  #endif
+#else
+  #define __PYX_ENUM_CLASS_DECL enum
+#endif
+
+////////////// OptionalLocals.proto ////////////////
+//@proto_block: utility_code_proto_before_types
+
+#include <utility>
+#if defined(CYTHON_USE_BOOST_OPTIONAL)
+    // fallback mode - std::optional is preferred but this gives
+    // people with a less up-to-date compiler a chance
+    #include <boost/optional.hpp>
+    #define __Pyx_Optional_BaseType boost::optional
+#else
+    #include <optional>
+    // since std::optional is a C++17 feature, a templated using declaration should be safe
+    // (although it could be replaced with a define)
+    template <typename T>
+    using __Pyx_Optional_BaseType = std::optional<T>;
+#endif
+
+// This class reuses as much of the implementation of std::optional as possible.
+// The only place it differs significantly is the assignment operators, which use
+// "emplace" (thus requiring move/copy constructors, but not move/copy
+// This is preferred because it lets us work with assignable
+// types (for example those with const members)
+template <typename T>
+class __Pyx_Optional_Type : private __Pyx_Optional_BaseType<T> {
+public:
+    using __Pyx_Optional_BaseType<T>::__Pyx_Optional_BaseType;
+    using __Pyx_Optional_BaseType<T>::has_value;
+    using __Pyx_Optional_BaseType<T>::operator*;
+    using __Pyx_Optional_BaseType<T>::operator->;
+#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600)
+    __Pyx_Optional_Type& operator=(const __Pyx_Optional_Type& rhs) {
+        this->emplace(*rhs);
+        return *this;
+    }
+    __Pyx_Optional_Type& operator=(__Pyx_Optional_Type&& rhs) {
+        this->emplace(std::move(*rhs));
+        return *this;
+    }
+    template <typename U>
+    __Pyx_Optional_Type& operator=(U&& rhs) {
+        this->emplace(std::forward<U>(rhs));
+        return *this;
+    }
+#else
+    // Note - the "cpp_locals" feature is designed to require C++14.
+    // This pre-c++11 fallback is largely untested, and definitely won't work
+    // in all the cases that the more modern version does
+    using __Pyx_Optional_BaseType<T>::operator=;  // the chances are emplace can't work...
+#endif
+};
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CythonFunction.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CythonFunction.c
new file mode 100644
index 0000000000000000000000000000000000000000..2f8efc9e8b53fdffc1d68ebb47559b38ff80a657
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/CythonFunction.c
@@ -0,0 +1,1804 @@
+
+//////////////////// CythonFunctionShared.proto ////////////////////
+
+#define __Pyx_CyFunction_USED
+
+#define __Pyx_CYFUNCTION_STATICMETHOD  0x01
+#define __Pyx_CYFUNCTION_CLASSMETHOD   0x02
+#define __Pyx_CYFUNCTION_CCLASS        0x04
+#define __Pyx_CYFUNCTION_COROUTINE     0x08
+
+#define __Pyx_CyFunction_GetClosure(f) \
+    (((__pyx_CyFunctionObject *) (f))->func_closure)
+
+#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API
+  #define __Pyx_CyFunction_GetClassObj(f) \
+      (((__pyx_CyFunctionObject *) (f))->func_classobj)
+#else
+  #define __Pyx_CyFunction_GetClassObj(f) \
+      ((PyObject*) ((PyCMethodObject *) (f))->mm_class)
+#endif
+#define __Pyx_CyFunction_SetClassObj(f, classobj) \
+    __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj))
+
+#define __Pyx_CyFunction_Defaults(type, f) \
+    ((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
+#define __Pyx_CyFunction_SetDefaultsGetter(f, g) \
+    ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
+
+
+typedef struct {
+#if CYTHON_COMPILING_IN_LIMITED_API
+    PyObject_HEAD
+    // We can't "inherit" from func, but we can use it as a data store
+    PyObject *func;
+#elif PY_VERSION_HEX < 0x030900B1
+    PyCFunctionObject func;
+#else
+    // PEP-573: PyCFunctionObject + mm_class
+    PyCMethodObject func;
+#endif
+#if CYTHON_BACKPORT_VECTORCALL
+    __pyx_vectorcallfunc func_vectorcall;
+#endif
+#if PY_VERSION_HEX < 0x030500A0 || CYTHON_COMPILING_IN_LIMITED_API
+    PyObject *func_weakreflist;
+#endif
+    PyObject *func_dict;
+    PyObject *func_name;
+    PyObject *func_qualname;
+    PyObject *func_doc;
+    PyObject *func_globals;
+    PyObject *func_code;
+    PyObject *func_closure;
+#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API
+    // No-args super() class cell
+    PyObject *func_classobj;
+#endif
+    // Dynamic default args and annotations
+    void *defaults;
+    int defaults_pyobjects;
+    size_t defaults_size;  /* used by FusedFunction for copying defaults */
+    int flags;
+
+    // Defaults info
+    PyObject *defaults_tuple;   /* Const defaults tuple */
+    PyObject *defaults_kwdict;  /* Const kwonly defaults
dict */ + PyObject *(*defaults_getter)(PyObject *); + PyObject *func_annotations; /* function annotations dict */ + + // Coroutine marker + PyObject *func_is_coroutine; +} __pyx_CyFunctionObject; + +#undef __Pyx_CyOrPyCFunction_Check +#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_CyFunctionType) +#define __Pyx_CyOrPyCFunction_Check(obj) __Pyx_TypeCheck2(obj, __pyx_CyFunctionType, &PyCFunction_Type) +#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CyFunctionType) +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void *cfunc);/*proto*/ +#undef __Pyx_IsSameCFunction +#define __Pyx_IsSameCFunction(func, cfunc) __Pyx__IsSameCyOrCFunction(func, cfunc) + +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *closure, + PyObject *module, PyObject *globals, + PyObject* code); + +static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj); +static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, + size_t size, + int pyobjects); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, + PyObject *tuple); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, + PyObject *dict); +static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, + PyObject *dict); + + +static int __pyx_CyFunction_init(PyObject *module); + +#if CYTHON_METH_FASTCALL +static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +#if CYTHON_BACKPORT_VECTORCALL +#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall) +#else +#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall) +#endif +#endif + +//////////////////// CythonFunctionShared //////////////////// +//@substitute: naming +//@requires: CommonStructures.c::FetchCommonType +//@requires: ObjectHandling.c::PyMethodNew +//@requires: ObjectHandling.c::PyVectorcallFastCallDict +//@requires: ModuleSetupCode.c::IncludeStructmemberH +//@requires: ObjectHandling.c::PyObjectGetAttrStr + +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void *cfunc) { + if (__Pyx_CyFunction_Check(func)) { + return PyCFunction_GetFunction(((__pyx_CyFunctionObject*)func)->func) == (PyCFunction) cfunc; + } else if (PyCFunction_Check(func)) { + return PyCFunction_GetFunction(func) == (PyCFunction) cfunc; + } + return 0; +} +#else +static CYTHON_INLINE int __Pyx__IsSameCyOrCFunction(PyObject *func, void *cfunc) { + return __Pyx_CyOrPyCFunction_Check(func) && __Pyx_CyOrPyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc; +} +#endif + +static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj) { +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + __Pyx_Py_XDECREF_SET( + __Pyx_CyFunction_GetClassObj(f), + ((classobj) ? 
__Pyx_NewRef(classobj) : NULL)); +#else + __Pyx_Py_XDECREF_SET( + // assigning to "mm_class", which is a "PyTypeObject*" + ((PyCMethodObject *) (f))->mm_class, + (PyTypeObject*)((classobj) ? __Pyx_NewRef(classobj) : NULL)); +#endif +} + +static PyObject * +__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, void *closure) +{ + CYTHON_UNUSED_VAR(closure); + if (unlikely(op->func_doc == NULL)) { +#if CYTHON_COMPILING_IN_LIMITED_API + op->func_doc = PyObject_GetAttrString(op->func, "__doc__"); + if (unlikely(!op->func_doc)) return NULL; +#else + if (((PyCFunctionObject*)op)->m_ml->ml_doc) { +#if PY_MAJOR_VERSION >= 3 + op->func_doc = PyUnicode_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); +#else + op->func_doc = PyString_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); +#endif + if (unlikely(op->func_doc == NULL)) + return NULL; + } else { + Py_INCREF(Py_None); + return Py_None; + } +#endif /* CYTHON_COMPILING_IN_LIMITED_API */ + } + Py_INCREF(op->func_doc); + return op->func_doc; +} + +static int +__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (value == NULL) { + // Mark as deleted + value = Py_None; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_doc, value); + return 0; +} + +static PyObject * +__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(op->func_name == NULL)) { +#if CYTHON_COMPILING_IN_LIMITED_API + op->func_name = PyObject_GetAttrString(op->func, "__name__"); +#elif PY_MAJOR_VERSION >= 3 + op->func_name = PyUnicode_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); +#else + op->func_name = PyString_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); +#endif /* CYTHON_COMPILING_IN_LIMITED_API */ + if (unlikely(op->func_name == NULL)) + return NULL; + } + Py_INCREF(op->func_name); + return op->func_name; +} + +static int +__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__name__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_name, value); + return 0; +} + +static PyObject * +__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + Py_INCREF(op->func_qualname); + return op->func_qualname; +} + +static int +__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__qualname__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_qualname, value); + return 0; +} + +static PyObject * +__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(op->func_dict == NULL)) { + op->func_dict = PyDict_New(); + if (unlikely(op->func_dict == NULL)) + return NULL; + } + Py_INCREF(op->func_dict); + return op->func_dict; +} + +static int +__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(value == NULL)) { + 
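+        // "del func.__dict__" reaches this setter with value == NULL; mirror
+        // CPython, which likewise refuses to delete a function's __dict__.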
PyErr_SetString(PyExc_TypeError, + "function's dictionary may not be deleted"); + return -1; + } + if (unlikely(!PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "setting function's dictionary to a non-dict"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_dict, value); + return 0; +} + +static PyObject * +__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + Py_INCREF(op->func_globals); + return op->func_globals; +} + +static PyObject * +__Pyx_CyFunction_get_closure(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(op); + CYTHON_UNUSED_VAR(context); + Py_INCREF(Py_None); + return Py_None; +} + +static PyObject * +__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, void *context) +{ + PyObject* result = (op->func_code) ? op->func_code : Py_None; + CYTHON_UNUSED_VAR(context); + Py_INCREF(result); + return result; +} + +static int +__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { + int result = 0; + PyObject *res = op->defaults_getter((PyObject *) op); + if (unlikely(!res)) + return -1; + + // Cache result + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + op->defaults_tuple = PyTuple_GET_ITEM(res, 0); + Py_INCREF(op->defaults_tuple); + op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); + Py_INCREF(op->defaults_kwdict); + #else + op->defaults_tuple = __Pyx_PySequence_ITEM(res, 0); + if (unlikely(!op->defaults_tuple)) result = -1; + else { + op->defaults_kwdict = __Pyx_PySequence_ITEM(res, 1); + if (unlikely(!op->defaults_kwdict)) result = -1; + } + #endif + Py_DECREF(res); + return result; +} + +static int +__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value) { + // del => explicit None to prevent rebuilding + value = Py_None; + } else if (unlikely(value != Py_None && !PyTuple_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__defaults__ must be set to a tuple object"); + return -1; + } + PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__defaults__ will not " + "currently affect the values used in function calls", 1); + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->defaults_tuple, value); + return 0; +} + +static PyObject * +__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = op->defaults_tuple; + CYTHON_UNUSED_VAR(context); + if (unlikely(!result)) { + if (op->defaults_getter) { + if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; + result = op->defaults_tuple; + } else { + result = Py_None; + } + } + Py_INCREF(result); + return result; +} + +static int +__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value) { + // del => explicit None to prevent rebuilding + value = Py_None; + } else if (unlikely(value != Py_None && !PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__kwdefaults__ must be set to a dict object"); + return -1; + } + PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__kwdefaults__ will not " + "currently affect the values used in function calls", 1); + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->defaults_kwdict, value); + return 0; +} + +static PyObject * +__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = op->defaults_kwdict; + CYTHON_UNUSED_VAR(context); + if (unlikely(!result)) { + if (op->defaults_getter) { + if 
(unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; + result = op->defaults_kwdict; + } else { + result = Py_None; + } + } + Py_INCREF(result); + return result; +} + +static int +__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value || value == Py_None) { + value = NULL; + } else if (unlikely(!PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__annotations__ must be set to a dict object"); + return -1; + } + Py_XINCREF(value); + __Pyx_Py_XDECREF_SET(op->func_annotations, value); + return 0; +} + +static PyObject * +__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = op->func_annotations; + CYTHON_UNUSED_VAR(context); + if (unlikely(!result)) { + result = PyDict_New(); + if (unlikely(!result)) return NULL; + op->func_annotations = result; + } + Py_INCREF(result); + return result; +} + +static PyObject * +__Pyx_CyFunction_get_is_coroutine(__pyx_CyFunctionObject *op, void *context) { + int is_coroutine; + CYTHON_UNUSED_VAR(context); + if (op->func_is_coroutine) { + return __Pyx_NewRef(op->func_is_coroutine); + } + + is_coroutine = op->flags & __Pyx_CYFUNCTION_COROUTINE; +#if PY_VERSION_HEX >= 0x03050000 + if (is_coroutine) { + PyObject *module, *fromlist, *marker = PYIDENT("_is_coroutine"); + fromlist = PyList_New(1); + if (unlikely(!fromlist)) return NULL; + Py_INCREF(marker); +#if CYTHON_ASSUME_SAFE_MACROS + PyList_SET_ITEM(fromlist, 0, marker); +#else + if (unlikely(PyList_SetItem(fromlist, 0, marker) < 0)) { + Py_DECREF(marker); + Py_DECREF(fromlist); + return NULL; + } +#endif + module = PyImport_ImportModuleLevelObject(PYIDENT("asyncio.coroutines"), NULL, NULL, fromlist, 0); + Py_DECREF(fromlist); + if (unlikely(!module)) goto ignore; + op->func_is_coroutine = __Pyx_PyObject_GetAttrStr(module, marker); + Py_DECREF(module); + if (likely(op->func_is_coroutine)) { + return __Pyx_NewRef(op->func_is_coroutine); + } +ignore: + PyErr_Clear(); + } +#endif + + op->func_is_coroutine = __Pyx_PyBool_FromLong(is_coroutine); + return __Pyx_NewRef(op->func_is_coroutine); +} + +//#if PY_VERSION_HEX >= 0x030400C1 +//static PyObject * +//__Pyx_CyFunction_get_signature(__pyx_CyFunctionObject *op, void *context) { +// PyObject *inspect_module, *signature_class, *signature; +// CYTHON_UNUSED_VAR(context); +// // from inspect import Signature +// inspect_module = PyImport_ImportModuleLevelObject(PYIDENT("inspect"), NULL, NULL, NULL, 0); +// if (unlikely(!inspect_module)) +// goto bad; +// signature_class = __Pyx_PyObject_GetAttrStr(inspect_module, PYIDENT("Signature")); +// Py_DECREF(inspect_module); +// if (unlikely(!signature_class)) +// goto bad; +// // return Signature.from_function(op) +// signature = PyObject_CallMethodObjArgs(signature_class, PYIDENT("from_function"), op, NULL); +// Py_DECREF(signature_class); +// if (likely(signature)) +// return signature; +//bad: +// // make sure we raise an AttributeError from this property on any errors +// if (!PyErr_ExceptionMatches(PyExc_AttributeError)) +// PyErr_SetString(PyExc_AttributeError, "failed to calculate __signature__"); +// return NULL; +//} +//#endif + +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject * +__Pyx_CyFunction_get_module(__pyx_CyFunctionObject *op, void *context) { + CYTHON_UNUSED_VAR(context); + return PyObject_GetAttrString(op->func, "__module__"); +} + +static int +__Pyx_CyFunction_set_module(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + 
CYTHON_UNUSED_VAR(context); + return PyObject_SetAttrString(op->func, "__module__", value); +} +#endif + +static PyGetSetDef __pyx_CyFunction_getsets[] = { + {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, + {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, + {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, + {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, + {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, + {(char *) "_is_coroutine", (getter)__Pyx_CyFunction_get_is_coroutine, 0, 0, 0}, +//#if PY_VERSION_HEX >= 0x030400C1 +// {(char *) "__signature__", (getter)__Pyx_CyFunction_get_signature, 0, 0, 0}, +//#endif +#if CYTHON_COMPILING_IN_LIMITED_API + {"__module__", (getter)__Pyx_CyFunction_get_module, (setter)__Pyx_CyFunction_set_module, 0, 0}, +#endif + {0, 0, 0, 0, 0} +}; + +static PyMemberDef __pyx_CyFunction_members[] = { +#if !CYTHON_COMPILING_IN_LIMITED_API + {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), 0, 0}, +#endif +#if CYTHON_USE_TYPE_SPECS + {(char *) "__dictoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_dict), READONLY, 0}, +#if CYTHON_METH_FASTCALL +#if CYTHON_BACKPORT_VECTORCALL + {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_vectorcall), READONLY, 0}, +#else +#if !CYTHON_COMPILING_IN_LIMITED_API + {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(PyCFunctionObject, vectorcall), READONLY, 0}, +#endif +#endif +#endif +#if PY_VERSION_HEX < 0x030500A0 || CYTHON_COMPILING_IN_LIMITED_API + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_weakreflist), READONLY, 0}, +#else + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(PyCFunctionObject, m_weakreflist), READONLY, 0}, +#endif +#endif + {0, 0, 0, 0, 0} +}; + +static PyObject * +__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, PyObject *args) +{ + CYTHON_UNUSED_VAR(args); +#if PY_MAJOR_VERSION >= 3 + Py_INCREF(m->func_qualname); + return m->func_qualname; +#else + return PyString_FromString(((PyCFunctionObject*)m)->m_ml->ml_name); +#endif +} + +static PyMethodDef __pyx_CyFunction_methods[] = { + {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, 
+ {0, 0, 0, 0} +}; + + +#if PY_VERSION_HEX < 0x030500A0 || CYTHON_COMPILING_IN_LIMITED_API +#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) +#else +#define __Pyx_CyFunction_weakreflist(cyfunc) (((PyCFunctionObject*)cyfunc)->m_weakreflist) +#endif + +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { +#if !CYTHON_COMPILING_IN_LIMITED_API + PyCFunctionObject *cf = (PyCFunctionObject*) op; +#endif + if (unlikely(op == NULL)) + return NULL; +#if CYTHON_COMPILING_IN_LIMITED_API + // Note that we end up with a circular reference to op. This isn't + // a disaster, but in an ideal world it'd be nice to avoid it. + op->func = PyCFunction_NewEx(ml, (PyObject*)op, module); + if (unlikely(!op->func)) return NULL; +#endif + op->flags = flags; + __Pyx_CyFunction_weakreflist(op) = NULL; +#if !CYTHON_COMPILING_IN_LIMITED_API + cf->m_ml = ml; + cf->m_self = (PyObject *) op; +#endif + Py_XINCREF(closure); + op->func_closure = closure; +#if !CYTHON_COMPILING_IN_LIMITED_API + Py_XINCREF(module); + cf->m_module = module; +#endif + op->func_dict = NULL; + op->func_name = NULL; + Py_INCREF(qualname); + op->func_qualname = qualname; + op->func_doc = NULL; +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + op->func_classobj = NULL; +#else + ((PyCMethodObject*)op)->mm_class = NULL; +#endif + op->func_globals = globals; + Py_INCREF(op->func_globals); + Py_XINCREF(code); + op->func_code = code; + // Dynamic Default args + op->defaults_pyobjects = 0; + op->defaults_size = 0; + op->defaults = NULL; + op->defaults_tuple = NULL; + op->defaults_kwdict = NULL; + op->defaults_getter = NULL; + op->func_annotations = NULL; + op->func_is_coroutine = NULL; +#if CYTHON_METH_FASTCALL + switch (ml->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O | METH_KEYWORDS | METH_METHOD)) { + case METH_NOARGS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_NOARGS; + break; + case METH_O: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_O; + break; + // case METH_FASTCALL is not used + case METH_METHOD | METH_FASTCALL | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD; + break; + case METH_FASTCALL | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS; + break; + // case METH_VARARGS is not used + case METH_VARARGS | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = NULL; + break; + default: + PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); + Py_DECREF(op); + return NULL; + } +#endif + return (PyObject *) op; +} + +static int +__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) +{ + Py_CLEAR(m->func_closure); +#if CYTHON_COMPILING_IN_LIMITED_API + Py_CLEAR(m->func); +#else + Py_CLEAR(((PyCFunctionObject*)m)->m_module); +#endif + Py_CLEAR(m->func_dict); + Py_CLEAR(m->func_name); + Py_CLEAR(m->func_qualname); + Py_CLEAR(m->func_doc); + Py_CLEAR(m->func_globals); + Py_CLEAR(m->func_code); +#if !CYTHON_COMPILING_IN_LIMITED_API +#if PY_VERSION_HEX < 0x030900B1 + Py_CLEAR(__Pyx_CyFunction_GetClassObj(m)); +#else + { + PyObject *cls = (PyObject*) ((PyCMethodObject *) (m))->mm_class; + ((PyCMethodObject *) (m))->mm_class = NULL; + Py_XDECREF(cls); + } +#endif +#endif + Py_CLEAR(m->defaults_tuple); + Py_CLEAR(m->defaults_kwdict); + Py_CLEAR(m->func_annotations); + 
    Py_CLEAR(m->func_is_coroutine);
+
+    if (m->defaults) {
+        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
+        int i;
+
+        for (i = 0; i < m->defaults_pyobjects; i++)
+            Py_XDECREF(pydefaults[i]);
+
+        PyObject_Free(m->defaults);
+        m->defaults = NULL;
+    }
+
+    return 0;
+}
+
+static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m)
+{
+    if (__Pyx_CyFunction_weakreflist(m) != NULL)
+        PyObject_ClearWeakRefs((PyObject *) m);
+    __Pyx_CyFunction_clear(m);
+    __Pyx_PyHeapTypeObject_GC_Del(m);
+}
+
+static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
+{
+    PyObject_GC_UnTrack(m);
+    __Pyx__CyFunction_dealloc(m);
+}
+
+static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
+{
+    Py_VISIT(m->func_closure);
+#if CYTHON_COMPILING_IN_LIMITED_API
+    Py_VISIT(m->func);
+#else
+    Py_VISIT(((PyCFunctionObject*)m)->m_module);
+#endif
+    Py_VISIT(m->func_dict);
+    Py_VISIT(m->func_name);
+    Py_VISIT(m->func_qualname);
+    Py_VISIT(m->func_doc);
+    Py_VISIT(m->func_globals);
+    Py_VISIT(m->func_code);
+#if !CYTHON_COMPILING_IN_LIMITED_API
+    Py_VISIT(__Pyx_CyFunction_GetClassObj(m));
+#endif
+    Py_VISIT(m->defaults_tuple);
+    Py_VISIT(m->defaults_kwdict);
+    Py_VISIT(m->func_is_coroutine);
+
+    if (m->defaults) {
+        PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
+        int i;
+
+        for (i = 0; i < m->defaults_pyobjects; i++)
+            Py_VISIT(pydefaults[i]);
+    }
+
+    return 0;
+}
+
+static PyObject*
+__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
+{
+#if PY_MAJOR_VERSION >= 3
+    return PyUnicode_FromFormat("<cyfunction %U at %p>",
+                                op->func_qualname, (void *)op);
+#else
+    return PyString_FromFormat("<cyfunction %s at %p>",
+                               PyString_AsString(op->func_qualname), (void *)op);
+#endif
+}
+
+static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
+    // originally copied from PyCFunction_Call() in CPython's Objects/methodobject.c
+#if CYTHON_COMPILING_IN_LIMITED_API
+    PyObject *f = ((__pyx_CyFunctionObject*)func)->func;
+    PyObject *py_name = NULL;
+    PyCFunction meth;
+    int flags;
+    meth = PyCFunction_GetFunction(f);
+    if (unlikely(!meth)) return NULL;
+    flags = PyCFunction_GetFlags(f);
+    if (unlikely(flags < 0)) return NULL;
+#else
+    PyCFunctionObject* f = (PyCFunctionObject*)func;
+    PyCFunction meth = f->m_ml->ml_meth;
+    int flags = f->m_ml->ml_flags;
+#endif
+
+    Py_ssize_t size;
+
+    switch (flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
+    case METH_VARARGS:
+        if (likely(kw == NULL || PyDict_Size(kw) == 0))
+            return (*meth)(self, arg);
+        break;
+    case METH_VARARGS | METH_KEYWORDS:
+        return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw);
+    case METH_NOARGS:
+        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
+#if CYTHON_ASSUME_SAFE_MACROS
+            size = PyTuple_GET_SIZE(arg);
+#else
+            size = PyTuple_Size(arg);
+            if (unlikely(size < 0)) return NULL;
+#endif
+            if (likely(size == 0))
+                return (*meth)(self, NULL);
+#if CYTHON_COMPILING_IN_LIMITED_API
+            py_name = __Pyx_CyFunction_get_name((__pyx_CyFunctionObject*)func, NULL);
+            if (!py_name) return NULL;
+            PyErr_Format(PyExc_TypeError,
+                "%.200S() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
+                py_name, size);
+            Py_DECREF(py_name);
+#else
+            PyErr_Format(PyExc_TypeError,
+                "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
+                f->m_ml->ml_name, size);
+#endif
+            return NULL;
+        }
+        break;
+    case METH_O:
+        if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
+#if CYTHON_ASSUME_SAFE_MACROS
+            size = PyTuple_GET_SIZE(arg);
+#else
+            size = PyTuple_Size(arg);
+            if
(unlikely(size < 0)) return NULL; +#endif + if (likely(size == 1)) { + PyObject *result, *arg0; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + arg0 = PyTuple_GET_ITEM(arg, 0); + #else + arg0 = __Pyx_PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; + #endif + result = (*meth)(self, arg0); + #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(arg0); + #endif + return result; + } +#if CYTHON_COMPILING_IN_LIMITED_API + py_name = __Pyx_CyFunction_get_name((__pyx_CyFunctionObject*)func, NULL); + if (!py_name) return NULL; + PyErr_Format(PyExc_TypeError, + "%.200S() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + py_name, size); + Py_DECREF(py_name); +#else + PyErr_Format(PyExc_TypeError, + "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + f->m_ml->ml_name, size); +#endif + + return NULL; + } + break; + default: + PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); + return NULL; + } +#if CYTHON_COMPILING_IN_LIMITED_API + py_name = __Pyx_CyFunction_get_name((__pyx_CyFunctionObject*)func, NULL); + if (!py_name) return NULL; + PyErr_Format(PyExc_TypeError, "%.200S() takes no keyword arguments", + py_name); + Py_DECREF(py_name); +#else + PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", + f->m_ml->ml_name); +#endif + return NULL; +} + +static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *self, *result; +#if CYTHON_COMPILING_IN_LIMITED_API + // PyCFunction_GetSelf returns a borrowed reference + self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)func)->func); + if (unlikely(!self) && PyErr_Occurred()) return NULL; +#else + self = ((PyCFunctionObject*)func)->m_self; +#endif + result = __Pyx_CyFunction_CallMethod(func, self, arg, kw); + return result; +} + +static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { + PyObject *result; + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; + +#if CYTHON_METH_FASTCALL + // Prefer vectorcall if available. This is not the typical case, as + // CPython would normally use vectorcall directly instead of tp_call. + __pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc); + if (vc) { +#if CYTHON_ASSUME_SAFE_MACROS + return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw); +#else + // avoid unused function warning + (void) &__Pyx_PyVectorcall_FastCallDict; + return PyVectorcall_Call(func, args, kw); +#endif + } +#endif + + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + Py_ssize_t argc; + PyObject *new_args; + PyObject *self; + +#if CYTHON_ASSUME_SAFE_MACROS + argc = PyTuple_GET_SIZE(args); +#else + argc = PyTuple_Size(args); + if (unlikely(!argc) < 0) return NULL; +#endif + new_args = PyTuple_GetSlice(args, 1, argc); + + if (unlikely(!new_args)) + return NULL; + + self = PyTuple_GetItem(args, 0); + if (unlikely(!self)) { + Py_DECREF(new_args); +#if PY_MAJOR_VERSION > 2 + PyErr_Format(PyExc_TypeError, + "unbound method %.200S() needs an argument", + cyfunc->func_qualname); +#else + // %S doesn't work in PyErr_Format on Py2 and replicating + // the formatting seems more trouble than it's worth + // (so produce a less useful error message). 
+ PyErr_SetString(PyExc_TypeError, + "unbound method needs an argument"); +#endif + return NULL; + } + + result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); + Py_DECREF(new_args); + } else { + result = __Pyx_CyFunction_Call(func, args, kw); + } + return result; +} + +#if CYTHON_METH_FASTCALL +// Check that kwnames is empty (if you want to allow keyword arguments, +// simply pass kwnames=NULL) and figure out what to do with "self". +// Return value: +// 1: self = args[0] +// 0: self = cyfunc->func.m_self +// -1: error +static CYTHON_INLINE int __Pyx_CyFunction_Vectorcall_CheckArgs(__pyx_CyFunctionObject *cyfunc, Py_ssize_t nargs, PyObject *kwnames) +{ + int ret = 0; + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + if (unlikely(nargs < 1)) { + PyErr_Format(PyExc_TypeError, "%.200s() needs an argument", + ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); + return -1; + } + ret = 1; + } + if (unlikely(kwnames) && unlikely(PyTuple_GET_SIZE(kwnames))) { + PyErr_Format(PyExc_TypeError, + "%.200s() takes no keyword arguments", ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); + return -1; + } + return ret; +} + +static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + + if (unlikely(nargs != 0)) { + PyErr_Format(PyExc_TypeError, + "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", + def->ml_name, nargs); + return NULL; + } + return def->ml_meth(self, NULL); +} + +static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + + if (unlikely(nargs != 1)) { + PyErr_Format(PyExc_TypeError, + "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + def->ml_name, nargs); + return NULL; + } + return def->ml_meth(self, args[0]); +} + +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; 
+ } + + return ((_PyCFunctionFastWithKeywords)(void(*)(void))def->ml_meth)(self, args, nargs, kwnames); +} + +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; + PyTypeObject *cls = (PyTypeObject *) __Pyx_CyFunction_GetClassObj(cyfunc); +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + + return ((__Pyx_PyCMethod)(void(*)(void))def->ml_meth)(self, cls, args, (size_t)nargs, kwnames); +} +#endif + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_CyFunctionType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_CyFunction_dealloc}, + {Py_tp_repr, (void *)__Pyx_CyFunction_repr}, + {Py_tp_call, (void *)__Pyx_CyFunction_CallAsMethod}, + {Py_tp_traverse, (void *)__Pyx_CyFunction_traverse}, + {Py_tp_clear, (void *)__Pyx_CyFunction_clear}, + {Py_tp_methods, (void *)__pyx_CyFunction_methods}, + {Py_tp_members, (void *)__pyx_CyFunction_members}, + {Py_tp_getset, (void *)__pyx_CyFunction_getsets}, + {Py_tp_descr_get, (void *)__Pyx_PyMethod_New}, + {0, 0}, +}; + +static PyType_Spec __pyx_CyFunctionType_spec = { + __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", + sizeof(__pyx_CyFunctionObject), + 0, +#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR + Py_TPFLAGS_METHOD_DESCRIPTOR | +#endif +#if (defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL) + _Py_TPFLAGS_HAVE_VECTORCALL | +#endif + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + __pyx_CyFunctionType_slots +}; +#else /* CYTHON_USE_TYPE_SPECS */ + +static PyTypeObject __pyx_CyFunctionType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", /*tp_name*/ + sizeof(__pyx_CyFunctionObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __Pyx_CyFunction_dealloc, /*tp_dealloc*/ +#if !CYTHON_METH_FASTCALL + 0, /*tp_print*/ +#elif CYTHON_BACKPORT_VECTORCALL + (printfunc)offsetof(__pyx_CyFunctionObject, func_vectorcall), /*tp_vectorcall_offset backported into tp_print*/ +#else + offsetof(PyCFunctionObject, vectorcall), /*tp_vectorcall_offset*/ +#endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ +#if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ +#else + 0, /*tp_as_async*/ +#endif + (reprfunc) __Pyx_CyFunction_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + __Pyx_CyFunction_CallAsMethod, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ +#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR + Py_TPFLAGS_METHOD_DESCRIPTOR | +#endif +#if defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL + _Py_TPFLAGS_HAVE_VECTORCALL | +#endif + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + 0, /*tp_doc*/ + (traverseproc) __Pyx_CyFunction_traverse, /*tp_traverse*/ + (inquiry) __Pyx_CyFunction_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ +#if PY_VERSION_HEX < 0x030500A0 + offsetof(__pyx_CyFunctionObject, func_weakreflist), /*tp_weaklistoffset*/ +#else + offsetof(PyCFunctionObject, m_weakreflist), /*tp_weaklistoffset*/ +#endif + 0, /*tp_iter*/ + 
0, /*tp_iternext*/ + __pyx_CyFunction_methods, /*tp_methods*/ + __pyx_CyFunction_members, /*tp_members*/ + __pyx_CyFunction_getsets, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + __Pyx_PyMethod_New, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + offsetof(__pyx_CyFunctionObject, func_dict),/*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif /* CYTHON_USE_TYPE_SPECS */ + + +static int __pyx_CyFunction_init(PyObject *module) { +#if CYTHON_USE_TYPE_SPECS + __pyx_CyFunctionType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CyFunctionType_spec, NULL); +#else + CYTHON_UNUSED_VAR(module); + __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); +#endif + if (unlikely(__pyx_CyFunctionType == NULL)) { + return -1; + } + return 0; +} + +static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + + m->defaults = PyObject_Malloc(size); + if (unlikely(!m->defaults)) + return PyErr_NoMemory(); + memset(m->defaults, 0, size); + m->defaults_pyobjects = pyobjects; + m->defaults_size = size; + return m->defaults; +} + +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_tuple = tuple; + Py_INCREF(tuple); +} + +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_kwdict = dict; + Py_INCREF(dict); +} + +static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->func_annotations = dict; + Py_INCREF(dict); +} + + +//////////////////// CythonFunction.proto //////////////////// + +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *closure, + PyObject *module, PyObject *globals, + PyObject* code); + +//////////////////// CythonFunction //////////////////// +//@requires: CythonFunctionShared + +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { + PyObject *op = __Pyx_CyFunction_Init( + PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), + ml, flags, qualname, closure, module, globals, code + ); + if (likely(op)) { + PyObject_GC_Track(op); + } + return op; +} + +//////////////////// CyFunctionClassCell.proto //////////////////// +static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *classobj);/*proto*/ + +//////////////////// CyFunctionClassCell //////////////////// +//@requires: CythonFunctionShared + +static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *classobj) { + Py_ssize_t i, count = PyList_GET_SIZE(cyfunctions); + + 
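+    // Runs once the enclosing class object has been created: every CyFunction
+    // defined in the class body gets its class cell patched in below, which is
+    // what makes zero-argument super() work inside cdef-class methods.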
for (i = 0; i < count; i++) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyList_GET_ITEM(cyfunctions, i); +#else + PySequence_ITEM(cyfunctions, i); + if (unlikely(!m)) + return -1; +#endif + __Pyx_CyFunction_SetClassObj(m, classobj); +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF((PyObject*)m); +#endif + } + return 0; +} + + +//////////////////// FusedFunction.proto //////////////////// + +typedef struct { + __pyx_CyFunctionObject func; + PyObject *__signatures__; + PyObject *self; +} __pyx_FusedFunctionObject; + +static PyObject *__pyx_FusedFunction_New(PyMethodDef *ml, int flags, + PyObject *qualname, PyObject *closure, + PyObject *module, PyObject *globals, + PyObject *code); + +static int __pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self); +static int __pyx_FusedFunction_init(PyObject *module); + +#define __Pyx_FusedFunction_USED + +//////////////////// FusedFunction //////////////////// +//@requires: CythonFunctionShared + +static PyObject * +__pyx_FusedFunction_New(PyMethodDef *ml, int flags, + PyObject *qualname, PyObject *closure, + PyObject *module, PyObject *globals, + PyObject *code) +{ + PyObject *op = __Pyx_CyFunction_Init( + // __pyx_CyFunctionObject is correct below since that's the cast that we want. + PyObject_GC_New(__pyx_CyFunctionObject, __pyx_FusedFunctionType), + ml, flags, qualname, closure, module, globals, code + ); + if (likely(op)) { + __pyx_FusedFunctionObject *fusedfunc = (__pyx_FusedFunctionObject *) op; + fusedfunc->__signatures__ = NULL; + fusedfunc->self = NULL; + PyObject_GC_Track(op); + } + return op; +} + +static void +__pyx_FusedFunction_dealloc(__pyx_FusedFunctionObject *self) +{ + PyObject_GC_UnTrack(self); + Py_CLEAR(self->self); + Py_CLEAR(self->__signatures__); + __Pyx__CyFunction_dealloc((__pyx_CyFunctionObject *) self); +} + +static int +__pyx_FusedFunction_traverse(__pyx_FusedFunctionObject *self, + visitproc visit, + void *arg) +{ + Py_VISIT(self->self); + Py_VISIT(self->__signatures__); + return __Pyx_CyFunction_traverse((__pyx_CyFunctionObject *) self, visit, arg); +} + +static int +__pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self) +{ + Py_CLEAR(self->self); + Py_CLEAR(self->__signatures__); + return __Pyx_CyFunction_clear((__pyx_CyFunctionObject *) self); +} + + +static PyObject * +__pyx_FusedFunction_descr_get(PyObject *self, PyObject *obj, PyObject *type) +{ + __pyx_FusedFunctionObject *func, *meth; + + func = (__pyx_FusedFunctionObject *) self; + + if (func->self || func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD) { + // Do not allow rebinding and don't do anything for static methods + Py_INCREF(self); + return self; + } + + if (obj == Py_None) + obj = NULL; + + if (func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD) + obj = type; + + if (obj == NULL) { + // We aren't actually binding to anything, save the effort of rebinding + Py_INCREF(self); + return self; + } + + meth = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_New( + ((PyCFunctionObject *) func)->m_ml, + ((__pyx_CyFunctionObject *) func)->flags, + ((__pyx_CyFunctionObject *) func)->func_qualname, + ((__pyx_CyFunctionObject *) func)->func_closure, + ((PyCFunctionObject *) func)->m_module, + ((__pyx_CyFunctionObject *) func)->func_globals, + ((__pyx_CyFunctionObject *) func)->func_code); + if (unlikely(!meth)) + return NULL; + + // defaults needs copying fully rather than just copying the pointer + // since otherwise it will be freed on destruction of meth 
despite + // belonging to func rather than meth + if (func->func.defaults) { + PyObject **pydefaults; + int i; + + if (unlikely(!__Pyx_CyFunction_InitDefaults( + (PyObject*)meth, + func->func.defaults_size, + func->func.defaults_pyobjects))) { + Py_XDECREF((PyObject*)meth); + return NULL; + } + memcpy(meth->func.defaults, func->func.defaults, func->func.defaults_size); + + pydefaults = __Pyx_CyFunction_Defaults(PyObject *, meth); + for (i = 0; i < meth->func.defaults_pyobjects; i++) + Py_XINCREF(pydefaults[i]); + } + + __Pyx_CyFunction_SetClassObj(meth, __Pyx_CyFunction_GetClassObj(func)); + + Py_XINCREF(func->__signatures__); + meth->__signatures__ = func->__signatures__; + + Py_XINCREF(func->func.defaults_tuple); + meth->func.defaults_tuple = func->func.defaults_tuple; + + Py_XINCREF(obj); + meth->self = obj; + + return (PyObject *) meth; +} + +static PyObject * +_obj_to_string(PyObject *obj) +{ + if (PyUnicode_CheckExact(obj)) + return __Pyx_NewRef(obj); +#if PY_MAJOR_VERSION == 2 + else if (PyString_Check(obj)) + return PyUnicode_FromEncodedObject(obj, NULL, "strict"); +#endif + else if (PyType_Check(obj)) + return PyObject_GetAttr(obj, PYIDENT("__name__")); + else + return PyObject_Unicode(obj); +} + +static PyObject * +__pyx_FusedFunction_getitem(__pyx_FusedFunctionObject *self, PyObject *idx) +{ + PyObject *signature = NULL; + PyObject *unbound_result_func; + PyObject *result_func = NULL; + + if (unlikely(self->__signatures__ == NULL)) { + PyErr_SetString(PyExc_TypeError, "Function is not fused"); + return NULL; + } + + if (PyTuple_Check(idx)) { + Py_ssize_t n = PyTuple_GET_SIZE(idx); + PyObject *list = PyList_New(n); + int i; + + if (unlikely(!list)) + return NULL; + + for (i = 0; i < n; i++) { + PyObject *string; +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyObject *item = PyTuple_GET_ITEM(idx, i); +#else + PyObject *item = PySequence_ITEM(idx, i); if (unlikely(!item)) goto __pyx_err; +#endif + string = _obj_to_string(item); +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(item); +#endif + if (unlikely(!string)) goto __pyx_err; + PyList_SET_ITEM(list, i, string); + } + + signature = PyUnicode_Join(PYUNICODE("|"), list); +__pyx_err:; + Py_DECREF(list); + } else { + signature = _obj_to_string(idx); + } + + if (unlikely(!signature)) + return NULL; + + unbound_result_func = PyObject_GetItem(self->__signatures__, signature); + + if (likely(unbound_result_func)) { + if (self->self) { + __pyx_FusedFunctionObject *unbound = (__pyx_FusedFunctionObject *) unbound_result_func; + + // TODO: move this to InitClassCell + __Pyx_CyFunction_SetClassObj(unbound, __Pyx_CyFunction_GetClassObj(self)); + + result_func = __pyx_FusedFunction_descr_get(unbound_result_func, + self->self, self->self); + } else { + result_func = unbound_result_func; + Py_INCREF(result_func); + } + } + + Py_DECREF(signature); + Py_XDECREF(unbound_result_func); + + return result_func; +} + +static PyObject * +__pyx_FusedFunction_callfunction(PyObject *func, PyObject *args, PyObject *kw) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; + int static_specialized = (cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD && + !((__pyx_FusedFunctionObject *) func)->__signatures__); + + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !static_specialized) { + return __Pyx_CyFunction_CallAsMethod(func, args, kw); + } else { + return __Pyx_CyFunction_Call(func, args, kw); + } +} + +// Note: the 'self' from method binding is passed in in the args tuple, +// whereas 
PyCFunctionObject's m_self is passed in as the first +// argument to the C function. For extension methods we need +// to pass 'self' as 'm_self' and not as the first element of the +// args tuple. + +static PyObject * +__pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw) +{ + __pyx_FusedFunctionObject *binding_func = (__pyx_FusedFunctionObject *) func; + Py_ssize_t argc = PyTuple_GET_SIZE(args); + PyObject *new_args = NULL; + __pyx_FusedFunctionObject *new_func = NULL; + PyObject *result = NULL; + int is_staticmethod = binding_func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD; + + if (binding_func->self) { + // Bound method call, put 'self' in the args tuple + PyObject *self; + Py_ssize_t i; + new_args = PyTuple_New(argc + 1); + if (unlikely(!new_args)) + return NULL; + + self = binding_func->self; + + Py_INCREF(self); + PyTuple_SET_ITEM(new_args, 0, self); + self = NULL; + + for (i = 0; i < argc; i++) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyObject *item = PyTuple_GET_ITEM(args, i); + Py_INCREF(item); +#else + PyObject *item = PySequence_ITEM(args, i); if (unlikely(!item)) goto bad; +#endif + PyTuple_SET_ITEM(new_args, i + 1, item); + } + + args = new_args; + } + + if (binding_func->__signatures__) { + PyObject *tup; + if (is_staticmethod && binding_func->func.flags & __Pyx_CYFUNCTION_CCLASS) { + // FIXME: this seems wrong, but we must currently pass the signatures dict as 'self' argument + tup = PyTuple_Pack(3, args, + kw == NULL ? Py_None : kw, + binding_func->func.defaults_tuple); + if (unlikely(!tup)) goto bad; + new_func = (__pyx_FusedFunctionObject *) __Pyx_CyFunction_CallMethod( + func, binding_func->__signatures__, tup, NULL); + } else { + tup = PyTuple_Pack(4, binding_func->__signatures__, args, + kw == NULL ? 
Py_None : kw, + binding_func->func.defaults_tuple); + if (unlikely(!tup)) goto bad; + new_func = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_callfunction(func, tup, NULL); + } + Py_DECREF(tup); + + if (unlikely(!new_func)) + goto bad; + + __Pyx_CyFunction_SetClassObj(new_func, __Pyx_CyFunction_GetClassObj(binding_func)); + + func = (PyObject *) new_func; + } + + result = __pyx_FusedFunction_callfunction(func, args, kw); +bad: + Py_XDECREF(new_args); + Py_XDECREF((PyObject *) new_func); + return result; +} + +static PyMemberDef __pyx_FusedFunction_members[] = { + {(char *) "__signatures__", + T_OBJECT, + offsetof(__pyx_FusedFunctionObject, __signatures__), + READONLY, + 0}, + {(char *) "__self__", T_OBJECT_EX, offsetof(__pyx_FusedFunctionObject, self), READONLY, 0}, + {0, 0, 0, 0, 0}, +}; + +static PyGetSetDef __pyx_FusedFunction_getsets[] = { + // __doc__ is None for the fused function type, but we need it to be + // a descriptor for the instance's __doc__, so rebuild the descriptor in our subclass + // (all other descriptors are inherited) + {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {0, 0, 0, 0, 0} +}; + +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_FusedFunctionType_slots[] = { + {Py_tp_dealloc, (void *)__pyx_FusedFunction_dealloc}, + {Py_tp_call, (void *)__pyx_FusedFunction_call}, + {Py_tp_traverse, (void *)__pyx_FusedFunction_traverse}, + {Py_tp_clear, (void *)__pyx_FusedFunction_clear}, + {Py_tp_members, (void *)__pyx_FusedFunction_members}, + {Py_tp_getset, (void *)__pyx_FusedFunction_getsets}, + {Py_tp_descr_get, (void *)__pyx_FusedFunction_descr_get}, + {Py_mp_subscript, (void *)__pyx_FusedFunction_getitem}, + {0, 0}, +}; + +static PyType_Spec __pyx_FusedFunctionType_spec = { + __PYX_TYPE_MODULE_PREFIX "fused_cython_function", + sizeof(__pyx_FusedFunctionObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + __pyx_FusedFunctionType_slots +}; + +#else /* !CYTHON_USE_TYPE_SPECS */ + +static PyMappingMethods __pyx_FusedFunction_mapping_methods = { + 0, + (binaryfunc) __pyx_FusedFunction_getitem, + 0, +}; + +static PyTypeObject __pyx_FusedFunctionType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "fused_cython_function", /*tp_name*/ + sizeof(__pyx_FusedFunctionObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor) __pyx_FusedFunction_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ +#if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ +#else + 0, /*tp_as_async*/ +#endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + &__pyx_FusedFunction_mapping_methods, /*tp_as_mapping*/ + 0, /*tp_hash*/ + (ternaryfunc) __pyx_FusedFunction_call, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + 0, /*tp_doc*/ + (traverseproc) __pyx_FusedFunction_traverse, /*tp_traverse*/ + (inquiry) __pyx_FusedFunction_clear,/*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + __pyx_FusedFunction_members, /*tp_members*/ + __pyx_FusedFunction_getsets, /*tp_getset*/ + // NOTE: tp_base may be changed later during module initialisation when importing CyFunction across modules. 
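+    // (__Pyx_FetchCommonType may hand back a shared type object created by
+    //  another Cython module, in which case this static tp_base is re-pointed
+    //  at that shared CyFunction type in __pyx_FusedFunction_init below.)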
+ &__pyx_CyFunctionType_type, /*tp_base*/ + 0, /*tp_dict*/ + __pyx_FusedFunction_descr_get, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ +#if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, /*tp_print*/ +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ +#endif +}; +#endif + +static int __pyx_FusedFunction_init(PyObject *module) { +#if CYTHON_USE_TYPE_SPECS + PyObject *bases = PyTuple_Pack(1, __pyx_CyFunctionType); + if (unlikely(!bases)) { + return -1; + } + __pyx_FusedFunctionType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_FusedFunctionType_spec, bases); + Py_DECREF(bases); +#else + CYTHON_UNUSED_VAR(module); + // Set base from __Pyx_FetchCommonTypeFromSpec, in case it's different from the local static value. + __pyx_FusedFunctionType_type.tp_base = __pyx_CyFunctionType; + __pyx_FusedFunctionType = __Pyx_FetchCommonType(&__pyx_FusedFunctionType_type); +#endif + if (unlikely(__pyx_FusedFunctionType == NULL)) { + return -1; + } + return 0; +} + +//////////////////// ClassMethod.proto //////////////////// + +#include "descrobject.h" +CYTHON_UNUSED static PyObject* __Pyx_Method_ClassMethod(PyObject *method); /*proto*/ + +//////////////////// ClassMethod //////////////////// + +static PyObject* __Pyx_Method_ClassMethod(PyObject *method) { +#if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM <= 0x05080000 + if (PyObject_TypeCheck(method, &PyWrapperDescr_Type)) { + // cdef classes + return PyClassMethod_New(method); + } +#else +#if CYTHON_COMPILING_IN_PYPY + // special C-API function only in PyPy >= 5.9 + if (PyMethodDescr_Check(method)) +#else + #if PY_MAJOR_VERSION == 2 + // PyMethodDescr_Type is not exposed in the CPython C-API in Py2. 
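+    // Workaround: recover the type dynamically instead, i.e. the C equivalent
+    // of `methoddescr_type = type(list.append)` in Python, cached in a
+    // function-static on first use.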
+ static PyTypeObject *methoddescr_type = NULL; + if (unlikely(methoddescr_type == NULL)) { + PyObject *meth = PyObject_GetAttrString((PyObject*)&PyList_Type, "append"); + if (unlikely(!meth)) return NULL; + methoddescr_type = Py_TYPE(meth); + Py_DECREF(meth); + } + #else + PyTypeObject *methoddescr_type = &PyMethodDescr_Type; + #endif + if (__Pyx_TypeCheck(method, methoddescr_type)) +#endif + { + // cdef classes + PyMethodDescrObject *descr = (PyMethodDescrObject *)method; + #if PY_VERSION_HEX < 0x03020000 + PyTypeObject *d_type = descr->d_type; + #else + PyTypeObject *d_type = descr->d_common.d_type; + #endif + return PyDescr_NewClassMethod(d_type, descr->d_method); + } +#endif + else if (PyMethod_Check(method)) { + // python classes + return PyClassMethod_New(PyMethod_GET_FUNCTION(method)); + } + else { + return PyClassMethod_New(method); + } +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Dataclasses.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Dataclasses.c new file mode 100644 index 0000000000000000000000000000000000000000..0b69049ab2202cb4666308ec77eb751c7a09275b --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Dataclasses.c @@ -0,0 +1,188 @@ +///////////////////// ModuleLoader.proto ////////////////////////// + +static PyObject* __Pyx_LoadInternalModule(const char* name, const char* fallback_code); /* proto */ + +//////////////////// ModuleLoader /////////////////////// +//@requires: CommonStructures.c::FetchSharedCythonModule + +static PyObject* __Pyx_LoadInternalModule(const char* name, const char* fallback_code) { + // We want to be able to use the contents of the standard library dataclasses module where available. + // If those objects aren't available (due to Python version) then a simple fallback is substituted + // instead, which largely just fails with a not-implemented error. 
+ // + // The fallbacks are placed in the "shared abi module" as a convenient internal place to + // store them + + PyObject *shared_abi_module = 0, *module = 0; +#if __PYX_LIMITED_VERSION_HEX >= 0x030d00A1 + PyObject *result; +#endif + + shared_abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!shared_abi_module) return NULL; + +#if __PYX_LIMITED_VERSION_HEX >= 0x030d00A1 + if (PyObject_GetOptionalAttrString(shared_abi_module, name, &result) != 0) { + Py_DECREF(shared_abi_module); + return result; + } +#else + if (PyObject_HasAttrString(shared_abi_module, name)) { + PyObject* result = PyObject_GetAttrString(shared_abi_module, name); + Py_DECREF(shared_abi_module); + return result; + } +#endif + + // the best and simplest case is simply to defer to the standard library (if available) + module = PyImport_ImportModule(name); + if (!module) { + PyObject *localDict, *runValue, *builtins, *modulename; + if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; + PyErr_Clear(); /* this is reasonably likely (especially on older versions of Python) */ +#if PY_MAJOR_VERSION < 3 + modulename = PyBytes_FromFormat("_cython_" CYTHON_ABI ".%s", name); +#else + modulename = PyUnicode_FromFormat("_cython_" CYTHON_ABI ".%s", name); +#endif + if (!modulename) goto bad; +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_CPYTHON + module = PyImport_AddModuleObject(modulename); /* borrowed */ +#else + module = PyImport_AddModule(PyBytes_AsString(modulename)); /* borrowed */ +#endif + Py_DECREF(modulename); + if (!module) goto bad; + Py_INCREF(module); + if (PyObject_SetAttrString(shared_abi_module, name, module) < 0) goto bad; + localDict = PyModule_GetDict(module); /* borrowed */ + if (!localDict) goto bad; + builtins = PyEval_GetBuiltins(); /* borrowed */ + if (!builtins) goto bad; + if (PyDict_SetItemString(localDict, "__builtins__", builtins) <0) goto bad; + + runValue = PyRun_String(fallback_code, Py_file_input, localDict, localDict); + if (!runValue) goto bad; + Py_DECREF(runValue); + } + goto shared_cleanup; + + bad: + Py_CLEAR(module); + shared_cleanup: + Py_XDECREF(shared_abi_module); + return module; +} + +///////////////////// SpecificModuleLoader.proto ////////////////////// +//@substitute: tempita + +static PyObject* __Pyx_Load_{{cname}}_Module(void); /* proto */ + + +//////////////////// SpecificModuleLoader /////////////////////// +//@requires: ModuleLoader + +static PyObject* __Pyx_Load_{{cname}}_Module(void) { + return __Pyx_LoadInternalModule("{{cname}}", {{py_code}}); +} + +//////////////////// DataclassesCallHelper.proto //////////////////////// + +static PyObject* __Pyx_DataclassesCallHelper(PyObject *callable, PyObject *kwds); /* proto */ + +//////////////////// DataclassesCallHelper //////////////////////// +//@substitute: naming + +// The signature of a few of the dataclasses module functions has +// been expanded over the years. Cython always passes the full set +// of arguments from the most recent version we know of, so needs +// to remove any arguments that don't exist on earlier versions. 
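+// For example (illustrative): dataclasses.field() only gained its "kw_only"
+// argument in Python 3.10, so on 3.9 and older "kw_only" must be filtered
+// out of the keyword dict before the call is made.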
+ +#if PY_MAJOR_VERSION >= 3 +static int __Pyx_DataclassesCallHelper_FilterToDict(PyObject *callable, PyObject *kwds, PyObject *new_kwds, PyObject *args_list, int is_kwonly) { + Py_ssize_t size, i; + size = PySequence_Size(args_list); + if (size == -1) return -1; + + for (i=0; i' +_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS() + +def dataclass(*args, **kwds): + raise NotImplementedError("Standard library 'dataclasses' module" + "is unavailable, likely due to the version of Python you're using.") + +# Markers for the various kinds of fields and pseudo-fields. +class _FIELD_BASE: + def __init__(self, name): + self.name = name + def __repr__(self): + return self.name +_FIELD = _FIELD_BASE('_FIELD') +_FIELD_CLASSVAR = _FIELD_BASE('_FIELD_CLASSVAR') +_FIELD_INITVAR = _FIELD_BASE('_FIELD_INITVAR') + +def field(*ignore, **kwds): + default = kwds.pop("default", MISSING) + default_factory = kwds.pop("default_factory", MISSING) + init = kwds.pop("init", True) + repr = kwds.pop("repr", True) + hash = kwds.pop("hash", None) + compare = kwds.pop("compare", True) + metadata = kwds.pop("metadata", None) + kw_only = kwds.pop("kw_only", None) + + if kwds: + raise ValueError("field received unexpected keyword arguments: %s" + % list(kwds.keys())) + if default is not MISSING and default_factory is not MISSING: + raise ValueError('cannot specify both default and default_factory') + if ignore: + raise ValueError("'field' does not take any positional arguments") + return Field(default, default_factory, init, + repr, hash, compare, metadata, kw_only) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Embed.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Embed.c new file mode 100644 index 0000000000000000000000000000000000000000..3c827794e75598fb1062c1b419a8bb8fcc3df803 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Embed.c @@ -0,0 +1,255 @@ +//////////////////// MainFunction //////////////////// + +#ifdef __FreeBSD__ +#include +#endif + +#if PY_MAJOR_VERSION < 3 +int %(main_method)s(int argc, char** argv) +#elif defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) +int %(wmain_method)s(int argc, wchar_t **argv) +#else +static int __Pyx_main(int argc, wchar_t **argv) +#endif +{ + /* 754 requires that FP exceptions run in "no stop" mode by default, + * and until C vendors implement C99's ways to control FP exceptions, + * Python requires non-stop mode. Alas, some platforms enable FP + * exceptions by default. Here we disable them. 
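+     * (The same guard appears in CPython's own entry point; it is kept
+     * here because this embedded main() replaces that entry point.)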
+ */ +#ifdef __FreeBSD__ + fp_except_t m; + + m = fpgetmask(); + fpsetmask(m & ~FP_X_OFL); +#endif +#if PY_VERSION_HEX < 0x03080000 + if (argc && argv) + Py_SetProgramName(argv[0]); +#endif + + #if PY_MAJOR_VERSION < 3 + if (PyImport_AppendInittab("%(module_name)s", init%(module_name)s) < 0) return 1; + #else + if (PyImport_AppendInittab("%(module_name)s", PyInit_%(module_name)s) < 0) return 1; + #endif + +#if PY_VERSION_HEX < 0x03080000 + Py_Initialize(); + if (argc && argv) + PySys_SetArgv(argc, argv); +#else + { + PyStatus status; + + PyConfig config; + PyConfig_InitPythonConfig(&config); + // Disable parsing command line arguments + config.parse_argv = 0; + + if (argc && argv) { + status = PyConfig_SetString(&config, &config.program_name, argv[0]); + if (PyStatus_Exception(status)) { + PyConfig_Clear(&config); + return 1; + } + + status = PyConfig_SetArgv(&config, argc, argv); + if (PyStatus_Exception(status)) { + PyConfig_Clear(&config); + return 1; + } + } + + status = Py_InitializeFromConfig(&config); + if (PyStatus_Exception(status)) { + PyConfig_Clear(&config); + return 1; + } + + PyConfig_Clear(&config); + } +#endif + + { /* init module '%(module_name)s' as '__main__' */ + PyObject* m = NULL; + %(module_is_main)s = 1; + m = PyImport_ImportModule("%(module_name)s"); + + if (!m && PyErr_Occurred()) { + PyErr_Print(); /* This exits with the right code if SystemExit. */ + #if PY_MAJOR_VERSION < 3 + if (Py_FlushLine()) PyErr_Clear(); + #endif + return 1; + } + Py_XDECREF(m); + } +#if PY_VERSION_HEX < 0x03060000 + Py_Finalize(); +#else + if (Py_FinalizeEx() < 0) + return 2; +#endif + return 0; +} + + +#if PY_MAJOR_VERSION >= 3 && !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS) +#include + +#if PY_VERSION_HEX < 0x03050000 + +static wchar_t* +__Pyx_char2wchar(char* arg) +{ + wchar_t *res; +#ifdef HAVE_BROKEN_MBSTOWCS + /* Some platforms have a broken implementation of + * mbstowcs which does not count the characters that + * would result from conversion. Use an upper bound. + */ + size_t argsize = strlen(arg); +#else + size_t argsize = mbstowcs(NULL, arg, 0); +#endif + size_t count; + unsigned char *in; + wchar_t *out; +#ifdef HAVE_MBRTOWC + mbstate_t mbs; +#endif + if (argsize != (size_t)-1) { + res = (wchar_t *)malloc((argsize+1)*sizeof(wchar_t)); + if (!res) + goto oom; + count = mbstowcs(res, arg, argsize+1); + if (count != (size_t)-1) { + wchar_t *tmp; + /* Only use the result if it contains no + surrogate characters. */ + for (tmp = res; *tmp != 0 && + (*tmp < 0xd800 || *tmp > 0xdfff); tmp++) + ; + if (*tmp == 0) + return res; + } + free(res); + } + /* Conversion failed. Fall back to escaping with surrogateescape. */ +#ifdef HAVE_MBRTOWC + /* Try conversion with mbrtwoc (C99), and escape non-decodable bytes. */ + + /* Overallocate; as multi-byte characters are in the argument, the + actual output could use less memory. */ + argsize = strlen(arg) + 1; + res = (wchar_t *)malloc(argsize*sizeof(wchar_t)); + if (!res) goto oom; + in = (unsigned char*)arg; + out = res; + memset(&mbs, 0, sizeof mbs); + while (argsize) { + size_t converted = mbrtowc(out, (char*)in, argsize, &mbs); + if (converted == 0) + /* Reached end of string; null char stored. */ + break; + if (converted == (size_t)-2) { + /* Incomplete character. This should never happen, + since we provide everything that we have - + unless there is a bug in the C library, or I + misunderstood how mbrtowc works. 
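+               (mbrtowc only returns (size_t)-2 for a truncated multi-byte
+               sequence, and the full NUL-terminated argument is passed in.)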
*/ + fprintf(stderr, "unexpected mbrtowc result -2\\n"); + free(res); + return NULL; + } + if (converted == (size_t)-1) { + /* Conversion error. Escape as UTF-8b, and start over + in the initial shift state. */ + *out++ = 0xdc00 + *in++; + argsize--; + memset(&mbs, 0, sizeof mbs); + continue; + } + if (*out >= 0xd800 && *out <= 0xdfff) { + /* Surrogate character. Escape the original + byte sequence with surrogateescape. */ + argsize -= converted; + while (converted--) + *out++ = 0xdc00 + *in++; + continue; + } + /* successfully converted some bytes */ + in += converted; + argsize -= converted; + out++; + } +#else + /* Cannot use C locale for escaping; manually escape as if charset + is ASCII (i.e. escape all bytes > 128. This will still roundtrip + correctly in the locale's charset, which must be an ASCII superset. */ + res = (wchar_t *)malloc((strlen(arg)+1)*sizeof(wchar_t)); + if (!res) goto oom; + in = (unsigned char*)arg; + out = res; + while(*in) + if(*in < 128) + *out++ = *in++; + else + *out++ = 0xdc00 + *in++; + *out = 0; +#endif + return res; +oom: + fprintf(stderr, "out of memory\\n"); + return NULL; +} + +#endif + +int +%(main_method)s(int argc, char **argv) +{ + if (!argc) { + return __Pyx_main(0, NULL); + } + else { + int i, res; + wchar_t **argv_copy = (wchar_t **)malloc(sizeof(wchar_t*)*argc); + /* We need a second copy, as Python might modify the first one. */ + wchar_t **argv_copy2 = (wchar_t **)malloc(sizeof(wchar_t*)*argc); + char *oldloc = strdup(setlocale(LC_ALL, NULL)); + if (!argv_copy || !argv_copy2 || !oldloc) { + fprintf(stderr, "out of memory\\n"); + free(argv_copy); + free(argv_copy2); + free(oldloc); + return 1; + } + res = 0; + setlocale(LC_ALL, ""); + for (i = 0; i < argc; i++) { + argv_copy2[i] = argv_copy[i] = +#if PY_VERSION_HEX < 0x03050000 + __Pyx_char2wchar(argv[i]); +#else + Py_DecodeLocale(argv[i], NULL); +#endif + if (!argv_copy[i]) res = 1; /* failure, but continue to simplify cleanup */ + } + setlocale(LC_ALL, oldloc); + free(oldloc); + if (res == 0) + res = __Pyx_main(argc, argv_copy); + for (i = 0; i < argc; i++) { +#if PY_VERSION_HEX < 0x03050000 + free(argv_copy2[i]); +#else + PyMem_RawFree(argv_copy2[i]); +#endif + } + free(argv_copy); + free(argv_copy2); + return res; + } +} +#endif diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Exceptions.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Exceptions.c new file mode 100644 index 0000000000000000000000000000000000000000..daf6578eb96a7ee27baef3fe74cab110e5ae988a --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Exceptions.c @@ -0,0 +1,1137 @@ +// Exception raising code +// +// Exceptions are raised by __Pyx_Raise() and stored as plain +// type/value/tb in PyThreadState->curexc_*. When being caught by an +// 'except' statement, curexc_* is moved over to exc_* by +// __Pyx_GetException() + + +/////////////// AssertionsEnabled.init /////////////// + +if (likely(__Pyx_init_assertions_enabled() == 0)); else +// error propagation code is appended automatically + +/////////////// AssertionsEnabled.proto /////////////// + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define __Pyx_init_assertions_enabled() (0) + #define __pyx_assertions_enabled() (1) +#elif CYTHON_COMPILING_IN_LIMITED_API || (CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030C0000) + // Py_OptimizeFlag is deprecated in Py3.12+ and not available in the Limited API. 
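+  // Instead, builtins.__debug__ is read once at module-init time and cached
+  // in a C flag; the Python-level equivalent (illustrative only) is:
+  //     __pyx_assertions_enabled_flag = bool(__debug__)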
+ static int __pyx_assertions_enabled_flag; + #define __pyx_assertions_enabled() (__pyx_assertions_enabled_flag) + + static int __Pyx_init_assertions_enabled(void) { + PyObject *builtins, *debug, *debug_str; + int flag; + builtins = PyEval_GetBuiltins(); + if (!builtins) goto bad; + debug_str = PyUnicode_FromStringAndSize("__debug__", 9); + if (!debug_str) goto bad; + debug = PyObject_GetItem(builtins, debug_str); + Py_DECREF(debug_str); + if (!debug) goto bad; + flag = PyObject_IsTrue(debug); + Py_DECREF(debug); + if (flag == -1) goto bad; + __pyx_assertions_enabled_flag = flag; + return 0; + bad: + __pyx_assertions_enabled_flag = 1; + // We (rarely) may not have an exception set, but the calling code will call PyErr_Occurred() either way. + return -1; + } +#else + #define __Pyx_init_assertions_enabled() (0) + #define __pyx_assertions_enabled() (!Py_OptimizeFlag) +#endif + + +/////////////// ErrOccurredWithGIL.proto /////////////// +static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */ + +/////////////// ErrOccurredWithGIL /////////////// +static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) { + int err; + #ifdef WITH_THREAD + PyGILState_STATE _save = PyGILState_Ensure(); + #endif + err = !!PyErr_Occurred(); + #ifdef WITH_THREAD + PyGILState_Release(_save); + #endif + return err; +} + + +/////////////// PyThreadStateGet.proto /////////////// +//@substitute: naming + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *$local_tstate_cname; +#define __Pyx_PyThreadState_assign $local_tstate_cname = __Pyx_PyThreadState_Current; +#if PY_VERSION_HEX >= 0x030C00A6 +#define __Pyx_PyErr_Occurred() ($local_tstate_cname->current_exception != NULL) +#define __Pyx_PyErr_CurrentExceptionType() ($local_tstate_cname->current_exception ? 
(PyObject*) Py_TYPE($local_tstate_cname->current_exception) : (PyObject*) NULL) +#else +#define __Pyx_PyErr_Occurred() ($local_tstate_cname->curexc_type != NULL) +#define __Pyx_PyErr_CurrentExceptionType() ($local_tstate_cname->curexc_type) +#endif +#else +// !CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() (PyErr_Occurred() != NULL) +#define __Pyx_PyErr_CurrentExceptionType() PyErr_Occurred() +#endif + + +/////////////// PyErrExceptionMatches.proto /////////////// +//@substitute: naming + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState($local_tstate_cname, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/////////////// PyErrExceptionMatches /////////////// + +#if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + // the tighter subtype checking in Py3 allows faster out-of-order comparison + for (i=0; i= 0x030C00A6 + PyObject *current_exception = tstate->current_exception; + if (unlikely(!current_exception)) return 0; + exc_type = (PyObject*) Py_TYPE(current_exception); + if (exc_type == err) return 1; +#else + exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; +#endif + #if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(exc_type); + #endif + if (unlikely(PyTuple_Check(err))) { + result = __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + } else { + result = __Pyx_PyErr_GivenExceptionMatches(exc_type, err); + } + #if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(exc_type); + #endif + return result; +} +#endif + +/////////////// PyErrFetchRestore.proto /////////////// +//@substitute: naming +//@requires: PyThreadStateGet + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState($local_tstate_cname, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState($local_tstate_cname, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); /*proto*/ +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ + +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6 +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif + +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define 
__Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/////////////// PyErrFetchRestore /////////////// + +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject *tmp_value; + assert(type == NULL || (value != NULL && type == (PyObject*) Py_TYPE(value))); + if (value) { + #if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(((PyBaseExceptionObject*) value)->traceback != tb)) + #endif + // If this fails, we may lose the traceback but still set the expected exception below. + PyException_SetTraceback(value, tb); + } + tmp_value = tstate->current_exception; + tstate->current_exception = value; + Py_XDECREF(tmp_value); + Py_XDECREF(type); + Py_XDECREF(tb); +#else + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#endif +} + +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject* exc_value; + exc_value = tstate->current_exception; + tstate->current_exception = 0; + *value = exc_value; + *type = NULL; + *tb = NULL; + if (exc_value) { + *type = (PyObject*) Py_TYPE(exc_value); + Py_INCREF(*type); + #if CYTHON_COMPILING_IN_CPYTHON + *tb = ((PyBaseExceptionObject*) exc_value)->traceback; + Py_XINCREF(*tb); + #else + *tb = PyException_GetTraceback(exc_value); + #endif + } +#else + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#endif +} +#endif + +/////////////// RaiseException.proto /////////////// + +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ + +/////////////// RaiseException /////////////// +//@requires: PyErrFetchRestore +//@requires: PyThreadStateGet + +// The following function is based on do_raise() from ceval.c. There +// are separate versions for Python2 and Python3 as exception handling +// has changed quite a lot between the two versions. + +#if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + __Pyx_PyThreadState_declare + CYTHON_UNUSED_VAR(cause); + /* 'cause' is only used in Py3 */ + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + + if (PyType_Check(type)) { + /* instantiate the type now (we don't know when and how it will be caught) */ +#if CYTHON_COMPILING_IN_PYPY + /* PyPy can't handle value == NULL */ + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + + } else { + /* Raising an instance. The value should be a dummy. 
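+           For example, a plain "raise SomeError()" arrives here with the
+           instance in 'type' and NULL in 'value'.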
*/ + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + /* Normalize to raise , */ + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} + +#else /* Python 3+ */ + +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + // make sure value is an exception instance of type + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + // error on subclass test + goto bad; + } else { + // believe the instance + type = instance_class; + } + } + } + if (!instance_class) { + // instantiate the type now (we don't know when and how it will be caught) + // assuming that 'value' is an argument to the type's constructor + // not using PyErr_NormalizeException() to avoid ref-counting problems + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + // raise ... from None + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + + PyErr_SetObject(type, value); + + if (tb) { + #if PY_VERSION_HEX >= 0x030C00A6 + // If this fails, we just get a different exception, so ignore the return value. 
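+        // (Since Py3.12 the thread state no longer stores curexc_traceback;
+        // the traceback is attached to the exception instance instead.)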
+ PyException_SetTraceback(value, tb); + #elif CYTHON_FAST_THREAD_STATE + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#else + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#endif + } + +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + + +/////////////// GetTopmostException.proto /////////////// + +#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/////////////// GetTopmostException /////////////// + +#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE +// Copied from errors.c in CPython. +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_value == NULL || exc_info->exc_value == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + + +/////////////// GetException.proto /////////////// +//@substitute: naming +//@requires: PyThreadStateGet + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException($local_tstate_cname, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ +#endif + +/////////////// GetException /////////////// + +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type = NULL, *local_value, *local_tb = NULL; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if PY_VERSION_HEX >= 0x030C00A6 + local_value = tstate->current_exception; + tstate->current_exception = 0; + if (likely(local_value)) { + local_type = (PyObject*) Py_TYPE(local_value); + Py_INCREF(local_type); + local_tb = PyException_GetTraceback(local_value); + } + #else + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; + #endif +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE && PY_VERSION_HEX >= 0x030C00A6 + if (unlikely(tstate->current_exception)) +#elif CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + + // traceback may be NULL for freshly raised exceptions + Py_XINCREF(local_tb); + // exception state may be temporarily empty in parallel loops (race condition) + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; + +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + #if PY_VERSION_HEX >= 0x030B00a4 + tmp_value = 
exc_info->exc_value; + exc_info->exc_value = local_value; + tmp_type = NULL; + tmp_tb = NULL; + Py_XDECREF(local_type); + Py_XDECREF(local_tb); + #else + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + #endif + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + // Make sure tstate is in a consistent state when we XDECREF + // these objects (DECREF may run arbitrary code). + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + + return 0; + +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/////////////// ReRaiseException.proto /////////////// + +static CYTHON_INLINE void __Pyx_ReraiseException(void); /*proto*/ + +/////////////// ReRaiseException /////////////// +//@requires: GetTopmostException + +static CYTHON_INLINE void __Pyx_ReraiseException(void) { + PyObject *type = NULL, *value = NULL, *tb = NULL; +#if CYTHON_FAST_THREAD_STATE + PyThreadState *tstate = PyThreadState_GET(); + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + value = exc_info->exc_value; + #if PY_VERSION_HEX >= 0x030B00a4 + if (unlikely(value == Py_None)) { + value = NULL; + } else if (value) { + Py_INCREF(value); + type = (PyObject*) Py_TYPE(value); + Py_INCREF(type); + tb = PyException_GetTraceback(value); + } + #else + type = exc_info->exc_type; + tb = exc_info->exc_traceback; + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(tb); + #endif + #else + type = tstate->exc_type; + value = tstate->exc_value; + tb = tstate->exc_traceback; + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(tb); + #endif +#else + PyErr_GetExcInfo(&type, &value, &tb); +#endif + if (unlikely(!type || type == Py_None)) { + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(tb); + // message copied from Py3 + PyErr_SetString(PyExc_RuntimeError, + "No active exception to reraise"); + } else { + PyErr_Restore(type, value, tb); + } +} + +/////////////// SaveResetException.proto /////////////// +//@substitute: naming +//@requires: PyThreadStateGet + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave($local_tstate_cname, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset($local_tstate_cname, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); /*proto*/ + +#else + +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/////////////// SaveResetException /////////////// +//@requires: GetTopmostException + +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = 
__Pyx_PyErr_GetTopmostException(tstate); + PyObject *exc_value = exc_info->exc_value; + if (exc_value == NULL || exc_value == Py_None) { + *value = NULL; + *type = NULL; + *tb = NULL; + } else { + *value = exc_value; + Py_INCREF(*value); + *type = (PyObject*) Py_TYPE(exc_value); + Py_INCREF(*type); + *tb = PyException_GetTraceback(exc_value); + } + #elif CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); + #endif +} + +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = tstate->exc_info; + PyObject *tmp_value = exc_info->exc_value; + exc_info->exc_value = value; + Py_XDECREF(tmp_value); + // TODO: avoid passing these at all + Py_XDECREF(type); + Py_XDECREF(tb); + #else + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); + #endif +} +#endif + +/////////////// SwapException.proto /////////////// +//@substitute: naming +//@requires: PyThreadStateGet + +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap($local_tstate_cname, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/ +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ +#endif + +/////////////// SwapException /////////////// + +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_value = exc_info->exc_value; + exc_info->exc_value = *value; + if (tmp_value == NULL || tmp_value == Py_None) { + Py_XDECREF(tmp_value); + tmp_value = NULL; + tmp_type = NULL; + tmp_tb = NULL; + } else { + // TODO: avoid swapping these at all + tmp_type = (PyObject*) Py_TYPE(tmp_value); + Py_INCREF(tmp_type); + #if CYTHON_COMPILING_IN_CPYTHON + tmp_tb = ((PyBaseExceptionObject*) tmp_value)->traceback; + Py_XINCREF(tmp_tb); + #else + tmp_tb = PyException_GetTraceback(tmp_value); + #endif + } + #elif CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + + exc_info->exc_type = *type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = 
tstate->exc_traceback; + + tstate->exc_type = *type; + tstate->exc_value = *value; + tstate->exc_traceback = *tb; + #endif + + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} + +#else + +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/////////////// WriteUnraisableException.proto /////////////// + +static void __Pyx_WriteUnraisable(const char *name, int clineno, + int lineno, const char *filename, + int full_traceback, int nogil); /*proto*/ + +/////////////// WriteUnraisableException /////////////// +//@requires: PyErrFetchRestore +//@requires: PyThreadStateGet + +static void __Pyx_WriteUnraisable(const char *name, int clineno, + int lineno, const char *filename, + int full_traceback, int nogil) { + PyObject *old_exc, *old_val, *old_tb; + PyObject *ctx; + __Pyx_PyThreadState_declare +#ifdef WITH_THREAD + PyGILState_STATE state; + if (nogil) + state = PyGILState_Ensure(); + /* arbitrary, to suppress warning */ + else state = (PyGILState_STATE)0; +#endif + CYTHON_UNUSED_VAR(clineno); + CYTHON_UNUSED_VAR(lineno); + CYTHON_UNUSED_VAR(filename); + CYTHON_MAYBE_UNUSED_VAR(nogil); + + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); + if (full_traceback) { + Py_XINCREF(old_exc); + Py_XINCREF(old_val); + Py_XINCREF(old_tb); + __Pyx_ErrRestore(old_exc, old_val, old_tb); + PyErr_PrintEx(1); + } + #if PY_MAJOR_VERSION < 3 + ctx = PyString_FromString(name); + #else + ctx = PyUnicode_FromString(name); + #endif + __Pyx_ErrRestore(old_exc, old_val, old_tb); + if (!ctx) { + PyErr_WriteUnraisable(Py_None); + } else { + PyErr_WriteUnraisable(ctx); + Py_DECREF(ctx); + } +#ifdef WITH_THREAD + if (nogil) + PyGILState_Release(state); +#endif +} + +/////////////// CLineInTraceback.proto /////////////// + +#ifdef CYTHON_CLINE_IN_TRACEBACK /* 0 or 1 to disable/enable C line display in tracebacks at C compile time */ +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);/*proto*/ +#endif + +/////////////// CLineInTraceback /////////////// +//@requires: ObjectHandling.c::PyObjectGetAttrStrNoError +//@requires: ObjectHandling.c::PyDictVersioning +//@requires: PyErrFetchRestore +//@substitute: naming + +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + + CYTHON_MAYBE_UNUSED_VAR(tstate); + + if (unlikely(!${cython_runtime_cname})) { + // Very early error where the runtime module is not set up yet. + return c_line; + } + + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); + +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(${cython_runtime_cname}); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, PYIDENT("cline_in_traceback"))) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStrNoError(${cython_runtime_cname}, PYIDENT("cline_in_traceback")); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + // No need to handle errors here when we reset the exception state just afterwards. + (void) PyObject_SetAttr(${cython_runtime_cname}, PYIDENT("cline_in_traceback"), Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/////////////// AddTraceback.proto /////////////// + +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); /*proto*/ + +/////////////// AddTraceback /////////////// +//@requires: ModuleSetupCode.c::CodeObjectCache +//@requires: CLineInTraceback +//@substitute: naming + +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +#if PY_VERSION_HEX >= 0x030b00a6 && !CYTHON_COMPILING_IN_LIMITED_API + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif + +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject *__Pyx_PyCode_Replace_For_AddTraceback(PyObject *code, PyObject *scratch_dict, + PyObject *firstlineno, PyObject *name) { + PyObject *replace = NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "co_firstlineno", firstlineno))) return NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "co_name", name))) return NULL; + + replace = PyObject_GetAttrString(code, "replace"); + if (likely(replace)) { + PyObject *result; + result = PyObject_Call(replace, $empty_tuple, scratch_dict); + Py_DECREF(replace); + return result; + } + PyErr_Clear(); + + #if __PYX_LIMITED_VERSION_HEX < 0x030780000 + // If we're here, we're probably on Python <=3.7 which doesn't have code.replace. + // In this we take a lazy interpreted route (without regard to performance + // since it's fairly old and this is mostly just to get something working) + { + PyObject *compiled = NULL, *result = NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "code", code))) return NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "type", (PyObject*)(&PyType_Type)))) return NULL; + compiled = Py_CompileString( + "out = type(code)(\n" + " code.co_argcount, code.co_kwonlyargcount, code.co_nlocals, code.co_stacksize,\n" + " code.co_flags, code.co_code, code.co_consts, code.co_names,\n" + " code.co_varnames, code.co_filename, co_name, co_firstlineno,\n" + " code.co_lnotab)\n", "", Py_file_input); + if (!compiled) return NULL; + result = PyEval_EvalCode(compiled, scratch_dict, scratch_dict); + Py_DECREF(compiled); + if (!result) PyErr_Print(); + Py_DECREF(result); + result = PyDict_GetItemString(scratch_dict, "out"); + if (result) Py_INCREF(result); + return result; + } + #else + return NULL; + #endif +} + +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyObject *code_object = NULL, *py_py_line = NULL, *py_funcname = NULL, *dict = NULL; + PyObject *replace = NULL, *getframe = NULL, *frame = NULL; + PyObject *exc_type, *exc_value, *exc_traceback; + int success = 0; + if (c_line) { + // Avoid "unused" warning as long as we don't use this. + (void) $cfilenm_cname; + (void) __Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line); + } + + // DW - this is a horrendous hack, but I'm quite proud of it. Essentially + // we need to generate a frame with the right line number/filename/funcname. 
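+    // (PyFrame_New and the code-object internals are not available in the
+    // Limited API, hence the detour described next.)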
+ // We do this by compiling a small bit of code that uses sys._getframe to get a + // frame, and then customizing the details of the code to match. + // We then run the code object and use the generated frame to set the traceback. + + PyErr_Fetch(&exc_type, &exc_value, &exc_traceback); + + code_object = Py_CompileString("_getframe()", filename, Py_eval_input); + if (unlikely(!code_object)) goto bad; + py_py_line = PyLong_FromLong(py_line); + if (unlikely(!py_py_line)) goto bad; + py_funcname = PyUnicode_FromString(funcname); + if (unlikely(!py_funcname)) goto bad; + dict = PyDict_New(); + if (unlikely(!dict)) goto bad; + { + PyObject *old_code_object = code_object; + code_object = __Pyx_PyCode_Replace_For_AddTraceback(code_object, dict, py_py_line, py_funcname); + Py_DECREF(old_code_object); + } + if (unlikely(!code_object)) goto bad; + + // Note that getframe is borrowed + getframe = PySys_GetObject("_getframe"); + if (unlikely(!getframe)) goto bad; + // reuse dict as globals (nothing conflicts, and it saves an allocation) + if (unlikely(PyDict_SetItemString(dict, "_getframe", getframe))) goto bad; + + frame = PyEval_EvalCode(code_object, dict, dict); + if (unlikely(!frame) || frame == Py_None) goto bad; + success = 1; + + bad: + PyErr_Restore(exc_type, exc_value, exc_traceback); + Py_XDECREF(code_object); + Py_XDECREF(py_py_line); + Py_XDECREF(py_funcname); + Py_XDECREF(dict); + Py_XDECREF(replace); + + if (success) { + // Unfortunately an easy way to check the type of frame isn't in the + // limited API. The check against None should cover the most + // likely wrong answer though. + PyTraceBack_Here( + // Python < 0x03090000 didn't expose PyFrameObject + // but they all expose struct _frame as an opaque type + (struct _frame*)frame); + } + + Py_XDECREF(frame); +} +#else +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = NULL; + PyObject *py_funcname = NULL; + #if PY_MAJOR_VERSION < 3 + PyObject *py_srcfile = NULL; + + py_srcfile = PyString_FromString(filename); + if (!py_srcfile) goto bad; + #endif + + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, $cfilenm_cname, c_line); + if (!py_funcname) goto bad; + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, $cfilenm_cname, c_line); + if (!py_funcname) goto bad; + funcname = PyUnicode_AsUTF8(py_funcname); + if (!funcname) goto bad; + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + if (!py_funcname) goto bad; + #endif + } + #if PY_MAJOR_VERSION < 3 + py_code = __Pyx_PyCode_New( + 0, /*int argcount,*/ + 0, /*int posonlyargcount,*/ + 0, /*int kwonlyargcount,*/ + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + $empty_bytes, /*PyObject *code,*/ + $empty_tuple, /*PyObject *consts,*/ + $empty_tuple, /*PyObject *names,*/ + $empty_tuple, /*PyObject *varnames,*/ + $empty_tuple, /*PyObject *freevars,*/ + $empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, /*int firstlineno,*/ + $empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + #else + py_code = PyCode_NewEmpty(filename, funcname, py_line); + #endif + Py_XDECREF(py_funcname); /* XDECREF since it's only set on Py3 if cline */ + return py_code; +bad: + Py_XDECREF(py_funcname); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_srcfile); + #endif + return NULL; +} + +static void 
__Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject *ptype, *pvalue, *ptraceback; + + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + + // Negate to avoid collisions between py and c lines. + py_code = $global_code_object_cache_find(c_line ? -c_line : py_line); + if (!py_code) { + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) { + /* If the code object creation fails, then we should clear the + fetched exception references and propagate the new exception */ + Py_XDECREF(ptype); + Py_XDECREF(pvalue); + Py_XDECREF(ptraceback); + goto bad; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + $global_code_object_cache_insert(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + $moddict_cname, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} +#endif diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ExtensionTypes.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ExtensionTypes.c new file mode 100644 index 0000000000000000000000000000000000000000..544e50b9420c9bbfa08715ada0d52160d4dc7fe2 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ExtensionTypes.c @@ -0,0 +1,660 @@ +/////////////// FixUpExtensionType.proto /////////////// + +#if CYTHON_USE_TYPE_SPECS +static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type); /*proto*/ +#endif + +/////////////// FixUpExtensionType /////////////// +//@requires:ModuleSetupCode.c::IncludeStructmemberH +//@requires:StringTools.c::IncludeStringH + +#if CYTHON_USE_TYPE_SPECS +static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) { +#if PY_VERSION_HEX > 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + CYTHON_UNUSED_VAR(spec); + CYTHON_UNUSED_VAR(type); +#else + // Set tp_weakreflist, tp_dictoffset, tp_vectorcalloffset + // Copied and adapted from https://bugs.python.org/issue38140 + const PyType_Slot *slot = spec->slots; + while (slot && slot->slot && slot->slot != Py_tp_members) + slot++; + if (slot && slot->slot == Py_tp_members) { + int changed = 0; +#if !(PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON) + const +#endif + PyMemberDef *memb = (PyMemberDef*) slot->pfunc; + while (memb && memb->name) { + if (memb->name[0] == '_' && memb->name[1] == '_') { +#if PY_VERSION_HEX < 0x030900b1 + if (strcmp(memb->name, "__weaklistoffset__") == 0) { + // The PyMemberDef must be a Py_ssize_t and readonly. + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); + type->tp_weaklistoffset = memb->offset; + // FIXME: is it even worth calling PyType_Modified() here? + changed = 1; + } + else if (strcmp(memb->name, "__dictoffset__") == 0) { + // The PyMemberDef must be a Py_ssize_t and readonly. + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); + type->tp_dictoffset = memb->offset; + // FIXME: is it even worth calling PyType_Modified() here? 
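+                    // (tp_dictoffset records where the instance __dict__
+                    // pointer lives in the object struct; before Py3.9,
+                    // type specs had no direct way to set it.)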
+ changed = 1; + } +#if CYTHON_METH_FASTCALL + else if (strcmp(memb->name, "__vectorcalloffset__") == 0) { + // The PyMemberDef must be a Py_ssize_t and readonly. + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); +#if PY_VERSION_HEX >= 0x030800b4 + type->tp_vectorcall_offset = memb->offset; +#else + type->tp_print = (printfunc) memb->offset; +#endif + // FIXME: is it even worth calling PyType_Modified() here? + changed = 1; + } +#endif +#else + if ((0)); +#endif +#if PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON + else if (strcmp(memb->name, "__module__") == 0) { + // PyType_FromSpec() in CPython <= 3.9b1 overwrites this field with a constant string. + // See https://bugs.python.org/issue40703 + PyObject *descr; + // The PyMemberDef must be an object and normally readable, possibly writable. + assert(memb->type == T_OBJECT); + assert(memb->flags == 0 || memb->flags == READONLY); + descr = PyDescr_NewMember(type, memb); + if (unlikely(!descr)) + return -1; + if (unlikely(PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr) < 0)) { + Py_DECREF(descr); + return -1; + } + Py_DECREF(descr); + changed = 1; + } +#endif + } + memb++; + } + if (changed) + PyType_Modified(type); + } +#endif + return 0; +} +#endif + + +/////////////// ValidateBasesTuple.proto /////////////// + +#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS +static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases); /*proto*/ +#endif + +/////////////// ValidateBasesTuple /////////////// + +#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS +static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases) { + // Loop over all bases (except the first) and check that those + // really are heap types. Otherwise, it would not be safe to + // subclass them. + // + // We also check tp_dictoffset: it is unsafe to inherit + // tp_dictoffset from a base class because the object structures + // would not be compatible. So, if our extension type doesn't set + // tp_dictoffset (i.e. there is no __dict__ attribute in the object + // structure), we need to check that none of the base classes sets + // it either. 
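+    //
+    // Illustrative sketch of the conflict (hypothetical names): a Python
+    // base class without __slots__ gives instances a __dict__, which a
+    // dict-less extension subtype cannot accommodate:
+    //     class Mixin: pass            # instances get a __dict__ -> rejected
+    //     class Mixin: __slots__ = ()  # no instance __dict__     -> accepted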
+ Py_ssize_t i, n; +#if CYTHON_ASSUME_SAFE_MACROS + n = PyTuple_GET_SIZE(bases); +#else + n = PyTuple_Size(bases); + if (n < 0) return -1; +#endif + for (i = 1; i < n; i++) /* Skip first base */ + { +#if CYTHON_AVOID_BORROWED_REFS + PyObject *b0 = PySequence_GetItem(bases, i); + if (!b0) return -1; +#elif CYTHON_ASSUME_SAFE_MACROS + PyObject *b0 = PyTuple_GET_ITEM(bases, i); +#else + PyObject *b0 = PyTuple_GetItem(bases, i); + if (!b0) return -1; +#endif + PyTypeObject *b; +#if PY_MAJOR_VERSION < 3 + /* Disallow old-style classes */ + if (PyClass_Check(b0)) + { + PyErr_Format(PyExc_TypeError, "base class '%.200s' is an old-style class", + PyString_AS_STRING(((PyClassObject*)b0)->cl_name)); +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + return -1; + } +#endif + b = (PyTypeObject*) b0; + if (!__Pyx_PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE)) + { + __Pyx_TypeName b_name = __Pyx_PyType_GetName(b); + PyErr_Format(PyExc_TypeError, + "base class '" __Pyx_FMT_TYPENAME "' is not a heap type", b_name); + __Pyx_DECREF_TypeName(b_name); +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + return -1; + } + if (dictoffset == 0) + { + Py_ssize_t b_dictoffset = 0; +#if CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY + b_dictoffset = b->tp_dictoffset; +#else + PyObject *py_b_dictoffset = PyObject_GetAttrString((PyObject*)b, "__dictoffset__"); + if (!py_b_dictoffset) goto dictoffset_return; + b_dictoffset = PyLong_AsSsize_t(py_b_dictoffset); + Py_DECREF(py_b_dictoffset); + if (b_dictoffset == -1 && PyErr_Occurred()) goto dictoffset_return; +#endif + if (b_dictoffset) { + { + __Pyx_TypeName b_name = __Pyx_PyType_GetName(b); + PyErr_Format(PyExc_TypeError, + "extension type '%.200s' has no __dict__ slot, " + "but base type '" __Pyx_FMT_TYPENAME "' has: " + "either add 'cdef dict __dict__' to the extension type " + "or add '__slots__ = [...]' to the base type", + type_name, b_name); + __Pyx_DECREF_TypeName(b_name); + } +#if !(CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY) + dictoffset_return: +#endif +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + return -1; + } + } +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + } + return 0; +} +#endif + + +/////////////// PyType_Ready.proto /////////////// + +// unused when using type specs +CYTHON_UNUSED static int __Pyx_PyType_Ready(PyTypeObject *t);/*proto*/ + +/////////////// PyType_Ready /////////////// +//@requires: ObjectHandling.c::PyObjectCallMethod0 +//@requires: ValidateBasesTuple + +// Wrapper around PyType_Ready() with some runtime checks and fixes +// to deal with multiple inheritance. +static int __Pyx_PyType_Ready(PyTypeObject *t) { + +// FIXME: is this really suitable for CYTHON_COMPILING_IN_LIMITED_API? +#if CYTHON_USE_TYPE_SPECS || !(CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API) || defined(PYSTON_MAJOR_VERSION) + // avoid C warning about unused helper function + (void)__Pyx_PyObject_CallMethod0; +#if CYTHON_USE_TYPE_SPECS + (void)__Pyx_validate_bases_tuple; +#endif + + return PyType_Ready(t); + +#else + int r; + PyObject *bases = __Pyx_PyType_GetSlot(t, tp_bases, PyObject*); + if (bases && unlikely(__Pyx_validate_bases_tuple(t->tp_name, t->tp_dictoffset, bases) == -1)) + return -1; + +#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION) + { + // Make sure GC does not pick up our non-heap type as heap type with this hack! 
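+        // (Py_TPFLAGS_HEAPTYPE is set temporarily around PyType_Ready() below;
+        // a GC collection in that window could misread the static type.)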
+ // For details, see https://github.com/cython/cython/issues/3603 + int gc_was_enabled; + #if PY_VERSION_HEX >= 0x030A00b1 + // finally added in Py3.10 :) + gc_was_enabled = PyGC_Disable(); + (void)__Pyx_PyObject_CallMethod0; + + #else + // Call gc.disable() as a backwards compatible fallback, but only if needed. + PyObject *ret, *py_status; + PyObject *gc = NULL; + #if PY_VERSION_HEX >= 0x030700a1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM+0 >= 0x07030400) + // https://foss.heptapod.net/pypy/pypy/-/issues/3385 + gc = PyImport_GetModule(PYUNICODE("gc")); + #endif + if (unlikely(!gc)) gc = PyImport_Import(PYUNICODE("gc")); + if (unlikely(!gc)) return -1; + py_status = __Pyx_PyObject_CallMethod0(gc, PYUNICODE("isenabled")); + if (unlikely(!py_status)) { + Py_DECREF(gc); + return -1; + } + gc_was_enabled = __Pyx_PyObject_IsTrue(py_status); + Py_DECREF(py_status); + if (gc_was_enabled > 0) { + ret = __Pyx_PyObject_CallMethod0(gc, PYUNICODE("disable")); + if (unlikely(!ret)) { + Py_DECREF(gc); + return -1; + } + Py_DECREF(ret); + } else if (unlikely(gc_was_enabled == -1)) { + Py_DECREF(gc); + return -1; + } + #endif + + // As of https://bugs.python.org/issue22079 + // PyType_Ready enforces that all bases of a non-heap type are + // non-heap. We know that this is the case for the solid base but + // other bases are heap allocated and are kept alive through the + // tp_bases reference. + // Other than this check, the Py_TPFLAGS_HEAPTYPE flag is unused + // in PyType_Ready(). + t->tp_flags |= Py_TPFLAGS_HEAPTYPE; +#if PY_VERSION_HEX >= 0x030A0000 + // As of https://github.com/python/cpython/pull/25520 + // PyType_Ready marks types as immutable if they are static types + // and requires the Py_TPFLAGS_IMMUTABLETYPE flag to mark types as + // immutable + // Manually set the Py_TPFLAGS_IMMUTABLETYPE flag, since the type + // is immutable + t->tp_flags |= Py_TPFLAGS_IMMUTABLETYPE; +#endif +#else + // avoid C warning about unused helper function + (void)__Pyx_PyObject_CallMethod0; +#endif + + r = PyType_Ready(t); + +#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION) + t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE; + + #if PY_VERSION_HEX >= 0x030A00b1 + if (gc_was_enabled) + PyGC_Enable(); + #else + if (gc_was_enabled) { + PyObject *tp, *v, *tb; + PyErr_Fetch(&tp, &v, &tb); + ret = __Pyx_PyObject_CallMethod0(gc, PYUNICODE("enable")); + if (likely(ret || r == -1)) { + Py_XDECREF(ret); + // do not overwrite exceptions raised by PyType_Ready() above + PyErr_Restore(tp, v, tb); + } else { + // PyType_Ready() succeeded, but gc.enable() failed. + Py_XDECREF(tp); + Py_XDECREF(v); + Py_XDECREF(tb); + r = -1; + } + } + Py_DECREF(gc); + #endif + } +#endif + + return r; +#endif +} + + +/////////////// PyTrashcan.proto /////////////// + +// These macros are taken from https://github.com/python/cpython/pull/11841 +// Unlike the Py_TRASHCAN_SAFE_BEGIN/Py_TRASHCAN_SAFE_END macros, they +// allow dealing correctly with subclasses. 
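+// A tp_dealloc using them looks roughly like this (illustrative sketch with
+// a hypothetical function name, not part of the original source):
+//     static void __pyx_tp_dealloc(PyObject *o) {
+//         __Pyx_TRASHCAN_BEGIN(o, __pyx_tp_dealloc)
+//         /* ... release C attributes and Python members ... */
+//         __Pyx_TRASHCAN_END
+//     }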
+ +// This requires CPython version >= 2.7.4 +// (or >= 3.2.4 but we don't support such old Python 3 versions anyway) +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03080000 +// https://github.com/python/cpython/pull/11841 merged so Cython reimplementation +// is no longer necessary +#define __Pyx_TRASHCAN_BEGIN Py_TRASHCAN_BEGIN +#define __Pyx_TRASHCAN_END Py_TRASHCAN_END +#elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070400 +#define __Pyx_TRASHCAN_BEGIN_CONDITION(op, cond) \ + do { \ + PyThreadState *_tstate = NULL; \ + // If "cond" is false, then _tstate remains NULL and the deallocator + // is run normally without involving the trashcan + if (cond) { \ + _tstate = PyThreadState_GET(); \ + if (_tstate->trash_delete_nesting >= PyTrash_UNWIND_LEVEL) { \ + // Store the object (to be deallocated later) and jump past + // Py_TRASHCAN_END, skipping the body of the deallocator + _PyTrash_thread_deposit_object((PyObject*)(op)); \ + break; \ + } \ + ++_tstate->trash_delete_nesting; \ + } + // The body of the deallocator is here. +#define __Pyx_TRASHCAN_END \ + if (_tstate) { \ + --_tstate->trash_delete_nesting; \ + if (_tstate->trash_delete_later && _tstate->trash_delete_nesting <= 0) \ + _PyTrash_thread_destroy_chain(); \ + } \ + } while (0); + +#define __Pyx_TRASHCAN_BEGIN(op, dealloc) __Pyx_TRASHCAN_BEGIN_CONDITION(op, \ + __Pyx_PyObject_GetSlot(op, tp_dealloc, destructor) == (destructor)(dealloc)) + +#else +// The trashcan is a no-op on other Python implementations +// or old CPython versions +#define __Pyx_TRASHCAN_BEGIN(op, dealloc) +#define __Pyx_TRASHCAN_END +#endif + +/////////////// CallNextTpDealloc.proto /////////////// + +static void __Pyx_call_next_tp_dealloc(PyObject* obj, destructor current_tp_dealloc); + +/////////////// CallNextTpDealloc /////////////// + +static void __Pyx_call_next_tp_dealloc(PyObject* obj, destructor current_tp_dealloc) { + PyTypeObject* type = Py_TYPE(obj); + destructor tp_dealloc = NULL; + /* try to find the first parent type that has a different tp_dealloc() function */ + while (type && __Pyx_PyType_GetSlot(type, tp_dealloc, destructor) != current_tp_dealloc) + type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*); + while (type && (tp_dealloc = __Pyx_PyType_GetSlot(type, tp_dealloc, destructor)) == current_tp_dealloc) + type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*); + if (type) + tp_dealloc(obj); +} + +/////////////// CallNextTpTraverse.proto /////////////// + +static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse); + +/////////////// CallNextTpTraverse /////////////// + +static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse) { + PyTypeObject* type = Py_TYPE(obj); + traverseproc tp_traverse = NULL; + /* try to find the first parent type that has a different tp_traverse() function */ + while (type && __Pyx_PyType_GetSlot(type, tp_traverse, traverseproc) != current_tp_traverse) + type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*); + while (type && (tp_traverse = __Pyx_PyType_GetSlot(type, tp_traverse, traverseproc)) == current_tp_traverse) + type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*); + if (type && tp_traverse) + return tp_traverse(obj, v, a); + // FIXME: really ignore? 
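+    // If no parent type with a different tp_traverse exists, there is nothing left
+    // to visit; returning 0 follows the visitproc convention for "success".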
+ return 0; +} + +/////////////// CallNextTpClear.proto /////////////// + +static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_clear); + +/////////////// CallNextTpClear /////////////// + +static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_clear) { + PyTypeObject* type = Py_TYPE(obj); + inquiry tp_clear = NULL; + /* try to find the first parent type that has a different tp_clear() function */ + while (type && __Pyx_PyType_GetSlot(type, tp_clear, inquiry) != current_tp_clear) + type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*); + while (type && (tp_clear = __Pyx_PyType_GetSlot(type, tp_clear, inquiry)) == current_tp_clear) + type = __Pyx_PyType_GetSlot(type, tp_base, PyTypeObject*); + if (type && tp_clear) + tp_clear(obj); +} + +/////////////// SetupReduce.proto /////////////// + +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __Pyx_setup_reduce(PyObject* type_obj); +#endif + +/////////////// SetupReduce /////////////// +//@requires: ObjectHandling.c::PyObjectGetAttrStrNoError +//@requires: ObjectHandling.c::PyObjectGetAttrStr +//@substitute: naming + +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { + int ret; + PyObject *name_attr; + + name_attr = __Pyx_PyObject_GetAttrStrNoError(meth, PYIDENT("__name__")); + if (likely(name_attr)) { + ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); + } else { + ret = -1; + } + + if (unlikely(ret < 0)) { + PyErr_Clear(); + ret = 0; + } + + Py_XDECREF(name_attr); + return ret; +} + +static int __Pyx_setup_reduce(PyObject* type_obj) { + int ret = 0; + PyObject *object_reduce = NULL; + PyObject *object_getstate = NULL; + PyObject *object_reduce_ex = NULL; + PyObject *reduce = NULL; + PyObject *reduce_ex = NULL; + PyObject *reduce_cython = NULL; + PyObject *setstate = NULL; + PyObject *setstate_cython = NULL; + PyObject *getstate = NULL; + +#if CYTHON_USE_PYTYPE_LOOKUP + getstate = _PyType_Lookup((PyTypeObject*)type_obj, PYIDENT("__getstate__")); +#else + getstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__getstate__")); + if (!getstate && PyErr_Occurred()) { + goto __PYX_BAD; + } +#endif + if (getstate) { + // Python 3.11 introduces object.__getstate__. 
Because it's version-specific, failure to find it should not be an error.
+#if CYTHON_USE_PYTYPE_LOOKUP
+        object_getstate = _PyType_Lookup(&PyBaseObject_Type, PYIDENT("__getstate__"));
+#else
+        object_getstate = __Pyx_PyObject_GetAttrStrNoError((PyObject*)&PyBaseObject_Type, PYIDENT("__getstate__"));
+        if (!object_getstate && PyErr_Occurred()) {
+            goto __PYX_BAD;
+        }
+#endif
+        if (object_getstate != getstate) {
+            goto __PYX_GOOD;
+        }
+    }
+
+#if CYTHON_USE_PYTYPE_LOOKUP
+    object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, PYIDENT("__reduce_ex__")); if (!object_reduce_ex) goto __PYX_BAD;
+#else
+    object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, PYIDENT("__reduce_ex__")); if (!object_reduce_ex) goto __PYX_BAD;
+#endif
+
+    reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, PYIDENT("__reduce_ex__")); if (unlikely(!reduce_ex)) goto __PYX_BAD;
+    if (reduce_ex == object_reduce_ex) {
+
+#if CYTHON_USE_PYTYPE_LOOKUP
+        object_reduce = _PyType_Lookup(&PyBaseObject_Type, PYIDENT("__reduce__")); if (!object_reduce) goto __PYX_BAD;
+#else
+        object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, PYIDENT("__reduce__")); if (!object_reduce) goto __PYX_BAD;
+#endif
+        reduce = __Pyx_PyObject_GetAttrStr(type_obj, PYIDENT("__reduce__")); if (unlikely(!reduce)) goto __PYX_BAD;
+
+        if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, PYIDENT("__reduce_cython__"))) {
+            reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__reduce_cython__"));
+            if (likely(reduce_cython)) {
+                ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__reduce__"), reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+                ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__reduce_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
+            } else if (reduce == object_reduce || PyErr_Occurred()) {
+                // Ignore if we're done, i.e. if 'reduce' already has the right name and the original is gone.
+                // Otherwise: error.
+                goto __PYX_BAD;
+            }
+
+            setstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__setstate__"));
+            if (!setstate) PyErr_Clear();
+            if (!setstate || __Pyx_setup_reduce_is_named(setstate, PYIDENT("__setstate_cython__"))) {
+                setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__setstate_cython__"));
+                if (likely(setstate_cython)) {
+                    ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__setstate__"), setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+                    ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__setstate_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
+                } else if (!setstate || PyErr_Occurred()) {
+                    // Ignore if we're done, i.e. if 'setstate' already has the right name and the original is gone.
+                    // Otherwise: error.
+ goto __PYX_BAD; + } + } + PyType_Modified((PyTypeObject*)type_obj); + } + } + goto __PYX_GOOD; + +__PYX_BAD: + if (!PyErr_Occurred()) { + __Pyx_TypeName type_obj_name = + __Pyx_PyType_GetName((PyTypeObject*)type_obj); + PyErr_Format(PyExc_RuntimeError, + "Unable to initialize pickling for " __Pyx_FMT_TYPENAME, type_obj_name); + __Pyx_DECREF_TypeName(type_obj_name); + } + ret = -1; +__PYX_GOOD: +#if !CYTHON_USE_PYTYPE_LOOKUP + Py_XDECREF(object_reduce); + Py_XDECREF(object_reduce_ex); + Py_XDECREF(object_getstate); + Py_XDECREF(getstate); +#endif + Py_XDECREF(reduce); + Py_XDECREF(reduce_ex); + Py_XDECREF(reduce_cython); + Py_XDECREF(setstate); + Py_XDECREF(setstate_cython); + return ret; +} +#endif + + +/////////////// BinopSlot /////////////// + +static CYTHON_INLINE PyObject *{{func_name}}_maybe_call_slot(PyTypeObject* type, PyObject *left, PyObject *right {{extra_arg_decl}}) { + {{slot_type}} slot; +#if CYTHON_USE_TYPE_SLOTS || PY_MAJOR_VERSION < 3 || CYTHON_COMPILING_IN_PYPY + slot = type->tp_as_number ? type->tp_as_number->{{slot_name}} : NULL; +#else + slot = ({{slot_type}}) PyType_GetSlot(type, Py_{{slot_name}}); +#endif + return slot ? slot(left, right {{extra_arg}}) : __Pyx_NewRef(Py_NotImplemented); +} + +static PyObject *{{func_name}}(PyObject *left, PyObject *right {{extra_arg_decl}}) { + int maybe_self_is_left, maybe_self_is_right = 0; + maybe_self_is_left = Py_TYPE(left) == Py_TYPE(right) +#if CYTHON_USE_TYPE_SLOTS + || (Py_TYPE(left)->tp_as_number && Py_TYPE(left)->tp_as_number->{{slot_name}} == &{{func_name}}) +#endif + || __Pyx_TypeCheck(left, {{type_cname}}); + + // Optimize for the common case where the left operation is defined (and successful). + {{if not overloads_left}} + maybe_self_is_right = Py_TYPE(left) == Py_TYPE(right) +#if CYTHON_USE_TYPE_SLOTS + || (Py_TYPE(right)->tp_as_number && Py_TYPE(right)->tp_as_number->{{slot_name}} == &{{func_name}}) +#endif + || __Pyx_TypeCheck(right, {{type_cname}}); + {{endif}} + + if (maybe_self_is_left) { + PyObject *res; + + {{if overloads_right and not overloads_left}} + if (maybe_self_is_right) { + res = {{call_right}}; + if (res != Py_NotImplemented) return res; + Py_DECREF(res); + // Don't bother calling it again. 
+ maybe_self_is_right = 0; + } + {{endif}} + + res = {{call_left}}; + if (res != Py_NotImplemented) return res; + Py_DECREF(res); + } + + {{if overloads_left}} + maybe_self_is_right = Py_TYPE(left) == Py_TYPE(right) +#if CYTHON_USE_TYPE_SLOTS + || (Py_TYPE(right)->tp_as_number && Py_TYPE(right)->tp_as_number->{{slot_name}} == &{{func_name}}) +#endif + || PyType_IsSubtype(Py_TYPE(right), {{type_cname}}); + {{endif}} + + if (maybe_self_is_right) { + return {{call_right}}; + } + return __Pyx_NewRef(Py_NotImplemented); +} + +/////////////// ValidateExternBase.proto /////////////// + +static int __Pyx_validate_extern_base(PyTypeObject *base); /* proto */ + +/////////////// ValidateExternBase /////////////// +//@requires: ObjectHandling.c::FormatTypeName + +static int __Pyx_validate_extern_base(PyTypeObject *base) { + Py_ssize_t itemsize; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_itemsize; +#endif +#if !CYTHON_COMPILING_IN_LIMITED_API + itemsize = ((PyTypeObject *)base)->tp_itemsize; +#else + py_itemsize = PyObject_GetAttrString((PyObject*)base, "__itemsize__"); + if (!py_itemsize) + return -1; + itemsize = PyLong_AsSsize_t(py_itemsize); + Py_DECREF(py_itemsize); + py_itemsize = 0; + if (itemsize == (Py_ssize_t)-1 && PyErr_Occurred()) + return -1; +#endif + if (itemsize) { + __Pyx_TypeName b_name = __Pyx_PyType_GetName(base); + PyErr_Format(PyExc_TypeError, + "inheritance from PyVarObject types like '" __Pyx_FMT_TYPENAME "' not currently supported", b_name); + __Pyx_DECREF_TypeName(b_name); + return -1; + } + return 0; +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/FunctionArguments.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/FunctionArguments.c new file mode 100644 index 0000000000000000000000000000000000000000..961fbc26ef8b5910e6ee6452aba60b24254ed19b --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/FunctionArguments.c @@ -0,0 +1,587 @@ +//////////////////// ArgTypeTest.proto //////////////////// + + +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact) \ + ((likely(__Pyx_IS_TYPE(obj, type) | (none_allowed && (obj == Py_None)))) ? 
1 : \ + __Pyx__ArgTypeTest(obj, type, name, exact)) + +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /*proto*/ + +//////////////////// ArgTypeTest //////////////////// + +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + __Pyx_TypeName type_name; + __Pyx_TypeName obj_type_name; + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + type_name = __Pyx_PyType_GetName(type); + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected " __Pyx_FMT_TYPENAME + ", got " __Pyx_FMT_TYPENAME ")", name, type_name, obj_type_name); + __Pyx_DECREF_TypeName(type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return 0; +} + +//////////////////// RaiseArgTupleInvalid.proto //////////////////// + +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ + +//////////////////// RaiseArgTupleInvalid //////////////////// + +// __Pyx_RaiseArgtupleInvalid raises the correct exception when too +// many or too few positional arguments were found. This handles +// Py_ssize_t formatting correctly. + +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + + +//////////////////// RaiseKeywordRequired.proto //////////////////// + +static void __Pyx_RaiseKeywordRequired(const char* func_name, PyObject* kw_name); /*proto*/ + +//////////////////// RaiseKeywordRequired //////////////////// + +static void __Pyx_RaiseKeywordRequired(const char* func_name, PyObject* kw_name) { + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() needs keyword-only argument %U", func_name, kw_name); + #else + "%s() needs keyword-only argument %s", func_name, + PyString_AS_STRING(kw_name)); + #endif +} + + +//////////////////// RaiseDoubleKeywords.proto //////////////////// + +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ + +//////////////////// RaiseDoubleKeywords //////////////////// + +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + + +//////////////////// RaiseMappingExpected.proto //////////////////// + +static void __Pyx_RaiseMappingExpectedError(PyObject* arg); /*proto*/ + +//////////////////// RaiseMappingExpected //////////////////// + +static void __Pyx_RaiseMappingExpectedError(PyObject* arg) { + __Pyx_TypeName arg_type_name = __Pyx_PyType_GetName(Py_TYPE(arg)); + PyErr_Format(PyExc_TypeError, + "'" __Pyx_FMT_TYPENAME "' object is not a mapping", arg_type_name); + __Pyx_DECREF_TypeName(arg_type_name); +} + + +//////////////////// KeywordStringCheck.proto //////////////////// + +static int __Pyx_CheckKeywordStrings(PyObject *kw, const char* function_name, int kw_allowed); /*proto*/ + +//////////////////// KeywordStringCheck //////////////////// + +// __Pyx_CheckKeywordStrings raises an error if non-string keywords +// were passed to a function, or if any keywords were passed to a +// function that does not accept them. +// +// The "kw" argument is either a dict (for METH_VARARGS) or a tuple +// (for METH_FASTCALL). 
+
+static int __Pyx_CheckKeywordStrings(
+    PyObject *kw,
+    const char* function_name,
+    int kw_allowed)
+{
+    PyObject* key = 0;
+    Py_ssize_t pos = 0;
+#if CYTHON_COMPILING_IN_PYPY
+    /* PyPy appears to check keywords at call time, not at unpacking time => not much to do here */
+    if (!kw_allowed && PyDict_Next(kw, &pos, &key, 0))
+        goto invalid_keyword;
+    return 1;
+#else
+    if (CYTHON_METH_FASTCALL && likely(PyTuple_Check(kw))) {
+        Py_ssize_t kwsize;
+#if CYTHON_ASSUME_SAFE_MACROS
+        kwsize = PyTuple_GET_SIZE(kw);
+#else
+        kwsize = PyTuple_Size(kw);
+        if (kwsize < 0) return 0;
+#endif
+        if (unlikely(kwsize == 0))
+            return 1;
+        if (!kw_allowed) {
+#if CYTHON_ASSUME_SAFE_MACROS
+            key = PyTuple_GET_ITEM(kw, 0);
+#else
+            key = PyTuple_GetItem(kw, pos);
+            if (!key) return 0;
+#endif
+            goto invalid_keyword;
+        }
+#if PY_VERSION_HEX < 0x03090000
+        // On CPython >= 3.9, the FASTCALL protocol guarantees that keyword
+        // names are strings (see https://bugs.python.org/issue37540)
+        for (pos = 0; pos < kwsize; pos++) {
+#if CYTHON_ASSUME_SAFE_MACROS
+            key = PyTuple_GET_ITEM(kw, pos);
+#else
+            key = PyTuple_GetItem(kw, pos);
+            if (!key) return 0;
+#endif
+            if (unlikely(!PyUnicode_Check(key)))
+                goto invalid_keyword_type;
+        }
+#endif
+        return 1;
+    }
+
+    while (PyDict_Next(kw, &pos, &key, 0)) {
+        #if PY_MAJOR_VERSION < 3
+        if (unlikely(!PyString_Check(key)))
+        #endif
+            if (unlikely(!PyUnicode_Check(key)))
+                goto invalid_keyword_type;
+    }
+    if (!kw_allowed && unlikely(key))
+        goto invalid_keyword;
+    return 1;
+invalid_keyword_type:
+    PyErr_Format(PyExc_TypeError,
+        "%.200s() keywords must be strings", function_name);
+    return 0;
+#endif
+invalid_keyword:
+    #if PY_MAJOR_VERSION < 3
+    PyErr_Format(PyExc_TypeError,
+        "%.200s() got an unexpected keyword argument '%.200s'",
+        function_name, PyString_AsString(key));
+    #else
+    PyErr_Format(PyExc_TypeError,
+        "%s() got an unexpected keyword argument '%U'",
+        function_name, key);
+    #endif
+    return 0;
+}
+
+
+//////////////////// ParseKeywords.proto ////////////////////
+
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues,
+    PyObject **argnames[],
+    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,
+    const char* function_name); /*proto*/
+
+//////////////////// ParseKeywords ////////////////////
+//@requires: RaiseDoubleKeywords
+
+// __Pyx_ParseOptionalKeywords copies the optional/unknown keyword
+// arguments from kwds into the dict kwds2. If kwds2 is NULL, unknown
+// keywords will raise an invalid keyword error.
+//
+// When not using METH_FASTCALL, kwds is a dict and kwvalues is NULL.
+// Otherwise, kwds is a tuple with keyword names and kwvalues is a C
+// array with the corresponding values.
+//
+// Three kinds of errors are checked: 1) non-string keywords, 2)
+// unexpected keywords and 3) overlap with positional arguments.
+//
+// If num_pos_args is greater than 0, it denotes the number of positional
+// arguments that were passed and that must therefore not appear
+// amongst the keywords as well.
+//
+// This method does not check for required keyword arguments.
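+//
+// Example: for "def f(a, b, *, c)" called as f(1, b=2, c=3), argnames holds the
+// interned names of a, b and c and num_pos_args is 1, so keywords are matched
+// against argnames[1:]; a keyword that instead matches argnames[0] was also
+// passed positionally and triggers the "got multiple values" error below.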
+ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject *const *kwvalues, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + int kwds_is_tuple = CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds)); + + while (1) { + // clean up key and value when the loop is "continued" + Py_XDECREF(key); key = NULL; + Py_XDECREF(value); value = NULL; + + if (kwds_is_tuple) { + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + size = PyTuple_GET_SIZE(kwds); +#else + size = PyTuple_Size(kwds); + if (size < 0) goto bad; +#endif + if (pos >= size) break; + +#if CYTHON_AVOID_BORROWED_REFS + // Get an owned reference to key. + key = __Pyx_PySequence_ITEM(kwds, pos); + if (!key) goto bad; +#elif CYTHON_ASSUME_SAFE_MACROS + key = PyTuple_GET_ITEM(kwds, pos); +#else + key = PyTuple_GetItem(kwds, pos); + if (!key) goto bad; +#endif + + value = kwvalues[pos]; + pos++; + } + else + { + if (!PyDict_Next(kwds, &pos, &key, &value)) break; + // It's unfortunately hard to avoid borrowed references (briefly) with PyDict_Next +#if CYTHON_AVOID_BORROWED_REFS + // Own the reference to match the behaviour above. + Py_INCREF(key); +#endif + } + + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(value); /* transfer ownership of value to values */ + Py_DECREF(key); +#endif + key = NULL; + value = NULL; + continue; + } + + // Now make sure we own both references since we're doing non-trivial Python operations. +#if !CYTHON_AVOID_BORROWED_REFS + Py_INCREF(key); +#endif + Py_INCREF(value); + + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + value = NULL; /* ownership transferred to values */ +#endif + break; + } + name++; + } + if (*name) continue; + else { + // not found after positional args, check for duplicate + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = ( + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + // In Py2, we may need to convert the argument name from str to unicode for comparison. + PyUnicode_Compare(**name, key) + ); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + value = NULL; /* ownership transferred to values */ +#endif + break; + } + name++; + } + if (*name) continue; + else { + // not found after positional args, check for duplicate + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + // need to convert argument name from bytes to unicode for comparison + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + Py_XDECREF(key); + Py_XDECREF(value); + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + #if PY_MAJOR_VERSION < 3 + PyErr_Format(PyExc_TypeError, + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + PyErr_Format(PyExc_TypeError, + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + Py_XDECREF(key); + Py_XDECREF(value); + return -1; +} + + +//////////////////// MergeKeywords.proto //////////////////// + +static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping); /*proto*/ + +//////////////////// MergeKeywords //////////////////// +//@requires: RaiseDoubleKeywords +//@requires: Optimize.c::dict_iter + +static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping) { + PyObject *iter, *key = NULL, *value = NULL; + int source_is_dict, result; + Py_ssize_t orig_length, ppos = 0; + + iter = __Pyx_dict_iterator(source_mapping, 0, PYIDENT("items"), &orig_length, &source_is_dict); + if (unlikely(!iter)) { + // slow fallback: try converting to dict, then iterate + PyObject *args; + if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) goto bad; + PyErr_Clear(); + args = PyTuple_Pack(1, source_mapping); + if (likely(args)) { + PyObject *fallback = PyObject_Call((PyObject*)&PyDict_Type, args, NULL); + Py_DECREF(args); + if (likely(fallback)) { + iter = __Pyx_dict_iterator(fallback, 1, PYIDENT("items"), &orig_length, &source_is_dict); + Py_DECREF(fallback); + } + } + if (unlikely(!iter)) goto bad; + } + + while (1) { + result = __Pyx_dict_iter_next(iter, orig_length, &ppos, &key, &value, NULL, source_is_dict); + if (unlikely(result < 0)) goto bad; + if (!result) break; + + if (unlikely(PyDict_Contains(kwdict, key))) { + __Pyx_RaiseDoubleKeywordsError("function", key); + result = -1; + } else { + result = PyDict_SetItem(kwdict, key, value); + } + Py_DECREF(key); + Py_DECREF(value); + if (unlikely(result < 0)) goto bad; + } + Py_XDECREF(iter); + return 0; + +bad: + Py_XDECREF(iter); + return -1; +} + + +/////////////// fastcall.proto /////////////// + +// We define various functions and macros with two variants: +//..._FASTCALL and ..._VARARGS + +// The first is used when METH_FASTCALL is enabled and the second is used +// otherwise. If the Python implementation does not support METH_FASTCALL +// (because it's an old version of CPython or it's not CPython at all), +// then the ..._FASTCALL macros simply alias ..._VARARGS + +#if CYTHON_AVOID_BORROWED_REFS + // This is the only case where we request an owned reference. 
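+    // (PySequence_GetItem returns a new reference, whereas the PyTuple_GET_ITEM
+    // and PyTuple_GetItem variants below return borrowed references.)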
+ #define __Pyx_Arg_VARARGS(args, i) PySequence_GetItem(args, i) +#elif CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i) +#else + #define __Pyx_Arg_VARARGS(args, i) PyTuple_GetItem(args, i) +#endif +#if CYTHON_AVOID_BORROWED_REFS + #define __Pyx_Arg_NewRef_VARARGS(arg) __Pyx_NewRef(arg) + #define __Pyx_Arg_XDECREF_VARARGS(arg) Py_XDECREF(arg) +#else + #define __Pyx_Arg_NewRef_VARARGS(arg) arg /* no-op */ + #define __Pyx_Arg_XDECREF_VARARGS(arg) /* no-op - arg is borrowed */ +#endif +#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds) +#define __Pyx_KwValues_VARARGS(args, nargs) NULL +#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s) +#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw) +#if CYTHON_METH_FASTCALL + #define __Pyx_Arg_FASTCALL(args, i) args[i] + #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds) + #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs)) + static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s); +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 + CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues);/*proto*/ + #else + #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) + #endif + #define __Pyx_Arg_NewRef_FASTCALL(arg) arg /* no-op, __Pyx_Arg_FASTCALL is direct and this needs + to have the same reference counting */ + #define __Pyx_Arg_XDECREF_FASTCALL(arg) /* no-op - arg was returned from array */ +#else + #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS + #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS + #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS + #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS + #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS + #define __Pyx_Arg_NewRef_FASTCALL(arg) __Pyx_Arg_NewRef_VARARGS(arg) + #define __Pyx_Arg_XDECREF_FASTCALL(arg) __Pyx_Arg_XDECREF_VARARGS(arg) +#endif + +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start) +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start) +#else +/* Not CPython, so certainly no METH_FASTCALL support */ +#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop) +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop) +#endif + + +/////////////// fastcall /////////////// +//@requires: ObjectHandling.c::TupleAndListFromArray +//@requires: StringTools.c::UnicodeEquals + +#if CYTHON_METH_FASTCALL +// kwnames: tuple with names of keyword arguments +// kwvalues: C array with values of keyword arguments +// s: str with the keyword name to look for +static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s) +{ + // Search the kwnames array for s and return the corresponding value. + // We do two loops: a first one to compare pointers (which will find a + // match if the name in kwnames is interned, given that s is interned + // by Cython). A second loop compares the actual strings. 
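+    // (Keyword names at CPython call sites are normally interned, so the pointer
+    // pass succeeds in practice and the string comparison is a rare fallback.)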
+    Py_ssize_t i, n = PyTuple_GET_SIZE(kwnames);
+    for (i = 0; i < n; i++)
+    {
+        if (s == PyTuple_GET_ITEM(kwnames, i)) return kwvalues[i];
+    }
+    for (i = 0; i < n; i++)
+    {
+        int eq = __Pyx_PyUnicode_Equals(s, PyTuple_GET_ITEM(kwnames, i), Py_EQ);
+        if (unlikely(eq != 0)) {
+            if (unlikely(eq < 0)) return NULL; /* error */
+            return kwvalues[i];
+        }
+    }
+    return NULL; /* not found (no exception set) */
+}
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000
+CYTHON_UNUSED static PyObject *__Pyx_KwargsAsDict_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues) {
+    Py_ssize_t i, nkwargs = PyTuple_GET_SIZE(kwnames);
+    PyObject *dict;
+
+    dict = PyDict_New();
+    if (unlikely(!dict))
+        return NULL;
+
+    for (i=0; i<nkwargs; i++) {
+        int status = PyDict_SetItem(dict, PyTuple_GET_ITEM(kwnames, i), kwvalues[i]);
+        if (unlikely(status < 0)) {
+            Py_DECREF(dict);
+            return NULL;
+        }
+    }
+    return dict;
+}
+#endif
+#endif
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ImportExport.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ImportExport.c
new file mode 100644
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ImportExport.c
+/////////////// ImportDottedModule.proto ///////////////
+
+static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple); /*proto*/
+#if PY_MAJOR_VERSION >= 3
+static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple); /*proto*/
+#endif
+
+/////////////// ImportDottedModule ///////////////
+//@requires: Import
+
+#if PY_MAJOR_VERSION >= 3
+static PyObject *__Pyx__ImportDottedModule_Error(PyObject *name, PyObject *parts_tuple, Py_ssize_t count) {
+    PyObject *partial_name = NULL, *slice = NULL, *sep = NULL;
+    if (unlikely(PyErr_Occurred())) {
+        PyErr_Clear();
+    }
+    if (likely(PyTuple_GET_SIZE(parts_tuple) == count)) {
+        partial_name = name;
+    } else {
+        slice = PySequence_GetSlice(parts_tuple, 0, count);
+        if (unlikely(!slice))
+            goto bad;
+        sep = PyUnicode_FromStringAndSize(".", 1);
+        if (unlikely(!sep))
+            goto bad;
+        partial_name = PyUnicode_Join(sep, slice);
+    }
+
+    PyErr_Format(
+#if PY_MAJOR_VERSION < 3
+        PyExc_ImportError,
+        "No module named '%s'", PyString_AS_STRING(partial_name));
+#else
+#if PY_VERSION_HEX >= 0x030600B1
+        PyExc_ModuleNotFoundError,
+#else
+        PyExc_ImportError,
+#endif
+        "No module named '%U'", partial_name);
+#endif
+
+bad:
+    Py_XDECREF(sep);
+    Py_XDECREF(slice);
+    Py_XDECREF(partial_name);
+    return NULL;
+}
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+static PyObject *__Pyx__ImportDottedModule_Lookup(PyObject *name) {
+    PyObject *imported_module;
+#if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400)
+    PyObject *modules = PyImport_GetModuleDict();
+    if (unlikely(!modules))
+        return NULL;
+    imported_module = __Pyx_PyDict_GetItemStr(modules, name);
+    Py_XINCREF(imported_module);
+#else
+    imported_module = PyImport_GetModule(name);
+#endif
+    return imported_module;
+}
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple) {
+    Py_ssize_t i, nparts;
+    nparts = PyTuple_GET_SIZE(parts_tuple);
+    for (i=1; i < nparts && module; i++) {
+        PyObject *part, *submodule;
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+        part = PyTuple_GET_ITEM(parts_tuple, i);
+#else
+        part = PySequence_ITEM(parts_tuple, i);
+#endif
+        submodule = __Pyx_PyObject_GetAttrStrNoError(module, part);
+        // We stop if the attribute isn't found, i.e. if submodule is NULL here.
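+        // (If the walk stops early, __Pyx__ImportDottedModule_Error() reports the
+        // dotted prefix up to the component that could not be found.)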
+#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(part); +#endif + Py_DECREF(module); + module = submodule; + } + if (unlikely(!module)) { + return __Pyx__ImportDottedModule_Error(name, parts_tuple, i); + } + return module; +} +#endif + +static PyObject *__Pyx__ImportDottedModule(PyObject *name, PyObject *parts_tuple) { +#if PY_MAJOR_VERSION < 3 + PyObject *module, *from_list, *star = PYIDENT("*"); + CYTHON_UNUSED_VAR(parts_tuple); + from_list = PyList_New(1); + if (unlikely(!from_list)) + return NULL; + Py_INCREF(star); + PyList_SET_ITEM(from_list, 0, star); + module = __Pyx_Import(name, from_list, 0); + Py_DECREF(from_list); + return module; +#else + PyObject *imported_module; + PyObject *module = __Pyx_Import(name, NULL, 0); + if (!parts_tuple || unlikely(!module)) + return module; + + // Look up module in sys.modules, which is safer than the attribute lookups below. + imported_module = __Pyx__ImportDottedModule_Lookup(name); + if (likely(imported_module)) { + Py_DECREF(module); + return imported_module; + } + PyErr_Clear(); + return __Pyx_ImportDottedModule_WalkParts(module, name, parts_tuple); +#endif +} + +static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple) { +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030400B1 + PyObject *module = __Pyx__ImportDottedModule_Lookup(name); + if (likely(module)) { + // CPython guards against thread-concurrent initialisation in importlib. + // In this case, we let PyImport_ImportModuleLevelObject() handle the locking. + PyObject *spec = __Pyx_PyObject_GetAttrStrNoError(module, PYIDENT("__spec__")); + if (likely(spec)) { + PyObject *unsafe = __Pyx_PyObject_GetAttrStrNoError(spec, PYIDENT("_initializing")); + if (likely(!unsafe || !__Pyx_PyObject_IsTrue(unsafe))) { + Py_DECREF(spec); + spec = NULL; + } + Py_XDECREF(unsafe); + } + if (likely(!spec)) { + // Not in initialisation phase => use modules as is. 
+ PyErr_Clear(); + return module; + } + Py_DECREF(spec); + Py_DECREF(module); + } else if (PyErr_Occurred()) { + PyErr_Clear(); + } +#endif + + return __Pyx__ImportDottedModule(name, parts_tuple); +} + + +/////////////// ImportDottedModuleRelFirst.proto /////////////// + +static PyObject *__Pyx_ImportDottedModuleRelFirst(PyObject *name, PyObject *parts_tuple); /*proto*/ + +/////////////// ImportDottedModuleRelFirst /////////////// +//@requires: ImportDottedModule +//@requires: Import + +static PyObject *__Pyx_ImportDottedModuleRelFirst(PyObject *name, PyObject *parts_tuple) { + PyObject *module; + PyObject *from_list = NULL; +#if PY_MAJOR_VERSION < 3 + PyObject *star = PYIDENT("*"); + from_list = PyList_New(1); + if (unlikely(!from_list)) + return NULL; + Py_INCREF(star); + PyList_SET_ITEM(from_list, 0, star); +#endif + module = __Pyx_Import(name, from_list, -1); + Py_XDECREF(from_list); + if (module) { + #if PY_MAJOR_VERSION >= 3 + if (parts_tuple) { + module = __Pyx_ImportDottedModule_WalkParts(module, name, parts_tuple); + } + #endif + return module; + } + if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) + return NULL; + PyErr_Clear(); + // try absolute import + return __Pyx_ImportDottedModule(name, parts_tuple); +} + + +/////////////// Import.proto /////////////// + +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/ + +/////////////// Import /////////////// +//@requires: ObjectHandling.c::PyObjectGetAttrStr +//@requires:StringTools.c::IncludeStringH +//@substitute: naming + +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *module = 0; + PyObject *empty_dict = 0; + PyObject *empty_list = 0; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr($builtins_cname, PYIDENT("__import__")); + if (unlikely(!py_import)) + goto bad; + if (!from_list) { + empty_list = PyList_New(0); + if (unlikely(!empty_list)) + goto bad; + from_list = empty_list; + } + #endif + empty_dict = PyDict_New(); + if (unlikely(!empty_dict)) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.') != NULL) { + /* try package relative import first */ + module = PyImport_ImportModuleLevelObject( + name, $moddict_cname, empty_dict, from_list, 1); + if (unlikely(!module)) { + if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) + goto bad; + PyErr_Clear(); + } + } + level = 0; /* try absolute import on failure */ + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (unlikely(!py_level)) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, $moddict_cname, empty_dict, from_list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, $moddict_cname, empty_dict, from_list, level); + #endif + } + } +bad: + Py_XDECREF(empty_dict); + Py_XDECREF(empty_list); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + return module; +} + + +/////////////// ImportFrom.proto /////////////// + +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /*proto*/ + +/////////////// ImportFrom /////////////// +//@requires: ObjectHandling.c::PyObjectGetAttrStr + +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + // 'name' may refer to a (sub-)module which has not 
finished initialization
+    // yet, and may not be assigned as an attribute to its parent, so try
+    // finding it by full name.
+        const char* module_name_str = 0;
+        PyObject* module_name = 0;
+        PyObject* module_dot = 0;
+        PyObject* full_name = 0;
+        PyErr_Clear();
+        module_name_str = PyModule_GetName(module);
+        if (unlikely(!module_name_str)) { goto modbad; }
+        module_name = PyUnicode_FromString(module_name_str);
+        if (unlikely(!module_name)) { goto modbad; }
+        module_dot = PyUnicode_Concat(module_name, PYUNICODE("."));
+        if (unlikely(!module_dot)) { goto modbad; }
+        full_name = PyUnicode_Concat(module_dot, name);
+        if (unlikely(!full_name)) { goto modbad; }
+        #if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400)
+        {
+            PyObject *modules = PyImport_GetModuleDict();
+            if (unlikely(!modules))
+                goto modbad;
+            value = PyObject_GetItem(modules, full_name);
+        }
+        #else
+        value = PyImport_GetModule(full_name);
+        #endif
+
+      modbad:
+        Py_XDECREF(full_name);
+        Py_XDECREF(module_dot);
+        Py_XDECREF(module_name);
+    }
+    if (unlikely(!value)) {
+        PyErr_Format(PyExc_ImportError,
+        #if PY_MAJOR_VERSION < 3
+            "cannot import name %.230s", PyString_AS_STRING(name));
+        #else
+            "cannot import name %S", name);
+        #endif
+    }
+    return value;
+}
+
+
+/////////////// ImportStar ///////////////
+//@substitute: naming
+
+/* import_all_from is an unexposed function from ceval.c */
+
+static int
+__Pyx_import_all_from(PyObject *locals, PyObject *v)
+{
+    PyObject *all = PyObject_GetAttrString(v, "__all__");
+    PyObject *dict, *name, *value;
+    int skip_leading_underscores = 0;
+    int pos, err;
+
+    if (all == NULL) {
+        if (!PyErr_ExceptionMatches(PyExc_AttributeError))
+            return -1; /* Unexpected error */
+        PyErr_Clear();
+        dict = PyObject_GetAttrString(v, "__dict__");
+        if (dict == NULL) {
+            if (!PyErr_ExceptionMatches(PyExc_AttributeError))
+                return -1;
+            PyErr_SetString(PyExc_ImportError,
+            "from-import-* object has no __dict__ and no __all__");
+            return -1;
+        }
+#if PY_MAJOR_VERSION < 3
+        all = PyObject_CallMethod(dict, (char *)"keys", NULL);
+#else
+        all = PyMapping_Keys(dict);
+#endif
+        Py_DECREF(dict);
+        if (all == NULL)
+            return -1;
+        skip_leading_underscores = 1;
+    }
+
+    for (pos = 0, err = 0; ; pos++) {
+        name = PySequence_GetItem(all, pos);
+        if (name == NULL) {
+            if (!PyErr_ExceptionMatches(PyExc_IndexError))
+                err = -1;
+            else
+                PyErr_Clear();
+            break;
+        }
+        if (skip_leading_underscores &&
+#if PY_MAJOR_VERSION < 3
+            likely(PyString_Check(name)) &&
+            PyString_AS_STRING(name)[0] == '_')
+#else
+            likely(PyUnicode_Check(name)) &&
+            likely(__Pyx_PyUnicode_GET_LENGTH(name)) &&
+            __Pyx_PyUnicode_READ_CHAR(name, 0) == '_')
+#endif
+        {
+            Py_DECREF(name);
+            continue;
+        }
+        value = PyObject_GetAttr(v, name);
+        if (value == NULL)
+            err = -1;
+        else if (PyDict_CheckExact(locals))
+            err = PyDict_SetItem(locals, name, value);
+        else
+            err = PyObject_SetItem(locals, name, value);
+        Py_DECREF(name);
+        Py_XDECREF(value);
+        if (err != 0)
+            break;
+    }
+    Py_DECREF(all);
+    return err;
+}
+
+
+static int ${import_star}(PyObject* m) {
+
+    int i;
+    int ret = -1;
+    char* s;
+    PyObject *locals = 0;
+    PyObject *list = 0;
+#if PY_MAJOR_VERSION >= 3
+    PyObject *utf8_name = 0;
+#endif
+    PyObject *name;
+    PyObject *item;
+
+    locals = PyDict_New(); if (!locals) goto bad;
+    if (__Pyx_import_all_from(locals, m) < 0) goto bad;
+    list = PyDict_Items(locals); if (!list) goto bad;
+
+    for(i=0; i<PyList_GET_SIZE(list); i++) {
+        name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0);
+        item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1);
+#if PY_MAJOR_VERSION >= 3
+        utf8_name = PyUnicode_AsUTF8String(name);
+        if (!utf8_name) goto bad;
+        s = PyBytes_AS_STRING(utf8_name);
+        if (${import_star_set}(item, name, s) < 0) goto bad;
+        Py_DECREF(utf8_name); utf8_name = 0;
+#else
+        s = PyString_AsString(name);
+        if (!s) goto bad;
+        if (${import_star_set}(item, name, s) < 0) goto bad;
+#endif
+    }
+    ret = 0;
+
+bad:
+    Py_XDECREF(locals);
+    Py_XDECREF(list);
+#if PY_MAJOR_VERSION >= 3
+    Py_XDECREF(utf8_name);
+#endif
+    return ret;
+}
+
+
+/////////////// SetPackagePathFromImportLib.proto ///////////////
+
+// PY_VERSION_HEX >= 0x03030000
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_PEP489_MULTI_PHASE_INIT
+static int __Pyx_SetPackagePathFromImportLib(PyObject *module_name);
+#else
+#define __Pyx_SetPackagePathFromImportLib(a) 0
+#endif
+
+/////////////// SetPackagePathFromImportLib ///////////////
+//@substitute: naming
+
+// PY_VERSION_HEX >= 0x03030000
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_PEP489_MULTI_PHASE_INIT
+static int __Pyx_SetPackagePathFromImportLib(PyObject *module_name) {
+    PyObject *importlib, *osmod, *ossep, *parts, *package_path;
+    PyObject *file_path = NULL;
+    int result;
+    PyObject *spec;
+    // package_path = [importlib.util.find_spec(module_name).origin.rsplit(os.sep, 1)[0]]
+    importlib = PyImport_ImportModule("importlib.util");
+    if (unlikely(!importlib))
+        goto bad;
+    spec = PyObject_CallMethod(importlib, "find_spec", "(O)", module_name);
+    Py_DECREF(importlib);
+    if (unlikely(!spec))
+        goto bad;
+    file_path = PyObject_GetAttrString(spec, "origin");
+    Py_DECREF(spec);
+    if (unlikely(!file_path))
+        goto bad;
+
+    if (unlikely(PyObject_SetAttrString($module_cname, "__file__", file_path) < 0))
+        goto bad;
+
+    osmod = PyImport_ImportModule("os");
+    if (unlikely(!osmod))
+        goto bad;
+    ossep = PyObject_GetAttrString(osmod, "sep");
+    Py_DECREF(osmod);
+    if (unlikely(!ossep))
+        goto bad;
+    parts = PyObject_CallMethod(file_path, "rsplit", "(Oi)", ossep, 1);
+    Py_DECREF(file_path); file_path = NULL;
+    Py_DECREF(ossep);
+    if (unlikely(!parts))
+        goto bad;
+    package_path = Py_BuildValue("[O]", PyList_GET_ITEM(parts, 0));
+    Py_DECREF(parts);
+    if (unlikely(!package_path))
+        goto bad;
+    goto set_path;
+
+bad:
+    PyErr_WriteUnraisable(module_name);
+    Py_XDECREF(file_path);
+
+    // set an empty path list on failure
+    PyErr_Clear();
+    package_path = PyList_New(0);
+    if (unlikely(!package_path))
+        return -1;
+
+set_path:
+    result = PyObject_SetAttrString($module_cname, "__path__", package_path);
+    Py_DECREF(package_path);
+    return result;
+}
+#endif
+
+
+/////////////// TypeImport.proto ///////////////
+//@substitute: naming
+
+#ifndef __PYX_HAVE_RT_ImportType_proto_$cyversion
+#define __PYX_HAVE_RT_ImportType_proto_$cyversion
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+#include <stdalign.h>
+#endif
+
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || __cplusplus >= 201103L
+#define __PYX_GET_STRUCT_ALIGNMENT_$cyversion(s) alignof(s)
+#else
+// best guess at what the alignment could be since we can't measure it
+#define __PYX_GET_STRUCT_ALIGNMENT_$cyversion(s) sizeof(void*)
+#endif
+
+enum __Pyx_ImportType_CheckSize_$cyversion {
+    __Pyx_ImportType_CheckSize_Error_$cyversion = 0,
+    __Pyx_ImportType_CheckSize_Warn_$cyversion = 1,
+    __Pyx_ImportType_CheckSize_Ignore_$cyversion = 2
+};
+
+static PyTypeObject *__Pyx_ImportType_$cyversion(PyObject* module, const char *module_name, const char *class_name, size_t size, size_t alignment, enum __Pyx_ImportType_CheckSize_$cyversion check_size); /*proto*/
+
+#endif
+
+/////////////// TypeImport ///////////////
+//@substitute: naming
+
+#ifndef __PYX_HAVE_RT_ImportType_$cyversion
+#define 
__PYX_HAVE_RT_ImportType_$cyversion +static PyTypeObject *__Pyx_ImportType_$cyversion(PyObject *module, const char *module_name, const char *class_name, + size_t size, size_t alignment, enum __Pyx_ImportType_CheckSize_$cyversion check_size) +{ + PyObject *result = 0; + char warning[200]; + Py_ssize_t basicsize; + Py_ssize_t itemsize; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_basicsize; + PyObject *py_itemsize; +#endif + + result = PyObject_GetAttrString(module, class_name); + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#if !CYTHON_COMPILING_IN_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; + itemsize = ((PyTypeObject *)result)->tp_itemsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; + py_itemsize = PyObject_GetAttrString(result, "__itemsize__"); + if (!py_itemsize) + goto bad; + itemsize = PyLong_AsSsize_t(py_itemsize); + Py_DECREF(py_itemsize); + py_itemsize = 0; + if (itemsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if (itemsize) { + // If itemsize is smaller than the alignment the struct can end up with some extra + // padding at the end. In this case we need to work out the maximum size that + // the padding could be when calculating the range of valid struct sizes. + if (size % alignment) { + // if this is true we've probably calculated the alignment wrongly + // (most likely because alignof isn't available) + alignment = size % alignment; + } + if (itemsize < (Py_ssize_t)alignment) + itemsize = (Py_ssize_t)alignment; + } + if ((size_t)(basicsize + itemsize) < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize+itemsize); + goto bad; + } + // varobjects almost have structs between basicsize and basicsize + itemsize + // but the struct isn't always one of the two limiting values + if (check_size == __Pyx_ImportType_CheckSize_Error_$cyversion && + ((size_t)basicsize > size || (size_t)(basicsize + itemsize) < size)) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd-%zd from PyObject", + module_name, class_name, size, basicsize, basicsize+itemsize); + goto bad; + } + else if (check_size == __Pyx_ImportType_CheckSize_Warn_$cyversion && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. 
" + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + /* check_size == __Pyx_ImportType_CheckSize_Ignore does not warn nor error */ + return (PyTypeObject *)result; +bad: + Py_XDECREF(result); + return NULL; +} +#endif + +/////////////// FunctionImport.proto /////////////// +//@substitute: naming + +static int __Pyx_ImportFunction_$cyversion(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /*proto*/ + +/////////////// FunctionImport /////////////// +//@substitute: naming + +#ifndef __PYX_HAVE_RT_ImportFunction_$cyversion +#define __PYX_HAVE_RT_ImportFunction_$cyversion +static int __Pyx_ImportFunction_$cyversion(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + + d = PyObject_GetAttrString(module, (char *)"$api_name"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, funcname); + if (!cobj) { + PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C function %.200s", + PyModule_GetName(module), funcname); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); + goto bad; + } + tmp.p = PyCapsule_GetPointer(cobj, sig); + *f = tmp.fp; + if (!(*f)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + +/////////////// FunctionExport.proto /////////////// + +static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig); /*proto*/ + +/////////////// FunctionExport /////////////// +//@substitute: naming + +static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + union { + void (*fp)(void); + void *p; + } tmp; + + d = PyObject_GetAttrString($module_cname, (char *)"$api_name"); + if (!d) { + PyErr_Clear(); + d = PyDict_New(); + if (!d) + goto bad; + Py_INCREF(d); + if (PyModule_AddObject($module_cname, (char *)"$api_name", d) < 0) + goto bad; + } + tmp.fp = f; + cobj = PyCapsule_New(tmp.p, sig, 0); + if (!cobj) + goto bad; + if (PyDict_SetItemString(d, name, cobj) < 0) + goto bad; + Py_DECREF(cobj); + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(cobj); + Py_XDECREF(d); + return -1; +} + +/////////////// VoidPtrImport.proto /////////////// +//@substitute: naming + +static int __Pyx_ImportVoidPtr_$cyversion(PyObject *module, const char *name, void **p, const char *sig); /*proto*/ + +/////////////// VoidPtrImport /////////////// +//@substitute: naming + +#ifndef __PYX_HAVE_RT_ImportVoidPtr_$cyversion +#define __PYX_HAVE_RT_ImportVoidPtr_$cyversion +static int __Pyx_ImportVoidPtr_$cyversion(PyObject *module, const char *name, void **p, const char *sig) { + PyObject *d = 0; + PyObject *cobj = 0; + + d = PyObject_GetAttrString(module, (char *)"$api_name"); + if (!d) + goto bad; + cobj = PyDict_GetItemString(d, name); + if (!cobj) { + PyErr_Format(PyExc_ImportError, + "%.200s does not export expected C variable %.200s", + PyModule_GetName(module), name); + goto bad; + } + if (!PyCapsule_IsValid(cobj, sig)) { + PyErr_Format(PyExc_TypeError, + "C variable %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", + PyModule_GetName(module), name, sig, PyCapsule_GetName(cobj)); + goto bad; + } + *p = PyCapsule_GetPointer(cobj, 
sig); + if (!(*p)) + goto bad; + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(d); + return -1; +} +#endif + +/////////////// VoidPtrExport.proto /////////////// + +static int __Pyx_ExportVoidPtr(PyObject *name, void *p, const char *sig); /*proto*/ + +/////////////// VoidPtrExport /////////////// +//@substitute: naming +//@requires: ObjectHandling.c::PyObjectSetAttrStr + +static int __Pyx_ExportVoidPtr(PyObject *name, void *p, const char *sig) { + PyObject *d; + PyObject *cobj = 0; + + d = PyDict_GetItem($moddict_cname, PYIDENT("$api_name")); + Py_XINCREF(d); + if (!d) { + d = PyDict_New(); + if (!d) + goto bad; + if (__Pyx_PyObject_SetAttrStr($module_cname, PYIDENT("$api_name"), d) < 0) + goto bad; + } + cobj = PyCapsule_New(p, sig, 0); + if (!cobj) + goto bad; + if (PyDict_SetItem(d, name, cobj) < 0) + goto bad; + Py_DECREF(cobj); + Py_DECREF(d); + return 0; +bad: + Py_XDECREF(cobj); + Py_XDECREF(d); + return -1; +} + + +/////////////// SetVTable.proto /////////////// + +static int __Pyx_SetVtable(PyTypeObject* typeptr , void* vtable); /*proto*/ + +/////////////// SetVTable /////////////// + +static int __Pyx_SetVtable(PyTypeObject *type, void *vtable) { + PyObject *ob = PyCapsule_New(vtable, 0, 0); + if (unlikely(!ob)) + goto bad; +#if CYTHON_COMPILING_IN_LIMITED_API + if (unlikely(PyObject_SetAttr((PyObject *) type, PYIDENT("__pyx_vtable__"), ob) < 0)) +#else + if (unlikely(PyDict_SetItem(type->tp_dict, PYIDENT("__pyx_vtable__"), ob) < 0)) +#endif + goto bad; + Py_DECREF(ob); + return 0; +bad: + Py_XDECREF(ob); + return -1; +} + + +/////////////// GetVTable.proto /////////////// + +static void* __Pyx_GetVtable(PyTypeObject *type); /*proto*/ + +/////////////// GetVTable /////////////// + +static void* __Pyx_GetVtable(PyTypeObject *type) { + void* ptr; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *ob = PyObject_GetAttr((PyObject *)type, PYIDENT("__pyx_vtable__")); +#else + PyObject *ob = PyObject_GetItem(type->tp_dict, PYIDENT("__pyx_vtable__")); +#endif + if (!ob) + goto bad; + ptr = PyCapsule_GetPointer(ob, 0); + if (!ptr && !PyErr_Occurred()) + PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type"); + Py_DECREF(ob); + return ptr; +bad: + Py_XDECREF(ob); + return NULL; +} + + +/////////////// MergeVTables.proto /////////////// +//@requires: GetVTable + +// TODO: find a way to make this work with the Limited API! +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __Pyx_MergeVtables(PyTypeObject *type); /*proto*/ +#endif + +/////////////// MergeVTables /////////////// + +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __Pyx_MergeVtables(PyTypeObject *type) { + int i; + void** base_vtables; + __Pyx_TypeName tp_base_name; + __Pyx_TypeName base_name; + void* unknown = (void*)-1; + PyObject* bases = type->tp_bases; + int base_depth = 0; + { + PyTypeObject* base = type->tp_base; + while (base) { + base_depth += 1; + base = base->tp_base; + } + } + base_vtables = (void**) malloc(sizeof(void*) * (size_t)(base_depth + 1)); + base_vtables[0] = unknown; + // Could do MRO resolution of individual methods in the future, assuming + // compatible vtables, but for now simply require a common vtable base. + // Note that if the vtables of various bases are extended separately, + // resolution isn't possible and we must reject it just as when the + // instance struct is so extended. (It would be good to also do this + // check when a multiple-base class is created in pure Python as well.) 
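+    // The loop below checks, for each further base that exports a vtable, that its
+    // vtable occurs somewhere along the tp_base chain (cached lazily in base_vtables);
+    // a miss means the vtables were extended independently and cannot be merged.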
+ for (i = 1; i < PyTuple_GET_SIZE(bases); i++) { + void* base_vtable = __Pyx_GetVtable(((PyTypeObject*)PyTuple_GET_ITEM(bases, i))); + if (base_vtable != NULL) { + int j; + PyTypeObject* base = type->tp_base; + for (j = 0; j < base_depth; j++) { + if (base_vtables[j] == unknown) { + base_vtables[j] = __Pyx_GetVtable(base); + base_vtables[j + 1] = unknown; + } + if (base_vtables[j] == base_vtable) { + break; + } else if (base_vtables[j] == NULL) { + // No more potential matching bases (with vtables). + goto bad; + } + base = base->tp_base; + } + } + } + PyErr_Clear(); + free(base_vtables); + return 0; +bad: + tp_base_name = __Pyx_PyType_GetName(type->tp_base); + base_name = __Pyx_PyType_GetName((PyTypeObject*)PyTuple_GET_ITEM(bases, i)); + PyErr_Format(PyExc_TypeError, + "multiple bases have vtable conflict: '" __Pyx_FMT_TYPENAME "' and '" __Pyx_FMT_TYPENAME "'", tp_base_name, base_name); + __Pyx_DECREF_TypeName(tp_base_name); + __Pyx_DECREF_TypeName(base_name); + free(base_vtables); + return -1; +} +#endif + + +/////////////// ImportNumPyArray.proto /////////////// + +static PyObject *__pyx_numpy_ndarray = NULL; + +static PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void); /*proto*/ + +/////////////// ImportNumPyArray.cleanup /////////////// +Py_CLEAR(__pyx_numpy_ndarray); + +/////////////// ImportNumPyArray /////////////// +//@requires: ImportExport.c::Import + +static PyObject* __Pyx__ImportNumPyArray(void) { + PyObject *numpy_module, *ndarray_object = NULL; + numpy_module = __Pyx_Import(PYIDENT("numpy"), NULL, 0); + if (likely(numpy_module)) { + ndarray_object = PyObject_GetAttrString(numpy_module, "ndarray"); + Py_DECREF(numpy_module); + } + if (unlikely(!ndarray_object)) { + // ImportError, AttributeError, ... + PyErr_Clear(); + } + if (unlikely(!ndarray_object || !PyObject_TypeCheck(ndarray_object, &PyType_Type))) { + Py_XDECREF(ndarray_object); + Py_INCREF(Py_None); + ndarray_object = Py_None; + } + return ndarray_object; +} + +static CYTHON_INLINE PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void) { + if (unlikely(!__pyx_numpy_ndarray)) { + __pyx_numpy_ndarray = __Pyx__ImportNumPyArray(); + } + Py_INCREF(__pyx_numpy_ndarray); + return __pyx_numpy_ndarray; +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/MemoryView.pyx b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/MemoryView.pyx new file mode 100644 index 0000000000000000000000000000000000000000..57ffaf92b483b692f8fb9ced000e981db20517f4 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/MemoryView.pyx @@ -0,0 +1,1481 @@ +#################### View.MemoryView #################### + +# cython: language_level=3str +# cython: binding=False + +# This utility provides cython.array and cython.view.memoryview + +from __future__ import absolute_import + +cimport cython + +# from cpython cimport ... 
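+# Note: only the small subset of the C API used below is declared explicitly,
+# instead of cimporting the full cpython declarations.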
+cdef extern from "Python.h": + ctypedef struct PyObject + int PyIndex_Check(object) + PyObject *PyExc_IndexError + PyObject *PyExc_ValueError + +cdef extern from "pythread.h": + ctypedef void *PyThread_type_lock + + PyThread_type_lock PyThread_allocate_lock() + void PyThread_free_lock(PyThread_type_lock) + +cdef extern from "": + void *memset(void *b, int c, size_t len) + +cdef extern from *: + bint __PYX_CYTHON_ATOMICS_ENABLED() + int __Pyx_GetBuffer(object, Py_buffer *, int) except -1 + void __Pyx_ReleaseBuffer(Py_buffer *) + + ctypedef struct PyObject + ctypedef Py_ssize_t Py_intptr_t + void Py_INCREF(PyObject *) + void Py_DECREF(PyObject *) + + void* PyMem_Malloc(size_t n) + void PyMem_Free(void *p) + void* PyObject_Malloc(size_t n) + void PyObject_Free(void *p) + + cdef struct __pyx_memoryview "__pyx_memoryview_obj": + Py_buffer view + PyObject *obj + __Pyx_TypeInfo *typeinfo + + ctypedef struct {{memviewslice_name}}: + __pyx_memoryview *memview + char *data + Py_ssize_t shape[{{max_dims}}] + Py_ssize_t strides[{{max_dims}}] + Py_ssize_t suboffsets[{{max_dims}}] + + void __PYX_INC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil) + void __PYX_XCLEAR_MEMVIEW({{memviewslice_name}} *memslice, int have_gil) + + ctypedef struct __pyx_buffer "Py_buffer": + PyObject *obj + + PyObject *Py_None + + cdef enum: + PyBUF_C_CONTIGUOUS, + PyBUF_F_CONTIGUOUS, + PyBUF_ANY_CONTIGUOUS + PyBUF_FORMAT + PyBUF_WRITABLE + PyBUF_STRIDES + PyBUF_INDIRECT + PyBUF_ND + PyBUF_RECORDS + PyBUF_RECORDS_RO + + ctypedef struct __Pyx_TypeInfo: + pass + +cdef extern from *: + ctypedef int __pyx_atomic_int_type + {{memviewslice_name}} slice_copy_contig "__pyx_memoryview_copy_new_contig"( + __Pyx_memviewslice *from_mvs, + char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + bint dtype_is_object) except * nogil + bint slice_is_contig "__pyx_memviewslice_is_contig" ( + {{memviewslice_name}} mvs, char order, int ndim) nogil + bint slices_overlap "__pyx_slices_overlap" ({{memviewslice_name}} *slice1, + {{memviewslice_name}} *slice2, + int ndim, size_t itemsize) nogil + + +cdef extern from "": + void *malloc(size_t) nogil + void free(void *) nogil + void *memcpy(void *dest, void *src, size_t n) nogil + +# the sequence abstract base class +cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" +try: + if __import__("sys").version_info >= (3, 3): + __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + else: + __pyx_collections_abc_Sequence = __import__("collections").Sequence +except: + # it isn't a big problem if this fails + __pyx_collections_abc_Sequence = None + +# +### cython.array class +# + +@cython.collection_type("sequence") +@cname("__pyx_array") +cdef class array: + + cdef: + char *data + Py_ssize_t len + char *format + int ndim + Py_ssize_t *_shape + Py_ssize_t *_strides + Py_ssize_t itemsize + unicode mode # FIXME: this should have been a simple 'char' + bytes _format + void (*callback_free_data)(void *data) noexcept + # cdef object _memview + cdef bint free_data + cdef bint dtype_is_object + + def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, + mode="c", bint allocate_buffer=True): + + cdef int idx + cdef Py_ssize_t dim + + self.ndim = len(shape) + self.itemsize = itemsize + + if not self.ndim: + raise ValueError, "Empty shape tuple for cython.array" + + if itemsize <= 0: + raise ValueError, "itemsize <= 0 for cython.array" + + if not isinstance(format, bytes): + format = format.encode('ASCII') + self._format = format # keep a 
reference to the byte string + self.format = self._format + + # use single malloc() for both shape and strides + self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) + self._strides = self._shape + self.ndim + + if not self._shape: + raise MemoryError, "unable to allocate shape and strides." + + # cdef Py_ssize_t dim, stride + for idx, dim in enumerate(shape): + if dim <= 0: + raise ValueError, f"Invalid shape in axis {idx}: {dim}." + self._shape[idx] = dim + + cdef char order + if mode == 'c': + order = b'C' + self.mode = u'c' + elif mode == 'fortran': + order = b'F' + self.mode = u'fortran' + else: + raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" + + self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) + + self.free_data = allocate_buffer + self.dtype_is_object = format == b'O' + + if allocate_buffer: + _allocate_buffer(self) + + @cname('getbuffer') + def __getbuffer__(self, Py_buffer *info, int flags): + cdef int bufmode = -1 + if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): + if self.mode == u"c": + bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + elif self.mode == u"fortran": + bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + if not (flags & bufmode): + raise ValueError, "Can only create a buffer that is contiguous in memory." + info.buf = self.data + info.len = self.len + + if flags & PyBUF_STRIDES: + info.ndim = self.ndim + info.shape = self._shape + info.strides = self._strides + else: + info.ndim = 1 + info.shape = &self.len if flags & PyBUF_ND else NULL + info.strides = NULL + + info.suboffsets = NULL + info.itemsize = self.itemsize + info.readonly = 0 + info.format = self.format if flags & PyBUF_FORMAT else NULL + info.obj = self + + def __dealloc__(array self): + if self.callback_free_data != NULL: + self.callback_free_data(self.data) + elif self.free_data and self.data is not NULL: + if self.dtype_is_object: + refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) + free(self.data) + PyObject_Free(self._shape) + + @property + def memview(self): + return self.get_memview() + + @cname('get_memview') + cdef get_memview(self): + flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + return memoryview(self, flags, self.dtype_is_object) + + def __len__(self): + return self._shape[0] + + def __getattr__(self, attr): + return getattr(self.memview, attr) + + def __getitem__(self, item): + return self.memview[item] + + def __setitem__(self, item, value): + self.memview[item] = value + + # Sequence methods + try: + count = __pyx_collections_abc_Sequence.count + index = __pyx_collections_abc_Sequence.index + except: + pass + +@cname("__pyx_array_allocate_buffer") +cdef int _allocate_buffer(array self) except -1: + # use malloc() for backwards compatibility + # in case external code wants to change the data pointer + cdef Py_ssize_t i + cdef PyObject **p + + self.free_data = True + self.data = <char *> malloc(self.len) + if not self.data: + raise MemoryError, "unable to allocate array data." + + if self.dtype_is_object: + p = <PyObject **> self.data + for i in range(self.len // self.itemsize): + p[i] = Py_None + Py_INCREF(Py_None) + return 0 + + +@cname("__pyx_array_new") +cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): + cdef array result + cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string.
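+ # A NULL 'buf' lets __cinit__ allocate fresh storage (allocate_buffer + # defaults to True); a non-NULL 'buf' is wrapped as-is, so allocation + # is suppressed via allocate_buffer=False in the branch below.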
+ + if buf is NULL: + result = array.__new__(array, shape, itemsize, format, mode) + else: + result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) + result.data = buf + + return result + + +# +### Memoryview constants and cython.view.memoryview class +# + +# Disable generic_contiguous, as it makes trouble verifying contiguity: +# - 'contiguous' or '::1' means the dimension is contiguous with dtype +# - 'indirect_contiguous' means a contiguous list of pointers +# - dtype contiguous must be contiguous in the first or last dimension +# from the start, or from the dimension following the last indirect dimension +# +# e.g. +# int[::indirect_contiguous, ::contiguous, :] +# +# is valid (list of pointers to 2d fortran-contiguous array), but +# +# int[::generic_contiguous, ::contiguous, :] +# +# would mean you'd have to assert dimension 0 to be indirect (and pointer contiguous) at runtime. +# So it doesn't bring any performance benefit, and it's only confusing. + +@cname('__pyx_MemviewEnum') +cdef class Enum(object): + cdef object name + def __init__(self, name): + self.name = name + def __repr__(self): + return self.name + +cdef generic = Enum("<strided and direct or indirect>") +cdef strided = Enum("<strided and direct>") # default +cdef indirect = Enum("<strided and indirect>") +# Disable generic_contiguous, as it is a troublemaker +#cdef generic_contiguous = Enum("<contiguous and direct or indirect>") +cdef contiguous = Enum("<contiguous and direct>") +cdef indirect_contiguous = Enum("<contiguous and indirect>") + +# 'follow' is implied when the first or last axis is ::1 + + +# pre-allocate thread locks for reuse +## note that this could be implemented in a more beautiful way in "normal" Cython, +## but this code gets merged into the user module and not everything works there. +cdef int __pyx_memoryview_thread_locks_used = 0 +cdef PyThread_type_lock[{{THREAD_LOCKS_PREALLOCATED}}] __pyx_memoryview_thread_locks = [ +{{for _ in range(THREAD_LOCKS_PREALLOCATED)}} + PyThread_allocate_lock(), +{{endfor}} +] + + +@cname('__pyx_memoryview') +cdef class memoryview: + + cdef object obj + cdef object _size + cdef object _array_interface + cdef PyThread_type_lock lock + cdef __pyx_atomic_int_type acquisition_count + cdef Py_buffer view + cdef int flags + cdef bint dtype_is_object + cdef __Pyx_TypeInfo *typeinfo + + def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + self.obj = obj + self.flags = flags + if type(self) is memoryview or obj is not None: + __Pyx_GetBuffer(obj, &self.view, flags) + if <PyObject *> self.view.obj == NULL: + (<__pyx_buffer *> &self.view).obj = Py_None + Py_INCREF(Py_None) + + if not __PYX_CYTHON_ATOMICS_ENABLED(): + global __pyx_memoryview_thread_locks_used + if __pyx_memoryview_thread_locks_used < {{THREAD_LOCKS_PREALLOCATED}}: + self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + __pyx_memoryview_thread_locks_used += 1 + if self.lock is NULL: + self.lock = PyThread_allocate_lock() + if self.lock is NULL: + raise MemoryError + + if flags & PyBUF_FORMAT: + self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + else: + self.dtype_is_object = dtype_is_object + + assert <Py_ssize_t><void *>(&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 + self.typeinfo = NULL + + def __dealloc__(memoryview self): + if self.obj is not None: + __Pyx_ReleaseBuffer(&self.view) + elif (<__pyx_buffer *> &self.view).obj == Py_None: + # Undo the incref in __cinit__() above.
(<__pyx_buffer *> &self.view).obj = NULL + Py_DECREF(Py_None) + + cdef int i + global __pyx_memoryview_thread_locks_used + if self.lock != NULL: + for i in range(__pyx_memoryview_thread_locks_used): + if __pyx_memoryview_thread_locks[i] is self.lock: + __pyx_memoryview_thread_locks_used -= 1 + if i != __pyx_memoryview_thread_locks_used: + __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + break + else: + PyThread_free_lock(self.lock) + + cdef char *get_item_pointer(memoryview self, object index) except NULL: + cdef Py_ssize_t dim + cdef char *itemp = self.view.buf + + for dim, idx in enumerate(index): + itemp = pybuffer_index(&self.view, itemp, idx, dim) + + return itemp + + #@cname('__pyx_memoryview_getitem') + def __getitem__(memoryview self, object index): + if index is Ellipsis: + return self + + have_slices, indices = _unellipsify(index, self.view.ndim) + + cdef char *itemp + if have_slices: + return memview_slice(self, indices) + else: + itemp = self.get_item_pointer(indices) + return self.convert_item_to_object(itemp) + + def __setitem__(memoryview self, object index, object value): + if self.view.readonly: + raise TypeError, "Cannot assign to read-only memoryview" + + have_slices, index = _unellipsify(index, self.view.ndim) + + if have_slices: + obj = self.is_slice(value) + if obj: + self.setitem_slice_assignment(self[index], obj) + else: + self.setitem_slice_assign_scalar(self[index], value) + else: + self.setitem_indexed(index, value) + + cdef is_slice(self, obj): + if not isinstance(obj, memoryview): + try: + obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + self.dtype_is_object) + except TypeError: + return None + + return obj + + cdef setitem_slice_assignment(self, dst, src): + cdef {{memviewslice_name}} dst_slice + cdef {{memviewslice_name}} src_slice + cdef {{memviewslice_name}} msrc = get_slice_from_memview(src, &src_slice)[0] + cdef {{memviewslice_name}} mdst = get_slice_from_memview(dst, &dst_slice)[0] + + memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) + + cdef setitem_slice_assign_scalar(self, memoryview dst, value): + cdef int array[128] + cdef void *tmp = NULL + cdef void *item + + cdef {{memviewslice_name}} *dst_slice + cdef {{memviewslice_name}} tmp_slice + dst_slice = get_slice_from_memview(dst, &tmp_slice) + + if self.view.itemsize > sizeof(array): + tmp = PyMem_Malloc(self.view.itemsize) + if tmp == NULL: + raise MemoryError + item = tmp + else: + item = <void *> array + + try: + if self.dtype_is_object: + (<PyObject **> item)[0] = <PyObject *> value + else: + self.assign_item_from_object(<char *> item, value) + + # It would be easy to support indirect dimensions, but it's easier + # to disallow :) + if self.view.suboffsets != NULL: + assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + item, self.dtype_is_object) + finally: + PyMem_Free(tmp) + + cdef setitem_indexed(self, index, value): + cdef char *itemp = self.get_item_pointer(index) + self.assign_item_from_object(itemp, value) + + cdef convert_item_to_object(self, char *itemp): + """Only used if instantiated manually by the user, or if Cython doesn't + know how to convert the type""" + import struct + cdef bytes bytesitem + # Do a manual and complete check here instead of this easy hack + bytesitem = itemp[:self.view.itemsize] + try: + result = 
struct.unpack(self.view.format, bytesitem) + except struct.error: + raise ValueError, "Unable to convert item to object" + else: + if len(self.view.format) == 1: + return result[0] + return result + + cdef assign_item_from_object(self, char *itemp, object value): + """Only used if instantiated manually by the user, or if Cython doesn't + know how to convert the type""" + import struct + cdef char c + cdef bytes bytesvalue + cdef Py_ssize_t i + + if isinstance(value, tuple): + bytesvalue = struct.pack(self.view.format, *value) + else: + bytesvalue = struct.pack(self.view.format, value) + + for i, c in enumerate(bytesvalue): + itemp[i] = c + + @cname('getbuffer') + def __getbuffer__(self, Py_buffer *info, int flags): + if flags & PyBUF_WRITABLE and self.view.readonly: + raise ValueError, "Cannot create writable memory view from read-only memoryview" + + if flags & PyBUF_ND: + info.shape = self.view.shape + else: + info.shape = NULL + + if flags & PyBUF_STRIDES: + info.strides = self.view.strides + else: + info.strides = NULL + + if flags & PyBUF_INDIRECT: + info.suboffsets = self.view.suboffsets + else: + info.suboffsets = NULL + + if flags & PyBUF_FORMAT: + info.format = self.view.format + else: + info.format = NULL + + info.buf = self.view.buf + info.ndim = self.view.ndim + info.itemsize = self.view.itemsize + info.len = self.view.len + info.readonly = self.view.readonly + info.obj = self + + # Some properties that have the same semantics as in NumPy + @property + def T(self): + cdef _memoryviewslice result = memoryview_copy(self) + transpose_memslice(&result.from_slice) + return result + + @property + def base(self): + return self._get_base() + + cdef _get_base(self): + return self.obj + + @property + def shape(self): + return tuple([length for length in self.view.shape[:self.view.ndim]]) + + @property + def strides(self): + if self.view.strides == NULL: + # Note: we always ask for strides, so if this is not set it's a bug + raise ValueError, "Buffer view does not expose strides" + + return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + + @property + def suboffsets(self): + if self.view.suboffsets == NULL: + return (-1,) * self.view.ndim + + return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + + @property + def ndim(self): + return self.view.ndim + + @property + def itemsize(self): + return self.view.itemsize + + @property + def nbytes(self): + return self.size * self.view.itemsize + + @property + def size(self): + if self._size is None: + result = 1 + + for length in self.view.shape[:self.view.ndim]: + result *= length + + self._size = result + + return self._size + + def __len__(self): + if self.view.ndim >= 1: + return self.view.shape[0] + + return 0 + + def __repr__(self): + return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, + id(self)) + + def __str__(self): + return "<MemoryView of %r object>" % (self.base.__class__.__name__,) + + # Support the same attributes as memoryview slices + def is_c_contig(self): + cdef {{memviewslice_name}} *mslice + cdef {{memviewslice_name}} tmp + mslice = get_slice_from_memview(self, &tmp) + return slice_is_contig(mslice[0], 'C', self.view.ndim) + + def is_f_contig(self): + cdef {{memviewslice_name}} *mslice + cdef {{memviewslice_name}} tmp + mslice = get_slice_from_memview(self, &tmp) + return slice_is_contig(mslice[0], 'F', self.view.ndim) + + def copy(self): + cdef {{memviewslice_name}} mslice + cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + + slice_copy(self, &mslice) + mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
self.view.itemsize, + flags|PyBUF_C_CONTIGUOUS, + self.dtype_is_object) + + return memoryview_copy_from_slice(self, &mslice) + + def copy_fortran(self): + cdef {{memviewslice_name}} src, dst + cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + + slice_copy(self, &src) + dst = slice_copy_contig(&src, "fortran", self.view.ndim, + self.view.itemsize, + flags|PyBUF_F_CONTIGUOUS, + self.dtype_is_object) + + return memoryview_copy_from_slice(self, &dst) + + +@cname('__pyx_memoryview_new') +cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + cdef memoryview result = memoryview(o, flags, dtype_is_object) + result.typeinfo = typeinfo + return result + +@cname('__pyx_memoryview_check') +cdef inline bint memoryview_check(object o) noexcept: + return isinstance(o, memoryview) + +cdef tuple _unellipsify(object index, int ndim): + """ + Replace all ellipses with full slices and fill incomplete indices with + full slices. + """ + cdef Py_ssize_t idx + tup = index if isinstance(index, tuple) else (index,) + + result = [slice(None)] * ndim + have_slices = False + seen_ellipsis = False + idx = 0 + for item in tup: + if item is Ellipsis: + if not seen_ellipsis: + idx += ndim - len(tup) + seen_ellipsis = True + have_slices = True + else: + if isinstance(item, slice): + have_slices = True + elif not PyIndex_Check(item): + raise TypeError, f"Cannot index with type '{type(item)}'" + result[idx] = item + idx += 1 + + nslices = ndim - idx + return have_slices or nslices, tuple(result) + +cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: + for suboffset in suboffsets[:ndim]: + if suboffset >= 0: + raise ValueError, "Indirect dimensions not supported" + return 0 # return type just used as an error flag + +# +### Slicing a memoryview +# + +@cname('__pyx_memview_slice') +cdef memoryview memview_slice(memoryview memview, object indices): + cdef int new_ndim = 0, suboffset_dim = -1, dim + cdef bint negative_step + cdef {{memviewslice_name}} src, dst + cdef {{memviewslice_name}} *p_src + + # dst is copied by value in memoryview_fromslice -- initialize it + # src is never copied + memset(&dst, 0, sizeof(dst)) + + cdef _memoryviewslice memviewsliceobj + + assert memview.view.ndim > 0 + + if isinstance(memview, _memoryviewslice): + memviewsliceobj = memview + p_src = &memviewsliceobj.from_slice + else: + slice_copy(memview, &src) + p_src = &src + + # Note: don't use variable src at this point + # SubNote: we should be able to declare variables in blocks... 
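+ + # Each entry of 'indices' is consumed in one of three ways below: an + # integer index drops a dimension, None inserts a new length-1 dimension, + # and a slice object narrows an existing dimension in place.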
+ + # memoryview_fromslice() will inc our dst slice + dst.memview = p_src.memview + dst.data = p_src.data + + # Put everything in temps to avoid this bloody warning: + # "Argument evaluation order in C function call is undefined and + # may not be as expected" + cdef {{memviewslice_name}} *p_dst = &dst + cdef int *p_suboffset_dim = &suboffset_dim + cdef Py_ssize_t start, stop, step, cindex + cdef bint have_start, have_stop, have_step + + for dim, index in enumerate(indices): + if PyIndex_Check(index): + cindex = index + slice_memviewslice( + p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + dim, new_ndim, p_suboffset_dim, + cindex, 0, 0, # start, stop, step + 0, 0, 0, # have_{start,stop,step} + False) + elif index is None: + p_dst.shape[new_ndim] = 1 + p_dst.strides[new_ndim] = 0 + p_dst.suboffsets[new_ndim] = -1 + new_ndim += 1 + else: + start = index.start or 0 + stop = index.stop or 0 + step = index.step or 0 + + have_start = index.start is not None + have_stop = index.stop is not None + have_step = index.step is not None + + slice_memviewslice( + p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + dim, new_ndim, p_suboffset_dim, + start, stop, step, + have_start, have_stop, have_step, + True) + new_ndim += 1 + + if isinstance(memview, _memoryviewslice): + return memoryview_fromslice(dst, new_ndim, + memviewsliceobj.to_object_func, + memviewsliceobj.to_dtype_func, + memview.dtype_is_object) + else: + return memoryview_fromslice(dst, new_ndim, NULL, NULL, + memview.dtype_is_object) + + +# +### Slicing in a single dimension of a memoryviewslice +# + +@cname('__pyx_memoryview_slice_memviewslice') +cdef int slice_memviewslice( + {{memviewslice_name}} *dst, + Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + int dim, int new_ndim, int *suboffset_dim, + Py_ssize_t start, Py_ssize_t stop, Py_ssize_t step, + int have_start, int have_stop, int have_step, + bint is_slice) except -1 nogil: + """ + Create a new slice dst given slice src. 
+ + dim - the current src dimension (indexing will make dimensions + disappear) + new_dim - the new dst dimension + suboffset_dim - pointer to a single int initialized to -1 to keep track of + where slicing offsets should be added + """ + + cdef Py_ssize_t new_shape + cdef bint negative_step + + if not is_slice: + # index is a normal integer-like index + if start < 0: + start += shape + if not 0 <= start < shape: + _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) + else: + # index is a slice + if have_step: + negative_step = step < 0 + if step == 0: + _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) + else: + negative_step = False + step = 1 + + # check our bounds and set defaults + if have_start: + if start < 0: + start += shape + if start < 0: + start = 0 + elif start >= shape: + if negative_step: + start = shape - 1 + else: + start = shape + else: + if negative_step: + start = shape - 1 + else: + start = 0 + + if have_stop: + if stop < 0: + stop += shape + if stop < 0: + stop = 0 + elif stop > shape: + stop = shape + else: + if negative_step: + stop = -1 + else: + stop = shape + + # len = ceil( (stop - start) / step ) + with cython.cdivision(True): + new_shape = (stop - start) // step + + if (stop - start) - step * new_shape: + new_shape += 1 + + if new_shape < 0: + new_shape = 0 + + # shape/strides/suboffsets + dst.strides[new_ndim] = stride * step + dst.shape[new_ndim] = new_shape + dst.suboffsets[new_ndim] = suboffset + + # Add the slicing or indexing offsets to the right suboffset or base data * + if suboffset_dim[0] < 0: + dst.data += start * stride + else: + dst.suboffsets[suboffset_dim[0]] += start * stride + + if suboffset >= 0: + if not is_slice: + if new_ndim == 0: + dst.data = (<char **> dst.data)[0] + suboffset + else: + _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d " + "must be indexed and not sliced", dim) + else: + suboffset_dim[0] = new_ndim + + return 0 + +# +### Index a memoryview +# +@cname('__pyx_pybuffer_index') +cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, + Py_ssize_t dim) except NULL: + cdef Py_ssize_t shape, stride, suboffset = -1 + cdef Py_ssize_t itemsize = view.itemsize + cdef char *resultp + + if view.ndim == 0: + shape = view.len // itemsize + stride = itemsize + else: + shape = view.shape[dim] + stride = view.strides[dim] + if view.suboffsets != NULL: + suboffset = view.suboffsets[dim] + + if index < 0: + index += view.shape[dim] + if index < 0: + raise IndexError, f"Out of bounds on buffer access (axis {dim})" + + if index >= shape: + raise IndexError, f"Out of bounds on buffer access (axis {dim})" + + resultp = bufp + index * stride + if suboffset >= 0: + resultp = (<char **> resultp)[0] + suboffset + + return resultp + +# +### Transposing a memoryviewslice +# +@cname('__pyx_memslice_transpose') +cdef int transpose_memslice({{memviewslice_name}} *memslice) except -1 nogil: + cdef int ndim = memslice.memview.view.ndim + + cdef Py_ssize_t *shape = memslice.shape + cdef Py_ssize_t *strides = memslice.strides + + # reverse strides and shape + cdef int i, j + for i in range(ndim // 2): + j = ndim - 1 - i + strides[i], strides[j] = strides[j], strides[i] + shape[i], shape[j] = shape[j], shape[i] + + if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") + + return 0 + +# +### Creating new memoryview objects from slices and memoryviews +# +@cython.collection_type("sequence") +@cname('__pyx_memoryviewslice')
+cdef class _memoryviewslice(memoryview): + "Internal class for passing memoryview slices to Python" + + # We need this to keep our shape/strides/suboffset pointers valid + cdef {{memviewslice_name}} from_slice + # We need this only to print its class's name + cdef object from_object + + cdef object (*to_object_func)(char *) + cdef int (*to_dtype_func)(char *, object) except 0 + + def __dealloc__(self): + __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) + + cdef convert_item_to_object(self, char *itemp): + if self.to_object_func != NULL: + return self.to_object_func(itemp) + else: + return memoryview.convert_item_to_object(self, itemp) + + cdef assign_item_from_object(self, char *itemp, object value): + if self.to_dtype_func != NULL: + self.to_dtype_func(itemp, value) + else: + memoryview.assign_item_from_object(self, itemp, value) + + cdef _get_base(self): + return self.from_object + + # Sequence methods + try: + count = __pyx_collections_abc_Sequence.count + index = __pyx_collections_abc_Sequence.index + except: + pass + +try: + if __pyx_collections_abc_Sequence: + # The main value of registering _memoryviewslice as a + # Sequence is that it can be used in structural pattern + # matching in Python 3.10+ + __pyx_collections_abc_Sequence.register(_memoryviewslice) + __pyx_collections_abc_Sequence.register(array) +except: + pass # ignore failure, it's a minor issue + +@cname('__pyx_memoryview_fromslice') +cdef memoryview_fromslice({{memviewslice_name}} memviewslice, + int ndim, + object (*to_object_func)(char *), + int (*to_dtype_func)(char *, object) except 0, + bint dtype_is_object): + + cdef _memoryviewslice result + + if <PyObject *> memviewslice.memview == Py_None: + return None + + # assert 0 < ndim <= (<memoryview> memviewslice.memview).view.ndim, ( + # ndim, (<memoryview> memviewslice.memview).view.ndim) + + result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object) + + result.from_slice = memviewslice + __PYX_INC_MEMVIEW(&memviewslice, 1) + + result.from_object = (<memoryview> memviewslice.memview)._get_base() + result.typeinfo = memviewslice.memview.typeinfo + + result.view = memviewslice.memview.view + result.view.buf = memviewslice.data + result.view.ndim = ndim + (<__pyx_buffer *> &result.view).obj = Py_None + Py_INCREF(Py_None) + + if (<memoryview> memviewslice.memview).flags & PyBUF_WRITABLE: + result.flags = PyBUF_RECORDS + else: + result.flags = PyBUF_RECORDS_RO + + result.view.shape = result.from_slice.shape + result.view.strides = result.from_slice.strides + + # only set suboffsets if actually used, otherwise set to NULL to improve compatibility + result.view.suboffsets = NULL + for suboffset in result.from_slice.suboffsets[:ndim]: + if suboffset >= 0: + result.view.suboffsets = result.from_slice.suboffsets + break + + result.view.len = result.view.itemsize + for length in result.view.shape[:ndim]: + result.view.len *= length + + result.to_object_func = to_object_func + result.to_dtype_func = to_dtype_func + + return result + +@cname('__pyx_memoryview_get_slice_from_memoryview') +cdef {{memviewslice_name}} *get_slice_from_memview(memoryview memview, + {{memviewslice_name}} *mslice) except NULL: + cdef _memoryviewslice obj + if isinstance(memview, _memoryviewslice): + obj = memview + return &obj.from_slice + else: + slice_copy(memview, mslice) + return mslice + +@cname('__pyx_memoryview_slice_copy') +cdef void slice_copy(memoryview memview, {{memviewslice_name}} *dst) noexcept: + cdef int dim + cdef (Py_ssize_t*) shape, strides, suboffsets + + shape = memview.view.shape + strides = memview.view.strides + suboffsets = 
memview.view.suboffsets + + dst.memview = <__pyx_memoryview *> memview + dst.data = memview.view.buf + + for dim in range(memview.view.ndim): + dst.shape[dim] = shape[dim] + dst.strides[dim] = strides[dim] + dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + +@cname('__pyx_memoryview_copy_object') +cdef memoryview_copy(memoryview memview): + "Create a new memoryview object" + cdef {{memviewslice_name}} memviewslice + slice_copy(memview, &memviewslice) + return memoryview_copy_from_slice(memview, &memviewslice) + +@cname('__pyx_memoryview_copy_object_from_slice') +cdef memoryview_copy_from_slice(memoryview memview, {{memviewslice_name}} *memviewslice): + """ + Create a new memoryview object from a given memoryview object and slice. + """ + cdef object (*to_object_func)(char *) + cdef int (*to_dtype_func)(char *, object) except 0 + + if isinstance(memview, _memoryviewslice): + to_object_func = (<_memoryviewslice> memview).to_object_func + to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + else: + to_object_func = NULL + to_dtype_func = NULL + + return memoryview_fromslice(memviewslice[0], memview.view.ndim, + to_object_func, to_dtype_func, + memview.dtype_is_object) + + +# +### Copy the contents of memoryview slices +# +cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) noexcept nogil: + return -arg if arg < 0 else arg + +@cname('__pyx_get_best_slice_order') +cdef char get_best_order({{memviewslice_name}} *mslice, int ndim) noexcept nogil: + """ + Figure out the best memory access order for a given slice. + """ + cdef int i + cdef Py_ssize_t c_stride = 0 + cdef Py_ssize_t f_stride = 0 + + for i in range(ndim - 1, -1, -1): + if mslice.shape[i] > 1: + c_stride = mslice.strides[i] + break + + for i in range(ndim): + if mslice.shape[i] > 1: + f_stride = mslice.strides[i] + break + + if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + return 'C' + else: + return 'F' + +@cython.cdivision(True) +cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, + char *dst_data, Py_ssize_t *dst_strides, + Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + int ndim, size_t itemsize) noexcept nogil: + # Note: src_extent is 1 if we're broadcasting + # dst_extent always >= src_extent as we don't do reductions + cdef Py_ssize_t i + cdef Py_ssize_t src_extent = src_shape[0] + cdef Py_ssize_t dst_extent = dst_shape[0] + cdef Py_ssize_t src_stride = src_strides[0] + cdef Py_ssize_t dst_stride = dst_strides[0] + + if ndim == 1: + if (src_stride > 0 and dst_stride > 0 and + <size_t> src_stride == itemsize == <size_t> dst_stride): + memcpy(dst_data, src_data, itemsize * dst_extent) + else: + for i in range(dst_extent): + memcpy(dst_data, src_data, itemsize) + src_data += src_stride + dst_data += dst_stride + else: + for i in range(dst_extent): + _copy_strided_to_strided(src_data, src_strides + 1, + dst_data, dst_strides + 1, + src_shape + 1, dst_shape + 1, + ndim - 1, itemsize) + src_data += src_stride + dst_data += dst_stride + +cdef void copy_strided_to_strided({{memviewslice_name}} *src, + {{memviewslice_name}} *dst, + int ndim, size_t itemsize) noexcept nogil: + _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, + src.shape, dst.shape, ndim, itemsize) + +@cname('__pyx_memoryview_slice_get_size') +cdef Py_ssize_t slice_get_size({{memviewslice_name}} *src, int ndim) noexcept nogil: + "Return the size of the memory occupied by the slice in number of bytes" + cdef Py_ssize_t shape, size = src.memview.view.itemsize + + for shape in src.shape[:ndim]: + size *= shape + + return 
size + +@cname('__pyx_fill_contig_strides_array') +cdef Py_ssize_t fill_contig_strides_array( + Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + int ndim, char order) noexcept nogil: + """ + Fill the strides array for a slice with C or F contiguous strides. + This is like PyBuffer_FillContiguousStrides, but compatible with py < 2.6 + """ + cdef int idx + + if order == 'F': + for idx in range(ndim): + strides[idx] = stride + stride *= shape[idx] + else: + for idx in range(ndim - 1, -1, -1): + strides[idx] = stride + stride *= shape[idx] + + return stride + +@cname('__pyx_memoryview_copy_data_to_temp') +cdef void *copy_data_to_temp({{memviewslice_name}} *src, + {{memviewslice_name}} *tmpslice, + char order, + int ndim) except NULL nogil: + """ + Copy a direct slice to temporary contiguous memory. The caller should free + the result when done. + """ + cdef int i + cdef void *result + + cdef size_t itemsize = src.memview.view.itemsize + cdef size_t size = slice_get_size(src, ndim) + + result = malloc(size) + if not result: + _err_no_memory() + + # tmpslice[0] = src + tmpslice.data = <char *> result + tmpslice.memview = src.memview + for i in range(ndim): + tmpslice.shape[i] = src.shape[i] + tmpslice.suboffsets[i] = -1 + + fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, ndim, order) + + # We need to broadcast strides again + for i in range(ndim): + if tmpslice.shape[i] == 1: + tmpslice.strides[i] = 0 + + if slice_is_contig(src[0], order, ndim): + memcpy(result, src.data, size) + else: + copy_strided_to_strided(src, tmpslice, ndim, itemsize) + + return result + +# Use 'with gil' functions and avoid 'with gil' blocks, as the code within the blocks +# has temporaries that need the GIL to clean up +@cname('__pyx_memoryview_err_extents') +cdef int _err_extents(int i, Py_ssize_t extent1, + Py_ssize_t extent2) except -1 with gil: + raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})" + +@cname('__pyx_memoryview_err_dim') +cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil: + raise <object> error, msg % dim + +@cname('__pyx_memoryview_err') +cdef int _err(PyObject *error, str msg) except -1 with gil: + raise <object> error, msg + +@cname('__pyx_memoryview_err_no_memory') +cdef int _err_no_memory() except -1 with gil: + raise MemoryError + + +@cname('__pyx_memoryview_copy_contents') +cdef int memoryview_copy_contents({{memviewslice_name}} src, + {{memviewslice_name}} dst, + int src_ndim, int dst_ndim, + bint dtype_is_object) except -1 nogil: + """ + Copy memory from slice src to slice dst. + Check for overlapping memory and verify the shapes.
+ """ + cdef void *tmpdata = NULL + cdef size_t itemsize = src.memview.view.itemsize + cdef int i + cdef char order = get_best_order(&src, src_ndim) + cdef bint broadcasting = False + cdef bint direct_copy = False + cdef {{memviewslice_name}} tmp + + if src_ndim < dst_ndim: + broadcast_leading(&src, src_ndim, dst_ndim) + elif dst_ndim < src_ndim: + broadcast_leading(&dst, dst_ndim, src_ndim) + + cdef int ndim = max(src_ndim, dst_ndim) + + for i in range(ndim): + if src.shape[i] != dst.shape[i]: + if src.shape[i] == 1: + broadcasting = True + src.strides[i] = 0 + else: + _err_extents(i, dst.shape[i], src.shape[i]) + + if src.suboffsets[i] >= 0: + _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) + + if slices_overlap(&src, &dst, ndim, itemsize): + # slices overlap, copy to temp, copy temp to dst + if not slice_is_contig(src, order, ndim): + order = get_best_order(&dst, ndim) + + tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + src = tmp + + if not broadcasting: + # See if both slices have equal contiguity, in that case perform a + # direct copy. This only works when we are not broadcasting. + if slice_is_contig(src, 'C', ndim): + direct_copy = slice_is_contig(dst, 'C', ndim) + elif slice_is_contig(src, 'F', ndim): + direct_copy = slice_is_contig(dst, 'F', ndim) + + if direct_copy: + # Contiguous slices with same order + refcount_copying(&dst, dtype_is_object, ndim, inc=False) + memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + refcount_copying(&dst, dtype_is_object, ndim, inc=True) + free(tmpdata) + return 0 + + if order == 'F' == get_best_order(&dst, ndim): + # see if both slices have Fortran order, transpose them to match our + # C-style indexing order + transpose_memslice(&src) + transpose_memslice(&dst) + + refcount_copying(&dst, dtype_is_object, ndim, inc=False) + copy_strided_to_strided(&src, &dst, ndim, itemsize) + refcount_copying(&dst, dtype_is_object, ndim, inc=True) + + free(tmpdata) + return 0 + +@cname('__pyx_memoryview_broadcast_leading') +cdef void broadcast_leading({{memviewslice_name}} *mslice, + int ndim, + int ndim_other) noexcept nogil: + cdef int i + cdef int offset = ndim_other - ndim + + for i in range(ndim - 1, -1, -1): + mslice.shape[i + offset] = mslice.shape[i] + mslice.strides[i + offset] = mslice.strides[i] + mslice.suboffsets[i + offset] = mslice.suboffsets[i] + + for i in range(offset): + mslice.shape[i] = 1 + mslice.strides[i] = mslice.strides[0] + mslice.suboffsets[i] = -1 + +# +### Take care of refcounting the objects in slices. 
Do this separately from any copying, +### to minimize acquiring the GIL +# + +@cname('__pyx_memoryview_refcount_copying') +cdef void refcount_copying({{memviewslice_name}} *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: + # incref or decref the objects in the destination slice if the dtype is object + if dtype_is_object: + refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc) + +@cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') +cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, + Py_ssize_t *strides, int ndim, + bint inc) noexcept with gil: + refcount_objects_in_slice(data, shape, strides, ndim, inc) + +@cname('__pyx_memoryview_refcount_objects_in_slice') +cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, + Py_ssize_t *strides, int ndim, bint inc) noexcept: + cdef Py_ssize_t i + cdef Py_ssize_t stride = strides[0] + + for i in range(shape[0]): + if ndim == 1: + if inc: + Py_INCREF((<PyObject **> data)[0]) + else: + Py_DECREF((<PyObject **> data)[0]) + else: + refcount_objects_in_slice(data, shape + 1, strides + 1, ndim - 1, inc) + + data += stride + +# +### Scalar to slice assignment +# +@cname('__pyx_memoryview_slice_assign_scalar') +cdef void slice_assign_scalar({{memviewslice_name}} *dst, int ndim, + size_t itemsize, void *item, + bint dtype_is_object) noexcept nogil: + refcount_copying(dst, dtype_is_object, ndim, inc=False) + _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, itemsize, item) + refcount_copying(dst, dtype_is_object, ndim, inc=True) + + +@cname('__pyx_memoryview__slice_assign_scalar') +cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, + Py_ssize_t *strides, int ndim, + size_t itemsize, void *item) noexcept nogil: + cdef Py_ssize_t i + cdef Py_ssize_t stride = strides[0] + cdef Py_ssize_t extent = shape[0] + + if ndim == 1: + for i in range(extent): + memcpy(data, item, itemsize) + data += stride + else: + for i in range(extent): + _slice_assign_scalar(data, shape + 1, strides + 1, ndim - 1, itemsize, item) + data += stride + + +############### BufferFormatFromTypeInfo ############### +cdef extern from *: + ctypedef struct __Pyx_StructField + + cdef enum: + __PYX_BUF_FLAGS_PACKED_STRUCT + __PYX_BUF_FLAGS_INTEGER_COMPLEX + + ctypedef struct __Pyx_TypeInfo: + char* name + __Pyx_StructField* fields + size_t size + size_t arraysize[8] + int ndim + char typegroup + char is_unsigned + int flags + + ctypedef struct __Pyx_StructField: + __Pyx_TypeInfo* type + char* name + size_t offset + + ctypedef struct __Pyx_BufFmt_StackElem: + __Pyx_StructField* field + size_t parent_offset + + #ctypedef struct __Pyx_BufFmt_Context: + # __Pyx_StructField root + # __Pyx_BufFmt_StackElem* head + + struct __pyx_typeinfo_string: + char string[3] + + __pyx_typeinfo_string __Pyx_TypeInfoToFormat(__Pyx_TypeInfo *) + + +@cname('__pyx_format_from_typeinfo') +cdef bytes format_from_typeinfo(__Pyx_TypeInfo *type): + cdef __Pyx_StructField *field + cdef __pyx_typeinfo_string fmt + cdef bytes part, result + cdef Py_ssize_t i + + if type.typegroup == 'S': + assert type.fields != NULL + assert type.fields.type != NULL + + if type.flags & __PYX_BUF_FLAGS_PACKED_STRUCT: + alignment = b'^' + else: + alignment = b'' + + parts = [b"T{"] + field = type.fields + + while field.type: + part = format_from_typeinfo(field.type) + parts.append(part + b':' + field.name + b':') + field += 1 + + result = alignment.join(parts) + b'}' + else: + fmt = __Pyx_TypeInfoToFormat(type) + result = fmt.string + if type.arraysize[0]: + extents = 
[f"{type.arraysize[i]}" for i in range(type.ndim)] + result = f"({u','.join(extents)})".encode('ascii') + result + + return result diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/MemoryView_C.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/MemoryView_C.c new file mode 100644 index 0000000000000000000000000000000000000000..7b0d8ff5d3f6246adb3fe7a55b6d8c1de91b274a --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/MemoryView_C.c @@ -0,0 +1,987 @@ +////////// MemviewSliceStruct.proto ////////// +//@proto_block: utility_code_proto_before_types + +/* memoryview slice struct */ +struct {{memview_struct_name}}; + +typedef struct { + struct {{memview_struct_name}} *memview; + char *data; + Py_ssize_t shape[{{max_dims}}]; + Py_ssize_t strides[{{max_dims}}]; + Py_ssize_t suboffsets[{{max_dims}}]; +} {{memviewslice_name}}; + +// used for "len(memviewslice)" +#define __Pyx_MemoryView_Len(m) (m.shape[0]) + + +/////////// Atomics.proto ///////////// +//@proto_block: utility_code_proto_before_types + +#include + +#ifndef CYTHON_ATOMICS + #define CYTHON_ATOMICS 1 +#endif +// using CYTHON_ATOMICS as a cdef extern bint in the Cython memoryview code +// interacts badly with "import *". Therefore, define a helper function-like macro +#define __PYX_CYTHON_ATOMICS_ENABLED() CYTHON_ATOMICS + +#define __pyx_atomic_int_type int +#define __pyx_nonatomic_int_type int + +// For standard C/C++ atomics, get the headers first so we have ATOMIC_INT_LOCK_FREE +// defined when we decide to use them. +#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) && \ + (__STDC_VERSION__ >= 201112L) && \ + !defined(__STDC_NO_ATOMICS__)) + #include +#elif CYTHON_ATOMICS && (defined(__cplusplus) && ( \ + (__cplusplus >= 201103L) || \ + (defined(_MSC_VER) && _MSC_VER >= 1700))) + #include +#endif + +#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) && \ + (__STDC_VERSION__ >= 201112L) && \ + !defined(__STDC_NO_ATOMICS__) && \ + ATOMIC_INT_LOCK_FREE == 2) + // C11 atomics are available and ATOMIC_INT_LOCK_FREE is definitely on + #undef __pyx_atomic_int_type + #define __pyx_atomic_int_type atomic_int + #define __pyx_atomic_incr_aligned(value) atomic_fetch_add_explicit(value, 1, memory_order_relaxed) + #define __pyx_atomic_decr_aligned(value) atomic_fetch_sub_explicit(value, 1, memory_order_acq_rel) + #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER) + #pragma message ("Using standard C atomics") + #elif defined(__PYX_DEBUG_ATOMICS) + #warning "Using standard C atomics" + #endif +#elif CYTHON_ATOMICS && (defined(__cplusplus) && ( \ + (__cplusplus >= 201103L) || \ + /*_MSC_VER 1700 is Visual Studio 2012 */ \ + (defined(_MSC_VER) && _MSC_VER >= 1700)) && \ + ATOMIC_INT_LOCK_FREE == 2) + // C++11 atomics are available and ATOMIC_INT_LOCK_FREE is definitely on + #undef __pyx_atomic_int_type + #define __pyx_atomic_int_type std::atomic_int + #define __pyx_atomic_incr_aligned(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_relaxed) + #define __pyx_atomic_decr_aligned(value) std::atomic_fetch_sub_explicit(value, 1, std::memory_order_acq_rel) + + #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER) + #pragma message ("Using standard C++ atomics") + #elif defined(__PYX_DEBUG_ATOMICS) + #warning "Using standard C++ atomics" + #endif +#elif CYTHON_ATOMICS && (__GNUC__ >= 5 || (__GNUC__ == 4 && \ + (__GNUC_MINOR__ > 1 || \ + (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)))) + /* gcc >= 4.1.2 */ + #define __pyx_atomic_incr_aligned(value) __sync_fetch_and_add(value, 1) + #define 
__pyx_atomic_decr_aligned(value) __sync_fetch_and_sub(value, 1) + + #ifdef __PYX_DEBUG_ATOMICS + #warning "Using GNU atomics" + #endif +#elif CYTHON_ATOMICS && defined(_MSC_VER) + /* msvc */ + #include <intrin.h> + #undef __pyx_atomic_int_type + #define __pyx_atomic_int_type long + #undef __pyx_nonatomic_int_type + #define __pyx_nonatomic_int_type long + #pragma intrinsic (_InterlockedExchangeAdd) + #define __pyx_atomic_incr_aligned(value) _InterlockedExchangeAdd(value, 1) + #define __pyx_atomic_decr_aligned(value) _InterlockedExchangeAdd(value, -1) + + #ifdef __PYX_DEBUG_ATOMICS + #pragma message ("Using MSVC atomics") + #endif +#else + #undef CYTHON_ATOMICS + #define CYTHON_ATOMICS 0 + + #ifdef __PYX_DEBUG_ATOMICS + #warning "Not using atomics" + #endif +#endif + +#if CYTHON_ATOMICS + #define __pyx_add_acquisition_count(memview) \ + __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview)) + #define __pyx_sub_acquisition_count(memview) \ + __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview)) +#else + #define __pyx_add_acquisition_count(memview) \ + __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) + #define __pyx_sub_acquisition_count(memview) \ + __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) +#endif + + +/////////////// ObjectToMemviewSlice.proto /////////////// + +static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *, int writable_flag); + + +////////// MemviewSliceInit.proto ////////// + +#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d + +#define __Pyx_MEMVIEW_DIRECT 1 +#define __Pyx_MEMVIEW_PTR 2 +#define __Pyx_MEMVIEW_FULL 4 +#define __Pyx_MEMVIEW_CONTIG 8 +#define __Pyx_MEMVIEW_STRIDED 16 +#define __Pyx_MEMVIEW_FOLLOW 32 + +#define __Pyx_IS_C_CONTIG 1 +#define __Pyx_IS_F_CONTIG 2 + +static int __Pyx_init_memviewslice( + struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference); + +static CYTHON_INLINE int __pyx_add_acquisition_count_locked( + __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock); +static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( + __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock); + +#define __pyx_get_slice_count_pointer(memview) (&memview->acquisition_count) +#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) +#define __PYX_XCLEAR_MEMVIEW(slice, have_gil) __Pyx_XCLEAR_MEMVIEW(slice, have_gil, __LINE__) +static CYTHON_INLINE void __Pyx_INC_MEMVIEW({{memviewslice_name}} *, int, int); +static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW({{memviewslice_name}} *, int, int); + + +/////////////// MemviewSliceIndex.proto /////////////// + +static CYTHON_INLINE char *__pyx_memviewslice_index_full( + const char *bufp, Py_ssize_t idx, Py_ssize_t stride, Py_ssize_t suboffset); + + +/////////////// ObjectToMemviewSlice /////////////// +//@requires: MemviewSliceValidateAndInit + +static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj, int writable_flag) { + {{memviewslice_name}} result = {{memslice_init}}; + __Pyx_BufFmt_StackElem stack[{{struct_nesting_depth}}]; + int axes_specs[] = { {{axes_specs}} }; + int retcode; + + if (obj == Py_None) { + /* We don't bother to refcount None */ + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, {{c_or_f_flag}}, + {{buf_flag}} | writable_flag, {{ndim}}, + &{{dtype_typeinfo}}, stack, + &result, obj); + 
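+ /* A -1 retcode means validation raised a Python exception; the caller + receives a slice with memview == NULL and data == NULL to signal it. */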
+ if (unlikely(retcode == -1)) + goto __pyx_fail; + + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + + +/////////////// MemviewSliceValidateAndInit.proto /////////////// + +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj); + +/////////////// MemviewSliceValidateAndInit /////////////// +//@requires: Buffer.c::TypeInfoCompare +//@requires: Buffer.c::BufferFormatStructs +//@requires: Buffer.c::BufferFormatCheck + +static int +__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) +{ + if (buf->shape[dim] <= 1) + return 1; + + if (buf->strides) { + if (spec & __Pyx_MEMVIEW_CONTIG) { + if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { + if (unlikely(buf->strides[dim] != sizeof(void *))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly contiguous " + "in dimension %d.", dim); + goto fail; + } + } else if (unlikely(buf->strides[dim] != buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + + if (spec & __Pyx_MEMVIEW_FOLLOW) { + Py_ssize_t stride = buf->strides[dim]; + if (stride < 0) + stride = -stride; + if (unlikely(stride < buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + } else { + if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { + PyErr_Format(PyExc_ValueError, + "C-contiguous buffer is not contiguous in " + "dimension %d", dim); + goto fail; + } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { + PyErr_Format(PyExc_ValueError, + "C-contiguous buffer is not indirect in " + "dimension %d", dim); + goto fail; + } else if (unlikely(buf->suboffsets)) { + PyErr_SetString(PyExc_ValueError, + "Buffer exposes suboffsets but no strides"); + goto fail; + } + } + + return 1; +fail: + return 0; +} + +static int +__pyx_check_suboffsets(Py_buffer *buf, int dim, int ndim, int spec) +{ + CYTHON_UNUSED_VAR(ndim); + // Todo: without PyBUF_INDIRECT we may not have suboffset information, i.e., the + // ptr may not be set to NULL but may be uninitialized? 
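+ // A DIRECT axis must not carry a suboffset, while a PTR axis requires + // one; both checks below compare the buffer's suboffsets against the + // axis specs that were fixed at compile time.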
+ if (spec & __Pyx_MEMVIEW_DIRECT) { + if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { + PyErr_Format(PyExc_ValueError, + "Buffer not compatible with direct access " + "in dimension %d.", dim); + goto fail; + } + } + + if (spec & __Pyx_MEMVIEW_PTR) { + if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly accessible " + "in dimension %d.", dim); + goto fail; + } + } + + return 1; +fail: + return 0; +} + +static int +__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) +{ + int i; + + if (c_or_f_flag & __Pyx_IS_F_CONTIG) { + Py_ssize_t stride = 1; + for (i = 0; i < ndim; i++) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not fortran contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { + Py_ssize_t stride = 1; + for (i = ndim - 1; i >- 1; i--) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not C contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } + + return 1; +fail: + return 0; +} + +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj) +{ + struct __pyx_memoryview_obj *memview, *new_memview; + __Pyx_RefNannyDeclarations + Py_buffer *buf; + int i, spec = 0, retval = -1; + __Pyx_BufFmt_Context ctx; + int from_memoryview = __pyx_memoryview_check(original_obj); + + __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); + + if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) + original_obj)->typeinfo)) { + /* We have a matching dtype, skip format parsing */ + memview = (struct __pyx_memoryview_obj *) original_obj; + new_memview = NULL; + } else { + memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + original_obj, buf_flags, 0, dtype); + new_memview = memview; + if (unlikely(!memview)) + goto fail; + } + + buf = &memview->view; + if (unlikely(buf->ndim != ndim)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + ndim, buf->ndim); + goto fail; + } + + if (new_memview) { + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; + } + + if (unlikely((unsigned) buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " + "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", + buf->itemsize, + (buf->itemsize > 1) ? "s" : "", + dtype->name, + dtype->size, + (dtype->size > 1) ? "s" : ""); + goto fail; + } + + /* Check axes */ + if (buf->len > 0) { + // 0-sized arrays do not undergo these checks since their strides are + // irrelevant and they are always both C- and F-contiguous. 
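+ /* Validate each axis against its compile-time spec: strides first, + then suboffsets; the helpers above raise ValueError on a mismatch. */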
+ for (i = 0; i < ndim; i++) { + spec = axes_specs[i]; + if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) + goto fail; + if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) + goto fail; + } + + /* Check contiguity */ + if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) + goto fail; + } + + /* Initialize */ + if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, + new_memview != NULL) == -1)) { + goto fail; + } + + retval = 0; + goto no_fail; + +fail: + Py_XDECREF(new_memview); + retval = -1; + +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} + + +////////// MemviewSliceInit ////////// + +static int +__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, + int ndim, + {{memviewslice_name}} *memviewslice, + int memview_is_new_reference) +{ + __Pyx_RefNannyDeclarations + int i, retval=-1; + Py_buffer *buf = &memview->view; + __Pyx_RefNannySetupContext("init_memviewslice", 0); + + if (unlikely(memviewslice->memview || memviewslice->data)) { + PyErr_SetString(PyExc_ValueError, + "memviewslice is already initialized!"); + goto fail; + } + + if (buf->strides) { + for (i = 0; i < ndim; i++) { + memviewslice->strides[i] = buf->strides[i]; + } + } else { + Py_ssize_t stride = buf->itemsize; + for (i = ndim - 1; i >= 0; i--) { + memviewslice->strides[i] = stride; + stride *= buf->shape[i]; + } + } + + for (i = 0; i < ndim; i++) { + memviewslice->shape[i] = buf->shape[i]; + if (buf->suboffsets) { + memviewslice->suboffsets[i] = buf->suboffsets[i]; + } else { + memviewslice->suboffsets[i] = -1; + } + } + + memviewslice->memview = memview; + memviewslice->data = (char *)buf->buf; + if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { + Py_INCREF(memview); + } + retval = 0; + goto no_fail; + +fail: + /* Don't decref, the memoryview may be borrowed. Let the caller do the cleanup */ + /* __Pyx_XDECREF(memviewslice->memview); */ + memviewslice->memview = 0; + memviewslice->data = 0; + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} + +#ifndef Py_NO_RETURN +// available since Py3.3 +#define Py_NO_RETURN +#endif + +static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { + va_list vargs; + char msg[200]; + +#if PY_VERSION_HEX >= 0x030A0000 || defined(HAVE_STDARG_PROTOTYPES) + va_start(vargs, fmt); +#else + va_start(vargs); +#endif + vsnprintf(msg, 200, fmt, vargs); + va_end(vargs); + + Py_FatalError(msg); +} + +static CYTHON_INLINE int +__pyx_add_acquisition_count_locked(__pyx_atomic_int_type *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)++; + PyThread_release_lock(lock); + return result; +} + +static CYTHON_INLINE int +__pyx_sub_acquisition_count_locked(__pyx_atomic_int_type *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)--; + PyThread_release_lock(lock); + return result; +} + + +static CYTHON_INLINE void +__Pyx_INC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil, int lineno) +{ + __pyx_nonatomic_int_type old_acquisition_count; + struct {{memview_struct_name}} *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) { + // Allow uninitialized memoryview assignment and do not ref-count None. 
+ return; + } + + old_acquisition_count = __pyx_add_acquisition_count(memview); + if (unlikely(old_acquisition_count <= 0)) { + if (likely(old_acquisition_count == 0)) { + // First acquisition => keep the memoryview object alive. + if (have_gil) { + Py_INCREF((PyObject *) memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_INCREF((PyObject *) memview); + PyGILState_Release(_gilstate); + } + } else { + __pyx_fatalerror("Acquisition count is %d (line %d)", + old_acquisition_count+1, lineno); + } + } +} + +static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW({{memviewslice_name}} *memslice, + int have_gil, int lineno) { + __pyx_nonatomic_int_type old_acquisition_count; + struct {{memview_struct_name}} *memview = memslice->memview; + + if (unlikely(!memview || (PyObject *) memview == Py_None)) { + // Do not ref-count None. + memslice->memview = NULL; + return; + } + + old_acquisition_count = __pyx_sub_acquisition_count(memview); + memslice->data = NULL; + if (likely(old_acquisition_count > 1)) { + // Still other slices out there => we do not own the reference. + memslice->memview = NULL; + } else if (likely(old_acquisition_count == 1)) { + // Last slice => discard owned Python reference to memoryview object. + if (have_gil) { + Py_CLEAR(memslice->memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_CLEAR(memslice->memview); + PyGILState_Release(_gilstate); + } + } else { + __pyx_fatalerror("Acquisition count is %d (line %d)", + old_acquisition_count-1, lineno); + } +} + + +////////// MemviewSliceCopyTemplate.proto ////////// + +static {{memviewslice_name}} +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object); + + +////////// MemviewSliceCopyTemplate ////////// + +static {{memviewslice_name}} +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object) +{ + __Pyx_RefNannyDeclarations + int i; + __Pyx_memviewslice new_mvs = {{memslice_init}}; + struct __pyx_memoryview_obj *from_memview = from_mvs->memview; + Py_buffer *buf = &from_memview->view; + PyObject *shape_tuple = NULL; + PyObject *temp_int = NULL; + struct __pyx_array_obj *array_obj = NULL; + struct __pyx_memoryview_obj *memview_obj = NULL; + + __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); + + for (i = 0; i < ndim; i++) { + if (unlikely(from_mvs->suboffsets[i] >= 0)) { + PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " + "indirect dimensions (axis %d)", i); + goto fail; + } + } + + shape_tuple = PyTuple_New(ndim); + if (unlikely(!shape_tuple)) { + goto fail; + } + __Pyx_GOTREF(shape_tuple); + + + for(i = 0; i < ndim; i++) { + temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); + if(unlikely(!temp_int)) { + goto fail; + } else { + PyTuple_SET_ITEM(shape_tuple, i, temp_int); + temp_int = NULL; + } + } + + array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); + if (unlikely(!array_obj)) { + goto fail; + } + __Pyx_GOTREF(array_obj); + + memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + (PyObject *) array_obj, contig_flag, + dtype_is_object, + from_mvs->memview->typeinfo); + if (unlikely(!memview_obj)) + goto fail; + + /* initialize new_mvs */ + if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) + goto fail; + + if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, 
ndim, ndim, + dtype_is_object) < 0)) + goto fail; + + goto no_fail; + +fail: + __Pyx_XDECREF(new_mvs.memview); + new_mvs.memview = NULL; + new_mvs.data = NULL; +no_fail: + __Pyx_XDECREF(shape_tuple); + __Pyx_XDECREF(temp_int); + __Pyx_XDECREF(array_obj); + __Pyx_RefNannyFinishContext(); + return new_mvs; +} + + +////////// CopyContentsUtility.proto ///////// + +#define {{func_cname}}(slice) \ + __pyx_memoryview_copy_new_contig(&slice, "{{mode}}", {{ndim}}, \ + sizeof({{dtype_decl}}), {{contig_flag}}, \ + {{dtype_is_object}}) + + +////////// OverlappingSlices.proto ////////// + +static int __pyx_slices_overlap({{memviewslice_name}} *slice1, + {{memviewslice_name}} *slice2, + int ndim, size_t itemsize); + + +////////// OverlappingSlices ////////// + +/* Based on numpy's core/src/multiarray/array_assign.c */ + +/* Gets a half-open range [start, end) which contains the array data */ +static void +__pyx_get_array_memory_extents({{memviewslice_name}} *slice, + void **out_start, void **out_end, + int ndim, size_t itemsize) +{ + char *start, *end; + int i; + + start = end = slice->data; + + for (i = 0; i < ndim; i++) { + Py_ssize_t stride = slice->strides[i]; + Py_ssize_t extent = slice->shape[i]; + + if (extent == 0) { + *out_start = *out_end = start; + return; + } else { + if (stride > 0) + end += stride * (extent - 1); + else + start += stride * (extent - 1); + } + } + + /* Return a half-open range */ + *out_start = start; + *out_end = end + itemsize; +} + +/* Returns 1 if the arrays have overlapping data, 0 otherwise */ +static int +__pyx_slices_overlap({{memviewslice_name}} *slice1, + {{memviewslice_name}} *slice2, + int ndim, size_t itemsize) +{ + void *start1, *end1, *start2, *end2; + + __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); + __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); + + return (start1 < end2) && (start2 < end1); +} + + +////////// MemviewSliceCheckContig.proto ////////// + +#define __pyx_memviewslice_is_contig_{{contig_type}}{{ndim}}(slice) \ + __pyx_memviewslice_is_contig(slice, '{{contig_type}}', {{ndim}}) + + +////////// MemviewSliceIsContig.proto ////////// + +static int __pyx_memviewslice_is_contig(const {{memviewslice_name}} mvs, char order, int ndim);/*proto*/ + + +////////// MemviewSliceIsContig ////////// + +static int +__pyx_memviewslice_is_contig(const {{memviewslice_name}} mvs, char order, int ndim) +{ + int i, index, step, start; + Py_ssize_t itemsize = mvs.memview->view.itemsize; + + if (order == 'F') { + step = 1; + start = 0; + } else { + step = -1; + start = ndim - 1; + } + + for (i = 0; i < ndim; i++) { + index = start + step * i; + if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) + return 0; + + itemsize *= mvs.shape[index]; + } + + return 1; +} + + +/////////////// MemviewSliceIndex /////////////// + +static CYTHON_INLINE char * +__pyx_memviewslice_index_full(const char *bufp, Py_ssize_t idx, + Py_ssize_t stride, Py_ssize_t suboffset) +{ + bufp = bufp + idx * stride; + if (suboffset >= 0) { + bufp = *((char **) bufp) + suboffset; + } + return (char *) bufp; +} + + +/////////////// MemviewDtypeToObject.proto /////////////// + +{{if to_py_function}} +static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp); /* proto */ +{{endif}} + +{{if from_py_function}} +static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj); /* proto */ +{{endif}} + +/////////////// MemviewDtypeToObject /////////////// + +{{#__pyx_memview__to_object}} + +/* Convert a dtype to or from a 
Python object */ + +{{if to_py_function}} +static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp) { + return (PyObject *) {{to_py_function}}(*({{dtype}} *) itemp); +} +{{endif}} + +{{if from_py_function}} +static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj) { + {{dtype}} value = {{from_py_function}}(obj); + if (unlikely({{error_condition}})) + return 0; + *({{dtype}} *) itemp = value; + return 1; +} +{{endif}} + + +/////////////// MemviewObjectToObject.proto /////////////// + +/* Function callbacks (for memoryview object) for dtype object */ +static PyObject *{{get_function}}(const char *itemp); /* proto */ +static int {{set_function}}(const char *itemp, PyObject *obj); /* proto */ + + +/////////////// MemviewObjectToObject /////////////// + +static PyObject *{{get_function}}(const char *itemp) { + PyObject *result = *(PyObject **) itemp; + Py_INCREF(result); + return result; +} + +static int {{set_function}}(const char *itemp, PyObject *obj) { + Py_INCREF(obj); + Py_DECREF(*(PyObject **) itemp); + *(PyObject **) itemp = obj; + return 1; +} + +/////////// ToughSlice ////////// + +/* Dimension is indexed with 'start:stop:step' */ + +if (unlikely(__pyx_memoryview_slice_memviewslice( + &{{dst}}, + {{src}}.shape[{{dim}}], {{src}}.strides[{{dim}}], {{src}}.suboffsets[{{dim}}], + {{dim}}, + {{new_ndim}}, + &{{get_suboffset_dim()}}, + {{start}}, + {{stop}}, + {{step}}, + {{int(have_start)}}, + {{int(have_stop)}}, + {{int(have_step)}}, + 1) < 0)) +{ + {{error_goto}} +} + + +////////// SimpleSlice ////////// + +/* Dimension is indexed with ':' only */ + +{{dst}}.shape[{{new_ndim}}] = {{src}}.shape[{{dim}}]; +{{dst}}.strides[{{new_ndim}}] = {{src}}.strides[{{dim}}]; + +{{if access == 'direct'}} + {{dst}}.suboffsets[{{new_ndim}}] = -1; +{{else}} + {{dst}}.suboffsets[{{new_ndim}}] = {{src}}.suboffsets[{{dim}}]; + if ({{src}}.suboffsets[{{dim}}] >= 0) + {{get_suboffset_dim()}} = {{new_ndim}}; +{{endif}} + + +////////// SliceIndex ////////// + +// Dimension is indexed with an integer, we could use the ToughSlice +// approach, but this is faster + +{ + Py_ssize_t __pyx_tmp_idx = {{idx}}; + + {{if wraparound or boundscheck}} + Py_ssize_t __pyx_tmp_shape = {{src}}.shape[{{dim}}]; + {{endif}} + + Py_ssize_t __pyx_tmp_stride = {{src}}.strides[{{dim}}]; + {{if wraparound}} + if (__pyx_tmp_idx < 0) + __pyx_tmp_idx += __pyx_tmp_shape; + {{endif}} + + {{if boundscheck}} + if (unlikely(!__Pyx_is_valid_index(__pyx_tmp_idx, __pyx_tmp_shape))) { + {{if not have_gil}} + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); + #endif + {{endif}} + + PyErr_SetString(PyExc_IndexError, + "Index out of bounds (axis {{dim}})"); + + {{if not have_gil}} + #ifdef WITH_THREAD + PyGILState_Release(__pyx_gilstate_save); + #endif + {{endif}} + + {{error_goto}} + } + {{endif}} + + {{if all_dimensions_direct}} + {{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride; + {{else}} + if ({{get_suboffset_dim()}} < 0) { + {{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride; + + /* This dimension is the first dimension, or is preceded by */ + /* direct or indirect dimensions that are indexed away. */ + /* Hence suboffset_dim must be less than zero, and we can have */ + /* our data pointer refer to another block by dereferencing. 
*/
+            /* slice.data -> B -> C becomes slice.data -> C */
+
+            {{if indirect}}
+              {
+                Py_ssize_t __pyx_tmp_suboffset = {{src}}.suboffsets[{{dim}}];
+
+                {{if generic}}
+                    if (__pyx_tmp_suboffset >= 0)
+                {{endif}}
+
+                    {{dst}}.data = *((char **) {{dst}}.data) + __pyx_tmp_suboffset;
+              }
+            {{endif}}
+
+        } else {
+            {{dst}}.suboffsets[{{get_suboffset_dim()}}] += __pyx_tmp_idx * __pyx_tmp_stride;
+
+            /* Note: dimension can not be indirect, the compiler will have */
+            /* issued an error */
+        }
+
+    {{endif}}
+}
+
+
+////////// FillStrided1DScalar.proto //////////
+
+static void
+__pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride,
+                                size_t itemsize, void *itemp);
+
+////////// FillStrided1DScalar //////////
+
+/* Fill a slice with a scalar value. The dimension is direct and strided or contiguous */
+/* This can be used as a callback for the memoryview object to efficiently assign a scalar */
+/* Currently unused */
+static void
+__pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride,
+                                size_t itemsize, void *itemp)
+{
+    Py_ssize_t i;
+    {{type_decl}} item = *(({{type_decl}} *) itemp);
+    {{type_decl}} *endp;
+
+    stride /= sizeof({{type_decl}});
+    endp = p + stride * extent;
+
+    while (p < endp) {
+        *p = item;
+        p += stride;
+    }
+}
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ModuleSetupCode.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ModuleSetupCode.c
new file mode 100644
index 0000000000000000000000000000000000000000..33e7abe06146aacf028768cd7ca6eef1701287fd
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ModuleSetupCode.c
@@ -0,0 +1,2328 @@
+/////////////// InitLimitedAPI ///////////////
+
+#if defined(CYTHON_LIMITED_API) && 0 /* disabled: enabling Py_LIMITED_API needs more work */
+  #ifndef Py_LIMITED_API
+    #if CYTHON_LIMITED_API+0 > 0x03030000
+      #define Py_LIMITED_API CYTHON_LIMITED_API
+    #else
+      #define Py_LIMITED_API 0x03030000
+    #endif
+  #endif
+#endif
+
+
+/////////////// CModulePreamble ///////////////
+
+#include <stddef.h> /* For offsetof */
+#ifndef offsetof
+  #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+
+#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS)
+  #ifndef __stdcall
+    #define __stdcall
+  #endif
+  #ifndef __cdecl
+    #define __cdecl
+  #endif
+  #ifndef __fastcall
+    #define __fastcall
+  #endif
+#endif
+
+#ifndef DL_IMPORT
+  #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+  #define DL_EXPORT(t) t
+#endif
+
+// For use in DL_IMPORT/DL_EXPORT macros.
+#define __PYX_COMMA ,
+
+#ifndef HAVE_LONG_LONG
+  // CPython has required PY_LONG_LONG support for years, even if HAVE_LONG_LONG is not defined for us
+  #define HAVE_LONG_LONG
+#endif
+
+#ifndef PY_LONG_LONG
+  #define PY_LONG_LONG LONG_LONG
+#endif
+
+#ifndef Py_HUGE_VAL
+  #define Py_HUGE_VAL HUGE_VAL
+#endif
+
+// For the limited API it often makes sense to use Py_LIMITED_API rather than PY_VERSION_HEX
+// when doing version checks.
+#define __PYX_LIMITED_VERSION_HEX PY_VERSION_HEX
+
+#if defined(GRAALVM_PYTHON)
+  /* For very preliminary testing purposes. Most variables are set the same as PyPy.
+ The existence of this section does not imply that anything works or is even tested */ + // GRAALVM_PYTHON test comes before PyPy test because GraalPython unhelpfully defines PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 1 + #define CYTHON_COMPILING_IN_NOGIL 0 + + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #undef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) + #endif + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #undef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif + +#elif defined(PYPY_VERSION) + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #ifndef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #undef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define 
CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) + #endif + #if PY_VERSION_HEX < 0x03090000 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + #undef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1 && PYPY_VERSION_NUM >= 0x07030C00) + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif + +#elif defined(CYTHON_LIMITED_API) + // EXPERIMENTAL !! + #ifdef Py_LIMITED_API + #undef __PYX_LIMITED_VERSION_HEX + #define __PYX_LIMITED_VERSION_HEX Py_LIMITED_API + #endif + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 1 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + + // CYTHON_CLINE_IN_TRACEBACK is currently disabled for the Limited API + #undef CYTHON_CLINE_IN_TRACEBACK + #define CYTHON_CLINE_IN_TRACEBACK 0 + + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 1 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #endif + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #undef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS 1 + #endif + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 1 + #ifndef CYTHON_USE_TP_FINALIZE + // PyObject_CallFinalizerFromDealloc is missing and not easily replaced + #define CYTHON_USE_TP_FINALIZE 0 + #endif + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif + +#elif defined(Py_GIL_DISABLED) || defined(Py_NOGIL) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 1 + + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #ifndef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif 
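Each backend block above mostly force-disables the CYTHON_* internals toggles; the generated C code then branches on them at compile time. A hedged sketch of how one such flag is typically consumed; `get_item_owned` is a made-up helper, not part of this file:

```c
#include <Python.h>

#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0  /* CPython default, per the blocks above */
#endif

/* Return an owned reference to list[i]. Backends that set
 * CYTHON_AVOID_BORROWED_REFS (PyPy, GraalPy above) never touch the
 * internal object layout or borrowed references. */
static PyObject *get_item_owned(PyObject *list, Py_ssize_t i)
{
#if CYTHON_AVOID_BORROWED_REFS
    return PySequence_GetItem(list, i);        /* always a new reference */
#else
    PyObject *item = PyList_GET_ITEM(list, i); /* borrowed, CPython only */
    Py_INCREF(item);
    return item;
#endif
}
```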
+ #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 1 + #endif + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #ifndef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #endif + #ifndef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #ifndef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 + // Python 3.11a2 hid _PyLong_FormatAdvancedWriter and _PyFloat_FormatAdvancedWriter + // therefore disable unicode writer until a better alternative appears + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + // CYTHON_AVOID_BORROWED_REFS - Avoid borrowed references and always request owned references directly instead. + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + // CYTHON_ASSUME_SAFE_MACROS - Assume that macro calls do not fail and do not raise exceptions. + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_GIL + // Py3<3.5.2 does not support _PyThreadState_UncheckedGet(). + // FIXME: FastGIL can probably be supported also in CPython 3.12 but needs to be adapted. + #define CYTHON_FAST_GIL (PY_MAJOR_VERSION < 3 || PY_VERSION_HEX >= 0x03060000 && PY_VERSION_HEX < 0x030C00A6) + #endif + #ifndef CYTHON_METH_FASTCALL + // CPython 3.6 introduced METH_FASTCALL but with slightly different + // semantics. It became stable starting from CPython 3.7. 
+ #define CYTHON_METH_FASTCALL (PY_VERSION_HEX >= 0x030700A1) + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS 1 + #endif + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + // CYTHON_USE_MODULE_STATE - Use a module state/globals struct tied to the module object. + #ifndef CYTHON_USE_MODULE_STATE + // EXPERIMENTAL !! + #define CYTHON_USE_MODULE_STATE 0 + #endif + #if PY_VERSION_HEX < 0x030400a1 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #elif !defined(CYTHON_USE_TP_FINALIZE) + #define CYTHON_USE_TP_FINALIZE 1 + #endif + #if PY_VERSION_HEX < 0x030600B1 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #elif !defined(CYTHON_USE_DICT_VERSIONS) + // Python 3.12a5 deprecated "ma_version_tag" + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX < 0x030C00A5) + #endif + #if PY_VERSION_HEX < 0x030700A3 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #elif !defined(CYTHON_USE_EXC_INFO_STACK) + #define CYTHON_USE_EXC_INFO_STACK 1 + #endif + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 1 + #endif +#endif + +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif + +#if !defined(CYTHON_VECTORCALL) +#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1) +#endif + +/* Whether to use METH_FASTCALL with a fake backported implementation of vectorcall */ +#define CYTHON_BACKPORT_VECTORCALL (CYTHON_METH_FASTCALL && PY_VERSION_HEX < 0x030800B1) + +#if CYTHON_USE_PYLONG_INTERNALS + #if PY_MAJOR_VERSION < 3 + #include "longintrepr.h" + #endif + /* These short defines can easily conflict with other code */ + #undef SHIFT + #undef BASE + #undef MASK + /* Compile-time sanity check that these are indeed equal. Github issue #2670. 
*/
+  #ifdef SIZEOF_VOID_P
+    enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
+  #endif
+#endif
+
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+
+#ifndef __has_cpp_attribute
+  #define __has_cpp_attribute(x) 0
+#endif
+
+// restrict
+#ifndef CYTHON_RESTRICT
+  #if defined(__GNUC__)
+    #define CYTHON_RESTRICT __restrict__
+  #elif defined(_MSC_VER) && _MSC_VER >= 1400
+    #define CYTHON_RESTRICT __restrict
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_RESTRICT restrict
+  #else
+    #define CYTHON_RESTRICT
+  #endif
+#endif
+
+// unused attribute
+#ifndef CYTHON_UNUSED
+  #if defined(__cplusplus)
+    /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17
+     * but leads to warnings with -pedantic, since it is a C++17 feature */
+    #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
+      #if __has_cpp_attribute(maybe_unused)
+        #define CYTHON_UNUSED [[maybe_unused]]
+      #endif
+    #endif
+  #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+#     define CYTHON_UNUSED __attribute__ ((__unused__))
+#   else
+#     define CYTHON_UNUSED
+#   endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+#   define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+#   define CYTHON_UNUSED
+# endif
+#endif
+
+#ifndef CYTHON_UNUSED_VAR
+# if defined(__cplusplus)
+    template<class T> void CYTHON_UNUSED_VAR( const T& ) { }
+# else
+#   define CYTHON_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+  #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x)
+#endif
+
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+#   define CYTHON_NCP_UNUSED
+# else
+#   define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+
+#ifndef CYTHON_USE_CPP_STD_MOVE
+  // msvc doesn't set __cplusplus to a useful value
+  #if defined(__cplusplus) && ( \
+    __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600))
+    #define CYTHON_USE_CPP_STD_MOVE 1
+  #else
+    #define CYTHON_USE_CPP_STD_MOVE 0
+  #endif
+#endif
+
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+
+#ifdef _MSC_VER
+  #ifndef _MSC_STDINT_H_
+    #if _MSC_VER < 1300
+      typedef unsigned char uint8_t;
+      typedef unsigned short uint16_t;
+      typedef unsigned int uint32_t;
+    #else
+      typedef unsigned __int8 uint8_t;
+      typedef unsigned __int16 uint16_t;
+      typedef unsigned __int32 uint32_t;
+    #endif
+  #endif
+  #if _MSC_VER < 1300
+    #ifdef _WIN64
+      typedef unsigned long long __pyx_uintptr_t;
+    #else
+      typedef unsigned int __pyx_uintptr_t;
+    #endif
+  #else
+    #ifdef _WIN64
+      typedef unsigned __int64 __pyx_uintptr_t;
+    #else
+      typedef unsigned __int32 __pyx_uintptr_t;
+    #endif
+  #endif
+#else
+  #include <stdint.h>
+  typedef uintptr_t __pyx_uintptr_t;
+#endif
+
+
+#ifndef CYTHON_FALLTHROUGH
+  #if defined(__cplusplus)
+    /* for clang __has_cpp_attribute(fallthrough) is true even before C++17
+     * but leads to warnings with -pedantic, since it is a C++17 feature */
+    #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
+      #if __has_cpp_attribute(fallthrough)
+        #define CYTHON_FALLTHROUGH [[fallthrough]]
+      #endif
+    #endif
+
+    #ifndef CYTHON_FALLTHROUGH
+      #if __has_cpp_attribute(clang::fallthrough)
+        #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+      #elif __has_cpp_attribute(gnu::fallthrough)
+        #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+      #endif
+    #endif
+  #endif
+
+  #ifndef CYTHON_FALLTHROUGH
+    #if __has_attribute(fallthrough)
+      #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+    #else
+      #define CYTHON_FALLTHROUGH
+    #endif
+  #endif
+
+  #if defined(__clang__) && defined(__apple_build_version__)
+    #if __apple_build_version__ < 7000000 /* Xcode < 7.0 */
+      #undef CYTHON_FALLTHROUGH
+      #define CYTHON_FALLTHROUGH
+    #endif
+  #endif
+#endif
+
+#ifdef __cplusplus
+  template <typename T>
+  struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);};
+  #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL<type>::value)
+#else
+  #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0)
+#endif
+
+#if CYTHON_COMPILING_IN_PYPY == 1
+  #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x030A0000)
+#else
+  #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000)
+#endif
+// reinterpret
+
+// TODO: refactor existing code to use those macros
+#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer))
+// #define __PYX_REINTERPRET_POINTER(pointer_type, pointer) ((pointer_type)(void *)(pointer))
+// #define __PYX_RUNTIME_REINTERPRET(type, var) (*(type *)(&var))
+
+
+/////////////// CInitCode ///////////////
+
+// inline attribute
+#ifndef CYTHON_INLINE
+  #if defined(__clang__)
+    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+  #elif defined(__GNUC__)
+    #define CYTHON_INLINE __inline__
+  #elif defined(_MSC_VER)
+    #define CYTHON_INLINE __inline
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_INLINE inline
+  #else
+    #define CYTHON_INLINE
+  #endif
+#endif
+
+
+/////////////// CppInitCode ///////////////
+
+#ifndef __cplusplus
+  #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
+#endif
+
+// inline attribute
+#ifndef CYTHON_INLINE
+  #if defined(__clang__)
+    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+  #else
+    #define CYTHON_INLINE inline
+  #endif
+#endif
+
+// Work around clang bug https://stackoverflow.com/questions/21847816/c-invoke-nested-template-class-destructor
+template<typename T>
+void __Pyx_call_destructor(T& x) {
+    x.~T();
+}
+
+// Used for temporary variables of "reference" type.
+template<typename T>
+class __Pyx_FakeReference {
+  public:
+    __Pyx_FakeReference() : ptr(NULL) { }
+    // __Pyx_FakeReference(T& ref) : ptr(&ref) { }
+    // Const version needed as Cython doesn't know about const overloads (e.g. for stl containers).
+    __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
+    T *operator->() { return ptr; }
+    T *operator&() { return ptr; }
+    operator T&() { return *ptr; }
+    // TODO(robertwb): Delegate all operators (or auto-generate unwrapping code where needed).
+    template<typename U> bool operator ==(const U& other) const { return *ptr == other; }
+    template<typename U> bool operator !=(const U& other) const { return *ptr != other; }
+    template<typename U> bool operator==(const __Pyx_FakeReference<U>& other) const { return *ptr == *other.ptr; }
+    template<typename U> bool operator!=(const __Pyx_FakeReference<U>& other) const { return *ptr != *other.ptr; }
+  private:
+    T *ptr;
+};
+
+
+/////////////// PythonCompatibility ///////////////
+
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+
+#if PY_MAJOR_VERSION < 3
+  #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+  #define __Pyx_DefaultClassType PyClass_Type
+  #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+          PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#else
+  #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+  #define __Pyx_DefaultClassType PyType_Type
+#if CYTHON_COMPILING_IN_LIMITED_API
+  // Note that the limited API doesn't know about PyCodeObject, so the type of this
+  // is PyObject (unlike for the main API)
+  static CYTHON_INLINE PyObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f,
+                                                  PyObject *code, PyObject *c, PyObject* n, PyObject *v,
+                                                  PyObject *fv, PyObject *cell, PyObject* fn,
+                                                  PyObject *name, int fline, PyObject *lnos) {
+      // Backout option for generating a code object.
+      // PyCode_NewEmpty isn't in the limited API. Therefore the two options are
+      // 1. Python call of the code type with a long list of positional args.
+      // 2. Generate a code object by compiling some trivial code, and customize.
+      // We use the second because it's less sensitive to changes in the code type
+      // constructor with version.
+      PyObject *exception_table = NULL;
+      PyObject *types_module=NULL, *code_type=NULL, *result=NULL;
+      #if __PYX_LIMITED_VERSION_HEX < 0x030B0000
+      PyObject *version_info; /* borrowed */
+      PyObject *py_minor_version = NULL;
+      #endif
+      long minor_version = 0;
+      PyObject *type, *value, *traceback;
+
+      // we must be able to call this while an exception is happening - thus clear then restore the state
+      PyErr_Fetch(&type, &value, &traceback);
+
+      #if __PYX_LIMITED_VERSION_HEX >= 0x030B0000
+      minor_version = 11;
+      // we don't yet need to distinguish between versions > 11
+      // Note that from 3.13, when we do, we can use Py_Version
+      #else
+      if (!(version_info = PySys_GetObject("version_info"))) goto end;
+      if (!(py_minor_version = PySequence_GetItem(version_info, 1))) goto end;
+      minor_version = PyLong_AsLong(py_minor_version);
+      Py_DECREF(py_minor_version);
+      if (minor_version == -1 && PyErr_Occurred()) goto end;
+      #endif
+
+      if (!(types_module = PyImport_ImportModule("types"))) goto end;
+      if (!(code_type = PyObject_GetAttrString(types_module, "CodeType"))) goto end;
+
+      if (minor_version <= 7) {
+          // 3.7:
+          // code(argcount, kwonlyargcount, nlocals, stacksize, flags, codestring,
+          //      constants, names, varnames, filename, name, firstlineno,
+          //      lnotab[, freevars[, cellvars]])
+          (void)p;
+          result = PyObject_CallFunction(code_type, "iiiiiOOOOOOiOO", a, k, l, s, f, code,
+                                         c, n, v, fn, name, fline, lnos, fv, cell);
+      } else if (minor_version <= 10) {
+          // 3.8, 3.9, 3.10
+          // code(argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize,
+          //      flags, codestring, constants, names, varnames, filename, name,
+          //      firstlineno, lnotab[, freevars[, cellvars]])
+          // 3.10 switches lnotab for linetable, but is otherwise the same
+          result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOiOO", a,p, k, l, s, f, code,
+                                         c, n, v, fn, name, fline, lnos,
fv, cell); + } else { + // 3.11, 3.12 + // code(argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize, + // flags, codestring, constants, names, varnames, filename, name, + // qualname, firstlineno, linetable, exceptiontable, freevars=(), cellvars=(), /) + // We use name and qualname for simplicity + if (!(exception_table = PyBytes_FromStringAndSize(NULL, 0))) goto end; + result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOOiOO", a,p, k, l, s, f, code, + c, n, v, fn, name, name, fline, lnos, exception_table, fv, cell); + } + + end: + Py_XDECREF(code_type); + Py_XDECREF(exception_table); + Py_XDECREF(types_module); + if (type) { + PyErr_Restore(type, value, traceback); + } + return result; + } + + // Cython uses these constants but they are not available in the limited API. + // (it'd be nice if there was a more robust way of looking these up) + #ifndef CO_OPTIMIZED + #define CO_OPTIMIZED 0x0001 + #endif + #ifndef CO_NEWLOCALS + #define CO_NEWLOCALS 0x0002 + #endif + #ifndef CO_VARARGS + #define CO_VARARGS 0x0004 + #endif + #ifndef CO_VARKEYWORDS + #define CO_VARKEYWORDS 0x0008 + #endif + #ifndef CO_ASYNC_GENERATOR + #define CO_ASYNC_GENERATOR 0x0200 + #endif + #ifndef CO_GENERATOR + #define CO_GENERATOR 0x0020 + #endif + #ifndef CO_COROUTINE + #define CO_COROUTINE 0x0080 + #endif +#elif PY_VERSION_HEX >= 0x030B0000 + static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, + PyObject *code, PyObject *c, PyObject* n, PyObject *v, + PyObject *fv, PyObject *cell, PyObject* fn, + PyObject *name, int fline, PyObject *lnos) { + // As earlier versions, but + // 1. pass an empty bytes string as exception_table + // 2. pass name as qualname (TODO this might implementing properly in future) + PyCodeObject *result; + PyObject *empty_bytes = PyBytes_FromStringAndSize("", 0); /* we don't have access to __pyx_empty_bytes here */ + if (!empty_bytes) return NULL; + result = + #if PY_VERSION_HEX >= 0x030C0000 + PyUnstable_Code_NewWithPosOnlyArgs + #else + PyCode_NewWithPosOnlyArgs + #endif + (a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, name, fline, lnos, empty_bytes); + Py_DECREF(empty_bytes); + return result; + } +#elif PY_VERSION_HEX >= 0x030800B2 && !CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ + PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif +#endif + +#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE) + #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type) +#else + #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type)) +#endif + +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is) + #define __Pyx_Py_Is(x, y) Py_Is(x, y) +#else + #define __Pyx_Py_Is(x, y) ((x) == (y)) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone) + #define __Pyx_Py_IsNone(ob) Py_IsNone(ob) +#else + #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue) + #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob) +#else + #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse) + #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob) +#else + #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False) +#endif +#define __Pyx_NoneAsNull(obj) 
(__Pyx_Py_IsNone(obj) ? NULL : (obj)) + +#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o) +#else + #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o) +#endif + +#ifndef CO_COROUTINE + #define CO_COROUTINE 0x80 +#endif +#ifndef CO_ASYNC_GENERATOR + #define CO_ASYNC_GENERATOR 0x200 +#endif + +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef Py_TPFLAGS_SEQUENCE + #define Py_TPFLAGS_SEQUENCE 0 +#endif +#ifndef Py_TPFLAGS_MAPPING + #define Py_TPFLAGS_MAPPING 0 +#endif + +#ifndef METH_STACKLESS + // already defined for Stackless Python (all versions) and C-Python >= 3.7 + // value if defined: Stackless Python < 3.6: 0x80 else 0x100 + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + // new in CPython 3.6, but changed in 3.7 - see + // positional-only parameters: + // https://bugs.python.org/issue29464 + // const args: + // https://bugs.python.org/issue32240 + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + // new in CPython 3.7, used to be old signature of _PyCFunctionFast() in 3.6 + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif + +#if CYTHON_METH_FASTCALL + #define __Pyx_METH_FASTCALL METH_FASTCALL + #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast + #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords +#else + #define __Pyx_METH_FASTCALL METH_VARARGS + #define __Pyx_PyCFunction_FastCall PyCFunction + #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords +#endif + +#if CYTHON_VECTORCALL + #define __pyx_vectorcallfunc vectorcallfunc + #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET + #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n)) +#elif CYTHON_BACKPORT_VECTORCALL + typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args, + size_t nargsf, PyObject *kwnames); + #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1)) + #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(((size_t)(n)) & ~__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)) +#else + #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0 + #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n)) +#endif + +// These PyCFunction related macros get redefined in CythonFunction.c. +// We need our own copies because the inline functions in CPython have a type-check assert +// that breaks with a CyFunction in debug mode. 
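The backported vectorcall macros above pack two things into one `size_t`: the top bit (`__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET`) signals that the callee may temporarily reuse `args[-1]`, and `__Pyx_PyVectorcall_NARGS` masks that bit off to recover the positional argument count. The bit layout in isolation; the macro names here are illustrative, not from this file:

```c
#include <stdio.h>
#include <stddef.h>

/* Same layout as the backport above: the MSB of nargsf is the
 * arguments-offset flag, the remaining bits hold the count. */
#define ARGS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1))
#define NARGS(n)    ((ptrdiff_t)((size_t)(n) & ~ARGS_OFFSET))

int main(void)
{
    size_t nargsf = (size_t)3 | ARGS_OFFSET;  /* 3 args, flag set */
    printf("nargs = %td\n", NARGS(nargsf));                    /* 3 */
    printf("offset flag = %d\n", (nargsf & ARGS_OFFSET) != 0); /* 1 */
    return 0;
}
```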
+#if PY_VERSION_HEX >= 0x030900B1
+#define __Pyx_PyCFunction_CheckExact(func) PyCFunction_CheckExact(func)
+#else
+#define __Pyx_PyCFunction_CheckExact(func) PyCFunction_Check(func)
+#endif
+#define __Pyx_CyOrPyCFunction_Check(func) PyCFunction_Check(func)
+
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func) (((PyCFunctionObject*)(func))->m_ml->ml_meth)
+#elif !CYTHON_COMPILING_IN_LIMITED_API
+// It's probably easier for non-CPythons to support PyCFunction_GET_FUNCTION() than the object struct layout.
+#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func) PyCFunction_GET_FUNCTION(func)
+// Unused in CYTHON_COMPILING_IN_LIMITED_API.
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_CyOrPyCFunction_GET_FLAGS(func) (((PyCFunctionObject*)(func))->m_ml->ml_flags)
+static CYTHON_INLINE PyObject* __Pyx_CyOrPyCFunction_GET_SELF(PyObject *func) {
+    return (__Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_STATIC) ? NULL : ((PyCFunctionObject*)func)->m_self;
+}
+// Only used if CYTHON_COMPILING_IN_CPYTHON.
+#endif
+static CYTHON_INLINE int __Pyx__IsSameCFunction(PyObject *func, void *cfunc) {
+#if CYTHON_COMPILING_IN_LIMITED_API
+    return PyCFunction_Check(func) && PyCFunction_GetFunction(func) == (PyCFunction) cfunc;
+#else
+    return PyCFunction_Check(func) && PyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc;
+#endif
+}
+#define __Pyx_IsSameCFunction(func, cfunc) __Pyx__IsSameCFunction(func, cfunc)
+
+// PEP-573: PyCFunction holds reference to defining class (PyCMethodObject)
+#if __PYX_LIMITED_VERSION_HEX < 0x030900B1
+  #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b))
+  typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *);
+#else
+  #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b)
+  #define __Pyx_PyCMethod PyCMethod
+#endif
+#ifndef METH_METHOD
+  #define METH_METHOD 0x200
+#endif
+
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+  #define PyObject_Malloc(s) PyMem_Malloc(s)
+  #define PyObject_Free(p) PyMem_Free(p)
+  #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+
+#if CYTHON_COMPILING_IN_LIMITED_API
+  #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+  #define __Pyx_PyFrame_SetLineNumber(frame, lineno)
+#else
+  #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+  #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+
+#if CYTHON_COMPILING_IN_LIMITED_API
+  #define __Pyx_PyThreadState_Current PyThreadState_Get()
+#elif !CYTHON_FAST_THREAD_STATE
+  #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#elif PY_VERSION_HEX >= 0x030d00A1
+  //#elif PY_VERSION_HEX >= 0x03050200
+  // Actually added in 3.5.2, but compiling against that does not guarantee that we get imported there.
+  #define __Pyx_PyThreadState_Current PyThreadState_GetUnchecked()
+#elif PY_VERSION_HEX >= 0x03060000
+  //#elif PY_VERSION_HEX >= 0x03050200
+  // Actually added in 3.5.2, but compiling against that does not guarantee that we get imported there.
+ #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif + +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op) +{ + void *result; + + result = PyModule_GetState(op); + if (!result) + Py_FatalError("Couldn't find the module state"); + return result; +} +#endif + +#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE(obj), name, func_ctype) +#if CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name)) +#else + #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name) +#endif + +// TSS (Thread Specific Storage) API +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; /* PyThread_create_key reports success always */ +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +// PyThread_delete_key_value(key) is equivalent to PyThread_set_key_value(key, NULL) +// PyThread_ReInitTLS() is a no-op +#endif /* TSS (Thread Specific Storage) API */ + + +#if PY_MAJOR_VERSION < 3 + #if CYTHON_COMPILING_IN_PYPY + #if PYPY_VERSION_NUM < 0x07030600 + #if defined(__cplusplus) && __cplusplus >= 201402L + [[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]] + #elif defined(__GNUC__) || defined(__clang__) + __attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))) + #elif defined(_MSC_VER) + __declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")) + #endif + static CYTHON_INLINE int PyGILState_Check(void) { + // PyGILState_Check is used to decide whether to release the GIL when we don't + // know that we have it. For PyPy2 it isn't possible to check. + // Therefore assume that we don't have the GIL (which causes us not to release it, + // but is "safe") + return 0; + } + #else // PYPY_VERSION_NUM < 0x07030600 + // PyPy2 >= 7.3.6 has PyGILState_Check + #endif // PYPY_VERSION_NUM < 0x07030600 + #else + // https://stackoverflow.com/a/25666624 + static CYTHON_INLINE int PyGILState_Check(void) { + PyThreadState * tstate = _PyThreadState_Current; + return tstate && (tstate == PyGILState_GetThisThreadState()); + } + #endif +#endif + +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif + +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif + +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B4 && PY_VERSION_HEX < 0x030d0000 && CYTHON_USE_UNICODE_INTERNALS +// _PyDict_GetItem_KnownHash() existed from CPython 3.5 to 3.12, but it was +// dropping exceptions in 3.5. Since 3.6, exceptions are kept. +#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) { + PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name); + if (res == NULL) PyErr_Clear(); + return res; +} +#elif PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) +#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError +#define __Pyx_PyDict_GetItemStr PyDict_GetItem +#else +static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) { + // This is tricky - we should return a borrowed reference but not swallow non-KeyError exceptions. 8-| + // But: this function is only used in Py2 and older PyPys, + // and currently only for argument parsing and other non-correctness-critical lookups + // and we know that 'name' is an interned 'str' with pre-calculated hash value (only comparisons can fail), + // thus, performance matters more than correctness here, especially in the "not found" case. +#if CYTHON_COMPILING_IN_PYPY + // So we ignore any exceptions in old PyPys ... + return PyDict_GetItem(dict, name); +#else + // and hack together a stripped-down and modified PyDict_GetItem() in CPython 2. + PyDictEntry *ep; + PyDictObject *mp = (PyDictObject*) dict; + long hash = ((PyStringObject *) name)->ob_shash; + assert(hash != -1); /* hash values of interned strings are always initialised */ + ep = (mp->ma_lookup)(mp, name, hash); + if (ep == NULL) { + // error occurred + return NULL; + } + // found or not found + return ep->me_value; +#endif +} +#define __Pyx_PyDict_GetItemStr PyDict_GetItem +#endif + +/* Type slots */ + +#if CYTHON_USE_TYPE_SLOTS + #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags) + #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0) + #define __Pyx_PyObject_GetIterNextFunc(obj) (Py_TYPE(obj)->tp_iternext) +#else + #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp)) + #define __Pyx_PyType_HasFeature(type, feature) PyType_HasFeature(type, feature) + #define __Pyx_PyObject_GetIterNextFunc(obj) PyIter_Next +#endif + +#if CYTHON_COMPILING_IN_LIMITED_API + // Using PyObject_GenericSetAttr to bypass types immutability protection feels + // a little hacky, but it does work in the limited API . + // (It doesn't work on PyPy but that probably isn't a bug.) + #define __Pyx_SetItemOnTypeDict(tp, k, v) PyObject_GenericSetAttr((PyObject*)tp, k, v) +#else + #define __Pyx_SetItemOnTypeDict(tp, k, v) PyDict_SetItem(tp->tp_dict, k, v) +#endif + +#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000 +// In Py3.8+, instances of heap types need to decref their type on deallocation. 
+// https://bugs.python.org/issue35810 +#define __Pyx_PyHeapTypeObject_GC_Del(obj) { \ + PyTypeObject *type = Py_TYPE((PyObject*)obj); \ + assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE)); \ + PyObject_GC_Del(obj); \ + Py_DECREF(type); \ +} +#else +#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj) +#endif + +#if CYTHON_COMPILING_IN_LIMITED_API + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111U) + #define __Pyx_PyUnicode_KIND(u) ((void)u, (0)) + // __Pyx_PyUnicode_DATA() and __Pyx_PyUnicode_READ() must go together, e.g. for iteration. + #define __Pyx_PyUnicode_DATA(u) ((void*)u) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i)) + //#define __Pyx_PyUnicode_WRITE(k, d, i, ch) /* not available */ + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u)) +#elif PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + /* new Py3.3 unicode type (PEP 393) */ + #define CYTHON_PEP393_ENABLED 1 + + #if PY_VERSION_HEX >= 0x030C0000 + // Py3.12 / PEP-623 removed wstr type unicode strings and all of the PyUnicode_READY() machinery. + #define __Pyx_PyUnicode_READY(op) (0) + #else + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #endif + + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u)) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, (Py_UCS4) ch) + #if PY_VERSION_HEX >= 0x030C0000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #else + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 + // Avoid calling deprecated C-API functions in Py3.9+ that PEP-623 schedules for removal in Py3.12. + // https://www.python.org/dev/peps/pep-0623/ + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #endif + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535U : 1114111U) + #define __Pyx_PyUnicode_KIND(u) ((int)sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + // (void)(k) => avoid unused variable warning due to macro: + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = (Py_UNICODE) ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif + +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif + +#if CYTHON_COMPILING_IN_PYPY + #if !defined(PyUnicode_DecodeUnicodeEscape) + #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors) + #endif + #if !defined(PyUnicode_Contains) || (PY_MAJOR_VERSION == 2 && PYPY_VERSION_NUM < 0x07030500) + #undef PyUnicode_Contains + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) + #endif + #if !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) + #endif + #if !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) + #endif +#endif + +// ("..." % x) must call PyNumber_Remainder() if x is a string subclass that implements "__rmod__()". +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) + +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif + +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif + +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact + // PyPy3 used to define "PyObject_Unicode" +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif + +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif + +#if CYTHON_COMPILING_IN_CPYTHON + #define __Pyx_PySequence_ListKeepNew(obj) \ + (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? 
__Pyx_NewRef(obj) : PySequence_List(obj)) +#else + #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj) +#endif + +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type) +#endif + +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif + +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_ITEM(o, i) PySequence_ITEM(o, i) + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) + #define __Pyx_PyTuple_SET_ITEM(o, i, v) (PyTuple_SET_ITEM(o, i, v), (0)) + #define __Pyx_PyList_SET_ITEM(o, i, v) (PyList_SET_ITEM(o, i, v), (0)) + #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_GET_SIZE(o) + #define __Pyx_PyList_GET_SIZE(o) PyList_GET_SIZE(o) + #define __Pyx_PySet_GET_SIZE(o) PySet_GET_SIZE(o) + #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o) + #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_GET_SIZE(o) +#else + #define __Pyx_PySequence_ITEM(o, i) PySequence_GetItem(o, i) + // NOTE: might fail with exception => check for -1 + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) + // Note that this doesn't leak a reference to whatever's at o[i] + #define __Pyx_PyTuple_SET_ITEM(o, i, v) PyTuple_SetItem(o, i, v) + #define __Pyx_PyList_SET_ITEM(o, i, v) PyList_SetItem(o, i, v) + #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_Size(o) + #define __Pyx_PyList_GET_SIZE(o) PyList_Size(o) + #define __Pyx_PySet_GET_SIZE(o) PySet_Size(o) + #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_Size(o) + #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_Size(o) +#endif + +#if PY_VERSION_HEX >= 0x030d00A1 + #define __Pyx_PyImport_AddModuleRef(name) PyImport_AddModuleRef(name) +#else + static CYTHON_INLINE PyObject *__Pyx_PyImport_AddModuleRef(const char *name) { + PyObject *module = PyImport_AddModule(name); + Py_XINCREF(module); + return module; + } +#endif + +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define __Pyx_Py3Int_Check(op) PyLong_Check(op) + #define __Pyx_Py3Int_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#else + #define __Pyx_Py3Int_Check(op) (PyLong_Check(op) || PyInt_Check(op)) + #define __Pyx_Py3Int_CheckExact(op) (PyLong_CheckExact(op) || PyInt_CheckExact(op)) +#endif + +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif + +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif + +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t 
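+  // (Py_hash_t only appeared in CPython 3.2 and is defined there as Py_ssize_t,
+  //  which is why the Py_ssize_t conversion helpers can be reused unchanged here.)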
+#endif
+
+// backport of PyAsyncMethods from Py3.5 to older Py3.x versions
+// (mis-)using the "tp_reserved" type slot which is re-activated as "tp_as_async" in Py3.5
+#if CYTHON_USE_ASYNC_SLOTS
+  #if PY_VERSION_HEX >= 0x030500B1
+    #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+    #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+  #else
+    #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+  #endif
+#else
+  #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+    typedef struct {
+        unaryfunc am_await;
+        unaryfunc am_aiter;
+        unaryfunc am_anext;
+    } __Pyx_PyAsyncMethodsStruct;
+#endif
+
+
+/////////////// IncludeStructmemberH.proto ///////////////
+
+#include <structmember.h>
+
+
+/////////////// SmallCodeConfig.proto ///////////////
+
+#ifndef CYTHON_SMALL_CODE
+#if defined(__clang__)
+    #define CYTHON_SMALL_CODE
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+    #define CYTHON_SMALL_CODE __attribute__((cold))
+#else
+    #define CYTHON_SMALL_CODE
+#endif
+#endif
+
+
+/////////////// PyModInitFuncType.proto ///////////////
+
+#ifndef CYTHON_NO_PYINIT_EXPORT
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
+
+#elif PY_MAJOR_VERSION < 3
+// Py2: define this to void manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" void
+#else
+#define __Pyx_PyMODINIT_FUNC void
+#endif
+
+#else
+// Py3+: define this to PyObject * manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
+#else
+#define __Pyx_PyMODINIT_FUNC PyObject *
+#endif
+#endif
+
+
+/////////////// FastTypeChecks.proto ///////////////
+
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
+#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2)
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);/*proto*/
+static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b);/*proto*/
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);/*proto*/
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);/*proto*/
+#else
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2))
+#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
+#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
+#endif
+#define __Pyx_PyErr_ExceptionMatches2(err1, err2) __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2)
+
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+
+/////////////// FastTypeChecks ///////////////
+//@requires: Exceptions.c::PyThreadStateGet
+//@requires: Exceptions.c::PyErrFetchRestore
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
+    while (a) {
+        a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*);
+        if (a == b)
+            return 1;
+    }
+    return b == &PyBaseObject_Type;
+}
+
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
+    PyObject *mro;
+    if (a == b) return 1;
+    mro = a->tp_mro;
+    if (likely(mro)) {
+        Py_ssize_t i, n;
+        n = PyTuple_GET_SIZE(mro);
+        for (i = 0; i < n; i++) {
+            if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+                return 1;
+        }
+        return 0;
+    }
+    // should only get here for incompletely initialised types, i.e. never under normal usage patterns
+    return __Pyx_InBases(a, b);
+}
+
+static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) {
+    PyObject *mro;
+    if (cls == a || cls == b) return 1;
+    mro = cls->tp_mro;
+    if (likely(mro)) {
+        Py_ssize_t i, n;
+        n = PyTuple_GET_SIZE(mro);
+        for (i = 0; i < n; i++) {
+            PyObject *base = PyTuple_GET_ITEM(mro, i);
+            if (base == (PyObject *)a || base == (PyObject *)b)
+                return 1;
+        }
+        return 0;
+    }
+    // should only get here for incompletely initialised types, i.e. never under normal usage patterns
+    return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b);
+}
+
+
+#if PY_MAJOR_VERSION == 2
+static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
+    // PyObject_IsSubclass() can recurse and therefore is not safe
+    PyObject *exception, *value, *tb;
+    int res;
+    __Pyx_PyThreadState_declare
+    __Pyx_PyThreadState_assign
+    __Pyx_ErrFetch(&exception, &value, &tb);
+
+    res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
+    // This function must not fail, so print the error here (which also clears it)
+    if (unlikely(res == -1)) {
+        PyErr_WriteUnraisable(err);
+        res = 0;
+    }
+    if (!res) {
+        res = PyObject_IsSubclass(err, exc_type2);
+        // This function must not fail, so print the error here (which also clears it)
+        if (unlikely(res == -1)) {
+            PyErr_WriteUnraisable(err);
+            res = 0;
+        }
+    }
+
+    __Pyx_ErrRestore(exception, value, tb);
+    return res;
+}
+#else
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+    if (exc_type1) {
+        return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2);
+    } else {
+        return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+    }
+}
+#endif
+
+// so far, we only call PyErr_GivenExceptionMatches() with an exception type (not instance) as first argument
+// => optimise for that case
+
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    assert(PyExceptionClass_Check(exc_type));
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    // the tighter subtype checking in Py3 allows faster out-of-order comparison
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        PyObject *t = PyTuple_GET_ITEM(tuple, i);
+        #if PY_MAJOR_VERSION < 3
+        if (likely(exc_type == t)) return 1;
+        #endif
+        if (likely(PyExceptionClass_Check(t))) {
+            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+        }
+    }
+    return 0;
+}
+
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+    if (likely(err == exc_type)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        if (likely(PyExceptionClass_Check(exc_type))) {
+            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+        } else if (likely(PyTuple_Check(exc_type))) {
+            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+        }
+    }
+    return PyErr_GivenExceptionMatches(err, exc_type);
+}
+
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+    // Only used internally with known exception types => pure safety check assertions.
+    assert(PyExceptionClass_Check(exc_type1));
+    assert(PyExceptionClass_Check(exc_type2));
+    if (likely(err == exc_type1 || err == exc_type2)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+    }
+    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+
+#endif
+
+
+/////////////// MathInitCode ///////////////
+
+#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)
+  #if !defined(_USE_MATH_DEFINES)
+    #define _USE_MATH_DEFINES
+  #endif
+#endif
+#include <math.h>
+
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+  // Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
+  // a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
+  // a quiet NaN.
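+  // (In IEEE-754 binary32 terms: 1 sign bit, 8 exponent bits, 23 mantissa bits.
+  // memset-ing all four bytes to 0xFF below yields the bit pattern 0xFFFFFFFF:
+  // exponent all ones, mantissa non-zero with its top bit set, i.e. a quiet NaN.)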
+ float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif + +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + + +/////////////// UtilityFunctionPredeclarations.proto /////////////// + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ + +/////////////// ForceInitThreads.proto /////////////// +//@proto_block: utility_code_proto_before_types + +#ifndef __PYX_FORCE_INIT_THREADS + #define __PYX_FORCE_INIT_THREADS 0 +#endif + +/////////////// InitThreads.init /////////////// + +#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 +PyEval_InitThreads(); +#endif + + +/////////////// ModuleCreationPEP489 /////////////// +//@substitute: naming + +//#if CYTHON_PEP489_MULTI_PHASE_INIT +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} + +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *module, const char* from_name, const char* to_name, int allow_none) +#else +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) +#endif +{ + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { +#if CYTHON_COMPILING_IN_LIMITED_API + result = PyModule_AddObject(module, to_name, value); +#else + result = PyDict_SetItemString(moddict, to_name, value); +#endif + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} + +static CYTHON_SMALL_CODE PyObject* ${pymodule_create_func_cname}(PyObject *spec, PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + CYTHON_UNUSED_VAR(def); + + // For now, we only have exactly one module instance. 
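+    // (This function fills the Py_mod_create slot of PEP 489 multi-phase init:
+    // it is called with the ModuleSpec and the module definition, and PEP 489
+    // allows it to return an already existing module with a fresh reference.)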
+ if (__Pyx_check_single_interpreter()) + return NULL; + if (${module_cname}) + return __Pyx_NewRef(${module_cname}); + + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + +#if CYTHON_COMPILING_IN_LIMITED_API + moddict = module; +#else + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + // moddict is a borrowed reference +#endif + + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + + return module; +bad: + Py_XDECREF(module); + return NULL; +} +//#endif + + +/////////////// CodeObjectCache.proto /////////////// + +#if !CYTHON_COMPILING_IN_LIMITED_API +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; + +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; + +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; + +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); +#endif + +/////////////// CodeObjectCache /////////////// +// Note that errors are simply ignored in the code below. +// This is just a cache, if a lookup or insertion fails - so what? 
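+// The cache below keeps its entries in a flat array sorted by code_line, so
+// both lookup and insertion can reuse the binary search in
+// __pyx_bisect_code_objects().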
+
+#if !CYTHON_COMPILING_IN_LIMITED_API
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+    int start = 0, mid = 0, end = count - 1;
+    if (end >= 0 && code_line > entries[end].code_line) {
+        return count;
+    }
+    while (start < end) {
+        mid = start + (end - start) / 2;
+        if (code_line < entries[mid].code_line) {
+            end = mid;
+        } else if (code_line > entries[mid].code_line) {
+             start = mid + 1;
+        } else {
+            return mid;
+        }
+    }
+    if (code_line <= entries[mid].code_line) {
+        return mid;
+    } else {
+        return mid + 1;
+    }
+}
+
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+    PyCodeObject* code_object;
+    int pos;
+    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
+        return NULL;
+    }
+    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
+        return NULL;
+    }
+    code_object = __pyx_code_cache.entries[pos].code_object;
+    Py_INCREF(code_object);
+    return code_object;
+}
+
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+    int pos, i;
+    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+    if (unlikely(!code_line)) {
+        return;
+    }
+    if (unlikely(!entries)) {
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+        if (likely(entries)) {
+            __pyx_code_cache.entries = entries;
+            __pyx_code_cache.max_count = 64;
+            __pyx_code_cache.count = 1;
+            entries[0].code_line = code_line;
+            entries[0].code_object = code_object;
+            Py_INCREF(code_object);
+        }
+        return;
+    }
+    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+        PyCodeObject* tmp = entries[pos].code_object;
+        entries[pos].code_object = code_object;
+        Py_DECREF(tmp);
+        return;
+    }
+    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+        int new_max = __pyx_code_cache.max_count + 64;
+        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+            __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
+        if (unlikely(!entries)) {
+            return;
+        }
+        __pyx_code_cache.entries = entries;
+        __pyx_code_cache.max_count = new_max;
+    }
+    for (i=__pyx_code_cache.count; i>pos; i--) {
+        entries[i] = entries[i-1];
+    }
+    entries[pos].code_line = code_line;
+    entries[pos].code_object = code_object;
+    __pyx_code_cache.count++;
+    Py_INCREF(code_object);
+}
+#endif
+
+/////////////// CodeObjectCache.cleanup ///////////////
+
+  #if !CYTHON_COMPILING_IN_LIMITED_API
+  if (__pyx_code_cache.entries) {
+      __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+      int i, count = __pyx_code_cache.count;
+      __pyx_code_cache.count = 0;
+      __pyx_code_cache.max_count = 0;
+      __pyx_code_cache.entries = NULL;
+      for (i=0; i<count; i++) {
+          Py_DECREF(entries[i].code_object);
+      }
+      PyMem_Free(entries);
+  }
+  #endif
+
+/////////////// CheckBinaryVersion.proto ///////////////
+
+static unsigned long __Pyx_get_runtime_version(void);
+static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer);
+
+/////////////// CheckBinaryVersion ///////////////
+
+static unsigned long __Pyx_get_runtime_version(void) {
+#if __PYX_LIMITED_VERSION_HEX >= 0x030B00A4
+    return Py_Version & ~0xFFUL;
+#else
+    const char* rt_version = Py_GetVersion();
+    unsigned long version = 0;
+    unsigned long factor = 0x01000000UL;
+    unsigned int digit = 0;
+    int i = 0;
+    while (factor) {
+        while ('0' <= rt_version[i] && rt_version[i] <= '9') {
+            digit = digit * 10 + (unsigned int) (rt_version[i] - '0');
+            ++i;
+        }
+        version += factor * digit;
+        if (rt_version[i] != '.')
+            break;
+        digit = 0;
+        factor >>= 8;
+        ++i;
+    }
+    return version;
+#endif
+}
+
+static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer) {
+    // runtime version is: -1 => older, 0 => equal, 1 => newer
+    const unsigned long MAJOR_MINOR = 0xFFFF0000UL;
+    if ((rt_version & MAJOR_MINOR) == (ct_version & MAJOR_MINOR))
+        return 0;
+    if (likely(allow_newer && (rt_version & MAJOR_MINOR) > (ct_version & MAJOR_MINOR)))
+        return 1;
+
+    {
+        char message[200];
+        PyOS_snprintf(message, sizeof(message),
+                      "compile time Python version %d.%d "
+                      "of module '%.100s' "
+                      "%s "
+                      "runtime version %d.%d",
+                      (int) (ct_version >> 24), (int) ((ct_version >> 16) & 0xFF),
+                      __Pyx_MODULE_NAME,
+                      (allow_newer) ? "was newer than" : "does not match",
+                      (int) (rt_version >> 24), (int) ((rt_version >> 16) & 0xFF)
+        );
+        // returns 0 or -1
+        return PyErr_WarnEx(NULL, message, 1);
+    }
+}
+
+/////////////// IsLittleEndian.proto ///////////////
+
+static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
+
+/////////////// IsLittleEndian ///////////////
+
+static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
+{
+  union {
+    uint32_t u32;
+    uint8_t u8[4];
+  } S;
+  S.u32 = 0x01020304;
+  return S.u8[0] == 4;
+}
+
+/////////////// Refnanny.proto ///////////////
+
+#ifndef CYTHON_REFNANNY
+  #define CYTHON_REFNANNY 0
+#endif
+
+#if CYTHON_REFNANNY
+  typedef struct {
+    void (*INCREF)(void*, PyObject*, Py_ssize_t);
+    void (*DECREF)(void*, PyObject*, Py_ssize_t);
+    void (*GOTREF)(void*, PyObject*, Py_ssize_t);
+    void (*GIVEREF)(void*, PyObject*, Py_ssize_t);
+    void* (*SetupContext)(const char*, Py_ssize_t, const char*);
+    void (*FinishContext)(void**);
+  } __Pyx_RefNannyAPIStruct;
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
+  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+          if (acquire_gil) { \
+              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__)); \
+              PyGILState_Release(__pyx_gilstate_save); \
+          } else { \
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__)); \
+          }
+  #define __Pyx_RefNannyFinishContextNogil() { \
+              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
+              __Pyx_RefNannyFinishContext(); \
+              PyGILState_Release(__pyx_gilstate_save); \
+          }
+#else
+  #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+          __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__))
+  #define __Pyx_RefNannyFinishContextNogil() __Pyx_RefNannyFinishContext()
+#endif
+  #define __Pyx_RefNannyFinishContext() \
+          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+  #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+  #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+  #define __Pyx_GOTREF(r)  __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+  #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+  #define __Pyx_XINCREF(r)  do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0)
+  #define __Pyx_XDECREF(r)  do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0)
+  #define __Pyx_XGOTREF(r)  do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0)
+  #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else 
{__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContextNogil() + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif /* CYTHON_REFNANNY */ + +#define __Pyx_Py_XDECREF_SET(r, v) do { \ + PyObject *tmp = (PyObject *) r; \ + r = v; Py_XDECREF(tmp); \ + } while (0) +#define __Pyx_XDECREF_SET(r, v) do { \ + PyObject *tmp = (PyObject *) r; \ + r = v; __Pyx_XDECREF(tmp); \ + } while (0) +#define __Pyx_DECREF_SET(r, v) do { \ + PyObject *tmp = (PyObject *) r; \ + r = v; __Pyx_DECREF(tmp); \ + } while (0) + +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/////////////// Refnanny /////////////// + +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif /* CYTHON_REFNANNY */ + + +/////////////// ImportRefnannyAPI /////////////// + +#if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + + +/////////////// RegisterModuleCleanup.proto /////////////// +//@substitute: naming + +static void ${cleanup_cname}(PyObject *self); /*proto*/ + +#if PY_MAJOR_VERSION < 3 || CYTHON_COMPILING_IN_PYPY +static int __Pyx_RegisterCleanup(void); /*proto*/ +#else +#define __Pyx_RegisterCleanup() (0) +#endif + +/////////////// RegisterModuleCleanup /////////////// +//@substitute: naming + +#if PY_MAJOR_VERSION < 3 || CYTHON_COMPILING_IN_PYPY +static PyObject* ${cleanup_cname}_atexit(PyObject *module, PyObject *unused) { + CYTHON_UNUSED_VAR(unused); + ${cleanup_cname}(module); + Py_INCREF(Py_None); return Py_None; +} + +static int __Pyx_RegisterCleanup(void) { + // Don't use Py_AtExit because that has a 32-call limit and is called + // after python finalization. + // Also, we try to prepend the cleanup function to "atexit._exithandlers" + // in Py2 because CPython runs them last-to-first. Being run last allows + // user exit code to run before us that may depend on the globals + // and cached objects that we are about to clean up. 
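+    // Roughly the Python-level behaviour implemented below (a sketch, assuming
+    // the private "atexit._exithandlers" list still exists in this Py2 runtime):
+    //     import atexit
+    //     try: atexit._exithandlers.insert(0, (cleanup, (), {}))
+    //     except AttributeError: atexit.register(cleanup)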
+ + static PyMethodDef cleanup_def = { + "__cleanup", (PyCFunction)${cleanup_cname}_atexit, METH_NOARGS, 0}; + + PyObject *cleanup_func = 0; + PyObject *atexit = 0; + PyObject *reg = 0; + PyObject *args = 0; + PyObject *res = 0; + int ret = -1; + + cleanup_func = PyCFunction_New(&cleanup_def, 0); + if (!cleanup_func) + goto bad; + + atexit = PyImport_ImportModule("atexit"); + if (!atexit) + goto bad; + reg = PyObject_GetAttrString(atexit, "_exithandlers"); + if (reg && PyList_Check(reg)) { + PyObject *a, *kw; + a = PyTuple_New(0); + kw = PyDict_New(); + if (!a || !kw) { + Py_XDECREF(a); + Py_XDECREF(kw); + goto bad; + } + args = PyTuple_Pack(3, cleanup_func, a, kw); + Py_DECREF(a); + Py_DECREF(kw); + if (!args) + goto bad; + ret = PyList_Insert(reg, 0, args); + } else { + if (!reg) + PyErr_Clear(); + Py_XDECREF(reg); + reg = PyObject_GetAttrString(atexit, "register"); + if (!reg) + goto bad; + args = PyTuple_Pack(1, cleanup_func); + if (!args) + goto bad; + res = PyObject_CallObject(reg, args); + if (!res) + goto bad; + ret = 0; + } +bad: + Py_XDECREF(cleanup_func); + Py_XDECREF(atexit); + Py_XDECREF(reg); + Py_XDECREF(args); + Py_XDECREF(res); + return ret; +} +#endif + +/////////////// FastGil.init /////////////// +#ifdef WITH_THREAD +__Pyx_FastGilFuncInit(); +#endif + +/////////////// NoFastGil.proto /////////////// +//@proto_block: utility_code_proto_before_types + +#define __Pyx_PyGILState_Ensure PyGILState_Ensure +#define __Pyx_PyGILState_Release PyGILState_Release +#define __Pyx_FastGIL_Remember() +#define __Pyx_FastGIL_Forget() +#define __Pyx_FastGilFuncInit() + +/////////////// FastGil.proto /////////////// +//@proto_block: utility_code_proto_before_types + +#if CYTHON_FAST_GIL + +struct __Pyx_FastGilVtab { + PyGILState_STATE (*Fast_PyGILState_Ensure)(void); + void (*Fast_PyGILState_Release)(PyGILState_STATE oldstate); + void (*FastGIL_Remember)(void); + void (*FastGIL_Forget)(void); +}; + +static void __Pyx_FastGIL_Noop(void) {} +static struct __Pyx_FastGilVtab __Pyx_FastGilFuncs = { + PyGILState_Ensure, + PyGILState_Release, + __Pyx_FastGIL_Noop, + __Pyx_FastGIL_Noop +}; + +static void __Pyx_FastGilFuncInit(void); + +#define __Pyx_PyGILState_Ensure __Pyx_FastGilFuncs.Fast_PyGILState_Ensure +#define __Pyx_PyGILState_Release __Pyx_FastGilFuncs.Fast_PyGILState_Release +#define __Pyx_FastGIL_Remember __Pyx_FastGilFuncs.FastGIL_Remember +#define __Pyx_FastGIL_Forget __Pyx_FastGilFuncs.FastGIL_Forget + +#ifdef WITH_THREAD + #ifndef CYTHON_THREAD_LOCAL + #if defined(__cplusplus) && __cplusplus >= 201103L + #define CYTHON_THREAD_LOCAL thread_local + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112 + #define CYTHON_THREAD_LOCAL _Thread_local + #elif defined(__GNUC__) + #define CYTHON_THREAD_LOCAL __thread + #elif defined(_MSC_VER) + #define CYTHON_THREAD_LOCAL __declspec(thread) + #endif + #endif +#endif + +#else +#define __Pyx_PyGILState_Ensure PyGILState_Ensure +#define __Pyx_PyGILState_Release PyGILState_Release +#define __Pyx_FastGIL_Remember() +#define __Pyx_FastGIL_Forget() +#define __Pyx_FastGilFuncInit() +#endif + +/////////////// FastGil /////////////// +// The implementations of PyGILState_Ensure/Release calls PyThread_get_key_value +// several times which is turns out to be quite slow (slower in fact than +// acquiring the GIL itself). Simply storing it in a thread local for the +// common case is much faster. +// To make optimal use of this thread local, we attempt to share it between +// modules. 
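+// Concretely, the vtable defined below is published once as a PyCapsule named
+// "<abi module>.FastGilFuncs" on the shared ABI module; modules loaded later
+// PyCapsule_Import() that capsule in __Pyx_FastGilFuncInit() instead of
+// re-detecting the thread-local key themselves.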
+ +#if CYTHON_FAST_GIL + +#define __Pyx_FastGIL_ABI_module __PYX_ABI_MODULE_NAME +#define __Pyx_FastGIL_PyCapsuleName "FastGilFuncs" +#define __Pyx_FastGIL_PyCapsule \ + __Pyx_FastGIL_ABI_module "." __Pyx_FastGIL_PyCapsuleName + +#ifdef CYTHON_THREAD_LOCAL + +#include "pythread.h" +#include "pystate.h" + +static CYTHON_THREAD_LOCAL PyThreadState *__Pyx_FastGil_tcur = NULL; +static CYTHON_THREAD_LOCAL int __Pyx_FastGil_tcur_depth = 0; +static int __Pyx_FastGil_autoTLSkey = -1; + +static CYTHON_INLINE void __Pyx_FastGIL_Remember0(void) { + ++__Pyx_FastGil_tcur_depth; +} + +static CYTHON_INLINE void __Pyx_FastGIL_Forget0(void) { + if (--__Pyx_FastGil_tcur_depth == 0) { + __Pyx_FastGil_tcur = NULL; + } +} + +static CYTHON_INLINE PyThreadState *__Pyx_FastGil_get_tcur(void) { + PyThreadState *tcur = __Pyx_FastGil_tcur; + if (tcur == NULL) { + tcur = __Pyx_FastGil_tcur = (PyThreadState*)PyThread_get_key_value(__Pyx_FastGil_autoTLSkey); + } + return tcur; +} + +static PyGILState_STATE __Pyx_FastGil_PyGILState_Ensure(void) { + int current; + PyThreadState *tcur; + __Pyx_FastGIL_Remember0(); + tcur = __Pyx_FastGil_get_tcur(); + if (tcur == NULL) { + // Uninitialized, need to initialize now. + return PyGILState_Ensure(); + } + current = tcur == __Pyx_PyThreadState_Current; + if (current == 0) { + PyEval_RestoreThread(tcur); + } + ++tcur->gilstate_counter; + return current ? PyGILState_LOCKED : PyGILState_UNLOCKED; +} + +static void __Pyx_FastGil_PyGILState_Release(PyGILState_STATE oldstate) { + PyThreadState *tcur = __Pyx_FastGil_get_tcur(); + __Pyx_FastGIL_Forget0(); + if (tcur->gilstate_counter == 1) { + // This is the last lock, do all the cleanup as well. + PyGILState_Release(oldstate); + } else { + --tcur->gilstate_counter; + if (oldstate == PyGILState_UNLOCKED) { + PyEval_SaveThread(); + } + } +} + +static void __Pyx_FastGilFuncInit0(void) { + /* Try to detect autoTLSkey. 
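+       (CPython keeps each thread's PyThreadState in a native TLS slot; probing
+       the first 100 keys for the value returned by PyGILState_GetThisThreadState()
+       recovers that slot's key.)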
*/ + int key; + void* this_thread_state = (void*) PyGILState_GetThisThreadState(); + for (key = 0; key < 100; key++) { + if (PyThread_get_key_value(key) == this_thread_state) { + __Pyx_FastGil_autoTLSkey = key; + break; + } + } + if (__Pyx_FastGil_autoTLSkey != -1) { + PyObject* capsule = NULL; + PyObject* abi_module = NULL; + __Pyx_PyGILState_Ensure = __Pyx_FastGil_PyGILState_Ensure; + __Pyx_PyGILState_Release = __Pyx_FastGil_PyGILState_Release; + __Pyx_FastGIL_Remember = __Pyx_FastGIL_Remember0; + __Pyx_FastGIL_Forget = __Pyx_FastGIL_Forget0; + capsule = PyCapsule_New(&__Pyx_FastGilFuncs, __Pyx_FastGIL_PyCapsule, NULL); + if (capsule) { + abi_module = __Pyx_PyImport_AddModuleRef(__Pyx_FastGIL_ABI_module); + if (abi_module) { + PyObject_SetAttrString(abi_module, __Pyx_FastGIL_PyCapsuleName, capsule); + Py_DECREF(abi_module); + } + } + Py_XDECREF(capsule); + } +} + +#else + +static void __Pyx_FastGilFuncInit0(void) { +} + +#endif + +static void __Pyx_FastGilFuncInit(void) { + struct __Pyx_FastGilVtab* shared = (struct __Pyx_FastGilVtab*)PyCapsule_Import(__Pyx_FastGIL_PyCapsule, 1); + if (shared) { + __Pyx_FastGilFuncs = *shared; + } else { + PyErr_Clear(); + __Pyx_FastGilFuncInit0(); + } +} + +#endif + +///////////////////// UtilityCodePragmas ///////////////////////// + +#ifdef _MSC_VER +#pragma warning( push ) +/* Warning 4127: conditional expression is constant + * Cython uses constant conditional expressions to allow in inline functions to be optimized at + * compile-time, so this warning is not useful + */ +#pragma warning( disable : 4127 ) +#endif + +///////////////////// UtilityCodePragmasEnd ////////////////////// + +#ifdef _MSC_VER +#pragma warning( pop ) /* undo whatever Cython has done to warnings */ +#endif diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/NumpyImportArray.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/NumpyImportArray.c new file mode 100644 index 0000000000000000000000000000000000000000..37f74da794ea1af0884756289ca51e3188deeccf --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/NumpyImportArray.c @@ -0,0 +1,46 @@ +///////////////////////// NumpyImportArray.init //////////////////// + +// comment below is deliberately kept in the generated C file to +// help users debug where this came from: +/* + * Cython has automatically inserted a call to _import_array since + * you didn't include one when you cimported numpy. 
To disable this
+ * add the line
+ *   <void>numpy._import_array
+ */
+#ifdef NPY_FEATURE_VERSION /* This is a public define that makes us reasonably confident it's "real" Numpy */
+// NO_IMPORT_ARRAY is Numpy's mechanism for indicating that import_array is handled elsewhere
+#ifndef NO_IMPORT_ARRAY /* https://numpy.org/doc/stable/reference/c-api/array.html#c.NO_IMPORT_ARRAY */
+if (unlikely(_import_array() == -1)) {
+    PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import "
+    "(auto-generated because you didn't call 'numpy.import_array()' after cimporting numpy; "
+    "use '<void>numpy._import_array' to disable if you are certain you don't need it).");
+}
+#endif
+#endif
+
+///////////////////////// NumpyImportUFunc.init ////////////////////
+
+// Unlike import_array, this is generated by the @cython.ufunc decorator
+// so we're confident the right headers are present and don't need to override them
+
+{
+    // NO_IMPORT_UFUNC is Numpy's mechanism for indicating that import_umath is handled elsewhere
+#ifndef NO_IMPORT_UFUNC /* https://numpy.org/doc/stable/reference/c-api/ufunc.html#c.NO_IMPORT_UFUNC */
+    if (unlikely(_import_umath() == -1)) {
+        PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import "
+        "(auto-generated by @cython.ufunc).");
+    }
+#else
+    if ((0)) {}
+#endif
+    // NO_IMPORT_ARRAY is Numpy's mechanism for indicating that import_array is handled elsewhere
+#ifndef NO_IMPORT_ARRAY /* https://numpy.org/doc/stable/reference/c-api/array.html#c.NO_IMPORT_ARRAY */
+    else if (unlikely(_import_array() == -1)) {
+        PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import "
+        "(auto-generated by @cython.ufunc).");
+    }
+#endif
+}
+
+
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ObjectHandling.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ObjectHandling.c
new file mode 100644
index 0000000000000000000000000000000000000000..b0a8554a7c5f85e58d8a1e1d4ee3953e55a00ce1
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/ObjectHandling.c
@@ -0,0 +1,3266 @@
+/*
+ * General object operations and protocol implementations,
+ * including their specialisations for certain builtins.
+ *
+ * Optional optimisations for builtins are in Optimize.c.
+ *
+ * Required replacements of builtins are in Builtins.c.
+ */
+
+/////////////// RaiseNoneIterError.proto ///////////////
+
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
+
+/////////////// RaiseNoneIterError ///////////////
+
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
+    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+}
+
+/////////////// RaiseTooManyValuesToUnpack.proto ///////////////
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+/////////////// RaiseTooManyValuesToUnpack ///////////////
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+    PyErr_Format(PyExc_ValueError,
+                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
+}
+
+/////////////// RaiseNeedMoreValuesToUnpack.proto ///////////////
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+/////////////// RaiseNeedMoreValuesToUnpack ///////////////
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+    PyErr_Format(PyExc_ValueError,
+                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
+                 index, (index == 1) ?
"" : "s"); +} + +/////////////// UnpackTupleError.proto /////////////// + +static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ + +/////////////// UnpackTupleError /////////////// +//@requires: RaiseNoneIterError +//@requires: RaiseNeedMoreValuesToUnpack +//@requires: RaiseTooManyValuesToUnpack + +static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { + if (t == Py_None) { + __Pyx_RaiseNoneNotIterableError(); + } else if (PyTuple_GET_SIZE(t) < index) { + __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); + } else { + __Pyx_RaiseTooManyValuesError(index); + } +} + +/////////////// UnpackItemEndCheck.proto /////////////// + +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/ + +/////////////// UnpackItemEndCheck /////////////// +//@requires: RaiseTooManyValuesToUnpack +//@requires: IterFinish + +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { + if (unlikely(retval)) { + Py_DECREF(retval); + __Pyx_RaiseTooManyValuesError(expected); + return -1; + } + + return __Pyx_IterFinish(); +} + +/////////////// UnpackTuple2.proto /////////////// + +#define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple) \ + (likely(is_tuple || PyTuple_Check(tuple)) ? \ + (likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ? \ + __Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) : \ + (__Pyx_UnpackTupleError(tuple, 2), -1)) : \ + __Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple)) + +static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( + PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple); +static int __Pyx_unpack_tuple2_generic( + PyObject* tuple, PyObject** value1, PyObject** value2, int has_known_size, int decref_tuple); + +/////////////// UnpackTuple2 /////////////// +//@requires: UnpackItemEndCheck +//@requires: UnpackTupleError +//@requires: RaiseNeedMoreValuesToUnpack + +static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( + PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) { + PyObject *value1 = NULL, *value2 = NULL; +#if CYTHON_COMPILING_IN_PYPY + value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad; + value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad; +#else + value1 = PyTuple_GET_ITEM(tuple, 0); Py_INCREF(value1); + value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value2); +#endif + if (decref_tuple) { + Py_DECREF(tuple); + } + + *pvalue1 = value1; + *pvalue2 = value2; + return 0; +#if CYTHON_COMPILING_IN_PYPY +bad: + Py_XDECREF(value1); + Py_XDECREF(value2); + if (decref_tuple) { Py_XDECREF(tuple); } + return -1; +#endif +} + +static int __Pyx_unpack_tuple2_generic(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, + int has_known_size, int decref_tuple) { + Py_ssize_t index; + PyObject *value1 = NULL, *value2 = NULL, *iter = NULL; + iternextfunc iternext; + + iter = PyObject_GetIter(tuple); + if (unlikely(!iter)) goto bad; + if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; } + + iternext = __Pyx_PyObject_GetIterNextFunc(iter); + value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; } + value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; } + if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad; + + Py_DECREF(iter); + *pvalue1 = value1; + *pvalue2 = value2; + return 0; + +unpacking_failed: + if (!has_known_size && __Pyx_IterFinish() == 0) + 
__Pyx_RaiseNeedMoreValuesError(index); +bad: + Py_XDECREF(iter); + Py_XDECREF(value1); + Py_XDECREF(value2); + if (decref_tuple) { Py_XDECREF(tuple); } + return -1; +} + + +/////////////// IterNext.proto /////////////// + +#define __Pyx_PyIter_Next(obj) __Pyx_PyIter_Next2(obj, NULL) +static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject *, PyObject *); /*proto*/ + +/////////////// IterNext /////////////// +//@requires: Exceptions.c::PyThreadStateGet +//@requires: Exceptions.c::PyErrFetchRestore + +static PyObject *__Pyx_PyIter_Next2Default(PyObject* defval) { + PyObject* exc_type; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + exc_type = __Pyx_PyErr_CurrentExceptionType(); + if (unlikely(exc_type)) { + if (!defval || unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(defval); + return defval; + } + if (defval) { + Py_INCREF(defval); + return defval; + } + __Pyx_PyErr_SetNone(PyExc_StopIteration); + return NULL; +} + +static void __Pyx_PyIter_Next_ErrorNoIterator(PyObject *iterator) { + __Pyx_TypeName iterator_type_name = __Pyx_PyType_GetName(Py_TYPE(iterator)); + PyErr_Format(PyExc_TypeError, + __Pyx_FMT_TYPENAME " object is not an iterator", iterator_type_name); + __Pyx_DECREF_TypeName(iterator_type_name); +} + +// originally copied from Py3's builtin_next() +static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject* defval) { + PyObject* next; + // We always do a quick slot check because calling PyIter_Check() is so wasteful. + iternextfunc iternext = Py_TYPE(iterator)->tp_iternext; + if (likely(iternext)) { +#if CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY + next = iternext(iterator); + if (likely(next)) + return next; +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + if (unlikely(iternext == &_PyObject_NextNotImplemented)) + return NULL; +#endif +#else + // Since the slot was set, assume that PyIter_Next() will likely succeed, and properly fail otherwise. + // Note: PyIter_Next() crashes in CPython if "tp_iternext" is NULL. + next = PyIter_Next(iterator); + if (likely(next)) + return next; +#endif + } else if (CYTHON_USE_TYPE_SLOTS || unlikely(!PyIter_Check(iterator))) { + // If CYTHON_USE_TYPE_SLOTS, then the slot was not set and we don't have an iterable. + // Otherwise, don't trust "tp_iternext" and rely on PyIter_Check(). + __Pyx_PyIter_Next_ErrorNoIterator(iterator); + return NULL; + } +#if !CYTHON_USE_TYPE_SLOTS + else { + // We have an iterator with an empty "tp_iternext", but didn't call next() on it yet. + next = PyIter_Next(iterator); + if (likely(next)) + return next; + } +#endif + return __Pyx_PyIter_Next2Default(defval); +} + +/////////////// IterFinish.proto /////////////// + +static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/ + +/////////////// IterFinish /////////////// +//@requires: Exceptions.c::PyThreadStateGet +//@requires: Exceptions.c::PyErrFetchRestore + +// When PyIter_Next(iter) has returned NULL in order to signal termination, +// this function does the right cleanup and returns 0 on success. If it +// detects an error that occurred in the iterator, it returns -1. 
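+// Typical caller pattern (sketch):
+//     while ((item = PyIter_Next(iter))) { /* ... use item ... */ Py_DECREF(item); }
+//     if (unlikely(__Pyx_IterFinish() == -1)) goto bad;  // a real error, not StopIteration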
+ +static CYTHON_INLINE int __Pyx_IterFinish(void) { + PyObject* exc_type; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + exc_type = __Pyx_PyErr_CurrentExceptionType(); + if (unlikely(exc_type)) { + if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) + return -1; + __Pyx_PyErr_Clear(); + return 0; + } + return 0; +} + + +/////////////// ObjectGetItem.proto /////////////// + +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key);/*proto*/ +#else +#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) +#endif + +/////////////// ObjectGetItem /////////////// +// //@requires: GetItemInt - added in IndexNode as it uses templating. +//@requires: PyObjectGetAttrStrNoError +//@requires: PyObjectCallOneArg + +#if CYTHON_USE_TYPE_SLOTS +static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject *index) { + // Get element from sequence object `obj` at index `index`. + PyObject *runerr = NULL; + Py_ssize_t key_value; + key_value = __Pyx_PyIndex_AsSsize_t(index); + if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { + return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); + } + + // Error handling code -- only manage OverflowError differently. + if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { + __Pyx_TypeName index_type_name = __Pyx_PyType_GetName(Py_TYPE(index)); + PyErr_Clear(); + PyErr_Format(PyExc_IndexError, + "cannot fit '" __Pyx_FMT_TYPENAME "' into an index-sized integer", index_type_name); + __Pyx_DECREF_TypeName(index_type_name); + } + return NULL; +} + +static PyObject *__Pyx_PyObject_GetItem_Slow(PyObject *obj, PyObject *key) { + __Pyx_TypeName obj_type_name; + // Handles less common slow-path checks for GetItem + if (likely(PyType_Check(obj))) { + PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(obj, PYIDENT("__class_getitem__")); + if (!meth) { + PyErr_Clear(); + } else { + PyObject *result = __Pyx_PyObject_CallOneArg(meth, key); + Py_DECREF(meth); + return result; + } + } + + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'" __Pyx_FMT_TYPENAME "' object is not subscriptable", obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return NULL; +} + +static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key) { + PyTypeObject *tp = Py_TYPE(obj); + PyMappingMethods *mm = tp->tp_as_mapping; + PySequenceMethods *sm = tp->tp_as_sequence; + + if (likely(mm && mm->mp_subscript)) { + return mm->mp_subscript(obj, key); + } + if (likely(sm && sm->sq_item)) { + return __Pyx_PyObject_GetIndex(obj, key); + } + return __Pyx_PyObject_GetItem_Slow(obj, key); +} +#endif + + +/////////////// DictGetItem.proto /////////////// + +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);/*proto*/ + +#define __Pyx_PyObject_Dict_GetItem(obj, name) \ + (likely(PyDict_CheckExact(obj)) ? 
\ + __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) + +#else +#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) +#endif + +/////////////// DictGetItem /////////////// + +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { + PyObject *value; + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (!PyErr_Occurred()) { + if (unlikely(PyTuple_Check(key))) { + // CPython interprets tuples as separate arguments => must wrap them in another tuple. + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) { + PyErr_SetObject(PyExc_KeyError, args); + Py_DECREF(args); + } + } else { + // Avoid tuple packing if possible. + PyErr_SetObject(PyExc_KeyError, key); + } + } + return NULL; + } + Py_INCREF(value); + return value; +} +#endif + +/////////////// GetItemInt.proto /////////////// +//@substitute: tempita + +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) + +{{for type in ['List', 'Tuple']}} +#define __Pyx_GetItemInt_{{type}}(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ + __Pyx_GetItemInt_{{type}}_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \ + (PyErr_SetString(PyExc_IndexError, "{{ type.lower() }} index out of range"), (PyObject*)NULL)) + +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_{{type}}_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +{{endfor}} + +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/////////////// GetItemInt /////////////// +//@substitute: tempita + +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (unlikely(!j)) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} + +{{for type in ['List', 'Tuple']}} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_{{type}}_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += Py{{type}}_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, Py{{type}}_GET_SIZE(o)))) { + PyObject *r = Py{{type}}_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +{{endfor}} + +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } else { + // inlined PySequence_GetItem() + special cased length overflow + PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; + PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; + if (mm && mm->mp_subscript) { + PyObject *r, *key = PyInt_FromSsize_t(i); + if (unlikely(!key)) return NULL; + r = mm->mp_subscript(o, key); + Py_DECREF(key); + return r; + } + if (likely(sm && sm->sq_item)) { + if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { + Py_ssize_t l = sm->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + // if length > max(Py_ssize_t), maybe the object can wrap around itself? + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return NULL; + PyErr_Clear(); + } + } + return sm->sq_item(o, i); + } + } +#else + // PySequence_GetItem behaves differently to PyObject_GetItem for i<0 + // and possibly some other cases so can't generally be substituted + if (is_list || !PyMapping_Check(o)) { + return PySequence_GetItem(o, i); + } +#endif + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +} + +/////////////// SetItemInt.proto /////////////// + +#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ + __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) : \ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) : \ + __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) + +static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); +static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, + int is_list, int wraparound, int boundscheck); + +/////////////// SetItemInt /////////////// + +static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { + int r; + if (unlikely(!j)) return -1; + r = PyObject_SetItem(o, j, v); + Py_DECREF(j); + return r; +} + +static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, + CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o)); + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) { + PyObject* old = PyList_GET_ITEM(o, n); + Py_INCREF(v); + PyList_SET_ITEM(o, n, v); + Py_DECREF(old); + return 1; + } + } else { + // inlined PySequence_SetItem() + special cased length overflow + PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; + PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; + if (mm && mm->mp_ass_subscript) { + int r; + PyObject *key = PyInt_FromSsize_t(i); + if (unlikely(!key)) return -1; + r = mm->mp_ass_subscript(o, key, v); + Py_DECREF(key); + return r; + } + if (likely(sm && sm->sq_ass_item)) { + if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { + Py_ssize_t l = sm->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + // if length > max(Py_ssize_t), maybe the object can wrap around itself? + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return -1; + PyErr_Clear(); + } + } + return sm->sq_ass_item(o, i, v); + } + } +#else + // PySequence_SetItem behaves differently to PyObject_SetItem for i<0 + // and possibly some other cases so can't generally be substituted + if (is_list || !PyMapping_Check(o)) + { + return PySequence_SetItem(o, i, v); + } +#endif + return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); +} + + +/////////////// DelItemInt.proto /////////////// + +#define __Pyx_DelItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ + __Pyx_DelItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound) : \ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) : \ + __Pyx_DelItem_Generic(o, to_py_func(i)))) + +static int __Pyx_DelItem_Generic(PyObject *o, PyObject *j); +static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound); + +/////////////// DelItemInt /////////////// + +static int __Pyx_DelItem_Generic(PyObject *o, PyObject *j) { + int r; + if (unlikely(!j)) return -1; + r = PyObject_DelItem(o, j); + Py_DECREF(j); + return r; +} + +static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, CYTHON_NCP_UNUSED int wraparound) { +#if !CYTHON_USE_TYPE_SLOTS + // PySequence_DelItem behaves differently to PyObject_DelItem for i<0 + // and possibly some other cases so can't generally be substituted + if (is_list || !PyMapping_Check(o)) { + return PySequence_DelItem(o, i); + } +#else + // inlined PySequence_DelItem() + special cased length overflow + PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; + PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; + if ((!is_list) && mm && mm->mp_ass_subscript) { + PyObject *key = PyInt_FromSsize_t(i); + return likely(key) ? mm->mp_ass_subscript(o, key, (PyObject *)NULL) : -1; + } + if (likely(sm && sm->sq_ass_item)) { + if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { + Py_ssize_t l = sm->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + // if length > max(Py_ssize_t), maybe the object can wrap around itself? 
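+                // (sq_length failed: if it raised OverflowError, the length merely
+                // does not fit into Py_ssize_t; clear the error and pass the
+                // negative index through to sq_ass_item unchanged.)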
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return -1; + PyErr_Clear(); + } + } + return sm->sq_ass_item(o, i, (PyObject *)NULL); + } +#endif + return __Pyx_DelItem_Generic(o, PyInt_FromSsize_t(i)); +} + + +/////////////// SliceObject.proto /////////////// + +// we pass pointer addresses to show the C compiler what is NULL and what isn't +{{if access == 'Get'}} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( + PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, + PyObject** py_start, PyObject** py_stop, PyObject** py_slice, + int has_cstart, int has_cstop, int wraparound); +{{else}} +#define __Pyx_PyObject_DelSlice(obj, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound) \ + __Pyx_PyObject_SetSlice(obj, (PyObject*)NULL, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound) + +// we pass pointer addresses to show the C compiler what is NULL and what isn't +static CYTHON_INLINE int __Pyx_PyObject_SetSlice( + PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop, + PyObject** py_start, PyObject** py_stop, PyObject** py_slice, + int has_cstart, int has_cstop, int wraparound); +{{endif}} + +/////////////// SliceObject /////////////// + +{{if access == 'Get'}} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, +{{else}} +static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value, +{{endif}} + Py_ssize_t cstart, Py_ssize_t cstop, + PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, + int has_cstart, int has_cstop, int wraparound) { + __Pyx_TypeName obj_type_name; +#if CYTHON_USE_TYPE_SLOTS + PyMappingMethods* mp; +#if PY_MAJOR_VERSION < 3 + PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; + if (likely(ms && ms->sq_{{if access == 'Set'}}ass_{{endif}}slice)) { + if (!has_cstart) { + if (_py_start && (*_py_start != Py_None)) { + cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); + if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; + } else + cstart = 0; + } + if (!has_cstop) { + if (_py_stop && (*_py_stop != Py_None)) { + cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); + if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; + } else + cstop = PY_SSIZE_T_MAX; + } + if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { + Py_ssize_t l = ms->sq_length(obj); + if (likely(l >= 0)) { + if (cstop < 0) { + cstop += l; + if (cstop < 0) cstop = 0; + } + if (cstart < 0) { + cstart += l; + if (cstart < 0) cstart = 0; + } + } else { + // if length > max(Py_ssize_t), maybe the object can wrap around itself? 
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + goto bad; + PyErr_Clear(); + } + } +{{if access == 'Get'}} + return ms->sq_slice(obj, cstart, cstop); +{{else}} + return ms->sq_ass_slice(obj, cstart, cstop, value); +{{endif}} + } +#else + CYTHON_UNUSED_VAR(wraparound); +#endif + + mp = Py_TYPE(obj)->tp_as_mapping; +{{if access == 'Get'}} + if (likely(mp && mp->mp_subscript)) +{{else}} + if (likely(mp && mp->mp_ass_subscript)) +{{endif}} +#else + CYTHON_UNUSED_VAR(wraparound); +#endif + { + {{if access == 'Get'}}PyObject*{{else}}int{{endif}} result; + PyObject *py_slice, *py_start, *py_stop; + if (_py_slice) { + py_slice = *_py_slice; + } else { + PyObject* owned_start = NULL; + PyObject* owned_stop = NULL; + if (_py_start) { + py_start = *_py_start; + } else { + if (has_cstart) { + owned_start = py_start = PyInt_FromSsize_t(cstart); + if (unlikely(!py_start)) goto bad; + } else + py_start = Py_None; + } + if (_py_stop) { + py_stop = *_py_stop; + } else { + if (has_cstop) { + owned_stop = py_stop = PyInt_FromSsize_t(cstop); + if (unlikely(!py_stop)) { + Py_XDECREF(owned_start); + goto bad; + } + } else + py_stop = Py_None; + } + py_slice = PySlice_New(py_start, py_stop, Py_None); + Py_XDECREF(owned_start); + Py_XDECREF(owned_stop); + if (unlikely(!py_slice)) goto bad; + } +#if CYTHON_USE_TYPE_SLOTS +{{if access == 'Get'}} + result = mp->mp_subscript(obj, py_slice); +#else + result = PyObject_GetItem(obj, py_slice); +{{else}} + result = mp->mp_ass_subscript(obj, py_slice, value); +#else + result = value ? PyObject_SetItem(obj, py_slice, value) : PyObject_DelItem(obj, py_slice); +{{endif}} +#endif + if (!_py_slice) { + Py_DECREF(py_slice); + } + return result; + } + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, +{{if access == 'Get'}} + "'" __Pyx_FMT_TYPENAME "' object is unsliceable", obj_type_name); +{{else}} + "'" __Pyx_FMT_TYPENAME "' object does not support slice %.10s", + obj_type_name, value ? 
"assignment" : "deletion"); +{{endif}} + __Pyx_DECREF_TypeName(obj_type_name); + +bad: + return {{if access == 'Get'}}NULL{{else}}-1{{endif}}; +} + + +/////////////// TupleAndListFromArray.proto /////////////// + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n); +static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n); +#endif + +/////////////// TupleAndListFromArray /////////////// +//@substitute: naming + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE void __Pyx_copy_object_array(PyObject *const *CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) { + PyObject *v; + Py_ssize_t i; + for (i = 0; i < length; i++) { + v = dest[i] = src[i]; + Py_INCREF(v); + } +} + +static CYTHON_INLINE PyObject * +__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n) +{ + PyObject *res; + if (n <= 0) { + Py_INCREF($empty_tuple); + return $empty_tuple; + } + res = PyTuple_New(n); + if (unlikely(res == NULL)) return NULL; + __Pyx_copy_object_array(src, ((PyTupleObject*)res)->ob_item, n); + return res; +} + +static CYTHON_INLINE PyObject * +__Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n) +{ + PyObject *res; + if (n <= 0) { + return PyList_New(0); + } + res = PyList_New(n); + if (unlikely(res == NULL)) return NULL; + __Pyx_copy_object_array(src, ((PyListObject*)res)->ob_item, n); + return res; +} +#endif + + +/////////////// SliceTupleAndList.proto /////////////// + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyList_GetSlice(PyObject* src, Py_ssize_t start, Py_ssize_t stop); +static CYTHON_INLINE PyObject* __Pyx_PyTuple_GetSlice(PyObject* src, Py_ssize_t start, Py_ssize_t stop); +#else +#define __Pyx_PyList_GetSlice(seq, start, stop) PySequence_GetSlice(seq, start, stop) +#define __Pyx_PyTuple_GetSlice(seq, start, stop) PySequence_GetSlice(seq, start, stop) +#endif + +/////////////// SliceTupleAndList /////////////// +//@requires: TupleAndListFromArray +//@substitute: tempita + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE void __Pyx_crop_slice(Py_ssize_t* _start, Py_ssize_t* _stop, Py_ssize_t* _length) { + Py_ssize_t start = *_start, stop = *_stop, length = *_length; + if (start < 0) { + start += length; + if (start < 0) + start = 0; + } + + if (stop < 0) + stop += length; + else if (stop > length) + stop = length; + + *_length = stop - start; + *_start = start; + *_stop = stop; +} + +{{for type in ['List', 'Tuple']}} +static CYTHON_INLINE PyObject* __Pyx_Py{{type}}_GetSlice( + PyObject* src, Py_ssize_t start, Py_ssize_t stop) { + Py_ssize_t length = Py{{type}}_GET_SIZE(src); + __Pyx_crop_slice(&start, &stop, &length); +{{if type=='List'}} + if (length <= 0) { + // Avoid undefined behaviour when accessing `ob_item` of an empty list. 
+ return PyList_New(0); + } +{{endif}} + return __Pyx_Py{{type}}_FromArray(((Py{{type}}Object*)src)->ob_item + start, length); +} +{{endfor}} +#endif + + +/////////////// CalculateMetaclass.proto /////////////// + +static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); + +/////////////// CalculateMetaclass /////////////// + +static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { + Py_ssize_t i, nbases; +#if CYTHON_ASSUME_SAFE_MACROS + nbases = PyTuple_GET_SIZE(bases); +#else + nbases = PyTuple_Size(bases); + if (nbases < 0) return NULL; +#endif + for (i=0; i < nbases; i++) { + PyTypeObject *tmptype; +#if CYTHON_ASSUME_SAFE_MACROS + PyObject *tmp = PyTuple_GET_ITEM(bases, i); +#else + PyObject *tmp = PyTuple_GetItem(bases, i); + if (!tmp) return NULL; +#endif + tmptype = Py_TYPE(tmp); +#if PY_MAJOR_VERSION < 3 + if (tmptype == &PyClass_Type) + continue; +#endif + if (!metaclass) { + metaclass = tmptype; + continue; + } + if (PyType_IsSubtype(metaclass, tmptype)) + continue; + if (PyType_IsSubtype(tmptype, metaclass)) { + metaclass = tmptype; + continue; + } + // else: + PyErr_SetString(PyExc_TypeError, + "metaclass conflict: " + "the metaclass of a derived class " + "must be a (non-strict) subclass " + "of the metaclasses of all its bases"); + return NULL; + } + if (!metaclass) { +#if PY_MAJOR_VERSION < 3 + metaclass = &PyClass_Type; +#else + metaclass = &PyType_Type; +#endif + } + // make owned reference + Py_INCREF((PyObject*) metaclass); + return (PyObject*) metaclass; +} + + +/////////////// FindInheritedMetaclass.proto /////////////// + +static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases); /*proto*/ + +/////////////// FindInheritedMetaclass /////////////// +//@requires: PyObjectGetAttrStr +//@requires: CalculateMetaclass + +static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases) { + PyObject *metaclass; + if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) { + PyTypeObject *metatype; +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyObject *base = PyTuple_GET_ITEM(bases, 0); +#else + PyObject *base = PySequence_ITEM(bases, 0); +#endif +#if PY_MAJOR_VERSION < 3 + PyObject* basetype = __Pyx_PyObject_GetAttrStr(base, PYIDENT("__class__")); + if (basetype) { + metatype = (PyType_Check(basetype)) ? ((PyTypeObject*) basetype) : NULL; + } else { + PyErr_Clear(); + metatype = Py_TYPE(base); + basetype = (PyObject*) metatype; + Py_INCREF(basetype); + } +#else + metatype = Py_TYPE(base); +#endif + metaclass = __Pyx_CalculateMetaclass(metatype, bases); +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(base); +#endif +#if PY_MAJOR_VERSION < 3 + Py_DECREF(basetype); +#endif + } else { + // no bases => use default metaclass +#if PY_MAJOR_VERSION < 3 + metaclass = (PyObject *) &PyClass_Type; +#else + metaclass = (PyObject *) &PyType_Type; +#endif + Py_INCREF(metaclass); + } + return metaclass; +} + +/////////////// Py3MetaclassGet.proto /////////////// + +static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw); /*proto*/ + +/////////////// Py3MetaclassGet /////////////// +//@requires: FindInheritedMetaclass +//@requires: CalculateMetaclass + +static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw) { + PyObject *metaclass = mkw ? 
__Pyx_PyDict_GetItemStr(mkw, PYIDENT("metaclass")) : NULL; + if (metaclass) { + Py_INCREF(metaclass); + if (PyDict_DelItem(mkw, PYIDENT("metaclass")) < 0) { + Py_DECREF(metaclass); + return NULL; + } + if (PyType_Check(metaclass)) { + PyObject* orig = metaclass; + metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); + Py_DECREF(orig); + } + return metaclass; + } + return __Pyx_FindInheritedMetaclass(bases); +} + +/////////////// CreateClass.proto /////////////// + +static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, + PyObject *qualname, PyObject *modname); /*proto*/ + +/////////////// CreateClass /////////////// +//@requires: FindInheritedMetaclass +//@requires: CalculateMetaclass + +static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, + PyObject *qualname, PyObject *modname) { + PyObject *result; + PyObject *metaclass; + + if (unlikely(PyDict_SetItem(dict, PYIDENT("__module__"), modname) < 0)) + return NULL; +#if PY_VERSION_HEX >= 0x03030000 + if (unlikely(PyDict_SetItem(dict, PYIDENT("__qualname__"), qualname) < 0)) + return NULL; +#else + CYTHON_MAYBE_UNUSED_VAR(qualname); +#endif + + /* Python2 __metaclass__ */ + metaclass = __Pyx_PyDict_GetItemStr(dict, PYIDENT("__metaclass__")); + if (metaclass) { + Py_INCREF(metaclass); + if (PyType_Check(metaclass)) { + PyObject* orig = metaclass; + metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); + Py_DECREF(orig); + } + } else { + metaclass = __Pyx_FindInheritedMetaclass(bases); + } + if (unlikely(!metaclass)) + return NULL; + result = PyObject_CallFunctionObjArgs(metaclass, name, bases, dict, NULL); + Py_DECREF(metaclass); + return result; +} + +/////////////// Py3UpdateBases.proto /////////////// + +static PyObject* __Pyx_PEP560_update_bases(PyObject *bases); /* proto */ + +/////////////// Py3UpdateBases ///////////////////// +//@requires: PyObjectCallOneArg +//@requires: PyObjectGetAttrStrNoError + +/* Shamelessly adapted from cpython/bltinmodule.c update_bases */ +static PyObject* +__Pyx_PEP560_update_bases(PyObject *bases) +{ + Py_ssize_t i, j, size_bases; + PyObject *base, *meth, *new_base, *result, *new_bases = NULL; + /*assert(PyTuple_Check(bases));*/ + + size_bases = PyTuple_GET_SIZE(bases); + for (i = 0; i < size_bases; i++) { + // original code in CPython: base = args[i]; + base = PyTuple_GET_ITEM(bases, i); + if (PyType_Check(base)) { + if (new_bases) { + // If we already have made a replacement, then we append every normal base, + // otherwise just skip it. + if (PyList_Append(new_bases, base) < 0) { + goto error; + } + } + continue; + } + // original code in CPython: + // if (_PyObject_LookupAttrId(base, &PyId___mro_entries__, &meth) < 0) { + meth = __Pyx_PyObject_GetAttrStrNoError(base, PYIDENT("__mro_entries__")); + if (!meth && PyErr_Occurred()) { + goto error; + } + if (!meth) { + if (new_bases) { + if (PyList_Append(new_bases, base) < 0) { + goto error; + } + } + continue; + } + new_base = __Pyx_PyObject_CallOneArg(meth, bases); + Py_DECREF(meth); + if (!new_base) { + goto error; + } + if (!PyTuple_Check(new_base)) { + PyErr_SetString(PyExc_TypeError, + "__mro_entries__ must return a tuple"); + Py_DECREF(new_base); + goto error; + } + if (!new_bases) { + // If this is a first successful replacement, create new_bases list and + // copy previously encountered bases. 
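+ // Illustration (hypothetical names): for bases == (A, G, B) where only G has + // __mro_entries__, new_bases becomes [A] here, G's replacement tuple is spliced + // in by the PyList_SetSlice() below, and B is appended by the PyList_Append() + // branch above on a later iteration.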
+ if (!(new_bases = PyList_New(i))) { + goto error; + } + for (j = 0; j < i; j++) { + // original code in CPython: base = args[j]; + base = PyTuple_GET_ITEM(bases, j); + PyList_SET_ITEM(new_bases, j, base); + Py_INCREF(base); + } + } + j = PyList_GET_SIZE(new_bases); + if (PyList_SetSlice(new_bases, j, j, new_base) < 0) { + goto error; + } + Py_DECREF(new_base); + } + if (!new_bases) { + // unlike the CPython implementation, always return a new reference + Py_INCREF(bases); + return bases; + } + result = PyList_AsTuple(new_bases); + Py_DECREF(new_bases); + return result; + +error: + Py_XDECREF(new_bases); + return NULL; +} + +/////////////// Py3ClassCreate.proto /////////////// + +static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, + PyObject *mkw, PyObject *modname, PyObject *doc); /*proto*/ +static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, + PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); /*proto*/ + +/////////////// Py3ClassCreate /////////////// +//@substitute: naming +//@requires: PyObjectGetAttrStrNoError +//@requires: CalculateMetaclass +//@requires: PyObjectFastCall +//@requires: PyObjectCall2Args +//@requires: PyObjectLookupSpecial +// only in fallback code: +//@requires: GetBuiltinName + +static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, + PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { + PyObject *ns; + if (metaclass) { + PyObject *prep = __Pyx_PyObject_GetAttrStrNoError(metaclass, PYIDENT("__prepare__")); + if (prep) { + PyObject *pargs[3] = {NULL, name, bases}; + ns = __Pyx_PyObject_FastCallDict(prep, pargs+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, mkw); + Py_DECREF(prep); + } else { + if (unlikely(PyErr_Occurred())) + return NULL; + ns = PyDict_New(); + } + } else { + ns = PyDict_New(); + } + + if (unlikely(!ns)) + return NULL; + + /* Required here to emulate assignment order */ + if (unlikely(PyObject_SetItem(ns, PYIDENT("__module__"), modname) < 0)) goto bad; +#if PY_VERSION_HEX >= 0x03030000 + if (unlikely(PyObject_SetItem(ns, PYIDENT("__qualname__"), qualname) < 0)) goto bad; +#else + CYTHON_MAYBE_UNUSED_VAR(qualname); +#endif + if (unlikely(doc && PyObject_SetItem(ns, PYIDENT("__doc__"), doc) < 0)) goto bad; + return ns; +bad: + Py_DECREF(ns); + return NULL; +} + +#if PY_VERSION_HEX < 0x030600A4 && CYTHON_PEP487_INIT_SUBCLASS +// https://www.python.org/dev/peps/pep-0487/ +static int __Pyx_SetNamesPEP487(PyObject *type_obj) { + PyTypeObject *type = (PyTypeObject*) type_obj; + PyObject *names_to_set, *key, *value, *set_name, *tmp; + Py_ssize_t i = 0; + +#if CYTHON_USE_TYPE_SLOTS + names_to_set = PyDict_Copy(type->tp_dict); +#else + { + PyObject *d = PyObject_GetAttr(type_obj, PYIDENT("__dict__")); + names_to_set = NULL; + if (likely(d)) { + // d may not be a dict, e.g. PyDictProxy in PyPy2. + int ret; + names_to_set = PyDict_New(); + ret = likely(names_to_set) ? 
PyDict_Update(names_to_set, d) : -1; + Py_DECREF(d); + if (unlikely(ret < 0)) + Py_CLEAR(names_to_set); + } + } +#endif + if (unlikely(names_to_set == NULL)) + goto bad; + + while (PyDict_Next(names_to_set, &i, &key, &value)) { + set_name = __Pyx_PyObject_LookupSpecialNoError(value, PYIDENT("__set_name__")); + if (unlikely(set_name != NULL)) { + tmp = __Pyx_PyObject_Call2Args(set_name, type_obj, key); + Py_DECREF(set_name); + if (unlikely(tmp == NULL)) { + __Pyx_TypeName value_type_name = + __Pyx_PyType_GetName(Py_TYPE(value)); + __Pyx_TypeName type_name = __Pyx_PyType_GetName(type); + PyErr_Format(PyExc_RuntimeError, +#if PY_MAJOR_VERSION >= 3 + "Error calling __set_name__ on '" __Pyx_FMT_TYPENAME "' instance %R " "in '" __Pyx_FMT_TYPENAME "'", + value_type_name, key, type_name); +#else + "Error calling __set_name__ on '" __Pyx_FMT_TYPENAME "' instance %.100s in '" __Pyx_FMT_TYPENAME "'", + value_type_name, + PyString_Check(key) ? PyString_AS_STRING(key) : "?", + type_name); +#endif + goto bad; + } else { + Py_DECREF(tmp); + } + } + else if (unlikely(PyErr_Occurred())) { + goto bad; + } + } + + Py_DECREF(names_to_set); + return 0; +bad: + Py_XDECREF(names_to_set); + return -1; +} + +static PyObject *__Pyx_InitSubclassPEP487(PyObject *type_obj, PyObject *mkw) { +#if CYTHON_USE_TYPE_SLOTS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +// Stripped-down version of "super(type_obj, type_obj).__init_subclass__(**mkw)" in CPython 3.8. + PyTypeObject *type = (PyTypeObject*) type_obj; + PyObject *mro = type->tp_mro; + Py_ssize_t i, nbases; + if (unlikely(!mro)) goto done; + + // avoid "unused" warning + (void) &__Pyx_GetBuiltinName; + + Py_INCREF(mro); + nbases = PyTuple_GET_SIZE(mro); + + // Skip over the type itself and 'object'. + assert(PyTuple_GET_ITEM(mro, 0) == type_obj); + for (i = 1; i < nbases-1; i++) { + PyObject *base, *dict, *meth; + base = PyTuple_GET_ITEM(mro, i); + dict = ((PyTypeObject *)base)->tp_dict; + meth = __Pyx_PyDict_GetItemStrWithError(dict, PYIDENT("__init_subclass__")); + if (unlikely(meth)) { + descrgetfunc f = Py_TYPE(meth)->tp_descr_get; + PyObject *res; + Py_INCREF(meth); + if (likely(f)) { + res = f(meth, NULL, type_obj); + Py_DECREF(meth); + if (unlikely(!res)) goto bad; + meth = res; + } + res = __Pyx_PyObject_FastCallDict(meth, NULL, 0, mkw); + Py_DECREF(meth); + if (unlikely(!res)) goto bad; + Py_DECREF(res); + goto done; + } else if (unlikely(PyErr_Occurred())) { + goto bad; + } + } + +done: + Py_XDECREF(mro); + return type_obj; + +bad: + Py_XDECREF(mro); + Py_DECREF(type_obj); + return NULL; + +// CYTHON_USE_TYPE_SLOTS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#else +// Generic fallback: "super(type_obj, type_obj).__init_subclass__(**mkw)", as used in CPython 3.8. + PyObject *super_type, *super, *func, *res; + +#if CYTHON_COMPILING_IN_PYPY && !defined(PySuper_Type) + super_type = __Pyx_GetBuiltinName(PYIDENT("super")); +#else + super_type = (PyObject*) &PySuper_Type; + // avoid "unused" warning + (void) &__Pyx_GetBuiltinName; +#endif + super = likely(super_type) ? 
__Pyx_PyObject_Call2Args(super_type, type_obj, type_obj) : NULL; +#if CYTHON_COMPILING_IN_PYPY && !defined(PySuper_Type) + Py_XDECREF(super_type); +#endif + if (unlikely(!super)) { + Py_CLEAR(type_obj); + goto done; + } + func = __Pyx_PyObject_GetAttrStrNoError(super, PYIDENT("__init_subclass__")); + Py_DECREF(super); + if (likely(!func)) { + if (unlikely(PyErr_Occurred())) + Py_CLEAR(type_obj); + goto done; + } + res = __Pyx_PyObject_FastCallDict(func, NULL, 0, mkw); + Py_DECREF(func); + if (unlikely(!res)) + Py_CLEAR(type_obj); + Py_XDECREF(res); +done: + return type_obj; +#endif +} + +// PY_VERSION_HEX < 0x030600A4 && CYTHON_PEP487_INIT_SUBCLASS +#endif + +static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, + PyObject *dict, PyObject *mkw, + int calculate_metaclass, int allow_py2_metaclass) { + PyObject *result; + PyObject *owned_metaclass = NULL; + PyObject *margs[4] = {NULL, name, bases, dict}; + if (allow_py2_metaclass) { + /* honour Python2 __metaclass__ for backward compatibility */ + owned_metaclass = PyObject_GetItem(dict, PYIDENT("__metaclass__")); + if (owned_metaclass) { + metaclass = owned_metaclass; + } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { + PyErr_Clear(); + } else { + return NULL; + } + } + if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { + metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); + Py_XDECREF(owned_metaclass); + if (unlikely(!metaclass)) + return NULL; + owned_metaclass = metaclass; + } + result = __Pyx_PyObject_FastCallDict(metaclass, margs+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, +#if PY_VERSION_HEX < 0x030600A4 + // Before PEP-487, type(a,b,c) did not accept any keyword arguments, so guard at least against that case. + (metaclass == (PyObject*)&PyType_Type) ? NULL : mkw +#else + mkw +#endif + ); + Py_XDECREF(owned_metaclass); + +#if PY_VERSION_HEX < 0x030600A4 && CYTHON_PEP487_INIT_SUBCLASS + if (likely(result) && likely(PyType_Check(result))) { + if (unlikely(__Pyx_SetNamesPEP487(result) < 0)) { + Py_CLEAR(result); + } else { + result = __Pyx_InitSubclassPEP487(result, mkw); + } + } +#else + // avoid "unused" warning + (void) &__Pyx_GetBuiltinName; +#endif + return result; +} + +/////////////// ExtTypeTest.proto /////////////// + +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ + +/////////////// ExtTypeTest /////////////// + +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + __Pyx_TypeName obj_type_name; + __Pyx_TypeName type_name; + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + type_name = __Pyx_PyType_GetName(type); + PyErr_Format(PyExc_TypeError, + "Cannot convert " __Pyx_FMT_TYPENAME " to " __Pyx_FMT_TYPENAME, + obj_type_name, type_name); + __Pyx_DECREF_TypeName(obj_type_name); + __Pyx_DECREF_TypeName(type_name); + return 0; +} + +/////////////// CallableCheck.proto /////////////// + +#if CYTHON_USE_TYPE_SLOTS && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyCallable_Check(obj) (Py_TYPE(obj)->tp_call != NULL) +#else +#define __Pyx_PyCallable_Check(obj) PyCallable_Check(obj) +#endif + +/////////////// PyDictContains.proto /////////////// + +static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) { + int result = PyDict_Contains(dict, item); + return unlikely(result < 0) ? 
result : (result == (eq == Py_EQ)); +} + +/////////////// PySetContains.proto /////////////// + +static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq); /* proto */ + +/////////////// PySetContains /////////////// +//@requires: Builtins.c::pyfrozenset_new + +static int __Pyx_PySet_ContainsUnhashable(PyObject *set, PyObject *key) { + int result = -1; + if (PySet_Check(key) && PyErr_ExceptionMatches(PyExc_TypeError)) { + /* Convert key to frozenset */ + PyObject *tmpkey; + PyErr_Clear(); + tmpkey = __Pyx_PyFrozenSet_New(key); + if (tmpkey != NULL) { + result = PySet_Contains(set, tmpkey); + Py_DECREF(tmpkey); + } + } + return result; +} + +static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq) { + int result = PySet_Contains(set, key); + + if (unlikely(result < 0)) { + result = __Pyx_PySet_ContainsUnhashable(set, key); + } + return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); +} + +/////////////// PySequenceContains.proto /////////////// + +static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { + int result = PySequence_Contains(seq, item); + return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); +} + +/////////////// PyBoolOrNullFromLong.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyBoolOrNull_FromLong(long b) { + return unlikely(b < 0) ? NULL : __Pyx_PyBool_FromLong(b); +} + +/////////////// GetBuiltinName.proto /////////////// + +static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/ + +/////////////// GetBuiltinName /////////////// +//@requires: PyObjectGetAttrStrNoError +//@substitute: naming + +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStrNoError($builtins_cname, name); + if (unlikely(!result) && !PyErr_Occurred()) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/////////////// GetNameInClass.proto /////////////// + +#define __Pyx_GetNameInClass(var, nmspace, name) (var) = __Pyx__GetNameInClass(nmspace, name) +static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name); /*proto*/ + +/////////////// GetNameInClass /////////////// +//@requires: GetModuleGlobalName + +static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name) { + PyObject *result; + PyObject *dict; + assert(PyType_Check(nmspace)); +#if CYTHON_USE_TYPE_SLOTS + dict = ((PyTypeObject*)nmspace)->tp_dict; + Py_XINCREF(dict); +#else + dict = PyObject_GetAttr(nmspace, PYIDENT("__dict__")); +#endif + if (likely(dict)) { + result = PyObject_GetItem(dict, name); + Py_DECREF(dict); + if (result) { + return result; + } + } + PyErr_Clear(); + __Pyx_GetModuleGlobalNameUncached(result, name); + return result; +} + + +/////////////// SetNameInClass.proto /////////////// + +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && PY_VERSION_HEX < 0x030d0000 +// Identifier names are always interned and have a pre-calculated hash value. +#define __Pyx_SetNameInClass(ns, name, value) \ + (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) +#elif CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_SetNameInClass(ns, name, value) \ + (likely(PyDict_CheckExact(ns)) ? 
PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) +#else +#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) +#endif + +/////////////// SetNewInClass.proto /////////////// + +static int __Pyx_SetNewInClass(PyObject *ns, PyObject *name, PyObject *value); + +/////////////// SetNewInClass /////////////// +//@requires: SetNameInClass + +// Special-case setting __new__: if it's a Cython function, wrap it in a +// staticmethod. This is similar to what Python does for a Python function +// called __new__. +static int __Pyx_SetNewInClass(PyObject *ns, PyObject *name, PyObject *value) { +#ifdef __Pyx_CyFunction_USED + int ret; + if (__Pyx_CyFunction_Check(value)) { + PyObject *staticnew = PyStaticMethod_New(value); + if (unlikely(!staticnew)) return -1; + ret = __Pyx_SetNameInClass(ns, name, staticnew); + Py_DECREF(staticnew); + return ret; + } +#endif + return __Pyx_SetNameInClass(ns, name, value); +} + + +/////////////// GetModuleGlobalName.proto /////////////// +//@requires: PyDictVersioning +//@substitute: naming + +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) do { \ + static PY_UINT64_T __pyx_dict_version = 0; \ + static PyObject *__pyx_dict_cached_value = NULL; \ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION($moddict_cname))) ? \ + (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) : \ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \ +} while(0) +#define __Pyx_GetModuleGlobalNameUncached(var, name) do { \ + PY_UINT64_T __pyx_dict_version; \ + PyObject *__pyx_dict_cached_value; \ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \ +} while(0) +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); /*proto*/ +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); /*proto*/ +#endif + + +/////////////// GetModuleGlobalName /////////////// +//@requires: GetBuiltinName +//@substitute: naming + +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +// FIXME: clean up the macro guard order here: limited API first, then borrowed refs, then cpython +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && PY_VERSION_HEX < 0x030d0000 + // Identifier names are always interned and have a pre-calculated hash value. 
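+ // Interned identifiers keep their hash in PyASCIIObject.hash, so this lookup skips + // re-hashing; combined with the dict-version cache in the __Pyx_GetModuleGlobalName() + // macro, repeated lookups in an unchanged module dict reduce to one tag comparison.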
+ result = _PyDict_GetItem_KnownHash($moddict_cname, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#elif CYTHON_COMPILING_IN_LIMITED_API + if (unlikely(!$module_cname)) { + return NULL; + } + result = PyObject_GetAttr($module_cname, name); + if (likely(result)) { + return result; + } +#else + result = PyDict_GetItem($moddict_cname, name); + __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem($moddict_cname, name); + __PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +//////////////////// GetAttr.proto //////////////////// + +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /*proto*/ + +//////////////////// GetAttr //////////////////// +//@requires: PyObjectGetAttrStr + +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { +#if CYTHON_USE_TYPE_SLOTS +#if PY_MAJOR_VERSION >= 3 + if (likely(PyUnicode_Check(n))) +#else + if (likely(PyString_Check(n))) +#endif + return __Pyx_PyObject_GetAttrStr(o, n); +#endif + return PyObject_GetAttr(o, n); +} + + +/////////////// PyObjectLookupSpecial.proto /////////////// + +#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS +#define __Pyx_PyObject_LookupSpecialNoError(obj, attr_name) __Pyx__PyObject_LookupSpecial(obj, attr_name, 0) +#define __Pyx_PyObject_LookupSpecial(obj, attr_name) __Pyx__PyObject_LookupSpecial(obj, attr_name, 1) + +static CYTHON_INLINE PyObject* __Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error); /*proto*/ + +#else +#define __Pyx_PyObject_LookupSpecialNoError(o,n) __Pyx_PyObject_GetAttrStrNoError(o,n) +#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n) +#endif + +/////////////// PyObjectLookupSpecial /////////////// +//@requires: PyObjectGetAttrStr +//@requires: PyObjectGetAttrStrNoError + +#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error) { + PyObject *res; + PyTypeObject *tp = Py_TYPE(obj); +#if PY_MAJOR_VERSION < 3 + if (unlikely(PyInstance_Check(obj))) + return with_error ? __Pyx_PyObject_GetAttrStr(obj, attr_name) : __Pyx_PyObject_GetAttrStrNoError(obj, attr_name); +#endif + // adapted from CPython's special_lookup() in ceval.c + res = _PyType_Lookup(tp, attr_name); + if (likely(res)) { + descrgetfunc f = Py_TYPE(res)->tp_descr_get; + if (!f) { + Py_INCREF(res); + } else { + res = f(res, obj, (PyObject *)tp); + } + } else if (with_error) { + PyErr_SetObject(PyExc_AttributeError, attr_name); + } + return res; +} +#endif + + +/////////////// PyObject_GenericGetAttrNoDict.proto /////////////// + +// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7. +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +// No-args macro to allow function pointer assignment. 
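+ // i.e. an object-like macro, so that "tp->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;" still compiles.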
+#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/////////////// PyObject_GenericGetAttrNoDict /////////////// + +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 + +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + __Pyx_TypeName type_name = __Pyx_PyType_GetName(tp); + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", + type_name, attr_name); +#else + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", + type_name, PyString_AS_STRING(attr_name)); +#endif + __Pyx_DECREF_TypeName(type_name); + return NULL; +} + +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + // Copied and adapted from _PyObject_GenericGetAttrWithDict() in CPython 3.6/3.7. + // To be used in the "tp_getattro" slot of extension types that have no instance dict and cannot be subclassed. + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + + if (unlikely(!PyString_Check(attr_name))) { + return PyObject_GenericGetAttr(obj, attr_name); + } + + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + + Py_INCREF(descr); + + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + // Optimise for the non-descriptor case because it is faster. + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + + +/////////////// PyObject_GenericGetAttr.proto /////////////// + +// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7. +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +// No-args macro to allow function pointer assignment. 
+#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + +/////////////// PyObject_GenericGetAttr /////////////// +//@requires: PyObject_GenericGetAttrNoDict + +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + + +/////////////// PyObjectGetAttrStrNoError.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);/*proto*/ + +/////////////// PyObjectGetAttrStrNoError /////////////// +//@requires: PyObjectGetAttrStr +//@requires: Exceptions.c::PyThreadStateGet +//@requires: Exceptions.c::PyErrFetchRestore +//@requires: Exceptions.c::PyErrExceptionMatches + +#if __PYX_LIMITED_VERSION_HEX < 0x030d00A1 +static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +#endif + +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if __PYX_LIMITED_VERSION_HEX >= 0x030d00A1 + (void) PyObject_GetOptionalAttr(obj, attr_name, &result); + return result; +#else +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 + // _PyObject_GenericGetAttrWithDict() in CPython 3.7+ can avoid raising the AttributeError. + // See https://bugs.python.org/issue32544 + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +#endif +} + + +/////////////// PyObjectGetAttrStr.proto /////////////// + +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);/*proto*/ +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/////////////// PyObjectGetAttrStr /////////////// + +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + + +/////////////// PyObjectSetAttrStr.proto /////////////// + +#if CYTHON_USE_TYPE_SLOTS +#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value);/*proto*/ +#else +#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) +#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) +#endif + +/////////////// PyObjectSetAttrStr /////////////// + +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_setattro)) + return tp->tp_setattro(obj, attr_name, value); +#if PY_MAJOR_VERSION < 3 + if 
(likely(tp->tp_setattr)) + return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); +#endif + return PyObject_SetAttr(obj, attr_name, value); +} +#endif + + +/////////////// PyObjectGetMethod.proto /////////////// + +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);/*proto*/ + +/////////////// PyObjectGetMethod /////////////// +//@requires: PyObjectGetAttrStr + +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { + PyObject *attr; +#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP + __Pyx_TypeName type_name; + // Copied from _PyObject_GetMethod() in CPython 3.7 + PyTypeObject *tp = Py_TYPE(obj); + PyObject *descr; + descrgetfunc f = NULL; + PyObject **dictptr, *dict; + int meth_found = 0; + + assert (*method == NULL); + + if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; + } + if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { + return 0; + } + + descr = _PyType_Lookup(tp, name); + if (likely(descr != NULL)) { + Py_INCREF(descr); +#if defined(Py_TPFLAGS_METHOD_DESCRIPTOR) && Py_TPFLAGS_METHOD_DESCRIPTOR + if (__Pyx_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)) +#elif PY_MAJOR_VERSION >= 3 + // Repeating the condition below accommodates for MSVC's inability to test macros inside of macro expansions. + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type))) + #endif +#else + // "PyMethodDescr_Type" is not part of the C-API in Py2. + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr))) + #endif +#endif + { + meth_found = 1; + } else { + f = Py_TYPE(descr)->tp_descr_get; + if (f != NULL && PyDescr_IsData(descr)) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + } + } + + dictptr = _PyObject_GetDictPtr(obj); + if (dictptr != NULL && (dict = *dictptr) != NULL) { + Py_INCREF(dict); + attr = __Pyx_PyDict_GetItemStr(dict, name); + if (attr != NULL) { + Py_INCREF(attr); + Py_DECREF(dict); + Py_XDECREF(descr); + goto try_unpack; + } + Py_DECREF(dict); + } + + if (meth_found) { + *method = descr; + return 1; + } + + if (f != NULL) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + + if (likely(descr != NULL)) { + *method = descr; + return 0; + } + + type_name = __Pyx_PyType_GetName(tp); + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", + type_name, name); +#else + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", + type_name, PyString_AS_STRING(name)); +#endif + __Pyx_DECREF_TypeName(type_name); + return 0; + +// Generic fallback implementation using normal attribute lookup. +#else + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; +#endif + +try_unpack: +#if CYTHON_UNPACK_METHODS + // Even if we failed to avoid creating a bound method object, it's still worth unpacking it now, if possible. 
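+ // Returning the plain function together with a result of 1 tells the caller to pass + // "obj" as the first positional argument itself (much like CPython's LOAD_METHOD + // optimisation), saving the bound-method allocation.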
+ if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { + PyObject *function = PyMethod_GET_FUNCTION(attr); + Py_INCREF(function); + Py_DECREF(attr); + *method = function; + return 1; + } +#endif + *method = attr; + return 0; +} + + +/////////////// UnpackUnboundCMethod.proto /////////////// + +typedef struct { + PyObject *type; + PyObject **method_name; + // "func" is set on first access (direct C function pointer) + PyCFunction func; + // "method" is set on first access (fallback) + PyObject *method; + int flag; +} __Pyx_CachedCFunction; + +/////////////// UnpackUnboundCMethod /////////////// +//@requires: PyObjectGetAttrStr + +static PyObject *__Pyx_SelflessCall(PyObject *method, PyObject *args, PyObject *kwargs) { + // NOTE: possible optimization - use vectorcall + PyObject *result; + PyObject *selfless_args = PyTuple_GetSlice(args, 1, PyTuple_Size(args)); + if (unlikely(!selfless_args)) return NULL; + + result = PyObject_Call(method, selfless_args, kwargs); + Py_DECREF(selfless_args); + return result; +} + +static PyMethodDef __Pyx_UnboundCMethod_Def = { + /* .ml_name = */ "CythonUnboundCMethod", + /* .ml_meth = */ __PYX_REINTERPRET_FUNCION(PyCFunction, __Pyx_SelflessCall), + /* .ml_flags = */ METH_VARARGS | METH_KEYWORDS, + /* .ml_doc = */ NULL +}; + +static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) { + PyObject *method; + method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name); + if (unlikely(!method)) + return -1; + target->method = method; +// FIXME: use functionality from CythonFunction.c/ClassMethod +#if CYTHON_COMPILING_IN_CPYTHON + #if PY_MAJOR_VERSION >= 3 + if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type))) + #else + // method descriptor type isn't exported in Py2.x, cannot easily check the type there. + // Therefore, reverse the check to the most likely alternative + // (which is returned for class methods) + if (likely(!__Pyx_CyOrPyCFunction_Check(method))) + #endif + { + PyMethodDescrObject *descr = (PyMethodDescrObject*) method; + target->func = descr->d_method->ml_meth; + target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS); + } else +#endif + // bound classmethods need special treatment +#if CYTHON_COMPILING_IN_PYPY + // In PyPy, functions are regular methods, so just do the self check. +#else + if (PyCFunction_Check(method)) +#endif + { + PyObject *self; + int self_found; +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + self = PyObject_GetAttrString(method, "__self__"); + if (!self) { + PyErr_Clear(); + } +#else + self = PyCFunction_GET_SELF(method); +#endif + self_found = (self && self != Py_None); +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + Py_XDECREF(self); +#endif + if (self_found) { + PyObject *unbound_method = PyCFunction_New(&__Pyx_UnboundCMethod_Def, method); + if (unlikely(!unbound_method)) return -1; + // New PyCFunction will own method reference, thus decref __Pyx_PyObject_GetAttrStr + Py_DECREF(method); + target->method = unbound_method; + } + } + return 0; +} + + +/////////////// CallUnboundCMethod0.proto /////////////// +//@substitute: naming + +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); /*proto*/ +#if CYTHON_COMPILING_IN_CPYTHON +// FASTCALL methods receive "&empty_tuple" as simple "PyObject[0]*" +#define __Pyx_CallUnboundCMethod0(cfunc, self) \ + (likely((cfunc)->func) ? \ + (likely((cfunc)->flag == METH_NOARGS) ? 
(*((cfunc)->func))(self, NULL) : \ + (PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ? \ + (PY_VERSION_HEX >= 0x030700A0 ? \ + (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0) : \ + (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL)) : \ + (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \ + (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL) : \ + (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, $empty_tuple, NULL)) : \ + ((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, $empty_tuple) : \ + __Pyx__CallUnboundCMethod0(cfunc, self)))))) : \ + __Pyx__CallUnboundCMethod0(cfunc, self)) +#else +#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self) +#endif + +/////////////// CallUnboundCMethod0 /////////////// +//@requires: UnpackUnboundCMethod +//@requires: PyObjectCall + +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { + PyObject *args, *result = NULL; + if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; +#if CYTHON_ASSUME_SAFE_MACROS + args = PyTuple_New(1); + if (unlikely(!args)) goto bad; + Py_INCREF(self); + PyTuple_SET_ITEM(args, 0, self); +#else + args = PyTuple_Pack(1, self); + if (unlikely(!args)) goto bad; +#endif + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); + Py_DECREF(args); +bad: + return result; +} + + +/////////////// CallUnboundCMethod1.proto /////////////// + +static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/ + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/ +#else +#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) __Pyx__CallUnboundCMethod1(cfunc, self, arg) +#endif + +/////////////// CallUnboundCMethod1 /////////////// +//@requires: UnpackUnboundCMethod +//@requires: PyObjectCall + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg) { + if (likely(cfunc->func)) { + int flag = cfunc->flag; + // Not using #ifdefs for PY_VERSION_HEX to avoid C compiler warnings about unused functions. 
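+ // METH_O is a direct "func(self, arg)" C call; in the METH_FASTCALL cases below, + // "&arg" acts as a one-element argument stack.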
+ if (flag == METH_O) { + return (*(cfunc->func))(self, arg); + } else if ((PY_VERSION_HEX >= 0x030600B1) && flag == METH_FASTCALL) { + #if PY_VERSION_HEX >= 0x030700A0 + return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, &arg, 1); + #else + return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); + #endif + } else if ((PY_VERSION_HEX >= 0x030700A0) && flag == (METH_FASTCALL | METH_KEYWORDS)) { + return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); + } + } + return __Pyx__CallUnboundCMethod1(cfunc, self, arg); +} +#endif + +static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg){ + PyObject *args, *result = NULL; + if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; +#if CYTHON_COMPILING_IN_CPYTHON + if (cfunc->func && (cfunc->flag & METH_VARARGS)) { + args = PyTuple_New(1); + if (unlikely(!args)) goto bad; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + if (cfunc->flag & METH_KEYWORDS) + result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); + else + result = (*cfunc->func)(self, args); + } else { + args = PyTuple_New(2); + if (unlikely(!args)) goto bad; + Py_INCREF(self); + PyTuple_SET_ITEM(args, 0, self); + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 1, arg); + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); + } +#else + args = PyTuple_Pack(2, self, arg); + if (unlikely(!args)) goto bad; + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); +#endif +bad: + Py_XDECREF(args); + return result; +} + + +/////////////// CallUnboundCMethod2.proto /////////////// + +static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2); /*proto*/ + +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1 +static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2); /*proto*/ +#else +#define __Pyx_CallUnboundCMethod2(cfunc, self, arg1, arg2) __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2) +#endif + +/////////////// CallUnboundCMethod2 /////////////// +//@requires: UnpackUnboundCMethod +//@requires: PyObjectCall + +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1 +static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2) { + if (likely(cfunc->func)) { + PyObject *args[2] = {arg1, arg2}; + if (cfunc->flag == METH_FASTCALL) { + #if PY_VERSION_HEX >= 0x030700A0 + return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, args, 2); + #else + return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); + #endif + } + #if PY_VERSION_HEX >= 0x030700A0 + if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS)) + return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); + #endif + } + return __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2); +} +#endif + +static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2){ + PyObject *args, *result = NULL; + if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; +#if CYTHON_COMPILING_IN_CPYTHON + if (cfunc->func && (cfunc->flag & METH_VARARGS)) { + args = PyTuple_New(2); 
+ if (unlikely(!args)) goto bad; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + if (cfunc->flag & METH_KEYWORDS) + result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); + else + result = (*cfunc->func)(self, args); + } else { + args = PyTuple_New(3); + if (unlikely(!args)) goto bad; + Py_INCREF(self); + PyTuple_SET_ITEM(args, 0, self); + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 1, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 2, arg2); + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); + } +#else + args = PyTuple_Pack(3, self, arg1, arg2); + if (unlikely(!args)) goto bad; + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); +#endif +bad: + Py_XDECREF(args); + return result; +} + + +/////////////// PyObjectFastCall.proto /////////////// + +#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL) +static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs); /*proto*/ + +/////////////// PyObjectFastCall /////////////// +//@requires: PyObjectCall +//@requires: PyFunctionFastCall +//@requires: PyObjectCallMethO +//@substitute: naming + +#if PY_VERSION_HEX < 0x03090000 || CYTHON_COMPILING_IN_LIMITED_API +static PyObject* __Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs) { + PyObject *argstuple; + PyObject *result = 0; + size_t i; + + argstuple = PyTuple_New((Py_ssize_t)nargs); + if (unlikely(!argstuple)) return NULL; + for (i = 0; i < nargs; i++) { + Py_INCREF(args[i]); + if (__Pyx_PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]) < 0) goto bad; + } + result = __Pyx_PyObject_Call(func, argstuple, kwargs); + bad: + Py_DECREF(argstuple); + return result; +} +#endif + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t _nargs, PyObject *kwargs) { + // Special fast paths for 0 and 1 arguments + // NOTE: in many cases, this is called with a constant value for nargs + // which is known at compile-time. So the branches below will typically + // be optimized away. 
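+ // "_nargs" may carry __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET from callers that can donate + // the slot before "args"; __Pyx_PyVectorcall_NARGS() strips that flag. With a constant + // argument count, e.g. __Pyx_PyObject_FastCall(func, args, 1), the C compiler can drop + // every branch below except the one-argument paths.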
+ Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs); +#if CYTHON_COMPILING_IN_CPYTHON + if (nargs == 0 && kwargs == NULL) { + if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_NOARGS)) + return __Pyx_PyObject_CallMethO(func, NULL); + } + else if (nargs == 1 && kwargs == NULL) { + if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_O)) + return __Pyx_PyObject_CallMethO(func, args[0]); + } +#endif + + #if PY_VERSION_HEX < 0x030800B1 + #if CYTHON_FAST_PYCCALL + if (PyCFunction_Check(func)) { + if (kwargs) { + return _PyCFunction_FastCallDict(func, args, nargs, kwargs); + } else { + return _PyCFunction_FastCallKeywords(func, args, nargs, NULL); + } + } + #if PY_VERSION_HEX >= 0x030700A1 + if (!kwargs && __Pyx_IS_TYPE(func, &PyMethodDescr_Type)) { + return _PyMethodDescr_FastCallKeywords(func, args, nargs, NULL); + } + #endif + #endif + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs); + } + #endif + #endif + + if (kwargs == NULL) { + #if CYTHON_VECTORCALL + #if PY_VERSION_HEX < 0x03090000 + vectorcallfunc f = _PyVectorcall_Function(func); + #else + vectorcallfunc f = PyVectorcall_Function(func); + #endif + if (f) { + return f(func, args, (size_t)nargs, NULL); + } + #elif defined(__Pyx_CyFunction_USED) && CYTHON_BACKPORT_VECTORCALL + // exclude fused functions for now + if (__Pyx_CyFunction_CheckExact(func)) { + __pyx_vectorcallfunc f = __Pyx_CyFunction_func_vectorcall(func); + if (f) return f(func, args, (size_t)nargs, NULL); + } + #endif + } + + if (nargs == 0) { + return __Pyx_PyObject_Call(func, $empty_tuple, kwargs); + } + #if PY_VERSION_HEX >= 0x03090000 && !CYTHON_COMPILING_IN_LIMITED_API + return PyObject_VectorcallDict(func, args, (size_t)nargs, kwargs); + #else + return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs); + #endif +} + + +/////////////// PyObjectCallMethod0.proto /////////////// + +static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); /*proto*/ + +/////////////// PyObjectCallMethod0 /////////////// +//@requires: PyObjectGetMethod +//@requires: PyObjectCallOneArg +//@requires: PyObjectCallNoArg + +static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { + PyObject *method = NULL, *result = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_CallOneArg(method, obj); + Py_DECREF(method); + return result; + } + if (unlikely(!method)) goto bad; + result = __Pyx_PyObject_CallNoArg(method); + Py_DECREF(method); +bad: + return result; +} + + +/////////////// PyObjectCallMethod1.proto /////////////// + +static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); /*proto*/ + +/////////////// PyObjectCallMethod1 /////////////// +//@requires: PyObjectGetMethod +//@requires: PyObjectCallOneArg +//@requires: PyObjectCall2Args + +#if !(CYTHON_VECTORCALL && __PYX_LIMITED_VERSION_HEX >= 0x030C00A2) +static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) { + // Separate function to avoid excessive inlining. 
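+ // The helper also consumes the "method" reference, keeping the fast path in + // __Pyx_PyObject_CallMethod1() small enough to inline.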
+ PyObject *result = __Pyx_PyObject_CallOneArg(method, arg); + Py_DECREF(method); + return result; +} +#endif + +static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) { +#if CYTHON_VECTORCALL && __PYX_LIMITED_VERSION_HEX >= 0x030C00A2 + PyObject *args[2] = {obj, arg}; + // avoid unused functions + (void) __Pyx_PyObject_GetMethod; + (void) __Pyx_PyObject_CallOneArg; + (void) __Pyx_PyObject_Call2Args; + return PyObject_VectorcallMethod(method_name, args, 2 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL); +#else + PyObject *method = NULL, *result; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_Call2Args(method, obj, arg); + Py_DECREF(method); + return result; + } + if (unlikely(!method)) return NULL; + return __Pyx__PyObject_CallMethod1(method, arg); +#endif +} + + +/////////////// tp_new.proto /////////////// + +#define __Pyx_tp_new(type_obj, args) __Pyx_tp_new_kwargs(type_obj, args, NULL) +static CYTHON_INLINE PyObject* __Pyx_tp_new_kwargs(PyObject* type_obj, PyObject* args, PyObject* kwargs) { + return (PyObject*) (((PyTypeObject*)type_obj)->tp_new((PyTypeObject*)type_obj, args, kwargs)); +} + + +/////////////// PyObjectCall.proto /////////////// + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/ +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/////////////// PyObjectCall /////////////// + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = Py_TYPE(func)->tp_call; + + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + #if PY_MAJOR_VERSION < 3 + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + #else + if (unlikely(Py_EnterRecursiveCall(" while calling a Python object"))) + return NULL; + #endif + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + + +/////////////// PyObjectCallMethO.proto /////////////// + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); /*proto*/ +#endif + +/////////////// PyObjectCallMethO /////////////// + +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = __Pyx_CyOrPyCFunction_GET_FUNCTION(func); + self = __Pyx_CyOrPyCFunction_GET_SELF(func); + + #if PY_MAJOR_VERSION < 3 + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + #else + if (unlikely(Py_EnterRecursiveCall(" while calling a Python object"))) + return NULL; + #endif + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + + +/////////////// PyFunctionFastCall.proto /////////////// + +#if CYTHON_FAST_PYCALL + +#if !CYTHON_VECTORCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs) \ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) + +static PyObject 
*__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#endif + +// Backport from Python 3 +// Assert a build-time dependency, as an expression. +// Your compile will fail if the condition isn't true, or can't be evaluated +// by the compiler. This can be used in an expression: its value is 0. +// Example: +// #define foo_to_char(foo) \ +// ((char *)(foo) \ +// + Py_BUILD_ASSERT_EXPR(offsetof(struct foo, string) == 0)) +// +// Written by Rusty Russell, public domain, https://ccodearchive.net/ +#define __Pyx_BUILD_ASSERT_EXPR(cond) \ + (sizeof(char [1 - 2*!(cond)]) - 1) + +#ifndef Py_MEMBER_SIZE +// Get the size of a structure member in bytes +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + +#if !CYTHON_VECTORCALL +#if PY_VERSION_HEX >= 0x03080000 + #include "frameobject.h" +#if PY_VERSION_HEX >= 0x030b00a6 && !CYTHON_COMPILING_IN_LIMITED_API + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif + #define __Pxy_PyFrame_Initialize_Offsets() + #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus) +#else + // Initialised by module init code. + static size_t __pyx_pyframe_localsplus_offset = 0; + + #include "frameobject.h" + // This is the long runtime version of + // #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus) + // offsetof(PyFrameObject, f_localsplus) differs between regular C-Python and Stackless Python < 3.8. + // Therefore the offset is computed at run time from PyFrame_type.tp_basicsize. That is feasible, + // because f_localsplus is the last field of PyFrameObject (checked by Py_BUILD_ASSERT_EXPR below). + #define __Pxy_PyFrame_Initialize_Offsets() \ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)), \ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame) \ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif +#endif /* !CYTHON_VECTORCALL */ + +#endif /* CYTHON_FAST_PYCALL */ + + +/////////////// PyFunctionFastCall /////////////// +// copied from CPython 3.6 ceval.c + +#if CYTHON_FAST_PYCALL && !CYTHON_VECTORCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. 
+ */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + + return result; +} + + +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; + //#if PY_VERSION_HEX >= 0x03050000 + //PyObject *name, *qualname; + //#endif +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? PyDict_Size(kwargs) : 0; + + #if PY_MAJOR_VERSION < 3 + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) { + return NULL; + } + #else + if (unlikely(Py_EnterRecursiveCall(" while calling a Python object"))) { + return NULL; + } + #endif + + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + /* Fast paths */ + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); + //#if PY_VERSION_HEX >= 0x03050000 + //name = ((PyFunctionObject *)func) -> func_name; + //qualname = ((PyFunctionObject *)func) -> func_qualname; + //#endif +#endif + + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } + + //#if PY_VERSION_HEX >= 0x03050000 + //return _PyEval_EvalCodeWithName((PyObject*)co, globals, (PyObject *)NULL, + // args, nargs, + // NULL, 0, + // d, nd, kwdefs, + // closure, name, qualname); + //#elif PY_MAJOR_VERSION >= 3 +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); + +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif /* CYTHON_FAST_PYCALL && !CYTHON_VECTORCALL */ + + +/////////////// PyObjectCall2Args.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /*proto*/ 
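+// Usage sketch (illustrative only, not generated code):
+//     PyObject *res = __Pyx_PyObject_Call2Args(func, self, value);
+//     if (unlikely(!res)) goto error;
+// The NULL slot placed before the two arguments (see the implementation
+// below) makes the call eligible for PY_VECTORCALL_ARGUMENTS_OFFSET.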
+ +/////////////// PyObjectCall2Args /////////////// +//@requires: PyObjectFastCall + +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args[3] = {NULL, arg1, arg2}; + return __Pyx_PyObject_FastCall(function, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + + +/////////////// PyObjectCallOneArg.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /*proto*/ + +/////////////// PyObjectCallOneArg /////////////// +//@requires: PyObjectFastCall + +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *args[2] = {NULL, arg}; + return __Pyx_PyObject_FastCall(func, args+1, 1 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + + +/////////////// PyObjectCallNoArg.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); /*proto*/ + +/////////////// PyObjectCallNoArg /////////////// +//@requires: PyObjectFastCall + +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { + PyObject *arg[2] = {NULL, NULL}; + return __Pyx_PyObject_FastCall(func, arg + 1, 0 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + + +/////////////// PyVectorcallFastCallDict.proto /////////////// + +#if CYTHON_METH_FASTCALL +static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw); +#endif + +/////////////// PyVectorcallFastCallDict /////////////// + +#if CYTHON_METH_FASTCALL +// Slow path when kw is non-empty +static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) +{ + // Code based on _PyObject_FastCallDict() and _PyStack_UnpackDict() from CPython + PyObject *res = NULL; + PyObject *kwnames; + PyObject **newargs; + PyObject **kwvalues; + Py_ssize_t i, pos; + size_t j; + PyObject *key, *value; + unsigned long keys_are_strings; + Py_ssize_t nkw = PyDict_GET_SIZE(kw); + + // Copy positional arguments + newargs = (PyObject **)PyMem_Malloc((nargs + (size_t)nkw) * sizeof(args[0])); + if (unlikely(newargs == NULL)) { + PyErr_NoMemory(); + return NULL; + } + for (j = 0; j < nargs; j++) newargs[j] = args[j]; + + // Copy keyword arguments + kwnames = PyTuple_New(nkw); + if (unlikely(kwnames == NULL)) { + PyMem_Free(newargs); + return NULL; + } + kwvalues = newargs + nargs; + pos = i = 0; + keys_are_strings = Py_TPFLAGS_UNICODE_SUBCLASS; + while (PyDict_Next(kw, &pos, &key, &value)) { + keys_are_strings &= Py_TYPE(key)->tp_flags; + Py_INCREF(key); + Py_INCREF(value); + PyTuple_SET_ITEM(kwnames, i, key); + kwvalues[i] = value; + i++; + } + if (unlikely(!keys_are_strings)) { + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); + goto cleanup; + } + + // The actual call + res = vc(func, newargs, nargs, kwnames); + +cleanup: + Py_DECREF(kwnames); + for (i = 0; i < nkw; i++) + Py_DECREF(kwvalues[i]); + PyMem_Free(newargs); + return res; +} + +static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) +{ + if (likely(kw == NULL) || PyDict_GET_SIZE(kw) == 0) { + return vc(func, args, nargs, NULL); + } + return __Pyx_PyVectorcall_FastCallDict_kw(func, vc, args, nargs, kw); +} +#endif + + +/////////////// MatrixMultiply.proto /////////////// + +#if PY_VERSION_HEX >= 0x03050000 + #define __Pyx_PyNumber_MatrixMultiply(x,y) 
PyNumber_MatrixMultiply(x,y) + #define __Pyx_PyNumber_InPlaceMatrixMultiply(x,y) PyNumber_InPlaceMatrixMultiply(x,y) +#else +#define __Pyx_PyNumber_MatrixMultiply(x,y) __Pyx__PyNumber_MatrixMultiply(x, y, "@") +static PyObject* __Pyx__PyNumber_MatrixMultiply(PyObject* x, PyObject* y, const char* op_name); +static PyObject* __Pyx_PyNumber_InPlaceMatrixMultiply(PyObject* x, PyObject* y); +#endif + +/////////////// MatrixMultiply /////////////// +//@requires: PyObjectGetAttrStrNoError +//@requires: PyObjectCallOneArg +//@requires: PyObjectCall2Args + +#if PY_VERSION_HEX < 0x03050000 +static PyObject* __Pyx_PyObject_CallMatrixMethod(PyObject* method, PyObject* arg) { + // NOTE: eats the method reference + PyObject *result = NULL; +#if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(method))) { + PyObject *self = PyMethod_GET_SELF(method); + if (likely(self)) { + PyObject *function = PyMethod_GET_FUNCTION(method); + result = __Pyx_PyObject_Call2Args(function, self, arg); + goto done; + } + } +#endif + result = __Pyx_PyObject_CallOneArg(method, arg); +done: + Py_DECREF(method); + return result; +} + +#define __Pyx_TryMatrixMethod(x, y, py_method_name) { \ + PyObject *func = __Pyx_PyObject_GetAttrStrNoError(x, py_method_name); \ + if (func) { \ + PyObject *result = __Pyx_PyObject_CallMatrixMethod(func, y); \ + if (result != Py_NotImplemented) \ + return result; \ + Py_DECREF(result); \ + } else if (unlikely(PyErr_Occurred())) { \ + return NULL; \ + } \ +} + +static PyObject* __Pyx__PyNumber_MatrixMultiply(PyObject* x, PyObject* y, const char* op_name) { + __Pyx_TypeName x_type_name; + __Pyx_TypeName y_type_name; + int right_is_subtype = PyObject_IsSubclass((PyObject*)Py_TYPE(y), (PyObject*)Py_TYPE(x)); + if (unlikely(right_is_subtype == -1)) + return NULL; + if (right_is_subtype) { + // to allow subtypes to override parent behaviour, try reversed operation first + // see note at https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types + __Pyx_TryMatrixMethod(y, x, PYIDENT("__rmatmul__")) + } + __Pyx_TryMatrixMethod(x, y, PYIDENT("__matmul__")) + if (!right_is_subtype) { + __Pyx_TryMatrixMethod(y, x, PYIDENT("__rmatmul__")) + } + x_type_name = __Pyx_PyType_GetName(Py_TYPE(x)); + y_type_name = __Pyx_PyType_GetName(Py_TYPE(y)); + PyErr_Format(PyExc_TypeError, + "unsupported operand type(s) for %.2s: '" __Pyx_FMT_TYPENAME "' and '" + __Pyx_FMT_TYPENAME "'", op_name, x_type_name, y_type_name); + __Pyx_DECREF_TypeName(x_type_name); + __Pyx_DECREF_TypeName(y_type_name); + return NULL; +} + +static PyObject* __Pyx_PyNumber_InPlaceMatrixMultiply(PyObject* x, PyObject* y) { + __Pyx_TryMatrixMethod(x, y, PYIDENT("__imatmul__")) + return __Pyx__PyNumber_MatrixMultiply(x, y, "@="); +} + +#undef __Pyx_TryMatrixMethod +#endif + + +/////////////// PyDictVersioning.proto /////////////// + +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) \ + (version_var) = __PYX_GET_DICT_VERSION(dict); \ + (cache_var) = (value); + +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) { \ + static PY_UINT64_T __pyx_dict_version = 0; \ + static PyObject *__pyx_dict_cached_value = NULL; \ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) { \ + (VAR) = __pyx_dict_cached_value; \ + } else { \ + (VAR) = __pyx_dict_cached_value = (LOOKUP); \ + __pyx_dict_version = 
__PYX_GET_DICT_VERSION(DICT); \ + } \ +} + +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); /*proto*/ +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); /*proto*/ +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); /*proto*/ + +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/////////////// PyDictVersioning /////////////// + +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} + +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; +} + +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + + +/////////////// PyMethodNew.proto /////////////// + +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { + PyObject *typesModule=NULL, *methodType=NULL, *result=NULL; + CYTHON_UNUSED_VAR(typ); + if (!self) + return __Pyx_NewRef(func); + typesModule = PyImport_ImportModule("types"); + if (!typesModule) return NULL; + methodType = PyObject_GetAttrString(typesModule, "MethodType"); + Py_DECREF(typesModule); + if (!methodType) return NULL; + result = PyObject_CallFunctionObjArgs(methodType, func, self, NULL); + Py_DECREF(methodType); + return result; +} +#elif PY_MAJOR_VERSION >= 3 +// This should be an actual function (not a macro), such that we can put it +// directly in a tp_descr_get slot. +static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { + CYTHON_UNUSED_VAR(typ); + if (!self) + return __Pyx_NewRef(func); + return PyMethod_New(func, self); +} +#else + #define __Pyx_PyMethod_New PyMethod_New +#endif + +///////////// PyMethodNew2Arg.proto ///////////// + +// Another wrapping of PyMethod_New that matches the Python3 signature +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyMethod_New2Arg PyMethod_New +#else +#define __Pyx_PyMethod_New2Arg(func, self) PyMethod_New(func, self, (PyObject*)Py_TYPE(self)) +#endif + +/////////////// UnicodeConcatInPlace.proto //////////////// + +# if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 +// __Pyx_PyUnicode_ConcatInPlace may modify the first argument 'left' +// However, unlike `PyUnicode_Append` it will never NULL it. 
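+// (i.e. on error the caller still owns its original 'left' reference,
+// whereas PyUnicode_Append() would have cleared it)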
+// It behaves like a regular function - returns a new reference and NULL on error + #if CYTHON_REFNANNY + #define __Pyx_PyUnicode_ConcatInPlace(left, right) __Pyx_PyUnicode_ConcatInPlaceImpl(&left, right, __pyx_refnanny) + #else + #define __Pyx_PyUnicode_ConcatInPlace(left, right) __Pyx_PyUnicode_ConcatInPlaceImpl(&left, right) + #endif + // __Pyx_PyUnicode_ConcatInPlace is slightly odd because it has the potential to modify the input + // argument (but only in cases where no user should notice). Therefore, it needs to keep Cython's + // refnanny informed. + static CYTHON_INLINE PyObject *__Pyx_PyUnicode_ConcatInPlaceImpl(PyObject **p_left, PyObject *right + #if CYTHON_REFNANNY + , void* __pyx_refnanny + #endif + ); /* proto */ +#else +#define __Pyx_PyUnicode_ConcatInPlace __Pyx_PyUnicode_Concat +#endif +#define __Pyx_PyUnicode_ConcatInPlaceSafe(left, right) ((unlikely((left) == Py_None) || unlikely((right) == Py_None)) ? \ + PyNumber_InPlaceAdd(left, right) : __Pyx_PyUnicode_ConcatInPlace(left, right)) + +/////////////// UnicodeConcatInPlace //////////////// +//@substitute: naming + +# if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 +// copied directly from unicode_object.c "unicode_modifiable +// removing _PyUnicode_HASH since it's a macro we don't have +// - this is OK because trying PyUnicode_Resize on a non-modifyable +// object will still work, it just won't happen in place +static int +__Pyx_unicode_modifiable(PyObject *unicode) +{ + if (Py_REFCNT(unicode) != 1) + return 0; + if (!PyUnicode_CheckExact(unicode)) + return 0; + if (PyUnicode_CHECK_INTERNED(unicode)) + return 0; + return 1; +} + +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_ConcatInPlaceImpl(PyObject **p_left, PyObject *right + #if CYTHON_REFNANNY + , void* __pyx_refnanny + #endif + ) { + // heavily based on PyUnicode_Append + PyObject *left = *p_left; + Py_ssize_t left_len, right_len, new_len; + + if (unlikely(__Pyx_PyUnicode_READY(left) == -1)) + return NULL; + if (unlikely(__Pyx_PyUnicode_READY(right) == -1)) + return NULL; + + // Shortcuts + left_len = PyUnicode_GET_LENGTH(left); + if (left_len == 0) { + Py_INCREF(right); + return right; + } + right_len = PyUnicode_GET_LENGTH(right); + if (right_len == 0) { + Py_INCREF(left); + return left; + } + if (unlikely(left_len > PY_SSIZE_T_MAX - right_len)) { + PyErr_SetString(PyExc_OverflowError, + "strings are too large to concat"); + return NULL; + } + new_len = left_len + right_len; + + if (__Pyx_unicode_modifiable(left) + && PyUnicode_CheckExact(right) + && PyUnicode_KIND(right) <= PyUnicode_KIND(left) + // Don't resize for ascii += latin1. Convert ascii to latin1 requires + // to change the structure size, but characters are stored just after + // the structure, and so it requires to move all characters which is + // not so different than duplicating the string. + && !(PyUnicode_IS_ASCII(left) && !PyUnicode_IS_ASCII(right))) { + + int ret; + // GIVEREF/GOTREF since we expect *p_left to change (although it won't change on failures). 
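+    // PyUnicode_Resize() may have to reallocate, in which case it swaps a
+    // new object into *p_left; the GIVEREF/GOTREF pair below keeps the
+    // refnanny bookkeeping consistent across that possible swap.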
+ __Pyx_GIVEREF(*p_left); + ret = PyUnicode_Resize(p_left, new_len); + __Pyx_GOTREF(*p_left); + if (unlikely(ret != 0)) + return NULL; + + // copy 'right' into the newly allocated area of 'left' + #if PY_VERSION_HEX >= 0x030d0000 + if (unlikely(PyUnicode_CopyCharacters(*p_left, left_len, right, 0, right_len) < 0)) return NULL; + #else + _PyUnicode_FastCopyCharacters(*p_left, left_len, right, 0, right_len); + #endif + __Pyx_INCREF(*p_left); + __Pyx_GIVEREF(*p_left); + return *p_left; + } else { + return __Pyx_PyUnicode_Concat(left, right); + } + } +#endif + +////////////// StrConcatInPlace.proto /////////////////////// +//@requires: UnicodeConcatInPlace + +#if PY_MAJOR_VERSION >= 3 + // allow access to the more efficient versions where we know str_type is unicode + #define __Pyx_PyStr_Concat __Pyx_PyUnicode_Concat + #define __Pyx_PyStr_ConcatInPlace __Pyx_PyUnicode_ConcatInPlace +#else + #define __Pyx_PyStr_Concat PyNumber_Add + #define __Pyx_PyStr_ConcatInPlace PyNumber_InPlaceAdd +#endif +#define __Pyx_PyStr_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ + PyNumber_Add(a, b) : __Pyx_PyStr_Concat(a, b)) +#define __Pyx_PyStr_ConcatInPlaceSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ + PyNumber_InPlaceAdd(a, b) : __Pyx_PyStr_ConcatInPlace(a, b)) + + +/////////////// PySequenceMultiply.proto /////////////// + +#define __Pyx_PySequence_Multiply_Left(mul, seq) __Pyx_PySequence_Multiply(seq, mul) +static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul); + +/////////////// PySequenceMultiply /////////////// + +static PyObject* __Pyx_PySequence_Multiply_Generic(PyObject *seq, Py_ssize_t mul) { + PyObject *result, *pymul = PyInt_FromSsize_t(mul); + if (unlikely(!pymul)) + return NULL; + result = PyNumber_Multiply(seq, pymul); + Py_DECREF(pymul); + return result; +} + +static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul) { +#if CYTHON_USE_TYPE_SLOTS + PyTypeObject *type = Py_TYPE(seq); + if (likely(type->tp_as_sequence && type->tp_as_sequence->sq_repeat)) { + return type->tp_as_sequence->sq_repeat(seq, mul); + } else +#endif + { + return __Pyx_PySequence_Multiply_Generic(seq, mul); + } +} + + +/////////////// FormatTypeName.proto /////////////// + +#if CYTHON_COMPILING_IN_LIMITED_API +typedef PyObject *__Pyx_TypeName; +#define __Pyx_FMT_TYPENAME "%U" +static __Pyx_TypeName __Pyx_PyType_GetName(PyTypeObject* tp); /*proto*/ +#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj) +#else +typedef const char *__Pyx_TypeName; +#define __Pyx_FMT_TYPENAME "%.200s" +#define __Pyx_PyType_GetName(tp) ((tp)->tp_name) +#define __Pyx_DECREF_TypeName(obj) +#endif + +/////////////// FormatTypeName /////////////// + +#if CYTHON_COMPILING_IN_LIMITED_API +static __Pyx_TypeName +__Pyx_PyType_GetName(PyTypeObject* tp) +{ + PyObject *name = __Pyx_PyObject_GetAttrStr((PyObject *)tp, + PYIDENT("__name__")); + if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) { + PyErr_Clear(); + Py_XDECREF(name); + name = __Pyx_NewRef(PYIDENT("?")); + } + return name; +} +#endif + + +/////////////// RaiseUnexpectedTypeError.proto /////////////// + +static int __Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj); /*proto*/ + +/////////////// RaiseUnexpectedTypeError /////////////// + +static int +__Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj) +{ + __Pyx_TypeName obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, "Expected %s, got " 
__Pyx_FMT_TYPENAME, + expected, obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return 0; +} + + +/////////////// RaiseUnboundLocalError.proto /////////////// +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);/*proto*/ + +/////////////// RaiseUnboundLocalError /////////////// +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { + PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); +} + + +/////////////// RaiseClosureNameError.proto /////////////// +static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);/*proto*/ + +/////////////// RaiseClosureNameError /////////////// +static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) { + PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname); +} + + +/////////////// RaiseUnboundMemoryviewSliceNogil.proto /////////////// +static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);/*proto*/ + +/////////////// RaiseUnboundMemoryviewSliceNogil /////////////// +//@requires: RaiseUnboundLocalError + +// Don't inline the function, it should really never be called in production +static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) { + #ifdef WITH_THREAD + PyGILState_STATE gilstate = PyGILState_Ensure(); + #endif + __Pyx_RaiseUnboundLocalError(varname); + #ifdef WITH_THREAD + PyGILState_Release(gilstate); + #endif +} + +//////////////// RaiseCppGlobalNameError.proto /////////////////////// +static CYTHON_INLINE void __Pyx_RaiseCppGlobalNameError(const char *varname); /*proto*/ + +/////////////// RaiseCppGlobalNameError ////////////////////////////// +static CYTHON_INLINE void __Pyx_RaiseCppGlobalNameError(const char *varname) { + PyErr_Format(PyExc_NameError, "C++ global '%s' is not initialized", varname); +} + +//////////////// RaiseCppAttributeError.proto /////////////////////// +static CYTHON_INLINE void __Pyx_RaiseCppAttributeError(const char *varname); /*proto*/ + +/////////////// RaiseCppAttributeError ////////////////////////////// +static CYTHON_INLINE void __Pyx_RaiseCppAttributeError(const char *varname) { + PyErr_Format(PyExc_AttributeError, "C++ attribute '%s' is not initialized", varname); +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Optimize.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Optimize.c new file mode 100644 index 0000000000000000000000000000000000000000..528ad8c6b8ec47fa7f11a0344a70659a2954024b --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Optimize.c @@ -0,0 +1,1616 @@ +/* + * Optional optimisations of built-in functions and methods. + * + * Required replacements of builtins are in Builtins.c. + * + * General object operations and protocols are in ObjectHandling.c. 
+ */ + +/////////////// append.proto /////////////// + +static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x); /*proto*/ + +/////////////// append /////////////// +//@requires: ListAppend +//@requires: ObjectHandling.c::PyObjectCallMethod1 + +static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x) { + if (likely(PyList_CheckExact(L))) { + if (unlikely(__Pyx_PyList_Append(L, x) < 0)) return -1; + } else { + PyObject* retval = __Pyx_PyObject_CallMethod1(L, PYIDENT("append"), x); + if (unlikely(!retval)) + return -1; + Py_DECREF(retval); + } + return 0; +} + +/////////////// ListAppend.proto /////////////// + +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { + Py_INCREF(x); + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 + // In Py3.13a1, PyList_SET_ITEM() checks that the end index is lower than the current size. + // However, extending the size *before* setting the value would not be correct, + // so we cannot call PyList_SET_ITEM(). + L->ob_item[len] = x; + #else + PyList_SET_ITEM(list, len, x); + #endif + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) +#endif + +/////////////// ListCompAppend.proto /////////////// + +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len)) { + Py_INCREF(x); + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030d0000 + // In Py3.13a1, PyList_SET_ITEM() checks that the end index is lower than the current size. + // However, extending the size *before* setting the value would not be correct, + // so we cannot call PyList_SET_ITEM(). + L->ob_item[len] = x; + #else + PyList_SET_ITEM(list, len, x); + #endif + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) +#endif + +//////////////////// ListExtend.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + PyObject* none = _PyList_Extend((PyListObject*)L, v); + if (unlikely(!none)) + return -1; + Py_DECREF(none); + return 0; +#else + return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); +#endif +} + +/////////////// pop.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L); /*proto*/ + +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L); /*proto*/ +#define __Pyx_PyObject_Pop(L) (likely(PyList_CheckExact(L)) ? 
\ + __Pyx_PyList_Pop(L) : __Pyx__PyObject_Pop(L)) + +#else +#define __Pyx_PyList_Pop(L) __Pyx__PyObject_Pop(L) +#define __Pyx_PyObject_Pop(L) __Pyx__PyObject_Pop(L) +#endif + +/////////////// pop /////////////// +//@requires: ObjectHandling.c::PyObjectCallMethod0 + +static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L) { + if (__Pyx_IS_TYPE(L, &PySet_Type)) { + return PySet_Pop(L); + } + return __Pyx_PyObject_CallMethod0(L, PYIDENT("pop")); +} + +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L) { + /* Check that both the size is positive and no reallocation shrinking needs to be done. */ + if (likely(PyList_GET_SIZE(L) > (((PyListObject*)L)->allocated >> 1))) { + __Pyx_SET_SIZE(L, Py_SIZE(L) - 1); + return PyList_GET_ITEM(L, PyList_GET_SIZE(L)); + } + return CALL_UNBOUND_METHOD(PyList_Type, "pop", L); +} +#endif + + +/////////////// pop_index.proto /////////////// + +static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix); /*proto*/ +static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix); /*proto*/ + +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix); /*proto*/ + +#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \ + (likely(PyList_CheckExact(L) && __Pyx_fits_Py_ssize_t(ix, type, is_signed))) ? \ + __Pyx__PyList_PopIndex(L, py_ix, ix) : ( \ + (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \ + __Pyx__PyObject_PopIndex(L, py_ix))) + +#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \ + __Pyx_fits_Py_ssize_t(ix, type, is_signed) ? \ + __Pyx__PyList_PopIndex(L, py_ix, ix) : ( \ + (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \ + __Pyx__PyObject_PopIndex(L, py_ix))) + +#else + +#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) \ + __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) + +#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \ + (unlikely((py_ix) == Py_None)) ? 
__Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \ + __Pyx__PyObject_PopIndex(L, py_ix)) +#endif + +/////////////// pop_index /////////////// +//@requires: ObjectHandling.c::PyObjectCallMethod1 + +static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix) { + PyObject *r; + if (unlikely(!py_ix)) return NULL; + r = __Pyx__PyObject_PopIndex(L, py_ix); + Py_DECREF(py_ix); + return r; +} + +static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix) { + return __Pyx_PyObject_CallMethod1(L, PYIDENT("pop"), py_ix); +} + +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix) { + Py_ssize_t size = PyList_GET_SIZE(L); + if (likely(size > (((PyListObject*)L)->allocated >> 1))) { + Py_ssize_t cix = ix; + if (cix < 0) { + cix += size; + } + if (likely(__Pyx_is_valid_index(cix, size))) { + PyObject* v = PyList_GET_ITEM(L, cix); + __Pyx_SET_SIZE(L, Py_SIZE(L) - 1); + size -= 1; + memmove(&PyList_GET_ITEM(L, cix), &PyList_GET_ITEM(L, cix+1), (size_t)(size-cix)*sizeof(PyObject*)); + return v; + } + } + if (py_ix == Py_None) { + return __Pyx__PyObject_PopNewIndex(L, PyInt_FromSsize_t(ix)); + } else { + return __Pyx__PyObject_PopIndex(L, py_ix); + } +} +#endif + + +/////////////// dict_getitem_default.proto /////////////// + +static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value); /*proto*/ + +/////////////// dict_getitem_default /////////////// + +static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value) { + PyObject* value; +#if PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (unlikely(PyErr_Occurred())) + return NULL; + value = default_value; + } + Py_INCREF(value); + // avoid C compiler warning about unused utility functions + if ((1)); +#else + if (PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key)) { + /* these presumably have safe hash functions */ + value = PyDict_GetItem(d, key); + if (unlikely(!value)) { + value = default_value; + } + Py_INCREF(value); + } +#endif + else { + if (default_value == Py_None) + value = CALL_UNBOUND_METHOD(PyDict_Type, "get", d, key); + else + value = CALL_UNBOUND_METHOD(PyDict_Type, "get", d, key, default_value); + } + return value; +} + + +/////////////// dict_setdefault.proto /////////////// + +static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value, int is_safe_type); /*proto*/ + +/////////////// dict_setdefault /////////////// + +static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value, + int is_safe_type) { + PyObject* value; + CYTHON_MAYBE_UNUSED_VAR(is_safe_type); +#if PY_VERSION_HEX >= 0x030400A0 + // we keep the method call at the end to avoid "unused" C compiler warnings + if ((1)) { + value = PyDict_SetDefault(d, key, default_value); + if (unlikely(!value)) return NULL; + Py_INCREF(value); +#else + if (is_safe_type == 1 || (is_safe_type == -1 && + /* the following builtins presumably have repeatably safe and fast hash functions */ +#if PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) + (PyUnicode_CheckExact(key) || PyString_CheckExact(key) || PyLong_CheckExact(key)))) { + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (unlikely(PyErr_Occurred())) + return 
NULL;
+            if (unlikely(PyDict_SetItem(d, key, default_value) == -1))
+                return NULL;
+            value = default_value;
+        }
+        Py_INCREF(value);
+#else
+        (PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key) || PyLong_CheckExact(key)))) {
+        value = PyDict_GetItem(d, key);
+        if (unlikely(!value)) {
+            if (unlikely(PyDict_SetItem(d, key, default_value) == -1))
+                return NULL;
+            value = default_value;
+        }
+        Py_INCREF(value);
+#endif
+#endif
+    } else {
+        value = CALL_UNBOUND_METHOD(PyDict_Type, "setdefault", d, key, default_value);
+    }
+    return value;
+}
+
+
+/////////////// py_dict_clear.proto ///////////////
+
+#define __Pyx_PyDict_Clear(d) (PyDict_Clear(d), 0)
+
+
+/////////////// py_dict_pop.proto ///////////////
+
+static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyObject *default_value); /*proto*/
+
+/////////////// py_dict_pop ///////////////
+
+static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyObject *default_value) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B3 && PY_VERSION_HEX < 0x030d0000
+    if ((1)) {
+        return _PyDict_Pop(d, key, default_value);
+    } else
+    // avoid "function unused" warnings
+#endif
+    if (default_value) {
+        return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key, default_value);
+    } else {
+        return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key);
+    }
+}
+
+
+/////////////// dict_iter.proto ///////////////
+
+static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name,
+                                                   Py_ssize_t* p_orig_length, int* p_is_dict);
+static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos,
+                                              PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict);
+
+/////////////// dict_iter ///////////////
+//@requires: ObjectHandling.c::UnpackTuple2
+//@requires: ObjectHandling.c::IterFinish
+//@requires: ObjectHandling.c::PyObjectCallMethod0
+
+#if CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+#include <string.h>
+#endif
+
+static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name,
+                                                   Py_ssize_t* p_orig_length, int* p_source_is_dict) {
+    is_dict = is_dict || likely(PyDict_CheckExact(iterable));
+    *p_source_is_dict = is_dict;
+    if (is_dict) {
+#if !CYTHON_COMPILING_IN_PYPY
+        *p_orig_length = PyDict_Size(iterable);
+        Py_INCREF(iterable);
+        return iterable;
+#elif PY_MAJOR_VERSION >= 3
+        // On PyPy3, we need to manually translate a few method names.
+        // This logic is not needed on CPython thanks to the fast case above.
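+        // e.g. the Py2-style name "iteritems" becomes "items": the code below
+        // strips the "iter" prefix (name + 4) and caches the resulting method
+        // name object for reuse.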
+ static PyObject *py_items = NULL, *py_keys = NULL, *py_values = NULL; + PyObject **pp = NULL; + if (method_name) { + const char *name = PyUnicode_AsUTF8(method_name); + if (strcmp(name, "iteritems") == 0) pp = &py_items; + else if (strcmp(name, "iterkeys") == 0) pp = &py_keys; + else if (strcmp(name, "itervalues") == 0) pp = &py_values; + if (pp) { + if (!*pp) { + *pp = PyUnicode_FromString(name + 4); + if (!*pp) + return NULL; + } + method_name = *pp; + } + } +#endif + } + *p_orig_length = 0; + if (method_name) { + PyObject* iter; + iterable = __Pyx_PyObject_CallMethod0(iterable, method_name); + if (!iterable) + return NULL; +#if !CYTHON_COMPILING_IN_PYPY + if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable)) + return iterable; +#endif + iter = PyObject_GetIter(iterable); + Py_DECREF(iterable); + return iter; + } + return PyObject_GetIter(iterable); +} + +static CYTHON_INLINE int __Pyx_dict_iter_next( + PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos, + PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) { + PyObject* next_item; +#if !CYTHON_COMPILING_IN_PYPY + if (source_is_dict) { + PyObject *key, *value; + if (unlikely(orig_length != PyDict_Size(iter_obj))) { + PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration"); + return -1; + } + if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) { + return 0; + } + if (pitem) { + PyObject* tuple = PyTuple_New(2); + if (unlikely(!tuple)) { + return -1; + } + Py_INCREF(key); + Py_INCREF(value); + PyTuple_SET_ITEM(tuple, 0, key); + PyTuple_SET_ITEM(tuple, 1, value); + *pitem = tuple; + } else { + if (pkey) { + Py_INCREF(key); + *pkey = key; + } + if (pvalue) { + Py_INCREF(value); + *pvalue = value; + } + } + return 1; + } else if (PyTuple_CheckExact(iter_obj)) { + Py_ssize_t pos = *ppos; + if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0; + *ppos = pos + 1; + next_item = PyTuple_GET_ITEM(iter_obj, pos); + Py_INCREF(next_item); + } else if (PyList_CheckExact(iter_obj)) { + Py_ssize_t pos = *ppos; + if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0; + *ppos = pos + 1; + next_item = PyList_GET_ITEM(iter_obj, pos); + Py_INCREF(next_item); + } else +#endif + { + next_item = PyIter_Next(iter_obj); + if (unlikely(!next_item)) { + return __Pyx_IterFinish(); + } + } + if (pitem) { + *pitem = next_item; + } else if (pkey && pvalue) { + if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1)) + return -1; + } else if (pkey) { + *pkey = next_item; + } else { + *pvalue = next_item; + } + return 1; +} + + +/////////////// set_iter.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_set_iterator(PyObject* iterable, int is_set, + Py_ssize_t* p_orig_length, int* p_source_is_set); /*proto*/ +static CYTHON_INLINE int __Pyx_set_iter_next( + PyObject* iter_obj, Py_ssize_t orig_length, + Py_ssize_t* ppos, PyObject **value, + int source_is_set); /*proto*/ + +/////////////// set_iter /////////////// +//@requires: ObjectHandling.c::IterFinish + +static CYTHON_INLINE PyObject* __Pyx_set_iterator(PyObject* iterable, int is_set, + Py_ssize_t* p_orig_length, int* p_source_is_set) { +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + is_set = is_set || likely(PySet_CheckExact(iterable) || PyFrozenSet_CheckExact(iterable)); + *p_source_is_set = is_set; + if (likely(is_set)) { + *p_orig_length = PySet_Size(iterable); + Py_INCREF(iterable); + return iterable; + } +#else + CYTHON_UNUSED_VAR(is_set); + 
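+    // The private set-entry API used below is no longer available from
+    // Py3.13 on, so on 3.13+ (and outside CPython) we always report
+    // "not a set" and go through the generic iterator protocol instead.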
*p_source_is_set = 0; +#endif + *p_orig_length = 0; + return PyObject_GetIter(iterable); +} + +static CYTHON_INLINE int __Pyx_set_iter_next( + PyObject* iter_obj, Py_ssize_t orig_length, + Py_ssize_t* ppos, PyObject **value, + int source_is_set) { + if (!CYTHON_COMPILING_IN_CPYTHON || PY_VERSION_HEX >= 0x030d0000 || unlikely(!source_is_set)) { + *value = PyIter_Next(iter_obj); + if (unlikely(!*value)) { + return __Pyx_IterFinish(); + } + CYTHON_UNUSED_VAR(orig_length); + CYTHON_UNUSED_VAR(ppos); + return 1; + } +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + if (unlikely(PySet_GET_SIZE(iter_obj) != orig_length)) { + PyErr_SetString( + PyExc_RuntimeError, + "set changed size during iteration"); + return -1; + } + { + Py_hash_t hash; + int ret = _PySet_NextEntry(iter_obj, ppos, value, &hash); + // CPython does not raise errors here, only if !isinstance(iter_obj, set/frozenset) + assert (ret != -1); + if (likely(ret)) { + Py_INCREF(*value); + return 1; + } + } +#endif + return 0; +} + +/////////////// py_set_discard_unhashable /////////////// +//@requires: Builtins.c::pyfrozenset_new + +static int __Pyx_PySet_DiscardUnhashable(PyObject *set, PyObject *key) { + PyObject *tmpkey; + int rv; + + if (likely(!PySet_Check(key) || !PyErr_ExceptionMatches(PyExc_TypeError))) + return -1; + PyErr_Clear(); + tmpkey = __Pyx_PyFrozenSet_New(key); + if (tmpkey == NULL) + return -1; + rv = PySet_Discard(set, tmpkey); + Py_DECREF(tmpkey); + return rv; +} + + +/////////////// py_set_discard.proto /////////////// + +static CYTHON_INLINE int __Pyx_PySet_Discard(PyObject *set, PyObject *key); /*proto*/ + +/////////////// py_set_discard /////////////// +//@requires: py_set_discard_unhashable + +static CYTHON_INLINE int __Pyx_PySet_Discard(PyObject *set, PyObject *key) { + int found = PySet_Discard(set, key); + // Convert *key* to frozenset if necessary + if (unlikely(found < 0)) { + found = __Pyx_PySet_DiscardUnhashable(set, key); + } + // note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works + return found; +} + + +/////////////// py_set_remove.proto /////////////// + +static CYTHON_INLINE int __Pyx_PySet_Remove(PyObject *set, PyObject *key); /*proto*/ + +/////////////// py_set_remove /////////////// +//@requires: py_set_discard_unhashable + +static int __Pyx_PySet_RemoveNotFound(PyObject *set, PyObject *key, int found) { + // Convert *key* to frozenset if necessary + if (unlikely(found < 0)) { + found = __Pyx_PySet_DiscardUnhashable(set, key); + } + if (likely(found == 0)) { + // Not found + PyObject *tup; + tup = PyTuple_Pack(1, key); + if (!tup) + return -1; + PyErr_SetObject(PyExc_KeyError, tup); + Py_DECREF(tup); + return -1; + } + // note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works + return found; +} + +static CYTHON_INLINE int __Pyx_PySet_Remove(PyObject *set, PyObject *key) { + int found = PySet_Discard(set, key); + if (unlikely(found != 1)) { + // note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works + return __Pyx_PySet_RemoveNotFound(set, key, found); + } + return 0; +} + + +/////////////// unicode_iter.proto /////////////// + +static CYTHON_INLINE int __Pyx_init_unicode_iteration( + PyObject* ustring, Py_ssize_t *length, void** data, int *kind); /* proto */ + +/////////////// unicode_iter /////////////// + +static CYTHON_INLINE int __Pyx_init_unicode_iteration( + PyObject* ustring, Py_ssize_t *length, void** data, int *kind) { +#if 
CYTHON_COMPILING_IN_LIMITED_API
+    // In the limited API we just point data to the unicode object
+    *kind = 0;
+    *length = PyUnicode_GetLength(ustring);
+    *data = (void*)ustring;
+#elif CYTHON_PEP393_ENABLED
+    if (unlikely(__Pyx_PyUnicode_READY(ustring) < 0)) return -1;
+    *kind = PyUnicode_KIND(ustring);
+    *length = PyUnicode_GET_LENGTH(ustring);
+    *data = PyUnicode_DATA(ustring);
+#else
+    *kind = 0;
+    *length = PyUnicode_GET_SIZE(ustring);
+    *data = (void*)PyUnicode_AS_UNICODE(ustring);
+#endif
+    return 0;
+}
+
+/////////////// pyobject_as_double.proto ///////////////
+
+static double __Pyx__PyObject_AsDouble(PyObject* obj); /* proto */
+
+#if CYTHON_COMPILING_IN_PYPY
+#define __Pyx_PyObject_AsDouble(obj) \
+(likely(PyFloat_CheckExact(obj)) ? PyFloat_AS_DOUBLE(obj) : \
+ likely(PyInt_CheckExact(obj)) ? \
+ PyFloat_AsDouble(obj) : __Pyx__PyObject_AsDouble(obj))
+#else
+#define __Pyx_PyObject_AsDouble(obj) \
+((likely(PyFloat_CheckExact(obj))) ? PyFloat_AS_DOUBLE(obj) : \
+ likely(PyLong_CheckExact(obj)) ? \
+ PyLong_AsDouble(obj) : __Pyx__PyObject_AsDouble(obj))
+#endif
+
+/////////////// pyobject_as_double ///////////////
+//@requires: pybytes_as_double
+//@requires: pyunicode_as_double
+//@requires: ObjectHandling.c::PyObjectCallOneArg
+
+static double __Pyx__PyObject_AsDouble(PyObject* obj) {
+    if (PyUnicode_CheckExact(obj)) {
+        return __Pyx_PyUnicode_AsDouble(obj);
+    } else if (PyBytes_CheckExact(obj)) {
+        return __Pyx_PyBytes_AsDouble(obj);
+    } else if (PyByteArray_CheckExact(obj)) {
+        return __Pyx_PyByteArray_AsDouble(obj);
+    } else {
+        PyObject* float_value;
+#if !CYTHON_USE_TYPE_SLOTS
+        float_value = PyNumber_Float(obj); if ((0)) goto bad;
+        // avoid "unused" warnings
+        (void)__Pyx_PyObject_CallOneArg;
+#else
+        PyNumberMethods *nb = Py_TYPE(obj)->tp_as_number;
+        if (likely(nb) && likely(nb->nb_float)) {
+            float_value = nb->nb_float(obj);
+            if (likely(float_value) && unlikely(!PyFloat_Check(float_value))) {
+                __Pyx_TypeName float_value_type_name = __Pyx_PyType_GetName(Py_TYPE(float_value));
+                PyErr_Format(PyExc_TypeError,
+                    "__float__ returned non-float (type " __Pyx_FMT_TYPENAME ")",
+                    float_value_type_name);
+                __Pyx_DECREF_TypeName(float_value_type_name);
+                Py_DECREF(float_value);
+                goto bad;
+            }
+        } else {
+            float_value = __Pyx_PyObject_CallOneArg((PyObject*)&PyFloat_Type, obj);
+        }
+#endif
+        if (likely(float_value)) {
+            double value = PyFloat_AS_DOUBLE(float_value);
+            Py_DECREF(float_value);
+            return value;
+        }
+    }
+bad:
+    return (double)-1;
+}
+
+
+/////////////// pystring_as_double.proto ///////////////
+//@requires: pyunicode_as_double
+//@requires: pybytes_as_double
+
+static CYTHON_INLINE double __Pyx_PyString_AsDouble(PyObject *obj) {
+    #if PY_MAJOR_VERSION >= 3
+    (void)__Pyx_PyBytes_AsDouble;
+    return __Pyx_PyUnicode_AsDouble(obj);
+    #else
+    (void)__Pyx_PyUnicode_AsDouble;
+    return __Pyx_PyBytes_AsDouble(obj);
+    #endif
+}
+
+
+/////////////// pyunicode_as_double.proto ///////////////
+
+static CYTHON_INLINE double __Pyx_PyUnicode_AsDouble(PyObject *obj);/*proto*/
+
+/////////////// pyunicode_as_double ///////////////
+//@requires: pybytes_as_double
+
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY && CYTHON_ASSUME_SAFE_MACROS
+static const char* __Pyx__PyUnicode_AsDouble_Copy(const void* data, const int kind, char* buffer, Py_ssize_t start, Py_ssize_t end) {
+    int last_was_punctuation;
+    Py_ssize_t i;
+    // number must not start with punctuation
+    last_was_punctuation = 1;
+    for (i=start; i < end; i++) {
+        Py_UCS4 chr = PyUnicode_READ(kind, data, i);
+        int
is_punctuation = (chr == '_') | (chr == '.'); + *buffer = (char)chr; + // reject sequences of '_' and '.' + buffer += (chr != '_'); + if (unlikely(chr > 127)) goto parse_failure; + if (unlikely(last_was_punctuation & is_punctuation)) goto parse_failure; + last_was_punctuation = is_punctuation; + } + if (unlikely(last_was_punctuation)) goto parse_failure; + *buffer = '\0'; + return buffer; + +parse_failure: + return NULL; +} + +static double __Pyx__PyUnicode_AsDouble_inf_nan(const void* data, int kind, Py_ssize_t start, Py_ssize_t length) { + int matches = 1; + Py_UCS4 chr; + Py_UCS4 sign = PyUnicode_READ(kind, data, start); + int is_signed = (sign == '-') | (sign == '+'); + start += is_signed; + length -= is_signed; + + switch (PyUnicode_READ(kind, data, start)) { + #ifdef Py_NAN + case 'n': + case 'N': + if (unlikely(length != 3)) goto parse_failure; + chr = PyUnicode_READ(kind, data, start+1); + matches &= (chr == 'a') | (chr == 'A'); + chr = PyUnicode_READ(kind, data, start+2); + matches &= (chr == 'n') | (chr == 'N'); + if (unlikely(!matches)) goto parse_failure; + return (sign == '-') ? -Py_NAN : Py_NAN; + #endif + case 'i': + case 'I': + if (unlikely(length < 3)) goto parse_failure; + chr = PyUnicode_READ(kind, data, start+1); + matches &= (chr == 'n') | (chr == 'N'); + chr = PyUnicode_READ(kind, data, start+2); + matches &= (chr == 'f') | (chr == 'F'); + if (likely(length == 3 && matches)) + return (sign == '-') ? -Py_HUGE_VAL : Py_HUGE_VAL; + if (unlikely(length != 8)) goto parse_failure; + chr = PyUnicode_READ(kind, data, start+3); + matches &= (chr == 'i') | (chr == 'I'); + chr = PyUnicode_READ(kind, data, start+4); + matches &= (chr == 'n') | (chr == 'N'); + chr = PyUnicode_READ(kind, data, start+5); + matches &= (chr == 'i') | (chr == 'I'); + chr = PyUnicode_READ(kind, data, start+6); + matches &= (chr == 't') | (chr == 'T'); + chr = PyUnicode_READ(kind, data, start+7); + matches &= (chr == 'y') | (chr == 'Y'); + if (unlikely(!matches)) goto parse_failure; + return (sign == '-') ? 
-Py_HUGE_VAL : Py_HUGE_VAL; + case '.': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': + break; + default: + goto parse_failure; + } + return 0.0; +parse_failure: + return -1.0; +} + +static double __Pyx_PyUnicode_AsDouble_WithSpaces(PyObject *obj) { + double value; + const char *last; + char *end; + Py_ssize_t start, length = PyUnicode_GET_LENGTH(obj); + const int kind = PyUnicode_KIND(obj); + const void* data = PyUnicode_DATA(obj); + + // strip spaces at start and end + start = 0; + while (Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, start))) + start++; + while (start < length - 1 && Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, length - 1))) + length--; + length -= start; + if (unlikely(length <= 0)) goto fallback; + + // parse NaN / inf + value = __Pyx__PyUnicode_AsDouble_inf_nan(data, kind, start, length); + if (unlikely(value == -1.0)) goto fallback; + if (value != 0.0) return value; + + if (length < 40) { + char number[40]; + last = __Pyx__PyUnicode_AsDouble_Copy(data, kind, number, start, start + length); + if (unlikely(!last)) goto fallback; + value = PyOS_string_to_double(number, &end, NULL); + } else { + char *number = (char*) PyMem_Malloc((length + 1) * sizeof(char)); + if (unlikely(!number)) goto fallback; + last = __Pyx__PyUnicode_AsDouble_Copy(data, kind, number, start, start + length); + if (unlikely(!last)) { + PyMem_Free(number); + goto fallback; + } + value = PyOS_string_to_double(number, &end, NULL); + PyMem_Free(number); + } + if (likely(end == last) || (value == (double)-1 && PyErr_Occurred())) { + return value; + } +fallback: + return __Pyx_SlowPyString_AsDouble(obj); +} +#endif + +static CYTHON_INLINE double __Pyx_PyUnicode_AsDouble(PyObject *obj) { + // Currently not optimised for Py2.7. 
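+    // Fast path: ASCII strings are parsed through their cached UTF-8 buffer
+    // using the bytes parser; other strings take the copying parser above,
+    // and non-CPython builds fall back to PyFloat_FromString().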
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY && CYTHON_ASSUME_SAFE_MACROS + if (unlikely(__Pyx_PyUnicode_READY(obj) == -1)) + return (double)-1; + if (likely(PyUnicode_IS_ASCII(obj))) { + const char *s; + Py_ssize_t length; + s = PyUnicode_AsUTF8AndSize(obj, &length); + return __Pyx__PyBytes_AsDouble(obj, s, length); + } + return __Pyx_PyUnicode_AsDouble_WithSpaces(obj); +#else + return __Pyx_SlowPyString_AsDouble(obj); +#endif +} + + +/////////////// pybytes_as_double.proto /////////////// + +static double __Pyx_SlowPyString_AsDouble(PyObject *obj);/*proto*/ +static double __Pyx__PyBytes_AsDouble(PyObject *obj, const char* start, Py_ssize_t length);/*proto*/ + +static CYTHON_INLINE double __Pyx_PyBytes_AsDouble(PyObject *obj) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + as_c_string = PyBytes_AS_STRING(obj); + size = PyBytes_GET_SIZE(obj); +#else + if (PyBytes_AsStringAndSize(obj, &as_c_string, &size) < 0) { + return (double)-1; + } +#endif + return __Pyx__PyBytes_AsDouble(obj, as_c_string, size); +} +static CYTHON_INLINE double __Pyx_PyByteArray_AsDouble(PyObject *obj) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + as_c_string = PyByteArray_AS_STRING(obj); + size = PyByteArray_GET_SIZE(obj); +#else + as_c_string = PyByteArray_AsString(obj); + if (as_c_string == NULL) { + return (double)-1; + } + size = PyByteArray_Size(obj); +#endif + return __Pyx__PyBytes_AsDouble(obj, as_c_string, size); +} + + +/////////////// pybytes_as_double /////////////// + +static double __Pyx_SlowPyString_AsDouble(PyObject *obj) { + PyObject *float_value; +#if PY_MAJOR_VERSION >= 3 + float_value = PyFloat_FromString(obj); +#else + float_value = PyFloat_FromString(obj, 0); +#endif + if (likely(float_value)) { +#if CYTHON_ASSUME_SAFE_MACROS + double value = PyFloat_AS_DOUBLE(float_value); +#else + double value = PyFloat_AsDouble(float_value); +#endif + Py_DECREF(float_value); + return value; + } + return (double)-1; +} + +static const char* __Pyx__PyBytes_AsDouble_Copy(const char* start, char* buffer, Py_ssize_t length) { + // number must not start with punctuation + int last_was_punctuation = 1; + Py_ssize_t i; + for (i=0; i < length; i++) { + char chr = start[i]; + int is_punctuation = (chr == '_') | (chr == '.') | (chr == 'e') | (chr == 'E'); + *buffer = chr; + buffer += (chr != '_'); + // reject sequences of '_' and '.' + if (unlikely(last_was_punctuation & is_punctuation)) goto parse_failure; + last_was_punctuation = is_punctuation; + } + if (unlikely(last_was_punctuation)) goto parse_failure; + *buffer = '\0'; + return buffer; + +parse_failure: + return NULL; +} + +static double __Pyx__PyBytes_AsDouble_inf_nan(const char* start, Py_ssize_t length) { + int matches = 1; + char sign = start[0]; + int is_signed = (sign == '+') | (sign == '-'); + start += is_signed; + length -= is_signed; + + switch (start[0]) { + #ifdef Py_NAN + case 'n': + case 'N': + if (unlikely(length != 3)) goto parse_failure; + matches &= (start[1] == 'a' || start[1] == 'A'); + matches &= (start[2] == 'n' || start[2] == 'N'); + if (unlikely(!matches)) goto parse_failure; + return (sign == '-') ? -Py_NAN : Py_NAN; + #endif + case 'i': + case 'I': + if (unlikely(length < 3)) goto parse_failure; + matches &= (start[1] == 'n' || start[1] == 'N'); + matches &= (start[2] == 'f' || start[2] == 'F'); + if (likely(length == 3 && matches)) + return (sign == '-') ? 
-Py_HUGE_VAL : Py_HUGE_VAL;
+        if (unlikely(length != 8)) goto parse_failure;
+        matches &= (start[3] == 'i' || start[3] == 'I');
+        matches &= (start[4] == 'n' || start[4] == 'N');
+        matches &= (start[5] == 'i' || start[5] == 'I');
+        matches &= (start[6] == 't' || start[6] == 'T');
+        matches &= (start[7] == 'y' || start[7] == 'Y');
+        if (unlikely(!matches)) goto parse_failure;
+        return (sign == '-') ? -Py_HUGE_VAL : Py_HUGE_VAL;
+    case '.': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9':
+        break;
+    default:
+        goto parse_failure;
+    }
+    return 0.0;
+parse_failure:
+    return -1.0;
+}
+
+static CYTHON_INLINE int __Pyx__PyBytes_AsDouble_IsSpace(char ch) {
+    // see Py_ISSPACE() in CPython
+    // https://github.com/python/cpython/blob/master/Python/pyctype.c
+    return (ch == 0x20) | !((ch < 0x9) | (ch > 0xd));
+}
+
+CYTHON_UNUSED static double __Pyx__PyBytes_AsDouble(PyObject *obj, const char* start, Py_ssize_t length) {
+    double value;
+    Py_ssize_t i, digits;
+    const char *last = start + length;
+    char *end;
+
+    // strip spaces at start and end
+    while (__Pyx__PyBytes_AsDouble_IsSpace(*start))
+        start++;
+    while (start < last - 1 && __Pyx__PyBytes_AsDouble_IsSpace(last[-1]))
+        last--;
+    length = last - start;
+    if (unlikely(length <= 0)) goto fallback;
+
+    // parse NaN / inf
+    value = __Pyx__PyBytes_AsDouble_inf_nan(start, length);
+    if (unlikely(value == -1.0)) goto fallback;
+    if (value != 0.0) return value;
+
+    // look for underscores
+    digits = 0;
+    for (i=0; i < length; digits += start[i++] != '_');
+
+    if (likely(digits == length)) {
+        value = PyOS_string_to_double(start, &end, NULL);
+    } else if (digits < 40) {
+        char number[40];
+        last = __Pyx__PyBytes_AsDouble_Copy(start, number, length);
+        if (unlikely(!last)) goto fallback;
+        value = PyOS_string_to_double(number, &end, NULL);
+    } else {
+        char *number = (char*) PyMem_Malloc((digits + 1) * sizeof(char));
+        if (unlikely(!number)) goto fallback;
+        last = __Pyx__PyBytes_AsDouble_Copy(start, number, length);
+        if (unlikely(!last)) {
+            PyMem_Free(number);
+            goto fallback;
+        }
+        value = PyOS_string_to_double(number, &end, NULL);
+        PyMem_Free(number);
+    }
+    if (likely(end == last) || (value == (double)-1 && PyErr_Occurred())) {
+        return value;
+    }
+fallback:
+    return __Pyx_SlowPyString_AsDouble(obj);
+}
+
+
+/////////////// PyNumberPow2.proto ///////////////
+
+#define __Pyx_PyNumber_InPlacePowerOf2(a, b, c) __Pyx__PyNumber_PowerOf2(a, b, c, 1)
+#define __Pyx_PyNumber_PowerOf2(a, b, c) __Pyx__PyNumber_PowerOf2(a, b, c, 0)
+
+static PyObject* __Pyx__PyNumber_PowerOf2(PyObject *two, PyObject *exp, PyObject *none, int inplace); /*proto*/
+
+/////////////// PyNumberPow2 ///////////////
+
+static PyObject* __Pyx__PyNumber_PowerOf2(PyObject *two, PyObject *exp, PyObject *none, int inplace) {
+// in CPython, 1<<N is substantially faster than 2**N
+// see https://bugs.python.org/issue21420
+#if !CYTHON_COMPILING_IN_PYPY
+    Py_ssize_t shiftby;
+#if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_CheckExact(exp))) {
+        shiftby = PyInt_AS_LONG(exp);
+    } else
+#endif
+    if (likely(PyLong_CheckExact(exp))) {
+        shiftby = PyLong_AsSsize_t(exp);
+    } else {
+        goto fallback;
+    }
+    if (likely(shiftby >= 0)) {
+        if ((size_t)shiftby <= sizeof(long) * 8 - 2) {
+            long value = 1L << shiftby;
+            return PyInt_FromLong(value);
+#ifdef HAVE_LONG_LONG
+        } else if ((size_t)shiftby <= sizeof(unsigned PY_LONG_LONG) * 8 - 1) {
+            unsigned PY_LONG_LONG value = ((unsigned PY_LONG_LONG)1) << shiftby;
+            return PyLong_FromUnsignedLongLong(value);
+#endif
+        } else {
+            PyObject *result, *one = PyInt_FromLong(1L);
+            if (unlikely(!one)) return NULL;
+            result = PyNumber_Lshift(one, exp);
+            Py_DECREF(one);
+            return result;
+        }
+    } else if (shiftby == -1 && PyErr_Occurred()) {
+        PyErr_Clear();
+    }
+fallback:
+#endif
+    return (inplace ?
PyNumber_InPlacePower : PyNumber_Power)(two, exp, none); +} + + +/////////////// PyIntCompare.proto /////////////// + +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +static CYTHON_INLINE {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, long intval, long inplace); /*proto*/ + +/////////////// PyIntCompare /////////////// + +{{py: pyval, ival = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }} +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}} +{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}} +{{py: slot_name = op.lower() }} +{{py: c_op = {'Eq': '==', 'Ne': '!='}[op] }} +{{py: +return_compare = ( + (lambda a,b,c_op, return_true=return_true, return_false=return_false: "if ({a} {c_op} {b}) {return_true}; else {return_false};".format( + a=a, b=b, c_op=c_op, return_true=return_true, return_false=return_false)) + if ret_type.is_pyobject else + (lambda a,b,c_op: "return ({a} {c_op} {b});".format(a=a, b=b, c_op=c_op)) + ) +}} + +static CYTHON_INLINE {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, long intval, long inplace) { + CYTHON_MAYBE_UNUSED_VAR(intval); + CYTHON_UNUSED_VAR(inplace); + if (op1 == op2) { + {{return_true if op == 'Eq' else return_false}}; + } + + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact({{pyval}}))) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; + long {{ival}} = PyInt_AS_LONG({{pyval}}); + {{return_compare('a', 'b', c_op)}} + } + #endif + + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact({{pyval}}))) { + int unequal; + unsigned long uintval; + Py_ssize_t size = __Pyx_PyLong_DigitCount({{pyval}}); + const digit* digits = __Pyx_PyLong_Digits({{pyval}}); + if (intval == 0) { + {{return_compare('__Pyx_PyLong_IsZero(%s)' % pyval, '1', c_op)}} + } else if (intval < 0) { + if (__Pyx_PyLong_IsNonNeg({{pyval}})) + {{return_false if op == 'Eq' else return_true}}; + // both are negative => can use absolute values now. + intval = -intval; + } else { + // > 0 => Py_SIZE(pyval) > 0 + if (__Pyx_PyLong_IsNeg({{pyval}})) + {{return_false if op == 'Eq' else return_true}}; + } + // After checking that the sign is the same (and excluding 0), now compare the absolute values. + // When inlining, the C compiler should select exactly one line from this unrolled loop. + uintval = (unsigned long) intval; + {{for _size in range(4, 0, -1)}} +#if PyLong_SHIFT * {{_size}} < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * {{_size}})) { + // The C integer value is between (PyLong_BASE ** _size) and MIN(PyLong_BASE ** _size, LONG_MAX). 
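+                // Illustrative expansion (assuming 30-bit digits and _size == 1): a value that
+                // needs two digits is unequal unless size == 2 and both digits match, i.e.
+                //     unequal = (size != 2) || (digits[0] != (uintval & PyLong_MASK))
+                //                            | (digits[1] != ((uintval >> PyLong_SHIFT) & PyLong_MASK));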
+ unequal = (size != {{_size+1}}) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + {{for _i in range(1, _size+1)}} | (digits[{{_i}}] != ((uintval >> ({{_i}} * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)){{endfor}}; + } else +#endif + {{endfor}} + unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); + + {{return_compare('unequal', '0', c_op)}} + } + #endif + + if (PyFloat_CheckExact({{pyval}})) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; +#if CYTHON_COMPILING_IN_LIMITED_API + double {{ival}} = __pyx_PyFloat_AsDouble({{pyval}}); +#else + double {{ival}} = PyFloat_AS_DOUBLE({{pyval}}); +#endif + {{return_compare('(double)a', '(double)b', c_op)}} + } + + return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( + PyObject_RichCompare(op1, op2, Py_{{op.upper()}})); +} + + +/////////////// PyIntBinop.proto /////////////// + +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +#if !CYTHON_COMPILING_IN_PYPY +static {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); /*proto*/ +#else +#define __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(op1, op2, intval, inplace, zerodivision_check) \ + {{if op in ('Eq', 'Ne')}}{{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(PyObject_RichCompare(op1, op2, Py_{{op.upper()}})) + {{else}}(inplace ? PyNumber_InPlace{{op}}(op1, op2) : PyNumber_{{op}}(op1, op2)) + {{endif}} +#endif + +/////////////// PyIntBinop /////////////// + +#if !CYTHON_COMPILING_IN_PYPY +{{py: from Cython.Utility import pylong_join }} +{{py: pyval, ival = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }} +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}} +{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}} +{{py: slot_name = {'TrueDivide': 'true_divide', 'FloorDivide': 'floor_divide'}.get(op, op.lower()) }} +{{py: cfunc_name = '__Pyx_PyInt_%s%s%s' % ('' if ret_type.is_pyobject else 'Bool', op, order)}} +{{py: +c_op = { + 'Add': '+', 'Subtract': '-', 'Multiply': '*', 'Remainder': '%', 'TrueDivide': '/', 'FloorDivide': '/', + 'Or': '|', 'Xor': '^', 'And': '&', 'Rshift': '>>', 'Lshift': '<<', + 'Eq': '==', 'Ne': '!=', + }[op] +}} +{{py: +def zerodiv_check(operand, optype='integer', _is_mod=op == 'Remainder', _needs_check=(order == 'CObj' and c_op in '%/')): + return ((( + 'if (unlikely(zerodivision_check && ((%s) == 0))) {' + ' PyErr_SetString(PyExc_ZeroDivisionError, "%s division%s by zero");' + ' return NULL;' + '}') % (operand, optype, ' or modulo' if _is_mod else '') + ) if _needs_check else '') +}} + +static {{c_ret_type}} {{cfunc_name}}(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { + CYTHON_MAYBE_UNUSED_VAR(intval); + CYTHON_MAYBE_UNUSED_VAR(inplace); + CYTHON_UNUSED_VAR(zerodivision_check); + + {{if op in ('Eq', 'Ne')}} + if (op1 == op2) { + {{return_true if op == 'Eq' else return_false}}; + } + {{endif}} + + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact({{pyval}}))) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; + {{if c_op in '+-%' or op == 'FloorDivide'}} + long x; + {{endif}} + long {{ival}} = PyInt_AS_LONG({{pyval}}); + {{zerodiv_check('b')}} + + {{if op in ('Eq', 'Ne')}} + if (a {{c_op}} b) { + {{return_true}}; + } else { + 
{{return_false}}; + } + {{elif c_op in '+-'}} + // adapted from intobject.c in Py2.7: + // casts in the line below avoid undefined behaviour on overflow + x = (long)((unsigned long)a {{c_op}} (unsigned long)b); + if (likely((x^a) >= 0 || (x^{{ '~' if op == 'Subtract' else '' }}b) >= 0)) + return PyInt_FromLong(x); + return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2); + {{elif c_op == '%'}} + // see CMath.c :: ModInt utility code + x = a % b; + x += ((x != 0) & ((x ^ b) < 0)) * b; + return PyInt_FromLong(x); + {{elif op == 'TrueDivide'}} + if (8 * sizeof(long) <= 53 || likely(labs({{ival}}) <= ((PY_LONG_LONG)1 << 53))) { + return PyFloat_FromDouble((double)a / (double)b); + } + // let Python do the rounding + return PyInt_Type.tp_as_number->nb_{{slot_name}}(op1, op2); + {{elif op == 'FloorDivide'}} + // INT_MIN / -1 is the only case that overflows + if (unlikely(b == -1 && ((unsigned long)a) == 0-(unsigned long)a)) + return PyInt_Type.tp_as_number->nb_{{slot_name}}(op1, op2); + else { + long q, r; + // see CMath.c :: DivInt utility code + q = a / b; + r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + x = q; + } + return PyInt_FromLong(x); + {{elif op == 'Lshift'}} + if (likely(b < (long) (sizeof(long)*8) && a == (a << b) >> b) || !a) { + return PyInt_FromLong(a {{c_op}} b); + } + {{elif c_op == '*'}} +#ifdef HAVE_LONG_LONG + if (sizeof(PY_LONG_LONG) > sizeof(long)) { + PY_LONG_LONG result = (PY_LONG_LONG)a {{c_op}} (PY_LONG_LONG)b; + return (result >= LONG_MIN && result <= LONG_MAX) ? + PyInt_FromLong((long)result) : PyLong_FromLongLong(result); + } +#endif +#if CYTHON_USE_TYPE_SLOTS + return PyInt_Type.tp_as_number->nb_{{slot_name}}(op1, op2); +#else + return PyNumber_{{op}}(op1, op2); +#endif + {{else}} + // other operations are safe, no overflow + return PyInt_FromLong(a {{c_op}} b); + {{endif}} + } + #endif + + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact({{pyval}}))) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; + long {{ival}}{{if op not in ('Eq', 'Ne')}}, x{{endif}}; + {{if op not in ('Eq', 'Ne', 'TrueDivide')}} +#ifdef HAVE_LONG_LONG + const PY_LONG_LONG ll{{'a' if order == 'CObj' else 'b'}} = intval; + PY_LONG_LONG ll{{ival}}, llx; +#endif + {{endif}} + {{if c_op == '&'}} + // special case for &-ing arbitrarily large numbers with known single digit operands + if ((intval & PyLong_MASK) == intval) { + // Calling PyLong_CompactValue() requires the PyLong value to be compact, we only need the last digit. + long last_digit = (long) __Pyx_PyLong_Digits({{pyval}})[0]; + long result = intval & (likely(__Pyx_PyLong_IsPos({{pyval}})) ? last_digit : (PyLong_MASK - last_digit + 1)); + return PyLong_FromLong(result); + } + {{endif}} + // special cases for 0: + - * % / // | ^ & >> << + if (unlikely(__Pyx_PyLong_IsZero({{pyval}}))) { + {{if order == 'CObj' and c_op in '%/'}} + // division by zero! 
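+            // zerodiv_check('0') expands to a guard that always fires here when checking is
+            // enabled (illustrative sketch; the message says "division" or "division or modulo"
+            // depending on the operator):
+            //     if (unlikely(zerodivision_check && ((0) == 0))) {
+            //         PyErr_SetString(PyExc_ZeroDivisionError, "integer division by zero");
+            //         return NULL;
+            //     }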
+            {{zerodiv_check('0')}}
+        {{elif order == 'CObj' and c_op in '+-|^>><<'}}
+            // x == x+0 == x-0 == x|0 == x^0 == x>>0 == x<<0
+            return __Pyx_NewRef(op1);
+        {{elif order == 'CObj' and c_op in '*&'}}
+            // 0 == x*0 == x&0
+            return __Pyx_NewRef(op2);
+        {{elif order == 'ObjC' and c_op in '+|^'}}
+            // x == 0+x == 0|x == 0^x
+            return __Pyx_NewRef(op2);
+        {{elif order == 'ObjC' and c_op == '-'}}
+            // -x == 0-x
+            return PyLong_FromLong(-intval);
+        {{elif order == 'ObjC' and (c_op in '*%&>><<' or op == 'FloorDivide')}}
+            // 0 == 0*x == 0%x == 0&x == 0>>x == 0<<x == 0//x
+            return __Pyx_NewRef(op1);
+        {{endif}}
+        }
+        // handle most common case first to avoid indirect branch and optimise branch prediction
+        if (likely(__Pyx_PyLong_IsCompact({{pyval}}))) {
+            {{ival}} = __Pyx_PyLong_CompactValue({{pyval}});
+        } else {
+            const digit* digits = __Pyx_PyLong_Digits({{pyval}});
+            const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount({{pyval}});
+            switch (size) {
+                {{for _size in range(2, 5)}}
+                {{for _case in (-_size, _size)}}
+                case {{_case}}:
+                    if (8 * sizeof(long) - 1 > {{_size}} * PyLong_SHIFT{{if c_op == '*'}}+30{{endif}}{{if op == 'TrueDivide'}} && {{_size-1}} * PyLong_SHIFT < 53{{endif}}) {
+                        {{ival}} = {{'-' if _case < 0 else ''}}(long) {{pylong_join(_size, 'digits')}};
+                        break;
+                    {{if op not in ('Eq', 'Ne', 'TrueDivide')}}
+                    #ifdef HAVE_LONG_LONG
+                    } else if (8 * sizeof(PY_LONG_LONG) - 1 > {{_size}} * PyLong_SHIFT{{if c_op == '*'}}+30{{endif}}) {
+                        ll{{ival}} = {{'-' if _case < 0 else ''}}(PY_LONG_LONG) {{pylong_join(_size, 'digits', 'unsigned PY_LONG_LONG')}};
+                        goto long_long;
+                    #endif
+                    {{endif}}
+                    }
+                    // if size doesn't fit into a long or PY_LONG_LONG anymore, fall through to default
+                    CYTHON_FALLTHROUGH;
+                {{endfor}}
+                {{endfor}}
+
+                {{if op in ('Eq', 'Ne')}}
+                #if PyLong_SHIFT < 30 && PyLong_SHIFT != 15
+                // unusual setup - your fault
+                default: return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(
+                    PyLong_Type.tp_richcompare({{'op1, op2' if order == 'ObjC' else 'op2, op1'}}, Py_{{op.upper()}}));
+                #else
+                // too large for the long values we allow => definitely not equal
+                default: {{return_false if op == 'Eq' else return_true}};
+                #endif
+                {{else}}
+                default: return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2);
+                {{endif}}
+            }
+        }
+        {{if op in ('Eq', 'Ne')}}
+        if (a {{c_op}} b) {
+            {{return_true}};
+        } else {
+            {{return_false}};
+        }
+        {{else}}
+        {{if c_op == '*'}}
+        CYTHON_UNUSED_VAR(a);
+        CYTHON_UNUSED_VAR(b);
+        #ifdef HAVE_LONG_LONG
+        ll{{ival}} = {{ival}};
+        goto long_long;
+        #else
+        return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2);
+        #endif
+        {{elif c_op == '%'}}
+        // see CMath.c :: ModInt utility code
+        x = a % b;
+        x += ((x != 0) & ((x ^ b) < 0)) * b;
+        {{elif op == 'TrueDivide'}}
+        if ((8 * sizeof(long) <= 53 || likely(labs({{ival}}) <= ((PY_LONG_LONG)1 << 53)))
+                || __Pyx_PyLong_DigitCount({{pyval}}) <= 52 / PyLong_SHIFT) {
+            return PyFloat_FromDouble((double)a / (double)b);
+        }
+        return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2);
+        {{elif op == 'FloorDivide'}}
+        {
+            long q, r;
+            // see CMath.c :: DivInt utility code
+            q = a / b;
+            r = a - q*b;
+            q -= ((r != 0) & ((r ^ b) < 0));
+            x = q;
+        }
+        {{else}}
+        x = a {{c_op}} b;
+        {{if op == 'Lshift'}}
+#ifdef HAVE_LONG_LONG
+        if (unlikely(!(b < (long) (sizeof(long)*8) && a == x >> b)) && a) {
+            ll{{ival}} = {{ival}};
+            goto long_long;
+        }
+#else
+        if (likely(b < (long) (sizeof(long)*8) && a == x >> b) || !a) /* execute return statement below */
+#endif
+        {{endif}}
+        {{endif}}
+        return PyLong_FromLong(x);
+
+        {{if op != 'TrueDivide'}}
+#ifdef HAVE_LONG_LONG
+        long_long:
+        {{if c_op == '%'}}
+        // see CMath.c :: ModInt utility code
+        llx = lla % llb;
+        llx += ((llx != 0) & ((llx ^ llb) < 0)) * llb;
+        {{elif op == 'FloorDivide'}}
+        {
+            PY_LONG_LONG q, r;
+            // see CMath.c :: DivInt utility code
+            q = lla / llb;
+            r = lla - q*llb;
+            q -= ((r != 0) & ((r ^ llb) < 0));
+            llx = q;
+        }
+        {{else}}
+        llx = lla {{c_op}} llb;
+        {{if op == 'Lshift'}}
+        if (likely(lla == llx >> llb)) /* then execute 'return'
below */ + {{endif}} + {{endif}} + return PyLong_FromLongLong(llx); +#endif + {{endif}}{{# if op != 'TrueDivide' #}} + {{endif}}{{# if op in ('Eq', 'Ne') #}} + } + #endif + + {{if c_op in '+-*' or op in ('TrueDivide', 'Eq', 'Ne')}} + if (PyFloat_CheckExact({{pyval}})) { + const long {{'a' if order == 'CObj' else 'b'}} = intval; +#if CYTHON_COMPILING_IN_LIMITED_API + double {{ival}} = __pyx_PyFloat_AsDouble({{pyval}}); +#else + double {{ival}} = PyFloat_AS_DOUBLE({{pyval}}); +#endif + {{if op in ('Eq', 'Ne')}} + if ((double)a {{c_op}} (double)b) { + {{return_true}}; + } else { + {{return_false}}; + } + {{else}} + double result; + {{zerodiv_check('b', 'float')}} + // copied from floatobject.c in Py3.5: + PyFPE_START_PROTECT("{{op.lower() if not op.endswith('Divide') else 'divide'}}", return NULL) + result = ((double)a) {{c_op}} (double)b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); + {{endif}} + } + {{endif}} + + {{if op in ('Eq', 'Ne')}} + return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( + PyObject_RichCompare(op1, op2, Py_{{op.upper()}})); + {{else}} + return (inplace ? PyNumber_InPlace{{op}} : PyNumber_{{op}})(op1, op2); + {{endif}} +} +#endif + +/////////////// PyFloatBinop.proto /////////////// + +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +#if !CYTHON_COMPILING_IN_PYPY +static {{c_ret_type}} __Pyx_PyFloat_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); /*proto*/ +#else +#define __Pyx_PyFloat_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(op1, op2, floatval, inplace, zerodivision_check) \ + {{if op in ('Eq', 'Ne')}}{{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(PyObject_RichCompare(op1, op2, Py_{{op.upper()}})) + {{elif op == 'Divide'}}((inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2))) + {{else}}(inplace ? 
PyNumber_InPlace{{op}}(op1, op2) : PyNumber_{{op}}(op1, op2)) + {{endif}} +#endif + +/////////////// PyFloatBinop /////////////// + +#if !CYTHON_COMPILING_IN_PYPY +{{py: from Cython.Utility import pylong_join }} +{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}} +{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}} +{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}} +{{py: pyval, fval = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }} +{{py: cfunc_name = '__Pyx_PyFloat_%s%s%s' % ('' if ret_type.is_pyobject else 'Bool', op, order) }} +{{py: +c_op = { + 'Add': '+', 'Subtract': '-', 'TrueDivide': '/', 'Divide': '/', 'Remainder': '%', + 'Eq': '==', 'Ne': '!=', + }[op] +}} +{{py: +def zerodiv_check(operand, _is_mod=op == 'Remainder', _needs_check=(order == 'CObj' and c_op in '%/')): + return ((( + 'if (unlikely(zerodivision_check && ((%s) == 0.0))) {' + ' PyErr_SetString(PyExc_ZeroDivisionError, "float division%s by zero");' + ' return NULL;' + '}') % (operand, ' or modulo' if _is_mod else '') + ) if _needs_check else '') +}} + +static {{c_ret_type}} {{cfunc_name}}(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { + const double {{'a' if order == 'CObj' else 'b'}} = floatval; + double {{fval}}{{if op not in ('Eq', 'Ne')}}, result{{endif}}; + CYTHON_UNUSED_VAR(inplace); + CYTHON_UNUSED_VAR(zerodivision_check); + + {{if op in ('Eq', 'Ne')}} + if (op1 == op2) { + {{return_true if op == 'Eq' else return_false}}; + } + {{endif}} + + if (likely(PyFloat_CheckExact({{pyval}}))) { +#if CYTHON_COMPILING_IN_LIMITED_API + {{fval}} = __pyx_PyFloat_AsDouble({{pyval}}); +#else + {{fval}} = PyFloat_AS_DOUBLE({{pyval}}); +#endif + {{zerodiv_check(fval)}} + } else + + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact({{pyval}}))) { + {{fval}} = (double) PyInt_AS_LONG({{pyval}}); + {{zerodiv_check(fval)}} + } else + #endif + + if (likely(PyLong_CheckExact({{pyval}}))) { + #if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsZero({{pyval}})) { + {{fval}} = 0.0; + {{zerodiv_check(fval)}} + } else if (__Pyx_PyLong_IsCompact({{pyval}})) { + {{fval}} = (double) __Pyx_PyLong_CompactValue({{pyval}}); + } else { + const digit* digits = __Pyx_PyLong_Digits({{pyval}}); + const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount({{pyval}}); + switch (size) { + {{for _size in (2, 3, 4)}} + case -{{_size}}: + case {{_size}}: + if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || ({{_size-1}} * PyLong_SHIFT < 53))) { + {{fval}} = (double) {{pylong_join(_size, 'digits')}}; + // let CPython do its own float rounding from 2**53 on (max. consecutive integer in double float) + if ((8 * sizeof(unsigned long) < 53) || ({{_size}} * PyLong_SHIFT < 53) || ({{fval}} < (double) ((PY_LONG_LONG)1 << 53))) { + if (size == {{-_size}}) + {{fval}} = -{{fval}}; + break; + } + } + // Fall through if size doesn't fit safely into a double anymore. + // It may not be obvious that this is a safe fall-through given the "fval < 2**53" + // check above. However, the number of digits that CPython uses for a given PyLong + // value is minimal, and together with the "(size-1) * SHIFT < 53" check above, + // this should make it safe. 
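+                        // Concrete numbers (illustrative, assuming 30-bit digits): two digits can
+                        // carry up to 60 bits, but the inline conversion above is only kept while
+                        // the value stays below 2**53, the largest exactly representable integer
+                        // range of a C double; anything larger falls through to the default case
+                        // (PyLong_AsDouble / tp_richcompare) below.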
+ CYTHON_FALLTHROUGH; + {{endfor}} + default: + #endif + {{if op in ('Eq', 'Ne')}} + return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( + PyFloat_Type.tp_richcompare({{'op1, op2' if order == 'CObj' else 'op2, op1'}}, Py_{{op.upper()}})); + {{else}} + {{fval}} = PyLong_AsDouble({{pyval}}); + if (unlikely({{fval}} == -1.0 && PyErr_Occurred())) return NULL; + {{if zerodiv_check(fval)}} + #if !CYTHON_USE_PYLONG_INTERNALS + {{zerodiv_check(fval)}} + #endif + {{endif}} + {{endif}} + #if CYTHON_USE_PYLONG_INTERNALS + } + } + #endif + } else { + {{if op in ('Eq', 'Ne')}} + return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}( + PyObject_RichCompare(op1, op2, Py_{{op.upper()}})); + {{elif op == 'Divide'}} + return (inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2)); + {{else}} + return (inplace ? PyNumber_InPlace{{op}} : PyNumber_{{op}})(op1, op2); + {{endif}} + } + + {{if op in ('Eq', 'Ne')}} + if (a {{c_op}} b) { + {{return_true}}; + } else { + {{return_false}}; + } + {{else}} + // copied from floatobject.c in Py3.5: + PyFPE_START_PROTECT("{{op.lower() if not op.endswith('Divide') else 'divide'}}", return NULL) + {{if c_op == '%'}} + result = fmod(a, b); + if (result) + result += ((result < 0) ^ (b < 0)) * b; + else + result = copysign(0.0, b); + {{else}} + result = a {{c_op}} b; + {{endif}} + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); + {{endif}} +} +#endif diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Overflow.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Overflow.c new file mode 100644 index 0000000000000000000000000000000000000000..395456c872160e93dbc410b6caa52c945dbb47fd --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Overflow.c @@ -0,0 +1,404 @@ +/* +These functions provide integer arithmetic with integer checking. They do not +actually raise an exception when an overflow is detected, but rather set a bit +in the overflow parameter. (This parameter may be re-used across several +arithmetic operations, so should be or-ed rather than assigned to.) + +The implementation is divided into two parts, the signed and unsigned basecases, +which is where the magic happens, and a generic template matching a specific +type to an implementation based on its (c-compile-time) size and signedness. + +When possible, branching is avoided, and preference is given to speed over +accuracy (a low rate of falsely "detected" overflows are acceptable, +undetected overflows are not). + + +TODO: Hook up checking. +TODO: Conditionally support 128-bit with intmax_t? +*/ + +/////////////// Common.proto /////////////// + +static int __Pyx_check_twos_complement(void) { + if ((-1) != (~0)) { + PyErr_SetString(PyExc_RuntimeError, "Two's complement required for overflow checks."); + return 1; + } else if ((sizeof(short) == sizeof(int))) { + PyErr_SetString(PyExc_RuntimeError, "sizeof(short) < sizeof(int) required for overflow checks."); + return 1; + } else { + return 0; + } +} + +#define __PYX_SIGN_BIT(type) ((((unsigned type) 1) << (sizeof(type) * 8 - 1))) +#define __PYX_HALF_MAX(type) ((((type) 1) << (sizeof(type) * 8 - 2))) +#define __PYX_MIN(type) ((__PYX_IS_UNSIGNED(type) ? 
(type) 0 : 0 - __PYX_HALF_MAX(type) - __PYX_HALF_MAX(type))) +#define __PYX_MAX(type) ((~__PYX_MIN(type))) + +#define __Pyx_add_no_overflow(a, b, overflow) ((a) + (b)) +#define __Pyx_add_const_no_overflow(a, b, overflow) ((a) + (b)) +#define __Pyx_sub_no_overflow(a, b, overflow) ((a) - (b)) +#define __Pyx_sub_const_no_overflow(a, b, overflow) ((a) - (b)) +#define __Pyx_mul_no_overflow(a, b, overflow) ((a) * (b)) +#define __Pyx_mul_const_no_overflow(a, b, overflow) ((a) * (b)) +#define __Pyx_div_no_overflow(a, b, overflow) ((a) / (b)) +#define __Pyx_div_const_no_overflow(a, b, overflow) ((a) / (b)) + +#if defined(__has_builtin) +# if __has_builtin(__builtin_add_overflow) && !defined(__ibmxl__) +# define __PYX_HAVE_BUILTIN_OVERFLOW +# endif +#elif defined(__GNUC__) && (__GNUC__ >= 5) && (!defined(__INTEL_COMPILER) || (__INTEL_COMPILER >= 1800)) +# define __PYX_HAVE_BUILTIN_OVERFLOW +#endif + +#if defined(__GNUC__) +# define __Pyx_is_constant(x) (__builtin_constant_p(x)) +#elif defined(__has_builtin) +# if __has_builtin(__builtin_constant_p) +# define __Pyx_is_constant(x) (__builtin_constant_p(x)) +# endif +#else +# define __Pyx_is_constant(x) (0) +#endif + +/////////////// Common.init /////////////// + +if (likely(__Pyx_check_twos_complement() == 0)); else +// error propagation code is appended automatically + +/////////////// BaseCaseUnsigned.proto /////////////// + +{{if UINT == "long long"}}#ifdef HAVE_LONG_LONG{{endif}} + +static CYTHON_INLINE {{UINT}} __Pyx_add_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow); +static CYTHON_INLINE {{UINT}} __Pyx_sub_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow); +static CYTHON_INLINE {{UINT}} __Pyx_mul_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow); +static CYTHON_INLINE {{UINT}} __Pyx_div_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow); + +// Use these when b is known at compile time. 
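+// For unsigned add/sub the generic functions are already cheap, so the "_const_"
+// names simply alias them below. Illustrative call site (a sketch of what the code
+// generator emits, assuming the "unsigned int" instantiation):
+//     int overflow = 0;
+//     unsigned int r = __Pyx_add_unsigned_int_checking_overflow(x, 1U, &overflow);
+//     if (unlikely(overflow)) { /* raise OverflowError */ }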
+#define __Pyx_add_const_{{NAME}}_checking_overflow __Pyx_add_{{NAME}}_checking_overflow +#define __Pyx_sub_const_{{NAME}}_checking_overflow __Pyx_sub_{{NAME}}_checking_overflow +#if defined(__PYX_HAVE_BUILTIN_OVERFLOW) +#define __Pyx_mul_const_{{NAME}}_checking_overflow __Pyx_mul_{{NAME}}_checking_overflow +#else +static CYTHON_INLINE {{UINT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} constant, int *overflow); +#endif +#define __Pyx_div_const_{{NAME}}_checking_overflow __Pyx_div_{{NAME}}_checking_overflow + +{{if UINT == "long long"}}#endif{{endif}} + +/////////////// BaseCaseUnsigned /////////////// + +{{if UINT == "long long"}}#ifdef HAVE_LONG_LONG{{endif}} + +#if defined(__PYX_HAVE_BUILTIN_OVERFLOW) + +static CYTHON_INLINE {{UINT}} __Pyx_add_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) { + {{UINT}} result; + *overflow |= __builtin_add_overflow(a, b, &result); + return result; +} + +static CYTHON_INLINE {{UINT}} __Pyx_sub_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) { + {{UINT}} result; + *overflow |= __builtin_sub_overflow(a, b, &result); + return result; +} + +static CYTHON_INLINE {{UINT}} __Pyx_mul_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) { + {{UINT}} result; + *overflow |= __builtin_mul_overflow(a, b, &result); + return result; +} + +#else + +static CYTHON_INLINE {{UINT}} __Pyx_add_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) { + {{UINT}} r = a + b; + *overflow |= r < a; + return r; +} + +static CYTHON_INLINE {{UINT}} __Pyx_sub_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) { + {{UINT}} r = a - b; + *overflow |= r > a; + return r; +} + +static CYTHON_INLINE {{UINT}} __Pyx_mul_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) { + // if we have a constant, use the constant version + if (__Pyx_is_constant(b)) { + return __Pyx_mul_const_{{NAME}}_checking_overflow(a, b, overflow); + } else if (__Pyx_is_constant(a)) { + return __Pyx_mul_const_{{NAME}}_checking_overflow(b, a, overflow); + } else if ((sizeof({{UINT}}) < sizeof(unsigned long))) { + unsigned long big_r = ((unsigned long) a) * ((unsigned long) b); + {{UINT}} r = ({{UINT}}) big_r; + *overflow |= big_r != r; + return r; +#ifdef HAVE_LONG_LONG + } else if ((sizeof({{UINT}}) < sizeof(unsigned PY_LONG_LONG))) { + unsigned PY_LONG_LONG big_r = ((unsigned PY_LONG_LONG) a) * ((unsigned PY_LONG_LONG) b); + {{UINT}} r = ({{UINT}}) big_r; + *overflow |= big_r != r; + return r; +#endif + } else { + return __Pyx_mul_const_{{NAME}}_checking_overflow(a, b, overflow); + } +} + +static CYTHON_INLINE {{UINT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) { + // note that deliberately the overflow check is written such that it divides by b; this + // function is used when b is a constant thus the compiler should be able to eliminate the + // (very slow on most CPUs!) 
division operation + {{UINT}} prod; + if (__Pyx_is_constant(a) && !__Pyx_is_constant(b)) { + // if a is a compile-time constant and b isn't, swap them + {{UINT}} temp = b; + b = a; + a = temp; + } + prod = a * b; + if (b != 0) + *overflow |= a > (__PYX_MAX({{UINT}}) / b); + return prod; +} +#endif // __PYX_HAVE_BUILTIN_OVERFLOW + + +static CYTHON_INLINE {{UINT}} __Pyx_div_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) { + if (b == 0) { + *overflow |= 1; + return 0; + } + return a / b; +} + +{{if UINT == "long long"}}#endif{{endif}} + + +/////////////// BaseCaseSigned.proto /////////////// + +{{if INT == "long long"}}#ifdef HAVE_LONG_LONG{{endif}} + +static CYTHON_INLINE {{INT}} __Pyx_add_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow); +static CYTHON_INLINE {{INT}} __Pyx_sub_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow); +static CYTHON_INLINE {{INT}} __Pyx_mul_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow); +static CYTHON_INLINE {{INT}} __Pyx_div_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow); + + +// Use when b is known at compile time. +#define __Pyx_add_const_{{NAME}}_checking_overflow __Pyx_add_{{NAME}}_checking_overflow +#define __Pyx_sub_const_{{NAME}}_checking_overflow __Pyx_sub_{{NAME}}_checking_overflow +#if defined(__PYX_HAVE_BUILTIN_OVERFLOW) +#define __Pyx_mul_const_{{NAME}}_checking_overflow __Pyx_mul_{{NAME}}_checking_overflow +#else +static CYTHON_INLINE {{INT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} constant, int *overflow); +#endif +#define __Pyx_div_const_{{NAME}}_checking_overflow __Pyx_div_{{NAME}}_checking_overflow + +{{if INT == "long long"}}#endif{{endif}} + +/////////////// BaseCaseSigned /////////////// + +{{if INT == "long long"}}#ifdef HAVE_LONG_LONG{{endif}} + +#if defined(__PYX_HAVE_BUILTIN_OVERFLOW) + +static CYTHON_INLINE {{INT}} __Pyx_add_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) { + {{INT}} result; + *overflow |= __builtin_add_overflow(a, b, &result); + return result; +} + +static CYTHON_INLINE {{INT}} __Pyx_sub_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) { + {{INT}} result; + *overflow |= __builtin_sub_overflow(a, b, &result); + return result; +} + +static CYTHON_INLINE {{INT}} __Pyx_mul_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) { + {{INT}} result; + *overflow |= __builtin_mul_overflow(a, b, &result); + return result; +} + +#else + +static CYTHON_INLINE {{INT}} __Pyx_add_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) { + if ((sizeof({{INT}}) < sizeof(long))) { + long big_r = ((long) a) + ((long) b); + {{INT}} r = ({{INT}}) big_r; + *overflow |= big_r != r; + return r; +#ifdef HAVE_LONG_LONG + } else if ((sizeof({{INT}}) < sizeof(PY_LONG_LONG))) { + PY_LONG_LONG big_r = ((PY_LONG_LONG) a) + ((PY_LONG_LONG) b); + {{INT}} r = ({{INT}}) big_r; + *overflow |= big_r != r; + return r; +#endif + } else { + // Signed overflow undefined, but unsigned overflow is well defined. Casting is + // implementation-defined, but we assume two's complement (see __Pyx_check_twos_complement + // above), and arithmetic in two's-complement is the same as unsigned arithmetic. + unsigned {{INT}} r = (unsigned {{INT}}) a + (unsigned {{INT}}) b; + // Overflow happened if the operands have the same sign, but the result + // has opposite sign. 
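+        // Worked example (illustrative, int-sized operands): INT_MAX + 1 wraps to INT_MIN;
+        // a and b both have sign bit 0 while r has sign bit 1, so both XOR terms have the
+        // top bit set and the shift below leaves exactly 1 to be or-ed into *overflow.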
+        *overflow |= (((unsigned {{INT}})a ^ r) & ((unsigned {{INT}})b ^ r)) >> (8 * sizeof({{INT}}) - 1);
+        return ({{INT}}) r;
+    }
+}
+
+static CYTHON_INLINE {{INT}} __Pyx_sub_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
+    // Compilers don't handle widening as well in the subtraction case, so don't bother
+    unsigned {{INT}} r = (unsigned {{INT}}) a - (unsigned {{INT}}) b;
+    // Overflow happened if the operands have differing signs, and the result
+    // has the opposite sign to a.
+    *overflow |= (((unsigned {{INT}})a ^ (unsigned {{INT}})b) & ((unsigned {{INT}})a ^ r)) >> (8 * sizeof({{INT}}) - 1);
+    return ({{INT}}) r;
+}
+
+static CYTHON_INLINE {{INT}} __Pyx_mul_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
+    // if we have a constant, use the constant version
+    if (__Pyx_is_constant(b)) {
+        return __Pyx_mul_const_{{NAME}}_checking_overflow(a, b, overflow);
+    } else if (__Pyx_is_constant(a)) {
+        return __Pyx_mul_const_{{NAME}}_checking_overflow(b, a, overflow);
+    } else if ((sizeof({{INT}}) < sizeof(long))) {
+        long big_r = ((long) a) * ((long) b);
+        {{INT}} r = ({{INT}}) big_r;
+        *overflow |= big_r != r;
+        return ({{INT}}) r;
+#ifdef HAVE_LONG_LONG
+    } else if ((sizeof({{INT}}) < sizeof(PY_LONG_LONG))) {
+        PY_LONG_LONG big_r = ((PY_LONG_LONG) a) * ((PY_LONG_LONG) b);
+        {{INT}} r = ({{INT}}) big_r;
+        *overflow |= big_r != r;
+        return ({{INT}}) r;
+#endif
+    } else {
+        return __Pyx_mul_const_{{NAME}}_checking_overflow(a, b, overflow);
+    }
+}
+
+static CYTHON_INLINE {{INT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
+    // note that deliberately all these comparisons are written such that they divide by b; this
+    // function is used when b is a constant, thus the compiler should be able to eliminate the
+    // (very slow on most CPUs!)
division operations + if (__Pyx_is_constant(a) && !__Pyx_is_constant(b)) { + // if a is a compile-time constant and b isn't, swap them + {{INT}} temp = b; + b = a; + a = temp; + } + if (b > 1) { + *overflow |= a > __PYX_MAX({{INT}}) / b; + *overflow |= a < __PYX_MIN({{INT}}) / b; + } else if (b == -1) { + *overflow |= a == __PYX_MIN({{INT}}); + } else if (b < -1) { + *overflow |= a > __PYX_MIN({{INT}}) / b; + *overflow |= a < __PYX_MAX({{INT}}) / b; + } + return ({{INT}}) (((unsigned {{INT}})a) * ((unsigned {{INT}}) b)); +} +#endif // defined(__PYX_HAVE_BUILTIN_OVERFLOW) + +static CYTHON_INLINE {{INT}} __Pyx_div_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) { + if (b == 0) { + *overflow |= 1; + return 0; + } + *overflow |= a == __PYX_MIN({{INT}}) && b == -1; + return ({{INT}}) ((unsigned {{INT}}) a / (unsigned {{INT}}) b); +} + +{{if INT == "long long"}}#endif{{endif}} + + +/////////////// SizeCheck.init /////////////// + +if (likely(__Pyx_check_sane_{{NAME}}() == 0)); else +// error propagation code is appended automatically + +/////////////// SizeCheck.proto /////////////// + +static int __Pyx_check_sane_{{NAME}}(void) { + if (((sizeof({{TYPE}}) <= sizeof(int)) || +#ifdef HAVE_LONG_LONG + (sizeof({{TYPE}}) == sizeof(PY_LONG_LONG)) || +#endif + (sizeof({{TYPE}}) == sizeof(long)))) { + return 0; + } else { + PyErr_Format(PyExc_RuntimeError, \ + "Bad size for int type %.{{max(60, len(TYPE))}}s: %d", "{{TYPE}}", (int) sizeof({{TYPE}})); + return 1; + } +} + + +/////////////// Binop.proto /////////////// + +static CYTHON_INLINE {{TYPE}} __Pyx_{{BINOP}}_{{NAME}}_checking_overflow({{TYPE}} a, {{TYPE}} b, int *overflow); + +/////////////// Binop /////////////// + +static CYTHON_INLINE {{TYPE}} __Pyx_{{BINOP}}_{{NAME}}_checking_overflow({{TYPE}} a, {{TYPE}} b, int *overflow) { + if ((sizeof({{TYPE}}) < sizeof(int))) { + return __Pyx_{{BINOP}}_no_overflow(a, b, overflow); + } else if (__PYX_IS_UNSIGNED({{TYPE}})) { + if ((sizeof({{TYPE}}) == sizeof(unsigned int))) { + return ({{TYPE}}) __Pyx_{{BINOP}}_unsigned_int_checking_overflow(a, b, overflow); + } else if ((sizeof({{TYPE}}) == sizeof(unsigned long))) { + return ({{TYPE}}) __Pyx_{{BINOP}}_unsigned_long_checking_overflow(a, b, overflow); +#ifdef HAVE_LONG_LONG + } else if ((sizeof({{TYPE}}) == sizeof(unsigned PY_LONG_LONG))) { + return ({{TYPE}}) __Pyx_{{BINOP}}_unsigned_long_long_checking_overflow(a, b, overflow); +#endif + } else { + abort(); return 0; /* handled elsewhere */ + } + } else { + if ((sizeof({{TYPE}}) == sizeof(int))) { + return ({{TYPE}}) __Pyx_{{BINOP}}_int_checking_overflow(a, b, overflow); + } else if ((sizeof({{TYPE}}) == sizeof(long))) { + return ({{TYPE}}) __Pyx_{{BINOP}}_long_checking_overflow(a, b, overflow); +#ifdef HAVE_LONG_LONG + } else if ((sizeof({{TYPE}}) == sizeof(PY_LONG_LONG))) { + return ({{TYPE}}) __Pyx_{{BINOP}}_long_long_checking_overflow(a, b, overflow); +#endif + } else { + abort(); return 0; /* handled elsewhere */ + } + } +} + +/////////////// LeftShift.proto /////////////// + +static CYTHON_INLINE {{TYPE}} __Pyx_lshift_{{NAME}}_checking_overflow({{TYPE}} a, {{TYPE}} b, int *overflow) { + int overflow_check = +#if {{SIGNED}} + (a < 0) || (b < 0) || +#endif + // the following must be a _logical_ OR as the RHS is undefined if the LHS is true + (b >= ({{TYPE}}) (8 * sizeof({{TYPE}}))) || (a > (__PYX_MAX({{TYPE}}) >> b)); + if (overflow_check) { + *overflow |= 1; + return 0; + } else { + return a << b; + } +} +#define __Pyx_lshift_const_{{NAME}}_checking_overflow 
__Pyx_lshift_{{NAME}}_checking_overflow + + +/////////////// UnaryNegOverflows.proto /////////////// + +// from intobject.c +#define __Pyx_UNARY_NEG_WOULD_OVERFLOW(x) \ + (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Printing.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Printing.c new file mode 100644 index 0000000000000000000000000000000000000000..71aa7eafe95d64f82afe719c479e3aac32f0bd6c --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Printing.c @@ -0,0 +1,176 @@ +////////////////////// Print.proto ////////////////////// +//@substitute: naming + +static int __Pyx_Print(PyObject*, PyObject *, int); /*proto*/ +#if CYTHON_COMPILING_IN_PYPY || PY_MAJOR_VERSION >= 3 +static PyObject* $print_function = 0; +static PyObject* $print_function_kwargs = 0; +#endif + +////////////////////// Print.cleanup ////////////////////// +//@substitute: naming + +#if CYTHON_COMPILING_IN_PYPY || PY_MAJOR_VERSION >= 3 +Py_CLEAR($print_function); +Py_CLEAR($print_function_kwargs); +#endif + +////////////////////// Print ////////////////////// +//@substitute: naming + +#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3 +static PyObject *__Pyx_GetStdout(void) { + PyObject *f = PySys_GetObject((char *)"stdout"); + if (!f) { + PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout"); + } + return f; +} + +static int __Pyx_Print(PyObject* f, PyObject *arg_tuple, int newline) { + int i; + + if (!f) { + if (!(f = __Pyx_GetStdout())) + return -1; + } + Py_INCREF(f); + for (i=0; i < PyTuple_GET_SIZE(arg_tuple); i++) { + PyObject* v; + if (PyFile_SoftSpace(f, 1)) { + if (PyFile_WriteString(" ", f) < 0) + goto error; + } + v = PyTuple_GET_ITEM(arg_tuple, i); + if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0) + goto error; + if (PyString_Check(v)) { + char *s = PyString_AsString(v); + Py_ssize_t len = PyString_Size(v); + if (len > 0) { + // append soft-space if necessary (not using isspace() due to C/C++ problem on MacOS-X) + switch (s[len-1]) { + case ' ': break; + case '\f': case '\r': case '\n': case '\t': case '\v': + PyFile_SoftSpace(f, 0); + break; + default: break; + } + } + } + } + if (newline) { + if (PyFile_WriteString("\n", f) < 0) + goto error; + PyFile_SoftSpace(f, 0); + } + Py_DECREF(f); + return 0; +error: + Py_DECREF(f); + return -1; +} + +#else /* Python 3 has a print function */ + +static int __Pyx_Print(PyObject* stream, PyObject *arg_tuple, int newline) { + PyObject* kwargs = 0; + PyObject* result = 0; + PyObject* end_string; + if (unlikely(!$print_function)) { + $print_function = PyObject_GetAttr($builtins_cname, PYIDENT("print")); + if (!$print_function) + return -1; + } + if (stream) { + kwargs = PyDict_New(); + if (unlikely(!kwargs)) + return -1; + if (unlikely(PyDict_SetItem(kwargs, PYIDENT("file"), stream) < 0)) + goto bad; + if (!newline) { + end_string = PyUnicode_FromStringAndSize(" ", 1); + if (unlikely(!end_string)) + goto bad; + if (PyDict_SetItem(kwargs, PYIDENT("end"), end_string) < 0) { + Py_DECREF(end_string); + goto bad; + } + Py_DECREF(end_string); + } + } else if (!newline) { + if (unlikely(!$print_function_kwargs)) { + $print_function_kwargs = PyDict_New(); + if (unlikely(!$print_function_kwargs)) + return -1; + end_string = PyUnicode_FromStringAndSize(" ", 1); + if (unlikely(!end_string)) + return -1; + if (PyDict_SetItem($print_function_kwargs, PYIDENT("end"), end_string) < 0) { + Py_DECREF(end_string); + return -1; + } + Py_DECREF(end_string); + } + kwargs = 
$print_function_kwargs; + } + result = PyObject_Call($print_function, arg_tuple, kwargs); + if (unlikely(kwargs) && (kwargs != $print_function_kwargs)) + Py_DECREF(kwargs); + if (!result) + return -1; + Py_DECREF(result); + return 0; +bad: + if (kwargs != $print_function_kwargs) + Py_XDECREF(kwargs); + return -1; +} +#endif + +////////////////////// PrintOne.proto ////////////////////// +//@requires: Print + +static int __Pyx_PrintOne(PyObject* stream, PyObject *o); /*proto*/ + +////////////////////// PrintOne ////////////////////// + +#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3 + +static int __Pyx_PrintOne(PyObject* f, PyObject *o) { + if (!f) { + if (!(f = __Pyx_GetStdout())) + return -1; + } + Py_INCREF(f); + if (PyFile_SoftSpace(f, 0)) { + if (PyFile_WriteString(" ", f) < 0) + goto error; + } + if (PyFile_WriteObject(o, f, Py_PRINT_RAW) < 0) + goto error; + if (PyFile_WriteString("\n", f) < 0) + goto error; + Py_DECREF(f); + return 0; +error: + Py_DECREF(f); + return -1; + /* the line below is just to avoid C compiler + * warnings about unused functions */ + return __Pyx_Print(f, NULL, 0); +} + +#else /* Python 3 has a print function */ + +static int __Pyx_PrintOne(PyObject* stream, PyObject *o) { + int res; + PyObject* arg_tuple = PyTuple_Pack(1, o); + if (unlikely(!arg_tuple)) + return -1; + res = __Pyx_Print(stream, arg_tuple, 1); + Py_DECREF(arg_tuple); + return res; +} + +#endif diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Profile.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Profile.c new file mode 100644 index 0000000000000000000000000000000000000000..2b8564b226fd5b3b74f52ca3f6dc7d25fd581762 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/Profile.c @@ -0,0 +1,383 @@ +/////////////// Profile.proto /////////////// +//@requires: Exceptions.c::PyErrFetchRestore +//@substitute: naming + +// Note that cPython ignores PyTrace_EXCEPTION, +// but maybe some other profilers don't. 
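+// Usage note (a sketch, not from the original file): these hooks are normally
+// requested per module, e.g. with the "# cython: profile=True" directive; line
+// tracing additionally requires compiling the generated C file with the
+// CYTHON_TRACE macro set (typically -DCYTHON_TRACE=1), as configured below.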
+ +#ifndef CYTHON_PROFILE +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + #define CYTHON_PROFILE 0 +#else + #define CYTHON_PROFILE 1 +#endif +#endif + +#ifndef CYTHON_TRACE_NOGIL + #define CYTHON_TRACE_NOGIL 0 +#else + #if CYTHON_TRACE_NOGIL && !defined(CYTHON_TRACE) + #define CYTHON_TRACE 1 + #endif +#endif + +#ifndef CYTHON_TRACE + #define CYTHON_TRACE 0 +#endif + +#if CYTHON_TRACE + #undef CYTHON_PROFILE_REUSE_FRAME +#endif + +#ifndef CYTHON_PROFILE_REUSE_FRAME + #define CYTHON_PROFILE_REUSE_FRAME 0 +#endif + +#if CYTHON_PROFILE || CYTHON_TRACE + + #include "compile.h" + #include "frameobject.h" + #include "traceback.h" +#if PY_VERSION_HEX >= 0x030b00a6 + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif + + #if CYTHON_PROFILE_REUSE_FRAME + #define CYTHON_FRAME_MODIFIER static + #define CYTHON_FRAME_DEL(frame) + #else + #define CYTHON_FRAME_MODIFIER + #define CYTHON_FRAME_DEL(frame) Py_CLEAR(frame) + #endif + + #define __Pyx_TraceDeclarations \ + static PyCodeObject *$frame_code_cname = NULL; \ + CYTHON_FRAME_MODIFIER PyFrameObject *$frame_cname = NULL; \ + int __Pyx_use_tracing = 0; + + #define __Pyx_TraceFrameInit(codeobj) \ + if (codeobj) $frame_code_cname = (PyCodeObject*) codeobj; + + +#if PY_VERSION_HEX >= 0x030b00a2 + #if PY_VERSION_HEX >= 0x030C00b1 + #define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \ + ((!(check_tracing) || !(tstate)->tracing) && \ + (!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc))) + #else + #define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \ + (unlikely((tstate)->cframe->use_tracing) && \ + (!(check_tracing) || !(tstate)->tracing) && \ + (!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc))) + #endif + + #define __Pyx_EnterTracing(tstate) PyThreadState_EnterTracing(tstate) + #define __Pyx_LeaveTracing(tstate) PyThreadState_LeaveTracing(tstate) + +#elif PY_VERSION_HEX >= 0x030a00b1 + #define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \ + (unlikely((tstate)->cframe->use_tracing) && \ + (!(check_tracing) || !(tstate)->tracing) && \ + (!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc))) + + #define __Pyx_EnterTracing(tstate) \ + do { tstate->tracing++; tstate->cframe->use_tracing = 0; } while (0) + + #define __Pyx_LeaveTracing(tstate) \ + do { \ + tstate->tracing--; \ + tstate->cframe->use_tracing = ((CYTHON_TRACE && tstate->c_tracefunc != NULL) \ + || tstate->c_profilefunc != NULL); \ + } while (0) + +#else + #define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \ + (unlikely((tstate)->use_tracing) && \ + (!(check_tracing) || !(tstate)->tracing) && \ + (!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc))) + + #define __Pyx_EnterTracing(tstate) \ + do { tstate->tracing++; tstate->use_tracing = 0; } while (0) + + #define __Pyx_LeaveTracing(tstate) \ + do { \ + tstate->tracing--; \ + tstate->use_tracing = ((CYTHON_TRACE && tstate->c_tracefunc != NULL) \ + || tstate->c_profilefunc != NULL); \ + } while (0) + +#endif + + #ifdef WITH_THREAD + #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \ + if (nogil) { \ + if (CYTHON_TRACE_NOGIL) { \ + PyThreadState *tstate; \ + PyGILState_STATE state = PyGILState_Ensure(); \ + tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 1, 1)) { \ + __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, 
srcfile, firstlineno); \ + } \ + PyGILState_Release(state); \ + if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ + } \ + } else { \ + PyThreadState* tstate = PyThreadState_GET(); \ + if (__Pyx_IsTracing(tstate, 1, 1)) { \ + __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \ + if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ + } \ + } + #else + #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \ + { PyThreadState* tstate = PyThreadState_GET(); \ + if (__Pyx_IsTracing(tstate, 1, 1)) { \ + __Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \ + if (unlikely(__Pyx_use_tracing < 0)) goto_error; \ + } \ + } + #endif + + #define __Pyx_TraceException() \ + if (likely(!__Pyx_use_tracing)); else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 1)) { \ + __Pyx_EnterTracing(tstate); \ + PyObject *exc_info = __Pyx_GetExceptionTuple(tstate); \ + if (exc_info) { \ + if (CYTHON_TRACE && tstate->c_tracefunc) \ + tstate->c_tracefunc( \ + tstate->c_traceobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \ + tstate->c_profilefunc( \ + tstate->c_profileobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \ + Py_DECREF(exc_info); \ + } \ + __Pyx_LeaveTracing(tstate); \ + } \ + } + + static void __Pyx_call_return_trace_func(PyThreadState *tstate, PyFrameObject *frame, PyObject *result) { + PyObject *type, *value, *traceback; + __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); + __Pyx_EnterTracing(tstate); + if (CYTHON_TRACE && tstate->c_tracefunc) + tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_RETURN, result); + if (tstate->c_profilefunc) + tstate->c_profilefunc(tstate->c_profileobj, frame, PyTrace_RETURN, result); + CYTHON_FRAME_DEL(frame); + __Pyx_LeaveTracing(tstate); + __Pyx_ErrRestoreInState(tstate, type, value, traceback); + } + + #ifdef WITH_THREAD + #define __Pyx_TraceReturn(result, nogil) \ + if (likely(!__Pyx_use_tracing)); else { \ + if (nogil) { \ + if (CYTHON_TRACE_NOGIL) { \ + PyThreadState *tstate; \ + PyGILState_STATE state = PyGILState_Ensure(); \ + tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0)) { \ + __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ + } \ + PyGILState_Release(state); \ + } \ + } else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0)) { \ + __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ + } \ + } \ + } + #else + #define __Pyx_TraceReturn(result, nogil) \ + if (likely(!__Pyx_use_tracing)); else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0)) { \ + __Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \ + } \ + } + #endif + + static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno); /*proto*/ + static int __Pyx_TraceSetupAndCall(PyCodeObject** code, PyFrameObject** frame, PyThreadState* tstate, const char *funcname, const char *srcfile, int firstlineno); /*proto*/ + +#else + + #define __Pyx_TraceDeclarations + #define __Pyx_TraceFrameInit(codeobj) + // mark error label as used to avoid compiler warnings + #define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) if ((1)); else goto_error; + #define __Pyx_TraceException() + #define __Pyx_TraceReturn(result, nogil) + +#endif /* CYTHON_PROFILE */ + +#if 
CYTHON_TRACE + // see call_trace_protected() in CPython's ceval.c + static int __Pyx_call_line_trace_func(PyThreadState *tstate, PyFrameObject *frame, int lineno) { + int ret; + PyObject *type, *value, *traceback; + __Pyx_ErrFetchInState(tstate, &type, &value, &traceback); + __Pyx_PyFrame_SetLineNumber(frame, lineno); + __Pyx_EnterTracing(tstate); + + ret = tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_LINE, NULL); + + __Pyx_LeaveTracing(tstate); + if (likely(!ret)) { + __Pyx_ErrRestoreInState(tstate, type, value, traceback); + } else { + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(traceback); + } + return ret; + } + + #ifdef WITH_THREAD + #define __Pyx_TraceLine(lineno, nogil, goto_error) \ + if (likely(!__Pyx_use_tracing)); else { \ + if (nogil) { \ + if (CYTHON_TRACE_NOGIL) { \ + int ret = 0; \ + PyThreadState *tstate; \ + PyGILState_STATE state = __Pyx_PyGILState_Ensure(); \ + tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \ + ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ + } \ + __Pyx_PyGILState_Release(state); \ + if (unlikely(ret)) goto_error; \ + } \ + } else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \ + int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ + if (unlikely(ret)) goto_error; \ + } \ + } \ + } + #else + #define __Pyx_TraceLine(lineno, nogil, goto_error) \ + if (likely(!__Pyx_use_tracing)); else { \ + PyThreadState* tstate = __Pyx_PyThreadState_Current; \ + if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \ + int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \ + if (unlikely(ret)) goto_error; \ + } \ + } + #endif +#else + // mark error label as used to avoid compiler warnings + #define __Pyx_TraceLine(lineno, nogil, goto_error) if ((1)); else goto_error; +#endif + +/////////////// Profile /////////////// +//@substitute: naming + +#if CYTHON_PROFILE + +static int __Pyx_TraceSetupAndCall(PyCodeObject** code, + PyFrameObject** frame, + PyThreadState* tstate, + const char *funcname, + const char *srcfile, + int firstlineno) { + PyObject *type, *value, *traceback; + int retval; + if (*frame == NULL || !CYTHON_PROFILE_REUSE_FRAME) { + if (*code == NULL) { + *code = __Pyx_createFrameCodeObject(funcname, srcfile, firstlineno); + if (*code == NULL) return 0; + } + *frame = PyFrame_New( + tstate, /*PyThreadState *tstate*/ + *code, /*PyCodeObject *code*/ + $moddict_cname, /*PyObject *globals*/ + 0 /*PyObject *locals*/ + ); + if (*frame == NULL) return 0; + if (CYTHON_TRACE && (*frame)->f_trace == NULL) { + // this enables "f_lineno" lookup, at least in CPython ... 
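+            // (Older CPython versions return frame->f_lineno directly only while f_trace
+            // is set, so a Py_None placeholder is enough; no real trace function is installed.)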
+            Py_INCREF(Py_None);
+            (*frame)->f_trace = Py_None;
+        }
+#if PY_VERSION_HEX < 0x030400B1
+    } else {
+        (*frame)->f_tstate = tstate;
+#endif
+    }
+    __Pyx_PyFrame_SetLineNumber(*frame, firstlineno);
+
+    retval = 1;
+    __Pyx_EnterTracing(tstate);
+    __Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
+
+    #if CYTHON_TRACE
+    if (tstate->c_tracefunc)
+        retval = tstate->c_tracefunc(tstate->c_traceobj, *frame, PyTrace_CALL, NULL) == 0;
+    if (retval && tstate->c_profilefunc)
+    #endif
+        retval = tstate->c_profilefunc(tstate->c_profileobj, *frame, PyTrace_CALL, NULL) == 0;
+
+    __Pyx_LeaveTracing(tstate);
+    if (retval) {
+        __Pyx_ErrRestoreInState(tstate, type, value, traceback);
+        return __Pyx_IsTracing(tstate, 0, 0) && retval;
+    } else {
+        Py_XDECREF(type);
+        Py_XDECREF(value);
+        Py_XDECREF(traceback);
+        return -1;
+    }
+}
+
+static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno) {
+    PyCodeObject *py_code = 0;
+
+#if PY_MAJOR_VERSION >= 3
+    py_code = PyCode_NewEmpty(srcfile, funcname, firstlineno);
+    // make CPython use a fresh dict for "f_locals" at need (see GH #1836)
+    if (likely(py_code)) {
+        py_code->co_flags |= CO_OPTIMIZED | CO_NEWLOCALS;
+    }
+#else
+    PyObject *py_srcfile = 0;
+    PyObject *py_funcname = 0;
+
+    py_funcname = PyString_FromString(funcname);
+    if (unlikely(!py_funcname)) goto bad;
+    py_srcfile = PyString_FromString(srcfile);
+    if (unlikely(!py_srcfile)) goto bad;
+
+    py_code = PyCode_New(
+        0,            /*int argcount,*/
+        0,            /*int nlocals,*/
+        0,            /*int stacksize,*/
+        // make CPython use a fresh dict for "f_locals" at need (see GH #1836)
+        CO_OPTIMIZED | CO_NEWLOCALS,  /*int flags,*/
+        $empty_bytes, /*PyObject *code,*/
+        $empty_tuple, /*PyObject *consts,*/
+        $empty_tuple, /*PyObject *names,*/
+        $empty_tuple, /*PyObject *varnames,*/
+        $empty_tuple, /*PyObject *freevars,*/
+        $empty_tuple, /*PyObject *cellvars,*/
+        py_srcfile,   /*PyObject *filename,*/
+        py_funcname,  /*PyObject *name,*/
+        firstlineno,  /*int firstlineno,*/
+        $empty_bytes  /*PyObject *lnotab*/
+    );
+
+bad:
+    Py_XDECREF(py_srcfile);
+    Py_XDECREF(py_funcname);
+#endif
+
+    return py_code;
+}
+
+#endif /* CYTHON_PROFILE */
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/StringTools.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/StringTools.c
new file mode 100644
index 0000000000000000000000000000000000000000..db36bb38e4274ee435462c190a0de946ae10baf9
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/StringTools.c
@@ -0,0 +1,1271 @@
+
+//////////////////// IncludeStringH.proto ////////////////////
+
+#include <string.h>
+
+//////////////////// IncludeCppStringH.proto ////////////////////
+
+#include <string>
+
+
+//////////////////// ssize_pyunicode_strlen.proto ////////////////////
+
+static CYTHON_INLINE Py_ssize_t __Pyx_Py_UNICODE_ssize_strlen(const Py_UNICODE *u);/*proto*/
+
+//////////////////// ssize_pyunicode_strlen ////////////////////
+
+static CYTHON_INLINE Py_ssize_t __Pyx_Py_UNICODE_ssize_strlen(const Py_UNICODE *u) {
+    size_t len = __Pyx_Py_UNICODE_strlen(u);
+    if (unlikely(len > PY_SSIZE_T_MAX)) {
+        PyErr_SetString(PyExc_OverflowError, "Py_UNICODE string is too long");
+        return -1;
+    }
+    return (Py_ssize_t) len;
+}
+
+
+//////////////////// InitStrings.proto ////////////////////
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
+
+//////////////////// InitStrings ////////////////////
+
+#if PY_MAJOR_VERSION >= 3
+static int __Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str) {
+    if (t.is_unicode | t.is_str) {
+        if
(t.intern) { + *str = PyUnicode_InternFromString(t.s); + } else if (t.encoding) { + *str = PyUnicode_Decode(t.s, t.n - 1, t.encoding, NULL); + } else { + *str = PyUnicode_FromStringAndSize(t.s, t.n - 1); + } + } else { + *str = PyBytes_FromStringAndSize(t.s, t.n - 1); + } + if (!*str) + return -1; + // initialise cached hash value + if (PyObject_Hash(*str) == -1) + return -1; + return 0; +} +#endif + +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION >= 3 /* Python 3+ has unicode identifiers */ + __Pyx_InitString(*t, t->p); + #else + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + if (!*t->p) + return -1; + // initialise cached hash value + if (PyObject_Hash(*t->p) == -1) + return -1; + #endif + ++t; + } + return 0; +} + +//////////////////// BytesContains.proto //////////////////// + +static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character); /*proto*/ + +//////////////////// BytesContains //////////////////// +//@requires: IncludeStringH + +static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character) { + const Py_ssize_t length = PyBytes_GET_SIZE(bytes); + char* char_start = PyBytes_AS_STRING(bytes); + return memchr(char_start, (unsigned char)character, (size_t)length) != NULL; +} + + +//////////////////// PyUCS4InUnicode.proto //////////////////// + +static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 character); /*proto*/ + +//////////////////// PyUCS4InUnicode //////////////////// + +#if PY_VERSION_HEX < 0x03090000 || (defined(PyUnicode_WCHAR_KIND) && defined(PyUnicode_AS_UNICODE)) + +#if PY_VERSION_HEX < 0x03090000 +#define __Pyx_PyUnicode_AS_UNICODE(op) PyUnicode_AS_UNICODE(op) +#define __Pyx_PyUnicode_GET_SIZE(op) PyUnicode_GET_SIZE(op) +#else +// Avoid calling deprecated C-API functions in Py3.9+ that PEP-623 schedules for removal in Py3.12. 
+// https://www.python.org/dev/peps/pep-0623/ +#define __Pyx_PyUnicode_AS_UNICODE(op) (((PyASCIIObject *)(op))->wstr) +#define __Pyx_PyUnicode_GET_SIZE(op) ((PyCompactUnicodeObject *)(op))->wstr_length +#endif + +#if !defined(Py_UNICODE_SIZE) || Py_UNICODE_SIZE == 2 +static int __Pyx_PyUnicodeBufferContainsUCS4_SP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) { + /* handle surrogate pairs for Py_UNICODE buffers in 16bit Unicode builds */ + Py_UNICODE high_val, low_val; + Py_UNICODE* pos; + high_val = (Py_UNICODE) (0xD800 | (((character - 0x10000) >> 10) & ((1<<10)-1))); + low_val = (Py_UNICODE) (0xDC00 | ( (character - 0x10000) & ((1<<10)-1))); + for (pos=buffer; pos < buffer+length-1; pos++) { + if (unlikely((high_val == pos[0]) & (low_val == pos[1]))) return 1; + } + return 0; +} +#endif + +static int __Pyx_PyUnicodeBufferContainsUCS4_BMP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) { + Py_UNICODE uchar; + Py_UNICODE* pos; + uchar = (Py_UNICODE) character; + for (pos=buffer; pos < buffer+length; pos++) { + if (unlikely(uchar == pos[0])) return 1; + } + return 0; +} +#endif + +static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 character) { +#if CYTHON_PEP393_ENABLED + const int kind = PyUnicode_KIND(unicode); + #ifdef PyUnicode_WCHAR_KIND + if (likely(kind != PyUnicode_WCHAR_KIND)) + #endif + { + Py_ssize_t i; + const void* udata = PyUnicode_DATA(unicode); + const Py_ssize_t length = PyUnicode_GET_LENGTH(unicode); + for (i=0; i < length; i++) { + if (unlikely(character == PyUnicode_READ(kind, udata, i))) return 1; + } + return 0; + } +#elif PY_VERSION_HEX >= 0x03090000 + #error Cannot use "UChar in Unicode" in Python 3.9 without PEP-393 unicode strings. +#elif !defined(PyUnicode_AS_UNICODE) + #error Cannot use "UChar in Unicode" in Python < 3.9 without Py_UNICODE support. +#endif + +#if PY_VERSION_HEX < 0x03090000 || (defined(PyUnicode_WCHAR_KIND) && defined(PyUnicode_AS_UNICODE)) +#if !defined(Py_UNICODE_SIZE) || Py_UNICODE_SIZE == 2 + if ((sizeof(Py_UNICODE) == 2) && unlikely(character > 65535)) { + return __Pyx_PyUnicodeBufferContainsUCS4_SP( + __Pyx_PyUnicode_AS_UNICODE(unicode), + __Pyx_PyUnicode_GET_SIZE(unicode), + character); + } else +#endif + { + return __Pyx_PyUnicodeBufferContainsUCS4_BMP( + __Pyx_PyUnicode_AS_UNICODE(unicode), + __Pyx_PyUnicode_GET_SIZE(unicode), + character); + + } +#endif +} + + +//////////////////// PyUnicodeContains.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyUnicode_ContainsTF(PyObject* substring, PyObject* text, int eq) { + int result = PyUnicode_Contains(text, substring); + return unlikely(result < 0) ? 
result : (result == (eq == Py_EQ)); +} + + +//////////////////// CStringEquals.proto //////////////////// + +static CYTHON_INLINE int __Pyx_StrEq(const char *, const char *); /*proto*/ + +//////////////////// CStringEquals //////////////////// + +static CYTHON_INLINE int __Pyx_StrEq(const char *s1, const char *s2) { + while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } + return *s1 == *s2; +} + + +//////////////////// StrEquals.proto //////////////////// +//@requires: BytesEquals +//@requires: UnicodeEquals + +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals +#else +#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals +#endif + + +//////////////////// UnicodeEquals.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/ + +//////////////////// UnicodeEquals //////////////////// +//@requires: BytesEquals + +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API + return PyObject_RichCompareBool(s1, s2, equals); +#else +#if PY_MAJOR_VERSION < 3 + PyObject* owned_ref = NULL; +#endif + int s1_is_unicode, s2_is_unicode; + if (s1 == s2) { + /* as done by PyObject_RichCompareBool(); also catches the (interned) empty string */ + goto return_eq; + } + s1_is_unicode = PyUnicode_CheckExact(s1); + s2_is_unicode = PyUnicode_CheckExact(s2); +#if PY_MAJOR_VERSION < 3 + if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { + owned_ref = PyUnicode_FromObject(s2); + if (unlikely(!owned_ref)) + return -1; + s2 = owned_ref; + s2_is_unicode = 1; + } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { + owned_ref = PyUnicode_FromObject(s1); + if (unlikely(!owned_ref)) + return -1; + s1 = owned_ref; + s1_is_unicode = 1; + } else if (((!s2_is_unicode) & (!s1_is_unicode))) { + return __Pyx_PyBytes_Equals(s1, s2, equals); + } +#endif + if (s1_is_unicode & s2_is_unicode) { + Py_ssize_t length; + int kind; + void *data1, *data2; + if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) + return -1; + length = __Pyx_PyUnicode_GET_LENGTH(s1); + if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { + goto return_ne; + } +#if CYTHON_USE_UNICODE_INTERNALS + { + Py_hash_t hash1, hash2; + #if CYTHON_PEP393_ENABLED + hash1 = ((PyASCIIObject*)s1)->hash; + hash2 = ((PyASCIIObject*)s2)->hash; + #else + hash1 = ((PyUnicodeObject*)s1)->hash; + hash2 = ((PyUnicodeObject*)s2)->hash; + #endif + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + goto return_ne; + } + } +#endif + // len(s1) == len(s2) >= 1 (empty string is interned, and "s1 is not s2") + kind = __Pyx_PyUnicode_KIND(s1); + if (kind != __Pyx_PyUnicode_KIND(s2)) { + goto return_ne; + } + data1 = __Pyx_PyUnicode_DATA(s1); + data2 = __Pyx_PyUnicode_DATA(s2); + if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { + goto return_ne; + } else if (length == 1) { + goto return_eq; + } else { + int result = memcmp(data1, data2, (size_t)(length * kind)); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ) ? 
(result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & s2_is_unicode) { + goto return_ne; + } else if ((s2 == Py_None) & s1_is_unicode) { + goto return_ne; + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +return_eq: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ); +return_ne: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_NE); +#endif +} + + +//////////////////// BytesEquals.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/ + +//////////////////// BytesEquals //////////////////// +//@requires: IncludeStringH + +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API + return PyObject_RichCompareBool(s1, s2, equals); +#else + if (s1 == s2) { + /* as done by PyObject_RichCompareBool(); also catches the (interned) empty string */ + return (equals == Py_EQ); + } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { + const char *ps1, *ps2; + Py_ssize_t length = PyBytes_GET_SIZE(s1); + if (length != PyBytes_GET_SIZE(s2)) + return (equals == Py_NE); + // len(s1) == len(s2) >= 1 (empty string is interned, and "s1 is not s2") + ps1 = PyBytes_AS_STRING(s1); + ps2 = PyBytes_AS_STRING(s2); + if (ps1[0] != ps2[0]) { + return (equals == Py_NE); + } else if (length == 1) { + return (equals == Py_EQ); + } else { + int result; +#if CYTHON_USE_UNICODE_INTERNALS && (PY_VERSION_HEX < 0x030B0000) + Py_hash_t hash1, hash2; + hash1 = ((PyBytesObject*)s1)->ob_shash; + hash2 = ((PyBytesObject*)s2)->ob_shash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + return (equals == Py_NE); + } +#endif + result = memcmp(ps1, ps2, (size_t)length); + return (equals == Py_EQ) ? (result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { + return (equals == Py_NE); + } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { + return (equals == Py_NE); + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +#endif +} + +//////////////////// GetItemIntByteArray.proto //////////////////// + +#define __Pyx_GetItemInt_ByteArray(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? 
\ + __Pyx_GetItemInt_ByteArray_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \ + (PyErr_SetString(PyExc_IndexError, "bytearray index out of range"), -1)) + +static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, + int wraparound, int boundscheck); + +//////////////////// GetItemIntByteArray //////////////////// + +static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, + int wraparound, int boundscheck) { + Py_ssize_t length; + if (wraparound | boundscheck) { + length = PyByteArray_GET_SIZE(string); + if (wraparound & unlikely(i < 0)) i += length; + if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { + return (unsigned char) (PyByteArray_AS_STRING(string)[i]); + } else { + PyErr_SetString(PyExc_IndexError, "bytearray index out of range"); + return -1; + } + } else { + return (unsigned char) (PyByteArray_AS_STRING(string)[i]); + } +} + + +//////////////////// SetItemIntByteArray.proto //////////////////// + +#define __Pyx_SetItemInt_ByteArray(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ + __Pyx_SetItemInt_ByteArray_Fast(o, (Py_ssize_t)i, v, wraparound, boundscheck) : \ + (PyErr_SetString(PyExc_IndexError, "bytearray index out of range"), -1)) + +static CYTHON_INLINE int __Pyx_SetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, unsigned char v, + int wraparound, int boundscheck); + +//////////////////// SetItemIntByteArray //////////////////// + +static CYTHON_INLINE int __Pyx_SetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, unsigned char v, + int wraparound, int boundscheck) { + Py_ssize_t length; + if (wraparound | boundscheck) { + length = PyByteArray_GET_SIZE(string); + if (wraparound & unlikely(i < 0)) i += length; + if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { + PyByteArray_AS_STRING(string)[i] = (char) v; + return 0; + } else { + PyErr_SetString(PyExc_IndexError, "bytearray index out of range"); + return -1; + } + } else { + PyByteArray_AS_STRING(string)[i] = (char) v; + return 0; + } +} + + +//////////////////// GetItemIntUnicode.proto //////////////////// + +#define __Pyx_GetItemInt_Unicode(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? 
\ + __Pyx_GetItemInt_Unicode_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \ + (PyErr_SetString(PyExc_IndexError, "string index out of range"), (Py_UCS4)-1)) + +static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i, + int wraparound, int boundscheck); + +//////////////////// GetItemIntUnicode //////////////////// + +static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i, + int wraparound, int boundscheck) { + Py_ssize_t length; + if (unlikely(__Pyx_PyUnicode_READY(ustring) < 0)) return (Py_UCS4)-1; + if (wraparound | boundscheck) { + length = __Pyx_PyUnicode_GET_LENGTH(ustring); + if (wraparound & unlikely(i < 0)) i += length; + if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) { + return __Pyx_PyUnicode_READ_CHAR(ustring, i); + } else { + PyErr_SetString(PyExc_IndexError, "string index out of range"); + return (Py_UCS4)-1; + } + } else { + return __Pyx_PyUnicode_READ_CHAR(ustring, i); + } +} + + +/////////////// decode_c_string_utf16.proto /////////////// + +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 0; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = -1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} + +/////////////// decode_cpp_string.proto /////////////// +//@requires: IncludeCppStringH +//@requires: decode_c_bytes + +static CYTHON_INLINE PyObject* __Pyx_decode_cpp_string( + std::string cppstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + return __Pyx_decode_c_bytes( + cppstring.data(), cppstring.size(), start, stop, encoding, errors, decode_func); +} + +/////////////// decode_c_string.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); + +/////////////// decode_c_string /////////////// +//@requires: IncludeStringH +//@requires: decode_c_string_utf16 +//@substitute: naming + +/* duplicate code to avoid calling strlen() if start >= 0 and stop >= 0 */ +static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + Py_ssize_t length; + if (unlikely((start < 0) | (stop < 0))) { + size_t slen = strlen(cstring); + if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { + PyErr_SetString(PyExc_OverflowError, + "c-string too long to convert to Python"); + return NULL; + } + length = (Py_ssize_t) slen; + if (start < 0) { + start += length; + if (start < 0) + start = 0; + } + if (stop < 0) + stop += length; + } + if (unlikely(stop <= start)) + return __Pyx_NewRef($empty_unicode); + length = stop - start; + cstring += start; + if (decode_func) { + return decode_func(cstring, length, errors); + } else { + return PyUnicode_Decode(cstring, length, encoding, errors); + } +} 
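+
+// A minimal, self-contained sketch of the Python-style slice normalisation
+// performed by __Pyx_decode_c_string() above (demo_normalise_slice is a
+// hypothetical helper, not part of this module; ptrdiff_t stands in for
+// Py_ssize_t so the sketch stays free-standing):
+#include <stddef.h>
+
+static void demo_normalise_slice(ptrdiff_t length, ptrdiff_t *start, ptrdiff_t *stop) {
+    if (*start < 0) {
+        *start += length;            /* negative indices count from the end */
+        if (*start < 0) *start = 0;  /* clamp to the front of the string */
+    }
+    if (*stop < 0)
+        *stop += length;
+    /* e.g. length=5, start=-3, stop=-1  ->  start=2, stop=4;
+       as above, stop <= start then decodes to the empty string */
+}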
+ +/////////////// decode_c_bytes.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes( + const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); + +/////////////// decode_c_bytes /////////////// +//@requires: decode_c_string_utf16 +//@substitute: naming + +static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes( + const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + if (unlikely((start < 0) | (stop < 0))) { + if (start < 0) { + start += length; + if (start < 0) + start = 0; + } + if (stop < 0) + stop += length; + } + if (stop > length) + stop = length; + if (unlikely(stop <= start)) + return __Pyx_NewRef($empty_unicode); + length = stop - start; + cstring += start; + if (decode_func) { + return decode_func(cstring, length, errors); + } else { + return PyUnicode_Decode(cstring, length, encoding, errors); + } +} + +/////////////// decode_bytes.proto /////////////// +//@requires: decode_c_bytes + +static CYTHON_INLINE PyObject* __Pyx_decode_bytes( + PyObject* string, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + as_c_string = PyBytes_AS_STRING(string); + size = PyBytes_GET_SIZE(string); +#else + if (PyBytes_AsStringAndSize(string, &as_c_string, &size) < 0) { + return NULL; + } +#endif + return __Pyx_decode_c_bytes( + as_c_string, size, + start, stop, encoding, errors, decode_func); +} + +/////////////// decode_bytearray.proto /////////////// +//@requires: decode_c_bytes + +static CYTHON_INLINE PyObject* __Pyx_decode_bytearray( + PyObject* string, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + char* as_c_string; + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + as_c_string = PyByteArray_AS_STRING(string); + size = PyByteArray_GET_SIZE(string); +#else + if (!(as_c_string = PyByteArray_AsString(string))) return NULL; + if ((size = PyByteArray_Size(string)) < 0) return NULL; +#endif + return __Pyx_decode_c_bytes( + as_c_string, size, + start, stop, encoding, errors, decode_func); +} + +/////////////// PyUnicode_Substring.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring( + PyObject* text, Py_ssize_t start, Py_ssize_t stop); + +/////////////// PyUnicode_Substring /////////////// +//@substitute: naming + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring( + PyObject* text, Py_ssize_t start, Py_ssize_t stop) { + Py_ssize_t length; + if (unlikely(__Pyx_PyUnicode_READY(text) == -1)) return NULL; + length = __Pyx_PyUnicode_GET_LENGTH(text); + if (start < 0) { + start += length; + if (start < 0) + start = 0; + } + if (stop < 0) + stop += length; + else if (stop > length) + stop = length; + if (stop <= start) + return __Pyx_NewRef($empty_unicode); + if (start == 0 && stop == length) + return __Pyx_NewRef(text); +#if CYTHON_PEP393_ENABLED + return PyUnicode_FromKindAndData(PyUnicode_KIND(text), + PyUnicode_1BYTE_DATA(text) + start*PyUnicode_KIND(text), stop-start); +#else + return PyUnicode_FromUnicode(PyUnicode_AS_UNICODE(text)+start, stop-start); +#endif 
+}
+
+
+/////////////// py_unicode_istitle.proto ///////////////
+
+// Py_UNICODE_ISTITLE() doesn't match unicode.istitle() as the latter
+// additionally allows characters that comply with Py_UNICODE_ISUPPER()
+
+#if PY_VERSION_HEX < 0x030200A2
+static CYTHON_INLINE int __Pyx_Py_UNICODE_ISTITLE(Py_UNICODE uchar)
+#else
+static CYTHON_INLINE int __Pyx_Py_UNICODE_ISTITLE(Py_UCS4 uchar)
+#endif
+{
+    return Py_UNICODE_ISTITLE(uchar) || Py_UNICODE_ISUPPER(uchar);
+}
+
+
+/////////////// unicode_tailmatch.proto ///////////////
+
+static int __Pyx_PyUnicode_Tailmatch(
+    PyObject* s, PyObject* substr, Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/
+
+/////////////// unicode_tailmatch ///////////////
+
+// Python's unicode.startswith() and unicode.endswith() support a
+// tuple of prefixes/suffixes, whereas it's much more common to
+// test for a single unicode string.
+
+static int __Pyx_PyUnicode_TailmatchTuple(PyObject* s, PyObject* substrings,
+                                          Py_ssize_t start, Py_ssize_t end, int direction) {
+    Py_ssize_t i, count = PyTuple_GET_SIZE(substrings);
+    for (i = 0; i < count; i++) {
+        Py_ssize_t result;
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+        result = PyUnicode_Tailmatch(s, PyTuple_GET_ITEM(substrings, i),
+                                     start, end, direction);
+#else
+        PyObject* sub = PySequence_ITEM(substrings, i);
+        if (unlikely(!sub)) return -1;
+        result = PyUnicode_Tailmatch(s, sub, start, end, direction);
+        Py_DECREF(sub);
+#endif
+        if (result) {
+            return (int) result;
+        }
+    }
+    return 0;
+}
+
+static int __Pyx_PyUnicode_Tailmatch(PyObject* s, PyObject* substr,
+                                     Py_ssize_t start, Py_ssize_t end, int direction) {
+    if (unlikely(PyTuple_Check(substr))) {
+        return __Pyx_PyUnicode_TailmatchTuple(s, substr, start, end, direction);
+    }
+    return (int) PyUnicode_Tailmatch(s, substr, start, end, direction);
+}
+
+
+/////////////// bytes_tailmatch.proto ///////////////
+
+static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg,
+                                         Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/
+static int __Pyx_PyBytes_Tailmatch(PyObject* self, PyObject* substr,
+                                   Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/
+
+/////////////// bytes_tailmatch ///////////////
+
+static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg,
+                                         Py_ssize_t start, Py_ssize_t end, int direction) {
+    const char* self_ptr = PyBytes_AS_STRING(self);
+    Py_ssize_t self_len = PyBytes_GET_SIZE(self);
+    const char* sub_ptr;
+    Py_ssize_t sub_len;
+    int retval;
+
+    Py_buffer view;
+    view.obj = NULL;
+
+    if ( PyBytes_Check(arg) ) {
+        sub_ptr = PyBytes_AS_STRING(arg);
+        sub_len = PyBytes_GET_SIZE(arg);
+    }
+#if PY_MAJOR_VERSION < 3
+    // Python 2.x allows mixing unicode and str
+    else if ( PyUnicode_Check(arg) ) {
+        return (int) PyUnicode_Tailmatch(self, arg, start, end, direction);
+    }
+#endif
+    else {
+        // read the substring candidate through the buffer protocol
+        if (unlikely(PyObject_GetBuffer(arg, &view, PyBUF_SIMPLE) == -1))
+            return -1;
+        sub_ptr = (const char*) view.buf;
+        sub_len = view.len;
+    }
+
+    if (end > self_len)
+        end = self_len;
+    else if (end < 0)
+        end += self_len;
+    if (end < 0)
+        end = 0;
+    if (start < 0)
+        start += self_len;
+    if (start < 0)
+        start = 0;
+
+    if (direction > 0) {
+        /* endswith */
+        if (end-sub_len > start)
+            start = end - sub_len;
+    }
+
+    if (start + sub_len <= end)
+        retval = !memcmp(self_ptr+start, sub_ptr, (size_t)sub_len);
+    else
+        retval = 0;
+
+    if (view.obj)
+        PyBuffer_Release(&view);
+
+    return retval;
+}
+
+static int __Pyx_PyBytes_TailmatchTuple(PyObject* self, PyObject* substrings,
+                                        Py_ssize_t start,
Py_ssize_t end, int direction) { + Py_ssize_t i, count = PyTuple_GET_SIZE(substrings); + for (i = 0; i < count; i++) { + int result; +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + result = __Pyx_PyBytes_SingleTailmatch(self, PyTuple_GET_ITEM(substrings, i), + start, end, direction); +#else + PyObject* sub = PySequence_ITEM(substrings, i); + if (unlikely(!sub)) return -1; + result = __Pyx_PyBytes_SingleTailmatch(self, sub, start, end, direction); + Py_DECREF(sub); +#endif + if (result) { + return result; + } + } + return 0; +} + +static int __Pyx_PyBytes_Tailmatch(PyObject* self, PyObject* substr, + Py_ssize_t start, Py_ssize_t end, int direction) { + if (unlikely(PyTuple_Check(substr))) { + return __Pyx_PyBytes_TailmatchTuple(self, substr, start, end, direction); + } + + return __Pyx_PyBytes_SingleTailmatch(self, substr, start, end, direction); +} + + +/////////////// str_tailmatch.proto /////////////// + +static CYTHON_INLINE int __Pyx_PyStr_Tailmatch(PyObject* self, PyObject* arg, Py_ssize_t start, + Py_ssize_t end, int direction); /*proto*/ + +/////////////// str_tailmatch /////////////// +//@requires: bytes_tailmatch +//@requires: unicode_tailmatch + +static CYTHON_INLINE int __Pyx_PyStr_Tailmatch(PyObject* self, PyObject* arg, Py_ssize_t start, + Py_ssize_t end, int direction) +{ + // We do not use a C compiler macro here to avoid "unused function" + // warnings for the *_Tailmatch() function that is not being used in + // the specific CPython version. The C compiler will generate the same + // code anyway, and will usually just remove the unused function. + if (PY_MAJOR_VERSION < 3) + return __Pyx_PyBytes_Tailmatch(self, arg, start, end, direction); + else + return __Pyx_PyUnicode_Tailmatch(self, arg, start, end, direction); +} + + +/////////////// bytes_index.proto /////////////// + +static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds); /*proto*/ + +/////////////// bytes_index /////////////// + +static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds) { + if (index < 0) + index += PyBytes_GET_SIZE(bytes); + if (check_bounds) { + Py_ssize_t size = PyBytes_GET_SIZE(bytes); + if (unlikely(!__Pyx_is_valid_index(index, size))) { + PyErr_SetString(PyExc_IndexError, "string index out of range"); + return (char) -1; + } + } + return PyBytes_AS_STRING(bytes)[index]; +} + + +//////////////////// StringJoin.proto //////////////////// + +#if PY_MAJOR_VERSION < 3 +#define __Pyx_PyString_Join __Pyx_PyBytes_Join +#define __Pyx_PyBaseString_Join(s, v) (PyUnicode_CheckExact(s) ? 
PyUnicode_Join(s, v) : __Pyx_PyBytes_Join(s, v)) +#else +#define __Pyx_PyString_Join PyUnicode_Join +#define __Pyx_PyBaseString_Join PyUnicode_Join +#endif +static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values); /*proto*/ + +//////////////////// StringJoin //////////////////// +//@requires: ObjectHandling.c::PyObjectCallMethod1 + +static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values) { + // avoid unused function + (void) __Pyx_PyObject_CallMethod1; +#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION < 3 + return _PyString_Join(sep, values); +#elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030d0000 + return _PyBytes_Join(sep, values); +#else + return __Pyx_PyObject_CallMethod1(sep, PYIDENT("join"), values); +#endif +} + + +/////////////// JoinPyUnicode.proto /////////////// + +static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, + Py_UCS4 max_char); + +/////////////// JoinPyUnicode /////////////// +//@requires: IncludeStringH +//@substitute: naming + +static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, + Py_UCS4 max_char) { +#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyObject *result_uval; + int result_ukind, kind_shift; + Py_ssize_t i, char_pos; + void *result_udata; + CYTHON_MAYBE_UNUSED_VAR(max_char); +#if CYTHON_PEP393_ENABLED + // Py 3.3+ (post PEP-393) + result_uval = PyUnicode_New(result_ulength, max_char); + if (unlikely(!result_uval)) return NULL; + result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND; + kind_shift = (result_ukind == PyUnicode_4BYTE_KIND) ? 2 : result_ukind - 1; + result_udata = PyUnicode_DATA(result_uval); +#else + // Py 2.x/3.2 (pre PEP-393) + result_uval = PyUnicode_FromUnicode(NULL, result_ulength); + if (unlikely(!result_uval)) return NULL; + result_ukind = sizeof(Py_UNICODE); + kind_shift = (result_ukind == 4) ? 
2 : result_ukind - 1; + result_udata = PyUnicode_AS_UNICODE(result_uval); +#endif + assert(kind_shift == 2 || kind_shift == 1 || kind_shift == 0); + + char_pos = 0; + for (i=0; i < value_count; i++) { + int ukind; + Py_ssize_t ulength; + void *udata; + PyObject *uval = PyTuple_GET_ITEM(value_tuple, i); + if (unlikely(__Pyx_PyUnicode_READY(uval))) + goto bad; + ulength = __Pyx_PyUnicode_GET_LENGTH(uval); + if (unlikely(!ulength)) + continue; + if (unlikely((PY_SSIZE_T_MAX >> kind_shift) - ulength < char_pos)) + goto overflow; + ukind = __Pyx_PyUnicode_KIND(uval); + udata = __Pyx_PyUnicode_DATA(uval); + if (!CYTHON_PEP393_ENABLED || ukind == result_ukind) { + memcpy((char *)result_udata + (char_pos << kind_shift), udata, (size_t) (ulength << kind_shift)); + } else { + #if PY_VERSION_HEX >= 0x030d0000 + if (unlikely(PyUnicode_CopyCharacters(result_uval, char_pos, uval, 0, ulength) < 0)) goto bad; + #elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030300F0 || defined(_PyUnicode_FastCopyCharacters) + _PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength); + #else + Py_ssize_t j; + for (j=0; j < ulength; j++) { + Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j); + __Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar); + } + #endif + } + char_pos += ulength; + } + return result_uval; +overflow: + PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string"); +bad: + Py_DECREF(result_uval); + return NULL; +#else + // non-CPython fallback + CYTHON_UNUSED_VAR(max_char); + CYTHON_UNUSED_VAR(result_ulength); + CYTHON_UNUSED_VAR(value_count); + return PyUnicode_Join($empty_unicode, value_tuple); +#endif +} + + +/////////////// BuildPyUnicode.proto /////////////// + +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char); + +/////////////// BuildPyUnicode /////////////// + +// Create a PyUnicode object from an ASCII char*, e.g. a formatted number. 
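+
+// A free-standing ASCII mock-up of the fill pattern implemented below
+// (demo_build_from_ascii and the sample values are illustrative only):
+// with ulength=7, chars="42", clength=2, prepend_sign=1, padding_char='0',
+// the buffer reads "-000042" - sign first, then padding, then the digits.
+#include <string.h>
+
+static void demo_build_from_ascii(char *out, size_t ulength,
+                                  const char *chars, size_t clength,
+                                  int prepend_sign, char padding_char) {
+    size_t i = 0, uoffset = ulength - clength;  /* room for sign + padding */
+    if (prepend_sign)
+        out[i++] = '-';
+    for (; i < uoffset; i++)
+        out[i] = padding_char;
+    memcpy(out + uoffset, chars, clength);      /* the ASCII payload */
+}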
+ +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char) { + PyObject *uval; + Py_ssize_t uoffset = ulength - clength; +#if CYTHON_USE_UNICODE_INTERNALS + Py_ssize_t i; +#if CYTHON_PEP393_ENABLED + // Py 3.3+ (post PEP-393) + void *udata; + uval = PyUnicode_New(ulength, 127); + if (unlikely(!uval)) return NULL; + udata = PyUnicode_DATA(uval); +#else + // Py 2.x/3.2 (pre PEP-393) + Py_UNICODE *udata; + uval = PyUnicode_FromUnicode(NULL, ulength); + if (unlikely(!uval)) return NULL; + udata = PyUnicode_AS_UNICODE(uval); +#endif + if (uoffset > 0) { + i = 0; + if (prepend_sign) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, 0, '-'); + i++; + } + for (; i < uoffset; i++) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, i, padding_char); + } + } + for (i=0; i < clength; i++) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, uoffset+i, chars[i]); + } + +#else + // non-CPython + { + PyObject *sign = NULL, *padding = NULL; + uval = NULL; + if (uoffset > 0) { + prepend_sign = !!prepend_sign; + if (uoffset > prepend_sign) { + padding = PyUnicode_FromOrdinal(padding_char); + if (likely(padding) && uoffset > prepend_sign + 1) { + PyObject *tmp; + PyObject *repeat = PyInt_FromSsize_t(uoffset - prepend_sign); + if (unlikely(!repeat)) goto done_or_error; + tmp = PyNumber_Multiply(padding, repeat); + Py_DECREF(repeat); + Py_DECREF(padding); + padding = tmp; + } + if (unlikely(!padding)) goto done_or_error; + } + if (prepend_sign) { + sign = PyUnicode_FromOrdinal('-'); + if (unlikely(!sign)) goto done_or_error; + } + } + + uval = PyUnicode_DecodeASCII(chars, clength, NULL); + if (likely(uval) && padding) { + PyObject *tmp = PyNumber_Add(padding, uval); + Py_DECREF(uval); + uval = tmp; + } + if (likely(uval) && sign) { + PyObject *tmp = PyNumber_Add(sign, uval); + Py_DECREF(uval); + uval = tmp; + } +done_or_error: + Py_XDECREF(padding); + Py_XDECREF(sign); + } +#endif + + return uval; +} + + +//////////////////// ByteArrayAppendObject.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyByteArray_AppendObject(PyObject* bytearray, PyObject* value); + +//////////////////// ByteArrayAppendObject //////////////////// +//@requires: ByteArrayAppend + +static CYTHON_INLINE int __Pyx_PyByteArray_AppendObject(PyObject* bytearray, PyObject* value) { + Py_ssize_t ival; +#if PY_MAJOR_VERSION < 3 + if (unlikely(PyString_Check(value))) { + if (unlikely(PyString_GET_SIZE(value) != 1)) { + PyErr_SetString(PyExc_ValueError, "string must be of size 1"); + return -1; + } + ival = (unsigned char) (PyString_AS_STRING(value)[0]); + } else +#endif +#if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(value)) && likely(__Pyx_PyLong_IsCompact(value))) { + if (__Pyx_PyLong_IsZero(value)) { + ival = 0; + } else { + ival = __Pyx_PyLong_CompactValue(value); + if (unlikely(ival > 255)) goto bad_range; + } + } else +#endif + { + // CPython calls PyNumber_Index() internally + ival = __Pyx_PyIndex_AsSsize_t(value); + if (unlikely(!__Pyx_is_valid_index(ival, 256))) { + if (ival == -1 && PyErr_Occurred()) + return -1; + goto bad_range; + } + } + return __Pyx_PyByteArray_Append(bytearray, ival); +bad_range: + PyErr_SetString(PyExc_ValueError, "byte must be in range(0, 256)"); + return -1; +} + +//////////////////// ByteArrayAppend.proto //////////////////// + +static CYTHON_INLINE int __Pyx_PyByteArray_Append(PyObject* bytearray, int value); + +//////////////////// ByteArrayAppend //////////////////// +//@requires: 
ObjectHandling.c::PyObjectCallMethod1 + +static CYTHON_INLINE int __Pyx_PyByteArray_Append(PyObject* bytearray, int value) { + PyObject *pyval, *retval; +#if CYTHON_COMPILING_IN_CPYTHON + if (likely(__Pyx_is_valid_index(value, 256))) { + Py_ssize_t n = Py_SIZE(bytearray); + if (likely(n != PY_SSIZE_T_MAX)) { + if (unlikely(PyByteArray_Resize(bytearray, n + 1) < 0)) + return -1; + PyByteArray_AS_STRING(bytearray)[n] = value; + return 0; + } + } else { + PyErr_SetString(PyExc_ValueError, "byte must be in range(0, 256)"); + return -1; + } +#endif + pyval = PyInt_FromLong(value); + if (unlikely(!pyval)) + return -1; + retval = __Pyx_PyObject_CallMethod1(bytearray, PYIDENT("append"), pyval); + Py_DECREF(pyval); + if (unlikely(!retval)) + return -1; + Py_DECREF(retval); + return 0; +} + + +//////////////////// PyObjectFormat.proto //////////////////// + +#if CYTHON_USE_UNICODE_WRITER +static PyObject* __Pyx_PyObject_Format(PyObject* s, PyObject* f); +#else +#define __Pyx_PyObject_Format(s, f) PyObject_Format(s, f) +#endif + +//////////////////// PyObjectFormat //////////////////// + +#if CYTHON_USE_UNICODE_WRITER +static PyObject* __Pyx_PyObject_Format(PyObject* obj, PyObject* format_spec) { + int ret; + _PyUnicodeWriter writer; + + if (likely(PyFloat_CheckExact(obj))) { + // copied from CPython 3.5 "float__format__()" in floatobject.c +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000 + _PyUnicodeWriter_Init(&writer, 0); +#else + _PyUnicodeWriter_Init(&writer); +#endif + ret = _PyFloat_FormatAdvancedWriter( + &writer, + obj, + format_spec, 0, PyUnicode_GET_LENGTH(format_spec)); + } else if (likely(PyLong_CheckExact(obj))) { + // copied from CPython 3.5 "long__format__()" in longobject.c +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000 + _PyUnicodeWriter_Init(&writer, 0); +#else + _PyUnicodeWriter_Init(&writer); +#endif + ret = _PyLong_FormatAdvancedWriter( + &writer, + obj, + format_spec, 0, PyUnicode_GET_LENGTH(format_spec)); + } else { + return PyObject_Format(obj, format_spec); + } + + if (unlikely(ret == -1)) { + _PyUnicodeWriter_Dealloc(&writer); + return NULL; + } + return _PyUnicodeWriter_Finish(&writer); +} +#endif + + +//////////////////// PyObjectFormatSimple.proto //////////////////// + +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ + PyObject_Format(s, f)) +#elif PY_MAJOR_VERSION < 3 + // str is common in Py2, but formatting must return a Unicode string + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ + likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") : \ + PyObject_Format(s, f)) +#elif CYTHON_USE_TYPE_SLOTS + // Py3 nicely returns unicode strings from str() and repr(), which makes this quite efficient for builtin types. + // In Py3.8+, tp_str() delegates to tp_repr(), so we call tp_repr() directly here. + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \ + likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_repr(s) : \ + likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_repr(s) : \ + PyObject_Format(s, f)) +#else + #define __Pyx_PyObject_FormatSimple(s, f) ( \ + likely(PyUnicode_CheckExact(s)) ? 
(Py_INCREF(s), s) : \ + PyObject_Format(s, f)) +#endif + + +//////////////////// PyObjectFormatAndDecref.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f); +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f); + +//////////////////// PyObjectFormatAndDecref //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f) { + if (unlikely(!s)) return NULL; + if (likely(PyUnicode_CheckExact(s))) return s; + #if PY_MAJOR_VERSION < 3 + // str is common in Py2, but formatting must return a Unicode string + if (likely(PyString_CheckExact(s))) { + PyObject *result = PyUnicode_FromEncodedObject(s, NULL, "strict"); + Py_DECREF(s); + return result; + } + #endif + return __Pyx_PyObject_FormatAndDecref(s, f); +} + +static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f) { + PyObject *result; + if (unlikely(!s)) return NULL; + result = PyObject_Format(s, f); + Py_DECREF(s); + return result; +} + + +//////////////////// PyUnicode_Unicode.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj);/*proto*/ + +//////////////////// PyUnicode_Unicode //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj) { + if (unlikely(obj == Py_None)) + obj = PYUNICODE("None"); + return __Pyx_NewRef(obj); +} + + +//////////////////// PyObject_Unicode.proto //////////////////// + +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyObject_Unicode(obj) \ + (likely(PyUnicode_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Str(obj)) +#else +#define __Pyx_PyObject_Unicode(obj) \ + (likely(PyUnicode_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Unicode(obj)) +#endif + + +//////////////////// PyStr_Str.proto //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyStr_Str(PyObject *obj);/*proto*/ + +//////////////////// PyStr_Str //////////////////// + +static CYTHON_INLINE PyObject* __Pyx_PyStr_Str(PyObject *obj) { + if (unlikely(obj == Py_None)) + obj = PYIDENT("None"); + return __Pyx_NewRef(obj); +} + + +//////////////////// PyObject_Str.proto //////////////////// + +#define __Pyx_PyObject_Str(obj) \ + (likely(PyString_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Str(obj)) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TestCyUtilityLoader.pyx b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TestCyUtilityLoader.pyx new file mode 100644 index 0000000000000000000000000000000000000000..00e7a7681b8534a71c8de66c517d4ef3e49c7d30 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TestCyUtilityLoader.pyx @@ -0,0 +1,8 @@ +########## TestCyUtilityLoader ########## +#@requires: OtherUtility + +test {{cy_loader}} impl + + +########## OtherUtility ########## +req {{cy_loader}} impl diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TestCythonScope.pyx b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TestCythonScope.pyx new file mode 100644 index 0000000000000000000000000000000000000000..7da7665c3039ff86dccf1fc1acbdd11c1aa45c01 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TestCythonScope.pyx @@ -0,0 +1,70 @@ +########## TestClass ########## +# These utilities are for testing purposes + +# The "cythonscope" test calls METH_O functions with their (self, arg) signature. 
+# cython: always_allow_keywords=False + +from __future__ import print_function + +cdef extern from *: + cdef object __pyx_test_dep(object) + +@cname('__pyx_TestClass') +cdef class TestClass(object): + cdef public int value + + def __init__(self, int value): + self.value = value + + def __str__(self): + return f'TestClass({self.value})' + + cdef cdef_method(self, int value): + print('Hello from cdef_method', value) + + cpdef cpdef_method(self, int value): + print('Hello from cpdef_method', value) + + def def_method(self, int value): + print('Hello from def_method', value) + + @cname('cdef_cname') + cdef cdef_cname_method(self, int value): + print("Hello from cdef_cname_method", value) + + @cname('cpdef_cname') + cpdef cpdef_cname_method(self, int value): + print("Hello from cpdef_cname_method", value) + + @cname('def_cname') + def def_cname_method(self, int value): + print("Hello from def_cname_method", value) + +@cname('__pyx_test_call_other_cy_util') +cdef test_call(obj): + print('test_call') + __pyx_test_dep(obj) + +@cname('__pyx_TestClass_New') +cdef _testclass_new(int value): + return TestClass(value) + +########### TestDep ########## + +from __future__ import print_function + +@cname('__pyx_test_dep') +cdef test_dep(obj): + print('test_dep', obj) + +########## TestScope ########## + +@cname('__pyx_testscope') +cdef object _testscope(int value): + return f"hello from cython scope, value={value}" + +########## View.TestScope ########## + +@cname('__pyx_view_testscope') +cdef object _testscope(int value): + return f"hello from cython.view scope, value={value}" diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TestUtilityLoader.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TestUtilityLoader.c new file mode 100644 index 0000000000000000000000000000000000000000..595305f211bd172ec1dfc10c3cf6bab4e92d7fba --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TestUtilityLoader.c @@ -0,0 +1,12 @@ +////////// TestUtilityLoader.proto ////////// +test {{loader}} prototype + +////////// TestUtilityLoader ////////// +//@requires: OtherUtility +test {{loader}} impl + +////////// OtherUtility.proto ////////// +req {{loader}} proto + +////////// OtherUtility ////////// +req {{loader}} impl diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TypeConversion.c b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TypeConversion.c new file mode 100644 index 0000000000000000000000000000000000000000..d2f62e67381725ea46a0ac0410f1cce69b741d6f --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/TypeConversion.c @@ -0,0 +1,1324 @@ +/////////////// TypeConversions.proto /////////////// +//@requires: StringTools.c::IncludeStringH + +/* Type Conversion Predeclarations */ + +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) + +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ + (sizeof(type) < sizeof(Py_ssize_t)) || \ + (sizeof(type) > sizeof(Py_ssize_t) && \ + likely(v < (type)PY_SSIZE_T_MAX || \ + v == (type)PY_SSIZE_T_MAX) && \ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \ + v == (type)PY_SSIZE_T_MIN))) || \ + (sizeof(type) == sizeof(Py_ssize_t) && \ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ + v == (type)PY_SSIZE_T_MAX))) ) + +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + // Optimisation from Section 14.2 "Bounds Checking" in + // https://www.agner.org/optimize/optimizing_cpp.pdf + // See https://bugs.python.org/issue28397 + // The 
cast to unsigned effectively tests for "0 <= i < limit".
+    return (size_t) i < (size_t) limit;
+}
+
+// fast and unsafe abs(Py_ssize_t) that ignores the overflow for (-PY_SSIZE_T_MAX-1)
+#if defined (__cplusplus) && __cplusplus >= 201103L
+    #include <cstdlib>
+    #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+    // abs() is defined for long, but the 64-bit type on MSVC is long long.
+    // Use MS-specific _abs64 instead.
+    #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+    // gcc or clang on 64 bit windows.
+    #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
+
+static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+
+static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char*);
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+
+#if PY_MAJOR_VERSION < 3
+    #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+    #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+
+#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
+
+// There used to be a Py_UNICODE_strlen() in CPython 3.x, but it has been deprecated since Py3.3.
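+
+// The replacements below fall back to a plain pointer walk; the same idiom
+// for narrow C strings, as a free-standing sketch (demo_strlen is
+// illustrative and not used by this module):
+#include <stddef.h>
+
+static size_t demo_strlen(const char *s) {
+    const char *end = s;
+    while (*end++) ;               /* stop once the NUL has been consumed */
+    return (size_t)(end - s - 1);  /* 'end' overshot the NUL by one */
+}
+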
+#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const wchar_t *u) +{ + const wchar_t *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#else +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) +{ + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#endif + +#define __Pyx_PyUnicode_FromOrdinal(o) PyUnicode_FromOrdinal((int)o) +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode + +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); + +#define __Pyx_PySequence_Tuple(obj) \ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) + +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); + +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) + +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +// __Pyx_PyNumber_Float is now in its own section since it has dependencies (needed to make +// string conversion work the same in all circumstances). 
+ +#if CYTHON_USE_PYLONG_INTERNALS + #if PY_VERSION_HEX >= 0x030C00A7 + #ifndef _PyLong_SIGN_MASK + #define _PyLong_SIGN_MASK 3 + #endif + #ifndef _PyLong_NON_SIZE_BITS + #define _PyLong_NON_SIZE_BITS 3 + #endif + #define __Pyx_PyLong_Sign(x) (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK) + #define __Pyx_PyLong_IsNeg(x) ((__Pyx_PyLong_Sign(x) & 2) != 0) + #define __Pyx_PyLong_IsNonNeg(x) (!__Pyx_PyLong_IsNeg(x)) + #define __Pyx_PyLong_IsZero(x) (__Pyx_PyLong_Sign(x) & 1) + #define __Pyx_PyLong_IsPos(x) (__Pyx_PyLong_Sign(x) == 0) + #define __Pyx_PyLong_CompactValueUnsigned(x) (__Pyx_PyLong_Digits(x)[0]) + #define __Pyx_PyLong_DigitCount(x) ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS)) + #define __Pyx_PyLong_SignedDigitCount(x) \ + ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x)) + + #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue) + #define __Pyx_PyLong_IsCompact(x) PyUnstable_Long_IsCompact((PyLongObject*) x) + #define __Pyx_PyLong_CompactValue(x) PyUnstable_Long_CompactValue((PyLongObject*) x) + #else + #define __Pyx_PyLong_IsCompact(x) (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS)) + #define __Pyx_PyLong_CompactValue(x) ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0]) + #endif + + // CPython 3.12 requires C99, which defines 'size_t' (but not 'ssize_t') + typedef Py_ssize_t __Pyx_compact_pylong; + typedef size_t __Pyx_compact_upylong; + + #else /* Py < 3.12 */ + #define __Pyx_PyLong_IsNeg(x) (Py_SIZE(x) < 0) + #define __Pyx_PyLong_IsNonNeg(x) (Py_SIZE(x) >= 0) + #define __Pyx_PyLong_IsZero(x) (Py_SIZE(x) == 0) + #define __Pyx_PyLong_IsPos(x) (Py_SIZE(x) > 0) + #define __Pyx_PyLong_CompactValueUnsigned(x) ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0]) + #define __Pyx_PyLong_DigitCount(x) __Pyx_sst_abs(Py_SIZE(x)) + #define __Pyx_PyLong_SignedDigitCount(x) Py_SIZE(x) + + #define __Pyx_PyLong_IsCompact(x) (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1) + #define __Pyx_PyLong_CompactValue(x) \ + ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? 
-(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0]))
+
+    typedef sdigit __Pyx_compact_pylong;
+    typedef digit __Pyx_compact_upylong;
+  #endif
+
+  #if PY_VERSION_HEX >= 0x030C00A5
+    #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->long_value.ob_digit)
+  #else
+    #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->ob_digit)
+  #endif
+#endif
+
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+#include <string.h>
+
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+    PyObject* sys;
+    PyObject* default_encoding = NULL;
+    PyObject* ascii_chars_u = NULL;
+    PyObject* ascii_chars_b = NULL;
+    const char* default_encoding_c;
+    sys = PyImport_ImportModule("sys");
+    if (!sys) goto bad;
+    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
+    Py_DECREF(sys);
+    if (!default_encoding) goto bad;
+    default_encoding_c = PyBytes_AsString(default_encoding);
+    if (!default_encoding_c) goto bad;
+    if (strcmp(default_encoding_c, "ascii") == 0) {
+        __Pyx_sys_getdefaultencoding_not_ascii = 0;
+    } else {
+        char ascii_chars[128];
+        int c;
+        for (c = 0; c < 128; c++) {
+            ascii_chars[c] = (char) c;
+        }
+        __Pyx_sys_getdefaultencoding_not_ascii = 1;
+        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+        if (!ascii_chars_u) goto bad;
+        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+            PyErr_Format(
+                PyExc_ValueError,
+                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+                default_encoding_c);
+            goto bad;
+        }
+        Py_DECREF(ascii_chars_u);
+        Py_DECREF(ascii_chars_b);
+    }
+    Py_DECREF(default_encoding);
+    return 0;
+bad:
+    Py_XDECREF(default_encoding);
+    Py_XDECREF(ascii_chars_u);
+    Py_XDECREF(ascii_chars_b);
+    return -1;
+}
+#endif
+
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+
+// __PYX_DEFAULT_STRING_ENCODING is either a user provided string constant
+// or we need to look it up here
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#include <string.h>
+
+static char* __PYX_DEFAULT_STRING_ENCODING;
+
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+    PyObject* sys;
+    PyObject* default_encoding = NULL;
+    char* default_encoding_c;
+
+    sys = PyImport_ImportModule("sys");
+    if (!sys) goto bad;
+    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+    Py_DECREF(sys);
+    if (!default_encoding) goto bad;
+    default_encoding_c = PyBytes_AsString(default_encoding);
+    if (!default_encoding_c) goto bad;
+    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+    Py_DECREF(default_encoding);
+    return 0;
+bad:
+    Py_XDECREF(default_encoding);
+    return -1;
+}
+#endif
+#endif
+
+/////////////// TypeConversions ///////////////
+
+/* Type Conversion Functions */
+
+#include <string.h>
+
+static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s) {
+    size_t len = strlen(s);
+    if (unlikely(len > (size_t) PY_SSIZE_T_MAX)) {
+        PyErr_SetString(PyExc_OverflowError, "byte string is too
long"); + return -1; + } + return (Py_ssize_t) len; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + Py_ssize_t len = __Pyx_ssize_strlen(c_str); + if (unlikely(len < 0)) return NULL; + return __Pyx_PyUnicode_FromStringAndSize(c_str, len); +} + +static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char* c_str) { + Py_ssize_t len = __Pyx_ssize_strlen(c_str); + if (unlikely(len < 0)) return NULL; + return PyByteArray_FromStringAndSize(c_str, len); +} + +// Py3.7 returns a "const char*" for unicode strings +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} + +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + // borrowed reference, cached internally in 'o' by CPython + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + // raise the error + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/ + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} + +#else /* CYTHON_PEP393_ENABLED: */ + +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + // cached for the lifetime of the object + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + // raise the error + PyUnicode_AsASCIIString(o); + return NULL; + } +#else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ + return PyUnicode_AsUTF8AndSize(o, length); +#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ +} +#endif /* CYTHON_PEP393_ENABLED */ +#endif + +// Py3.7 returns a "const char*" for unicode strings +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */ + +#if (!CYTHON_COMPILING_IN_PYPY && !CYTHON_COMPILING_IN_LIMITED_API) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} + +/* Note: __Pyx_PyObject_IsTrue is written to minimize branching. 
*/ +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} + +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} + +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { + __Pyx_TypeName result_type_name = __Pyx_PyType_GetName(Py_TYPE(result)); +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + // CPython issue #17576: warn if 'result' not of exact type int. + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type " __Pyx_FMT_TYPENAME "). " + "The ability to return an instance of a strict subclass of int is deprecated, " + "and may be removed in a future version of Python.", + result_type_name)) { + __Pyx_DECREF_TypeName(result_type_name); + Py_DECREF(result); + return NULL; + } + __Pyx_DECREF_TypeName(result_type_name); + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type " __Pyx_FMT_TYPENAME ")", + type_name, type_name, result_type_name); + __Pyx_DECREF_TypeName(result_type_name); + Py_DECREF(result); + return NULL; +} + +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} + +{{py: from Cython.Utility import pylong_join }} + +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + // handle most common case first to avoid indirect branch and optimise branch prediction + if (likely(__Pyx_PyLong_IsCompact(b))) { + return __Pyx_PyLong_CompactValue(b); + } else { + const digit* digits = __Pyx_PyLong_Digits(b); + const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(b); + switch (size) { + {{for _size in (2, 3, 4)}} + {{for _case in (_size, -_size)}} + case {{_case}}: + if (8 * sizeof(Py_ssize_t) > {{_size}} * PyLong_SHIFT) { + return {{'-' if _case < 0 else ''}}(Py_ssize_t) {{pylong_join(_size, 'digits', 'size_t')}}; + } + break; + {{endfor}} + {{endfor}} + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = 
PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} + + +static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { + if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { + return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); +#if PY_MAJOR_VERSION < 3 + } else if (likely(PyInt_CheckExact(o))) { + return PyInt_AS_LONG(o); +#endif + } else { + Py_ssize_t ival; + PyObject *x; + x = PyNumber_Index(o); + if (!x) return -1; + ival = PyInt_AsLong(x); + Py_DECREF(x); + return ival; + } +} + + +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} + + +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + +/////////////// pynumber_float.proto /////////////// + +static CYTHON_INLINE PyObject* __Pyx__PyNumber_Float(PyObject* obj); /* proto */ +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : __Pyx__PyNumber_Float(x)) + +/////////////// pynumber_float /////////////// +//@requires: Optimize.c::pybytes_as_double +//@requires: Optimize.c::pyunicode_as_double + +static CYTHON_INLINE PyObject* __Pyx__PyNumber_Float(PyObject* obj) { + // 'obj is PyFloat' is handled in the calling macro + double val; + if (PyLong_CheckExact(obj)) { +#if CYTHON_USE_PYLONG_INTERNALS + if (likely(__Pyx_PyLong_IsCompact(obj))) { + val = (double) __Pyx_PyLong_CompactValue(obj); + goto no_error; + } +#endif + val = PyLong_AsDouble(obj); + } else if (PyUnicode_CheckExact(obj)) { + val = __Pyx_PyUnicode_AsDouble(obj); + } else if (PyBytes_CheckExact(obj)) { + val = __Pyx_PyBytes_AsDouble(obj); + } else if (PyByteArray_CheckExact(obj)) { + val = __Pyx_PyByteArray_AsDouble(obj); + } else { + return PyNumber_Float(obj); + } + + if (unlikely(val == -1 && PyErr_Occurred())) { + return NULL; + } +#if CYTHON_USE_PYLONG_INTERNALS +no_error: +#endif + return PyFloat_FromDouble(val); +} + +/////////////// GCCDiagnostics.proto /////////////// + +// GCC diagnostic pragmas were introduced in GCC 4.6 +// Used to silence conversion warnings that are ok but cannot be avoided. 
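+// A typical use, as in the CIntToPy/CIntFromPy helpers further down, brackets
+// an intentionally lossy constant initialisation:
+//
+//   #ifdef __Pyx_HAS_GCC_DIAGNOSTIC
+//   #pragma GCC diagnostic push
+//   #pragma GCC diagnostic ignored "-Wconversion"
+//   #endif
+//   const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0;
+//   #ifdef __Pyx_HAS_GCC_DIAGNOSTIC
+//   #pragma GCC diagnostic pop
+//   #endif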
+#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#define __Pyx_HAS_GCC_DIAGNOSTIC +#endif + + +/////////////// ToPyCTupleUtility.proto /////////////// +static PyObject* {{funcname}}({{struct_type_decl}}); + +/////////////// ToPyCTupleUtility /////////////// +static PyObject* {{funcname}}({{struct_type_decl}} value) { + PyObject* item = NULL; + PyObject* result = PyTuple_New({{size}}); + if (!result) goto bad; + + {{for ix, component in enumerate(components):}} + {{py:attr = "value.f%s" % ix}} + item = {{component.to_py_function}}({{attr}}); + if (!item) goto bad; + PyTuple_SET_ITEM(result, {{ix}}, item); + {{endfor}} + + return result; +bad: + Py_XDECREF(item); + Py_XDECREF(result); + return NULL; +} + + +/////////////// FromPyCTupleUtility.proto /////////////// +static CYTHON_INLINE {{struct_type_decl}} {{funcname}}(PyObject *); + +/////////////// FromPyCTupleUtility /////////////// + +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +static void __Pyx_tuple_{{funcname}}(PyObject * o, {{struct_type_decl}} *result) { + {{for ix, component in enumerate(components):}} + {{py:attr = "result->f%s" % ix}} + {{attr}} = {{component.from_py_function}}(PyTuple_GET_ITEM(o, {{ix}})); + if ({{component.error_condition(attr)}}) goto bad; + {{endfor}} + return; +bad: + return; +} + +static void __Pyx_list_{{funcname}}(PyObject * o, {{struct_type_decl}} *result) { + {{for ix, component in enumerate(components):}} + {{py:attr = "result->f%s" % ix}} + {{attr}} = {{component.from_py_function}}(PyList_GET_ITEM(o, {{ix}})); + if ({{component.error_condition(attr)}}) goto bad; + {{endfor}} + return; +bad: + return; +} +#endif + +static void __Pyx_seq_{{funcname}}(PyObject * o, {{struct_type_decl}} *result) { + if (unlikely(!PySequence_Check(o))) { + __Pyx_TypeName o_type_name = __Pyx_PyType_GetName(Py_TYPE(o)); + PyErr_Format(PyExc_TypeError, + "Expected a sequence of size %zd, got " __Pyx_FMT_TYPENAME, (Py_ssize_t) {{size}}, o_type_name); + __Pyx_DECREF_TypeName(o_type_name); + goto bad; + } else if (unlikely(PySequence_Length(o) != {{size}})) { + PyErr_Format(PyExc_TypeError, + "Expected a sequence of size %zd, got size %zd", (Py_ssize_t) {{size}}, PySequence_Length(o)); + goto bad; + } + + { + PyObject *item; + {{for ix, component in enumerate(components):}} + {{py:attr = "result->f%s" % ix}} + item = PySequence_ITEM(o, {{ix}}); if (unlikely(!item)) goto bad; + {{attr}} = {{component.from_py_function}}(item); + Py_DECREF(item); + if ({{component.error_condition(attr)}}) goto bad; + {{endfor}} + } + return; +bad: + return; +} + +static CYTHON_INLINE {{struct_type_decl}} {{funcname}}(PyObject * o) { + {{struct_type_decl}} result; + + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_Check(o) && PyTuple_GET_SIZE(o) == {{size}})) { + __Pyx_tuple_{{funcname}}(o, &result); + } else if (likely(PyList_Check(o) && PyList_GET_SIZE(o) == {{size}})) { + __Pyx_list_{{funcname}}(o, &result); + } else + #endif + { + __Pyx_seq_{{funcname}}(o, &result); + } + + return result; +} + + +/////////////// UnicodeAsUCS4.proto /////////////// + +static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*); + +/////////////// UnicodeAsUCS4 /////////////// + +static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) { + Py_ssize_t length; + #if CYTHON_PEP393_ENABLED + length = PyUnicode_GET_LENGTH(x); + if (likely(length == 1)) { + return PyUnicode_READ_CHAR(x, 0); + } + #else + length = PyUnicode_GET_SIZE(x); + 
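+    // On pre-PEP-393 narrow (UCS-2) builds a non-BMP code point occupies two
+    // code units; the Py_UNICODE_SIZE == 2 branch below reassembles such a
+    // surrogate pair as 0x10000 + ((high & 0x3FF) << 10 | (low & 0x3FF)).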
if (likely(length == 1)) { + return PyUnicode_AS_UNICODE(x)[0]; + } + #if Py_UNICODE_SIZE == 2 + else if (PyUnicode_GET_SIZE(x) == 2) { + Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0]; + if (high_val >= 0xD800 && high_val <= 0xDBFF) { + Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1]; + if (low_val >= 0xDC00 && low_val <= 0xDFFF) { + return 0x10000 + (((high_val & ((1<<10)-1)) << 10) | (low_val & ((1<<10)-1))); + } + } + } + #endif + #endif + PyErr_Format(PyExc_ValueError, + "only single character unicode strings can be converted to Py_UCS4, " + "got length %" CYTHON_FORMAT_SSIZE_T "d", length); + return (Py_UCS4)-1; +} + + +/////////////// ObjectAsUCS4.proto /////////////// +//@requires: UnicodeAsUCS4 + +#define __Pyx_PyObject_AsPy_UCS4(x) \ + (likely(PyUnicode_Check(x)) ? __Pyx_PyUnicode_AsPy_UCS4(x) : __Pyx__PyObject_AsPy_UCS4(x)) +static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject*); + +/////////////// ObjectAsUCS4 /////////////// + +static Py_UCS4 __Pyx__PyObject_AsPy_UCS4_raise_error(long ival) { + if (ival < 0) { + if (!PyErr_Occurred()) + PyErr_SetString(PyExc_OverflowError, + "cannot convert negative value to Py_UCS4"); + } else { + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to Py_UCS4"); + } + return (Py_UCS4)-1; +} + +static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject* x) { + long ival; + ival = __Pyx_PyInt_As_long(x); + if (unlikely(!__Pyx_is_valid_index(ival, 1114111 + 1))) { + return __Pyx__PyObject_AsPy_UCS4_raise_error(ival); + } + return (Py_UCS4)ival; +} + + +/////////////// ObjectAsPyUnicode.proto /////////////// + +static CYTHON_INLINE Py_UNICODE __Pyx_PyObject_AsPy_UNICODE(PyObject*); + +/////////////// ObjectAsPyUnicode /////////////// + +static CYTHON_INLINE Py_UNICODE __Pyx_PyObject_AsPy_UNICODE(PyObject* x) { + long ival; + #if CYTHON_PEP393_ENABLED + #if Py_UNICODE_SIZE > 2 + const long maxval = 1114111; + #else + const long maxval = 65535; + #endif + #else + static long maxval = 0; + #endif + if (PyUnicode_Check(x)) { + if (unlikely(__Pyx_PyUnicode_GET_LENGTH(x) != 1)) { + PyErr_Format(PyExc_ValueError, + "only single character unicode strings can be converted to Py_UNICODE, " + "got length %" CYTHON_FORMAT_SSIZE_T "d", __Pyx_PyUnicode_GET_LENGTH(x)); + return (Py_UNICODE)-1; + } + #if CYTHON_PEP393_ENABLED + ival = PyUnicode_READ_CHAR(x, 0); + #else + return PyUnicode_AS_UNICODE(x)[0]; + #endif + } else { + #if !CYTHON_PEP393_ENABLED + if (unlikely(!maxval)) + maxval = (long)PyUnicode_GetMax(); + #endif + ival = __Pyx_PyInt_As_long(x); + } + if (unlikely(!__Pyx_is_valid_index(ival, maxval + 1))) { + if (ival < 0) { + if (!PyErr_Occurred()) + PyErr_SetString(PyExc_OverflowError, + "cannot convert negative value to Py_UNICODE"); + return (Py_UNICODE)-1; + } else { + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to Py_UNICODE"); + } + return (Py_UNICODE)-1; + } + return (Py_UNICODE)ival; +} + + +/////////////// CIntToPy.proto /////////////// + +static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value); + +/////////////// CIntToPy /////////////// +//@requires: GCCDiagnostics + +static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof({{TYPE}}) < sizeof(long)) { + return 
PyInt_FromLong((long) value); + } else if (sizeof({{TYPE}}) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof({{TYPE}}) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof({{TYPE}}) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof({{TYPE}}) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; +#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000 + return _PyLong_FromByteArray(bytes, sizeof({{TYPE}}), + little, !is_unsigned); +#else + // call int.from_bytes() + PyObject *from_bytes, *result = NULL; + PyObject *py_bytes = NULL, *arg_tuple = NULL, *kwds = NULL, *order_str = NULL; + from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes"); + if (!from_bytes) return NULL; + py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof({{TYPE}})); + if (!py_bytes) goto limited_bad; + // I'm deliberately not using PYIDENT here because this code path is very unlikely + // to ever run so it seems a pessimization mostly. + order_str = PyUnicode_FromString(little ? "little" : "big"); + if (!order_str) goto limited_bad; + arg_tuple = PyTuple_Pack(2, py_bytes, order_str); + if (!arg_tuple) goto limited_bad; + if (!is_unsigned) { + // default is signed=False + kwds = PyDict_New(); + if (!kwds) goto limited_bad; + if (PyDict_SetItemString(kwds, "signed", __Pyx_NewRef(Py_True))) goto limited_bad; + } + result = PyObject_Call(from_bytes, arg_tuple, kwds); + + limited_bad: + Py_XDECREF(kwds); + Py_XDECREF(arg_tuple); + Py_XDECREF(order_str); + Py_XDECREF(py_bytes); + Py_XDECREF(from_bytes); + return result; +#endif + } +} + + +/////////////// CIntToDigits /////////////// + +static const char DIGIT_PAIRS_10[2*10*10+1] = { + "00010203040506070809" + "10111213141516171819" + "20212223242526272829" + "30313233343536373839" + "40414243444546474849" + "50515253545556575859" + "60616263646566676869" + "70717273747576777879" + "80818283848586878889" + "90919293949596979899" +}; + +static const char DIGIT_PAIRS_8[2*8*8+1] = { + "0001020304050607" + "1011121314151617" + "2021222324252627" + "3031323334353637" + "4041424344454647" + "5051525354555657" + "6061626364656667" + "7071727374757677" +}; + +static const char DIGITS_HEX[2*16+1] = { + "0123456789abcdef" + "0123456789ABCDEF" +}; + + +/////////////// CIntToPyUnicode.proto /////////////// + +static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value, Py_ssize_t width, char padding_char, char format_char); + +/////////////// CIntToPyUnicode /////////////// +//@requires: StringTools.c::IncludeStringH +//@requires: StringTools.c::BuildPyUnicode +//@requires: CIntToDigits +//@requires: GCCDiagnostics + +// NOTE: inlining because most arguments are constant, which collapses lots of code below + +static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value, Py_ssize_t width, char padding_char, char format_char) { + // simple and conservative C string allocation on the stack: each byte gives at most 3 digits, plus sign + char digits[sizeof({{TYPE}})*3+2]; + // 'dpos' points to end of digits array + 1 initially to allow for pre-decrement looping + char *dpos, *end = digits + sizeof({{TYPE}})*3+2; + const char *hex_digits = DIGITS_HEX; + Py_ssize_t length, ulength; + 
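+    // note on the "+2" in the buffer size above: one byte for the sign, one
+    // for the single-digit overshoot of the two-digits-at-a-time loop below
+    // (see 'last_one_off')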
int prepend_sign, last_one_off; + {{TYPE}} remaining; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + + if (format_char == 'X') { + hex_digits += 16; + format_char = 'x'; + } + + // surprise: even trivial sprintf() calls don't get optimised in gcc (4.8) + remaining = value; /* not using abs(value) to avoid overflow problems */ + last_one_off = 0; + dpos = end; + do { + int digit_pos; + switch (format_char) { + case 'o': + digit_pos = abs((int)(remaining % (8*8))); + remaining = ({{TYPE}}) (remaining / (8*8)); + dpos -= 2; + memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2); /* copy 2 digits at a time, unaligned */ + last_one_off = (digit_pos < 8); + break; + case 'd': + digit_pos = abs((int)(remaining % (10*10))); + remaining = ({{TYPE}}) (remaining / (10*10)); + dpos -= 2; + memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2); /* copy 2 digits at a time, unaligned */ + last_one_off = (digit_pos < 10); + break; + case 'x': + *(--dpos) = hex_digits[abs((int)(remaining % 16))]; + remaining = ({{TYPE}}) (remaining / 16); + break; + default: + assert(0); + break; + } + } while (unlikely(remaining != 0)); + + // Correct dpos by 1 if we read an excess digit. + assert(!last_one_off || *dpos == '0'); + dpos += last_one_off; + + length = end - dpos; + ulength = length; + prepend_sign = 0; + if (!is_unsigned && value <= neg_one) { + if (padding_char == ' ' || width <= length + 1) { + *(--dpos) = '-'; + ++length; + } else { + prepend_sign = 1; + } + ++ulength; + } + if (width > ulength) { + ulength = width; + } + // single character unicode strings are cached in CPython => use PyUnicode_FromOrdinal() for them + if (ulength == 1) { + return PyUnicode_FromOrdinal(*dpos); + } + return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char); +} + + +/////////////// CBIntToPyUnicode.proto /////////////// + +#define {{TO_PY_FUNCTION}}(value) \ + ((value) ? 
__Pyx_NewRef({{TRUE_CONST}}) : __Pyx_NewRef({{FALSE_CONST}})) + + +/////////////// PyIntFromDouble.proto /////////////// + +#if PY_MAJOR_VERSION < 3 +static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value); +#else +#define __Pyx_PyInt_FromDouble(value) PyLong_FromDouble(value) +#endif + +/////////////// PyIntFromDouble /////////////// + +#if PY_MAJOR_VERSION < 3 +static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value) { + if (value >= (double)LONG_MIN && value <= (double)LONG_MAX) { + return PyInt_FromLong((long)value); + } + return PyLong_FromDouble(value); +} +#endif + + +/////////////// CIntFromPyVerify /////////////// + +// see CIntFromPy +#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value) \ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) + +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value) \ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) + +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc) \ + { \ + func_type value = func_value; \ + if (sizeof(target_type) < sizeof(func_type)) { \ + if (unlikely(value != (func_type) (target_type) value)) { \ + func_type zero = 0; \ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred())) \ + return (target_type) -1; \ + if (is_unsigned && unlikely(value < zero)) \ + goto raise_neg_overflow; \ + else \ + goto raise_overflow; \ + } \ + } \ + return (target_type) value; \ + } + + +/////////////// CIntFromPy.proto /////////////// + +static CYTHON_INLINE {{TYPE}} {{FROM_PY_FUNCTION}}(PyObject *); + +/////////////// CIntFromPy /////////////// +//@requires: CIntFromPyVerify +//@requires: GCCDiagnostics + +{{py: from Cython.Utility import pylong_join }} + +static CYTHON_INLINE {{TYPE}} {{FROM_PY_FUNCTION}}(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if ((sizeof({{TYPE}}) < sizeof(long))) { + __PYX_VERIFY_RETURN_INT({{TYPE}}, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return ({{TYPE}}) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + if (unlikely(__Pyx_PyLong_IsNeg(x))) { + goto raise_neg_overflow; + //} else if (__Pyx_PyLong_IsZero(x)) { + // return ({{TYPE}}) 0; + } else if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT({{TYPE}}, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_DigitCount(x)) { + {{for _size in (2, 3, 4)}} + case {{_size}}: + if ((8 * sizeof({{TYPE}}) > {{_size-1}} * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT({{TYPE}}, unsigned long, {{pylong_join(_size, 'digits')}}) + } else if ((8 * sizeof({{TYPE}}) >= {{_size}} * PyLong_SHIFT)) { + return ({{TYPE}}) {{pylong_join(_size, 'digits', TYPE)}}; + } + } + break; + {{endfor}} + } + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + // misuse Py_False as a quick way to 
compare to a '0' int object in PyPy + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return ({{TYPE}}) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if ((sizeof({{TYPE}}) <= sizeof(unsigned long))) { + __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof({{TYPE}}) <= sizeof(unsigned PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { + // signed +#if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT({{TYPE}}, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_SignedDigitCount(x)) { + {{for _size in (2, 3, 4)}} + {{for _case in (-_size, _size)}} + case {{_case}}: + if ((8 * sizeof({{TYPE}}){{' - 1' if _case < 0 else ''}} > {{_size-1}} * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT({{TYPE}}, {{'long' if _case < 0 else 'unsigned long'}}, {{'-(long) ' if _case < 0 else ''}}{{pylong_join(_size, 'digits')}}) + } else if ((8 * sizeof({{TYPE}}) - 1 > {{_size}} * PyLong_SHIFT)) { + return ({{TYPE}}) ({{'((%s)-1)*' % TYPE if _case < 0 else ''}}{{pylong_join(_size, 'digits', TYPE)}}); + } + } + break; + {{endfor}} + {{endfor}} + } + } +#endif + if ((sizeof({{TYPE}}) <= sizeof(long))) { + __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof({{TYPE}}) <= sizeof(PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + + {{if IS_ENUM}} + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available, cannot convert large enums"); + return ({{TYPE}}) -1; + {{else}} + // large integer type and no access to PyLong internals => allow for a more expensive conversion + { + {{TYPE}} val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); +#if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } +#endif + if (likely(v)) { + int ret = -1; +#if PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); +#else +// Inefficient copy of bit chunks through the C-API. Probably still better than a "cannot do this" exception. +// This is substantially faster in CPython (>30%) than calling "int.to_bytes()" + PyObject *stepval = NULL, *mask = NULL, *shift = NULL; + int bits, remaining_bits, is_negative = 0; + long idigit; + int chunk_size = (sizeof(long) < 8) ? 
30 : 62; + + // use exact PyLong to prevent user defined &&/<= (1L << remaining_bits))) + goto raise_overflow; + val |= (({{TYPE}}) idigit) << bits; + + #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + unpacking_done: + #endif + // handle sign and overflow into sign bit + if (!is_unsigned) { + // gcc warns about unsigned (val < 0) => test sign bit instead + if (unlikely(val & ((({{TYPE}}) 1) << (sizeof({{TYPE}}) * 8 - 1)))) + goto raise_overflow; + // undo the PyNumber_Invert() above + if (is_negative) + val = ~val; + } + ret = 0; + done: + Py_XDECREF(shift); + Py_XDECREF(mask); + Py_XDECREF(stepval); +#endif + Py_DECREF(v); + if (likely(!ret)) + return val; + } + return ({{TYPE}}) -1; + } + {{endif}} + } else { + {{TYPE}} val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return ({{TYPE}}) -1; + val = {{FROM_PY_FUNCTION}}(tmp); + Py_DECREF(tmp); + return val; + } + +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to {{TYPE}}"); + return ({{TYPE}}) -1; + +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to {{TYPE}}"); + return ({{TYPE}}) -1; +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/UFuncs.pyx b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/UFuncs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..c2043e8cc6bb20fa3c434c3e1d111f769dabf606 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/UFuncs.pyx @@ -0,0 +1,51 @@ +##################### UFuncDefinition ###################### + +cdef extern from *: + ctypedef int npy_intp + struct PyObject + PyObject* __Pyx_NewRef(object) + {{inline_func_declaration}} + +# variable names have to come from tempita to avoid duplication +@cname("{{func_cname}}") +cdef void {{func_cname}}(char **args, const npy_intp *dimensions, const npy_intp* steps, void* data) except * {{"nogil" if will_be_called_without_gil else ""}}: + cdef npy_intp i + cdef npy_intp n = dimensions[0] + {{for idx, tp in enumerate(in_types)}} + cdef char* in_{{idx}} = args[{{idx}}] + cdef {{tp.empty_declaration_code(pyrex=True)}} cast_in_{{idx}} + {{endfor}} + {{for idx, tp in enumerate(out_types)}} + cdef char* out_{{idx}} = args[{{idx+len(in_types)}}] + cdef {{tp.empty_declaration_code(pyrex=True)}} cast_out_{{idx}} + {{endfor}} + {{for idx in range(len(out_types)+len(in_types))}} + cdef npy_intp step_{{idx}} = steps[{{idx}}] + {{endfor}} + + {{"with gil" if (not nogil and will_be_called_without_gil) else "if True"}}: + for i in range(n): + {{for idx, tp in enumerate(in_types)}} + {{if tp.is_pyobject}} + cast_in_{{idx}} = (<{{tp.empty_declaration_code(pyrex=True)}}>(in_{{idx}})[0]) + {{else}} + cast_in_{{idx}} = (<{{tp.empty_declaration_code(pyrex=True)}}*>in_{{idx}})[0] + {{endif}} + {{endfor}} + + {{", ".join("cast_out_{}".format(idx) for idx in range(len(out_types)))}} = \ + {{inline_func_call}}({{", ".join("cast_in_{}".format(idx) for idx in range(len(in_types)))}}) + + {{for idx, tp in enumerate(out_types)}} + {{if tp.is_pyobject}} + (out_{{idx}})[0] = __Pyx_NewRef(cast_out_{{idx}}) + {{else}} + (<{{tp.empty_declaration_code(pyrex=True)}}*>out_{{idx}})[0] = cast_out_{{idx}} + {{endif}} + {{endfor}} + {{for idx in range(len(in_types))}} + in_{{idx}} += step_{{idx}} + {{endfor}} + {{for idx in range(len(out_types))}} + out_{{idx}} += step_{{idx+len(in_types)}} + {{endfor}} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/UFuncs_C.c 
b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/UFuncs_C.c new file mode 100644 index 0000000000000000000000000000000000000000..2115e4b2b52da8b7e10e173805947efcdc131014 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/UFuncs_C.c @@ -0,0 +1,42 @@ +///////////////////////// UFuncsInit.proto ///////////////////////// +//@proto_block: utility_code_proto_before_types + +#include +#include + +// account for change in type of arguments to PyUFuncGenericFunction in Numpy 1.19.x +// Unfortunately we can only test against Numpy version 1.20.x since it wasn't marked +// as an API break. Therefore, I'm "solving" the issue by casting function pointer types +// on lower Numpy versions. +#if NPY_API_VERSION >= 0x0000000e // Numpy 1.20.x +#define __PYX_PYUFUNCGENERICFUNCTION_CAST(x) x +#else +#define __PYX_PYUFUNCGENERICFUNCTION_CAST(x) (PyUFuncGenericFunction)x +#endif + +/////////////////////// UFuncConsts.proto //////////////////// + +// getter functions because we can't forward-declare arrays +static PyUFuncGenericFunction* {{ufunc_funcs_name}}(void); /* proto */ +static char* {{ufunc_types_name}}(void); /* proto */ +static void* {{ufunc_data_name}}[] = {NULL}; /* always null */ + +/////////////////////// UFuncConsts ///////////////////////// + +static PyUFuncGenericFunction* {{ufunc_funcs_name}}(void) { + static PyUFuncGenericFunction arr[] = { + {{for loop, cname in looper(func_cnames)}} + __PYX_PYUFUNCGENERICFUNCTION_CAST(&{{cname}}){{if not loop.last}},{{endif}} + {{endfor}} + }; + return arr; +} + +static char* {{ufunc_types_name}}(void) { + static char arr[] = { + {{for loop, tp in looper(type_constants)}} + {{tp}}{{if not loop.last}},{{endif}} + {{endfor}} + }; + return arr; +} diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..73ccc1e2cbaa15855dc21b33dd5473ae3222e2ea --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/__init__.py @@ -0,0 +1,29 @@ + +def pylong_join(count, digits_ptr='digits', join_type='unsigned long'): + """ + Generate an unrolled shift-then-or loop over the first 'count' digits. + Assumes that they fit into 'join_type'. + + (((d[2] << n) | d[1]) << n) | d[0] + """ + return ('(' * (count * 2) + ' | '.join( + "(%s)%s[%d])%s)" % (join_type, digits_ptr, _i, " << PyLong_SHIFT" if _i else '') + for _i in range(count-1, -1, -1))) + + +# although it could potentially make use of data independence, +# this implementation is a bit slower than the simpler one above +def _pylong_join(count, digits_ptr='digits', join_type='unsigned long'): + """ + Generate an or-ed series of shifts for the first 'count' digits. + Assumes that they fit into 'join_type'. + + (d[2] << 2*n) | (d[1] << 1*n) | d[0] + """ + def shift(n): + # avoid compiler warnings for overly large shifts that will be discarded anyway + return " << (%d * PyLong_SHIFT < 8 * sizeof(%s) ? 
%d * PyLong_SHIFT : 0)" % (n, join_type, n) if n else '' + + return '(%s)' % ' | '.join( + "(((%s)%s[%d])%s)" % (join_type, digits_ptr, i, shift(i)) + for i in range(count-1, -1, -1)) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/arrayarray.h b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/arrayarray.h new file mode 100644 index 0000000000000000000000000000000000000000..a9e49237856c30130b6b620708fb59e85525183a --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utility/arrayarray.h @@ -0,0 +1,149 @@ +/////////////// ArrayAPI.proto /////////////// + +// arrayarray.h +// +// Artificial C-API for Python's type, +// used by array.pxd +// +// last changes: 2009-05-15 rk +// 2012-05-02 andreasvc +// (see revision control) +// + +#ifndef _ARRAYARRAY_H +#define _ARRAYARRAY_H + +// These two forward declarations are explicitly handled in the type +// declaration code, as including them here is too late for cython-defined +// types to use them. +// struct arrayobject; +// typedef struct arrayobject arrayobject; + +// All possible arraydescr values are defined in the vector "descriptors" +// below. That's defined later because the appropriate get and set +// functions aren't visible yet. +typedef struct arraydescr { + int typecode; + int itemsize; + PyObject * (*getitem)(struct arrayobject *, Py_ssize_t); + int (*setitem)(struct arrayobject *, Py_ssize_t, PyObject *); +#if PY_MAJOR_VERSION >= 3 + char *formats; +#endif +} arraydescr; + + +struct arrayobject { + PyObject_HEAD + Py_ssize_t ob_size; + union { + char *ob_item; + float *as_floats; + double *as_doubles; + int *as_ints; + unsigned int *as_uints; + unsigned char *as_uchars; + signed char *as_schars; + char *as_chars; + unsigned long *as_ulongs; + long *as_longs; +#if PY_MAJOR_VERSION >= 3 + unsigned long long *as_ulonglongs; + long long *as_longlongs; +#endif + short *as_shorts; + unsigned short *as_ushorts; + Py_UNICODE *as_pyunicodes; + void *as_voidptr; + } data; + Py_ssize_t allocated; + struct arraydescr *ob_descr; + PyObject *weakreflist; /* List of weak references */ +#if PY_MAJOR_VERSION >= 3 + int ob_exports; /* Number of exported buffers */ +#endif +}; + +#ifndef NO_NEWARRAY_INLINE +// fast creation of a new array +static CYTHON_INLINE PyObject * newarrayobject(PyTypeObject *type, Py_ssize_t size, + struct arraydescr *descr) { + arrayobject *op; + size_t nbytes; + + if (size < 0) { + PyErr_BadInternalCall(); + return NULL; + } + + nbytes = size * descr->itemsize; + // Check for overflow + if (nbytes / descr->itemsize != (size_t)size) { + return PyErr_NoMemory(); + } + op = (arrayobject *) type->tp_alloc(type, 0); + if (op == NULL) { + return NULL; + } + op->ob_descr = descr; + op->allocated = size; + op->weakreflist = NULL; + __Pyx_SET_SIZE(op, size); + if (size <= 0) { + op->data.ob_item = NULL; + } + else { + op->data.ob_item = PyMem_NEW(char, nbytes); + if (op->data.ob_item == NULL) { + Py_DECREF(op); + return PyErr_NoMemory(); + } + } + return (PyObject *) op; +} +#else +PyObject* newarrayobject(PyTypeObject *type, Py_ssize_t size, + struct arraydescr *descr); +#endif /* ifndef NO_NEWARRAY_INLINE */ + +// fast resize (reallocation to the point) +// not designed for filing small increments (but for fast opaque array apps) +static CYTHON_INLINE int resize(arrayobject *self, Py_ssize_t n) { + void *items = (void*) self->data.ob_item; + PyMem_Resize(items, char, (size_t)(n * self->ob_descr->itemsize)); + if (items == NULL) { + PyErr_NoMemory(); + return -1; + } + self->data.ob_item = 
(char*) items; + __Pyx_SET_SIZE(self, n); + self->allocated = n; + return 0; +} + +// suitable for small increments; over allocation 50% ; +static CYTHON_INLINE int resize_smart(arrayobject *self, Py_ssize_t n) { + void *items = (void*) self->data.ob_item; + Py_ssize_t newsize; + if (n < self->allocated && n*4 > self->allocated) { + __Pyx_SET_SIZE(self, n); + return 0; + } + newsize = n + (n / 2) + 1; + if (newsize <= n) { /* overflow */ + PyErr_NoMemory(); + return -1; + } + PyMem_Resize(items, char, (size_t)(newsize * self->ob_descr->itemsize)); + if (items == NULL) { + PyErr_NoMemory(); + return -1; + } + self->data.ob_item = (char*) items; + __Pyx_SET_SIZE(self, n); + self->allocated = newsize; + return 0; +} + +#endif +/* _ARRAYARRAY_H */ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utils.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utils.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..5e596f7303153699f0b7c1b8796981d4e1519c8c Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utils.cp39-win_amd64.pyd differ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utils.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b3bfd8602f80827e5309196e3764ccedf16a4178 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Utils.py @@ -0,0 +1,718 @@ +""" +Cython -- Things that don't belong anywhere else in particular +""" + +from __future__ import absolute_import + +import cython + +cython.declare( + basestring=object, + os=object, sys=object, re=object, io=object, codecs=object, glob=object, shutil=object, tempfile=object, + cython_version=object, + _function_caches=list, _parse_file_version=object, _match_file_encoding=object, +) + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + +try: + FileNotFoundError +except NameError: + FileNotFoundError = OSError + +import os +import sys +import re +import io +import codecs +import glob +import shutil +import tempfile +from functools import wraps + +from . import __version__ as cython_version + +PACKAGE_FILES = ("__init__.py", "__init__.pyc", "__init__.pyx", "__init__.pxd") + +_build_cache_name = "__{0}_cache".format +_CACHE_NAME_PATTERN = re.compile(r"^__(.+)_cache$") + +modification_time = os.path.getmtime + +GENERATED_BY_MARKER = "/* Generated by Cython %s */" % cython_version +GENERATED_BY_MARKER_BYTES = GENERATED_BY_MARKER.encode('us-ascii') + + +class _TryFinallyGeneratorContextManager(object): + """ + Fast, bare minimum @contextmanager, only for try-finally, not for exception handling. 
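+
+    The wrapped generator must yield exactly once; the code after the yield is
+    the "finally" part, run by __exit__() via next(). Illustrative sketch:
+
+        @try_finally_contextmanager
+        def opened(path):
+            f = open(path)
+            yield f
+            f.close()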
+ """ + def __init__(self, gen): + self._gen = gen + + def __enter__(self): + return next(self._gen) + + def __exit__(self, exc_type, exc_val, exc_tb): + try: + next(self._gen) + except (StopIteration, GeneratorExit): + pass + + +def try_finally_contextmanager(gen_func): + @wraps(gen_func) + def make_gen(*args, **kwargs): + return _TryFinallyGeneratorContextManager(gen_func(*args, **kwargs)) + return make_gen + + +_function_caches = [] + + +def clear_function_caches(): + for cache in _function_caches: + cache.clear() + + +def cached_function(f): + cache = {} + _function_caches.append(cache) + uncomputed = object() + + @wraps(f) + def wrapper(*args): + res = cache.get(args, uncomputed) + if res is uncomputed: + res = cache[args] = f(*args) + return res + + wrapper.uncached = f + return wrapper + + +def _find_cache_attributes(obj): + """The function iterates over the attributes of the object and, + if it finds the name of the cache, it returns it and the corresponding method name. + The method may not be present in the object. + """ + for attr_name in dir(obj): + match = _CACHE_NAME_PATTERN.match(attr_name) + if match is not None: + yield attr_name, match.group(1) + + +def clear_method_caches(obj): + """Removes every cache found in the object, + if a corresponding method exists for that cache. + """ + for cache_name, method_name in _find_cache_attributes(obj): + if hasattr(obj, method_name): + delattr(obj, cache_name) + # if there is no corresponding method, then we assume + # that this attribute was not created by our cached method + + +def cached_method(f): + cache_name = _build_cache_name(f.__name__) + + def wrapper(self, *args): + cache = getattr(self, cache_name, None) + if cache is None: + cache = {} + setattr(self, cache_name, cache) + if args in cache: + return cache[args] + res = cache[args] = f(self, *args) + return res + + return wrapper + + +def replace_suffix(path, newsuf): + base, _ = os.path.splitext(path) + return base + newsuf + + +def open_new_file(path): + if os.path.exists(path): + # Make sure to create a new file here so we can + # safely hard link the output files. + os.unlink(path) + + # we use the ISO-8859-1 encoding here because we only write pure + # ASCII strings or (e.g. for file names) byte encoded strings as + # Unicode, so we need a direct mapping from the first 256 Unicode + # characters to a byte sequence, which ISO-8859-1 provides + + # note: can't use io.open() in Py2 as we may be writing str objects + return codecs.open(path, "w", encoding="ISO-8859-1") + + +def castrate_file(path, st): + # Remove junk contents from an output file after a + # failed compilation. + # Also sets access and modification times back to + # those specified by st (a stat struct). + if not is_cython_generated_file(path, allow_failed=True, if_not_found=False): + return + + try: + f = open_new_file(path) + except EnvironmentError: + pass + else: + f.write( + "#error Do not use this file, it is the result of a failed Cython compilation.\n") + f.close() + if st: + os.utime(path, (st.st_atime, st.st_mtime-1)) + + +def is_cython_generated_file(path, allow_failed=False, if_not_found=True): + failure_marker = b"#error Do not use this file, it is the result of a failed Cython compilation." 
+ file_content = None + if os.path.exists(path): + try: + with open(path, "rb") as f: + file_content = f.read(len(failure_marker)) + except (OSError, IOError): + pass # Probably just doesn't exist any more + + if file_content is None: + # file does not exist (yet) + return if_not_found + + return ( + # Cython C file? + file_content.startswith(b"/* Generated by Cython ") or + # Cython output file after previous failures? + (allow_failed and file_content == failure_marker) or + # Let's allow overwriting empty files as well. They might have resulted from previous failures. + not file_content + ) + + +def file_generated_by_this_cython(path): + file_content = b'' + if os.path.exists(path): + try: + with open(path, "rb") as f: + file_content = f.read(len(GENERATED_BY_MARKER_BYTES)) + except (OSError, IOError): + pass # Probably just doesn't exist any more + return file_content and file_content.startswith(GENERATED_BY_MARKER_BYTES) + + +def file_newer_than(path, time): + ftime = modification_time(path) + return ftime > time + + +def safe_makedirs(path): + try: + os.makedirs(path) + except OSError: + if not os.path.isdir(path): + raise + + +def copy_file_to_dir_if_newer(sourcefile, destdir): + """ + Copy file sourcefile to directory destdir (creating it if needed), + preserving metadata. If the destination file exists and is not + older than the source file, the copying is skipped. + """ + destfile = os.path.join(destdir, os.path.basename(sourcefile)) + try: + desttime = modification_time(destfile) + except OSError: + # New file does not exist, destdir may or may not exist + safe_makedirs(destdir) + else: + # New file already exists + if not file_newer_than(sourcefile, desttime): + return + shutil.copy2(sourcefile, destfile) + + +@cached_function +def find_root_package_dir(file_path): + dir = os.path.dirname(file_path) + if file_path == dir: + return dir + elif is_package_dir(dir): + return find_root_package_dir(dir) + else: + return dir + + +@cached_function +def check_package_dir(dir_path, package_names): + namespace = True + for dirname in package_names: + dir_path = os.path.join(dir_path, dirname) + has_init = contains_init(dir_path) + if has_init: + namespace = False + return dir_path, namespace + + +@cached_function +def contains_init(dir_path): + for filename in PACKAGE_FILES: + path = os.path.join(dir_path, filename) + if path_exists(path): + return 1 + + +def is_package_dir(dir_path): + if contains_init(dir_path): + return 1 + + +@cached_function +def path_exists(path): + # try on the filesystem first + if os.path.exists(path): + return True + # figure out if a PEP 302 loader is around + try: + loader = __loader__ + # XXX the code below assumes a 'zipimport.zipimporter' instance + # XXX should be easy to generalize, but too lazy right now to write it + archive_path = getattr(loader, 'archive', None) + if archive_path: + normpath = os.path.normpath(path) + if normpath.startswith(archive_path): + arcname = normpath[len(archive_path)+1:] + try: + loader.get_data(arcname) + return True + except IOError: + return False + except NameError: + pass + return False + + +_parse_file_version = re.compile(r".*[.]cython-([0-9]+)[.][^./\\]+$").findall + + +@cached_function +def find_versioned_file(directory, filename, suffix, + _current_version=int(re.sub(r"^([0-9]+)[.]([0-9]+).*", r"\1\2", cython_version))): + """ + Search a directory for versioned pxd files, e.g. "lib.cython-30.pxd" for a Cython 3.0+ version. 
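+    The highest version that does not exceed the running Cython version wins;
+    the plain, unversioned file is kept as a last-resort fallback.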
+ + @param directory: the directory to search + @param filename: the filename without suffix + @param suffix: the filename extension including the dot, e.g. ".pxd" + @return: the file path if found, or None + """ + assert not suffix or suffix[:1] == '.' + path_prefix = os.path.join(directory, filename) + + matching_files = glob.glob(path_prefix + ".cython-*" + suffix) + path = path_prefix + suffix + if not os.path.exists(path): + path = None + best_match = (-1, path) # last resort, if we do not have versioned .pxd files + + for path in matching_files: + versions = _parse_file_version(path) + if versions: + int_version = int(versions[0]) + # Let's assume no duplicates. + if best_match[0] < int_version <= _current_version: + best_match = (int_version, path) + return best_match[1] + + +# file name encodings + +def decode_filename(filename): + if isinstance(filename, bytes): + try: + filename_encoding = sys.getfilesystemencoding() + if filename_encoding is None: + filename_encoding = sys.getdefaultencoding() + filename = filename.decode(filename_encoding) + except UnicodeDecodeError: + pass + return filename + + +# support for source file encoding detection + +_match_file_encoding = re.compile(br"(\w*coding)[:=]\s*([-\w.]+)").search + + +def detect_opened_file_encoding(f, default='UTF-8'): + # PEPs 263 and 3120 + # Most of the time the first two lines fall in the first couple of hundred chars, + # and this bulk read/split is much faster. + lines = () + start = b'' + while len(lines) < 3: + data = f.read(500) + start += data + lines = start.split(b"\n") + if not data: + break + + m = _match_file_encoding(lines[0]) + if m and m.group(1) != b'c_string_encoding': + return m.group(2).decode('iso8859-1') + elif len(lines) > 1: + m = _match_file_encoding(lines[1]) + if m: + return m.group(2).decode('iso8859-1') + return default + + +def skip_bom(f): + """ + Read past a BOM at the beginning of a source file. + This could be added to the scanner, but it's *substantially* easier + to keep it at this level. + """ + if f.read(1) != u'\uFEFF': + f.seek(0) + + +def open_source_file(source_filename, encoding=None, error_handling=None): + stream = None + try: + if encoding is None: + # Most of the time the encoding is not specified, so try hard to open the file only once. + f = io.open(source_filename, 'rb') + encoding = detect_opened_file_encoding(f) + f.seek(0) + stream = io.TextIOWrapper(f, encoding=encoding, errors=error_handling) + else: + stream = io.open(source_filename, encoding=encoding, errors=error_handling) + + except OSError: + if os.path.exists(source_filename): + raise # File is there, but something went wrong reading from it. + # Allow source files to be in zip files etc. 
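+        # (like path_exists() above, this assumes a zipimport.zipimporter-style
+        # __loader__ exposing an 'archive' prefix and get_data())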
+ try: + loader = __loader__ + if source_filename.startswith(loader.archive): + stream = open_source_from_loader( + loader, source_filename, + encoding, error_handling) + except (NameError, AttributeError): + pass + + if stream is None: + raise FileNotFoundError(source_filename) + skip_bom(stream) + return stream + + +def open_source_from_loader(loader, + source_filename, + encoding=None, error_handling=None): + nrmpath = os.path.normpath(source_filename) + arcname = nrmpath[len(loader.archive)+1:] + data = loader.get_data(arcname) + return io.TextIOWrapper(io.BytesIO(data), + encoding=encoding, + errors=error_handling) + + +def str_to_number(value): + # note: this expects a string as input that was accepted by the + # parser already, with an optional "-" sign in front + is_neg = False + if value[:1] == '-': + is_neg = True + value = value[1:] + if len(value) < 2: + value = int(value, 0) + elif value[0] == '0': + literal_type = value[1] # 0'o' - 0'b' - 0'x' + if literal_type in 'xX': + # hex notation ('0x1AF') + value = strip_py2_long_suffix(value) + value = int(value[2:], 16) + elif literal_type in 'oO': + # Py3 octal notation ('0o136') + value = int(value[2:], 8) + elif literal_type in 'bB': + # Py3 binary notation ('0b101') + value = int(value[2:], 2) + else: + # Py2 octal notation ('0136') + value = int(value, 8) + else: + value = int(value, 0) + return -value if is_neg else value + + +def strip_py2_long_suffix(value_str): + """ + Python 2 likes to append 'L' to stringified numbers + which in then can't process when converting them to numbers. + """ + if value_str[-1] in 'lL': + return value_str[:-1] + return value_str + + +def long_literal(value): + if isinstance(value, basestring): + value = str_to_number(value) + return not -2**31 <= value < 2**31 + + +@cached_function +def get_cython_cache_dir(): + r""" + Return the base directory containing Cython's caches. + + Priority: + + 1. CYTHON_CACHE_DIR + 2. (OS X): ~/Library/Caches/Cython + (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined + 3. ~/.cython + + """ + if 'CYTHON_CACHE_DIR' in os.environ: + return os.environ['CYTHON_CACHE_DIR'] + + parent = None + if os.name == 'posix': + if sys.platform == 'darwin': + parent = os.path.expanduser('~/Library/Caches') + else: + # this could fallback on ~/.cache + parent = os.environ.get('XDG_CACHE_HOME') + + if parent and os.path.isdir(parent): + return os.path.join(parent, 'cython') + + # last fallback: ~/.cython + return os.path.expanduser(os.path.join('~', '.cython')) + + +@try_finally_contextmanager +def captured_fd(stream=2, encoding=None): + orig_stream = os.dup(stream) # keep copy of original stream + try: + with tempfile.TemporaryFile(mode="a+b") as temp_file: + def read_output(_output=[b'']): + if not temp_file.closed: + temp_file.seek(0) + _output[0] = temp_file.read() + return _output[0] + + os.dup2(temp_file.fileno(), stream) # replace stream by copy of pipe + def get_output(): + result = read_output() + return result.decode(encoding) if encoding else result + + yield get_output + # note: @contextlib.contextmanager requires try-finally here + os.dup2(orig_stream, stream) # restore original stream + read_output() # keep the output in case it's used after closing the context manager + finally: + os.close(orig_stream) + + +def get_encoding_candidates(): + candidates = [sys.getdefaultencoding()] + for stream in (sys.stdout, sys.stdin, sys.__stdout__, sys.__stdin__): + encoding = getattr(stream, 'encoding', None) + # encoding might be None (e.g. 
somebody redirects stdout): + if encoding is not None and encoding not in candidates: + candidates.append(encoding) + return candidates + + +def prepare_captured(captured): + captured_bytes = captured.strip() + if not captured_bytes: + return None + for encoding in get_encoding_candidates(): + try: + return captured_bytes.decode(encoding) + except UnicodeDecodeError: + pass + # last resort: print at least the readable ascii parts correctly. + return captured_bytes.decode('latin-1') + + +def print_captured(captured, output, header_line=None): + captured = prepare_captured(captured) + if captured: + if header_line: + output.write(header_line) + output.write(captured) + + +def print_bytes(s, header_text=None, end=b'\n', file=sys.stdout, flush=True): + if header_text: + file.write(header_text) # note: text! => file.write() instead of out.write() + file.flush() + try: + out = file.buffer # Py3 + except AttributeError: + out = file # Py2 + out.write(s) + if end: + out.write(end) + if flush: + out.flush() + + +class OrderedSet(object): + def __init__(self, elements=()): + self._list = [] + self._set = set() + self.update(elements) + + def __iter__(self): + return iter(self._list) + + def update(self, elements): + for e in elements: + self.add(e) + + def add(self, e): + if e not in self._set: + self._list.append(e) + self._set.add(e) + + def __bool__(self): + return bool(self._set) + + __nonzero__ = __bool__ + + +# Class decorator that adds a metaclass and recreates the class with it. +# Copied from 'six'. +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def raise_error_if_module_name_forbidden(full_module_name): + # it is bad idea to call the pyx-file cython.pyx, so fail early + if full_module_name == 'cython' or full_module_name.startswith('cython.'): + raise ValueError('cython is a special module, cannot be used as a module name') + + +def build_hex_version(version_string): + """ + Parse and translate public version identifier like '4.3a1' into the readable hex representation '0x040300A1' (like PY_VERSION_HEX). + + SEE: https://peps.python.org/pep-0440/#public-version-identifiers + """ + # Parse '4.12a1' into [4, 12, 0, 0xA01] + # And ignore .dev, .pre and .post segments + digits = [] + release_status = 0xF0 + for segment in re.split(r'(\D+)', version_string): + if segment in ('a', 'b', 'rc'): + release_status = {'a': 0xA0, 'b': 0xB0, 'rc': 0xC0}[segment] + digits = (digits + [0, 0])[:3] # 1.2a1 -> 1.2.0a1 + elif segment in ('.dev', '.pre', '.post'): + break # break since those are the last segments + elif segment != '.': + digits.append(int(segment)) + + digits = (digits + [0] * 3)[:4] + digits[3] += release_status + + # Then, build a single hex value, two hex digits per version part. 
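+    # e.g. '3.0.8' -> [3, 0, 8, 0xF0] -> '0x030008F0'
+    #      '4.3a1' -> [4, 3, 0, 0xA1] -> '0x040300A1'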
+ hexversion = 0 + for digit in digits: + hexversion = (hexversion << 8) + digit + + return '0x%08X' % hexversion + + +def write_depfile(target, source, dependencies): + src_base_dir = os.path.dirname(source) + cwd = os.getcwd() + if not src_base_dir.endswith(os.sep): + src_base_dir += os.sep + # paths below the base_dir are relative, otherwise absolute + paths = [] + for fname in dependencies: + if fname.startswith(src_base_dir): + try: + newpath = os.path.relpath(fname, cwd) + except ValueError: + # if they are on different Windows drives, absolute is fine + newpath = os.path.abspath(fname) + else: + newpath = os.path.abspath(fname) + paths.append(newpath) + + depline = os.path.relpath(target, cwd) + ": \\\n " + depline += " \\\n ".join(paths) + "\n" + + with open(target+'.dep', 'w') as outfile: + outfile.write(depline) + + +def print_version(): + print("Cython version %s" % cython_version) + # For legacy reasons, we also write the version to stderr. + # New tools should expect it in stdout, but existing ones still pipe from stderr, or from both. + if sys.stderr.isatty() or sys.stdout == sys.stderr: + return + if os.fstat(1) == os.fstat(2): + # This is somewhat unsafe since sys.stdout/err might not really be linked to streams 1/2. + # However, in most *relevant* cases, where Cython is run as an external tool, they are linked. + return + sys.stderr.write("Cython version %s\n" % cython_version) + + +def normalise_float_repr(float_str): + """ + Generate a 'normalised', simple digits string representation of a float value + to allow string comparisons. Examples: '.123', '123.456', '123.' + """ + str_value = float_str.lower().lstrip('0') + + exp = 0 + if 'E' in str_value or 'e' in str_value: + str_value, exp = str_value.split('E' if 'E' in str_value else 'e', 1) + exp = int(exp) + + if '.' in str_value: + num_int_digits = str_value.index('.') + str_value = str_value[:num_int_digits] + str_value[num_int_digits + 1:] + else: + num_int_digits = len(str_value) + exp += num_int_digits + + result = ( + str_value[:exp] + + '0' * (exp - len(str_value)) + + '.' + + '0' * -exp + + str_value[exp:] + ).rstrip('0') + + return result if result != '.' else '.0' diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..549246b8a378c4586e602884a93b317987b7101c --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/__init__.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +from .Shadow import __version__ + +# Void cython.* directives (for case insensitive operating systems). +from .Shadow import * + + +def load_ipython_extension(ip): + """Load the extension in IPython.""" + from .Build.IpythonMagic import CythonMagics # pylint: disable=cyclic-import + ip.register_magics(CythonMagics) diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/COPYING.txt b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/COPYING.txt new file mode 100644 index 0000000000000000000000000000000000000000..1a05a211a01a5a585a1c8c3d305daab5381862c3 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/COPYING.txt @@ -0,0 +1,19 @@ +The original Pyrex code as of 2006-04 is licensed under the following +license: "Copyright stuff: Pyrex is free of restrictions. You may use, +redistribute, modify and distribute modified versions." + +------------------ + +Cython, which derives from Pyrex, is licensed under the Apache 2.0 +Software License. 
More precisely, all modifications and new code +made to go from Pyrex to Cython are so licensed. + +See LICENSE.txt for more details. + +------------------ + +The output of a Cython compilation is NOT considered a derivative +work of Cython. Specifically, though the compilation process may +embed snippets of varying lengths into the final output, these +snippets, as embedded in the output, do not encumber the resulting +output with any license restrictions. diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/LICENSE.txt b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f6f63985556c1de7fd12b3a9a5790d31615d67b --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/LICENSE.txt @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/PKG-INFO b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..31141d45075dfb14fcaa22d6a4fc64c45c2aa5c5 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/PKG-INFO @@ -0,0 +1,63 @@ +Metadata-Version: 2.1 +Name: Cython +Version: 3.0.8 +Summary: The Cython compiler for writing C extensions in the Python language. +Home-page: https://cython.org/ +Author: Robert Bradshaw, Stefan Behnel, Dag Seljebotn, Greg Ewing, et al. +Author-email: cython-devel@python.org +License: Apache-2.0 +Project-URL: Documentation, https://cython.readthedocs.io/ +Project-URL: Donate, https://cython.readthedocs.io/en/latest/src/donating.html +Project-URL: Source Code, https://github.com/cython/cython +Project-URL: Bug Tracker, https://github.com/cython/cython/issues +Project-URL: User Group, https://groups.google.com/g/cython-users +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: C +Classifier: Programming Language :: Cython +Classifier: Topic :: Software Development :: Code Generators +Classifier: Topic :: Software Development :: Compilers +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* +License-File: LICENSE.txt +License-File: COPYING.txt + +The Cython language makes writing C extensions for the Python language as +easy as Python itself. Cython is a source code translator based on Pyrex_, +but supports more cutting edge functionality and optimizations. + +The Cython language is a superset of the Python language (almost all Python +code is also valid Cython code), but Cython additionally supports optional +static typing to natively call C functions, operate with C++ classes and +declare fast C types on variables and class attributes. This allows the +compiler to generate very efficient C code from Cython code. + +This makes Cython the ideal language for writing glue code for external +C/C++ libraries, and for fast C modules that speed up the execution of +Python code. + +Note that for one-time builds, e.g. for CI/testing, on platforms that are not +covered by one of the wheel packages provided on PyPI *and* the pure Python wheel +that we provide is not used, it is substantially faster than a full source build +to install an uncompiled (slower) version of Cython with:: + + pip install Cython --install-option="--no-cython-compile" + +.. 
_Pyrex: https://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/RECORD b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ea0e040ba85024de129f797122edd1b2dc61ef0b --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/RECORD @@ -0,0 +1,317 @@ +cython.py,sha256=QLhiwfetr0jJ-3L4S7QJCk5xkzVIBQctXcuusbSoXyY,544 +Cython/CodeWriter.py,sha256=vYtuEUNkfFOGSsNdw1tlHAXzRHN7QDfIWiOY4sowhtc,25366 +Cython/Coverage.py,sha256=26OCppQLW6IqGMC_WcXrOTOsjMlHz4tb5P2iOiLeCWg,18900 +Cython/Debugging.py,sha256=gr-gbNyN1yPTAdGgErZXv_l0SSdwhsyNfxfhaREFssM,572 +Cython/Shadow.py,sha256=cyPvIhkFsyl7twiCESGEmaFZJwpKCyWJrSRNsqIsl2k,17886 +Cython/StringIOTree.cp39-win_amd64.pyd,sha256=S2RtnIcG2xqPa1Q_JW3ST161dpjD0kSvKoqz9ClyxDo,57344 +Cython/StringIOTree.py,sha256=Kmsz_0sgyidhzaMz2qxG9UpWgNhQyXZUiX3n6VfYBEM,5911 +Cython/TestUtils.py,sha256=sGVYUz3OtUE9Em3IkTw2KFugdyg4hsCISEYV35soFt0,15101 +Cython/Utils.cp39-win_amd64.pyd,sha256=5CQm7uoeG7JcorZX8kMalr9StgJLB5h2gLPEPrvZlk4,253440 +Cython/Utils.py,sha256=1Lre2EomD-qO78ujZ-SZ2-Yeku_rL9OV0_0mDJuoSK0,22653 +Cython/__init__.py,sha256=Mu3ZZzg-CrIPURm79JfIUn7zyIdSZf7eNzg4xcUOATQ,370 +Cython/Build/BuildExecutable.py,sha256=m6IZ7UZ_h2tNpPSHosgmaJLeLmDT59AcI5oAFfo0IKU,4959 +Cython/Build/Cythonize.py,sha256=BNiJ1SdnCEixcNnwTlFazNCA2dIhjZdOvgcC7SqaWus,10085 +Cython/Build/Dependencies.py,sha256=DzncslCetaCZHSdQgRDKwYd47Vk-Qb6lsZVaYuVM3b8,54310 +Cython/Build/Distutils.py,sha256=hxXo722D0j7cfyYCbancdFsKYAuPdfSu9nYmGu6kL6w,50 +Cython/Build/Inline.py,sha256=nK2x4k8tuTRtZz_XSz_fCo9TO_oCdNdoUDunlSVW8Xg,13460 +Cython/Build/IpythonMagic.py,sha256=PPUEuBQ1r8kAer2hPqJPGgg8ORWKQ1pYrD2JUCQEz8c,22539 +Cython/Build/__init__.py,sha256=b6Aw5K0E5t667e-D-xK745uQRAnchOLFSieqle62bIU,415 +Cython/Build/Tests/TestCyCache.py,sha256=cJhDOuMV8J8JvQI-YWtb76TdVW-LDsfHbdrL93bpNCo,4785 +Cython/Build/Tests/TestCythonizeArgsParser.py,sha256=Y8MNkwXwf3UlYJ1rRgjoLh_9ciFNPta4qOlo0N6bdw4,20828 +Cython/Build/Tests/TestDependencies.py,sha256=nBwB0aDTGrLpoWhZHp_YYbAPwBY23Wn6whKdvBxNLDs,5977 +Cython/Build/Tests/TestInline.py,sha256=XhOeBf5f3lNwsKHCtInPBMbSNvJOdkrnH9DMuNuW9sY,3599 +Cython/Build/Tests/TestIpythonMagic.py,sha256=PNKz3xDGnnL6rbfn3AyXCe6yfQuf7hdEsTaMOoMdApc,9706 +Cython/Build/Tests/TestRecythonize.py,sha256=Dw7_qzfsbfT4N7BwSn3ndG3mRgxOwKK7VXoBORPtgTM,6488 +Cython/Build/Tests/TestStripLiterals.py,sha256=5PgFteJ91xCt1DYDjn8RNKxiMwRGLY3U9iIlss5B0fA,1605 +Cython/Build/Tests/__init__.py,sha256=qTr7l4s1u10pcMfFjP9cFZGS1PKT6v2Ml_vy3dreto0,14 +Cython/Compiler/AnalysedTreeTransforms.py,sha256=vydcZ6Gn79iREYfL5uVLvVFNL4rhrGhkYZD2Cwmb4j8,3933 +Cython/Compiler/Annotate.py,sha256=z849wv5dVrfR3JsE5qcXS8dnh1LwMODuXZG27HCVMhA,14494 +Cython/Compiler/AutoDocTransforms.py,sha256=tD313LL4iaGApVGmrm307YDeBpvaiRDmPOBAwJ9YEK0,12056 +Cython/Compiler/Buffer.py,sha256=ZMU7RkdoEf5u9PNZdAQLvWYp621_XvtqBOP9_IVqxuk,30053 +Cython/Compiler/Builtin.py,sha256=Loan_cIZUleavCbinMlrWgYC6XwwAStqJw-X_Ml0enA,32711 +Cython/Compiler/CmdLine.py,sha256=0S6mqX3-qWtSnTVOeyZRF0XAbWy_oa7R0JYvID2oNr4,12777 +Cython/Compiler/Code.cp39-win_amd64.pyd,sha256=CnTvlPrhY9TpPFcOJa1KU2ugBNctg56bhgldkqAqjLU,838144 +Cython/Compiler/Code.pxd,sha256=ECP1S9TuHAbwUztwa0ghsgY8GBL7AIqNOvBlYM-EVVg,3679 +Cython/Compiler/Code.py,sha256=SpnFFEZl3gA1caUqAQE7LigrW8cpfDSMZTnVAXqwelk,106901 +Cython/Compiler/CodeGeneration.py,sha256=q3tokV8Ae6Ck6AUYNGUn2R16KRbfXTvQrI7MKJwgg58,1143 
+Cython/Compiler/CythonScope.py,sha256=iIHzxpNuNUrSJvo4JxD3Lg0TSD-SGWfAtkFxEve4cl8,7044 +Cython/Compiler/Dataclass.py,sha256=Dad1R-j8ARUJajrqvmAA_NcPW5jZvfIFGE0GBdXszdA,36871 +Cython/Compiler/DebugFlags.py,sha256=5j4pt7JRK5bwWzeDN8906xJVywsNGDmOoiYhFnevbs8,644 +Cython/Compiler/Errors.py,sha256=NEGZGwRv9YHAmU8Pf5j-Nqex1Zi2GyDt9aoaxpoBbvQ,9612 +Cython/Compiler/ExprNodes.py,sha256=MoYf-8UwohrQ1fWPQl1dy4SX8rJ4gH5oOLQCTSd4MTE,615855 +Cython/Compiler/FlowControl.cp39-win_amd64.pyd,sha256=HMM3jCtS2Ke8WTIrJcyGAomqaRuQsND65sFsqPXhJfg,431104 +Cython/Compiler/FlowControl.pxd,sha256=NbvCrSZVFsPtu8YwxRaFcoRRmWFeNrtStz6Yp31HHtY,3090 +Cython/Compiler/FlowControl.py,sha256=1aak2rodOfe91EkdjShQXyvUFhnD9dd4sYlc24EYW9Y,50245 +Cython/Compiler/FusedNode.cp39-win_amd64.pyd,sha256=0_Ma46T8cLgU06zIs6xjyR3M74-gUp-f-rgQGBwAbPc,296960 +Cython/Compiler/FusedNode.py,sha256=n1goafevt2Z2Sf6KoLkoUqiZOikYK0PxdN33moawPIw,44375 +Cython/Compiler/Future.py,sha256=2Nk9GuDPE_ssf1eSJHNvScjtp-ukvqKTW7ZQOnAgQtc,645 +Cython/Compiler/Interpreter.py,sha256=5KuRATS9X3T1w7AZuTV2FtyfWvw6CIuSOqmrq0T8qE8,2178 +Cython/Compiler/Lexicon.py,sha256=zHaGbKOZa7-hVaHaRjJW7hI1UzD2raym3KiIXbFzInc,22114 +Cython/Compiler/Main.py,sha256=vu3ysm_do5-pN_dLQkUvl0wt9c07CweXecbn8sYKt80,32994 +Cython/Compiler/MemoryView.py,sha256=lGg4EANDgwegNKB_fdeAnLP_ihBCm3hDioBB3gxG-pg,31245 +Cython/Compiler/ModuleNode.py,sha256=GWglaemGjkDrzXbgGDc6uNEaMvtYhX6Ztbq9fUeHa4g,187366 +Cython/Compiler/Naming.py,sha256=kEwC2GPkRxr5qzNwJuUaDlSng-zokEJxx8NwbG_wtRc,8358 +Cython/Compiler/Nodes.py,sha256=w_Leqjb5Nft8Ki6VFPbRbzWt3yg5iEx9CmqorbUlP94,450733 +Cython/Compiler/Optimize.py,sha256=mQ4dkWOvK2_CCBsaC70AlN8EQEESljlASwCFdKcLyVE,231558 +Cython/Compiler/Options.py,sha256=M9j84WoXkgRUlZ2c8x1Wb80-NLIZXU25YIzAIdO1Q40,31445 +Cython/Compiler/ParseTreeTransforms.pxd,sha256=kaicujhdwGVYs5-zKyE_VwI0jPFBnKuZ0w6QE7GrpMY,2667 +Cython/Compiler/ParseTreeTransforms.py,sha256=n3_y0JTSonDYMANuImTPIiafjPLRDjtNVWhFAGe_ibk,175597 +Cython/Compiler/Parsing.cp39-win_amd64.pyd,sha256=7NGUC4VoVqsyQo_ntI19pgc5NH6M35v2wNGoXcWS8XA,712704 +Cython/Compiler/Parsing.pxd,sha256=jE5HG2e5ePWZDFC1JZuHZ7Mhpb1XuZe4082qSLd5JtE,9371 +Cython/Compiler/Parsing.py,sha256=QnTC5GiOsJRfR4kMNvRjtbcC2r7KUyMwUs0nzDETk5c,144034 +Cython/Compiler/Pipeline.py,sha256=kozpch-FZlAv_N40zIl7FwfArBFDikj9u7BB0ddFa_s,16050 +Cython/Compiler/PyrexTypes.py,sha256=bhaDvI9GXJTsjP6X3wqhhfEuB6T7hynmiIIH6dE3xLw,212143 +Cython/Compiler/Pythran.py,sha256=NKDNEEgsnFgDTeAXqDeVG5ZsfD-HZqb-eAt5XZKjCiw,7494 +Cython/Compiler/Scanning.cp39-win_amd64.pyd,sha256=qVVfccuGlX8xGusp-SvKdfklwaXnWjwGp3EauNs1oQc,218624 +Cython/Compiler/Scanning.pxd,sha256=QOMWvJyz2WHZcuEqo_0Ha2wVZfO7tSb8UPTPM5UmqSw,2134 +Cython/Compiler/Scanning.py,sha256=YaEMTqvp0anPtlHLXIrgTQXsq9a5UW7wA62IHjhh9HI,20696 +Cython/Compiler/StringEncoding.py,sha256=xV-gBh82qiiPWleTO_VuPGYw0_B8VrsdDa_vN-bkvWw,12120 +Cython/Compiler/Symtab.py,sha256=rhny8nidLT1-cT7pY4-7fPydiyp2_447gFuMb7HuWS8,131771 +Cython/Compiler/TreeFragment.py,sha256=h8xqculRh6bbcKsM2JTJddlRrQLZTmn1gJQML7ULuVQ,9989 +Cython/Compiler/TreePath.py,sha256=PyyMZE5r-A8kcmoSdBTKW8qcGwQ40comtaBBKQLdI6M,7937 +Cython/Compiler/TypeInference.py,sha256=1AS7dhiLkZ_YVi1RxtTfxeAiH7q4p4jJyy_X1pRiMr4,23434 +Cython/Compiler/TypeSlots.py,sha256=6C7s46CfEjYqSJNjgstZ4d7sqJ8r-Qggt1B0HuhRRWI,51541 +Cython/Compiler/UFuncs.py,sha256=J86EtQAz5YlXHL084TyocfuZubGLWgqflo93_w1DI8Q,9452 +Cython/Compiler/UtilNodes.py,sha256=cgOMMOKGZBJyTjA0EkC4GKPZBfRdxwcTL0uXqoiuEgI,12851 +Cython/Compiler/UtilityCode.py,sha256=POGz0rcCTtTLGUhyh7vYC6ORqjAEESHjE7Xxa1Y4XpA,11218 
+Cython/Compiler/Version.py,sha256=VjgFLzyBKxFHNN5RPglcTAa7HEiUmT-CtzQgX6SgHrc,190 +Cython/Compiler/Visitor.cp39-win_amd64.pyd,sha256=VTcRAb3QMmpcnGCQGKrSrth52wQTOROUBnFVLzC-6Ak,233472 +Cython/Compiler/Visitor.pxd,sha256=z7aWTgkW0fLdHRLeu_F6JgsNCENPHsl-lun98REc2vQ,1869 +Cython/Compiler/Visitor.py,sha256=OXsDb45WADARHgRIFVPJJoUlJ3TcQPlCbE717nNbEQg,32498 +Cython/Compiler/__init__.py,sha256=qTr7l4s1u10pcMfFjP9cFZGS1PKT6v2Ml_vy3dreto0,14 +Cython/Compiler/Tests/TestBuffer.py,sha256=9XVjnJCET8jQP-qXQiWFgqJNDsEL4fI_t7ky09Jkkfc,4261 +Cython/Compiler/Tests/TestCmdLine.py,sha256=_eir0HqW3mdvIP87LMjT8XHTdVRf60uHZHX9bJRr3Xc,22423 +Cython/Compiler/Tests/TestFlowControl.py,sha256=Lk7Md-owaNd4W0jgUZdD3GI5koRaf7T-otHSAPUVZwg,1916 +Cython/Compiler/Tests/TestGrammar.py,sha256=6gBqrzgKg5VIRexGSvF9b5vcDFwqCmpOrWxxwa7Mr78,5331 +Cython/Compiler/Tests/TestMemView.py,sha256=gxoHsXE_GMR9i-S_PfQxr8ks87066GqFDOiqmDTLBBg,2588 +Cython/Compiler/Tests/TestParseTreeTransforms.py,sha256=e89g4S0va7PS02POWj6Kgj6GnLoIHozXE7bQJaf7rjQ,9215 +Cython/Compiler/Tests/TestScanning.py,sha256=_xkgETyhng-kWaZ2lJHQYVBfFdI1Od00GWq_Lewzkrg,4907 +Cython/Compiler/Tests/TestSignatureMatching.py,sha256=lS2zsqq7n_5lJSTE1O2cmY0Vg5x2Zsh3D6dxLvJ7pho,3415 +Cython/Compiler/Tests/TestStringEncoding.py,sha256=caK5Ba948txhW3auiKJVlAm6Pc_Ca55H3y5HNyuX4L4,2359 +Cython/Compiler/Tests/TestTreeFragment.py,sha256=eT4G2rY1QvYmm-Mcm_zOlfJ3XOhx4n06v0hCG44GePE,2229 +Cython/Compiler/Tests/TestTreePath.py,sha256=IXfif0sk56Y_jLQoWSInGZN-j4wAar0-m5PR0by-828,4285 +Cython/Compiler/Tests/TestTypes.py,sha256=P9yuXtBMa_5yYgEv7cYXoZuf1t9QYQcHbgg-2tgHLv8,3370 +Cython/Compiler/Tests/TestUtilityLoad.py,sha256=7FE2o7rklJ4kR-g3wCwioV22AlVSyBxm22wnfCJA7aw,3213 +Cython/Compiler/Tests/TestVisitor.py,sha256=AmO82RIIkx86E7BPjAx1oDM4TarT65Sjuu3q96ahdHk,2289 +Cython/Compiler/Tests/Utils.py,sha256=NP0oZ2ln0hkVpMUVwusrqA2pcg4Ded5kHXtRkKVU_mY,1101 +Cython/Compiler/Tests/__init__.py,sha256=qTr7l4s1u10pcMfFjP9cFZGS1PKT6v2Ml_vy3dreto0,14 +Cython/Debugger/Cygdb.py,sha256=NEc8lFR8ZH0vdJoUZsJjgGhuC6duTUM-g1q40M8GM6w,7091 +Cython/Debugger/DebugWriter.py,sha256=wuYsCpYmVcMhBurXkb9rfVMOR1LkKxIxEJ44TxVVn84,2576 +Cython/Debugger/__init__.py,sha256=qTr7l4s1u10pcMfFjP9cFZGS1PKT6v2Ml_vy3dreto0,14 +Cython/Debugger/libcython.py,sha256=Vom5YnM7FTZMVCnOO7mHvE3qqzzs-kmePRF_Bti7tII,47999 +Cython/Debugger/libpython.py,sha256=1kSsZ9jb4jLAHB5cUxqmRDJT_ERJf3nkRTAFaiq7xFI,96854 +Cython/Debugger/Tests/TestLibCython.py,sha256=s9wX1BRZSQKHcmV88vsASgX-wNJ8qlPPtaDaAvAy1tM,8731 +Cython/Debugger/Tests/__init__.py,sha256=qTr7l4s1u10pcMfFjP9cFZGS1PKT6v2Ml_vy3dreto0,14 +Cython/Debugger/Tests/cfuncs.c,sha256=0kVEVGhvczwZEmHF5mcoY-_D_Gjcla4bk5GDGsVp7AU,79 +Cython/Debugger/Tests/codefile,sha256=uQwkatsrp4fgWu4mYGaamnbYXetHRmHqOdlOFQ0FDr8,691 +Cython/Debugger/Tests/test_libcython_in_gdb.py,sha256=Wfcw_XuoH8u5IByAWD-JRbc6TALxcrHrwhLasJKp4Ac,18601 +Cython/Debugger/Tests/test_libpython_in_gdb.py,sha256=Cl8y8XFTXzGh6_zoC0Ns1zTENRepx7n4eFMddwJtcbU,4161 +Cython/Distutils/__init__.py,sha256=TIPzU9Hr1mcxoWCWMaSpqHtFrFvI9jPSVV3vY4e_D_0,100 +Cython/Distutils/build_ext.py,sha256=Kn1-QpsYzL_8Gp7lrhn9d2DzGUu5HFfJnjm4Md6Dq0U,5846 +Cython/Distutils/extension.py,sha256=3ti-4Jdm4WNUJFBfgJE8JArvGsCMkIRsv6U_lBLzxNQ,4763 +Cython/Distutils/old_build_ext.py,sha256=H3x8lpfCeHp5MoIBK6n8q30qUX7i8eDbwID9CyD7MH0,14182 +Cython/Includes/openmp.pxd,sha256=vbg8-WbMFB5gtpSrW5eTN2End53FgnjXb46uUDci72Q,1762 +Cython/Includes/cpython/__init__.pxd,sha256=6vIYhBuIK4CdiLayreovD96AMwCyE6N-P9gv3aaA_MM,8493 
+Cython/Includes/cpython/array.pxd,sha256=7zd9RoCTb-rvaG-ZL_xaEHpNvR6CxashbUWxuLPjnNE,6532 +Cython/Includes/cpython/bool.pxd,sha256=zZuBTAAmxxi648diuEMDauxsWkv6UflqTqaeCdhrOzw,1395 +Cython/Includes/cpython/buffer.pxd,sha256=qKidu0GQaHS9ZI21s5gZyGm8Zzva5aTDfL8jNtdKWqM,4982 +Cython/Includes/cpython/bytearray.pxd,sha256=kS_Za3f5R9aAjkPDtOXtMeqMw04xyCXYcB4phK9naSM,1476 +Cython/Includes/cpython/bytes.pxd,sha256=76NmZiixsUFYB-MSM8ljgZQ8hEdxtPf0Sfo8_4BqpPE,10266 +Cython/Includes/cpython/cellobject.pxd,sha256=85vWthm3Eaz6osWFHWGir_3A_bkSr84rp0efboO00TI,1425 +Cython/Includes/cpython/ceval.pxd,sha256=6zfXLYBT0BA2_1ETW9s7zKd7HGxLUAuCfcD-ufjJrDE,244 +Cython/Includes/cpython/cobject.pxd,sha256=GSM8CF79I32Z4rDiDH-D4xR27zZNybPgX7YFR2bEVH0,1560 +Cython/Includes/cpython/codecs.pxd,sha256=dManadMa8Q4C36EhWF1Y2H2ULyKLJ81Op-ghKzJ_7kM,5205 +Cython/Includes/cpython/complex.pxd,sha256=Cwx5wieBZPZBsc3kjiuXL0VsrrGc_oy8Wsp9tWhb-6s,1879 +Cython/Includes/cpython/contextvars.pxd,sha256=AoaCZIzsOetNqH286PJRx6SKaxdroK04NnGGwOg7vog,5871 +Cython/Includes/cpython/conversion.pxd,sha256=nNpUufRfWUJRGm7UxMd1LqsZbZuFMkPXmp5b3lUNJW8,1732 +Cython/Includes/cpython/datetime.pxd,sha256=5AjG_xyOqblJLWGqjVKXhukhq-OvptSCSWR_Ou4I3w8,15859 +Cython/Includes/cpython/descr.pxd,sha256=QhWBf2vOQPM-dZuI_Y9fyUhPB8mS-A0uGegpsR_ogd4,754 +Cython/Includes/cpython/dict.pxd,sha256=RRnxrINalJ0uXR2UgpYFyOwEmLh1ZX_qnxsQG2GyrVc,8125 +Cython/Includes/cpython/exc.pxd,sha256=T4j6sLOVd3DnhTKr7_bCyVrAbCqJKQyTE-uHwLjOV0s,14093 +Cython/Includes/cpython/fileobject.pxd,sha256=r1_hRqogYsY4qbWM2Ha4zuDN91KKexrmXoU_lgHO7fA,2946 +Cython/Includes/cpython/float.pxd,sha256=oW_pjxlD3hXHgCPMOHBMDuC-Rlk2k9v5nH6QLpJMpEg,1696 +Cython/Includes/cpython/function.pxd,sha256=IQUqqRgcyUb66LCU29DHPnHtIjBi0CqVK85WEsd-80I,2736 +Cython/Includes/cpython/genobject.pxd,sha256=opzIi9hfc1BKak0GkkfbZOQyfd4aazZlKJ9vTUKckIk,1077 +Cython/Includes/cpython/getargs.pxd,sha256=FB2VRMRBu_-tJddq5z4qt6_0AMhFD9dhnfiNpYxpd4Q,787 +Cython/Includes/cpython/instance.pxd,sha256=QJhpwFewdoesdDrA6n8_WVk_bm4gzgVnlWjTEejSY5U,1010 +Cython/Includes/cpython/int.pxd,sha256=X6TQzCZUpH58YrVDlbIEUzQWIivqDi5xRoXpRTloDxA,4220 +Cython/Includes/cpython/iterator.pxd,sha256=nlXir1W8Twwr_Z8WLUdSBU9sT-gYs0sCSLShPmj284I,1355 +Cython/Includes/cpython/iterobject.pxd,sha256=eLH3Jn-3JE45Tk0qPOkg4vyAJiZ19Aft3cYrANPZIC0,1060 +Cython/Includes/cpython/list.pxd,sha256=7uIMppkZMBnamDSfXvawcqlfdp2tG__9n4oM7Vm7jLE,4188 +Cython/Includes/cpython/long.pxd,sha256=5eii-LnUDC50yj5EAtjKe-fuaE7KG_aCqPjaif66mYc,7196 +Cython/Includes/cpython/longintrepr.pxd,sha256=ntKRiWayBSycBCo7xp9ApNHndPsajVQv1vaUC0O4mno,499 +Cython/Includes/cpython/mapping.pxd,sha256=Z5LZuj_LFDx4A9p3upUZke94bvuUMZsdam95xJEUT0U,2755 +Cython/Includes/cpython/marshal.pxd,sha256=6QWSzLG9Y9KKyLwlhfRAPIthXgtdFJ2UYTHwCgSGMO4,2963 +Cython/Includes/cpython/mem.pxd,sha256=4ABFEJy2Y9mx2Kpe1JPF8iwhlRU1oTBLvw485VBXPZ0,6032 +Cython/Includes/cpython/memoryview.pxd,sha256=9hoTVibI_42w44IL2vl6nKoElSUN-14NvrMP-Rl8vsw,2554 +Cython/Includes/cpython/method.pxd,sha256=7kVK3IL7JoBUTDjoU27bGzSW33UsfBsa4LnzwRW50kg,2245 +Cython/Includes/cpython/module.pxd,sha256=QEKerLC-FO0_rmp4ElRFjukVXjCnwdu_h2i3fbnTAws,10336 +Cython/Includes/cpython/number.pxd,sha256=mpfvgeOnEaFlwh0fZVYVW-Xk63WQDrDWSxt0a2VKH-g,12187 +Cython/Includes/cpython/object.pxd,sha256=HXaZHzyPm5EXJ8TC6p6SQFA0-NcGmKLYQuHX42qw2q0,20443 +Cython/Includes/cpython/oldbuffer.pxd,sha256=kWOvn_itUYoDchAEroxsXqFWNlkvyyPOHzyZDwP6Bec,2979 +Cython/Includes/cpython/pycapsule.pxd,sha256=NivcmjqfGDZBCajreWWQEYbURBRycafpEHG6OK-P13U,5843 
+Cython/Includes/cpython/pylifecycle.pxd,sha256=XysyqlFavqZdT3iAL0ZAeUyVHgz7KoJlbkCH4r_oZCU,2068 +Cython/Includes/cpython/pyport.pxd,sha256=WsGtMoWtzdu8BuyX5LwCfP3Ev1k37lAqfqGPrvk1EQY,230 +Cython/Includes/cpython/pystate.pxd,sha256=fkOJxwgMsl7U23NDptFoHxs6wkgRCHVzr1RM4OkErLY,3874 +Cython/Includes/cpython/pythread.pxd,sha256=SMHFiVCunyePliGzsw3qxwYqT40UnSbgcsgT281adOE,1999 +Cython/Includes/cpython/ref.pxd,sha256=QMDNmHwQyYAdfYOCajuWva8GbuFYotOCFH3gyR8liEM,2606 +Cython/Includes/cpython/sequence.pxd,sha256=uvdZ4gFqeWxbLSWukmOJ72lJCI04GOXEnJ6DEo_MIs4,6140 +Cython/Includes/cpython/set.pxd,sha256=J-GJ5q0WtQ5rg1p7YlSNo2S52sDGKH3AWlm6My76gJk,5502 +Cython/Includes/cpython/slice.pxd,sha256=JBO8KAj55bAZR8bmJfhNanQahMAuz41XXx6h1RQxO7Y,3181 +Cython/Includes/cpython/string.pxd,sha256=Y1PdH6xAYobU966AgyuJNF4G0tW-aGlcBJDj5bI8vQw,10138 +Cython/Includes/cpython/time.pxd,sha256=VlZhnU85rqp_OGccevReZIIRcm-w7oiAquitii4cGm0,1407 +Cython/Includes/cpython/tuple.pxd,sha256=HkEM7OFtbH-GLk6Y1o_QF8WfY50BaF-95_cA2a9Tt2U,3291 +Cython/Includes/cpython/type.pxd,sha256=n0c_-H-9naDf8ywXSHZuUQTMebPQSkGfynOgk_170d8,2120 +Cython/Includes/cpython/unicode.pxd,sha256=j2t3mWsUJtwfiv8kkuNxaQmQFSDC64ED6Gq-s35wwFA,31274 +Cython/Includes/cpython/version.pxd,sha256=ErhXAeFc75yGSMPcxSFPr1tJxdt00m0itsLMPtPWDP4,879 +Cython/Includes/cpython/weakref.pxd,sha256=gi7MCgySrWvcW_rV2zZUXWKAylBqoykSRHzBwS0gKa4,2026 +Cython/Includes/libc/__init__.pxd,sha256=qTr7l4s1u10pcMfFjP9cFZGS1PKT6v2Ml_vy3dreto0,14 +Cython/Includes/libc/complex.pxd,sha256=VfgRxKSvIP8GG8hJCsXmS1PYGjcROL7U_TqXPJRJ-GY,1259 +Cython/Includes/libc/errno.pxd,sha256=IXvn3dzp0Ou8vy8waJY55QsAT-FEhLwBQQh4UGr_9Tw,2176 +Cython/Includes/libc/float.pxd,sha256=2ULVOZskbze52RGzLh0pPUcJhkiC96Z0bwDNHcg0ktc,1009 +Cython/Includes/libc/limits.pxd,sha256=RFcJ6kRt6W-v_TwZXdsVygMfyFmxLycA2SP-NAQGY2I,649 +Cython/Includes/libc/locale.pxd,sha256=CG-MkozdWa3M8mTGpYa3uvRK_SL3zlXI9P8cvXMwhyw,1186 +Cython/Includes/libc/math.pxd,sha256=cYBGwXaKcz9MHd1JWUtweowKUIH2SZ__3kXK1oo4lJg,6790 +Cython/Includes/libc/setjmp.pxd,sha256=K9jXROXUSLe3HFkbhCHEovNmjUm536iWC_1GKzLExkI,307 +Cython/Includes/libc/signal.pxd,sha256=CCZIummR_6naQIxX30FAoKdxX5xMUOwCDoX5TpRXF8o,1243 +Cython/Includes/libc/stddef.pxd,sha256=koA_flVJ_GYt_naMynLAjGXCso5wDgDITza1JIaqANY,173 +Cython/Includes/libc/stdint.pxd,sha256=Euh2--utaCmIRbOGCvrFkAtatB3X9Vdx-pr2AOVCsuo,3554 +Cython/Includes/libc/stdio.pxd,sha256=lqnoSSBcYIU2u0vTBD8TwRcLqa7E0GIOkl_-A463Ht0,2556 +Cython/Includes/libc/stdlib.pxd,sha256=XOiWmkplEFgEip83rF-1PvfXZGWnbFl_pdAaS_wDz1E,2516 +Cython/Includes/libc/string.pxd,sha256=11ac2iMPeDVnthkNneKTyEdk4RO5NLlbWU2CfBHAkBo,2088 +Cython/Includes/libc/time.pxd,sha256=SV0rV9W5GbfzvkHbwzkVYVC8M-HFMUIvxIRsIh-eumw,1401 +Cython/Includes/libcpp/__init__.pxd,sha256=iiP2e0YRUsH-6o5S5Hz28Ex9aqB95bFavTgUIArIsJE,98 +Cython/Includes/libcpp/algorithm.pxd,sha256=HbvUntLfiMDlgyRZ4pby4L8CnAsICoFWESquZJqFvxM,24024 +Cython/Includes/libcpp/any.pxd,sha256=-1qlgN9NYtp1Pn-iLjal1fSpm_0qlJiqHLOaKCRSj6c,441 +Cython/Includes/libcpp/atomic.pxd,sha256=PVI97tAHMzgrXy52xAKSX5hFOwSQlvnOi_tmkEgn1XM,1764 +Cython/Includes/libcpp/bit.pxd,sha256=puRstKjEYW0tU7aiWiGxqodLyP5LaFtyRJDVCjVdj6E,778 +Cython/Includes/libcpp/cast.pxd,sha256=H417Gs7oTKaAGrYWkj6X2SAV_6RhjLvb93gyPJBg5G8,513 +Cython/Includes/libcpp/cmath.pxd,sha256=KpNvTbZ5pFPoRSqjff-URoz3CgGCu9ZJR7ccYLubQhA,20453 +Cython/Includes/libcpp/complex.pxd,sha256=di_LGtEdFnMLt--V3qCIMzZiHlzBqPMZfKkUSty0NIk,3113 +Cython/Includes/libcpp/deque.pxd,sha256=Tr6rUxRQiLA0YdinvwU7msBgGVh4kjJ66NsGz1-QIms,6804 
+Cython/Includes/libcpp/execution.pxd,sha256=B_SBKxUCzJwtww3WxRIEmkVXUbCZHoLhYld7eUxNw8Q,530 +Cython/Includes/libcpp/forward_list.pxd,sha256=Cq1I8W6-NwvfbQaVvrMKCyQI2I0CAXwXfAMk1DwZkYQ,2492 +Cython/Includes/libcpp/functional.pxd,sha256=MGGlhbq2KgVdDPiSjbemhPtoHAaLbv--w64dDHe1A7c,748 +Cython/Includes/libcpp/iterator.pxd,sha256=3QsYr_EeLm8FIwJbDoimdbPOmqXqHtVs_7mSeQM_hOo,1546 +Cython/Includes/libcpp/limits.pxd,sha256=jzE3NUl6zjJ05AemEo_DZ5wq3agwVzirQr-fb4rQJlo,1882 +Cython/Includes/libcpp/list.pxd,sha256=qOZ1FjkrJhOR0wZy3KU_VlhQjS2qLXNeyXmiRChNBRs,4555 +Cython/Includes/libcpp/map.pxd,sha256=wT3_jyoH8yNad2hxxy33O9mgVCTelbxakL4b_JSyR_E,10733 +Cython/Includes/libcpp/memory.pxd,sha256=nm56Tx4Oi4L0q3veopuH5Ma5CySnpUpSoVHmrAOwgV4,3708 +Cython/Includes/libcpp/numbers.pxd,sha256=sb09CA2fOcVPV7HlrsRkeCXcyjy3_EbsHkXVFoB9T7A,410 +Cython/Includes/libcpp/numeric.pxd,sha256=6dXyjSsgzFT7EYjPTy4ytrnYRWuGzdEH7EpdQy_0VWM,6702 +Cython/Includes/libcpp/optional.pxd,sha256=8DctLBOpvR0JbMcB2yPi2QnUD1FrAqw_tgd0EAKi2gE,1015 +Cython/Includes/libcpp/pair.pxd,sha256=Y2DuWJDXEPYbjpq5tWCRoL3VQX3BPYTSf13LX4y74W8,28 +Cython/Includes/libcpp/queue.pxd,sha256=DNZO-oHeryd0n0lnYKn8hOodpnXoGHV8i0Yq9m686Ug,674 +Cython/Includes/libcpp/random.pxd,sha256=WuOsS7UYAvsCzE_TqrutiOby-i7_hM78l-xrnRHmwlQ,6369 +Cython/Includes/libcpp/set.pxd,sha256=72YSpfsYRtWDmc3IxrpmGDCIrQTW9dwAq8zjHYYw19M,9404 +Cython/Includes/libcpp/stack.pxd,sha256=CUEZWp3K0eEgRlNmYIEJddfU1hwPyyxHCUNRf9821eM,312 +Cython/Includes/libcpp/string.pxd,sha256=aN1nmMrv5xnV_oSFebUysvIHr1aBHviFpvNsyKwYnk0,13438 +Cython/Includes/libcpp/typeindex.pxd,sha256=nhVh7eKB8MMRwqmtuLdzEt3APgE0MzOCQF6luHLsRtg,539 +Cython/Includes/libcpp/typeinfo.pxd,sha256=hEjs5d-hZXcMJaCps7mT7yC8XJKRXK-gk19nUaGZxZI,314 +Cython/Includes/libcpp/unordered_map.pxd,sha256=xtlblgMAxtNs_p8V_OxuQXZV58UawRy6XLxyQykRZDM,8138 +Cython/Includes/libcpp/unordered_set.pxd,sha256=77p1l9dlXAn5-Jq20aG2vhcHGXI4Rh3Knr2txLZ33HI,5962 +Cython/Includes/libcpp/utility.pxd,sha256=YXAj5htNEbatmvEIO2NnF_RC3Y3cgLmhuEAE6WzZaD8,1070 +Cython/Includes/libcpp/vector.pxd,sha256=W4dh_Pp57Bx4uo8pLR6N2TFm7y0yJ0n7gibXjEdNxD4,7006 +Cython/Includes/numpy/__init__.pxd,sha256=xM7NScKrtjEI2s7tvtdiCoZvqjG-EBoFKEOmbi8jAo8,37514 +Cython/Includes/numpy/math.pxd,sha256=pNwDWvWHy8flAHY9O_sCWuj2Bh4hOjJsLah2F6-l1Rg,5940 +Cython/Includes/posix/__init__.pxd,sha256=qTr7l4s1u10pcMfFjP9cFZGS1PKT6v2Ml_vy3dreto0,14 +Cython/Includes/posix/dlfcn.pxd,sha256=-jmKUbrxB9nC0TAfyaVRARh9fbDxOZx-HnuRK95-xRc,370 +Cython/Includes/posix/fcntl.pxd,sha256=F1CRQ5Z7sWBJw6HhNUPwgZLdHhwSJAUqfl7Ip8xalp0,1783 +Cython/Includes/posix/ioctl.pxd,sha256=RzlkJVru0u1sF07_A5ZL6Erwul2OzCJDzW_23Wpjoao,103 +Cython/Includes/posix/mman.pxd,sha256=D08m5iPDGAtAjDfTfXS4tPVK1LY5a_qAFhwRSmaAyDU,3576 +Cython/Includes/posix/resource.pxd,sha256=yQdIqcZx2mZAd7wypTA-uqqnPrguUFGJcrGGtu5lvxY,1395 +Cython/Includes/posix/select.pxd,sha256=RKlotP_ZuumcmJw9Be_IuLTdtPtrjluWXTWiMf1Tvbo,640 +Cython/Includes/posix/signal.pxd,sha256=V8yD9IR-fO0ojfzGts-RyRZqCbspYk5-LDzcAso9B_U,1949 +Cython/Includes/posix/stat.pxd,sha256=vBMhGC7AnrjHyV2lj8JwPW9Y3ntOAZ1C3Pcqp8qVESI,2793 +Cython/Includes/posix/stdio.pxd,sha256=Ja5GfzWVQbu4iWybCy1pTGFzD-CVz9tfEevUPYTNSJs,1092 +Cython/Includes/posix/stdlib.pxd,sha256=mCTDQ7vJJGHeJw7qsTnkph5n_t9FRu6lQ7Ex013_DZ4,964 +Cython/Includes/posix/strings.pxd,sha256=dlQxN5qaErBGfDHY841nmrHackQceukW92eBapnM4Nw,383 +Cython/Includes/posix/time.pxd,sha256=cNxvN3-_2VgCM9vXxzmwBEKZJDN5ca7i1qJvI1zCXq0,2052 +Cython/Includes/posix/types.pxd,sha256=C2finliVEqGNeEJCNyaDbYY14U4AtOi7Djpg1WRIWIA,1192 
+Cython/Includes/posix/uio.pxd,sha256=fcyEBYJUGMjYrDOmlN6l1ee49OQctATd9ZFi5BKocus,848 +Cython/Includes/posix/unistd.pxd,sha256=gEBSESCgNxXYQ-0m858AAyjRIrL3x3vX7LfkSjsJ65Y,8332 +Cython/Includes/posix/wait.pxd,sha256=VWCwCsnKG8_z-ME9QwK5y2q0Xr4s_hASr2Ivh8FxCRE,1284 +Cython/Plex/Actions.cp39-win_amd64.pyd,sha256=r46dZEd-7u3Fdtkulk17W02jRx9h2hFLEikuSD9O1B8,52224 +Cython/Plex/Actions.pxd,sha256=AX5UHQEovcLHsFsTox15QQxSxaMb8sH-xAe8i6_1U7M,607 +Cython/Plex/Actions.py,sha256=Pju0yOq--MAo3uOI6M3A0vN_KYSiG2XhCilTnRjdIew,3040 +Cython/Plex/DFA.cp39-win_amd64.pyd,sha256=Hucb3k5lOwixOuScduKpbdsSw89jYdAoSQlr0NP9isc,77824 +Cython/Plex/DFA.pxd,sha256=S55YV9creBzHLNbNeu7PcujmrFadkPAJ6TDxm-xJrUY,806 +Cython/Plex/DFA.py,sha256=H3ohMXzZKx6WxwxZXEUhErk5JQ90slj3QKPKEKwBKD4,5576 +Cython/Plex/Errors.py,sha256=n-R2119WeROkHgPfSI3cjF0HYYyPsLl-9XDlHHr5RUc,1024 +Cython/Plex/Lexicons.py,sha256=Kjp7pSSbQlBF6WLigQjrd9px5UJxdTvg6z_SBlGPc_c,6125 +Cython/Plex/Machines.cp39-win_amd64.pyd,sha256=7szRNJ_WQ0NYHjLf5ZZvGhE4sGwHYr1O0UhI7onx4m0,107520 +Cython/Plex/Machines.pxd,sha256=JTYnwgL_5eBCJP93cm0hjDk4DK1qwDCTAMjP4NI7mU4,765 +Cython/Plex/Machines.py,sha256=ntPWDMdRjDnbgqmNbI-SV5FRdVWHRWuFhTWTau-eOm0,7926 +Cython/Plex/Regexps.py,sha256=CHzEWEAy_0Hsccp5iW4wm5NaLXdiF9f3AGbwEvhIinA,15497 +Cython/Plex/Scanners.cp39-win_amd64.pyd,sha256=kAQN7tFuEk_H9t3etgjgP6TTR8Goo6-nXk877V2lQTo,77824 +Cython/Plex/Scanners.pxd,sha256=7mG6_kOA2y7uYzu9tVNlf7WayFaipmUx0G9Q76r5kEQ,1600 +Cython/Plex/Scanners.py,sha256=NLssAIMcilN1KF4SM6SnnteQtyt2Cx54PmiU9t_y8gc,13298 +Cython/Plex/Transitions.cp39-win_amd64.pyd,sha256=zMH3W398ScLal6YY2Q8LAyb0umsL0vPXHrR6jBN_tFs,84992 +Cython/Plex/Transitions.pxd,sha256=-FKBO3SUfjlCncGfid4mWEM3X-wPVxBCgVVC2ayfrJU,612 +Cython/Plex/Transitions.py,sha256=2yOB2Ub5_8qU8MklAWO20JI4v0KU0kdGg9lRRdtbECo,6995 +Cython/Plex/__init__.py,sha256=Uk6hFjDwFtgsvlvRogNM9YkMbRd1dhjngy0eQ5sT1VQ,1190 +Cython/Runtime/__init__.py,sha256=qTr7l4s1u10pcMfFjP9cFZGS1PKT6v2Ml_vy3dreto0,14 +Cython/Runtime/refnanny.cp39-win_amd64.pyd,sha256=r8cIKuVBhH5eYh1JU2LI6qMf1p_zAin3X3pIBwTS8HE,56832 +Cython/Runtime/refnanny.pyx,sha256=32nbl0LcjIAQbh4605oeisZAGjM8UUKjSHzPssFbFwU,6803 +Cython/Tempita/__init__.py,sha256=f-PIUAOGDzWIsa0jPz4hrg30WCl3PcGHiBPhRCvrwhg,156 +Cython/Tempita/_looper.py,sha256=w1J9BWt0YwNZw_IlCRGh_IGBqFSPEiMw4Af5YIqhCLE,4331 +Cython/Tempita/_tempita.cp39-win_amd64.pyd,sha256=mg234mSRavw7QgivZz05xOIBy3aTgKWNQwk8xJ92bPU,355328 +Cython/Tempita/_tempita.py,sha256=7PnDWIBZGNPBriWu5euhHxKOaC8M5Nv8vZle5-nIXj8,38741 +Cython/Tempita/compat3.py,sha256=rJ-gkuakJo43sO260R2sRNxWJIwDVX9SYZYle8dp-ys,950 +Cython/Tests/TestCodeWriter.py,sha256=o31QUXhQcYdzOkHHlAOIjiiFLiyzkqy10FNWWr6Yr38,3927 +Cython/Tests/TestCythonUtils.py,sha256=JzhWB52kgSAxXt34FAqIcy0ySQ2LMP-bolADldgM078,7030 +Cython/Tests/TestJediTyper.py,sha256=eq7KOaD6v11SKj0UVcLai99iCNovWPoYkMKZs0kuXN4,7246 +Cython/Tests/TestShadow.py,sha256=PdeabbiiK4B9bl6zayJtzjHvJDLeyQjymrAxKzIqv4M,3463 +Cython/Tests/TestStringIOTree.py,sha256=F15qCQYEqSKS8TlrIbeZ7k7cwkBGtMC1GFQoNuzpi7w,2013 +Cython/Tests/TestTestUtils.py,sha256=ql8sU20UU9RszKWwnc8V_HTUuTYGPQcMQxN5T_R2sYY,3058 +Cython/Tests/__init__.py,sha256=qTr7l4s1u10pcMfFjP9cFZGS1PKT6v2Ml_vy3dreto0,14 +Cython/Tests/xmlrunner.py,sha256=vj13NkmQVQUrJpcqzX-ThU0dM-xoiNnQZSfHHrJ3ulg,15172 +Cython/Utility/AsyncGen.c,sha256=3FXrTZLtvjItk95VWsIIVo9zEMdSFH8h4BsBliHx1VI,49245 +Cython/Utility/Buffer.c,sha256=tblFK4HEZP5_JBjKrVc_xfmDKe8jrWPGuf8glwbVB18,30843 +Cython/Utility/Builtins.c,sha256=ho79w-7VwNu4Kwfy7qJx5xjkBLEtlX7gA87Ew5e8NLI,19988 
+Cython/Utility/CConvert.pyx,sha256=wUjc3CrtIB7IK1-8qfj_yrdr-gywcP0O9luJW_mwZYA,4553 +Cython/Utility/CMath.c,sha256=4TPbfACvlKPJWxj4ANZwwjL3zIL92c7n7IwL-zkLXMc,2661 +Cython/Utility/CommonStructures.c,sha256=X7iZ9_rPeQBQOurT-YokV71QtDhRTVWFKPBrKoPgjOk,4829 +Cython/Utility/Complex.c,sha256=yM6xoReXiiYEqWzg9HSERXUGtn61F4HTafMUWtXvsgk,13975 +Cython/Utility/Coroutine.c,sha256=AlYXvxO0mnsy-jI40naj_WU3wT6jYJ3QRgkN16kReVQ,101471 +Cython/Utility/CpdefEnums.pyx,sha256=WGroyla_rRtbnDd8rRRZIYWPyhJaYU2zKO5c47wJ3I4,5675 +Cython/Utility/CppConvert.pyx,sha256=zb3uW6OJmGs-jTKLVVu7WKttvN2rK4JVgOcsvbe4q88,7327 +Cython/Utility/CppSupport.cpp,sha256=PNQ21D_1FrMSklVty2Mirl2L-7ACUjoyBDvr9im1XcI,5067 +Cython/Utility/CythonFunction.c,sha256=29LlSg7lJkoht1EYJeGXYR35Ismnmv4s7aEJXvsyrGY,65841 +Cython/Utility/Dataclasses.c,sha256=2O9KyLd-4w8zOXSxWYPw1EDBCu5miC0-XgkSNurKwvM,7459 +Cython/Utility/Dataclasses.py,sha256=ueL7xNyPqaIVMXruhbIs1oyuBgNQIHpNrt39VDe9xVg,4189 +Cython/Utility/Embed.c,sha256=CrFrtsp7LnnP-RkwN1zOKiPc2QNzCUdUkDL2sMmwDeY,7691 +Cython/Utility/Exceptions.c,sha256=C6iFsYaoXmMApXxkAidMSi1C7xkUf9K1ZCXuZJd845c,40092 +Cython/Utility/ExtensionTypes.c,sha256=mUEp0FAX5SE5t8uECqOqogdoJB1aFtdj04opG53IQx0,26077 +Cython/Utility/FunctionArguments.c,sha256=HYxyvxHsxSuOxTXFCeGubARJlPDmBMlfd1zDSd6fXyg,21327 +Cython/Utility/ImportExport.c,sha256=4auKONqxWlGiL-yw8zUb7eV4XQgUfeLGHFNs9cTb-1U,30911 +Cython/Utility/MemoryView.pyx,sha256=XSnIC4xgCxT93sW6EH1YF0LqslAw07E9owwOCyaRgqM,51063 +Cython/Utility/MemoryView_C.c,sha256=i7xVGQhbSclUURYYGE1xoJqe8Twu09iHkp_vDS_BpdE,32634 +Cython/Utility/ModuleSetupCode.c,sha256=66FLHEfPhGMTzpiSMIyoe4GncsqCYD9DGtNw1Hp0WKw,87203 +Cython/Utility/NumpyImportArray.c,sha256=ylUhO1xwMcuOLwHhhJx8cAxmF88gSP6G3NVAIvMcCIA,2079 +Cython/Utility/ObjectHandling.c,sha256=CKwwuFGDSggwIhr9WeHBvbQDTC0qTR1Yqz-e_YfjYn4,119822 +Cython/Utility/Optimize.c,sha256=n0jIXkdQ3HDW27TXoVXy4S7MqaTHskU_588mmGLfri0,62645 +Cython/Utility/Overflow.c,sha256=wC-SqLF0ppqjWdGCFXO-2803aqZcyXrSZumh8IrQnbU,16278 +Cython/Utility/Printing.c,sha256=X2bUfkGmu6MObLA6w55J6cQVqsXmeG-zC6GaluyUBeI,5279 +Cython/Utility/Profile.c,sha256=-hHU8HL8O8EbCFIsb23ihQeVRoeKuP3asxeG9hN25WQ,18577 +Cython/Utility/StringTools.c,sha256=xLQ2I9hF3DcZ9LkMIWVZGmb30ps_DaiOgFDQc2JAwc8,46143 +Cython/Utility/TestCyUtilityLoader.pyx,sha256=wsPqsYGkqXrqV_sxF8NKMTFXIqKCO-O4lXbNsRcokjA,160 +Cython/Utility/TestCythonScope.pyx,sha256=jPupaW_JBl5IBTKhaJRYamNGAyd23hOq_WoKGKIb5Ts,1865 +Cython/Utility/TestUtilityLoader.c,sha256=ix8dIu4YzXADIVIfBiTiBfiG1J15RxzphNP54Bxa2Gk,291 +Cython/Utility/TypeConversion.c,sha256=hZFwZXpeq5Ho2lUxOsJV_6CMZ5MgwLpMTG83rFuBvas,50399 +Cython/Utility/UFuncs.pyx,sha256=47nPr17bqogb1KyKtVhy6ZGpcUnTQ8e20ColWXd251M,2230 +Cython/Utility/UFuncs_C.c,sha256=6kb5CwA6OkOPdgUrnVbHWa0JXjgc4UKyqYtDx9TbVs4,1561 +Cython/Utility/__init__.py,sha256=e_qkVfB5Rvy57CZHmdH5fNrghtPdNGVNSREY4meZJUw,1188 +Cython/Utility/arrayarray.h,sha256=71CEek7FG2F7_R1UqGAOuBVi3IaJt__yzIdQdWB0Bls,4238 +pyximport/__init__.py,sha256=NceVc9JV-ifFiQ_XMcyVwAs3iSCD9rgEnNCXXUOvORw,83 +pyximport/_pyximport2.py,sha256=Yat1plxcLn7ELOgfIrwWuZtlbYQ-8TyXDhAigwscUjI,24940 +pyximport/_pyximport3.py,sha256=vl_nmpaQe8vwOVJpYyZyKgNbSmdi3wEJyCdP75u-CtU,18814 +pyximport/pyxbuild.py,sha256=3SFwqu0bUZgGUBuyWpgGJ0M0pOmczkqBQ7zI8sdGdkY,5862 +pyximport/pyximport.py,sha256=XMqMN-mrPKwSbwLpwBaX4KDJDaLxb6I2vcN7qM7Phks,332 +Cython-3.0.8.dist-info/COPYING.txt,sha256=bBH9iu7VCJgyoSHsOxxkbtWZiCRTKqBoHdVcQpWl6oY,775 +Cython-3.0.8.dist-info/LICENSE.txt,sha256=BTbJRMi5EDon8TFTjjeYv71yPfqJiCpPkSC-F28EnDU,10350 
+Cython-3.0.8.dist-info/METADATA,sha256=4BW1qj7MwIshKFAZTjYxSr4amgDGUm83rXnjXdzLu4M,3223 +Cython-3.0.8.dist-info/WHEEL,sha256=eep6QWEFiQfg2wcclssb_WY-D33AnLYLnEKGA9Rn-VU,100 +Cython-3.0.8.dist-info/entry_points.txt,sha256=VU8NX8gnQyFbyqiWMzfh9BHvYMuoQRS3Nbm3kKcKQeY,139 +Cython-3.0.8.dist-info/top_level.txt,sha256=jLV8tZV98iCbIfiJR4DVzTX5Ru1Y_pYMZ59wkMCe6SY,24 +Cython-3.0.8.dist-info/RECORD,, diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/WHEEL b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d5ae687e23d0112bda36fe6af4e8353bddd09a8d --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.40.0) +Root-Is-Purelib: false +Tag: cp39-cp39-win_amd64 + diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/entry_points.txt b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..f8b3914454cfa870a17cba0b8d784613731fbb3f --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/entry_points.txt @@ -0,0 +1,4 @@ +[console_scripts] +cygdb = Cython.Debugger.Cygdb:main +cython = Cython.Compiler.Main:setuptools_main +cythonize = Cython.Build.Cythonize:main diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/top_level.txt b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..48cccd7fc20547bc35dcd7d02343d52723903b63 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/EGG-INFO/top_level.txt @@ -0,0 +1,3 @@ +Cython +cython +pyximport diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/cython.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/cython.py new file mode 100644 index 0000000000000000000000000000000000000000..9283c4d9e5efbedd7adb3cfd219f84db2d0d7af2 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/cython.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +# +# Cython -- Main Program, generic +# + +if __name__ == '__main__': + + import os + import sys + + # Make sure we import the right Cython + cythonpath, _ = os.path.split(os.path.realpath(__file__)) + sys.path.insert(0, cythonpath) + + from Cython.Compiler.Main import main + main(command_line = 1) + +else: + # Void cython.* directives. + from Cython.Shadow import * + ## and bring in the __version__ + from Cython import __version__ + from Cython import load_ipython_extension diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..378c42281f903e292bd7c9cf3263849812cd09af --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/__init__.py @@ -0,0 +1,4 @@ +from .pyximport import * + +# replicate docstring +from .pyximport import __doc__ diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/_pyximport2.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/_pyximport2.py new file mode 100644 index 0000000000000000000000000000000000000000..afb360195a88798a03172a030353169aae2e5dd9 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/_pyximport2.py @@ -0,0 +1,620 @@ +""" +Import hooks; when installed with the install() function, these hooks +allow importing .pyx files as if they were Python modules. 
+
+If you want the hook installed every time you run Python
+you can add it to your Python version by adding these lines to
+sitecustomize.py (which you can create from scratch in site-packages
+if it doesn't exist there or somewhere else on your python path)::
+
+    import pyximport
+    pyximport.install()
+
+For instance on the Mac with a non-system Python 2.3, you could create
+sitecustomize.py with only those two lines at
+/usr/local/lib/python2.3/site-packages/sitecustomize.py .
+
+A custom distutils.core.Extension instance and setup() args
+(Distribution) for the build can be defined by a .pyxbld
+file like:
+
+# examplemod.pyxbld
+def make_ext(modname, pyxfilename):
+    from distutils.extension import Extension
+    return Extension(name=modname,
+                     sources=[pyxfilename, 'hello.c'],
+                     include_dirs=['/myinclude'])
+def make_setup_args():
+    return dict(script_args=["--compiler=mingw32"])
+
+Extra dependencies can be defined by a .pyxdep file.
+See README.
+
+Since Cython 0.11, the :mod:`pyximport` module also has experimental
+compilation support for normal Python modules.  This allows you to
+automatically run Cython on every .pyx and .py module that Python
+imports, including parts of the standard library and installed
+packages.  Cython will still fail to compile a lot of Python modules,
+in which case the import mechanism will fall back to loading the
+Python source modules instead.  The .py import mechanism is installed
+like this::
+
+    pyximport.install(pyimport=True)
+
+Running this module as a top-level script will print this documentation.
+
+This code is based on the Py2.3+ import protocol as described in PEP 302.
+"""
+
+import glob
+import imp
+import os
+import sys
+from zipimport import zipimporter, ZipImportError
+
+mod_name = "pyximport"
+
+PYX_EXT = ".pyx"
+PYXDEP_EXT = ".pyxdep"
+PYXBLD_EXT = ".pyxbld"
+
+DEBUG_IMPORT = False
+
+
+def _print(message, args):
+    if args:
+        message = message % args
+    print(message)
+
+
+def _debug(message, *args):
+    if DEBUG_IMPORT:
+        _print(message, args)
+
+
+def _info(message, *args):
+    _print(message, args)
+
+
+# Performance problem: for every PYX file that is imported, we will
+# invoke the whole distutils infrastructure even if the module is
+# already built.  It might be more efficient to only do it when the
+# mod time of the .pyx is newer than the mod time of the .so but
+# the question is how to get distutils to tell me the name of the .so
+# before it builds it.  Maybe it is easy... but maybe the performance
+# issue isn't real.
+def _load_pyrex(name, filename):
+    "Load a pyrex file given a name and filename."
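+
+# Illustrative sketch (not shipped code): the ".pyxdep" format consumed by
+# handle_dependencies() below is one filename or glob pattern per line,
+# resolved relative to the .pyxdep file itself.  For a hypothetical
+# "examplemod.pyxdep" next to "examplemod.pyx":
+#
+#     hello.c
+#     include/*.h
+#
+# If any matching file is newer than examplemod.pyx, the .pyx file gets
+# "touched" so that distutils rebuilds the extension on the next import.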
+
+
+def get_distutils_extension(modname, pyxfilename, language_level=None):
+#    try:
+#        import hashlib
+#    except ImportError:
+#        import md5 as hashlib
+#    extra = "_" + hashlib.md5(open(pyxfilename).read()).hexdigest()
+#    modname = modname + extra
+    extension_mod, setup_args = handle_special_build(modname, pyxfilename)
+    if not extension_mod:
+        if not isinstance(pyxfilename, str):
+            # distutils is stupid in Py2 and requires exactly 'str'
+            # => encode accidentally coerced unicode strings back to str
+            pyxfilename = pyxfilename.encode(sys.getfilesystemencoding())
+        from distutils.extension import Extension
+        extension_mod = Extension(name=modname, sources=[pyxfilename])
+        if language_level is not None:
+            extension_mod.cython_directives = {'language_level': language_level}
+    return extension_mod, setup_args
+
+
+def handle_special_build(modname, pyxfilename):
+    special_build = os.path.splitext(pyxfilename)[0] + PYXBLD_EXT
+    ext = None
+    setup_args = {}
+    if os.path.exists(special_build):
+        # globls = {}
+        # locs = {}
+        # execfile(special_build, globls, locs)
+        # ext = locs["make_ext"](modname, pyxfilename)
+        with open(special_build) as fid:
+            mod = imp.load_source("XXXX", special_build, fid)
+        make_ext = getattr(mod, 'make_ext', None)
+        if make_ext:
+            ext = make_ext(modname, pyxfilename)
+            assert ext and ext.sources, "make_ext in %s did not return Extension" % special_build
+        make_setup_args = getattr(mod, 'make_setup_args', None)
+        if make_setup_args:
+            setup_args = make_setup_args()
+            assert isinstance(setup_args, dict), ("make_setup_args in %s did not return a dict"
+                                                  % special_build)
+        # require that the .pyxbld file defines at least one of the two hooks
+        assert ext or setup_args, ("neither make_ext nor make_setup_args defined in %s"
+                                   % special_build)
+        if ext:
+            ext.sources = [os.path.join(os.path.dirname(special_build), source)
+                           for source in ext.sources]
+    return ext, setup_args
+
+
+def handle_dependencies(pyxfilename):
+    testing = '_test_files' in globals()
+    dependfile = os.path.splitext(pyxfilename)[0] + PYXDEP_EXT
+
+    # by default let distutils decide whether to rebuild on its own
+    # (it has a better idea of what the output file will be)
+
+    # but we know more about dependencies so force a rebuild if
+    # some of the dependencies are newer than the pyxfile.
+    if os.path.exists(dependfile):
+        with open(dependfile) as fid:
+            depends = fid.readlines()
+        depends = [depend.strip() for depend in depends]
+
+        # gather dependencies in the "files" variable
+        # the dependency file is itself a dependency
+        files = [dependfile]
+        for depend in depends:
+            fullpath = os.path.join(os.path.dirname(dependfile),
+                                    depend)
+            files.extend(glob.glob(fullpath))
+
+        # only for unit testing to see we did the right thing
+        if testing:
+            _test_files[:] = []  #$pycheck_no
+
+        # if any file that the pyxfile depends upon is newer than
+        # the pyx file, 'touch' the pyx file so that distutils will
+        # be tricked into rebuilding it.
+ for file in files: + from distutils.dep_util import newer + if newer(file, pyxfilename): + _debug("Rebuilding %s because of %s", pyxfilename, file) + filetime = os.path.getmtime(file) + os.utime(pyxfilename, (filetime, filetime)) + if testing: + _test_files.append(file) + + +def build_module(name, pyxfilename, pyxbuild_dir=None, inplace=False, language_level=None): + assert os.path.exists(pyxfilename), "Path does not exist: %s" % pyxfilename + handle_dependencies(pyxfilename) + + extension_mod, setup_args = get_distutils_extension(name, pyxfilename, language_level) + build_in_temp = pyxargs.build_in_temp + sargs = pyxargs.setup_args.copy() + sargs.update(setup_args) + build_in_temp = sargs.pop('build_in_temp',build_in_temp) + + from . import pyxbuild + olddir = os.getcwd() + common = '' + if pyxbuild_dir: + # Windows concatenates the pyxbuild_dir to the pyxfilename when + # compiling, and then complains that the filename is too long + common = os.path.commonprefix([pyxbuild_dir, pyxfilename]) + if len(common) > 30: + pyxfilename = os.path.relpath(pyxfilename) + pyxbuild_dir = os.path.relpath(pyxbuild_dir) + os.chdir(common) + try: + so_path = pyxbuild.pyx_to_dll(pyxfilename, extension_mod, + build_in_temp=build_in_temp, + pyxbuild_dir=pyxbuild_dir, + setup_args=sargs, + inplace=inplace, + reload_support=pyxargs.reload_support) + finally: + os.chdir(olddir) + so_path = os.path.join(common, so_path) + assert os.path.exists(so_path), "Cannot find: %s" % so_path + + junkpath = os.path.join(os.path.dirname(so_path), name+"_*") #very dangerous with --inplace ? yes, indeed, trying to eat my files ;) + junkstuff = glob.glob(junkpath) + for path in junkstuff: + if path != so_path: + try: + os.remove(path) + except IOError: + _info("Couldn't remove %s", path) + + return so_path + + +def load_module(name, pyxfilename, pyxbuild_dir=None, is_package=False, + build_inplace=False, language_level=None, so_path=None): + try: + if so_path is None: + if is_package: + module_name = name + '.__init__' + else: + module_name = name + so_path = build_module(module_name, pyxfilename, pyxbuild_dir, + inplace=build_inplace, language_level=language_level) + mod = imp.load_dynamic(name, so_path) + if is_package and not hasattr(mod, '__path__'): + mod.__path__ = [os.path.dirname(so_path)] + assert mod.__file__ == so_path, (mod.__file__, so_path) + except Exception as failure_exc: + _debug("Failed to load extension module: %r" % failure_exc) + if pyxargs.load_py_module_on_import_failure and pyxfilename.endswith('.py'): + # try to fall back to normal import + mod = imp.load_source(name, pyxfilename) + assert mod.__file__ in (pyxfilename, pyxfilename+'c', pyxfilename+'o'), (mod.__file__, pyxfilename) + else: + tb = sys.exc_info()[2] + import traceback + exc = ImportError("Building module %s failed: %s" % ( + name, traceback.format_exception_only(*sys.exc_info()[:2]))) + if sys.version_info[0] >= 3: + raise exc.with_traceback(tb) + else: + exec("raise exc, None, tb", {'exc': exc, 'tb': tb}) + return mod + + +# import hooks + +class PyxImporter(object): + """A meta-path importer for .pyx files. + """ + def __init__(self, extension=PYX_EXT, pyxbuild_dir=None, inplace=False, + language_level=None): + self.extension = extension + self.pyxbuild_dir = pyxbuild_dir + self.inplace = inplace + self.language_level = language_level + + def find_module(self, fullname, package_path=None): + if fullname in sys.modules and not pyxargs.reload_support: + return None # only here when reload() + + # package_path might be a _NamespacePath. 
Convert that into a list... + if package_path is not None and not isinstance(package_path, list): + package_path = list(package_path) + try: + fp, pathname, (ext,mode,ty) = imp.find_module(fullname,package_path) + if fp: fp.close() # Python should offer a Default-Loader to avoid this double find/open! + if pathname and ty == imp.PKG_DIRECTORY: + pkg_file = os.path.join(pathname, '__init__'+self.extension) + if os.path.isfile(pkg_file): + return PyxLoader(fullname, pathname, + init_path=pkg_file, + pyxbuild_dir=self.pyxbuild_dir, + inplace=self.inplace, + language_level=self.language_level) + if pathname and pathname.endswith(self.extension): + return PyxLoader(fullname, pathname, + pyxbuild_dir=self.pyxbuild_dir, + inplace=self.inplace, + language_level=self.language_level) + if ty != imp.C_EXTENSION: # only when an extension, check if we have a .pyx next! + return None + + # find .pyx fast, when .so/.pyd exist --inplace + pyxpath = os.path.splitext(pathname)[0]+self.extension + if os.path.isfile(pyxpath): + return PyxLoader(fullname, pyxpath, + pyxbuild_dir=self.pyxbuild_dir, + inplace=self.inplace, + language_level=self.language_level) + + # .so/.pyd's on PATH should not be remote from .pyx's + # think no need to implement PyxArgs.importer_search_remote here? + + except ImportError: + pass + + # searching sys.path ... + + #if DEBUG_IMPORT: print "SEARCHING", fullname, package_path + + mod_parts = fullname.split('.') + module_name = mod_parts[-1] + pyx_module_name = module_name + self.extension + + # this may work, but it returns the file content, not its path + #import pkgutil + #pyx_source = pkgutil.get_data(package, pyx_module_name) + + paths = package_path or sys.path + for path in paths: + pyx_data = None + if not path: + path = os.getcwd() + elif os.path.isfile(path): + try: + zi = zipimporter(path) + pyx_data = zi.get_data(pyx_module_name) + except (ZipImportError, IOError, OSError): + continue # Module not found. + # unzip the imported file into the build dir + # FIXME: can interfere with later imports if build dir is in sys.path and comes before zip file + path = self.pyxbuild_dir + elif not os.path.isabs(path): + path = os.path.abspath(path) + + pyx_module_path = os.path.join(path, pyx_module_name) + if pyx_data is not None: + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError: + # concurrency issue? + if not os.path.exists(path): + raise + with open(pyx_module_path, "wb") as f: + f.write(pyx_data) + elif not os.path.isfile(pyx_module_path): + continue # Module not found. + + return PyxLoader(fullname, pyx_module_path, + pyxbuild_dir=self.pyxbuild_dir, + inplace=self.inplace, + language_level=self.language_level) + + # not found, normal package, not a .pyx file, none of our business + _debug("%s not found" % fullname) + return None + + +class PyImporter(PyxImporter): + """A meta-path importer for normal .py files. 
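+
+    Works like PyxImporter, but keeps a blocklist of modules that are known
+    not to compile, so that repeated imports of such modules fall straight
+    through to the normal Python import instead of re-running Cython.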
+ """ + def __init__(self, pyxbuild_dir=None, inplace=False, language_level=None): + if language_level is None: + language_level = sys.version_info[0] + self.super = super(PyImporter, self) + self.super.__init__(extension='.py', pyxbuild_dir=pyxbuild_dir, inplace=inplace, + language_level=language_level) + self.uncompilable_modules = {} + self.blocked_modules = ['Cython', 'pyxbuild', 'pyximport.pyxbuild', + 'distutils'] + self.blocked_packages = ['Cython.', 'distutils.'] + + def find_module(self, fullname, package_path=None): + if fullname in sys.modules: + return None + if any([fullname.startswith(pkg) for pkg in self.blocked_packages]): + return None + if fullname in self.blocked_modules: + # prevent infinite recursion + return None + if _lib_loader.knows(fullname): + return _lib_loader + _debug("trying import of module '%s'", fullname) + if fullname in self.uncompilable_modules: + path, last_modified = self.uncompilable_modules[fullname] + try: + new_last_modified = os.stat(path).st_mtime + if new_last_modified > last_modified: + # import would fail again + return None + except OSError: + # module is no longer where we found it, retry the import + pass + + self.blocked_modules.append(fullname) + try: + importer = self.super.find_module(fullname, package_path) + if importer is not None: + if importer.init_path: + path = importer.init_path + real_name = fullname + '.__init__' + else: + path = importer.path + real_name = fullname + _debug("importer found path %s for module %s", path, real_name) + try: + so_path = build_module( + real_name, path, + pyxbuild_dir=self.pyxbuild_dir, + language_level=self.language_level, + inplace=self.inplace) + _lib_loader.add_lib(fullname, path, so_path, + is_package=bool(importer.init_path)) + return _lib_loader + except Exception: + if DEBUG_IMPORT: + import traceback + traceback.print_exc() + # build failed, not a compilable Python module + try: + last_modified = os.stat(path).st_mtime + except OSError: + last_modified = 0 + self.uncompilable_modules[fullname] = (path, last_modified) + importer = None + finally: + self.blocked_modules.pop() + return importer + + +class LibLoader(object): + def __init__(self): + self._libs = {} + + def load_module(self, fullname): + try: + source_path, so_path, is_package = self._libs[fullname] + except KeyError: + raise ValueError("invalid module %s" % fullname) + _debug("Loading shared library module '%s' from %s", fullname, so_path) + return load_module(fullname, source_path, so_path=so_path, is_package=is_package) + + def add_lib(self, fullname, path, so_path, is_package): + self._libs[fullname] = (path, so_path, is_package) + + def knows(self, fullname): + return fullname in self._libs + +_lib_loader = LibLoader() + + +class PyxLoader(object): + def __init__(self, fullname, path, init_path=None, pyxbuild_dir=None, + inplace=False, language_level=None): + _debug("PyxLoader created for loading %s from %s (init path: %s)", + fullname, path, init_path) + self.fullname = fullname + self.path, self.init_path = path, init_path + self.pyxbuild_dir = pyxbuild_dir + self.inplace = inplace + self.language_level = language_level + + def load_module(self, fullname): + assert self.fullname == fullname, ( + "invalid module, expected %s, got %s" % ( + self.fullname, fullname)) + if self.init_path: + # package + #print "PACKAGE", fullname + module = load_module(fullname, self.init_path, + self.pyxbuild_dir, is_package=True, + build_inplace=self.inplace, + language_level=self.language_level) + module.__path__ = [self.path] + else: + 
#print "MODULE", fullname + module = load_module(fullname, self.path, + self.pyxbuild_dir, + build_inplace=self.inplace, + language_level=self.language_level) + return module + + +#install args +class PyxArgs(object): + build_dir=True + build_in_temp=True + setup_args={} #None + +##pyxargs=None + + +def _have_importers(): + has_py_importer = False + has_pyx_importer = False + for importer in sys.meta_path: + if isinstance(importer, PyxImporter): + if isinstance(importer, PyImporter): + has_py_importer = True + else: + has_pyx_importer = True + + return has_py_importer, has_pyx_importer + + +def install(pyximport=True, pyimport=False, build_dir=None, build_in_temp=True, + setup_args=None, reload_support=False, + load_py_module_on_import_failure=False, inplace=False, + language_level=None): + """ Main entry point for pyxinstall. + + Call this to install the ``.pyx`` import hook in + your meta-path for a single Python process. If you want it to be + installed whenever you use Python, add it to your ``sitecustomize`` + (as described above). + + :param pyximport: If set to False, does not try to import ``.pyx`` files. + + :param pyimport: You can pass ``pyimport=True`` to also + install the ``.py`` import hook + in your meta-path. Note, however, that it is rather experimental, + will not work at all for some ``.py`` files and packages, and will + heavily slow down your imports due to search and compilation. + Use at your own risk. + + :param build_dir: By default, compiled modules will end up in a ``.pyxbld`` + directory in the user's home directory. Passing a different path + as ``build_dir`` will override this. + + :param build_in_temp: If ``False``, will produce the C files locally. Working + with complex dependencies and debugging becomes more easy. This + can principally interfere with existing files of the same name. + + :param setup_args: Dict of arguments for Distribution. + See ``distutils.core.setup()``. + + :param reload_support: Enables support for dynamic + ``reload(my_module)``, e.g. after a change in the Cython code. + Additional files ``.reloadNN`` may arise on that account, when + the previously loaded module file cannot be overwritten. + + :param load_py_module_on_import_failure: If the compilation of a ``.py`` + file succeeds, but the subsequent import fails for some reason, + retry the import with the normal ``.py`` module instead of the + compiled module. Note that this may lead to unpredictable results + for modules that change the system state during their import, as + the second import will rerun these modifications in whatever state + the system was left after the import of the compiled module + failed. + + :param inplace: Install the compiled module + (``.so`` for Linux and Mac / ``.pyd`` for Windows) + next to the source file. + + :param language_level: The source language level to use: 2 or 3. + The default is to use the language level of the current Python + runtime for .py files and Py2 for ``.pyx`` files. 
+ """ + if setup_args is None: + setup_args = {} + if not build_dir: + build_dir = os.path.join(os.path.expanduser('~'), '.pyxbld') + + global pyxargs + pyxargs = PyxArgs() #$pycheck_no + pyxargs.build_dir = build_dir + pyxargs.build_in_temp = build_in_temp + pyxargs.setup_args = (setup_args or {}).copy() + pyxargs.reload_support = reload_support + pyxargs.load_py_module_on_import_failure = load_py_module_on_import_failure + + has_py_importer, has_pyx_importer = _have_importers() + py_importer, pyx_importer = None, None + + if pyimport and not has_py_importer: + py_importer = PyImporter(pyxbuild_dir=build_dir, inplace=inplace, + language_level=language_level) + # make sure we import Cython before we install the import hook + import Cython.Compiler.Main, Cython.Compiler.Pipeline, Cython.Compiler.Optimize + sys.meta_path.insert(0, py_importer) + + if pyximport and not has_pyx_importer: + pyx_importer = PyxImporter(pyxbuild_dir=build_dir, inplace=inplace, + language_level=language_level) + sys.meta_path.append(pyx_importer) + + return py_importer, pyx_importer + + +def uninstall(py_importer, pyx_importer): + """ + Uninstall an import hook. + """ + try: + sys.meta_path.remove(py_importer) + except ValueError: + pass + + try: + sys.meta_path.remove(pyx_importer) + except ValueError: + pass + + +# MAIN + +def show_docs(): + import __main__ + __main__.__name__ = mod_name + for name in dir(__main__): + item = getattr(__main__, name) + try: + setattr(item, "__module__", mod_name) + except (AttributeError, TypeError): + pass + help(__main__) + + +if __name__ == '__main__': + show_docs() diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/_pyximport3.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/_pyximport3.py new file mode 100644 index 0000000000000000000000000000000000000000..629325933ec8c9b78985d57a50b822ee99577248 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/_pyximport3.py @@ -0,0 +1,478 @@ +""" +Import hooks; when installed with the install() function, these hooks +allow importing .pyx files as if they were Python modules. + +If you want the hook installed every time you run Python +you can add it to your Python version by adding these lines to +sitecustomize.py (which you can create from scratch in site-packages +if it doesn't exist there or somewhere else on your python path):: + + import pyximport + pyximport.install() + +For instance on the Mac with a non-system Python 2.3, you could create +sitecustomize.py with only those two lines at +/usr/local/lib/python2.3/site-packages/sitecustomize.py . + +A custom distutils.core.Extension instance and setup() args +(Distribution) for for the build can be defined by a .pyxbld +file like: + +# examplemod.pyxbld +def make_ext(modname, pyxfilename): + from distutils.extension import Extension + return Extension(name = modname, + sources=[pyxfilename, 'hello.c'], + include_dirs=['/myinclude'] ) +def make_setup_args(): + return dict(script_args=["--compiler=mingw32"]) + +Extra dependencies can be defined by a .pyxdep . +See README. + +Since Cython 0.11, the :mod:`pyximport` module also has experimental +compilation support for normal Python modules. This allows you to +automatically run Cython on every .pyx and .py module that Python +imports, including parts of the standard library and installed +packages. Cython will still fail to compile a lot of Python modules, +in which case the import mechanism will fall back to loading the +Python source modules instead. 
The .py import mechanism is installed +like this:: + + pyximport.install(pyimport = True) + +Running this module as a top-level script will run a test and then print +the documentation. +""" + +import glob +import importlib +import os +import sys +from importlib.abc import MetaPathFinder +from importlib.machinery import ExtensionFileLoader, SourceFileLoader +from importlib.util import spec_from_file_location + +mod_name = "pyximport" + +PY_EXT = ".py" +PYX_EXT = ".pyx" +PYXDEP_EXT = ".pyxdep" +PYXBLD_EXT = ".pyxbld" + +DEBUG_IMPORT = False + + +def _print(message, args): + if args: + message = message % args + print(message) + + +def _debug(message, *args): + if DEBUG_IMPORT: + _print(message, args) + + +def _info(message, *args): + _print(message, args) + + +def load_source(file_path): + import importlib.util + from importlib.machinery import SourceFileLoader + spec = importlib.util.spec_from_file_location("XXXX", file_path, loader=SourceFileLoader("XXXX", file_path)) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def get_distutils_extension(modname, pyxfilename, language_level=None): +# try: +# import hashlib +# except ImportError: +# import md5 as hashlib +# extra = "_" + hashlib.md5(open(pyxfilename).read()).hexdigest() +# modname = modname + extra + extension_mod,setup_args = handle_special_build(modname, pyxfilename) + if not extension_mod: + if not isinstance(pyxfilename, str): + # distutils is stupid in Py2 and requires exactly 'str' + # => encode accidentally coerced unicode strings back to str + pyxfilename = pyxfilename.encode(sys.getfilesystemencoding()) + from distutils.extension import Extension + extension_mod = Extension(name = modname, sources=[pyxfilename]) + if language_level is not None: + extension_mod.cython_directives = {'language_level': language_level} + return extension_mod,setup_args + + +def handle_special_build(modname, pyxfilename): + special_build = os.path.splitext(pyxfilename)[0] + PYXBLD_EXT + ext = None + setup_args={} + if os.path.exists(special_build): + # globls = {} + # locs = {} + # execfile(special_build, globls, locs) + # ext = locs["make_ext"](modname, pyxfilename) + mod = load_source(special_build) + make_ext = getattr(mod,'make_ext',None) + if make_ext: + ext = make_ext(modname, pyxfilename) + assert ext and ext.sources, "make_ext in %s did not return Extension" % special_build + make_setup_args = getattr(mod, 'make_setup_args',None) + if make_setup_args: + setup_args = make_setup_args() + assert isinstance(setup_args,dict), ("make_setup_args in %s did not return a dict" + % special_build) + assert set or setup_args, ("neither make_ext nor make_setup_args %s" + % special_build) + ext.sources = [os.path.join(os.path.dirname(special_build), source) + for source in ext.sources] + return ext, setup_args + + +def handle_dependencies(pyxfilename): + testing = '_test_files' in globals() + dependfile = os.path.splitext(pyxfilename)[0] + PYXDEP_EXT + + # by default let distutils decide whether to rebuild on its own + # (it has a better idea of what the output file will be) + + # but we know more about dependencies so force a rebuild if + # some of the dependencies are newer than the pyxfile. 
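+    # (a .pyxdep file lists one file name or glob pattern per line; each
+    # pattern is resolved relative to the directory of the .pyxdep file)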
+ if os.path.exists(dependfile): + with open(dependfile) as fid: + depends = fid.readlines() + depends = [depend.strip() for depend in depends] + + # gather dependencies in the "files" variable + # the dependency file is itself a dependency + files = [dependfile] + for depend in depends: + fullpath = os.path.join(os.path.dirname(dependfile), + depend) + files.extend(glob.glob(fullpath)) + + # only for unit testing to see we did the right thing + if testing: + _test_files[:] = [] #$pycheck_no + + # if any file that the pyxfile depends upon is newer than + # the pyx file, 'touch' the pyx file so that distutils will + # be tricked into rebuilding it. + for file in files: + from distutils.dep_util import newer + if newer(file, pyxfilename): + _debug("Rebuilding %s because of %s", pyxfilename, file) + filetime = os.path.getmtime(file) + os.utime(pyxfilename, (filetime, filetime)) + if testing: + _test_files.append(file) + + +def build_module(name, pyxfilename, pyxbuild_dir=None, inplace=False, language_level=None): + assert os.path.exists(pyxfilename), "Path does not exist: %s" % pyxfilename + handle_dependencies(pyxfilename) + + extension_mod, setup_args = get_distutils_extension(name, pyxfilename, language_level) + build_in_temp = pyxargs.build_in_temp + sargs = pyxargs.setup_args.copy() + sargs.update(setup_args) + build_in_temp = sargs.pop('build_in_temp',build_in_temp) + + from . import pyxbuild + olddir = os.getcwd() + common = '' + if pyxbuild_dir: + # Windows concatenates the pyxbuild_dir to the pyxfilename when + # compiling, and then complains that the filename is too long + common = os.path.commonprefix([pyxbuild_dir, pyxfilename]) + if len(common) > 30: + pyxfilename = os.path.relpath(pyxfilename) + pyxbuild_dir = os.path.relpath(pyxbuild_dir) + os.chdir(common) + try: + so_path = pyxbuild.pyx_to_dll(pyxfilename, extension_mod, + build_in_temp=build_in_temp, + pyxbuild_dir=pyxbuild_dir, + setup_args=sargs, + inplace=inplace, + reload_support=pyxargs.reload_support) + finally: + os.chdir(olddir) + so_path = os.path.join(common, so_path) + assert os.path.exists(so_path), "Cannot find: %s" % so_path + + junkpath = os.path.join(os.path.dirname(so_path), name+"_*") #very dangerous with --inplace ? yes, indeed, trying to eat my files ;) + junkstuff = glob.glob(junkpath) + for path in junkstuff: + if path != so_path: + try: + os.remove(path) + except IOError: + _info("Couldn't remove %s", path) + + return so_path + + +# import hooks + +class PyxImportMetaFinder(MetaPathFinder): + + def __init__(self, extension=PYX_EXT, pyxbuild_dir=None, inplace=False, language_level=None): + self.pyxbuild_dir = pyxbuild_dir + self.inplace = inplace + self.language_level = language_level + self.extension = extension + + def find_spec(self, fullname, path, target=None): + if not path: + path = [os.getcwd()] # top level import -- + if "." 
in fullname: + *parents, name = fullname.split(".") + else: + name = fullname + for entry in path: + if os.path.isdir(os.path.join(entry, name)): + # this module has child modules + filename = os.path.join(entry, name, "__init__" + self.extension) + submodule_locations = [os.path.join(entry, name)] + else: + filename = os.path.join(entry, name + self.extension) + submodule_locations = None + if not os.path.exists(filename): + continue + + return spec_from_file_location( + fullname, filename, + loader=PyxImportLoader(filename, self.pyxbuild_dir, self.inplace, self.language_level), + submodule_search_locations=submodule_locations) + + return None # we don't know how to import this + + +class PyImportMetaFinder(MetaPathFinder): + + def __init__(self, extension=PY_EXT, pyxbuild_dir=None, inplace=False, language_level=None): + self.pyxbuild_dir = pyxbuild_dir + self.inplace = inplace + self.language_level = language_level + self.extension = extension + self.uncompilable_modules = {} + self.blocked_modules = ['Cython', 'pyxbuild', 'pyximport.pyxbuild', + 'distutils', 'cython'] + self.blocked_packages = ['Cython.', 'distutils.'] + + def find_spec(self, fullname, path, target=None): + if fullname in sys.modules: + return None + if any([fullname.startswith(pkg) for pkg in self.blocked_packages]): + return None + if fullname in self.blocked_modules: + # prevent infinite recursion + return None + + self.blocked_modules.append(fullname) + name = fullname + if not path: + path = [os.getcwd()] # top level import -- + try: + for entry in path: + if os.path.isdir(os.path.join(entry, name)): + # this module has child modules + filename = os.path.join(entry, name, "__init__" + self.extension) + submodule_locations = [os.path.join(entry, name)] + else: + filename = os.path.join(entry, name + self.extension) + submodule_locations = None + if not os.path.exists(filename): + continue + + return spec_from_file_location( + fullname, filename, + loader=PyxImportLoader(filename, self.pyxbuild_dir, self.inplace, self.language_level), + submodule_search_locations=submodule_locations) + finally: + self.blocked_modules.pop() + + return None # we don't know how to import this + + +class PyxImportLoader(ExtensionFileLoader): + + def __init__(self, filename, pyxbuild_dir, inplace, language_level): + module_name = os.path.splitext(os.path.basename(filename))[0] + super().__init__(module_name, filename) + self._pyxbuild_dir = pyxbuild_dir + self._inplace = inplace + self._language_level = language_level + + def create_module(self, spec): + try: + so_path = build_module(spec.name, pyxfilename=spec.origin, pyxbuild_dir=self._pyxbuild_dir, + inplace=self._inplace, language_level=self._language_level) + self.path = so_path + spec.origin = so_path + return super().create_module(spec) + except Exception as failure_exc: + _debug("Failed to load extension module: %r" % failure_exc) + if pyxargs.load_py_module_on_import_failure and spec.origin.endswith(PY_EXT): + spec = importlib.util.spec_from_file_location(spec.name, spec.origin, + loader=SourceFileLoader(spec.name, spec.origin)) + mod = importlib.util.module_from_spec(spec) + assert mod.__file__ in (spec.origin, spec.origin + 'c', spec.origin + 'o'), (mod.__file__, spec.origin) + return mod + else: + tb = sys.exc_info()[2] + import traceback + exc = ImportError("Building module %s failed: %s" % ( + spec.name, traceback.format_exception_only(*sys.exc_info()[:2]))) + raise exc.with_traceback(tb) + + def exec_module(self, module): + try: + return super().exec_module(module) + except 
Exception as failure_exc: + import traceback + _debug("Failed to load extension module: %r" % failure_exc) + raise ImportError("Executing module %s failed %s" % ( + module.__file__, traceback.format_exception_only(*sys.exc_info()[:2]))) + + +#install args +class PyxArgs(object): + build_dir=True + build_in_temp=True + setup_args={} #None + + +def _have_importers(): + has_py_importer = False + has_pyx_importer = False + for importer in sys.meta_path: + if isinstance(importer, PyxImportMetaFinder): + if isinstance(importer, PyImportMetaFinder): + has_py_importer = True + else: + has_pyx_importer = True + + return has_py_importer, has_pyx_importer + + +def install(pyximport=True, pyimport=False, build_dir=None, build_in_temp=True, + setup_args=None, reload_support=False, + load_py_module_on_import_failure=False, inplace=False, + language_level=None): + """ Main entry point for pyxinstall. + + Call this to install the ``.pyx`` import hook in + your meta-path for a single Python process. If you want it to be + installed whenever you use Python, add it to your ``sitecustomize`` + (as described above). + + :param pyximport: If set to False, does not try to import ``.pyx`` files. + + :param pyimport: You can pass ``pyimport=True`` to also + install the ``.py`` import hook + in your meta-path. Note, however, that it is rather experimental, + will not work at all for some ``.py`` files and packages, and will + heavily slow down your imports due to search and compilation. + Use at your own risk. + + :param build_dir: By default, compiled modules will end up in a ``.pyxbld`` + directory in the user's home directory. Passing a different path + as ``build_dir`` will override this. + + :param build_in_temp: If ``False``, will produce the C files locally. Working + with complex dependencies and debugging becomes more easy. This + can principally interfere with existing files of the same name. + + :param setup_args: Dict of arguments for Distribution. + See ``distutils.core.setup()``. + + :param reload_support: Enables support for dynamic + ``reload(my_module)``, e.g. after a change in the Cython code. + Additional files ``.reloadNN`` may arise on that account, when + the previously loaded module file cannot be overwritten. + + :param load_py_module_on_import_failure: If the compilation of a ``.py`` + file succeeds, but the subsequent import fails for some reason, + retry the import with the normal ``.py`` module instead of the + compiled module. Note that this may lead to unpredictable results + for modules that change the system state during their import, as + the second import will rerun these modifications in whatever state + the system was left after the import of the compiled module + failed. + + :param inplace: Install the compiled module + (``.so`` for Linux and Mac / ``.pyd`` for Windows) + next to the source file. + + :param language_level: The source language level to use: 2 or 3. + The default is to use the language level of the current Python + runtime for .py files and Py2 for ``.pyx`` files. 
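+
+    Example (a minimal sketch; the hooks can be removed again with
+    ``uninstall()``)::
+
+        import pyximport
+        py_hook, pyx_hook = pyximport.install()
+        import foo  # a foo.pyx found on sys.path is built and imported
+        pyximport.uninstall(py_hook, pyx_hook)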
+ """ + if setup_args is None: + setup_args = {} + if not build_dir: + build_dir = os.path.join(os.path.expanduser('~'), '.pyxbld') + + global pyxargs + pyxargs = PyxArgs() #$pycheck_no + pyxargs.build_dir = build_dir + pyxargs.build_in_temp = build_in_temp + pyxargs.setup_args = (setup_args or {}).copy() + pyxargs.reload_support = reload_support + pyxargs.load_py_module_on_import_failure = load_py_module_on_import_failure + + has_py_importer, has_pyx_importer = _have_importers() + py_importer, pyx_importer = None, None + + if pyimport and not has_py_importer: + py_importer = PyImportMetaFinder(pyxbuild_dir=build_dir, inplace=inplace, + language_level=language_level) + # make sure we import Cython before we install the import hook + import Cython.Compiler.Main, Cython.Compiler.Pipeline, Cython.Compiler.Optimize + sys.meta_path.insert(0, py_importer) + + if pyximport and not has_pyx_importer: + pyx_importer = PyxImportMetaFinder(pyxbuild_dir=build_dir, inplace=inplace, + language_level=language_level) + sys.meta_path.append(pyx_importer) + + return py_importer, pyx_importer + + +def uninstall(py_importer, pyx_importer): + """ + Uninstall an import hook. + """ + try: + sys.meta_path.remove(py_importer) + except ValueError: + pass + + try: + sys.meta_path.remove(pyx_importer) + except ValueError: + pass + + +# MAIN + +def show_docs(): + import __main__ + __main__.__name__ = mod_name + for name in dir(__main__): + item = getattr(__main__, name) + try: + setattr(item, "__module__", mod_name) + except (AttributeError, TypeError): + pass + help(__main__) + + +if __name__ == '__main__': + show_docs() diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/pyxbuild.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/pyxbuild.py new file mode 100644 index 0000000000000000000000000000000000000000..61f9747eec4ea549db9970500f6199e8aaa4e7a3 --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/pyxbuild.py @@ -0,0 +1,160 @@ +"""Build a Pyrex file from .pyx source to .so loadable module using +the installed distutils infrastructure. 
Call: + +out_fname = pyx_to_dll("foo.pyx") +""" +import os +import sys + +from distutils.errors import DistutilsArgError, DistutilsError, CCompilerError +from distutils.extension import Extension +from distutils.util import grok_environment_error +try: + from Cython.Distutils.build_ext import build_ext + HAS_CYTHON = True +except ImportError: + HAS_CYTHON = False + +DEBUG = 0 + +_reloads={} + + +def pyx_to_dll(filename, ext=None, force_rebuild=0, build_in_temp=False, pyxbuild_dir=None, + setup_args=None, reload_support=False, inplace=False): + """Compile a PYX file to a DLL and return the name of the generated .so + or .dll .""" + assert os.path.exists(filename), "Could not find %s" % os.path.abspath(filename) + + path, name = os.path.split(os.path.abspath(filename)) + + if not ext: + modname, extension = os.path.splitext(name) + assert extension in (".pyx", ".py"), extension + if not HAS_CYTHON: + filename = filename[:-len(extension)] + '.c' + ext = Extension(name=modname, sources=[filename]) + + if setup_args is None: + setup_args = {} + if not pyxbuild_dir: + pyxbuild_dir = os.path.join(path, "_pyxbld") + + package_base_dir = path + for package_name in ext.name.split('.')[-2::-1]: + package_base_dir, pname = os.path.split(package_base_dir) + if pname != package_name: + # something is wrong - package path doesn't match file path + package_base_dir = None + break + + script_args=setup_args.get("script_args",[]) + if DEBUG or "--verbose" in script_args: + quiet = "--verbose" + else: + quiet = "--quiet" + if build_in_temp: + args = [quiet, "build_ext", '--cython-c-in-temp'] + else: + args = [quiet, "build_ext"] + if force_rebuild: + args.append("--force") + if inplace and package_base_dir: + args.extend(['--build-lib', package_base_dir]) + if ext.name == '__init__' or ext.name.endswith('.__init__'): + # package => provide __path__ early + if not hasattr(ext, 'cython_directives'): + ext.cython_directives = {'set_initial_path' : 'SOURCEFILE'} + elif 'set_initial_path' not in ext.cython_directives: + ext.cython_directives['set_initial_path'] = 'SOURCEFILE' + + sargs = setup_args.copy() + sargs.update({ + "script_name": None, + "script_args": args + script_args, + }) + # late import, in case setuptools replaced it + from distutils.dist import Distribution + dist = Distribution(sargs) + if not dist.ext_modules: + dist.ext_modules = [] + dist.ext_modules.append(ext) + if HAS_CYTHON: + dist.cmdclass = {'build_ext': build_ext} + build = dist.get_command_obj('build') + build.build_base = pyxbuild_dir + + cfgfiles = dist.find_config_files() + dist.parse_config_files(cfgfiles) + + try: + ok = dist.parse_command_line() + except DistutilsArgError: + raise + + if DEBUG: + print("options (after parsing command line):") + dist.dump_option_dicts() + assert ok + + + try: + obj_build_ext = dist.get_command_obj("build_ext") + dist.run_commands() + so_path = obj_build_ext.get_outputs()[0] + if obj_build_ext.inplace: + # Python distutils get_outputs()[ returns a wrong so_path + # when --inplace ; see https://bugs.python.org/issue5977 + # workaround: + so_path = os.path.join(os.path.dirname(filename), + os.path.basename(so_path)) + if reload_support: + org_path = so_path + timestamp = os.path.getmtime(org_path) + global _reloads + last_timestamp, last_path, count = _reloads.get(org_path, (None,None,0) ) + if last_timestamp == timestamp: + so_path = last_path + else: + basename = os.path.basename(org_path) + while count < 100: + count += 1 + r_path = os.path.join(obj_build_ext.build_lib, + basename + '.reload%s' % 
count) + try: + import shutil # late import / reload_support is: debugging + try: + # Try to unlink first --- if the .so file + # is mmapped by another process, + # overwriting its contents corrupts the + # loaded image (on Linux) and crashes the + # other process. On Windows, unlinking an + # open file just fails. + if os.path.isfile(r_path): + os.unlink(r_path) + except OSError: + continue + shutil.copy2(org_path, r_path) + so_path = r_path + except IOError: + continue + break + else: + # used up all 100 slots + raise ImportError("reload count for %s reached maximum" % org_path) + _reloads[org_path]=(timestamp, so_path, count) + return so_path + except KeyboardInterrupt: + sys.exit(1) + except (IOError, os.error): + exc = sys.exc_info()[1] + error = grok_environment_error(exc) + + if DEBUG: + sys.stderr.write(error + "\n") + raise + + +if __name__=="__main__": + pyx_to_dll("dummy.pyx") + from . import test diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/pyximport.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/pyximport.py new file mode 100644 index 0000000000000000000000000000000000000000..9d575815afb9dc59834f058b1861dcfa38834ada --- /dev/null +++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/pyximport/pyximport.py @@ -0,0 +1,11 @@ +from __future__ import absolute_import +import sys + +if sys.version_info < (3, 5): + # _pyximport3 module requires at least Python 3.5 + from pyximport._pyximport2 import install, uninstall, show_docs +else: + from pyximport._pyximport3 import install, uninstall, show_docs + +if __name__ == '__main__': + show_docs() diff --git a/.eggs/README.txt b/.eggs/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d01668824f45c3a6683e12d1b9048bb1d273041 --- /dev/null +++ b/.eggs/README.txt @@ -0,0 +1,6 @@ +This directory contains eggs that were downloaded by setuptools to build, test, and run plug-ins. + +This directory caches those eggs to prevent repeated downloads. + +However, it is safe to delete this directory. + diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..49e648b1c6eb5313cd6d5d09d2e4999611098ae1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +assets/anime_landscapes.png filter=lfs diff=lfs merge=lfs -text +assets/teaser.png filter=lfs diff=lfs merge=lfs -text diff --git a/MODEL_ZOO.md b/MODEL_ZOO.md new file mode 100644 index 0000000000000000000000000000000000000000..5299083ecd45e1b2749eaba0eda851445a9d17f3 --- /dev/null +++ b/MODEL_ZOO.md @@ -0,0 +1,13 @@ +## DDColor Model Zoo + +| Model | Description | Note | +| ---------------------- | :------------------ | :-----| +| [ddcolor_paper.pth](https://huggingface.co/piddnad/DDColor-models/resolve/main/ddcolor_paper.pth) | DDColor-L trained on ImageNet | paper model, use it only if you want to reproduce some of the images in the paper. +| [ddcolor_modelscope.pth](https://huggingface.co/piddnad/DDColor-models/resolve/main/ddcolor_modelscope.pth) (***default***) | DDColor-L trained on ImageNet | We trained this model using the same data cleaning scheme as [BigColor](https://github.com/KIMGEONUNG/BigColor/issues/2#issuecomment-1196287574), so it can get the best qualitative results with little degrading FID performance. Use this model by default if you want to test images outside the ImageNet. 
It can also be easily downloaded through ModelScope [in this way](README.md#inference-with-modelscope-library). |
+| [ddcolor_artistic.pth](https://huggingface.co/piddnad/DDColor-models/resolve/main/ddcolor_artistic.pth) | DDColor-L trained on ImageNet + private data | We trained this model on an extended dataset containing many high-quality artistic images. We also omitted the colorfulness loss during training, so there may be fewer unreasonable color artifacts. Use this model if you want to try different colorization results. |
+| [ddcolor_paper_tiny.pth](https://huggingface.co/piddnad/DDColor-models/resolve/main/ddcolor_paper_tiny.pth) | DDColor-T trained on ImageNet | The most lightweight version of the DDColor model, using the same training scheme as ddcolor_paper. |
+
+## Discussions
+
+* About Colorfulness Loss (CL): CL can encourage more "colorful" results and help improve CF scores; however, it sometimes leads to unpleasant color blocks (e.g., red color artifacts). If that happens, I personally recommend removing it during training.
+
diff --git a/README.md b/README.md
index 6f82571ada6900bec4611c098cbfd490301f2c28..2481ccd2ab816c07fc5fad7a035d6934b19a9f48 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,199 @@
----
-title: DDColor
-emoji: 🏢
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 4.19.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# 🎨 DDColor
+
+Official PyTorch implementation of the ICCV 2023 paper "DDColor: Towards Photo-Realistic Image Colorization via Dual Decoders".
+
+
+[![arXiv](https://img.shields.io/badge/arXiv-2212.11613-b31b1b.svg)](https://arxiv.org/abs/2212.11613)
+[![HuggingFace](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-FF8000)](https://huggingface.co/piddnad/DDColor-models)
+[![ModelScope demo](https://img.shields.io/badge/%F0%9F%91%BE%20ModelScope-Demo-8A2BE2)](https://www.modelscope.cn/models/damo/cv_ddcolor_image-colorization/summary)
+[![Replicate](https://replicate.com/piddnad/ddcolor/badge)](https://replicate.com/piddnad/ddcolor)
+![visitors](https://visitor-badge.laobi.icu/badge?page_id=piddnad/DDColor)
+
+
+> Xiaoyang Kang, Tao Yang, Wenqi Ouyang, Peiran Ren, Lingzhi Li, Xuansong Xie
+>
+> *DAMO Academy, Alibaba Group*
+
+🪄 DDColor can provide vivid and natural colorization for historical black-and-white photos.
+
+
+<img src="assets/teaser.png"/>
+ +🎲 It can even colorize/recolor landscapes from anime games, transforming your animated scenery into a realistic real-life style! (Image source: Genshin Impact) + +
+<img src="assets/anime_landscapes.png"/>
+
+
+## 🔥 News
+
+* [2024-01-28] Support inference via Hugging Face! Thanks @[Niels](https://github.com/NielsRogge) for the suggestion and example code, and @[Skwara](https://github.com/Skwarson96) for fixing a bug.
+
+* [2024-01-18] Add Replicate demo and API! Thanks @[Chenxi](https://github.com/chenxwh).
+
+* [2023-12-13] Release the DDColor-tiny pre-trained model!
+
+* [2023-09-07] Add the Model Zoo and release three pretrained models!
+
+* [2023-05-15] Code release for training and inference!
+
+* [2023-05-05] The online demo is available!
+
+## Online Demo
+
+We provide online demos through ModelScope at [![ModelScope demo](https://img.shields.io/badge/%F0%9F%91%BE%20ModelScope-Demo-8A2BE2)](https://www.modelscope.cn/models/damo/cv_ddcolor_image-colorization/summary) and Replicate at [![Replicate](https://replicate.com/piddnad/ddcolor/badge)](https://replicate.com/piddnad/ddcolor).
+
+Feel free to try them out!
+
+## Methods
+
+*In short:* DDColor uses multi-scale visual features to optimize **learnable color tokens** (i.e., color queries) and achieves state-of-the-art performance on automatic image colorization. A rough usage sketch of the released architecture code follows the figure below.
+
+<img src="assets/network_arch.jpg"/>
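+
+The released architecture code (see `basicsr/archs/ddcolor_arch.py` in this diff) can be exercised with a minimal sketch like the one below. This is not an official usage example: the weights are randomly initialized, and the output channel count is simply read off the code's `num_output_channels` default.
+
+```
+import torch
+
+from basicsr.archs.ddcolor_arch import DDColor
+
+# Randomly initialized DDColor-L; useful for checking shapes only.
+model = DDColor(encoder_name='convnext-l', input_size=(256, 256)).eval()
+
+# The grayscale input is fed as an L channel replicated to 3 channels.
+x = torch.randn(1, 3, 256, 256)
+with torch.no_grad():
+    out = model(x)
+
+# `num_output_channels` maps at input resolution (3 by default in this code).
+print(out.shape)
+```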
+
+
+## Installation
+
+### Requirements
+
+- Python >= 3.7
+- PyTorch >= 1.7
+
+### Install with conda (Recommended)
+
+```
+conda create -n ddcolor python=3.8
+conda activate ddcolor
+pip install -r requirements.txt
+
+python3 setup.py develop  # install basicsr
+```
+
+## Quick Start
+
+### Inference with the ModelScope library
+
+1. Install modelscope:
+
+```
+pip install "modelscope[cv]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
+```
+
+2. Run the following code:
+
+```
+import cv2
+from modelscope.outputs import OutputKeys
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+img_colorization = pipeline(Tasks.image_colorization, model='damo/cv_ddcolor_image-colorization')
+result = img_colorization('https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/audrey_hepburn.jpg')
+cv2.imwrite('result.png', result[OutputKeys.OUTPUT_IMG])
+```
+
+It will automatically download the DDColor models.
+
+You can find the model file `pytorch_model.pt` in the local path `~/.cache/modelscope/hub/damo`.
+
+### Inference from local script
+
+1. Download the pretrained model file by simply running:
+
+```
+from modelscope.hub.snapshot_download import snapshot_download
+
+model_dir = snapshot_download('damo/cv_ddcolor_image-colorization', cache_dir='./modelscope')
+print('model assets saved to %s' % model_dir)
+```
+
+The weights will then be at `modelscope/damo/cv_ddcolor_image-colorization/pytorch_model.pt`.
+
+Or, download the model from [Hugging Face](https://huggingface.co/piddnad/DDColor-models).
+
+2. Run
+```
+sh scripts/inference.sh
+```
+
+### Inference with Hugging Face
+
+We can also load the models via the Hugging Face Hub like this:
+
+```
+from inference.colorization_pipeline_hf import DDColorHF
+
+ddcolor_paper_tiny = DDColorHF.from_pretrained("piddnad/ddcolor_paper_tiny")
+ddcolor_paper = DDColorHF.from_pretrained("piddnad/ddcolor_paper")
+ddcolor_modelscope = DDColorHF.from_pretrained("piddnad/ddcolor_modelscope")
+ddcolor_artistic = DDColorHF.from_pretrained("piddnad/ddcolor_artistic")
+```
+
+Check `inference/colorization_pipeline_hf.py` for the inference details, or perform model inference directly by running:
+
+```
+python inference/colorization_pipeline_hf.py --model_name ddcolor_modelscope --input ./assets/test_images
+# model_name: [ddcolor_paper | ddcolor_modelscope | ddcolor_artistic | ddcolor_paper_tiny]
+```
+
+### Gradio Demo
+
+1. Install Gradio and the other required libraries:
+
+```
+pip install gradio gradio_imageslider timm -q
+```
+
+2. Run the demo:
+
+```
+python gradio_app.py
+```
+
+## Model Zoo
+
+We provide several different versions of pretrained models; please check out the [Model Zoo](MODEL_ZOO.md).
+
+
+## Train
+
+1. Dataset preparation: download the [ImageNet](https://www.image-net.org/) dataset, or prepare a custom dataset of your own. Use the following script to get the dataset list file:
+
+```
+python data_list/get_meta_file.py
+```
+
+2. Download pretrained weights for [ConvNeXt](https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth) and [InceptionV3](https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth) and put them into the `pretrain` folder.
+
+3. Specify 'meta_info_file' and other options in `options/train/train_ddcolor.yml`.
+
+4.
Run + +``` +sh scripts/train.sh +``` + +## Citation + +If our work is helpful for your research, please consider citing: + +``` +@inproceedings{kang2023ddcolor, + title={DDColor: Towards Photo-Realistic Image Colorization via Dual Decoders}, + author={Kang, Xiaoyang and Yang, Tao and Ouyang, Wenqi and Ren, Peiran and Li, Lingzhi and Xie, Xuansong}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={328--338}, + year={2023} +} +``` + +## Acknowledgments +We thank the authors of BasicSR for the awesome training pipeline. + +> Xintao Wang, Ke Yu, Kelvin C.K. Chan, Chao Dong and Chen Change Loy. BasicSR: Open Source Image and Video Restoration Toolbox. https://github.com/xinntao/BasicSR, 2020. + +Some codes are adapted from [ColorFormer](https://github.com/jixiaozhong/ColorFormer), [BigColor](https://github.com/KIMGEONUNG/BigColor), [ConvNeXt](https://github.com/facebookresearch/ConvNeXt), [Mask2Former](https://github.com/facebookresearch/Mask2Former), and [DETR](https://github.com/facebookresearch/detr). Thanks for their excellent work! diff --git a/VERSION b/VERSION new file mode 100644 index 0000000000000000000000000000000000000000..ac2cb1ce5b8e6fc84799d048f9b8fd6fe5118e2b --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.3.4.6 diff --git a/assets/anime_landscapes.png b/assets/anime_landscapes.png new file mode 100644 index 0000000000000000000000000000000000000000..7fdeb0ee9899ef8b0fb81d9eee946602af8402f1 --- /dev/null +++ b/assets/anime_landscapes.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:592ca355853bba3f5bb331af08f76190ecdece9bbb04ff9d9fc75db2b6debc1a +size 8221293 diff --git a/assets/network_arch.jpg b/assets/network_arch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0489fa7e2d9207d67c6c2da2389ffd4f9901259b Binary files /dev/null and b/assets/network_arch.jpg differ diff --git a/assets/teaser.png b/assets/teaser.png new file mode 100644 index 0000000000000000000000000000000000000000..33a2cb9e42f62abc62ece37f32e7f6419acab7d7 --- /dev/null +++ b/assets/teaser.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bf224bd3bbfe1da643569fb408259b39fa8d9cdd0231bfddcca48d96a53b1e3 +size 3593411 diff --git a/assets/test_images/Abandoned Boy Holding a Stuffed Toy Animal. London 1945.jpg b/assets/test_images/Abandoned Boy Holding a Stuffed Toy Animal. London 1945.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b46537215f52201d49017f6dc9ca74098ea1ece1 Binary files /dev/null and b/assets/test_images/Abandoned Boy Holding a Stuffed Toy Animal. 
London 1945.jpg differ diff --git a/assets/test_images/Acrobats Balance On Top Of The Empire State Building, 1934.jpg b/assets/test_images/Acrobats Balance On Top Of The Empire State Building, 1934.jpg new file mode 100644 index 0000000000000000000000000000000000000000..068d79f975e2c5447a239d40a74f06328af22766 Binary files /dev/null and b/assets/test_images/Acrobats Balance On Top Of The Empire State Building, 1934.jpg differ diff --git a/assets/test_images/Ansel Adams _ Moore Photography.jpeg b/assets/test_images/Ansel Adams _ Moore Photography.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..391530607356890a009710d3d5bafb5c30d8d13d Binary files /dev/null and b/assets/test_images/Ansel Adams _ Moore Photography.jpeg differ diff --git a/assets/test_images/Audrey Hepburn.jpg b/assets/test_images/Audrey Hepburn.jpg new file mode 100644 index 0000000000000000000000000000000000000000..974bf311a67ba0f6a0f1ce6f2b2fb690fb2f050a Binary files /dev/null and b/assets/test_images/Audrey Hepburn.jpg differ diff --git a/assets/test_images/Broadway at the United States Hotel Saratoga Springs, N.Y. ca 1900-1915.jpg b/assets/test_images/Broadway at the United States Hotel Saratoga Springs, N.Y. ca 1900-1915.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a266757744274e968d0523049c3ded3d7d8ac14a Binary files /dev/null and b/assets/test_images/Broadway at the United States Hotel Saratoga Springs, N.Y. ca 1900-1915.jpg differ diff --git a/assets/test_images/Buffalo Bank Buffalo, New York, circa 1908. Erie County Savings Bank, Niagara Street.jpg b/assets/test_images/Buffalo Bank Buffalo, New York, circa 1908. Erie County Savings Bank, Niagara Street.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fcb26166876ceba67b3a56d2af5fa6db8f24b8cc Binary files /dev/null and b/assets/test_images/Buffalo Bank Buffalo, New York, circa 1908. Erie County Savings Bank, Niagara Street.jpg differ diff --git a/assets/test_images/Detroit circa 1915.jpg b/assets/test_images/Detroit circa 1915.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72b9c8d42b64a6bb62d204ceda8ff4857decc0a5 Binary files /dev/null and b/assets/test_images/Detroit circa 1915.jpg differ diff --git a/assets/test_images/Einstein, Rejection, and Crafting a Future.jpeg b/assets/test_images/Einstein, Rejection, and Crafting a Future.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..b1a06c20b9893dbf87f3f2e139aeda3e3fc8906c Binary files /dev/null and b/assets/test_images/Einstein, Rejection, and Crafting a Future.jpeg differ diff --git a/assets/test_images/February 1936. Nipomo, Calif. Destitute pea pickers living in tent in migrant camp. Mother of seven children. Age 32.jpg b/assets/test_images/February 1936. Nipomo, Calif. Destitute pea pickers living in tent in migrant camp. Mother of seven children. Age 32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a240cbc29ff90c0aceb44ee5e825bfd72fcbe792 Binary files /dev/null and b/assets/test_images/February 1936. Nipomo, Calif. Destitute pea pickers living in tent in migrant camp. Mother of seven children. 
Age 32.jpg differ diff --git a/assets/test_images/Helen Keller meeting Charlie Chaplin in 1919.jpg b/assets/test_images/Helen Keller meeting Charlie Chaplin in 1919.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1365ed16840df58a79885dd13e6fcde0b664145a Binary files /dev/null and b/assets/test_images/Helen Keller meeting Charlie Chaplin in 1919.jpg differ diff --git a/assets/test_images/Louis Armstrong practicing in his dressing room, ca 1946.jpg b/assets/test_images/Louis Armstrong practicing in his dressing room, ca 1946.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8722a8bc311bf22e691ed50bd698aa804567eccc Binary files /dev/null and b/assets/test_images/Louis Armstrong practicing in his dressing room, ca 1946.jpg differ diff --git a/assets/test_images/New York Riverfront December 15, 1931.jpg b/assets/test_images/New York Riverfront December 15, 1931.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18640a4546a85159b35a4ca69c0f4e25c03eede3 Binary files /dev/null and b/assets/test_images/New York Riverfront December 15, 1931.jpg differ diff --git a/assets/test_images/colorized-historical-photos-vintage-photography-39.jpg b/assets/test_images/colorized-historical-photos-vintage-photography-39.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea558d52e7ed2fcb80f3c610e9c452f324a51c92 Binary files /dev/null and b/assets/test_images/colorized-historical-photos-vintage-photography-39.jpg differ diff --git a/basicsr.egg-info/PKG-INFO b/basicsr.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..38a959be238324dda18ad4fbd1d7e92543af2b8c --- /dev/null +++ b/basicsr.egg-info/PKG-INFO @@ -0,0 +1,231 @@ +Metadata-Version: 2.1 +Name: basicsr +Version: 1.3.4.6 +Summary: Open Source Image and Video Super-Resolution Toolbox +Home-page: https://github.com/xinntao/BasicSR +Author: Xintao Wang +Author-email: xintao.wang@outlook.com +License: Apache License 2.0 +Keywords: computer vision,restoration,super resolution +Classifier: Development Status :: 4 - Beta +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Description-Content-Type: text/markdown +Requires-Dist: dlib==19.24.2 +Requires-Dist: lmdb==1.4.1 +Requires-Dist: numpy==1.24.3 +Requires-Dist: opencv_python==4.7.0.72 +Requires-Dist: Pillow==10.1.0 +Requires-Dist: PyYAML==6.0.1 +Requires-Dist: Requests==2.31.0 +Requires-Dist: scipy==1.9.1 +Requires-Dist: timm==0.9.2 +Requires-Dist: torch==2.2.0 +Requires-Dist: torchvision==0.17.0 +Requires-Dist: tqdm==4.65.0 +Requires-Dist: wandb==0.15.5 +Requires-Dist: scikit-image==0.22.0 +Requires-Dist: tensorboard==2.15.1 + +# 🎨 DDColor + +Official PyTorch implementation of ICCV 2023 Paper "DDColor: Towards Photo-Realistic Image Colorization via Dual Decoders". 
+ + +[![arXiv](https://img.shields.io/badge/arXiv-2212.11613-b31b1b.svg)](https://arxiv.org/abs/2212.11613) +[![HuggingFace](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Models-FF8000)](https://huggingface.co/piddnad/DDColor-models) +[![ModelScope demo](https://img.shields.io/badge/%F0%9F%91%BE%20ModelScope-Demo-8A2BE2)](https://www.modelscope.cn/models/damo/cv_ddcolor_image-colorization/summary) +[![Replicate](https://replicate.com/piddnad/ddcolor/badge)](https://replicate.com/piddnad/ddcolor) +![visitors](https://visitor-badge.laobi.icu/badge?page_id=piddnad/DDColor) + + +> Xiaoyang Kang, Tao Yang, Wenqi Ouyang, Peiran Ren, Lingzhi Li, Xuansong Xie +> +> *DAMO Academy, Alibaba Group* + +🪄 DDColor can provide vivid and natural colorization for historical black and white old photos. + +
+<img src="assets/teaser.png"/>
+ +🎲 It can even colorize/recolor landscapes from anime games, transforming your animated scenery into a realistic real-life style! (Image source: Genshin Impact) + +
+<img src="assets/anime_landscapes.png"/>
+ + +## 🔥 News + +* [2024-01-28] Support inferencing via Hugging Face! Thanks @[Niels](https://github.com/NielsRogge) for the suggestion and example code and @[Skwara](https://github.com/Skwarson96) for fixing bug. + +* [2024-01-18] Add Replicate demo and API! Thanks @[Chenxi](https://github.com/chenxwh). + +* [2023-12-13] Release the DDColor-tiny pre-trained model! + +* [2023-09-07] Add the Model Zoo and release three pretrained models! + +* [2023-05-15] Code release for training and inference! + +* [2023-05-05] The online demo is available! + +## Online Demo + +We provide online demos through ModelScope at [![ModelScope demo](https://img.shields.io/badge/%F0%9F%91%BE%20ModelScope-Demo-8A2BE2)](https://www.modelscope.cn/models/damo/cv_ddcolor_image-colorization/summary) and Replicate at [![Replicate](https://replicate.com/piddnad/ddcolor/badge)](https://replicate.com/piddnad/ddcolor) . + +Feel free to try them out! + +## Methods + +*In short:* DDColor uses multi-scale visual features to optimize **learnable color tokens** (i.e. color queries) and achieves state-of-the-art performance on automatic image colorization. + +
+<img src="assets/network_arch.jpg"/>
+ + +## Installation + +### Requirements + +- Python >= 3.7 +- PyTorch >= 1.7 + +### Install with conda (Recommend) + +``` +conda create -n ddcolor python=3.8 +conda activate ddcolor +pip install -r requirements.txt + +python3 setup.py develop # install basicsr +``` + +## Quick Start + +### Inference with Modelscope library + +1. Install modelscope: + +``` +pip install "modelscope[cv]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html +``` + +2. Run the following codes: + +``` +import cv2 +from modelscope.outputs import OutputKeys +from modelscope.pipelines import pipeline +from modelscope.utils.constant import Tasks + +img_colorization = pipeline(Tasks.image_colorization, model='damo/cv_ddcolor_image-colorization') +result = img_colorization('https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/audrey_hepburn.jpg') +cv2.imwrite('result.png', result[OutputKeys.OUTPUT_IMG]) +``` + +It will automatically download the DDColor models. + +You can find the model file `pytorch_model.pt` in the local path ~/.cache/modelscope/hub/damo. + +### Inference from local script + +1. Download the pretrained model file by simply running: + +``` +from modelscope.hub.snapshot_download import snapshot_download + +model_dir = snapshot_download('damo/cv_ddcolor_image-colorization', cache_dir='./modelscope') +print('model assets saved to %s'%model_dir) +``` + +then the weights will be `modelscope/damo/cv_ddcolor_image-colorization/pytorch_model.pt`. + +Or, download the model from [Hugging Face](https://huggingface.co/piddnad/DDColor-models). + +2. Run +``` +sh scripts/inference.sh +``` + +### Inference with Hugging Face + +Now we can load model via Huggingface Hub like this: + +``` +from inference.colorization_pipeline_hf import DDColorHF + +ddcolor_paper_tiny = DDColorHF.from_pretrained("piddnad/ddcolor_paper_tiny") +ddcolor_paper = DDColorHF.from_pretrained("piddnad/ddcolor_paper") +ddcolor_modelscope = DDColorHF.from_pretrained("piddnad/ddcolor_modelscope") +ddcolor_artistic = DDColorHF.from_pretrained("piddnad/ddcolor_artistic") +``` + +Check `inference/colorization_pipeline_hf.py` for the details of the inference, or directly perform model inference by simply running: + +``` +python inference/colorization_pipeline_hf.py --model_name ddcolor_modelscope --input ./assets/test_images +# model_name: [ddcolor_paper | ddcolor_modelscope | ddcolor_artistic | ddcolor_paper_tiny] +``` + +### Gradio Demo + +1. Install the gradio and other required libraries + +```python +!pip install gradio gradio_imageslider timm -q +``` + +2. Run the demo + +```python +python gradio_app.py +``` + +## Model Zoo + +We provide several different versions of pretrained models, please check out [Model Zoo](MODEL_ZOO.md). + + +## Train + +1. Dataset preparation: download [ImageNet](https://www.image-net.org/) dataset, or prepare any custom dataset of your own. Use the following script to get the dataset list file: + +``` +python data_list/get_meta_file.py +``` + +2. Download pretrained weights for [ConvNeXt](https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth) and [InceptionV3](https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth) and put it into `pretrain` folder. + +3. Specify 'meta_info_file' and other options in `options/train/train_ddcolor.yml`. + +4. 
Run + +``` +sh scripts/train.sh +``` + +## Citation + +If our work is helpful for your research, please consider citing: + +``` +@inproceedings{kang2023ddcolor, + title={DDColor: Towards Photo-Realistic Image Colorization via Dual Decoders}, + author={Kang, Xiaoyang and Yang, Tao and Ouyang, Wenqi and Ren, Peiran and Li, Lingzhi and Xie, Xuansong}, + booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, + pages={328--338}, + year={2023} +} +``` + +## Acknowledgments +We thank the authors of BasicSR for the awesome training pipeline. + +> Xintao Wang, Ke Yu, Kelvin C.K. Chan, Chao Dong and Chen Change Loy. BasicSR: Open Source Image and Video Restoration Toolbox. https://github.com/xinntao/BasicSR, 2020. + +Some codes are adapted from [ColorFormer](https://github.com/jixiaozhong/ColorFormer), [BigColor](https://github.com/KIMGEONUNG/BigColor), [ConvNeXt](https://github.com/facebookresearch/ConvNeXt), [Mask2Former](https://github.com/facebookresearch/Mask2Former), and [DETR](https://github.com/facebookresearch/detr). Thanks for their excellent work! diff --git a/basicsr.egg-info/SOURCES.txt b/basicsr.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..c5c2bdf2ab62fe897654848345acacb36306d1b0 --- /dev/null +++ b/basicsr.egg-info/SOURCES.txt @@ -0,0 +1,51 @@ +README.md +setup.cfg +setup.py +basicsr/__init__.py +basicsr/train.py +basicsr/version.py +basicsr.egg-info/PKG-INFO +basicsr.egg-info/SOURCES.txt +basicsr.egg-info/dependency_links.txt +basicsr.egg-info/not-zip-safe +basicsr.egg-info/requires.txt +basicsr.egg-info/top_level.txt +basicsr/archs/__init__.py +basicsr/archs/ddcolor_arch.py +basicsr/archs/discriminator_arch.py +basicsr/archs/vgg_arch.py +basicsr/data/__init__.py +basicsr/data/data_sampler.py +basicsr/data/data_util.py +basicsr/data/fmix.py +basicsr/data/lab_dataset.py +basicsr/data/prefetch_dataloader.py +basicsr/data/transforms.py +basicsr/losses/__init__.py +basicsr/losses/loss_util.py +basicsr/losses/losses.py +basicsr/metrics/__init__.py +basicsr/metrics/colorfulness.py +basicsr/metrics/custom_fid.py +basicsr/metrics/metric_util.py +basicsr/metrics/psnr_ssim.py +basicsr/models/__init__.py +basicsr/models/base_model.py +basicsr/models/color_model.py +basicsr/models/lr_scheduler.py +basicsr/utils/__init__.py +basicsr/utils/color_enhance.py +basicsr/utils/diffjpeg.py +basicsr/utils/dist_util.py +basicsr/utils/download_util.py +basicsr/utils/face_util.py +basicsr/utils/file_client.py +basicsr/utils/flow_util.py +basicsr/utils/img_process_util.py +basicsr/utils/img_util.py +basicsr/utils/lmdb_util.py +basicsr/utils/logger.py +basicsr/utils/matlab_functions.py +basicsr/utils/misc.py +basicsr/utils/options.py +basicsr/utils/registry.py \ No newline at end of file diff --git a/basicsr.egg-info/dependency_links.txt b/basicsr.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/basicsr.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/basicsr.egg-info/not-zip-safe b/basicsr.egg-info/not-zip-safe new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/basicsr.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/basicsr.egg-info/requires.txt b/basicsr.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..289d6338f9be50e366febcf68443d431ea20ab80 --- /dev/null +++ b/basicsr.egg-info/requires.txt @@ 
-0,0 +1,15 @@ +dlib==19.24.2 +lmdb==1.4.1 +numpy==1.24.3 +opencv_python==4.7.0.72 +Pillow==10.1.0 +PyYAML==6.0.1 +Requests==2.31.0 +scipy==1.9.1 +timm==0.9.2 +torch==2.2.0 +torchvision==0.17.0 +tqdm==4.65.0 +wandb==0.15.5 +scikit-image==0.22.0 +tensorboard==2.15.1 diff --git a/basicsr.egg-info/top_level.txt b/basicsr.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..13b862af86b4068b161107ed6bbadcfbc30bebe0 --- /dev/null +++ b/basicsr.egg-info/top_level.txt @@ -0,0 +1 @@ +basicsr diff --git a/basicsr/__init__.py b/basicsr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d15344e8eeb9a287e37eba139db4107c409bd5ac --- /dev/null +++ b/basicsr/__init__.py @@ -0,0 +1,12 @@ +# https://github.com/xinntao/BasicSR +# flake8: noqa +from .archs import * +from .data import * +from .losses import * +from .metrics import * +from .models import * +# from .ops import * +# from .test import * +from .train import * +from .utils import * +from .version import __gitsha__, __version__ diff --git a/basicsr/__pycache__/__init__.cpython-39.pyc b/basicsr/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31321e819955b9658486765739ed93e21eadcd76 Binary files /dev/null and b/basicsr/__pycache__/__init__.cpython-39.pyc differ diff --git a/basicsr/__pycache__/train.cpython-39.pyc b/basicsr/__pycache__/train.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b3b62811aca48058d105e49dba5046dd0e577ee Binary files /dev/null and b/basicsr/__pycache__/train.cpython-39.pyc differ diff --git a/basicsr/__pycache__/version.cpython-39.pyc b/basicsr/__pycache__/version.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b046aaaf80586676164ec74735c1a126865a70e8 Binary files /dev/null and b/basicsr/__pycache__/version.cpython-39.pyc differ diff --git a/basicsr/archs/__init__.py b/basicsr/archs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb1e4d7bb221c429082bd389d9140e5b1cc07b0 --- /dev/null +++ b/basicsr/archs/__init__.py @@ -0,0 +1,25 @@ +import importlib +from copy import deepcopy +from os import path as osp + +from basicsr.utils import get_root_logger, scandir +from basicsr.utils.registry import ARCH_REGISTRY + +__all__ = ['build_network'] + +# automatically scan and import arch modules for registry +# scan all the files under the 'archs' folder and collect files ending with +# '_arch.py' +arch_folder = osp.dirname(osp.abspath(__file__)) +arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] +# import all the arch modules +_arch_modules = [importlib.import_module(f'basicsr.archs.{file_name}') for file_name in arch_filenames] + + +def build_network(opt): + opt = deepcopy(opt) + network_type = opt.pop('type') + net = ARCH_REGISTRY.get(network_type)(**opt) + logger = get_root_logger() + logger.info(f'Network [{net.__class__.__name__}] is created.') + return net diff --git a/basicsr/archs/__pycache__/__init__.cpython-39.pyc b/basicsr/archs/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9563b8b5826382500f9efd3f91868c7e01df396 Binary files /dev/null and b/basicsr/archs/__pycache__/__init__.cpython-39.pyc differ diff --git a/basicsr/archs/__pycache__/ddcolor_arch.cpython-39.pyc b/basicsr/archs/__pycache__/ddcolor_arch.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1d113c6029b8e0f1dd1ea662a2380a5be4e90586 Binary files /dev/null and b/basicsr/archs/__pycache__/ddcolor_arch.cpython-39.pyc differ diff --git a/basicsr/archs/__pycache__/discriminator_arch.cpython-39.pyc b/basicsr/archs/__pycache__/discriminator_arch.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01163f77bda5cf58b27b974965e35259d250a916 Binary files /dev/null and b/basicsr/archs/__pycache__/discriminator_arch.cpython-39.pyc differ diff --git a/basicsr/archs/__pycache__/vgg_arch.cpython-39.pyc b/basicsr/archs/__pycache__/vgg_arch.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd17351afdcd5f8d5fbfc164f21b4ed7a377dc77 Binary files /dev/null and b/basicsr/archs/__pycache__/vgg_arch.cpython-39.pyc differ diff --git a/basicsr/archs/ddcolor_arch.py b/basicsr/archs/ddcolor_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..098ac74bbe24fd48a9a11dd05dbae3b40145b538 --- /dev/null +++ b/basicsr/archs/ddcolor_arch.py @@ -0,0 +1,385 @@ +import torch +import torch.nn as nn + +from basicsr.archs.ddcolor_arch_utils.unet import Hook, CustomPixelShuffle_ICNR, UnetBlockWide, NormType, custom_conv_layer +from basicsr.archs.ddcolor_arch_utils.convnext import ConvNeXt +from basicsr.archs.ddcolor_arch_utils.transformer_utils import SelfAttentionLayer, CrossAttentionLayer, FFNLayer, MLP +from basicsr.archs.ddcolor_arch_utils.position_encoding import PositionEmbeddingSine +from basicsr.archs.ddcolor_arch_utils.transformer import Transformer +from basicsr.utils.registry import ARCH_REGISTRY + + +@ARCH_REGISTRY.register() +class DDColor(nn.Module): + + def __init__(self, + encoder_name='convnext-l', + decoder_name='MultiScaleColorDecoder', + num_input_channels=3, + input_size=(256, 256), + nf=512, + num_output_channels=3, + last_norm='Weight', + do_normalize=False, + num_queries=256, + num_scales=3, + dec_layers=9, + encoder_from_pretrain=False): + super().__init__() + + self.encoder = Encoder(encoder_name, ['norm0', 'norm1', 'norm2', 'norm3'], from_pretrain=encoder_from_pretrain) + self.encoder.eval() + test_input = torch.randn(1, num_input_channels, *input_size) + self.encoder(test_input) + + self.decoder = Decoder( + self.encoder.hooks, + nf=nf, + last_norm=last_norm, + num_queries=num_queries, + num_scales=num_scales, + dec_layers=dec_layers, + decoder_name=decoder_name + ) + self.refine_net = nn.Sequential(custom_conv_layer(num_queries + 3, num_output_channels, ks=1, use_activ=False, norm_type=NormType.Spectral)) + + self.do_normalize = do_normalize + self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)) + self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)) + + def normalize(self, img): + return (img - self.mean) / self.std + + def denormalize(self, img): + return img * self.std + self.mean + + def forward(self, x): + if x.shape[1] == 3: + x = self.normalize(x) + + self.encoder(x) + out_feat = self.decoder() + coarse_input = torch.cat([out_feat, x], dim=1) + out = self.refine_net(coarse_input) + + if self.do_normalize: + out = self.denormalize(out) + return out + + +class Decoder(nn.Module): + + def __init__(self, + hooks, + nf=512, + blur=True, + last_norm='Weight', + num_queries=256, + num_scales=3, + dec_layers=9, + decoder_name='MultiScaleColorDecoder'): + super().__init__() + self.hooks = hooks + self.nf = nf + self.blur = blur + self.last_norm = getattr(NormType, last_norm) + self.decoder_name = decoder_name 
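+        # make_layers() below turns the remaining encoder hooks (deepest
+        # feature first) into UnetBlockWide up-sampling blocks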
+ + self.layers = self.make_layers() + embed_dim = nf // 2 + + self.last_shuf = CustomPixelShuffle_ICNR(embed_dim, embed_dim, blur=self.blur, norm_type=self.last_norm, scale=4) + + if self.decoder_name == 'MultiScaleColorDecoder': + self.color_decoder = MultiScaleColorDecoder( + in_channels=[512, 512, 256], + num_queries=num_queries, + num_scales=num_scales, + dec_layers=dec_layers, + ) + else: + self.color_decoder = SingleColorDecoder( + in_channels=hooks[-1].feature.shape[1], + num_queries=num_queries, + ) + + + def forward(self): + encode_feat = self.hooks[-1].feature + out0 = self.layers[0](encode_feat) + out1 = self.layers[1](out0) + out2 = self.layers[2](out1) + out3 = self.last_shuf(out2) + + if self.decoder_name == 'MultiScaleColorDecoder': + out = self.color_decoder([out0, out1, out2], out3) + else: + out = self.color_decoder(out3, encode_feat) + + return out + + def make_layers(self): + decoder_layers = [] + + e_in_c = self.hooks[-1].feature.shape[1] + in_c = e_in_c + + out_c = self.nf + setup_hooks = self.hooks[-2::-1] + for layer_index, hook in enumerate(setup_hooks): + feature_c = hook.feature.shape[1] + if layer_index == len(setup_hooks) - 1: + out_c = out_c // 2 + decoder_layers.append( + UnetBlockWide( + in_c, feature_c, out_c, hook, blur=self.blur, self_attention=False, norm_type=NormType.Spectral)) + in_c = out_c + return nn.Sequential(*decoder_layers) + + +class Encoder(nn.Module): + + def __init__(self, encoder_name, hook_names, from_pretrain, **kwargs): + super().__init__() + + if encoder_name == 'convnext-t' or encoder_name == 'convnext': + self.arch = ConvNeXt() + elif encoder_name == 'convnext-s': + self.arch = ConvNeXt(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768]) + elif encoder_name == 'convnext-b': + self.arch = ConvNeXt(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024]) + elif encoder_name == 'convnext-l': + self.arch = ConvNeXt(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536]) + else: + raise NotImplementedError + + self.encoder_name = encoder_name + self.hook_names = hook_names + self.hooks = self.setup_hooks() + + if from_pretrain: + self.load_pretrain_model() + + def setup_hooks(self): + hooks = [Hook(self.arch._modules[name]) for name in self.hook_names] + return hooks + + def forward(self, x): + return self.arch(x) + + def load_pretrain_model(self): + if self.encoder_name == 'convnext-t' or self.encoder_name == 'convnext': + self.load('pretrain/convnext_tiny_22k_224.pth') + elif self.encoder_name == 'convnext-s': + self.load('pretrain/convnext_small_22k_224.pth') + elif self.encoder_name == 'convnext-b': + self.load('pretrain/convnext_base_22k_224.pth') + elif self.encoder_name == 'convnext-l': + self.load('pretrain/convnext_large_22k_224.pth') + else: + raise NotImplementedError + print('Loaded pretrained convnext model.') + + def load(self, path): + from basicsr.utils import get_root_logger + logger = get_root_logger() + if not path: + logger.info("No checkpoint found. 
Initializing model from scratch") + return + logger.info("[Encoder] Loading from {} ...".format(path)) + checkpoint = torch.load(path, map_location=torch.device("cpu")) + checkpoint_state_dict = checkpoint['model'] if 'model' in checkpoint.keys() else checkpoint + incompatible = self.arch.load_state_dict(checkpoint_state_dict, strict=False) + + if incompatible.missing_keys: + msg = "Some model parameters or buffers are not found in the checkpoint:\n" + msg += str(incompatible.missing_keys) + logger.warning(msg) + if incompatible.unexpected_keys: + msg = "The checkpoint state_dict contains keys that are not used by the model:\n" + msg += str(incompatible.unexpected_keys) + logger.warning(msg) + + +class MultiScaleColorDecoder(nn.Module): + + def __init__( + self, + in_channels, + hidden_dim=256, + num_queries=100, + nheads=8, + dim_feedforward=2048, + dec_layers=9, + pre_norm=False, + color_embed_dim=256, + enforce_input_project=True, + num_scales=3 + ): + super().__init__() + + # positional encoding + N_steps = hidden_dim // 2 + self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True) + + # define Transformer decoder here + self.num_heads = nheads + self.num_layers = dec_layers + self.transformer_self_attention_layers = nn.ModuleList() + self.transformer_cross_attention_layers = nn.ModuleList() + self.transformer_ffn_layers = nn.ModuleList() + + for _ in range(self.num_layers): + self.transformer_self_attention_layers.append( + SelfAttentionLayer( + d_model=hidden_dim, + nhead=nheads, + dropout=0.0, + normalize_before=pre_norm, + ) + ) + self.transformer_cross_attention_layers.append( + CrossAttentionLayer( + d_model=hidden_dim, + nhead=nheads, + dropout=0.0, + normalize_before=pre_norm, + ) + ) + self.transformer_ffn_layers.append( + FFNLayer( + d_model=hidden_dim, + dim_feedforward=dim_feedforward, + dropout=0.0, + normalize_before=pre_norm, + ) + ) + + self.decoder_norm = nn.LayerNorm(hidden_dim) + + self.num_queries = num_queries + # learnable color query features + self.query_feat = nn.Embedding(num_queries, hidden_dim) + # learnable color query p.e. 
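The `num_queries` color queries are held in two plain embedding tables: `query_feat` above for their content vectors, and `query_embed` just below for their positional counterparts. A small standalone sketch (toy sizes, not values from any config) of how such a bank is broadcast per batch, mirroring the `unsqueeze(1).repeat(1, bs, 1)` in the forward pass further down:

```python
# Sketch: a learnable query bank expanded to a batch in the (Q, N, C)
# layout the decoder layers expect; sizes here are toy values.
import torch.nn as nn

num_queries, hidden_dim, bs = 4, 8, 2
query_feat = nn.Embedding(num_queries, hidden_dim)  # weight: (Q, C)

queries = query_feat.weight.unsqueeze(1).repeat(1, bs, 1)  # (Q, N, C)
print(queries.shape)  # torch.Size([4, 2, 8])
```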
+ self.query_embed = nn.Embedding(num_queries, hidden_dim) + + # level embedding + self.num_feature_levels = num_scales + self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim) + + # input projections + self.input_proj = nn.ModuleList() + for i in range(self.num_feature_levels): + if in_channels[i] != hidden_dim or enforce_input_project: + self.input_proj.append(nn.Conv2d(in_channels[i], hidden_dim, kernel_size=1)) + nn.init.kaiming_uniform_(self.input_proj[-1].weight, a=1) + if self.input_proj[-1].bias is not None: + nn.init.constant_(self.input_proj[-1].bias, 0) + else: + self.input_proj.append(nn.Sequential()) + + # output FFNs + self.color_embed = MLP(hidden_dim, hidden_dim, color_embed_dim, 3) + + def forward(self, x, img_features): + # x is a list of multi-scale feature + assert len(x) == self.num_feature_levels + src = [] + pos = [] + + for i in range(self.num_feature_levels): + pos.append(self.pe_layer(x[i], None).flatten(2)) + src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None]) + + # flatten NxCxHxW to HWxNxC + pos[-1] = pos[-1].permute(2, 0, 1) + src[-1] = src[-1].permute(2, 0, 1) + + _, bs, _ = src[0].shape + + # QxNxC + query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1) + output = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1) + + for i in range(self.num_layers): + level_index = i % self.num_feature_levels + # attention: cross-attention first + output = self.transformer_cross_attention_layers[i]( + output, src[level_index], + memory_mask=None, + memory_key_padding_mask=None, + pos=pos[level_index], query_pos=query_embed + ) + output = self.transformer_self_attention_layers[i]( + output, tgt_mask=None, + tgt_key_padding_mask=None, + query_pos=query_embed + ) + # FFN + output = self.transformer_ffn_layers[i]( + output + ) + + decoder_output = self.decoder_norm(output) + decoder_output = decoder_output.transpose(0, 1) # [N, bs, C] -> [bs, N, C] + color_embed = self.color_embed(decoder_output) + out = torch.einsum("bqc,bchw->bqhw", color_embed, img_features) + + return out + + +class SingleColorDecoder(nn.Module): + + def __init__( + self, + in_channels=768, + hidden_dim=256, + num_queries=256, # 100 + nheads=8, + dropout=0.1, + dim_feedforward=2048, + enc_layers=0, + dec_layers=6, + pre_norm=False, + deep_supervision=True, + enforce_input_project=True, + ): + + super().__init__() + + N_steps = hidden_dim // 2 + self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True) + + transformer = Transformer( + d_model=hidden_dim, + dropout=dropout, + nhead=nheads, + dim_feedforward=dim_feedforward, + num_encoder_layers=enc_layers, + num_decoder_layers=dec_layers, + normalize_before=pre_norm, + return_intermediate_dec=deep_supervision, + ) + self.num_queries = num_queries + self.transformer = transformer + + self.query_embed = nn.Embedding(num_queries, hidden_dim) + + if in_channels != hidden_dim or enforce_input_project: + self.input_proj = nn.Conv2d(in_channels, hidden_dim, kernel_size=1) + nn.init.kaiming_uniform_(self.input_proj.weight, a=1) + if self.input_proj.bias is not None: + nn.init.constant_(self.input_proj.bias, 0) + else: + self.input_proj = nn.Sequential() + + + def forward(self, img_features, encode_feat): + pos = self.pe_layer(encode_feat) + src = encode_feat + mask = None + hs, memory = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos) + color_embed = hs[-1] + color_preds = torch.einsum('bqc,bchw->bqhw', color_embed, img_features) + return color_preds + diff --git 
a/basicsr/archs/ddcolor_arch_utils/__init__.py b/basicsr/archs/ddcolor_arch_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
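The encoder/decoder split in ddcolor_arch.py above communicates through `Hook` objects rather than return values: `Encoder.setup_hooks` registers forward hooks on the four `norm{i}` layers, and the decoder later reads `hook.feature`. This is why `DDColor.__init__` must push a dummy tensor through the encoder before building the decoder, and it is also the purpose of the unassigned `norm_layer(x)` call in `ConvNeXt.forward_features` later in this diff, which exists only to fire those hooks. A stripped-down sketch of the pattern, assuming nothing from the repo:

```python
# Minimal sketch of the forward-hook feature capture used by Encoder/Decoder:
# the hook stores a submodule's output at forward time for later readers.
import torch
import torch.nn as nn

class Hook:
    feature = None
    def __init__(self, module):
        self.handle = module.register_forward_hook(self.hook_fn)
    def hook_fn(self, module, inputs, output):
        self.feature = output  # stashed; has no effect on the forward pass

body = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.Conv2d(8, 16, 3, padding=1))
hook = Hook(body[0])                 # watch the first conv
_ = body(torch.randn(1, 3, 32, 32))
print(hook.feature.shape)            # torch.Size([1, 8, 32, 32])
```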
diff --git a/basicsr/archs/ddcolor_arch_utils/convnext.py b/basicsr/archs/ddcolor_arch_utils/convnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..e44fd6bec4d98bbb713d13df493ce75d6f2e6da6
--- /dev/null
+++ b/basicsr/archs/ddcolor_arch_utils/convnext.py
@@ -0,0 +1,155 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from timm.models.layers import trunc_normal_, DropPath
+
+class Block(nn.Module):
+    r""" ConvNeXt Block. There are two equivalent implementations:
+    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
+    (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
+    We use (2) as we find it slightly faster in PyTorch
+
+    Args:
+        dim (int): Number of input channels.
+        drop_path (float): Stochastic depth rate. Default: 0.0
+        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
+    """
+    def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
+        super().__init__()
+        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
+        self.norm = LayerNorm(dim, eps=1e-6)
+        self.pwconv1 = nn.Linear(dim, 4 * dim)  # pointwise/1x1 convs, implemented with linear layers
+        self.act = nn.GELU()
+        self.pwconv2 = nn.Linear(4 * dim, dim)
+        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
+                                  requires_grad=True) if layer_scale_init_value > 0 else None
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+    def forward(self, x):
+        input = x
+        x = self.dwconv(x)
+        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
+        x = self.norm(x)
+        x = self.pwconv1(x)
+        x = self.act(x)
+        x = self.pwconv2(x)
+        if self.gamma is not None:
+            x = self.gamma * x
+        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
+
+        x = input + self.drop_path(x)
+        return x
+
+class ConvNeXt(nn.Module):
+    r""" ConvNeXt
+    A PyTorch impl of : `A ConvNet for the 2020s` -
+    https://arxiv.org/pdf/2201.03545.pdf
+    Args:
+        in_chans (int): Number of input image channels. Default: 3
+        num_classes (int): Number of classes for classification head. Default: 1000
+        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
+        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
+        drop_path_rate (float): Stochastic depth rate. Default: 0.
+        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
+        head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
+ """ + def __init__(self, in_chans=3, num_classes=1000, + depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0., + layer_scale_init_value=1e-6, head_init_scale=1., + ): + super().__init__() + + self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers + stem = nn.Sequential( + nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4), + LayerNorm(dims[0], eps=1e-6, data_format="channels_first") + ) + self.downsample_layers.append(stem) + for i in range(3): + downsample_layer = nn.Sequential( + LayerNorm(dims[i], eps=1e-6, data_format="channels_first"), + nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2), + ) + self.downsample_layers.append(downsample_layer) + + self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks + dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] + cur = 0 + for i in range(4): + stage = nn.Sequential( + *[Block(dim=dims[i], drop_path=dp_rates[cur + j], + layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])] + ) + self.stages.append(stage) + cur += depths[i] + + # add norm layers for each output + out_indices = (0, 1, 2, 3) + for i in out_indices: + layer = LayerNorm(dims[i], eps=1e-6, data_format="channels_first") + # layer = nn.Identity() + layer_name = f'norm{i}' + self.add_module(layer_name, layer) + + self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer + # self.head_cls = nn.Linear(dims[-1], 4) + + self.apply(self._init_weights) + # self.head_cls.weight.data.mul_(head_init_scale) + # self.head_cls.bias.data.mul_(head_init_scale) + + def _init_weights(self, m): + if isinstance(m, (nn.Conv2d, nn.Linear)): + trunc_normal_(m.weight, std=.02) + nn.init.constant_(m.bias, 0) + + def forward_features(self, x): + for i in range(4): + x = self.downsample_layers[i](x) + x = self.stages[i](x) + + # add extra norm + norm_layer = getattr(self, f'norm{i}') + # x = norm_layer(x) + norm_layer(x) + + return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C) + + def forward(self, x): + x = self.forward_features(x) + # x = self.head_cls(x) + return x + +class LayerNorm(nn.Module): + r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with + shape (batch_size, height, width, channels) while channels_first corresponds to inputs + with shape (batch_size, channels, height, width). 
+ """ + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError + self.normalized_shape = (normalized_shape, ) + + def forward(self, x): + if self.data_format == "channels_last": # B H W C + return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": # B C H W + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/basicsr/archs/ddcolor_arch_utils/position_encoding.py b/basicsr/archs/ddcolor_arch_utils/position_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa7b9e0d0d103596d5cae16dfb578a5de8b45c5 --- /dev/null +++ b/basicsr/archs/ddcolor_arch_utils/position_encoding.py @@ -0,0 +1,52 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py +""" +Various positional encodings for the transformer. +""" +import math + +import torch +from torch import nn + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + + def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, x, mask=None): + if mask is None: + mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) + not_mask = ~mask + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos \ No newline at end of file diff --git a/basicsr/archs/ddcolor_arch_utils/transformer.py b/basicsr/archs/ddcolor_arch_utils/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..f462a10e34a0578304270538524192bbe63e9fde --- /dev/null +++ b/basicsr/archs/ddcolor_arch_utils/transformer.py @@ -0,0 +1,368 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified from: https://github.com/facebookresearch/detr/blob/master/models/transformer.py +""" +Transformer class. 
+Copy-paste from torch.nn.Transformer with modifications: + * positional encodings are passed in MHattention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers +""" +import copy +from typing import List, Optional + +import torch +import torch.nn.functional as F +from torch import Tensor, nn + + +class Transformer(nn.Module): + def __init__( + self, + d_model=512, + nhead=8, + num_encoder_layers=6, + num_decoder_layers=6, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + return_intermediate_dec=False, + ): + super().__init__() + + encoder_layer = TransformerEncoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, normalize_before + ) + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + decoder_layer = TransformerDecoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, normalize_before + ) + decoder_norm = nn.LayerNorm(d_model) + self.decoder = TransformerDecoder( + decoder_layer, + num_decoder_layers, + decoder_norm, + return_intermediate=return_intermediate_dec, + ) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, mask, query_embed, pos_embed): + # flatten NxCxHxW to HWxNxC + bs, c, h, w = src.shape + src = src.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) + if mask is not None: + mask = mask.flatten(1) + + tgt = torch.zeros_like(query_embed) + memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) + hs = self.decoder( + tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed + ) + return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w) + + +class TransformerEncoder(nn.Module): + def __init__(self, encoder_layer, num_layers, norm=None): + super().__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + + def forward( + self, + src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + ): + output = src + + for layer in self.layers: + output = layer( + output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos + ) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class TransformerDecoder(nn.Module): + def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): + super().__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + self.return_intermediate = return_intermediate + + def forward( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + output = tgt + + intermediate = [] + + for layer in self.layers: + output = layer( + output, + memory, + tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, + query_pos=query_pos, + ) + if self.return_intermediate: + 
intermediate.append(self.norm(output)) + + if self.norm is not None: + output = self.norm(output) + if self.return_intermediate: + intermediate.pop() + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output.unsqueeze(0) + + +class TransformerEncoderLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post( + self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + ): + q = k = self.with_pos_embed(src, pos) + src2 = self.self_attn( + q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask + )[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src + + def forward_pre( + self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + ): + src2 = self.norm1(src) + q = k = self.with_pos_embed(src2, pos) + src2 = self.self_attn( + q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask + )[0] + src = src + self.dropout1(src2) + src2 = self.norm2(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) + src = src + self.dropout2(src2) + return src + + def forward( + self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + ): + if self.normalize_before: + return self.forward_pre(src, src_mask, src_key_padding_mask, pos) + return self.forward_post(src, src_mask, src_key_padding_mask, pos) + + +class TransformerDecoderLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + 
tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn( + q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask + )[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn( + query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward_pre( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + tgt2 = self.norm1(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn( + q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask + )[0] + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2 = self.multihead_attn( + query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + if self.normalize_before: + return self.forward_pre( + tgt, + memory, + tgt_mask, + memory_mask, + tgt_key_padding_mask, + memory_key_padding_mask, + pos, + query_pos, + ) + return self.forward_post( + tgt, + memory, + tgt_mask, + memory_mask, + tgt_key_padding_mask, + memory_key_padding_mask, + pos, + query_pos, + ) + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") \ No newline at end of file diff --git a/basicsr/archs/ddcolor_arch_utils/transformer_utils.py b/basicsr/archs/ddcolor_arch_utils/transformer_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ea38f399d541ccbc25999fb14201cfcffa3d8f8a --- /dev/null +++ b/basicsr/archs/ddcolor_arch_utils/transformer_utils.py @@ -0,0 +1,192 @@ +from typing import Optional +from torch import nn, Tensor +from torch.nn import functional as F + +class SelfAttentionLayer(nn.Module): + + def __init__(self, d_model, nhead, dropout=0.0, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + + self.norm = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + 
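Both transformer.py above and the attention/FFN layers in this file implement the same pre-norm vs. post-norm switch (`normalize_before`): post-norm applies LayerNorm after the residual add, while pre-norm normalizes the sublayer's input and leaves the residual stream untouched. Reduced to a single generic sublayer `f` (a `nn.Linear` stand-in here, not the real attention), the two orderings are:

```python
# Sketch of the residual orderings behind forward_post/forward_pre.
import torch
import torch.nn as nn

d = 8
norm = nn.LayerNorm(d)
f = nn.Linear(d, d)      # stand-in for the attention or FFN sublayer
x = torch.randn(4, d)

post = norm(x + f(x))    # forward_post: sublayer -> add -> norm
pre = x + f(norm(x))     # forward_pre:  norm -> sublayer -> add
```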
self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + + return tgt + + def forward_pre(self, tgt, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.norm(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout(tgt2) + + return tgt + + def forward(self, tgt, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(tgt, tgt_mask, + tgt_key_padding_mask, query_pos) + return self.forward_post(tgt, tgt_mask, + tgt_key_padding_mask, query_pos) + + +class CrossAttentionLayer(nn.Module): + + def __init__(self, d_model, nhead, dropout=0.0, + activation="relu", normalize_before=False): + super().__init__() + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + + self.norm = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt, memory, + memory_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + + return tgt + + def forward_pre(self, tgt, memory, + memory_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.norm(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout(tgt2) + + return tgt + + def forward(self, tgt, memory, + memory_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(tgt, memory, memory_mask, + memory_key_padding_mask, pos, query_pos) + return self.forward_post(tgt, memory, memory_mask, + memory_key_padding_mask, pos, query_pos) + + +class FFNLayer(nn.Module): + + def __init__(self, d_model, dim_feedforward=2048, dropout=0.0, + activation="relu", normalize_before=False): + 
super().__init__() + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm = nn.LayerNorm(d_model) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt): + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + return tgt + + def forward_pre(self, tgt): + tgt2 = self.norm(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout(tgt2) + return tgt + + def forward(self, tgt): + if self.normalize_before: + return self.forward_pre(tgt) + return self.forward_post(tgt) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") + + +class MLP(nn.Module): + """ Very simple multi-layer perceptron (also called FFN)""" + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x \ No newline at end of file diff --git a/basicsr/archs/ddcolor_arch_utils/unet.py b/basicsr/archs/ddcolor_arch_utils/unet.py new file mode 100644 index 0000000000000000000000000000000000000000..4610ba589242266383ba1b7cddca12c653b97daa --- /dev/null +++ b/basicsr/archs/ddcolor_arch_utils/unet.py @@ -0,0 +1,208 @@ +from enum import Enum +import torch +import torch.nn as nn +from torch.nn import functional as F +import collections + + +NormType = Enum('NormType', 'Batch BatchZero Weight Spectral') + + +class Hook: + feature = None + + def __init__(self, module): + self.hook = module.register_forward_hook(self.hook_fn) + + def hook_fn(self, module, input, output): + if isinstance(output, torch.Tensor): + self.feature = output + elif isinstance(output, collections.OrderedDict): + self.feature = output['out'] + + def remove(self): + self.hook.remove() + + +class SelfAttention(nn.Module): + "Self attention layer for nd." + + def __init__(self, n_channels: int): + super().__init__() + self.query = conv1d(n_channels, n_channels // 8) + self.key = conv1d(n_channels, n_channels // 8) + self.value = conv1d(n_channels, n_channels) + self.gamma = nn.Parameter(torch.tensor([0.])) + + def forward(self, x): + #Notation from https://arxiv.org/pdf/1805.08318.pdf + size = x.size() + x = x.view(*size[:2], -1) + f, g, h = self.query(x), self.key(x), self.value(x) + beta = F.softmax(torch.bmm(f.permute(0, 2, 1).contiguous(), g), dim=1) + o = self.gamma * torch.bmm(h, beta) + x + return o.view(*size).contiguous() + + +def batchnorm_2d(nf: int, norm_type: NormType = NormType.Batch): + "A batchnorm2d layer with `nf` features initialized depending on `norm_type`." 
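The `MLP` helper a few lines up builds its `nn.Linear` stack by zipping two shifted size lists; a quick standalone check of that bookkeeping (toy sizes, not values from any config):

```python
# MLP pairs ([input_dim] + hidden) with (hidden + [output_dim]):
input_dim, hidden_dim, output_dim, num_layers = 256, 512, 64, 3
h = [hidden_dim] * (num_layers - 1)                 # [512, 512]
sizes = list(zip([input_dim] + h, h + [output_dim]))
print(sizes)  # [(256, 512), (512, 512), (512, 64)] -> three Linear layers
```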
+ bn = nn.BatchNorm2d(nf) + with torch.no_grad(): + bn.bias.fill_(1e-3) + bn.weight.fill_(0. if norm_type == NormType.BatchZero else 1.) + return bn + + +def init_default(m: nn.Module, func=nn.init.kaiming_normal_) -> None: + "Initialize `m` weights with `func` and set `bias` to 0." + if func: + if hasattr(m, 'weight'): func(m.weight) + if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.) + return m + + +def icnr(x, scale=2, init=nn.init.kaiming_normal_): + "ICNR init of `x`, with `scale` and `init` function." + ni, nf, h, w = x.shape + ni2 = int(ni / (scale**2)) + k = init(torch.zeros([ni2, nf, h, w])).transpose(0, 1) + k = k.contiguous().view(ni2, nf, -1) + k = k.repeat(1, 1, scale**2) + k = k.contiguous().view([nf, ni, h, w]).transpose(0, 1) + x.data.copy_(k) + + +def conv1d(ni: int, no: int, ks: int = 1, stride: int = 1, padding: int = 0, bias: bool = False): + "Create and initialize a `nn.Conv1d` layer with spectral normalization." + conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias) + nn.init.kaiming_normal_(conv.weight) + if bias: conv.bias.data.zero_() + return nn.utils.spectral_norm(conv) + + +def custom_conv_layer( + ni: int, + nf: int, + ks: int = 3, + stride: int = 1, + padding: int = None, + bias: bool = None, + is_1d: bool = False, + norm_type=NormType.Batch, + use_activ: bool = True, + transpose: bool = False, + init=nn.init.kaiming_normal_, + self_attention: bool = False, + extra_bn: bool = False, +): + "Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and batchnorm (if `bn`) layers." + if padding is None: + padding = (ks - 1) // 2 if not transpose else 0 + bn = norm_type in (NormType.Batch, NormType.BatchZero) or extra_bn == True + if bias is None: + bias = not bn + conv_func = nn.ConvTranspose2d if transpose else nn.Conv1d if is_1d else nn.Conv2d + conv = init_default( + conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding), + init, + ) + + if norm_type == NormType.Weight: + conv = nn.utils.weight_norm(conv) + elif norm_type == NormType.Spectral: + conv = nn.utils.spectral_norm(conv) + layers = [conv] + if use_activ: + layers.append(nn.ReLU(True)) + if bn: + layers.append((nn.BatchNorm1d if is_1d else nn.BatchNorm2d)(nf)) + if self_attention: + layers.append(SelfAttention(nf)) + return nn.Sequential(*layers) + + +def conv_layer(ni: int, + nf: int, + ks: int = 3, + stride: int = 1, + padding: int = None, + bias: bool = None, + is_1d: bool = False, + norm_type=NormType.Batch, + use_activ: bool = True, + transpose: bool = False, + init=nn.init.kaiming_normal_, + self_attention: bool = False): + "Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and batchnorm (if `bn`) layers." 
+ if padding is None: padding = (ks - 1) // 2 if not transpose else 0 + bn = norm_type in (NormType.Batch, NormType.BatchZero) + if bias is None: bias = not bn + conv_func = nn.ConvTranspose2d if transpose else nn.Conv1d if is_1d else nn.Conv2d + conv = init_default(conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding), init) + if norm_type == NormType.Weight: conv = nn.utils.weight_norm(conv) + elif norm_type == NormType.Spectral: conv = nn.utils.spectral_norm(conv) + layers = [conv] + if use_activ: layers.append(nn.ReLU(True)) + if bn: layers.append((nn.BatchNorm1d if is_1d else nn.BatchNorm2d)(nf)) + if self_attention: layers.append(SelfAttention(nf)) + return nn.Sequential(*layers) + + +def _conv(ni: int, nf: int, ks: int = 3, stride: int = 1, **kwargs): + return conv_layer(ni, nf, ks=ks, stride=stride, norm_type=NormType.Spectral, **kwargs) + + +class CustomPixelShuffle_ICNR(nn.Module): + "Upsample by `scale` from `ni` filters to `nf` (default `ni`), using `nn.PixelShuffle`, `icnr` init, and `weight_norm`." + + def __init__(self, + ni: int, + nf: int = None, + scale: int = 2, + blur: bool = True, + norm_type=NormType.Spectral, + extra_bn=False): + super().__init__() + self.conv = custom_conv_layer( + ni, nf * (scale**2), ks=1, use_activ=False, norm_type=norm_type, extra_bn=extra_bn) + icnr(self.conv[0].weight) + self.shuf = nn.PixelShuffle(scale) + self.do_blur = blur + # Blurring over (h*w) kernel + # "Super-Resolution using Convolutional Neural Networks without Any Checkerboard Artifacts" + # - https://arxiv.org/abs/1806.02658 + self.pad = nn.ReplicationPad2d((1, 0, 1, 0)) + self.blur = nn.AvgPool2d(2, stride=1) + self.relu = nn.ReLU(True) + + def forward(self, x): + x = self.shuf(self.relu(self.conv(x))) + return self.blur(self.pad(x)) if self.do_blur else x + + +class UnetBlockWide(nn.Module): + "A quasi-UNet block, using `PixelShuffle_ICNR upsampling`." 
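`CustomPixelShuffle_ICNR` above chains a 1x1 conv (ICNR-initialized to suppress checkerboard patterns at initialization), `nn.PixelShuffle`, and, when `blur` is set, the replication-pad plus stride-1 average pool from the cited paper. A shape walkthrough of that path under toy sizes, independent of the repo:

```python
# Sketch of the CustomPixelShuffle_ICNR upsampling path: channels are
# expanded by scale**2, PixelShuffle trades them for resolution, and the
# pad + 2x2 stride-1 average pool blur leaves the spatial size unchanged.
import torch
import torch.nn as nn

ni, nf, scale = 16, 8, 2
x = torch.randn(1, ni, 32, 32)
x = nn.Conv2d(ni, nf * scale**2, kernel_size=1)(x)   # (1, 32, 32, 32)
x = nn.PixelShuffle(scale)(x)                        # (1, 8, 64, 64)
x = nn.ReplicationPad2d((1, 0, 1, 0))(x)             # (1, 8, 65, 65)
x = nn.AvgPool2d(2, stride=1)(x)                     # (1, 8, 64, 64)
print(x.shape)
```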
+ + def __init__(self, + up_in_c: int, + x_in_c: int, + n_out: int, + hook, + blur: bool = False, + self_attention: bool = False, + norm_type=NormType.Spectral): + super().__init__() + + self.hook = hook + up_out = n_out + self.shuf = CustomPixelShuffle_ICNR(up_in_c, up_out, blur=blur, norm_type=norm_type, extra_bn=True) + self.bn = batchnorm_2d(x_in_c) + ni = up_out + x_in_c + self.conv = custom_conv_layer(ni, n_out, norm_type=norm_type, self_attention=self_attention, extra_bn=True) + self.relu = nn.ReLU() + + def forward(self, up_in): + s = self.hook.feature + up_out = self.shuf(up_in) + cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1)) + return self.conv(cat_x) diff --git a/basicsr/archs/ddcolor_arch_utils/util.py b/basicsr/archs/ddcolor_arch_utils/util.py new file mode 100644 index 0000000000000000000000000000000000000000..c7226d8af6b66f8873b0cc8d7035692061118ec3 --- /dev/null +++ b/basicsr/archs/ddcolor_arch_utils/util.py @@ -0,0 +1,63 @@ +import numpy as np +import torch +from skimage import color + + +def rgb2lab(img_rgb): + img_lab = color.rgb2lab(img_rgb) + return img_lab[:, :, :1], img_lab[:, :, 1:] + + +def tensor_lab2rgb(labs, illuminant="D65", observer="2"): + """ + Args: + lab : (B, C, H, W) + Returns: + tuple : (B, C, H, W) + """ + illuminants = \ + {"A": {'2': (1.098466069456375, 1, 0.3558228003436005), + '10': (1.111420406956693, 1, 0.3519978321919493)}, + "D50": {'2': (0.9642119944211994, 1, 0.8251882845188288), + '10': (0.9672062750333777, 1, 0.8142801513128616)}, + "D55": {'2': (0.956797052643698, 1, 0.9214805860173273), + '10': (0.9579665682254781, 1, 0.9092525159847462)}, + "D65": {'2': (0.95047, 1., 1.08883), # This was: `lab_ref_white` + '10': (0.94809667673716, 1, 1.0730513595166162)}, + "D75": {'2': (0.9497220898840717, 1, 1.226393520724154), + '10': (0.9441713925645873, 1, 1.2064272211720228)}, + "E": {'2': (1.0, 1.0, 1.0), + '10': (1.0, 1.0, 1.0)}} + xyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423], [0.212671, 0.715160, 0.072169], + [0.019334, 0.119193, 0.950227]]) + + rgb_from_xyz = np.array([[3.240481340, -0.96925495, 0.055646640], [-1.53715152, 1.875990000, -0.20404134], + [-0.49853633, 0.041555930, 1.057311070]]) + B, C, H, W = labs.shape + arrs = labs.permute((0, 2, 3, 1)).contiguous() # (B, 3, H, W) -> (B, H, W, 3) + L, a, b = arrs[:, :, :, 0:1], arrs[:, :, :, 1:2], arrs[:, :, :, 2:] + y = (L + 16.) / 116. + x = (a / 500.) + y + z = y - (b / 200.) + invalid = z.data < 0 + z[invalid] = 0 + xyz = torch.cat([x, y, z], dim=3) + mask = xyz.data > 0.2068966 + mask_xyz = xyz.clone() + mask_xyz[mask] = torch.pow(xyz[mask], 3.0) + mask_xyz[~mask] = (xyz[~mask] - 16.0 / 116.) 
/ 7.787 + xyz_ref_white = illuminants[illuminant][observer] + for i in range(C): + mask_xyz[:, :, :, i] = mask_xyz[:, :, :, i] * xyz_ref_white[i] + + rgb_trans = torch.mm(mask_xyz.view(-1, 3), torch.from_numpy(rgb_from_xyz).type_as(xyz)).view(B, H, W, C) + rgb = rgb_trans.permute((0, 3, 1, 2)).contiguous() + mask = rgb.data > 0.0031308 + mask_rgb = rgb.clone() + mask_rgb[mask] = 1.055 * torch.pow(rgb[mask], 1 / 2.4) - 0.055 + mask_rgb[~mask] = rgb[~mask] * 12.92 + neg_mask = mask_rgb.data < 0 + large_mask = mask_rgb.data > 1 + mask_rgb[neg_mask] = 0 + mask_rgb[large_mask] = 1 + return mask_rgb \ No newline at end of file diff --git a/basicsr/archs/discriminator_arch.py b/basicsr/archs/discriminator_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..a3784102f02a430f717fa5678b306926400c885d --- /dev/null +++ b/basicsr/archs/discriminator_arch.py @@ -0,0 +1,28 @@ +import torch +import torch.nn as nn +from torchvision import models +import numpy as np + +from basicsr.archs.ddcolor_arch_utils.unet import _conv +from basicsr.utils.registry import ARCH_REGISTRY + + +@ARCH_REGISTRY.register() +class DynamicUNetDiscriminator(nn.Module): + + def __init__(self, n_channels: int = 3, nf: int = 256, n_blocks: int = 3): + super().__init__() + layers = [_conv(n_channels, nf, ks=4, stride=2)] + for i in range(n_blocks): + layers += [ + _conv(nf, nf, ks=3, stride=1), + _conv(nf, nf * 2, ks=4, stride=2, self_attention=(i == 0)), + ] + nf *= 2 + layers += [_conv(nf, nf, ks=3, stride=1), _conv(nf, 1, ks=4, bias=False, padding=0, use_activ=False)] + self.layers = nn.Sequential(*layers) + + def forward(self, x): + out = self.layers(x) + out = out.view(out.size(0), -1) + return out diff --git a/basicsr/archs/vgg_arch.py b/basicsr/archs/vgg_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..436d231915e6d6ee5dd71ac21189492f022d6fca --- /dev/null +++ b/basicsr/archs/vgg_arch.py @@ -0,0 +1,165 @@ +import os +import torch +from collections import OrderedDict +from torch import nn as nn +from torchvision.models import vgg as vgg + +from basicsr.utils.registry import ARCH_REGISTRY + +VGG_PRETRAIN_PATH = { + 'vgg19': './pretrain/vgg19-dcbb9e9d.pth', + 'vgg16_bn': './pretrain/vgg16_bn-6c64b313.pth' +} + +NAMES = { + 'vgg11': [ + 'conv1_1', 'relu1_1', 'pool1', 'conv2_1', 'relu2_1', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', + 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', + 'pool5' + ], + 'vgg13': [ + 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', + 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4', + 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'pool5' + ], + 'vgg16': [ + 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', + 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', + 'relu4_2', 'conv4_3', 'relu4_3', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', + 'pool5' + ], + 'vgg19': [ + 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', + 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1', + 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1', + 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 
'conv5_4', 'relu5_4', 'pool5' + ] +} + + +def insert_bn(names): + """Insert bn layer after each conv. + + Args: + names (list): The list of layer names. + + Returns: + list: The list of layer names with bn layers. + """ + names_bn = [] + for name in names: + names_bn.append(name) + if 'conv' in name: + position = name.replace('conv', '') + names_bn.append('bn' + position) + return names_bn + + +@ARCH_REGISTRY.register() +class VGGFeatureExtractor(nn.Module): + """VGG network for feature extraction. + + In this implementation, we allow users to choose whether use normalization + in the input feature and the type of vgg network. Note that the pretrained + path must fit the vgg type. + + Args: + layer_name_list (list[str]): Forward function returns the corresponding + features according to the layer_name_list. + Example: {'relu1_1', 'relu2_1', 'relu3_1'}. + vgg_type (str): Set the type of vgg network. Default: 'vgg19'. + use_input_norm (bool): If True, normalize the input image. Importantly, + the input feature must in the range [0, 1]. Default: True. + range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. + Default: False. + requires_grad (bool): If true, the parameters of VGG network will be + optimized. Default: False. + remove_pooling (bool): If true, the max pooling operations in VGG net + will be removed. Default: False. + pooling_stride (int): The stride of max pooling operation. Default: 2. + """ + + def __init__(self, + layer_name_list, + vgg_type='vgg19', + use_input_norm=True, + range_norm=False, + requires_grad=False, + remove_pooling=False, + pooling_stride=2): + super(VGGFeatureExtractor, self).__init__() + + self.layer_name_list = layer_name_list + self.use_input_norm = use_input_norm + self.range_norm = range_norm + + self.names = NAMES[vgg_type.replace('_bn', '')] + if 'bn' in vgg_type: + self.names = insert_bn(self.names) + + # only borrow layers that will be used to avoid unused params + max_idx = 0 + for v in layer_name_list: + idx = self.names.index(v) + if idx > max_idx: + max_idx = idx + + if os.path.exists(VGG_PRETRAIN_PATH[vgg_type]): + vgg_net = getattr(vgg, vgg_type)(pretrained=False) + state_dict = torch.load(VGG_PRETRAIN_PATH[vgg_type], map_location=lambda storage, loc: storage) + vgg_net.load_state_dict(state_dict) + else: + vgg_net = getattr(vgg, vgg_type)(pretrained=True) + + features = vgg_net.features[:max_idx + 1] + + modified_net = OrderedDict() + for k, v in zip(self.names, features): + if 'pool' in k: + # if remove_pooling is true, pooling operation will be removed + if remove_pooling: + continue + else: + # in some cases, we may want to change the default stride + modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride) + else: + modified_net[k] = v + + self.vgg_net = nn.Sequential(modified_net) + + if not requires_grad: + self.vgg_net.eval() + for param in self.parameters(): + param.requires_grad = False + else: + self.vgg_net.train() + for param in self.parameters(): + param.requires_grad = True + + if self.use_input_norm: + # the mean is for image with range [0, 1] + self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)) + # the std is for image with range [0, 1] + self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)) + + def forward(self, x): + """Forward function. + + Args: + x (Tensor): Input tensor with shape (n, c, h, w). + + Returns: + Tensor: Forward results. 
+ """ + if self.range_norm: + x = (x + 1) / 2 + if self.use_input_norm: + x = (x - self.mean) / self.std + + output = {} + for key, layer in self.vgg_net._modules.items(): + x = layer(x) + if key in self.layer_name_list: + output[key] = x.clone() + + return output diff --git a/basicsr/data/__init__.py b/basicsr/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6aa0af876863ca27daa799ef269a235df784f6ea --- /dev/null +++ b/basicsr/data/__init__.py @@ -0,0 +1,101 @@ +import importlib +import numpy as np +import random +import torch +import torch.utils.data +from copy import deepcopy +from functools import partial +from os import path as osp + +from basicsr.data.prefetch_dataloader import PrefetchDataLoader +from basicsr.utils import get_root_logger, scandir +from basicsr.utils.dist_util import get_dist_info +from basicsr.utils.registry import DATASET_REGISTRY + +__all__ = ['build_dataset', 'build_dataloader'] + +# automatically scan and import dataset modules for registry +# scan all the files under the data folder with '_dataset' in file names +data_folder = osp.dirname(osp.abspath(__file__)) +dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')] +# import all the dataset modules +_dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames] + + +def build_dataset(dataset_opt): + """Build dataset from options. + + Args: + dataset_opt (dict): Configuration for dataset. It must contain: + name (str): Dataset name. + type (str): Dataset type. + """ + dataset_opt = deepcopy(dataset_opt) + dataset = DATASET_REGISTRY.get(dataset_opt['type'])(dataset_opt) + logger = get_root_logger() + logger.info(f'Dataset [{dataset.__class__.__name__}] - {dataset_opt["name"]} ' 'is built.') + return dataset + + +def build_dataloader(dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=None): + """Build dataloader. + + Args: + dataset (torch.utils.data.Dataset): Dataset. + dataset_opt (dict): Dataset options. It contains the following keys: + phase (str): 'train' or 'val'. + num_worker_per_gpu (int): Number of workers for each GPU. + batch_size_per_gpu (int): Training batch size for each GPU. + num_gpu (int): Number of GPUs. Used only in the train phase. + Default: 1. + dist (bool): Whether in distributed training. Used only in the train + phase. Default: False. + sampler (torch.utils.data.sampler): Data sampler. Default: None. + seed (int | None): Seed. Default: None + """ + phase = dataset_opt['phase'] + rank, _ = get_dist_info() + if phase == 'train': + if dist: # distributed training + batch_size = dataset_opt['batch_size_per_gpu'] + num_workers = dataset_opt['num_worker_per_gpu'] + else: # non-distributed training + multiplier = 1 if num_gpu == 0 else num_gpu + batch_size = dataset_opt['batch_size_per_gpu'] * multiplier + num_workers = dataset_opt['num_worker_per_gpu'] * multiplier + dataloader_args = dict( + dataset=dataset, + batch_size=batch_size, + shuffle=False, + num_workers=num_workers, + sampler=sampler, + drop_last=True) + if sampler is None: + dataloader_args['shuffle'] = True + dataloader_args['worker_init_fn'] = partial( + worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if seed is not None else None + elif phase in ['val', 'test']: # validation + dataloader_args = dict(dataset=dataset, batch_size=1, shuffle=False, num_workers=0) + else: + raise ValueError(f'Wrong dataset phase: {phase}. 
' "Supported ones are 'train', 'val' and 'test'.")
+
+    dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
+    dataloader_args['persistent_workers'] = dataset_opt.get('persistent_workers', False)
+
+    prefetch_mode = dataset_opt.get('prefetch_mode')
+    if prefetch_mode == 'cpu':  # CPUPrefetcher
+        num_prefetch_queue = dataset_opt.get('num_prefetch_queue', 1)
+        logger = get_root_logger()
+        logger.info(f'Use {prefetch_mode} prefetch dataloader: num_prefetch_queue = {num_prefetch_queue}')
+        return PrefetchDataLoader(num_prefetch_queue=num_prefetch_queue, **dataloader_args)
+    else:
+        # prefetch_mode=None: Normal dataloader
+        # prefetch_mode='cuda': dataloader for CUDAPrefetcher
+        return torch.utils.data.DataLoader(**dataloader_args)
+
+
+def worker_init_fn(worker_id, num_workers, rank, seed):
+    # Set the worker seed to num_workers * rank + worker_id + seed
+    worker_seed = num_workers * rank + worker_id + seed
+    np.random.seed(worker_seed)
+    random.seed(worker_seed)
diff --git a/basicsr/data/data_sampler.py b/basicsr/data/data_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..575452d9f844a928f7f42296c81635cfbadec7c2
--- /dev/null
+++ b/basicsr/data/data_sampler.py
@@ -0,0 +1,48 @@
+import math
+import torch
+from torch.utils.data.sampler import Sampler
+
+
+class EnlargedSampler(Sampler):
+    """Sampler that restricts data loading to a subset of the dataset.
+ + Modified from torch.utils.data.distributed.DistributedSampler + Support enlarging the dataset for iteration-based training, for saving + time when restart the dataloader after each epoch + + Args: + dataset (torch.utils.data.Dataset): Dataset used for sampling. + num_replicas (int | None): Number of processes participating in + the training. It is usually the world_size. + rank (int | None): Rank of the current process within num_replicas. + ratio (int): Enlarging ratio. Default: 1. + """ + + def __init__(self, dataset, num_replicas, rank, ratio=1): + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.num_samples = math.ceil(len(self.dataset) * ratio / self.num_replicas) + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(self.total_size, generator=g).tolist() + + dataset_size = len(self.dataset) + indices = [v % dataset_size for v in indices] + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/basicsr/data/data_util.py b/basicsr/data/data_util.py new file mode 100644 index 0000000000000000000000000000000000000000..40db8154c3ca8f447d1eadf218c769a1eb482a04 --- /dev/null +++ b/basicsr/data/data_util.py @@ -0,0 +1,313 @@ +import cv2 +import numpy as np +import torch +from os import path as osp +from torch.nn import functional as F + +from basicsr.data.transforms import mod_crop +from basicsr.utils import img2tensor, scandir + + +def read_img_seq(path, require_mod_crop=False, scale=1, return_imgname=False): + """Read a sequence of images from a given folder path. + + Args: + path (list[str] | str): List of image paths or image folder path. + require_mod_crop (bool): Require mod crop for each image. + Default: False. + scale (int): Scale factor for mod_crop. Default: 1. + return_imgname(bool): Whether return image names. Default False. + + Returns: + Tensor: size (t, c, h, w), RGB, [0, 1]. + list[str]: Returned image name list. + """ + if isinstance(path, list): + img_paths = path + else: + img_paths = sorted(list(scandir(path, full_path=True))) + imgs = [cv2.imread(v).astype(np.float32) / 255. for v in img_paths] + + if require_mod_crop: + imgs = [mod_crop(img, scale) for img in imgs] + imgs = img2tensor(imgs, bgr2rgb=True, float32=True) + imgs = torch.stack(imgs, dim=0) + + if return_imgname: + imgnames = [osp.splitext(osp.basename(path))[0] for path in img_paths] + return imgs, imgnames + else: + return imgs + + +def generate_frame_indices(crt_idx, max_frame_num, num_frames, padding='reflection'): + """Generate an index list for reading `num_frames` frames from a sequence + of images. + + Args: + crt_idx (int): Current center index. + max_frame_num (int): Max number of the sequence of images (from 1). + num_frames (int): Reading num_frames frames. + padding (str): Padding mode, one of + 'replicate' | 'reflection' | 'reflection_circle' | 'circle' + Examples: current_idx = 0, num_frames = 5 + The generated frame indices under different padding mode: + replicate: [0, 0, 0, 1, 2] + reflection: [2, 1, 0, 1, 2] + reflection_circle: [4, 3, 0, 1, 2] + circle: [3, 4, 0, 1, 2] + + Returns: + list[int]: A list of indices. 
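# A small sanity check for EnlargedSampler above, assuming 2 replicas; `ratio`
# enlarges the epoch and the modulo in __iter__ wraps indices back into range.
import torch
from torch.utils.data import TensorDataset

from basicsr.data.data_sampler import EnlargedSampler

toy = TensorDataset(torch.arange(10))
sampler = EnlargedSampler(toy, num_replicas=2, rank=0, ratio=3)
sampler.set_epoch(0)                      # deterministic shuffle per epoch
indices = list(sampler)                   # ceil(10 * 3 / 2) = 15 indices per rank
assert len(indices) == 15 and all(0 <= i < len(toy) for i in indices)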
+ """ + assert num_frames % 2 == 1, 'num_frames should be an odd number.' + assert padding in ('replicate', 'reflection', 'reflection_circle', 'circle'), f'Wrong padding mode: {padding}.' + + max_frame_num = max_frame_num - 1 # start from 0 + num_pad = num_frames // 2 + + indices = [] + for i in range(crt_idx - num_pad, crt_idx + num_pad + 1): + if i < 0: + if padding == 'replicate': + pad_idx = 0 + elif padding == 'reflection': + pad_idx = -i + elif padding == 'reflection_circle': + pad_idx = crt_idx + num_pad - i + else: + pad_idx = num_frames + i + elif i > max_frame_num: + if padding == 'replicate': + pad_idx = max_frame_num + elif padding == 'reflection': + pad_idx = max_frame_num * 2 - i + elif padding == 'reflection_circle': + pad_idx = (crt_idx - num_pad) - (i - max_frame_num) + else: + pad_idx = i - num_frames + else: + pad_idx = i + indices.append(pad_idx) + return indices + + +def paired_paths_from_lmdb(folders, keys): + """Generate paired paths from lmdb files. + + Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is: + + lq.lmdb + ├── data.mdb + ├── lock.mdb + ├── meta_info.txt + + The data.mdb and lock.mdb are standard lmdb files and you can refer to + https://lmdb.readthedocs.io/en/release/ for more details. + + The meta_info.txt is a specified txt file to record the meta information + of our datasets. It will be automatically created when preparing + datasets by our provided dataset tools. + Each line in the txt file records + 1)image name (with extension), + 2)image shape, + 3)compression level, separated by a white space. + Example: `baboon.png (120,125,3) 1` + + We use the image name without extension as the lmdb key. + Note that we use the same key for the corresponding lq and gt images. + + Args: + folders (list[str]): A list of folder path. The order of list should + be [input_folder, gt_folder]. + keys (list[str]): A list of keys identifying folders. The order should + be in consistent with folders, e.g., ['lq', 'gt']. + Note that this key is different from lmdb keys. + + Returns: + list[str]: Returned path list. + """ + assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' + f'But got {len(folders)}') + assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}' + input_folder, gt_folder = folders + input_key, gt_key = keys + + if not (input_folder.endswith('.lmdb') and gt_folder.endswith('.lmdb')): + raise ValueError(f'{input_key} folder and {gt_key} folder should both in lmdb ' + f'formats. But received {input_key}: {input_folder}; ' + f'{gt_key}: {gt_folder}') + # ensure that the two meta_info files are the same + with open(osp.join(input_folder, 'meta_info.txt')) as fin: + input_lmdb_keys = [line.split('.')[0] for line in fin] + with open(osp.join(gt_folder, 'meta_info.txt')) as fin: + gt_lmdb_keys = [line.split('.')[0] for line in fin] + if set(input_lmdb_keys) != set(gt_lmdb_keys): + raise ValueError(f'Keys in {input_key}_folder and {gt_key}_folder are different.') + else: + paths = [] + for lmdb_key in sorted(input_lmdb_keys): + paths.append(dict([(f'{input_key}_path', lmdb_key), (f'{gt_key}_path', lmdb_key)])) + return paths + + +def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl): + """Generate paired paths from an meta information file. + + Each line in the meta information file contains the image names and + image shape (usually for gt), separated by a white space. 
+ + Example of an meta information file: + ``` + 0001_s001.png (480,480,3) + 0001_s002.png (480,480,3) + ``` + + Args: + folders (list[str]): A list of folder path. The order of list should + be [input_folder, gt_folder]. + keys (list[str]): A list of keys identifying folders. The order should + be in consistent with folders, e.g., ['lq', 'gt']. + meta_info_file (str): Path to the meta information file. + filename_tmpl (str): Template for each filename. Note that the + template excludes the file extension. Usually the filename_tmpl is + for files in the input folder. + + Returns: + list[str]: Returned path list. + """ + assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' + f'But got {len(folders)}') + assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}' + input_folder, gt_folder = folders + input_key, gt_key = keys + + with open(meta_info_file, 'r') as fin: + gt_names = [line.split(' ')[0] for line in fin] + + paths = [] + for gt_name in gt_names: + basename, ext = osp.splitext(osp.basename(gt_name)) + input_name = f'{filename_tmpl.format(basename)}{ext}' + input_path = osp.join(input_folder, input_name) + gt_path = osp.join(gt_folder, gt_name) + paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)])) + return paths + + +def paired_paths_from_folder(folders, keys, filename_tmpl): + """Generate paired paths from folders. + + Args: + folders (list[str]): A list of folder path. The order of list should + be [input_folder, gt_folder]. + keys (list[str]): A list of keys identifying folders. The order should + be in consistent with folders, e.g., ['lq', 'gt']. + filename_tmpl (str): Template for each filename. Note that the + template excludes the file extension. Usually the filename_tmpl is + for files in the input folder. + + Returns: + list[str]: Returned path list. + """ + assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' + f'But got {len(folders)}') + assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}' + input_folder, gt_folder = folders + input_key, gt_key = keys + + input_paths = list(scandir(input_folder)) + gt_paths = list(scandir(gt_folder)) + assert len(input_paths) == len(gt_paths), (f'{input_key} and {gt_key} datasets have different number of images: ' + f'{len(input_paths)}, {len(gt_paths)}.') + paths = [] + for gt_path in gt_paths: + basename, ext = osp.splitext(osp.basename(gt_path)) + input_name = f'{filename_tmpl.format(basename)}{ext}' + input_path = osp.join(input_folder, input_name) + assert input_name in input_paths, f'{input_name} is not in {input_key}_paths.' + gt_path = osp.join(gt_folder, gt_path) + paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)])) + return paths + + +def paths_from_folder(folder): + """Generate paths from folder. + + Args: + folder (str): Folder path. + + Returns: + list[str]: Returned path list. + """ + + paths = list(scandir(folder)) + paths = [osp.join(folder, path) for path in paths] + return paths + + +def paths_from_lmdb(folder): + """Generate paths from lmdb. + + Args: + folder (str): Folder path. + + Returns: + list[str]: Returned path list. 
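# How filename_tmpl maps a gt name to an input name in the two helpers above,
# assuming LQ files carry an '_x4' suffix; pure string logic, shown standalone.
filename_tmpl = '{}_x4'
gt_name = '0001.png'
basename, ext = '0001', '.png'            # osp.splitext(osp.basename(gt_name))
input_name = f'{filename_tmpl.format(basename)}{ext}'
assert input_name == '0001_x4.png'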
+ """ + if not folder.endswith('.lmdb'): + raise ValueError(f'Folder {folder}folder should in lmdb format.') + with open(osp.join(folder, 'meta_info.txt')) as fin: + paths = [line.split('.')[0] for line in fin] + return paths + + +def generate_gaussian_kernel(kernel_size=13, sigma=1.6): + """Generate Gaussian kernel used in `duf_downsample`. + + Args: + kernel_size (int): Kernel size. Default: 13. + sigma (float): Sigma of the Gaussian kernel. Default: 1.6. + + Returns: + np.array: The Gaussian kernel. + """ + from scipy.ndimage import filters as filters + kernel = np.zeros((kernel_size, kernel_size)) + # set element at the middle to one, a dirac delta + kernel[kernel_size // 2, kernel_size // 2] = 1 + # gaussian-smooth the dirac, resulting in a gaussian filter + return filters.gaussian_filter(kernel, sigma) + + +def duf_downsample(x, kernel_size=13, scale=4): + """Downsamping with Gaussian kernel used in the DUF official code. + + Args: + x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w). + kernel_size (int): Kernel size. Default: 13. + scale (int): Downsampling factor. Supported scale: (2, 3, 4). + Default: 4. + + Returns: + Tensor: DUF downsampled frames. + """ + assert scale in (2, 3, 4), f'Only support scale (2, 3, 4), but got {scale}.' + + squeeze_flag = False + if x.ndim == 4: + squeeze_flag = True + x = x.unsqueeze(0) + b, t, c, h, w = x.size() + x = x.view(-1, 1, h, w) + pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2 + x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), 'reflect') + + gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale) + gaussian_filter = torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(0).unsqueeze(0) + x = F.conv2d(x, gaussian_filter, stride=scale) + x = x[:, :, 2:-2, 2:-2] + x = x.view(b, t, c, x.size(2), x.size(3)) + if squeeze_flag: + x = x.squeeze(0) + return x diff --git a/basicsr/data/fmix.py b/basicsr/data/fmix.py new file mode 100644 index 0000000000000000000000000000000000000000..31c2073ba75e1c0180a63c73e4fd852af0d70aa1 --- /dev/null +++ b/basicsr/data/fmix.py @@ -0,0 +1,206 @@ +''' +Fmix paper from arxiv: https://arxiv.org/abs/2002.12047 +Fmix code from github : https://github.com/ecs-vlc/FMix +''' +import math +import random +import numpy as np +from scipy.stats import beta + + +def fftfreqnd(h, w=None, z=None): + """ Get bin values for discrete fourier transform of size (h, w, z) + :param h: Required, first dimension size + :param w: Optional, second dimension size + :param z: Optional, third dimension size + """ + fz = fx = 0 + fy = np.fft.fftfreq(h) + + if w is not None: + fy = np.expand_dims(fy, -1) + + if w % 2 == 1: + fx = np.fft.fftfreq(w)[: w // 2 + 2] + else: + fx = np.fft.fftfreq(w)[: w // 2 + 1] + + if z is not None: + fy = np.expand_dims(fy, -1) + if z % 2 == 1: + fz = np.fft.fftfreq(z)[:, None] + else: + fz = np.fft.fftfreq(z)[:, None] + + return np.sqrt(fx * fx + fy * fy + fz * fz) + + +def get_spectrum(freqs, decay_power, ch, h, w=0, z=0): + """ Samples a fourier image with given size and frequencies decayed by decay power + :param freqs: Bin values for the discrete fourier transform + :param decay_power: Decay power for frequency decay prop 1/f**d + :param ch: Number of channels for the resulting mask + :param h: Required, first dimension size + :param w: Optional, second dimension size + :param z: Optional, third dimension size + """ + scale = np.ones(1) / (np.maximum(freqs, np.array([1. 
/ max(w, h, z)])) ** decay_power)
+
+    param_size = [ch] + list(freqs.shape) + [2]
+    param = np.random.randn(*param_size)
+
+    scale = np.expand_dims(scale, -1)[None, :]
+
+    return scale * param
+
+
+def make_low_freq_image(decay, shape, ch=1):
+    """ Sample a low frequency image from fourier space
+    :param decay: Decay power for frequency decay prop 1/f**d
+    :param shape: Shape of desired mask, list up to 3 dims
+    :param ch: Number of channels for desired mask
+    """
+    freqs = fftfreqnd(*shape)
+    spectrum = get_spectrum(freqs, decay, ch, *shape)  # .reshape((1, *shape[:-1], -1))
+    spectrum = spectrum[:, 0] + 1j * spectrum[:, 1]
+    mask = np.real(np.fft.irfftn(spectrum, shape))
+
+    if len(shape) == 1:
+        mask = mask[:1, :shape[0]]
+    if len(shape) == 2:
+        mask = mask[:1, :shape[0], :shape[1]]
+    if len(shape) == 3:
+        mask = mask[:1, :shape[0], :shape[1], :shape[2]]
+
+    # normalise the mask to [0, 1]
+    mask = (mask - mask.min())
+    mask = mask / mask.max()
+    return mask
+
+
+def sample_lam(alpha, reformulate=False):
+    """ Sample a lambda from symmetric beta distribution with given alpha
+    :param alpha: Alpha value for beta distribution
+    :param reformulate: If True, uses the reformulation of [1].
+    """
+    if reformulate:
+        lam = beta.rvs(alpha + 1, alpha)  # beta.rvs(a, b, loc=mean, scale=std, size=n) draws n samples from the distribution
+    else:
+        lam = beta.rvs(alpha, alpha)
+
+    return lam
+
+
+def binarise_mask(mask, lam, in_shape, max_soft=0.0):
+    """ Binarises a given low frequency image such that it has mean lambda.
+    :param mask: Low frequency image, usually the result of `make_low_freq_image`
+    :param lam: Mean value of final mask
+    :param in_shape: Shape of inputs
+    :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
+    :return:
+    """
+    idx = mask.reshape(-1).argsort()[::-1]
+    mask = mask.reshape(-1)
+    num = math.ceil(lam * mask.size) if random.random() > 0.5 else math.floor(lam * mask.size)
+
+    eff_soft = max_soft
+    if max_soft > lam or max_soft > (1 - lam):
+        eff_soft = min(lam, 1 - lam)
+
+    soft = int(mask.size * eff_soft)
+    num_low = num - soft
+    num_high = num + soft
+
+    mask[idx[:num_high]] = 1
+    mask[idx[num_low:]] = 0
+    mask[idx[num_low:num_high]] = np.linspace(1, 0, (num_high - num_low))
+
+    mask = mask.reshape((1, *in_shape))
+    return mask
+
+
+def sample_mask(alpha, decay_power, shape, max_soft=0.0, reformulate=False):
+    """ Samples a mean lambda from beta distribution parametrised by alpha, creates a low frequency image and binarises
+    it based on this lambda
+    :param alpha: Alpha value for beta distribution from which to sample mean of mask
+    :param decay_power: Decay power for frequency decay prop 1/f**d
+    :param shape: Shape of desired mask, list up to 3 dims
+    :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
+    :param reformulate: If True, uses the reformulation of [1].
+ """ + if isinstance(shape, int): + shape = (shape,) + + # Choose lambda + lam = sample_lam(alpha, reformulate) + + # Make mask, get mean / std + mask = make_low_freq_image(decay_power, shape) + mask = binarise_mask(mask, lam, shape, max_soft) + + return lam, mask + + +def sample_and_apply(x, alpha, decay_power, shape, max_soft=0.0, reformulate=False): + """ + :param x: Image batch on which to apply fmix of shape [b, c, shape*] + :param alpha: Alpha value for beta distribution from which to sample mean of mask + :param decay_power: Decay power for frequency decay prop 1/f**d + :param shape: Shape of desired mask, list up to 3 dims + :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask. + :param reformulate: If True, uses the reformulation of [1]. + :return: mixed input, permutation indices, lambda value of mix, + """ + lam, mask = sample_mask(alpha, decay_power, shape, max_soft, reformulate) + index = np.random.permutation(x.shape[0]) + + x1, x2 = x * mask, x[index] * (1-mask) + return x1+x2, index, lam + + +class FMixBase: + """ FMix augmentation + Args: + decay_power (float): Decay power for frequency decay prop 1/f**d + alpha (float): Alpha value for beta distribution from which to sample mean of mask + size ([int] | [int, int] | [int, int, int]): Shape of desired mask, list up to 3 dims + max_soft (float): Softening value between 0 and 0.5 which smooths hard edges in the mask. + reformulate (bool): If True, uses the reformulation of [1]. + """ + + def __init__(self, decay_power=3, alpha=1, size=(32, 32), max_soft=0.0, reformulate=False): + super().__init__() + self.decay_power = decay_power + self.reformulate = reformulate + self.size = size + self.alpha = alpha + self.max_soft = max_soft + self.index = None + self.lam = None + + def __call__(self, x): + raise NotImplementedError + + def loss(self, *args, **kwargs): + raise NotImplementedError + + +if __name__ == '__main__': + # para = {'alpha':1.,'decay_power':3.,'shape':(10,10),'max_soft':0.0,'reformulate':False} + # lam, mask = sample_mask(**para) + # mask = mask.transpose(1, 2, 0) + # img1 = np.zeros((10, 10, 3)) + # img2 = np.ones((10, 10, 3)) + # img_gt = mask * img1 + (1. - mask) * img2 + # import ipdb; ipdb.set_trace() + + # test + import cv2 + i1 = cv2.imread('output/ILSVRC2012_val_00000001.JPEG') + i2 = cv2.imread('output/ILSVRC2012_val_00000002.JPEG') + para = {'alpha':1.,'decay_power':3.,'shape':(256, 256),'max_soft':0.0,'reformulate':False} + lam, mask = sample_mask(**para) + mask = mask.transpose(1, 2, 0) + i = mask * i1 + (1. 
- mask) * i2
+    # i = i.astype(np.uint8)
+    cv2.imwrite('fmix.jpg', i)
\ No newline at end of file
diff --git a/basicsr/data/lab_dataset.py b/basicsr/data/lab_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..000920f6b944cbbfee9f2592b7882e1a9c2c86ba
--- /dev/null
+++ b/basicsr/data/lab_dataset.py
@@ -0,0 +1,159 @@
+import cv2
+import random
+import time
+import numpy as np
+import torch
+from torch.utils import data as data
+
+from basicsr.data.transforms import rgb2lab
+from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
+from basicsr.utils.registry import DATASET_REGISTRY
+from basicsr.data.fmix import sample_mask
+
+
+@DATASET_REGISTRY.register()
+class LabDataset(data.Dataset):
+    """
+    Dataset used for Lab colorization
+    """
+
+    def __init__(self, opt):
+        super(LabDataset, self).__init__()
+        self.opt = opt
+        # file client (io backend)
+        self.file_client = None
+        self.io_backend_opt = opt['io_backend']
+        self.gt_folder = opt['dataroot_gt']
+
+        meta_info_file = self.opt['meta_info_file']
+        assert meta_info_file is not None
+        if not isinstance(meta_info_file, list):
+            meta_info_file = [meta_info_file]
+        self.paths = []
+        for meta_info in meta_info_file:
+            with open(meta_info, 'r') as fin:
+                self.paths.extend([line.strip() for line in fin])
+
+        self.min_ab, self.max_ab = -128, 128
+        self.interval_ab = 4
+        self.ab_palette = [i for i in range(self.min_ab, self.max_ab + self.interval_ab, self.interval_ab)]
+        # print(self.ab_palette)
+
+        self.do_fmix = opt['do_fmix']
+        self.fmix_params = {'alpha': 1., 'decay_power': 3., 'shape': (256, 256), 'max_soft': 0.0, 'reformulate': False}
+        self.fmix_p = opt['fmix_p']
+        self.do_cutmix = opt['do_cutmix']
+        self.cutmix_params = {'alpha': 1.}
+        self.cutmix_p = opt['cutmix_p']
+
+    def __getitem__(self, index):
+        if self.file_client is None:
+            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
+
+        # -------------------------------- Load gt images -------------------------------- #
+        # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
+        gt_path = self.paths[index]
+        gt_size = self.opt['gt_size']
+        # avoid errors caused by high latency in reading files
+        retry = 3
+        while retry > 0:
+            try:
+                img_bytes = self.file_client.get(gt_path, 'gt')
+            except Exception as e:
+                logger = get_root_logger()
+                logger.warning(f'File client error: {e}, remaining retry times: {retry - 1}')
+                # change another file to read (randint is inclusive on both ends)
+                index = random.randint(0, self.__len__() - 1)
+                gt_path = self.paths[index]
+                time.sleep(1)  # sleep 1s for occasional server congestion
+            else:
+                break
+            finally:
+                retry -= 1
+        img_gt = imfrombytes(img_bytes, float32=True)
+        img_gt = cv2.resize(img_gt, (gt_size, gt_size))  # TODO: is a plain resize the best option here?
+
+        # -------------------------------- (Optional) CutMix & FMix -------------------------------- #
+        if self.do_fmix and np.random.uniform(0., 1., size=1)[0] > self.fmix_p:
+            with torch.no_grad():
+                lam, mask = sample_mask(**self.fmix_params)
+
+                fmix_index = random.randint(0, self.__len__() - 1)
+                fmix_img_path = self.paths[fmix_index]
+                fmix_img_bytes = self.file_client.get(fmix_img_path, 'gt')
+                fmix_img = imfrombytes(fmix_img_bytes, float32=True)
+                fmix_img = cv2.resize(fmix_img, (gt_size, gt_size))
+
+                mask = mask.transpose(1, 2, 0)  # (1, 256, 256) -> (256, 256, 1)
+                img_gt = mask * img_gt + (1. - mask) * fmix_img
+                img_gt = img_gt.astype(np.float32)
+
+        if self.do_cutmix and np.random.uniform(0., 1., size=1)[0] > self.cutmix_p:
+            with torch.no_grad():
+                cmix_index = random.randint(0, self.__len__() - 1)
+                cmix_img_path = self.paths[cmix_index]
+                cmix_img_bytes = self.file_client.get(cmix_img_path, 'gt')
+                cmix_img = imfrombytes(cmix_img_bytes, float32=True)
+                cmix_img = cv2.resize(cmix_img, (gt_size, gt_size))
+
+                lam = np.clip(np.random.beta(self.cutmix_params['alpha'], self.cutmix_params['alpha']), 0.3, 0.4)
+                bbx1, bby1, bbx2, bby2 = rand_bbox(cmix_img.shape[:2], lam)
+
+                # images are (h, w, c), so the channel axis comes last
+                img_gt[bbx1:bbx2, bby1:bby2, :] = cmix_img[bbx1:bbx2, bby1:bby2, :]
+
+        # ----------------------------- Get gray lq, to tensor ----------------------------- #
+        # convert to gray
+        img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2RGB)
+        img_l, img_ab = rgb2lab(img_gt)
+
+        target_a, target_b = self.ab2int(img_ab)
+
+        # numpy to tensor
+        img_l, img_ab = img2tensor([img_l, img_ab], bgr2rgb=False, float32=True)
+        target_a, target_b = torch.LongTensor(target_a), torch.LongTensor(target_b)
+        return_d = {
+            'lq': img_l,
+            'gt': img_ab,
+            'target_a': target_a,
+            'target_b': target_b,
+            'lq_path': gt_path,
+            'gt_path': gt_path
+        }
+        return return_d
+
+    def ab2int(self, img_ab):
+        img_a, img_b = img_ab[:, :, 0], img_ab[:, :, 1]
+        int_a = (img_a - self.min_ab) / self.interval_ab
+        int_b = (img_b - self.min_ab) / self.interval_ab
+
+        return np.round(int_a), np.round(int_b)
+
+    def __len__(self):
+        return len(self.paths)
+
+
+def rand_bbox(size, lam):
+    '''Sample a random bbox for CutMix.
+    Args:
+        size : tuple, image size, e.g. (256, 256)
+        lam : float, ratio kept from the first image
+    Returns:
+        top-left and bottom-right corners of the bbox
+        int, int, int, int
+    '''
+    W = size[0]  # first spatial dimension
+    H = size[1]  # second spatial dimension
+    cut_rat = np.sqrt(1. - lam)  # side ratio of the bbox to cut
+    cut_w = int(W * cut_rat)  # bbox width (np.int is deprecated; the builtin int is equivalent)
+    cut_h = int(H * cut_rat)  # bbox height
+
+    cx = np.random.randint(W)  # bbox center x, sampled uniformly
+    cy = np.random.randint(H)  # bbox center y, sampled uniformly
+
+    bbx1 = np.clip(cx - cut_w // 2, 0, W)  # top-left x
+    bby1 = np.clip(cy - cut_h // 2, 0, H)  # top-left y
+    bbx2 = np.clip(cx + cut_w // 2, 0, W)  # bottom-right x
+    bby2 = np.clip(cy + cut_h // 2, 0, H)  # bottom-right y
+    return bbx1, bby1, bbx2, bby2
\ No newline at end of file
diff --git a/basicsr/data/prefetch_dataloader.py b/basicsr/data/prefetch_dataloader.py
new file mode 100644
index 0000000000000000000000000000000000000000..5088425050d4cc98114a9b93eb50ea60273f35a0
--- /dev/null
+++ b/basicsr/data/prefetch_dataloader.py
@@ -0,0 +1,125 @@
+import queue as Queue
+import threading
+import torch
+from torch.utils.data import DataLoader
+
+
+class PrefetchGenerator(threading.Thread):
+    """A general prefetch generator.
+
+    Ref:
+    https://stackoverflow.com/questions/7323664/python-generator-pre-fetch
+
+    Args:
+        generator: Python generator.
+        num_prefetch_queue (int): Number of prefetch queue.
+    """
+
+    def __init__(self, generator, num_prefetch_queue):
+        threading.Thread.__init__(self)
+        self.queue = Queue.Queue(num_prefetch_queue)
+        self.generator = generator
+        self.daemon = True
+        self.start()
+
+    def run(self):
+        for item in self.generator:
+            self.queue.put(item)
+        self.queue.put(None)
+
+    def __next__(self):
+        next_item = self.queue.get()
+        if next_item is None:
+            raise StopIteration
+        return next_item
+
+    def __iter__(self):
+        return self
+
+
+class PrefetchDataLoader(DataLoader):
+    """Prefetch version of dataloader.
+
+    Ref:
+    https://github.com/IgorSusmelj/pytorch-styleguide/issues/5#
+
+    TODO:
+    Need to test on single gpu and ddp (multi-gpu).
There is a known issue in + ddp. + + Args: + num_prefetch_queue (int): Number of prefetch queue. + kwargs (dict): Other arguments for dataloader. + """ + + def __init__(self, num_prefetch_queue, **kwargs): + self.num_prefetch_queue = num_prefetch_queue + super(PrefetchDataLoader, self).__init__(**kwargs) + + def __iter__(self): + return PrefetchGenerator(super().__iter__(), self.num_prefetch_queue) + + +class CPUPrefetcher(): + """CPU prefetcher. + + Args: + loader: Dataloader. + """ + + def __init__(self, loader): + self.ori_loader = loader + self.loader = iter(loader) + + def next(self): + try: + return next(self.loader) + except StopIteration: + return None + + def reset(self): + self.loader = iter(self.ori_loader) + + +class CUDAPrefetcher(): + """CUDA prefetcher. + + Ref: + https://github.com/NVIDIA/apex/issues/304# + + It may consums more GPU memory. + + Args: + loader: Dataloader. + opt (dict): Options. + """ + + def __init__(self, loader, opt): + self.ori_loader = loader + self.loader = iter(loader) + self.opt = opt + self.stream = torch.cuda.Stream() + self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu') + self.preload() + + def preload(self): + try: + self.batch = next(self.loader) # self.batch is a dict + except StopIteration: + self.batch = None + return None + # put tensors to gpu + with torch.cuda.stream(self.stream): + for k, v in self.batch.items(): + if torch.is_tensor(v): + self.batch[k] = self.batch[k].to(device=self.device, non_blocking=True) + + def next(self): + torch.cuda.current_stream().wait_stream(self.stream) + batch = self.batch + self.preload() + return batch + + def reset(self): + self.loader = iter(self.ori_loader) + self.preload() diff --git a/basicsr/data/transforms.py b/basicsr/data/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..0f623c4eb42df8e694ccec4f41029a16ac0c9faf --- /dev/null +++ b/basicsr/data/transforms.py @@ -0,0 +1,192 @@ +import cv2 +import numpy as np +import torch +import random +from scipy import special +from skimage import color + + +def mod_crop(img, scale): + """Mod crop images, used during testing. + + Args: + img (ndarray): Input image. + scale (int): Scale factor. + + Returns: + ndarray: Result image. + """ + img = img.copy() + if img.ndim in (2, 3): + h, w = img.shape[0], img.shape[1] + h_remainder, w_remainder = h % scale, w % scale + img = img[:h - h_remainder, :w - w_remainder, ...] + else: + raise ValueError(f'Wrong img ndim: {img.ndim}.') + return img + + +def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None): + """Paired random crop. Support Numpy array and Tensor inputs. + + It crops lists of lq and gt images with corresponding locations. + + Args: + img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images + should have the same shape. If the input is an ndarray, it will + be transformed to a list containing itself. + img_lqs (list[ndarray] | ndarray): LQ images. Note that all images + should have the same shape. If the input is an ndarray, it will + be transformed to a list containing itself. + gt_patch_size (int): GT patch size. + scale (int): Scale factor. + gt_path (str): Path to ground-truth. Default: None. + + Returns: + list[ndarray] | ndarray: GT images and LQ images. If returned results + only have one element, just return ndarray. 
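# Typical consumption pattern for CUDAPrefetcher above, assuming a CUDA device;
# the dict-yielding toy dataset stands in for a real one.
import torch
from torch.utils.data import DataLoader, Dataset

from basicsr.data.prefetch_dataloader import CUDAPrefetcher

class ToyDictSet(Dataset):
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        return {'lq': torch.randn(3), 'gt': torch.randn(3)}

prefetcher = CUDAPrefetcher(DataLoader(ToyDictSet(), batch_size=2), opt={'num_gpu': 1})
prefetcher.reset()
batch = prefetcher.next()
while batch is not None:
    _ = batch['lq'].sum()                 # tensors were moved to GPU on a side stream
    batch = prefetcher.next()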
+ """ + + if not isinstance(img_gts, list): + img_gts = [img_gts] + if not isinstance(img_lqs, list): + img_lqs = [img_lqs] + + # determine input type: Numpy array or Tensor + input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy' + + if input_type == 'Tensor': + h_lq, w_lq = img_lqs[0].size()[-2:] + h_gt, w_gt = img_gts[0].size()[-2:] + else: + h_lq, w_lq = img_lqs[0].shape[0:2] + h_gt, w_gt = img_gts[0].shape[0:2] + lq_patch_size = gt_patch_size // scale + + if h_gt != h_lq * scale or w_gt != w_lq * scale: + raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ', + f'multiplication of LQ ({h_lq}, {w_lq}).') + if h_lq < lq_patch_size or w_lq < lq_patch_size: + raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size ' + f'({lq_patch_size}, {lq_patch_size}). ' + f'Please remove {gt_path}.') + + # randomly choose top and left coordinates for lq patch + top = random.randint(0, h_lq - lq_patch_size) + left = random.randint(0, w_lq - lq_patch_size) + + # crop lq patch + if input_type == 'Tensor': + img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs] + else: + img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs] + + # crop corresponding gt patch + top_gt, left_gt = int(top * scale), int(left * scale) + if input_type == 'Tensor': + img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts] + else: + img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts] + if len(img_gts) == 1: + img_gts = img_gts[0] + if len(img_lqs) == 1: + img_lqs = img_lqs[0] + return img_gts, img_lqs + + +def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False): + """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees). + + We use vertical flip and transpose for rotation implementation. + All the images in the list use the same augmentation. + + Args: + imgs (list[ndarray] | ndarray): Images to be augmented. If the input + is an ndarray, it will be transformed to a list. + hflip (bool): Horizontal flip. Default: True. + rotation (bool): Ratotation. Default: True. + flows (list[ndarray]: Flows to be augmented. If the input is an + ndarray, it will be transformed to a list. + Dimension is (h, w, 2). Default: None. + return_status (bool): Return the status of flip and rotation. + Default: False. + + Returns: + list[ndarray] | ndarray: Augmented images and flows. If returned + results only have one element, just return ndarray. 
+ + """ + hflip = hflip and random.random() < 0.5 + vflip = rotation and random.random() < 0.5 + rot90 = rotation and random.random() < 0.5 + + def _augment(img): + if hflip: # horizontal + cv2.flip(img, 1, img) + if vflip: # vertical + cv2.flip(img, 0, img) + if rot90: + img = img.transpose(1, 0, 2) + return img + + def _augment_flow(flow): + if hflip: # horizontal + cv2.flip(flow, 1, flow) + flow[:, :, 0] *= -1 + if vflip: # vertical + cv2.flip(flow, 0, flow) + flow[:, :, 1] *= -1 + if rot90: + flow = flow.transpose(1, 0, 2) + flow = flow[:, :, [1, 0]] + return flow + + if not isinstance(imgs, list): + imgs = [imgs] + imgs = [_augment(img) for img in imgs] + if len(imgs) == 1: + imgs = imgs[0] + + if flows is not None: + if not isinstance(flows, list): + flows = [flows] + flows = [_augment_flow(flow) for flow in flows] + if len(flows) == 1: + flows = flows[0] + return imgs, flows + else: + if return_status: + return imgs, (hflip, vflip, rot90) + else: + return imgs + + +def img_rotate(img, angle, center=None, scale=1.0, borderMode=cv2.BORDER_CONSTANT, borderValue=0.): + """Rotate image. + + Args: + img (ndarray): Image to be rotated. + angle (float): Rotation angle in degrees. Positive values mean + counter-clockwise rotation. + center (tuple[int]): Rotation center. If the center is None, + initialize it as the center of the image. Default: None. + scale (float): Isotropic scale factor. Default: 1.0. + """ + (h, w) = img.shape[:2] + + if center is None: + center = (w // 2, h // 2) + + matrix = cv2.getRotationMatrix2D(center, angle, scale) + rotated_img = cv2.warpAffine(img, matrix, (w, h), borderMode=borderMode, borderValue=borderValue) + return rotated_img + + +def rgb2lab(img_rgb): + img_lab = color.rgb2lab(img_rgb) + img_l = img_lab[:, :, :1] + img_ab = img_lab[:, :, 1:] + return img_l, img_ab + + + diff --git a/basicsr/losses/__init__.py b/basicsr/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b1570dd2d683ba5983bfc715d37fc611af7b6ba5 --- /dev/null +++ b/basicsr/losses/__init__.py @@ -0,0 +1,26 @@ +from copy import deepcopy + +from basicsr.utils import get_root_logger +from basicsr.utils.registry import LOSS_REGISTRY +from .losses import (CharbonnierLoss, GANLoss, L1Loss, MSELoss, PerceptualLoss, WeightedTVLoss, g_path_regularize, + gradient_penalty_loss, r1_penalty) + +__all__ = [ + 'L1Loss', 'MSELoss', 'CharbonnierLoss', 'WeightedTVLoss', 'PerceptualLoss', 'GANLoss', 'gradient_penalty_loss', + 'r1_penalty', 'g_path_regularize' +] + + +def build_loss(opt): + """Build loss from options. + + Args: + opt (dict): Configuration. It must contain: + type (str): Model type. 
+ """ + opt = deepcopy(opt) + loss_type = opt.pop('type') + loss = LOSS_REGISTRY.get(loss_type)(**opt) + logger = get_root_logger() + logger.info(f'Loss [{loss.__class__.__name__}] is created.') + return loss diff --git a/basicsr/losses/__pycache__/__init__.cpython-39.pyc b/basicsr/losses/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca752baa02438db2b8bfd33f0840d1c212bc191a Binary files /dev/null and b/basicsr/losses/__pycache__/__init__.cpython-39.pyc differ diff --git a/basicsr/losses/__pycache__/loss_util.cpython-39.pyc b/basicsr/losses/__pycache__/loss_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..207105c4669ecc2b679d536416f430ccb20a9e0a Binary files /dev/null and b/basicsr/losses/__pycache__/loss_util.cpython-39.pyc differ diff --git a/basicsr/losses/__pycache__/losses.cpython-39.pyc b/basicsr/losses/__pycache__/losses.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..005275ee0061bcf56279719f7b34e922ebe87250 Binary files /dev/null and b/basicsr/losses/__pycache__/losses.cpython-39.pyc differ diff --git a/basicsr/losses/loss_util.py b/basicsr/losses/loss_util.py new file mode 100644 index 0000000000000000000000000000000000000000..744eeb46d1f3b5a7b4553ca23237ddd9c899a698 --- /dev/null +++ b/basicsr/losses/loss_util.py @@ -0,0 +1,95 @@ +import functools +from torch.nn import functional as F + + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are 'none', 'mean' and 'sum'. + + Returns: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + else: + return loss.sum() + + +def weight_reduce_loss(loss, weight=None, reduction='mean'): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. + weight (Tensor): Element-wise weights. Default: None. + reduction (str): Same as built-in losses of PyTorch. Options are + 'none', 'mean' and 'sum'. Default: 'mean'. + + Returns: + Tensor: Loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + assert weight.dim() == loss.dim() + assert weight.size(1) == 1 or weight.size(1) == loss.size(1) + loss = loss * weight + + # if weight is not specified or reduction is sum, just reduce the loss + if weight is None or reduction == 'sum': + loss = reduce_loss(loss, reduction) + # if reduction is mean, then compute mean over weight region + elif reduction == 'mean': + if weight.size(1) > 1: + weight = weight.sum() + else: + weight = weight.sum() * loss.size(1) + loss = loss.sum() / weight + + return loss + + +def weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + `loss_func(pred, target, **kwargs)`. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like `loss_func(pred, target, weight=None, reduction='mean', + **kwargs)`. 
+ + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.5000) + >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, reduction='sum') + tensor(3.) + """ + + @functools.wraps(loss_func) + def wrapper(pred, target, weight=None, reduction='mean', **kwargs): + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = weight_reduce_loss(loss, weight, reduction) + return loss + + return wrapper diff --git a/basicsr/losses/losses.py b/basicsr/losses/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..46b0a415560befbe0a98b9687ab99161c624e042 --- /dev/null +++ b/basicsr/losses/losses.py @@ -0,0 +1,551 @@ +import math +import torch +from torch import autograd as autograd +from torch import nn as nn +from torch.nn import functional as F + +from basicsr.archs.vgg_arch import VGGFeatureExtractor +from basicsr.utils.registry import LOSS_REGISTRY +from .loss_util import weighted_loss + +_reduction_modes = ['none', 'mean', 'sum'] + + +@weighted_loss +def l1_loss(pred, target): + return F.l1_loss(pred, target, reduction='none') + + +@weighted_loss +def mse_loss(pred, target): + return F.mse_loss(pred, target, reduction='none') + + +@weighted_loss +def charbonnier_loss(pred, target, eps=1e-12): + return torch.sqrt((pred - target)**2 + eps) + + +@LOSS_REGISTRY.register() +class L1Loss(nn.Module): + """L1 (mean absolute error, MAE) loss. + + Args: + loss_weight (float): Loss weight for L1 loss. Default: 1.0. + reduction (str): Specifies the reduction to apply to the output. + Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. + """ + + def __init__(self, loss_weight=1.0, reduction='mean'): + super(L1Loss, self).__init__() + if reduction not in ['none', 'mean', 'sum']: + raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}') + + self.loss_weight = loss_weight + self.reduction = reduction + + def forward(self, pred, target, weight=None, **kwargs): + """ + Args: + pred (Tensor): of shape (N, C, H, W). Predicted tensor. + target (Tensor): of shape (N, C, H, W). Ground truth tensor. + weight (Tensor, optional): of shape (N, C, H, W). Element-wise + weights. Default: None. + """ + return self.loss_weight * l1_loss(pred, target, weight, reduction=self.reduction) + + +@LOSS_REGISTRY.register() +class MSELoss(nn.Module): + """MSE (L2) loss. + + Args: + loss_weight (float): Loss weight for MSE loss. Default: 1.0. + reduction (str): Specifies the reduction to apply to the output. + Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. + """ + + def __init__(self, loss_weight=1.0, reduction='mean'): + super(MSELoss, self).__init__() + if reduction not in ['none', 'mean', 'sum']: + raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}') + + self.loss_weight = loss_weight + self.reduction = reduction + + def forward(self, pred, target, weight=None, **kwargs): + """ + Args: + pred (Tensor): of shape (N, C, H, W). Predicted tensor. + target (Tensor): of shape (N, C, H, W). Ground truth tensor. + weight (Tensor, optional): of shape (N, C, H, W). Element-wise + weights. Default: None. 
+ """ + return self.loss_weight * mse_loss(pred, target, weight, reduction=self.reduction) + + +@LOSS_REGISTRY.register() +class CharbonnierLoss(nn.Module): + """Charbonnier loss (one variant of Robust L1Loss, a differentiable + variant of L1Loss). + + Described in "Deep Laplacian Pyramid Networks for Fast and Accurate + Super-Resolution". + + Args: + loss_weight (float): Loss weight for L1 loss. Default: 1.0. + reduction (str): Specifies the reduction to apply to the output. + Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. + eps (float): A value used to control the curvature near zero. + Default: 1e-12. + """ + + def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12): + super(CharbonnierLoss, self).__init__() + if reduction not in ['none', 'mean', 'sum']: + raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}') + + self.loss_weight = loss_weight + self.reduction = reduction + self.eps = eps + + def forward(self, pred, target, weight=None, **kwargs): + """ + Args: + pred (Tensor): of shape (N, C, H, W). Predicted tensor. + target (Tensor): of shape (N, C, H, W). Ground truth tensor. + weight (Tensor, optional): of shape (N, C, H, W). Element-wise + weights. Default: None. + """ + return self.loss_weight * charbonnier_loss(pred, target, weight, eps=self.eps, reduction=self.reduction) + + +@LOSS_REGISTRY.register() +class WeightedTVLoss(L1Loss): + """Weighted TV loss. + + Args: + loss_weight (float): Loss weight. Default: 1.0. + """ + + def __init__(self, loss_weight=1.0): + super(WeightedTVLoss, self).__init__(loss_weight=loss_weight) + + def forward(self, pred, weight=None): + if weight is None: + y_weight = None + x_weight = None + else: + y_weight = weight[:, :, :-1, :] + x_weight = weight[:, :, :, :-1] + + y_diff = super(WeightedTVLoss, self).forward(pred[:, :, :-1, :], pred[:, :, 1:, :], weight=y_weight) + x_diff = super(WeightedTVLoss, self).forward(pred[:, :, :, :-1], pred[:, :, :, 1:], weight=x_weight) + + loss = x_diff + y_diff + + return loss + + +@LOSS_REGISTRY.register() +class PerceptualLoss(nn.Module): + """Perceptual loss with commonly used style loss. + + Args: + layer_weights (dict): The weight for each layer of vgg feature. + Here is an example: {'conv5_4': 1.}, which means the conv5_4 + feature layer (before relu5_4) will be extracted with weight + 1.0 in calculating losses. + vgg_type (str): The type of vgg network used as feature extractor. + Default: 'vgg19'. + use_input_norm (bool): If True, normalize the input image in vgg. + Default: True. + range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. + Default: False. + perceptual_weight (float): If `perceptual_weight > 0`, the perceptual + loss will be calculated and the loss will multiplied by the + weight. Default: 1.0. + style_weight (float): If `style_weight > 0`, the style loss will be + calculated and the loss will multiplied by the weight. + Default: 0. + criterion (str): Criterion used for perceptual loss. Default: 'l1'. 
+ """ + + def __init__(self, + layer_weights, + vgg_type='vgg19', + use_input_norm=True, + range_norm=False, + perceptual_weight=1.0, + style_weight=0., + criterion='l1'): + super(PerceptualLoss, self).__init__() + self.perceptual_weight = perceptual_weight + self.style_weight = style_weight + self.layer_weights = layer_weights + self.vgg = VGGFeatureExtractor( + layer_name_list=list(layer_weights.keys()), + vgg_type=vgg_type, + use_input_norm=use_input_norm, + range_norm=range_norm) + + self.criterion_type = criterion + if self.criterion_type == 'l1': + self.criterion = torch.nn.L1Loss() + elif self.criterion_type == 'l2': + self.criterion = torch.nn.L2loss() + elif self.criterion_type == 'fro': + self.criterion = None + else: + raise NotImplementedError(f'{criterion} criterion has not been supported.') + + def forward(self, x, gt): + """Forward function. + + Args: + x (Tensor): Input tensor with shape (n, c, h, w). + gt (Tensor): Ground-truth tensor with shape (n, c, h, w). + + Returns: + Tensor: Forward results. + """ + # extract vgg features + x_features = self.vgg(x) + gt_features = self.vgg(gt.detach()) + + # calculate perceptual loss + if self.perceptual_weight > 0: + percep_loss = 0 + for k in x_features.keys(): + if self.criterion_type == 'fro': + percep_loss += torch.norm(x_features[k] - gt_features[k], p='fro') * self.layer_weights[k] + else: + percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k] + percep_loss *= self.perceptual_weight + else: + percep_loss = None + + # calculate style loss + if self.style_weight > 0: + style_loss = 0 + for k in x_features.keys(): + if self.criterion_type == 'fro': + style_loss += torch.norm( + self._gram_mat(x_features[k]) - self._gram_mat(gt_features[k]), p='fro') * self.layer_weights[k] + else: + style_loss += self.criterion(self._gram_mat(x_features[k]), self._gram_mat( + gt_features[k])) * self.layer_weights[k] + style_loss *= self.style_weight + else: + style_loss = None + + return percep_loss, style_loss + + def _gram_mat(self, x): + """Calculate Gram matrix. + + Args: + x (torch.Tensor): Tensor with shape of (n, c, h, w). + + Returns: + torch.Tensor: Gram matrix. + """ + n, c, h, w = x.size() + features = x.view(n, c, w * h) + features_t = features.transpose(1, 2) + gram = features.bmm(features_t) / (c * h * w) + return gram + + +@LOSS_REGISTRY.register() +class GANLoss(nn.Module): + """Define GAN loss. + + Args: + gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'. + real_label_val (float): The value for real label. Default: 1.0. + fake_label_val (float): The value for fake label. Default: 0.0. + loss_weight (float): Loss weight. Default: 1.0. + Note that loss_weight is only for generators; and it is always 1.0 + for discriminators. + """ + + def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0): + super(GANLoss, self).__init__() + self.gan_type = gan_type + self.loss_weight = loss_weight + self.real_label_val = real_label_val + self.fake_label_val = fake_label_val + + if self.gan_type == 'vanilla': + self.loss = nn.BCEWithLogitsLoss() + elif self.gan_type == 'lsgan': + self.loss = nn.MSELoss() + elif self.gan_type == 'wgan': + self.loss = self._wgan_loss + elif self.gan_type == 'wgan_softplus': + self.loss = self._wgan_softplus_loss + elif self.gan_type == 'hinge': + self.loss = nn.ReLU() + else: + raise NotImplementedError(f'GAN type {self.gan_type} is not implemented.') + + def _wgan_loss(self, input, target): + """wgan loss. 
+ + Args: + input (Tensor): Input tensor. + target (bool): Target label. + + Returns: + Tensor: wgan loss. + """ + return -input.mean() if target else input.mean() + + def _wgan_softplus_loss(self, input, target): + """wgan loss with soft plus. softplus is a smooth approximation to the + ReLU function. + + In StyleGAN2, it is called: + Logistic loss for discriminator; + Non-saturating loss for generator. + + Args: + input (Tensor): Input tensor. + target (bool): Target label. + + Returns: + Tensor: wgan loss. + """ + return F.softplus(-input).mean() if target else F.softplus(input).mean() + + def get_target_label(self, input, target_is_real): + """Get target label. + + Args: + input (Tensor): Input tensor. + target_is_real (bool): Whether the target is real or fake. + + Returns: + (bool | Tensor): Target tensor. Return bool for wgan, otherwise, + return Tensor. + """ + + if self.gan_type in ['wgan', 'wgan_softplus']: + return target_is_real + target_val = (self.real_label_val if target_is_real else self.fake_label_val) + return input.new_ones(input.size()) * target_val + + def forward(self, input, target_is_real, is_disc=False): + """ + Args: + input (Tensor): The input for the loss module, i.e., the network + prediction. + target_is_real (bool): Whether the targe is real or fake. + is_disc (bool): Whether the loss for discriminators or not. + Default: False. + + Returns: + Tensor: GAN loss value. + """ + target_label = self.get_target_label(input, target_is_real) + if self.gan_type == 'hinge': + if is_disc: # for discriminators in hinge-gan + input = -input if target_is_real else input + loss = self.loss(1 + input).mean() + else: # for generators in hinge-gan + loss = -input.mean() + else: # other gan types + loss = self.loss(input, target_label) + + # loss_weight is always 1.0 for discriminators + return loss if is_disc else loss * self.loss_weight + + +@LOSS_REGISTRY.register() +class MultiScaleGANLoss(GANLoss): + """ + MultiScaleGANLoss accepts a list of predictions + """ + + def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0): + super(MultiScaleGANLoss, self).__init__(gan_type, real_label_val, fake_label_val, loss_weight) + + def forward(self, input, target_is_real, is_disc=False): + """ + The input is a list of tensors, or a list of (a list of tensors) + """ + if isinstance(input, list): + loss = 0 + for pred_i in input: + if isinstance(pred_i, list): + # Only compute GAN loss for the last layer + # in case of multiscale feature matching + pred_i = pred_i[-1] + # Safe operation: 0-dim tensor calling self.mean() does nothing + loss_tensor = super().forward(pred_i, target_is_real, is_disc).mean() + loss += loss_tensor + return loss / len(input) + else: + return super().forward(input, target_is_real, is_disc) + + +def r1_penalty(real_pred, real_img): + """R1 regularization for discriminator. The core idea is to + penalize the gradient on real data alone: when the + generator distribution produces the true data distribution + and the discriminator is equal to 0 on the data manifold, the + gradient penalty ensures that the discriminator cannot create + a non-zero gradient orthogonal to the data manifold without + suffering a loss in the GAN game. + + Ref: + Eq. 9 in Which training methods for GANs do actually converge. 
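# How GANLoss above is typically split between generator and discriminator
# updates; fake_pred/real_pred stand in for discriminator outputs.
import torch

from basicsr.losses.losses import GANLoss

cri_gan = GANLoss(gan_type='vanilla', loss_weight=0.1)
fake_pred, real_pred = torch.randn(4, 1), torch.randn(4, 1)

l_g_gan = cri_gan(fake_pred, True, is_disc=False)           # generator side: weighted
l_d_real = cri_gan(real_pred, True, is_disc=True)           # discriminator side: weight 1.0
l_d_fake = cri_gan(fake_pred.detach(), False, is_disc=True)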
+ """ + grad_real = autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0] + grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean() + return grad_penalty + + +def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01): + noise = torch.randn_like(fake_img) / math.sqrt(fake_img.shape[2] * fake_img.shape[3]) + grad = autograd.grad(outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)[0] + path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1)) + + path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length) + + path_penalty = (path_lengths - path_mean).pow(2).mean() + + return path_penalty, path_lengths.detach().mean(), path_mean.detach() + + +def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None): + """Calculate gradient penalty for wgan-gp. + + Args: + discriminator (nn.Module): Network for the discriminator. + real_data (Tensor): Real input data. + fake_data (Tensor): Fake input data. + weight (Tensor): Weight tensor. Default: None. + + Returns: + Tensor: A tensor for gradient penalty. + """ + + batch_size = real_data.size(0) + alpha = real_data.new_tensor(torch.rand(batch_size, 1, 1, 1)) + + # interpolate between real_data and fake_data + interpolates = alpha * real_data + (1. - alpha) * fake_data + interpolates = autograd.Variable(interpolates, requires_grad=True) + + disc_interpolates = discriminator(interpolates) + gradients = autograd.grad( + outputs=disc_interpolates, + inputs=interpolates, + grad_outputs=torch.ones_like(disc_interpolates), + create_graph=True, + retain_graph=True, + only_inputs=True)[0] + + if weight is not None: + gradients = gradients * weight + + gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean() + if weight is not None: + gradients_penalty /= torch.mean(weight) + + return gradients_penalty + + +@LOSS_REGISTRY.register() +class GANFeatLoss(nn.Module): + """Define feature matching loss for gans + + Args: + criterion (str): Support 'l1', 'l2', 'charbonnier'. + loss_weight (float): Loss weight. Default: 1.0. + reduction (str): Specifies the reduction to apply to the output. + Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. + """ + + def __init__(self, criterion='l1', loss_weight=1.0, reduction='mean'): + super(GANFeatLoss, self).__init__() + if criterion == 'l1': + self.loss_op = L1Loss(loss_weight, reduction) + elif criterion == 'l2': + self.loss_op = MSELoss(loss_weight, reduction) + elif criterion == 'charbonnier': + self.loss_op = CharbonnierLoss(loss_weight, reduction) + else: + raise ValueError(f'Unsupported loss mode: {criterion}. 
Supported ones are: l1|l2|charbonnier') + + self.loss_weight = loss_weight + + def forward(self, pred_fake, pred_real): + num_d = len(pred_fake) + loss = 0 + for i in range(num_d): # for each discriminator + # last output is the final prediction, exclude it + num_intermediate_outputs = len(pred_fake[i]) - 1 + for j in range(num_intermediate_outputs): # for each layer output + unweighted_loss = self.loss_op(pred_fake[i][j], pred_real[i][j].detach()) + loss += unweighted_loss / num_d + return loss * self.loss_weight + + +class sobel_loss(nn.Module): + def __init__(self, weight=1.0): + super().__init__() + kernel_x = torch.Tensor([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]]) + kernel_y = torch.Tensor([[-1.0, -2.0, -1.0], [0.0, 0.0, 0.0], [1.0, 2.0, 1.0]]) + kernel = torch.stack([kernel_x, kernel_y]) + kernel.requires_grad = False + kernel = kernel.unsqueeze(1) + self.register_buffer('sobel_kernel', kernel) + self.weight = weight + + def forward(self, input_tensor, target_tensor): + b, c, h, w = input_tensor.size() + input_tensor = input_tensor.view(b * c, 1, h, w) + input_edge = F.conv2d(input_tensor, self.sobel_kernel, padding=1) + input_edge = input_edge.view(b, 2*c, h, w) + + target_tensor = target_tensor.view(-1, 1, h, w) + target_edge = F.conv2d(target_tensor, self.sobel_kernel, padding=1) + target_edge = target_edge.view(b, 2*c, h, w) + + return self.weight * F.l1_loss(input_edge, target_edge) + + +@LOSS_REGISTRY.register() +class ColorfulnessLoss(nn.Module): + """Colorfulness loss. + + Args: + loss_weight (float): Loss weight for Colorfulness loss. Default: 1.0. + + """ + + def __init__(self, loss_weight=1.0): + super(ColorfulnessLoss, self).__init__() + + self.loss_weight = loss_weight + + def forward(self, pred, **kwargs): + """ + Args: + pred (Tensor): of shape (N, C, H, W). Predicted tensor. + """ + colorfulness_loss = 0 + for i in range(pred.shape[0]): + (R, G, B) = pred[i][0], pred[i][1], pred[i][2] + rg = torch.abs(R - G) + yb = torch.abs(0.5 * (R+G) - B) + (rbMean, rbStd) = (torch.mean(rg), torch.std(rg)) + (ybMean, ybStd) = (torch.mean(yb), torch.std(yb)) + stdRoot = torch.sqrt((rbStd ** 2) + (ybStd ** 2)) + meanRoot = torch.sqrt((rbMean ** 2) + (ybMean ** 2)) + colorfulness = stdRoot + (0.3 * meanRoot) + colorfulness_loss += (1 - colorfulness) + return self.loss_weight * colorfulness_loss diff --git a/basicsr/metrics/__init__.py b/basicsr/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..247dd10dea2b184275156adb8dc92a4b611da4cd --- /dev/null +++ b/basicsr/metrics/__init__.py @@ -0,0 +1,20 @@ +from copy import deepcopy + +from basicsr.utils.registry import METRIC_REGISTRY +from .psnr_ssim import calculate_psnr, calculate_ssim +from .colorfulness import calculate_cf + +__all__ = ['calculate_psnr', 'calculate_ssim', 'calculate_cf'] + + +def calculate_metric(data, opt): + """Calculate metric from data and options. + + Args: + opt (dict): Configuration. It must contain: + type (str): Model type. 
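# GANFeatLoss above expects multiscale discriminator outputs: one list per
# discriminator whose last element is the final prediction; toy tensors below.
import torch

from basicsr.losses.losses import GANFeatLoss

feat_loss = GANFeatLoss(criterion='l1', loss_weight=10.0)
pred_fake = [[torch.rand(1, 8, 16, 16), torch.rand(1, 1, 8, 8)]]   # one discriminator
pred_real = [[torch.rand(1, 8, 16, 16), torch.rand(1, 1, 8, 8)]]
loss = feat_loss(pred_fake, pred_real)    # matches intermediate features only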
+ """ + opt = deepcopy(opt) + metric_type = opt.pop('type') + metric = METRIC_REGISTRY.get(metric_type)(**data, **opt) + return metric diff --git a/basicsr/metrics/__pycache__/__init__.cpython-39.pyc b/basicsr/metrics/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02b1a9e046ee14376f7019664d228d849a442568 Binary files /dev/null and b/basicsr/metrics/__pycache__/__init__.cpython-39.pyc differ diff --git a/basicsr/metrics/__pycache__/colorfulness.cpython-39.pyc b/basicsr/metrics/__pycache__/colorfulness.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6c55a313888de554a8c26fed4e224f190416193 Binary files /dev/null and b/basicsr/metrics/__pycache__/colorfulness.cpython-39.pyc differ diff --git a/basicsr/metrics/__pycache__/custom_fid.cpython-39.pyc b/basicsr/metrics/__pycache__/custom_fid.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef08fb81d315c322d832242930d457e3b5e804d7 Binary files /dev/null and b/basicsr/metrics/__pycache__/custom_fid.cpython-39.pyc differ diff --git a/basicsr/metrics/__pycache__/metric_util.cpython-39.pyc b/basicsr/metrics/__pycache__/metric_util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ba747cd175fece1d140f0776b28db811db81eaf Binary files /dev/null and b/basicsr/metrics/__pycache__/metric_util.cpython-39.pyc differ diff --git a/basicsr/metrics/__pycache__/psnr_ssim.cpython-39.pyc b/basicsr/metrics/__pycache__/psnr_ssim.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..431a5a3594cba45b85165f213c04ca19b38074e8 Binary files /dev/null and b/basicsr/metrics/__pycache__/psnr_ssim.cpython-39.pyc differ diff --git a/basicsr/metrics/colorfulness.py b/basicsr/metrics/colorfulness.py new file mode 100644 index 0000000000000000000000000000000000000000..a2ee6537c0119ec5576f0cf4898b10351b7a2a49 --- /dev/null +++ b/basicsr/metrics/colorfulness.py @@ -0,0 +1,17 @@ +import cv2 +import numpy as np +from basicsr.utils.registry import METRIC_REGISTRY + + +@METRIC_REGISTRY.register() +def calculate_cf(img, **kwargs): + """Calculate Colorfulness. 
+ """ + (B, G, R) = cv2.split(img.astype('float')) + rg = np.absolute(R - G) + yb = np.absolute(0.5 * (R+G) - B) + (rbMean, rbStd) = (np.mean(rg), np.std(rg)) + (ybMean, ybStd) = (np.mean(yb), np.std(yb)) + stdRoot = np.sqrt((rbStd ** 2) + (ybStd ** 2)) + meanRoot = np.sqrt((rbMean ** 2) + (ybMean ** 2)) + return stdRoot + (0.3 * meanRoot) diff --git a/basicsr/metrics/custom_fid.py b/basicsr/metrics/custom_fid.py new file mode 100644 index 0000000000000000000000000000000000000000..4fe19a19be486ac0b38e726438281534280ade5f --- /dev/null +++ b/basicsr/metrics/custom_fid.py @@ -0,0 +1,260 @@ +import numpy as np +import torch.nn.functional as F +from scipy import linalg +import torch.nn as nn +from torchvision import models + + +class INCEPTION_V3_FID(nn.Module): + """pretrained InceptionV3 network returning feature maps""" + # Index of default block of inception to return, + # corresponds to output of final average pooling + DEFAULT_BLOCK_INDEX = 3 + + # Maps feature dimensionality to their output blocks indices + BLOCK_INDEX_BY_DIM = { + 64: 0, # First max pooling features + 192: 1, # Second max pooling featurs + 768: 2, # Pre-aux classifier features + 2048: 3 # Final average pooling features + } + + def __init__(self, + incep_state_dict, + output_blocks=[DEFAULT_BLOCK_INDEX], + resize_input=True): + """Build pretrained InceptionV3 + Parameters + ---------- + output_blocks : list of int + Indices of blocks to return features of. Possible values are: + - 0: corresponds to output of first max pooling + - 1: corresponds to output of second max pooling + - 2: corresponds to output which is fed to aux classifier + - 3: corresponds to output of final average pooling + resize_input : bool + If true, bilinearly resizes input to width and height 299 before + feeding input to model. 
As the network without fully connected + layers is fully convolutional, it should be able to handle inputs + of arbitrary size, so resizing might not be strictly needed + normalize_input : bool + If true, normalizes the input to the statistics the pretrained + Inception network expects + """ + super(INCEPTION_V3_FID, self).__init__() + + self.resize_input = resize_input + self.output_blocks = sorted(output_blocks) + self.last_needed_block = max(output_blocks) + + assert self.last_needed_block <= 3, \ + 'Last possible output block index is 3' + + self.blocks = nn.ModuleList() + + inception = models.inception_v3() + inception.load_state_dict(incep_state_dict) + for param in inception.parameters(): + param.requires_grad = False + + # Block 0: input to maxpool1 + block0 = [ + inception.Conv2d_1a_3x3, + inception.Conv2d_2a_3x3, + inception.Conv2d_2b_3x3, + nn.MaxPool2d(kernel_size=3, stride=2) + ] + self.blocks.append(nn.Sequential(*block0)) + + # Block 1: maxpool1 to maxpool2 + if self.last_needed_block >= 1: + block1 = [ + inception.Conv2d_3b_1x1, + inception.Conv2d_4a_3x3, + nn.MaxPool2d(kernel_size=3, stride=2) + ] + self.blocks.append(nn.Sequential(*block1)) + + # Block 2: maxpool2 to aux classifier + if self.last_needed_block >= 2: + block2 = [ + inception.Mixed_5b, + inception.Mixed_5c, + inception.Mixed_5d, + inception.Mixed_6a, + inception.Mixed_6b, + inception.Mixed_6c, + inception.Mixed_6d, + inception.Mixed_6e, + ] + self.blocks.append(nn.Sequential(*block2)) + + # Block 3: aux classifier to final avgpool + if self.last_needed_block >= 3: + block3 = [ + inception.Mixed_7a, + inception.Mixed_7b, + inception.Mixed_7c, + nn.AdaptiveAvgPool2d(output_size=(1, 1)) + ] + self.blocks.append(nn.Sequential(*block3)) + + def forward(self, inp): + """Get Inception feature maps + Parameters + ---------- + inp : torch.autograd.Variable + Input tensor of shape Bx3xHxW. Values are expected to be in + range (0, 1) + Returns + ------- + List of torch.autograd.Variable, corresponding to the selected output + block, sorted ascending by index + """ + outp = [] + x = inp + + if self.resize_input: + x = F.interpolate(x, size=(299, 299), mode='bilinear') + + x = x.clone() + # [-1.0, 1.0] --> [0, 1.0] + x = x * 0.5 + 0.5 + x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 + x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 + x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 + + for idx, block in enumerate(self.blocks): + x = block(x) + if idx in self.output_blocks: + outp.append(x) + + if idx == self.last_needed_block: + break + + return outp + + +def get_activations(images, model, batch_size, verbose=False): + """Calculates the activations of the pool_3 layer for all images. + Params: + -- images : Numpy array of dimension (n_images, 3, hi, wi). The values + must lie between 0 and 1. + -- model : Instance of inception model + -- batch_size : the images numpy array is split into batches with + batch size batch_size. A reasonable batch size depends + on the hardware. + -- verbose : If set to True and parameter out_step is given, the number + of calculated batches is reported. + Returns: + -- A numpy array of dimension (num images, dims) that contains the + activations of the given tensor when feeding inception with the + query tensor. + """ + model.eval() + + #d0 = images.shape[0] + d0 = int(images.size(0)) + if batch_size > d0: + print(('Warning: batch size is bigger than the data size. 
' 'Setting batch size to data size'))
+        batch_size = d0
+
+    n_batches = d0 // batch_size
+    n_used_imgs = n_batches * batch_size
+
+    pred_arr = np.empty((n_used_imgs, 2048))
+    for i in range(n_batches):
+        if verbose:
+            print('\rPropagating batch %d/%d' % (i + 1, n_batches), end='', flush=True)
+        start = i * batch_size
+        end = start + batch_size
+
+        batch = images[start:end]
+
+        pred = model(batch)[0]
+
+        # If model output is not scalar, apply global spatial average pooling.
+        # This happens if you choose a dimensionality not equal 2048.
+        if pred.shape[2] != 1 or pred.shape[3] != 1:
+            pred = F.adaptive_avg_pool2d(pred, output_size=(1, 1))
+
+        pred_arr[start:end] = pred.detach().cpu().numpy().reshape(batch_size, -1)
+
+    if verbose:
+        print(' done')
+
+    return pred_arr
+
+
+def calculate_activation_statistics(act):
+    """Calculation of the statistics used by the FID.
+    Params:
+    -- act   : Numpy array of dimension (n_images, dim (e.g. 2048)).
+    Returns:
+    -- mu    : The mean over samples of the activations of the pool_3 layer of
+               the inception model.
+    -- sigma : The covariance matrix of the activations of the pool_3 layer of
+               the inception model.
+    """
+    mu = np.mean(act, axis=0)
+    sigma = np.cov(act, rowvar=False)
+    return mu, sigma
+
+
+def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
+    """Numpy implementation of the Frechet Distance.
+    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
+    and X_2 ~ N(mu_2, C_2) is
+    d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
+    Stable version by Dougal J. Sutherland.
+    Params:
+    -- mu1   : Numpy array containing the activations of a layer of the
+               inception net (as returned by the function 'get_predictions')
+               for generated samples.
+    -- mu2   : The sample mean over activations, precalculated on a
+               representative dataset.
+    -- sigma1: The covariance matrix over activations for generated samples.
+    -- sigma2: The covariance matrix over activations, precalculated on a
+               representative dataset.
+    Returns:
+    --       : The Frechet Distance.
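+
+    Sanity check (editorial): for identical Gaussians (mu1 == mu2 and
+    sigma1 == sigma2) the distance is 0, because ||mu1 - mu2||^2 = 0 and
+    Tr(C + C - 2*sqrt(C*C)) = 0.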
+ """ + + mu1 = np.atleast_1d(mu1) + mu2 = np.atleast_1d(mu2) + + sigma1 = np.atleast_2d(sigma1) + sigma2 = np.atleast_2d(sigma2) + + assert mu1.shape == mu2.shape, \ + 'Training and test mean vectors have different lengths' + assert sigma1.shape == sigma2.shape, \ + 'Training and test covariances have different dimensions' + + diff = mu1 - mu2 + + # Product might be almost singular + covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ('fid calculation produces singular product; ' + 'adding %s to diagonal of cov estimates') % eps + print(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + # Numerical error might give slight imaginary component + if np.iscomplexobj(covmean): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + m = np.max(np.abs(covmean.imag)) + raise ValueError('Imaginary component {}'.format(m)) + covmean = covmean.real + + tr_covmean = np.trace(covmean) + + return (diff.dot(diff) + np.trace(sigma1) + + np.trace(sigma2) - 2 * tr_covmean) diff --git a/basicsr/metrics/metric_util.py b/basicsr/metrics/metric_util.py new file mode 100644 index 0000000000000000000000000000000000000000..4d18f0f7816431bed6af9d58319c6435bdf5c971 --- /dev/null +++ b/basicsr/metrics/metric_util.py @@ -0,0 +1,45 @@ +import numpy as np + +from basicsr.utils.matlab_functions import bgr2ycbcr + + +def reorder_image(img, input_order='HWC'): + """Reorder images to 'HWC' order. + + If the input_order is (h, w), return (h, w, 1); + If the input_order is (c, h, w), return (h, w, c); + If the input_order is (h, w, c), return as it is. + + Args: + img (ndarray): Input image. + input_order (str): Whether the input order is 'HWC' or 'CHW'. + If the input image shape is (h, w), input_order will not have + effects. Default: 'HWC'. + + Returns: + ndarray: reordered image. + """ + + if input_order not in ['HWC', 'CHW']: + raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'") + if len(img.shape) == 2: + img = img[..., None] + if input_order == 'CHW': + img = img.transpose(1, 2, 0) + return img + + +def to_y_channel(img): + """Change to Y channel of YCbCr. + + Args: + img (ndarray): Images with range [0, 255]. + + Returns: + (ndarray): Images with range [0, 255] (float type) without round. + """ + img = img.astype(np.float32) / 255. + if img.ndim == 3 and img.shape[2] == 3: + img = bgr2ycbcr(img, y_only=True) + img = img[..., None] + return img * 255. diff --git a/basicsr/metrics/psnr_ssim.py b/basicsr/metrics/psnr_ssim.py new file mode 100644 index 0000000000000000000000000000000000000000..241d04d210ebfe97319461f33a0e7341b60b295d --- /dev/null +++ b/basicsr/metrics/psnr_ssim.py @@ -0,0 +1,128 @@ +import cv2 +import numpy as np + +from basicsr.metrics.metric_util import reorder_image, to_y_channel +from basicsr.utils.registry import METRIC_REGISTRY + + +@METRIC_REGISTRY.register() +def calculate_psnr(img, img2, crop_border, input_order='HWC', test_y_channel=False, **kwargs): + """Calculate PSNR (Peak Signal-to-Noise Ratio). + + Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio + + Args: + img (ndarray): Images with range [0, 255]. + img2 (ndarray): Images with range [0, 255]. + crop_border (int): Cropped pixels in each edge of an image. These + pixels are not involved in the PSNR calculation. + input_order (str): Whether the input order is 'HWC' or 'CHW'. + Default: 'HWC'. + test_y_channel (bool): Test on Y channel of YCbCr. 
Default: False. + + Returns: + float: psnr result. + """ + + assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.') + if input_order not in ['HWC', 'CHW']: + raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"') + img = reorder_image(img, input_order=input_order) + img2 = reorder_image(img2, input_order=input_order) + img = img.astype(np.float64) + img2 = img2.astype(np.float64) + + if crop_border != 0: + img = img[crop_border:-crop_border, crop_border:-crop_border, ...] + img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] + + if test_y_channel: + img = to_y_channel(img) + img2 = to_y_channel(img2) + + mse = np.mean((img - img2)**2) + if mse == 0: + return float('inf') + return 20. * np.log10(255. / np.sqrt(mse)) + + +def _ssim(img, img2): + """Calculate SSIM (structural similarity) for one channel images. + + It is called by func:`calculate_ssim`. + + Args: + img (ndarray): Images with range [0, 255] with order 'HWC'. + img2 (ndarray): Images with range [0, 255] with order 'HWC'. + + Returns: + float: ssim result. + """ + + c1 = (0.01 * 255)**2 + c2 = (0.03 * 255)**2 + + img = img.astype(np.float64) + img2 = img2.astype(np.float64) + kernel = cv2.getGaussianKernel(11, 1.5) + window = np.outer(kernel, kernel.transpose()) + + mu1 = cv2.filter2D(img, -1, window)[5:-5, 5:-5] + mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] + mu1_sq = mu1**2 + mu2_sq = mu2**2 + mu1_mu2 = mu1 * mu2 + sigma1_sq = cv2.filter2D(img**2, -1, window)[5:-5, 5:-5] - mu1_sq + sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq + sigma12 = cv2.filter2D(img * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 + + ssim_map = ((2 * mu1_mu2 + c1) * (2 * sigma12 + c2)) / ((mu1_sq + mu2_sq + c1) * (sigma1_sq + sigma2_sq + c2)) + return ssim_map.mean() + + +@METRIC_REGISTRY.register() +def calculate_ssim(img, img2, crop_border, input_order='HWC', test_y_channel=False, **kwargs): + """Calculate SSIM (structural similarity). + + Ref: + Image quality assessment: From error visibility to structural similarity + + The results are the same as that of the official released MATLAB code in + https://ece.uwaterloo.ca/~z70wang/research/ssim/. + + For three-channel images, SSIM is calculated for each channel and then + averaged. + + Args: + img (ndarray): Images with range [0, 255]. + img2 (ndarray): Images with range [0, 255]. + crop_border (int): Cropped pixels in each edge of an image. These + pixels are not involved in the SSIM calculation. + input_order (str): Whether the input order is 'HWC' or 'CHW'. + Default: 'HWC'. + test_y_channel (bool): Test on Y channel of YCbCr. Default: False. + + Returns: + float: ssim result. + """ + + assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.') + if input_order not in ['HWC', 'CHW']: + raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"') + img = reorder_image(img, input_order=input_order) + img2 = reorder_image(img2, input_order=input_order) + img = img.astype(np.float64) + img2 = img2.astype(np.float64) + + if crop_border != 0: + img = img[crop_border:-crop_border, crop_border:-crop_border, ...] + img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] 
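+    # Editorial note: each channel is scored separately by _ssim (11x11
+    # Gaussian window, sigma 1.5) and the per-channel values are averaged below.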
+ + if test_y_channel: + img = to_y_channel(img) + img2 = to_y_channel(img2) + + ssims = [] + for i in range(img.shape[2]): + ssims.append(_ssim(img[..., i], img2[..., i])) + return np.array(ssims).mean() diff --git a/basicsr/models/__init__.py b/basicsr/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..285ce3ef90550f5cd6cb61467388f8ae4b73f14a --- /dev/null +++ b/basicsr/models/__init__.py @@ -0,0 +1,30 @@ +import importlib +from copy import deepcopy +from os import path as osp + +from basicsr.utils import get_root_logger, scandir +from basicsr.utils.registry import MODEL_REGISTRY + +__all__ = ['build_model'] + +# automatically scan and import model modules for registry +# scan all the files under the 'models' folder and collect files ending with +# '_model.py' +model_folder = osp.dirname(osp.abspath(__file__)) +model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')] +# import all the model modules +_model_modules = [importlib.import_module(f'basicsr.models.{file_name}') for file_name in model_filenames] + + +def build_model(opt): + """Build model from options. + + Args: + opt (dict): Configuration. It must contain: + model_type (str): Model type. + """ + opt = deepcopy(opt) + model = MODEL_REGISTRY.get(opt['model_type'])(opt) + logger = get_root_logger() + logger.info(f'Model [{model.__class__.__name__}] is created.') + return model diff --git a/basicsr/models/__pycache__/__init__.cpython-39.pyc b/basicsr/models/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..145830d70a586fd0f8e3f22bc131b10a9d4b2241 Binary files /dev/null and b/basicsr/models/__pycache__/__init__.cpython-39.pyc differ diff --git a/basicsr/models/__pycache__/base_model.cpython-39.pyc b/basicsr/models/__pycache__/base_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..805c841df43e4253f67257cc771ae3a77f0840a0 Binary files /dev/null and b/basicsr/models/__pycache__/base_model.cpython-39.pyc differ diff --git a/basicsr/models/__pycache__/color_model.cpython-39.pyc b/basicsr/models/__pycache__/color_model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dcc041cbef64e62d0411800a9bc98a40eded121 Binary files /dev/null and b/basicsr/models/__pycache__/color_model.cpython-39.pyc differ diff --git a/basicsr/models/__pycache__/lr_scheduler.cpython-39.pyc b/basicsr/models/__pycache__/lr_scheduler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33a0757c03d0c603fb7e8741e7ce850fc5bdafe9 Binary files /dev/null and b/basicsr/models/__pycache__/lr_scheduler.cpython-39.pyc differ diff --git a/basicsr/models/base_model.py b/basicsr/models/base_model.py new file mode 100644 index 0000000000000000000000000000000000000000..d662195db4634fc4a45407588e23935df3df6527 --- /dev/null +++ b/basicsr/models/base_model.py @@ -0,0 +1,382 @@ +import os +import time +import torch +from collections import OrderedDict +from copy import deepcopy +from torch.nn.parallel import DataParallel, DistributedDataParallel + +from basicsr.models import lr_scheduler as lr_scheduler +from basicsr.utils import get_root_logger +from basicsr.utils.dist_util import master_only + + +class BaseModel(): + """Base model.""" + + def __init__(self, opt): + self.opt = opt + self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu') + self.is_train = opt['is_train'] + self.schedulers = [] + self.optimizers = [] + + def 
feed_data(self, data): + pass + + def optimize_parameters(self): + pass + + def get_current_visuals(self): + pass + + def save(self, epoch, current_iter): + """Save networks and training state.""" + pass + + def validation(self, dataloader, current_iter, tb_logger, save_img=False): + """Validation function. + + Args: + dataloader (torch.utils.data.DataLoader): Validation dataloader. + current_iter (int): Current iteration. + tb_logger (tensorboard logger): Tensorboard logger. + save_img (bool): Whether to save images. Default: False. + """ + if self.opt['dist']: + self.dist_validation(dataloader, current_iter, tb_logger, save_img) + else: + self.nondist_validation(dataloader, current_iter, tb_logger, save_img) + + def _initialize_best_metric_results(self, dataset_name): + """Initialize the best metric results dict for recording the best metric value and iteration.""" + if hasattr(self, 'best_metric_results') and dataset_name in self.best_metric_results: + return + elif not hasattr(self, 'best_metric_results'): + self.best_metric_results = dict() + + # add a dataset record + record = dict() + for metric, content in self.opt['val']['metrics'].items(): + better = content.get('better', 'higher') + init_val = float('-inf') if better == 'higher' else float('inf') + record[metric] = dict(better=better, val=init_val, iter=-1) + self.best_metric_results[dataset_name] = record + + def _update_best_metric_result(self, dataset_name, metric, val, current_iter): + if self.best_metric_results[dataset_name][metric]['better'] == 'higher': + if val >= self.best_metric_results[dataset_name][metric]['val']: + self.best_metric_results[dataset_name][metric]['val'] = val + self.best_metric_results[dataset_name][metric]['iter'] = current_iter + else: + if val <= self.best_metric_results[dataset_name][metric]['val']: + self.best_metric_results[dataset_name][metric]['val'] = val + self.best_metric_results[dataset_name][metric]['iter'] = current_iter + + def model_ema(self, decay=0.999): + net_g = self.get_bare_model(self.net_g) + + net_g_params = dict(net_g.named_parameters()) + net_g_ema_params = dict(self.net_g_ema.named_parameters()) + + for k in net_g_ema_params.keys(): + net_g_ema_params[k].data.mul_(decay).add_(net_g_params[k].data, alpha=1 - decay) + + def get_current_log(self): + return self.log_dict + + def model_to_device(self, net): + """Model to device. It also warps models with DistributedDataParallel + or DataParallel. 
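+        Editorial note: DistributedDataParallel is used when opt['dist'] is
+        set, DataParallel when num_gpu > 1, and otherwise the module is only
+        moved to the device.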
+ + Args: + net (nn.Module) + """ + net = net.to(self.device) + if self.opt['dist']: + find_unused_parameters = self.opt.get('find_unused_parameters', False) + net = DistributedDataParallel( + net, device_ids=[torch.cuda.current_device()], find_unused_parameters=find_unused_parameters) + elif self.opt['num_gpu'] > 1: + net = DataParallel(net) + return net + + def get_optimizer(self, optim_type, params, lr, **kwargs): + if optim_type == 'Adam': + optimizer = torch.optim.Adam(params, lr, **kwargs) + elif optim_type == 'AdamW': + optimizer = torch.optim.AdamW(params, lr, **kwargs) + else: + raise NotImplementedError(f'optimizer {optim_type} is not supperted yet.') + return optimizer + + def setup_schedulers(self): + """Set up schedulers.""" + train_opt = self.opt['train'] + scheduler_type = train_opt['scheduler'].pop('type') + if scheduler_type in ['MultiStepLR', 'MultiStepRestartLR']: + for optimizer in self.optimizers: + self.schedulers.append(lr_scheduler.MultiStepRestartLR(optimizer, **train_opt['scheduler'])) + elif scheduler_type == 'CosineAnnealingRestartLR': + for optimizer in self.optimizers: + self.schedulers.append(lr_scheduler.CosineAnnealingRestartLR(optimizer, **train_opt['scheduler'])) + else: + raise NotImplementedError(f'Scheduler {scheduler_type} is not implemented yet.') + + def get_bare_model(self, net): + """Get bare model, especially under wrapping with + DistributedDataParallel or DataParallel. + """ + if isinstance(net, (DataParallel, DistributedDataParallel)): + net = net.module + return net + + @master_only + def print_network(self, net): + """Print the str and parameter number of a network. + + Args: + net (nn.Module) + """ + if isinstance(net, (DataParallel, DistributedDataParallel)): + net_cls_str = f'{net.__class__.__name__} - {net.module.__class__.__name__}' + else: + net_cls_str = f'{net.__class__.__name__}' + + net = self.get_bare_model(net) + net_str = str(net) + net_params = sum(map(lambda x: x.numel(), net.parameters())) + + logger = get_root_logger() + logger.info(f'Network: {net_cls_str}, with parameters: {net_params:,d}') + logger.info(net_str) + + def _set_lr(self, lr_groups_l): + """Set learning rate for warmup. + + Args: + lr_groups_l (list): List for lr_groups, each for an optimizer. + """ + for optimizer, lr_groups in zip(self.optimizers, lr_groups_l): + for param_group, lr in zip(optimizer.param_groups, lr_groups): + param_group['lr'] = lr + + def _get_init_lr(self): + """Get the initial lr, which is set by the scheduler. + """ + init_lr_groups_l = [] + for optimizer in self.optimizers: + init_lr_groups_l.append([v['initial_lr'] for v in optimizer.param_groups]) + return init_lr_groups_l + + def update_learning_rate(self, current_iter, warmup_iter=-1): + """Update learning rate. + + Args: + current_iter (int): Current iteration. + warmup_iter (int): Warmup iter numbers. -1 for no warmup. + Default: -1. 
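+
+        Example (editorial): with warmup_iter=1000 and an initial lr of
+        1e-4, iteration 100 runs at lr = 1e-4 * 100 / 1000 = 1e-5; once
+        current_iter >= warmup_iter, the scheduler values are used as-is.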
+ """ + if current_iter > 1: + for scheduler in self.schedulers: + scheduler.step() + # set up warm-up learning rate + if current_iter < warmup_iter: + # get initial lr for each group + init_lr_g_l = self._get_init_lr() + # modify warming-up learning rates + # currently only support linearly warm up + warm_up_lr_l = [] + for init_lr_g in init_lr_g_l: + warm_up_lr_l.append([v / warmup_iter * current_iter for v in init_lr_g]) + # set learning rate + self._set_lr(warm_up_lr_l) + + def get_current_learning_rate(self): + return [param_group['lr'] for param_group in self.optimizers[0].param_groups] + + @master_only + def save_network(self, net, net_label, current_iter, param_key='params'): + """Save networks. + + Args: + net (nn.Module | list[nn.Module]): Network(s) to be saved. + net_label (str): Network label. + current_iter (int): Current iter number. + param_key (str | list[str]): The parameter key(s) to save network. + Default: 'params'. + """ + if current_iter == -1: + current_iter = 'latest' + save_filename = f'{net_label}_{current_iter}.pth' + save_path = os.path.join(self.opt['path']['models'], save_filename) + + net = net if isinstance(net, list) else [net] + param_key = param_key if isinstance(param_key, list) else [param_key] + assert len(net) == len(param_key), 'The lengths of net and param_key should be the same.' + + save_dict = {} + for net_, param_key_ in zip(net, param_key): + net_ = self.get_bare_model(net_) + state_dict = net_.state_dict() + for key, param in state_dict.items(): + if key.startswith('module.'): # remove unnecessary 'module.' + key = key[7:] + state_dict[key] = param.cpu() + save_dict[param_key_] = state_dict + + # avoid occasional writing errors + retry = 3 + while retry > 0: + try: + torch.save(save_dict, save_path) + except Exception as e: + logger = get_root_logger() + logger.warning(f'Save model error: {e}, remaining retry times: {retry - 1}') + time.sleep(1) + else: + break + finally: + retry -= 1 + if retry == 0: + logger.warning(f'Still cannot save {save_path}. Just ignore it.') + # raise IOError(f'Cannot save {save_path}.') + + def _print_different_keys_loading(self, crt_net, load_net, strict=True): + """Print keys with different name or different size when loading models. + + 1. Print keys with different names. + 2. If strict=False, print the same key but with different tensor size. + It also ignore these keys with different sizes (not load). + + Args: + crt_net (torch model): Current network. + load_net (dict): Loaded network. + strict (bool): Whether strictly loaded. Default: True. + """ + crt_net = self.get_bare_model(crt_net) + crt_net = crt_net.state_dict() + crt_net_keys = set(crt_net.keys()) + load_net_keys = set(load_net.keys()) + + logger = get_root_logger() + if crt_net_keys != load_net_keys: + logger.warning('Current net - loaded net:') + for v in sorted(list(crt_net_keys - load_net_keys)): + logger.warning(f' {v}') + logger.warning('Loaded net - current net:') + for v in sorted(list(load_net_keys - crt_net_keys)): + logger.warning(f' {v}') + + # check the size for the same keys + if not strict: + common_keys = crt_net_keys & load_net_keys + for k in common_keys: + if crt_net[k].size() != load_net[k].size(): + logger.warning(f'Size different, ignore [{k}]: crt_net: ' + f'{crt_net[k].shape}; load_net: {load_net[k].shape}') + load_net[k + '.ignore'] = load_net.pop(k) + + def load_network(self, net, load_path, strict=True, param_key='params'): + """Load network. + + Args: + load_path (str): The path of networks to be loaded. 
+ net (nn.Module): Network. + strict (bool): Whether strictly loaded. + param_key (str): The parameter key of loaded network. If set to + None, use the root 'path'. + Default: 'params'. + """ + logger = get_root_logger() + net = self.get_bare_model(net) + load_net = torch.load(load_path, map_location=lambda storage, loc: storage) + if param_key is not None: + if param_key not in load_net and 'params' in load_net: + param_key = 'params' + logger.info('Loading: params_ema does not exist, use params.') + load_net = load_net[param_key] + logger.info(f'Loading {net.__class__.__name__} model from {load_path}, with param key: [{param_key}].') + # remove unnecessary 'module.' + for k, v in deepcopy(load_net).items(): + if k.startswith('module.'): + load_net[k[7:]] = v + load_net.pop(k) + self._print_different_keys_loading(net, load_net, strict) + net.load_state_dict(load_net, strict=strict) + + @master_only + def save_training_state(self, epoch, current_iter): + """Save training states during training, which will be used for + resuming. + + Args: + epoch (int): Current epoch. + current_iter (int): Current iteration. + """ + if current_iter != -1: + state = {'epoch': epoch, 'iter': current_iter, 'optimizers': [], 'schedulers': []} + for o in self.optimizers: + state['optimizers'].append(o.state_dict()) + for s in self.schedulers: + state['schedulers'].append(s.state_dict()) + save_filename = f'{current_iter}.state' + save_path = os.path.join(self.opt['path']['training_states'], save_filename) + + # avoid occasional writing errors + retry = 3 + while retry > 0: + try: + torch.save(state, save_path) + except Exception as e: + logger = get_root_logger() + logger.warning(f'Save training state error: {e}, remaining retry times: {retry - 1}') + time.sleep(1) + else: + break + finally: + retry -= 1 + if retry == 0: + logger.warning(f'Still cannot save {save_path}. Just ignore it.') + # raise IOError(f'Cannot save {save_path}.') + + def resume_training(self, resume_state): + """Reload the optimizers and schedulers for resumed training. + + Args: + resume_state (dict): Resume state. + """ + resume_optimizers = resume_state['optimizers'] + resume_schedulers = resume_state['schedulers'] + assert len(resume_optimizers) == len(self.optimizers), 'Wrong lengths of optimizers' + assert len(resume_schedulers) == len(self.schedulers), 'Wrong lengths of schedulers' + for i, o in enumerate(resume_optimizers): + self.optimizers[i].load_state_dict(o) + for i, s in enumerate(resume_schedulers): + self.schedulers[i].load_state_dict(s) + + def reduce_loss_dict(self, loss_dict): + """reduce loss dict. + + In distributed training, it averages the losses among different GPUs . + + Args: + loss_dict (OrderedDict): Loss dict. 
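+
+        Returns:
+            OrderedDict: Loss names mapped to scalar floats (on rank 0 the
+            values are averaged over all GPUs when dist is enabled).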
+ """ + with torch.no_grad(): + if self.opt['dist']: + keys = [] + losses = [] + for name, value in loss_dict.items(): + keys.append(name) + losses.append(value) + losses = torch.stack(losses, 0) + torch.distributed.reduce(losses, dst=0) + if self.opt['rank'] == 0: + losses /= self.opt['world_size'] + loss_dict = {key: loss for key, loss in zip(keys, losses)} + + log_dict = OrderedDict() + for name, value in loss_dict.items(): + log_dict[name] = value.mean().item() + + return log_dict diff --git a/basicsr/models/color_model.py b/basicsr/models/color_model.py new file mode 100644 index 0000000000000000000000000000000000000000..bd02b171c64e81c725a8071e357b7486f0015b3b --- /dev/null +++ b/basicsr/models/color_model.py @@ -0,0 +1,369 @@ +import os +import torch +from collections import OrderedDict +from os import path as osp +from tqdm import tqdm +import numpy as np + +from basicsr.archs import build_network +from basicsr.losses import build_loss +from basicsr.metrics import calculate_metric +from basicsr.utils import get_root_logger, imwrite, tensor2img +from basicsr.utils.img_util import tensor_lab2rgb +from basicsr.utils.dist_util import master_only +from basicsr.utils.registry import MODEL_REGISTRY +from .base_model import BaseModel +from basicsr.metrics.custom_fid import INCEPTION_V3_FID, get_activations, calculate_activation_statistics, calculate_frechet_distance +from basicsr.utils.color_enhance import color_enhacne_blend + + +@MODEL_REGISTRY.register() +class ColorModel(BaseModel): + """Colorization model for single image colorization.""" + + def __init__(self, opt): + super(ColorModel, self).__init__(opt) + + # define network net_g + self.net_g = build_network(opt['network_g']) + self.net_g = self.model_to_device(self.net_g) + self.print_network(self.net_g) + + # load pretrained model for net_g + load_path = self.opt['path'].get('pretrain_network_g', None) + if load_path is not None: + param_key = self.opt['path'].get('param_key_g', 'params') + self.load_network(self.net_g, load_path, self.opt['path'].get('strict_load_g', True), param_key) + + if self.is_train: + self.init_training_settings() + + def init_training_settings(self): + train_opt = self.opt['train'] + + self.ema_decay = train_opt.get('ema_decay', 0) + if self.ema_decay > 0: + logger = get_root_logger() + logger.info(f'Use Exponential Moving Average with decay: {self.ema_decay}') + # define network net_g with Exponential Moving Average (EMA) + # net_g_ema is used only for testing on one GPU and saving + # There is no need to wrap with DistributedDataParallel + self.net_g_ema = build_network(self.opt['network_g']).to(self.device) + # load pretrained model + load_path = self.opt['path'].get('pretrain_network_g', None) + if load_path is not None: + self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema') + else: + self.model_ema(0) # copy net_g weight + self.net_g_ema.eval() + + # define network net_d + self.net_d = build_network(self.opt['network_d']) + self.net_d = self.model_to_device(self.net_d) + self.print_network(self.net_d) + + # load pretrained model for net_d + load_path = self.opt['path'].get('pretrain_network_d', None) + if load_path is not None: + param_key = self.opt['path'].get('param_key_d', 'params') + self.load_network(self.net_d, load_path, self.opt['path'].get('strict_load_d', True), param_key) + + self.net_g.train() + self.net_d.train() + + # define losses + if train_opt.get('pixel_opt'): + self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device) + 
else: + self.cri_pix = None + + if train_opt.get('perceptual_opt'): + self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device) + else: + self.cri_perceptual = None + + if train_opt.get('gan_opt'): + self.cri_gan = build_loss(train_opt['gan_opt']).to(self.device) + else: + self.cri_gan = None + + if self.cri_pix is None and self.cri_perceptual is None: + raise ValueError('Both pixel and perceptual losses are None.') + + if train_opt.get('colorfulness_opt'): + self.cri_colorfulness = build_loss(train_opt['colorfulness_opt']).to(self.device) + else: + self.cri_colorfulness = None + + # set up optimizers and schedulers + self.setup_optimizers() + self.setup_schedulers() + + # set real dataset cache for fid metric computing + self.real_mu, self.real_sigma = None, None + if self.opt['val'].get('metrics') is not None and self.opt['val']['metrics'].get('fid') is not None: + self._prepare_inception_model_fid() + + def setup_optimizers(self): + train_opt = self.opt['train'] + # optim_params_g = [] + # for k, v in self.net_g.named_parameters(): + # if v.requires_grad: + # optim_params_g.append(v) + # else: + # logger = get_root_logger() + # logger.warning(f'Params {k} will not be optimized.') + optim_params_g = self.net_g.parameters() + + # optimizer g + optim_type = train_opt['optim_g'].pop('type') + self.optimizer_g = self.get_optimizer(optim_type, optim_params_g, **train_opt['optim_g']) + self.optimizers.append(self.optimizer_g) + + # optimizer d + optim_type = train_opt['optim_d'].pop('type') + self.optimizer_d = self.get_optimizer(optim_type, self.net_d.parameters(), **train_opt['optim_d']) + self.optimizers.append(self.optimizer_d) + + def feed_data(self, data): + self.lq = data['lq'].to(self.device) + self.lq_rgb = tensor_lab2rgb(torch.cat([self.lq, torch.zeros_like(self.lq), torch.zeros_like(self.lq)], dim=1)) + if 'gt' in data: + self.gt = data['gt'].to(self.device) + self.gt_lab = torch.cat([self.lq, self.gt], dim=1) + self.gt_rgb = tensor_lab2rgb(self.gt_lab) + + if self.opt['train'].get('color_enhance', False): + for i in range(self.gt_rgb.shape[0]): + self.gt_rgb[i] = color_enhacne_blend(self.gt_rgb[i], factor=self.opt['train'].get('color_enhance_factor')) + + def optimize_parameters(self, current_iter): + # optimize net_g + for p in self.net_d.parameters(): + p.requires_grad = False + self.optimizer_g.zero_grad() + + self.output_ab = self.net_g(self.lq_rgb) + self.output_lab = torch.cat([self.lq, self.output_ab], dim=1) + self.output_rgb = tensor_lab2rgb(self.output_lab) + + l_g_total = 0 + loss_dict = OrderedDict() + # pixel loss + if self.cri_pix: + l_g_pix = self.cri_pix(self.output_ab, self.gt) + l_g_total += l_g_pix + loss_dict['l_g_pix'] = l_g_pix + + # perceptual loss + if self.cri_perceptual: + l_g_percep, l_g_style = self.cri_perceptual(self.output_rgb, self.gt_rgb) + if l_g_percep is not None: + l_g_total += l_g_percep + loss_dict['l_g_percep'] = l_g_percep + if l_g_style is not None: + l_g_total += l_g_style + loss_dict['l_g_style'] = l_g_style + # gan loss + if self.cri_gan: + fake_g_pred = self.net_d(self.output_rgb) + l_g_gan = self.cri_gan(fake_g_pred, target_is_real=True, is_disc=False) + l_g_total += l_g_gan + loss_dict['l_g_gan'] = l_g_gan + # colorfulness loss + if self.cri_colorfulness: + l_g_color = self.cri_colorfulness(self.output_rgb) + l_g_total += l_g_color + loss_dict['l_g_color'] = l_g_color + + l_g_total.backward() + self.optimizer_g.step() + + # optimize net_d + for p in self.net_d.parameters(): + p.requires_grad = True + 
self.optimizer_d.zero_grad() + + real_d_pred = self.net_d(self.gt_rgb) + fake_d_pred = self.net_d(self.output_rgb.detach()) + l_d = self.cri_gan(real_d_pred, target_is_real=True, is_disc=True) + self.cri_gan(fake_d_pred, target_is_real=False, is_disc=True) + loss_dict['l_d'] = l_d + loss_dict['real_score'] = real_d_pred.detach().mean() + loss_dict['fake_score'] = fake_d_pred.detach().mean() + + l_d.backward() + self.optimizer_d.step() + + self.log_dict = self.reduce_loss_dict(loss_dict) + + if self.ema_decay > 0: + self.model_ema(decay=self.ema_decay) + + def get_current_visuals(self): + out_dict = OrderedDict() + out_dict['lq'] = self.lq_rgb.detach().cpu() + out_dict['result'] = self.output_rgb.detach().cpu() + if self.opt['logger'].get('save_snapshot_verbose', False): # only for verbose + self.output_lab_chroma = torch.cat([torch.ones_like(self.lq) * 50, self.output_ab], dim=1) + self.output_rgb_chroma = tensor_lab2rgb(self.output_lab_chroma) + out_dict['result_chroma'] = self.output_rgb_chroma.detach().cpu() + + if hasattr(self, 'gt'): + out_dict['gt'] = self.gt_rgb.detach().cpu() + if self.opt['logger'].get('save_snapshot_verbose', False): # only for verbose + self.gt_lab_chroma = torch.cat([torch.ones_like(self.lq) * 50, self.gt], dim=1) + self.gt_rgb_chroma = tensor_lab2rgb(self.gt_lab_chroma) + out_dict['gt_chroma'] = self.gt_rgb_chroma.detach().cpu() + return out_dict + + def test(self): + if hasattr(self, 'net_g_ema'): + self.net_g_ema.eval() + with torch.no_grad(): + self.output_ab = self.net_g_ema(self.lq_rgb) + self.output_lab = torch.cat([self.lq, self.output_ab], dim=1) + self.output_rgb = tensor_lab2rgb(self.output_lab) + else: + self.net_g.eval() + with torch.no_grad(): + self.output_ab = self.net_g(self.lq_rgb) + self.output_lab = torch.cat([self.lq, self.output_ab], dim=1) + self.output_rgb = tensor_lab2rgb(self.output_lab) + self.net_g.train() + + def dist_validation(self, dataloader, current_iter, tb_logger, save_img): + if self.opt['rank'] == 0: + self.nondist_validation(dataloader, current_iter, tb_logger, save_img) + + def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): + dataset_name = dataloader.dataset.opt['name'] + with_metrics = self.opt['val'].get('metrics') is not None + use_pbar = self.opt['val'].get('pbar', False) + + if with_metrics and not hasattr(self, 'metric_results'): # only execute in the first run + self.metric_results = {metric: 0 for metric in self.opt['val']['metrics'].keys()} + # initialize the best metric results for each dataset_name (supporting multiple validation datasets) + if with_metrics: + self._initialize_best_metric_results(dataset_name) + # zero self.metric_results + if with_metrics: + self.metric_results = {metric: 0 for metric in self.metric_results} + + metric_data = dict() + if use_pbar: + pbar = tqdm(total=len(dataloader), unit='image') + + if self.opt['val']['metrics'].get('fid') is not None: + fake_acts_set, acts_set = [], [] + + for idx, val_data in enumerate(dataloader): + # if idx == 100: + # break + img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0] + if hasattr(self, 'gt'): + del self.gt + self.feed_data(val_data) + self.test() + + visuals = self.get_current_visuals() + sr_img = tensor2img([visuals['result']]) + metric_data['img'] = sr_img + if 'gt' in visuals: + gt_img = tensor2img([visuals['gt']]) + metric_data['img2'] = gt_img + + torch.cuda.empty_cache() + + if save_img: + if self.opt['is_train']: + save_dir = osp.join(self.opt['path']['visualization'], img_name) + for key in 
visuals: + save_path = os.path.join(save_dir, '{}_{}.png'.format(current_iter, key)) + img = tensor2img(visuals[key]) + imwrite(img, save_path) + else: + if self.opt['val']['suffix']: + save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, + f'{img_name}_{self.opt["val"]["suffix"]}.png') + else: + save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, + f'{img_name}_{self.opt["name"]}.png') + imwrite(sr_img, save_img_path) + + if with_metrics: + # calculate metrics + for name, opt_ in self.opt['val']['metrics'].items(): + if name == 'fid': + pred, gt = visuals['result'].cuda(), visuals['gt'].cuda() + fake_act = get_activations(pred, self.inception_model_fid, 1) + fake_acts_set.append(fake_act) + if self.real_mu is None: + real_act = get_activations(gt, self.inception_model_fid, 1) + acts_set.append(real_act) + else: + self.metric_results[name] += calculate_metric(metric_data, opt_) + if use_pbar: + pbar.update(1) + pbar.set_description(f'Test {img_name}') + if use_pbar: + pbar.close() + + if with_metrics: + if self.opt['val']['metrics'].get('fid') is not None: + if self.real_mu is None: + acts_set = np.concatenate(acts_set, 0) + self.real_mu, self.real_sigma = calculate_activation_statistics(acts_set) + fake_acts_set = np.concatenate(fake_acts_set, 0) + fake_mu, fake_sigma = calculate_activation_statistics(fake_acts_set) + + fid_score = calculate_frechet_distance(self.real_mu, self.real_sigma, fake_mu, fake_sigma) + self.metric_results['fid'] = fid_score + + for metric in self.metric_results.keys(): + if metric != 'fid': + self.metric_results[metric] /= (idx + 1) + # update the best metric result + self._update_best_metric_result(dataset_name, metric, self.metric_results[metric], current_iter) + + self._log_validation_metric_values(current_iter, dataset_name, tb_logger) + + def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger): + log_str = f'Validation {dataset_name}\n' + for metric, value in self.metric_results.items(): + log_str += f'\t # {metric}: {value:.4f}' + if hasattr(self, 'best_metric_results'): + log_str += (f'\tBest: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ ' + f'{self.best_metric_results[dataset_name][metric]["iter"]} iter') + log_str += '\n' + + logger = get_root_logger() + logger.info(log_str) + if tb_logger: + for metric, value in self.metric_results.items(): + tb_logger.add_scalar(f'metrics/{dataset_name}/{metric}', value, current_iter) + + def _prepare_inception_model_fid(self, path='pretrain/inception_v3_google-1a9a5a14.pth'): + incep_state_dict = torch.load(path, map_location='cpu') + block_idx = INCEPTION_V3_FID.BLOCK_INDEX_BY_DIM[2048] + self.inception_model_fid = INCEPTION_V3_FID(incep_state_dict, [block_idx]) + self.inception_model_fid.cuda() + self.inception_model_fid.eval() + + @master_only + def save_training_images(self, current_iter): + visuals = self.get_current_visuals() + save_dir = osp.join(self.opt['root_path'], 'experiments', self.opt['name'], 'training_images_snapshot') + os.makedirs(save_dir, exist_ok=True) + + for key in visuals: + save_path = os.path.join(save_dir, '{}_{}.png'.format(current_iter, key)) + img = tensor2img(visuals[key]) + imwrite(img, save_path) + + def save(self, epoch, current_iter): + if hasattr(self, 'net_g_ema'): + self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema']) + else: + self.save_network(self.net_g, 'net_g', current_iter) + self.save_network(self.net_d, 'net_d', current_iter) + 
self.save_training_state(epoch, current_iter) diff --git a/basicsr/models/lr_scheduler.py b/basicsr/models/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..11e1c6c7a74f5233accda52370f92681d3d3cecf --- /dev/null +++ b/basicsr/models/lr_scheduler.py @@ -0,0 +1,96 @@ +import math +from collections import Counter +from torch.optim.lr_scheduler import _LRScheduler + + +class MultiStepRestartLR(_LRScheduler): + """ MultiStep with restarts learning rate scheme. + + Args: + optimizer (torch.nn.optimizer): Torch optimizer. + milestones (list): Iterations that will decrease learning rate. + gamma (float): Decrease ratio. Default: 0.1. + restarts (list): Restart iterations. Default: [0]. + restart_weights (list): Restart weights at each restart iteration. + Default: [1]. + last_epoch (int): Used in _LRScheduler. Default: -1. + """ + + def __init__(self, optimizer, milestones, gamma=0.1, restarts=(0, ), restart_weights=(1, ), last_epoch=-1): + self.milestones = Counter(milestones) + self.gamma = gamma + self.restarts = restarts + self.restart_weights = restart_weights + assert len(self.restarts) == len(self.restart_weights), 'restarts and their weights do not match.' + super(MultiStepRestartLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + if self.last_epoch in self.restarts: + weight = self.restart_weights[self.restarts.index(self.last_epoch)] + return [group['initial_lr'] * weight for group in self.optimizer.param_groups] + if self.last_epoch not in self.milestones: + return [group['lr'] for group in self.optimizer.param_groups] + return [group['lr'] * self.gamma**self.milestones[self.last_epoch] for group in self.optimizer.param_groups] + + +def get_position_from_periods(iteration, cumulative_period): + """Get the position from a period list. + + It will return the index of the right-closest number in the period list. + For example, the cumulative_period = [100, 200, 300, 400], + if iteration == 50, return 0; + if iteration == 210, return 2; + if iteration == 300, return 2. + + Args: + iteration (int): Current iteration. + cumulative_period (list[int]): Cumulative period list. + + Returns: + int: The position of the right-closest number in the period list. + """ + for i, period in enumerate(cumulative_period): + if iteration <= period: + return i + + +class CosineAnnealingRestartLR(_LRScheduler): + """ Cosine annealing with restarts learning rate scheme. + + An example of config: + periods = [10, 10, 10, 10] + restart_weights = [1, 0.5, 0.5, 0.5] + eta_min=1e-7 + + It has four cycles, each has 10 iterations. At 10th, 20th, 30th, the + scheduler will restart with the weights in restart_weights. + + Args: + optimizer (torch.nn.optimizer): Torch optimizer. + periods (list): Period for each cosine anneling cycle. + restart_weights (list): Restart weights at each restart iteration. + Default: [1]. + eta_min (float): The minimum lr. Default: 0. + last_epoch (int): Used in _LRScheduler. Default: -1. + """ + + def __init__(self, optimizer, periods, restart_weights=(1, ), eta_min=0, last_epoch=-1): + self.periods = periods + self.restart_weights = restart_weights + self.eta_min = eta_min + assert (len(self.periods) == len( + self.restart_weights)), 'periods and restart_weights should have the same length.' 
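+        # Editorial note: cumulative_period[i] is the last iteration of cycle i,
+        # e.g. periods = [10, 10, 10, 10] -> cumulative_period = [10, 20, 30, 40].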
+ self.cumulative_period = [sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))] + super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + idx = get_position_from_periods(self.last_epoch, self.cumulative_period) + current_weight = self.restart_weights[idx] + nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1] + current_period = self.periods[idx] + + return [ + self.eta_min + current_weight * 0.5 * (base_lr - self.eta_min) * + (1 + math.cos(math.pi * ((self.last_epoch - nearest_restart) / current_period))) + for base_lr in self.base_lrs + ] diff --git a/basicsr/train.py b/basicsr/train.py new file mode 100644 index 0000000000000000000000000000000000000000..49d89aebd197c6955f16d3837a2d60472a7f3270 --- /dev/null +++ b/basicsr/train.py @@ -0,0 +1,224 @@ +import datetime +import logging +import math +import time +import torch +import warnings + +warnings.filterwarnings("ignore") + +from os import path as osp + +from basicsr.data import build_dataloader, build_dataset +from basicsr.data.data_sampler import EnlargedSampler +from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher +from basicsr.models import build_model +from basicsr.utils import (AvgTimer, MessageLogger, check_resume, get_env_info, get_root_logger, get_time_str, + init_tb_logger, init_wandb_logger, make_exp_dirs, mkdir_and_rename, scandir) +from basicsr.utils.options import copy_opt_file, dict2str, parse_options + + +def init_tb_loggers(opt): + # initialize wandb logger before tensorboard logger to allow proper sync + if (opt['logger'].get('wandb') is not None) and (opt['logger']['wandb'].get('project') + is not None) and ('debug' not in opt['name']): + assert opt['logger'].get('use_tb_logger') is True, ('should turn on tensorboard when using wandb') + init_wandb_logger(opt) + tb_logger = None + if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']: + tb_logger = init_tb_logger(log_dir=osp.join(opt['root_path'], 'tb_logger', opt['name'])) + return tb_logger + + +def create_train_val_dataloader(opt, logger): + # create train and val dataloaders + train_loader, val_loaders = None, [] + for phase, dataset_opt in opt['datasets'].items(): + if phase == 'train': + dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1) + train_set = build_dataset(dataset_opt) + train_sampler = EnlargedSampler(train_set, opt['world_size'], opt['rank'], dataset_enlarge_ratio) + train_loader = build_dataloader( + train_set, + dataset_opt, + num_gpu=opt['num_gpu'], + dist=opt['dist'], + sampler=train_sampler, + seed=opt['manual_seed']) + + num_iter_per_epoch = math.ceil( + len(train_set) * dataset_enlarge_ratio / (dataset_opt['batch_size_per_gpu'] * opt['world_size'])) + total_iters = int(opt['train']['total_iter']) + total_epochs = math.ceil(total_iters / (num_iter_per_epoch)) + logger.info('Training statistics:' + f'\n\tNumber of train images: {len(train_set)}' + f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}' + f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}' + f'\n\tWorld size (gpu number): {opt["world_size"]}' + f'\n\tRequire iter number per epoch: {num_iter_per_epoch}' + f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.') + elif phase.split('_')[0] == 'val': + val_set = build_dataset(dataset_opt) + val_loader = build_dataloader( + val_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed']) + logger.info(f'Number of val images/folders in {dataset_opt["name"]}: 
{len(val_set)}') + val_loaders.append(val_loader) + else: + raise ValueError(f'Dataset phase {phase} is not recognized.') + + return train_loader, train_sampler, val_loaders, total_epochs, total_iters + + +def load_resume_state(opt): + resume_state_path = None + if opt['auto_resume']: + state_path = osp.join(opt['root_path'], 'experiments', opt['name'], 'training_states') + if osp.isdir(state_path): + states = list(scandir(state_path, suffix='state', recursive=False, full_path=False)) + if len(states) != 0: + states = [float(v.split('.state')[0]) for v in states] + resume_state_path = osp.join(state_path, f'{max(states):.0f}.state') + opt['path']['resume_state'] = resume_state_path + else: + if opt['path'].get('resume_state'): + resume_state_path = opt['path']['resume_state'] + + if resume_state_path is None: + resume_state = None + else: + device_id = torch.cuda.current_device() + resume_state = torch.load(resume_state_path, map_location=lambda storage, loc: storage.cuda(device_id)) + check_resume(opt, resume_state['iter']) + return resume_state + + +def train_pipeline(root_path): + # parse options, set distributed setting, set ramdom seed + opt, args = parse_options(root_path, is_train=True) + opt['root_path'] = root_path + + torch.backends.cudnn.benchmark = True + # torch.backends.cudnn.deterministic = True + + # load resume states if necessary + resume_state = load_resume_state(opt) + # mkdir for experiments and logger + if resume_state is None: + make_exp_dirs(opt) + if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name'] and opt['rank'] == 0: + mkdir_and_rename(osp.join(opt['root_path'], 'tb_logger', opt['name'])) + + # copy the yml file to the experiment root + copy_opt_file(args.opt, opt['path']['experiments_root']) + + # WARNING: should not use get_root_logger in the above codes, including the called functions + # Otherwise the logger will not be properly initialized + log_file = osp.join(opt['path']['log'], f"train_{opt['name']}_{get_time_str()}.log") + logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file) + logger.info(get_env_info()) + logger.info(dict2str(opt)) + # initialize wandb and tb loggers + tb_logger = init_tb_loggers(opt) + + # create train and validation dataloaders + result = create_train_val_dataloader(opt, logger) + train_loader, train_sampler, val_loaders, total_epochs, total_iters = result + + # create model + model = build_model(opt) + if resume_state: # resume training + model.resume_training(resume_state) # handle optimizers and schedulers + logger.info(f"Resuming training from epoch: {resume_state['epoch']}, " f"iter: {resume_state['iter']}.") + start_epoch = resume_state['epoch'] + current_iter = resume_state['iter'] + else: + start_epoch = 0 + current_iter = 0 + + # create message logger (formatted outputs) + msg_logger = MessageLogger(opt, current_iter, tb_logger) + + # dataloader prefetcher + prefetch_mode = opt['datasets']['train'].get('prefetch_mode') + if prefetch_mode is None or prefetch_mode == 'cpu': + prefetcher = CPUPrefetcher(train_loader) + elif prefetch_mode == 'cuda': + prefetcher = CUDAPrefetcher(train_loader, opt) + logger.info(f'Use {prefetch_mode} prefetch dataloader') + if opt['datasets']['train'].get('pin_memory') is not True: + raise ValueError('Please set pin_memory=True for CUDAPrefetcher.') + else: + raise ValueError(f'Wrong prefetch_mode {prefetch_mode}.' 
"Supported ones are: None, 'cuda', 'cpu'.") + + # training + logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter}') + data_timer, iter_timer = AvgTimer(), AvgTimer() + start_time = time.time() + + for epoch in range(start_epoch, total_epochs + 1): + train_sampler.set_epoch(epoch) + prefetcher.reset() + train_data = prefetcher.next() + + while train_data is not None: + data_timer.record() + + current_iter += 1 + if current_iter > total_iters: + break + # update learning rate + model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1)) + # training + model.feed_data(train_data) + model.optimize_parameters(current_iter) + iter_timer.record() + if current_iter == 1: + # reset start time in msg_logger for more accurate eta_time + # not work in resume mode + msg_logger.reset_start_time() + # log + if current_iter % opt['logger']['print_freq'] == 0: + log_vars = {'epoch': epoch, 'iter': current_iter} + log_vars.update({'lrs': model.get_current_learning_rate()}) + log_vars.update({'time': iter_timer.get_avg_time(), 'data_time': data_timer.get_avg_time()}) + log_vars.update(model.get_current_log()) + msg_logger(log_vars) + + # save training images snapshot save_snapshot_freq + if opt['logger'][ + 'save_snapshot_freq'] is not None and current_iter % opt['logger']['save_snapshot_freq'] == 0: + model.save_training_images(current_iter) + + # save models and training states + if current_iter % opt['logger']['save_checkpoint_freq'] == 0: + logger.info('Saving models and training states.') + model.save(epoch, current_iter) + + # validation + if opt.get('val') is not None and (current_iter % opt['val']['val_freq'] == 0): + if len(val_loaders) > 1: + logger.warning('Multiple validation datasets are *only* supported by SRModel.') + for val_loader in val_loaders: + model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img']) + + data_timer.start() + iter_timer.start() + train_data = prefetcher.next() + # end of iter + + # end of epoch + + consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time))) + logger.info(f'End of training. 
+    logger.info('Save the latest model.')
+    model.save(epoch=-1, current_iter=-1)  # -1 stands for the latest
+    if opt.get('val') is not None:
+        for val_loader in val_loaders:
+            model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
+    if tb_logger:
+        tb_logger.close()
+
+
+if __name__ == '__main__':
+    root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
+    train_pipeline(root_path)
diff --git a/basicsr/utils/__init__.py b/basicsr/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..902b293cdf189e8324731d438249fd3089d2b0fa
--- /dev/null
+++ b/basicsr/utils/__init__.py
@@ -0,0 +1,37 @@
+from .diffjpeg import DiffJPEG
+from .file_client import FileClient
+from .img_process_util import USMSharp, usm_sharp
+from .img_util import crop_border, imfrombytes, img2tensor, imwrite, tensor2img
+from .logger import AvgTimer, MessageLogger, get_env_info, get_root_logger, init_tb_logger, init_wandb_logger
+from .misc import check_resume, get_time_str, make_exp_dirs, mkdir_and_rename, scandir, set_random_seed, sizeof_fmt
+
+__all__ = [
+    # file_client.py
+    'FileClient',
+    # img_util.py
+    'img2tensor',
+    'tensor2img',
+    'imfrombytes',
+    'imwrite',
+    'crop_border',
+    # logger.py
+    'MessageLogger',
+    'AvgTimer',
+    'init_tb_logger',
+    'init_wandb_logger',
+    'get_root_logger',
+    'get_env_info',
+    # misc.py
+    'set_random_seed',
+    'get_time_str',
+    'mkdir_and_rename',
+    'make_exp_dirs',
+    'scandir',
+    'check_resume',
+    'sizeof_fmt',
+    # diffjpeg
+    'DiffJPEG',
+    # img_process_util
+    'USMSharp',
+    'usm_sharp'
+]
diff --git a/basicsr/utils/color_enhance.py b/basicsr/utils/color_enhance.py
new file mode 100644
index 0000000000000000000000000000000000000000..3121f47c8588b5aa98c628f6934bc0f06013826b
--- /dev/null
+++ b/basicsr/utils/color_enhance.py
@@ -0,0 +1,9 @@
+from torchvision.transforms import ToTensor, Grayscale
+
+
+def color_enhacne_blend(x, factor=1.2):
+    x_g = Grayscale(3)(x)
+    out = x_g * (1.0 - factor) + x * factor
+    out[out < 0] = 0
+    out[out > 1] = 1
+    return out
\ No newline at end of file
diff --git a/basicsr/utils/diffjpeg.py b/basicsr/utils/diffjpeg.py
new file mode 100644
index 0000000000000000000000000000000000000000..65f96b44f9e7f3f8a589668f0003adf328cc5742
--- /dev/null
+++ b/basicsr/utils/diffjpeg.py
@@ -0,0 +1,515 @@
+"""
+Modified from https://github.com/mlomnitz/DiffJPEG
+
+For images not divisible by 8
+https://dsp.stackexchange.com/questions/35339/jpeg-dct-padding/35343#35343
+"""
+import itertools
+import numpy as np
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+# ------------------------ utils ------------------------#
+y_table = np.array(
+    [[16, 11, 10, 16, 24, 40, 51, 61], [12, 12, 14, 19, 26, 58, 60, 55], [14, 13, 16, 24, 40, 57, 69, 56],
+     [14, 17, 22, 29, 51, 87, 80, 62], [18, 22, 37, 56, 68, 109, 103, 77], [24, 35, 55, 64, 81, 104, 113, 92],
+     [49, 64, 78, 87, 103, 121, 120, 101], [72, 92, 95, 98, 112, 100, 103, 99]],
+    dtype=np.float32).T
+y_table = nn.Parameter(torch.from_numpy(y_table))
+c_table = np.empty((8, 8), dtype=np.float32)
+c_table.fill(99)
+c_table[:4, :4] = np.array([[17, 18, 24, 47], [18, 21, 26, 66], [24, 26, 56, 99], [47, 66, 99, 99]]).T
+c_table = nn.Parameter(torch.from_numpy(c_table))
+
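+# Illustration (comment only): the effective quantization step for each DCT
+# coefficient is the table entry above multiplied by a quality factor,
+# following the IJG rule implemented in quality_to_factor() below:
+#     factor = (5000. / q if q < 50 else 200. - 2 * q) / 100.
+# e.g. factor(20) = 2.5 (coarse steps, strong compression) and
+#      factor(90) = 0.2 (fine steps, high fidelity).
+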
+def diff_round(x): + """ Differentiable rounding function + """ + return torch.round(x) + (x - torch.round(x))**3 + + +def quality_to_factor(quality): + """ Calculate factor corresponding to quality + + Args: + quality(float): Quality for jpeg compression. + + Returns: + float: Compression factor. + """ + if quality < 50: + quality = 5000. / quality + else: + quality = 200. - quality * 2 + return quality / 100. + + +# ------------------------ compression ------------------------# +class RGB2YCbCrJpeg(nn.Module): + """ Converts RGB image to YCbCr + """ + + def __init__(self): + super(RGB2YCbCrJpeg, self).__init__() + matrix = np.array([[0.299, 0.587, 0.114], [-0.168736, -0.331264, 0.5], [0.5, -0.418688, -0.081312]], + dtype=np.float32).T + self.shift = nn.Parameter(torch.tensor([0., 128., 128.])) + self.matrix = nn.Parameter(torch.from_numpy(matrix)) + + def forward(self, image): + """ + Args: + image(Tensor): batch x 3 x height x width + + Returns: + Tensor: batch x height x width x 3 + """ + image = image.permute(0, 2, 3, 1) + result = torch.tensordot(image, self.matrix, dims=1) + self.shift + return result.view(image.shape) + + +class ChromaSubsampling(nn.Module): + """ Chroma subsampling on CbCr channels + """ + + def __init__(self): + super(ChromaSubsampling, self).__init__() + + def forward(self, image): + """ + Args: + image(tensor): batch x height x width x 3 + + Returns: + y(tensor): batch x height x width + cb(tensor): batch x height/2 x width/2 + cr(tensor): batch x height/2 x width/2 + """ + image_2 = image.permute(0, 3, 1, 2).clone() + cb = F.avg_pool2d(image_2[:, 1, :, :].unsqueeze(1), kernel_size=2, stride=(2, 2), count_include_pad=False) + cr = F.avg_pool2d(image_2[:, 2, :, :].unsqueeze(1), kernel_size=2, stride=(2, 2), count_include_pad=False) + cb = cb.permute(0, 2, 3, 1) + cr = cr.permute(0, 2, 3, 1) + return image[:, :, :, 0], cb.squeeze(3), cr.squeeze(3) + + +class BlockSplitting(nn.Module): + """ Splitting image into patches + """ + + def __init__(self): + super(BlockSplitting, self).__init__() + self.k = 8 + + def forward(self, image): + """ + Args: + image(tensor): batch x height x width + + Returns: + Tensor: batch x h*w/64 x h x w + """ + height, _ = image.shape[1:3] + batch_size = image.shape[0] + image_reshaped = image.view(batch_size, height // self.k, self.k, -1, self.k) + image_transposed = image_reshaped.permute(0, 1, 3, 2, 4) + return image_transposed.contiguous().view(batch_size, -1, self.k, self.k) + + +class DCT8x8(nn.Module): + """ Discrete Cosine Transformation + """ + + def __init__(self): + super(DCT8x8, self).__init__() + tensor = np.zeros((8, 8, 8, 8), dtype=np.float32) + for x, y, u, v in itertools.product(range(8), repeat=4): + tensor[x, y, u, v] = np.cos((2 * x + 1) * u * np.pi / 16) * np.cos((2 * y + 1) * v * np.pi / 16) + alpha = np.array([1. 
/ np.sqrt(2)] + [1] * 7) + self.tensor = nn.Parameter(torch.from_numpy(tensor).float()) + self.scale = nn.Parameter(torch.from_numpy(np.outer(alpha, alpha) * 0.25).float()) + + def forward(self, image): + """ + Args: + image(tensor): batch x height x width + + Returns: + Tensor: batch x height x width + """ + image = image - 128 + result = self.scale * torch.tensordot(image, self.tensor, dims=2) + result.view(image.shape) + return result + + +class YQuantize(nn.Module): + """ JPEG Quantization for Y channel + + Args: + rounding(function): rounding function to use + """ + + def __init__(self, rounding): + super(YQuantize, self).__init__() + self.rounding = rounding + self.y_table = y_table + + def forward(self, image, factor=1): + """ + Args: + image(tensor): batch x height x width + + Returns: + Tensor: batch x height x width + """ + if isinstance(factor, (int, float)): + image = image.float() / (self.y_table * factor) + else: + b = factor.size(0) + table = self.y_table.expand(b, 1, 8, 8) * factor.view(b, 1, 1, 1) + image = image.float() / table + image = self.rounding(image) + return image + + +class CQuantize(nn.Module): + """ JPEG Quantization for CbCr channels + + Args: + rounding(function): rounding function to use + """ + + def __init__(self, rounding): + super(CQuantize, self).__init__() + self.rounding = rounding + self.c_table = c_table + + def forward(self, image, factor=1): + """ + Args: + image(tensor): batch x height x width + + Returns: + Tensor: batch x height x width + """ + if isinstance(factor, (int, float)): + image = image.float() / (self.c_table * factor) + else: + b = factor.size(0) + table = self.c_table.expand(b, 1, 8, 8) * factor.view(b, 1, 1, 1) + image = image.float() / table + image = self.rounding(image) + return image + + +class CompressJpeg(nn.Module): + """Full JPEG compression algorithm + + Args: + rounding(function): rounding function to use + """ + + def __init__(self, rounding=torch.round): + super(CompressJpeg, self).__init__() + self.l1 = nn.Sequential(RGB2YCbCrJpeg(), ChromaSubsampling()) + self.l2 = nn.Sequential(BlockSplitting(), DCT8x8()) + self.c_quantize = CQuantize(rounding=rounding) + self.y_quantize = YQuantize(rounding=rounding) + + def forward(self, image, factor=1): + """ + Args: + image(tensor): batch x 3 x height x width + + Returns: + dict(tensor): Compressed tensor with batch x h*w/64 x 8 x 8. 
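+
+        Example (illustrative; note that what is actually returned below is
+            the (y, cb, cr) tuple, with these shapes for a 2 x 3 x 64 x 64 input):
+            >>> y, cb, cr = CompressJpeg()(torch.rand(2, 3, 64, 64))
+            >>> y.shape, cb.shape, cr.shape
+            (torch.Size([2, 64, 8, 8]), torch.Size([2, 16, 8, 8]), torch.Size([2, 16, 8, 8]))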
+ """ + y, cb, cr = self.l1(image * 255) + components = {'y': y, 'cb': cb, 'cr': cr} + for k in components.keys(): + comp = self.l2(components[k]) + if k in ('cb', 'cr'): + comp = self.c_quantize(comp, factor=factor) + else: + comp = self.y_quantize(comp, factor=factor) + + components[k] = comp + + return components['y'], components['cb'], components['cr'] + + +# ------------------------ decompression ------------------------# + + +class YDequantize(nn.Module): + """Dequantize Y channel + """ + + def __init__(self): + super(YDequantize, self).__init__() + self.y_table = y_table + + def forward(self, image, factor=1): + """ + Args: + image(tensor): batch x height x width + + Returns: + Tensor: batch x height x width + """ + if isinstance(factor, (int, float)): + out = image * (self.y_table * factor) + else: + b = factor.size(0) + table = self.y_table.expand(b, 1, 8, 8) * factor.view(b, 1, 1, 1) + out = image * table + return out + + +class CDequantize(nn.Module): + """Dequantize CbCr channel + """ + + def __init__(self): + super(CDequantize, self).__init__() + self.c_table = c_table + + def forward(self, image, factor=1): + """ + Args: + image(tensor): batch x height x width + + Returns: + Tensor: batch x height x width + """ + if isinstance(factor, (int, float)): + out = image * (self.c_table * factor) + else: + b = factor.size(0) + table = self.c_table.expand(b, 1, 8, 8) * factor.view(b, 1, 1, 1) + out = image * table + return out + + +class iDCT8x8(nn.Module): + """Inverse discrete Cosine Transformation + """ + + def __init__(self): + super(iDCT8x8, self).__init__() + alpha = np.array([1. / np.sqrt(2)] + [1] * 7) + self.alpha = nn.Parameter(torch.from_numpy(np.outer(alpha, alpha)).float()) + tensor = np.zeros((8, 8, 8, 8), dtype=np.float32) + for x, y, u, v in itertools.product(range(8), repeat=4): + tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos((2 * v + 1) * y * np.pi / 16) + self.tensor = nn.Parameter(torch.from_numpy(tensor).float()) + + def forward(self, image): + """ + Args: + image(tensor): batch x height x width + + Returns: + Tensor: batch x height x width + """ + image = image * self.alpha + result = 0.25 * torch.tensordot(image, self.tensor, dims=2) + 128 + result.view(image.shape) + return result + + +class BlockMerging(nn.Module): + """Merge patches into image + """ + + def __init__(self): + super(BlockMerging, self).__init__() + + def forward(self, patches, height, width): + """ + Args: + patches(tensor) batch x height*width/64, height x width + height(int) + width(int) + + Returns: + Tensor: batch x height x width + """ + k = 8 + batch_size = patches.shape[0] + image_reshaped = patches.view(batch_size, height // k, width // k, k, k) + image_transposed = image_reshaped.permute(0, 1, 3, 2, 4) + return image_transposed.contiguous().view(batch_size, height, width) + + +class ChromaUpsampling(nn.Module): + """Upsample chroma layers + """ + + def __init__(self): + super(ChromaUpsampling, self).__init__() + + def forward(self, y, cb, cr): + """ + Args: + y(tensor): y channel image + cb(tensor): cb channel + cr(tensor): cr channel + + Returns: + Tensor: batch x height x width x 3 + """ + + def repeat(x, k=2): + height, width = x.shape[1:3] + x = x.unsqueeze(-1) + x = x.repeat(1, 1, k, k) + x = x.view(-1, height * k, width * k) + return x + + cb = repeat(cb) + cr = repeat(cr) + return torch.cat([y.unsqueeze(3), cb.unsqueeze(3), cr.unsqueeze(3)], dim=3) + + +class YCbCr2RGBJpeg(nn.Module): + """Converts YCbCr image to RGB JPEG + """ + + def __init__(self): + 
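+        # The matrix below is the standard JFIF (BT.601) YCbCr -> RGB transform,
+        # i.e. the inverse of the matrix used in RGB2YCbCrJpeg above.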
super(YCbCr2RGBJpeg, self).__init__() + + matrix = np.array([[1., 0., 1.402], [1, -0.344136, -0.714136], [1, 1.772, 0]], dtype=np.float32).T + self.shift = nn.Parameter(torch.tensor([0, -128., -128.])) + self.matrix = nn.Parameter(torch.from_numpy(matrix)) + + def forward(self, image): + """ + Args: + image(tensor): batch x height x width x 3 + + Returns: + Tensor: batch x 3 x height x width + """ + result = torch.tensordot(image + self.shift, self.matrix, dims=1) + return result.view(image.shape).permute(0, 3, 1, 2) + + +class DeCompressJpeg(nn.Module): + """Full JPEG decompression algorithm + + Args: + rounding(function): rounding function to use + """ + + def __init__(self, rounding=torch.round): + super(DeCompressJpeg, self).__init__() + self.c_dequantize = CDequantize() + self.y_dequantize = YDequantize() + self.idct = iDCT8x8() + self.merging = BlockMerging() + self.chroma = ChromaUpsampling() + self.colors = YCbCr2RGBJpeg() + + def forward(self, y, cb, cr, imgh, imgw, factor=1): + """ + Args: + compressed(dict(tensor)): batch x h*w/64 x 8 x 8 + imgh(int) + imgw(int) + factor(float) + + Returns: + Tensor: batch x 3 x height x width + """ + components = {'y': y, 'cb': cb, 'cr': cr} + for k in components.keys(): + if k in ('cb', 'cr'): + comp = self.c_dequantize(components[k], factor=factor) + height, width = int(imgh / 2), int(imgw / 2) + else: + comp = self.y_dequantize(components[k], factor=factor) + height, width = imgh, imgw + comp = self.idct(comp) + components[k] = self.merging(comp, height, width) + # + image = self.chroma(components['y'], components['cb'], components['cr']) + image = self.colors(image) + + image = torch.min(255 * torch.ones_like(image), torch.max(torch.zeros_like(image), image)) + return image / 255 + + +# ------------------------ main DiffJPEG ------------------------ # + + +class DiffJPEG(nn.Module): + """This JPEG algorithm result is slightly different from cv2. + DiffJPEG supports batch processing. + + Args: + differentiable(bool): If True, uses custom differentiable rounding function, if False, uses standard torch.round + """ + + def __init__(self, differentiable=True): + super(DiffJPEG, self).__init__() + if differentiable: + rounding = diff_round + else: + rounding = torch.round + + self.compress = CompressJpeg(rounding=rounding) + self.decompress = DeCompressJpeg(rounding=rounding) + + def forward(self, x, quality): + """ + Args: + x (Tensor): Input image, bchw, rgb, [0, 1] + quality(float): Quality factor for jpeg compression scheme. + """ + factor = quality + if isinstance(factor, (int, float)): + factor = quality_to_factor(factor) + else: + for i in range(factor.size(0)): + factor[i] = quality_to_factor(factor[i]) + h, w = x.size()[-2:] + h_pad, w_pad = 0, 0 + # why should use 16 + if h % 16 != 0: + h_pad = 16 - h % 16 + if w % 16 != 0: + w_pad = 16 - w % 16 + x = F.pad(x, (0, w_pad, 0, h_pad), mode='constant', value=0) + + y, cb, cr = self.compress(x, factor=factor) + recovered = self.decompress(y, cb, cr, (h + h_pad), (w + w_pad), factor=factor) + recovered = recovered[:, :, 0:h, 0:w] + return recovered + + +if __name__ == '__main__': + import cv2 + + from basicsr.utils import img2tensor, tensor2img + + img_gt = cv2.imread('test.png') / 255. 
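+    # Note: this self-test assumes a local 'test.png' and a CUDA device; it
+    # compresses the image once with cv2 at quality 20 and once with DiffJPEG
+    # at qualities 20 and 40 so the outputs can be compared side by side.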
+ + # -------------- cv2 -------------- # + encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 20] + _, encimg = cv2.imencode('.jpg', img_gt * 255., encode_param) + img_lq = np.float32(cv2.imdecode(encimg, 1)) + cv2.imwrite('cv2_JPEG_20.png', img_lq) + + # -------------- DiffJPEG -------------- # + jpeger = DiffJPEG(differentiable=False).cuda() + img_gt = img2tensor(img_gt) + img_gt = torch.stack([img_gt, img_gt]).cuda() + quality = img_gt.new_tensor([20, 40]) + out = jpeger(img_gt, quality=quality) + + cv2.imwrite('pt_JPEG_20.png', tensor2img(out[0])) + cv2.imwrite('pt_JPEG_40.png', tensor2img(out[1])) diff --git a/basicsr/utils/dist_util.py b/basicsr/utils/dist_util.py new file mode 100644 index 0000000000000000000000000000000000000000..0fab887b2cb1ce8533d2e8fdee72ae0c24f68fd0 --- /dev/null +++ b/basicsr/utils/dist_util.py @@ -0,0 +1,82 @@ +# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py # noqa: E501 +import functools +import os +import subprocess +import torch +import torch.distributed as dist +import torch.multiprocessing as mp + + +def init_dist(launcher, backend='nccl', **kwargs): + if mp.get_start_method(allow_none=True) is None: + mp.set_start_method('spawn') + if launcher == 'pytorch': + _init_dist_pytorch(backend, **kwargs) + elif launcher == 'slurm': + _init_dist_slurm(backend, **kwargs) + else: + raise ValueError(f'Invalid launcher type: {launcher}') + + +def _init_dist_pytorch(backend, **kwargs): + rank = int(os.environ['RANK']) + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(rank % num_gpus) + dist.init_process_group(backend=backend, **kwargs) + + +def _init_dist_slurm(backend, port=None): + """Initialize slurm distributed training environment. + + If argument ``port`` is not specified, then the master port will be system + environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system + environment variable, then a default port ``29500`` will be used. + + Args: + backend (str): Backend of torch.distributed. + port (int, optional): Master port. Defaults to None. 
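+
+    Example (illustrative; this is normally reached through ``init_dist``,
+        with the SLURM_* variables set by the scheduler):
+        >>> init_dist('slurm', backend='nccl', port=29510)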
+ """ + proc_id = int(os.environ['SLURM_PROCID']) + ntasks = int(os.environ['SLURM_NTASKS']) + node_list = os.environ['SLURM_NODELIST'] + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(proc_id % num_gpus) + addr = subprocess.getoutput(f'scontrol show hostname {node_list} | head -n1') + # specify master port + if port is not None: + os.environ['MASTER_PORT'] = str(port) + elif 'MASTER_PORT' in os.environ: + pass # use MASTER_PORT in the environment variable + else: + # 29500 is torch.distributed default port + os.environ['MASTER_PORT'] = '29500' + os.environ['MASTER_ADDR'] = addr + os.environ['WORLD_SIZE'] = str(ntasks) + os.environ['LOCAL_RANK'] = str(proc_id % num_gpus) + os.environ['RANK'] = str(proc_id) + dist.init_process_group(backend=backend) + + +def get_dist_info(): + if dist.is_available(): + initialized = dist.is_initialized() + else: + initialized = False + if initialized: + rank = dist.get_rank() + world_size = dist.get_world_size() + else: + rank = 0 + world_size = 1 + return rank, world_size + + +def master_only(func): + + @functools.wraps(func) + def wrapper(*args, **kwargs): + rank, _ = get_dist_info() + if rank == 0: + return func(*args, **kwargs) + + return wrapper diff --git a/basicsr/utils/download_util.py b/basicsr/utils/download_util.py new file mode 100644 index 0000000000000000000000000000000000000000..6e12898bbb165de7f9bba31d5177e5c0bbcf4404 --- /dev/null +++ b/basicsr/utils/download_util.py @@ -0,0 +1,64 @@ +import math +import requests +from tqdm import tqdm + +from .misc import sizeof_fmt + + +def download_file_from_google_drive(file_id, save_path): + """Download files from google drive. + + Ref: + https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive # noqa E501 + + Args: + file_id (str): File id. + save_path (str): Save path. 
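+
+    Example (file id and destination are placeholders):
+        >>> download_file_from_google_drive('0B-placeholder-id', 'model.pth')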
+ """ + + session = requests.Session() + URL = 'https://docs.google.com/uc?export=download' + params = {'id': file_id} + + response = session.get(URL, params=params, stream=True) + token = get_confirm_token(response) + if token: + params['confirm'] = token + response = session.get(URL, params=params, stream=True) + + # get file size + response_file_size = session.get(URL, params=params, stream=True, headers={'Range': 'bytes=0-2'}) + if 'Content-Range' in response_file_size.headers: + file_size = int(response_file_size.headers['Content-Range'].split('/')[1]) + else: + file_size = None + + save_response_content(response, save_path, file_size) + + +def get_confirm_token(response): + for key, value in response.cookies.items(): + if key.startswith('download_warning'): + return value + return None + + +def save_response_content(response, destination, file_size=None, chunk_size=32768): + if file_size is not None: + pbar = tqdm(total=math.ceil(file_size / chunk_size), unit='chunk') + + readable_file_size = sizeof_fmt(file_size) + else: + pbar = None + + with open(destination, 'wb') as f: + downloaded_size = 0 + for chunk in response.iter_content(chunk_size): + downloaded_size += chunk_size + if pbar is not None: + pbar.update(1) + pbar.set_description(f'Download {sizeof_fmt(downloaded_size)} / {readable_file_size}') + if chunk: # filter out keep-alive new chunks + f.write(chunk) + if pbar is not None: + pbar.close() diff --git a/basicsr/utils/face_util.py b/basicsr/utils/face_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e6f2552ed459e7c15929c30b503347d7d8b9cbdb --- /dev/null +++ b/basicsr/utils/face_util.py @@ -0,0 +1,192 @@ +import cv2 +import numpy as np +import os +import torch +from skimage import transform as trans + +from basicsr.utils import imwrite + +try: + import dlib +except ImportError: + print('Please install dlib before testing face restoration.' 'Reference: https://github.com/davisking/dlib') + + +class FaceRestorationHelper(object): + """Helper for the face restoration pipeline.""" + + def __init__(self, upscale_factor, face_size=512): + self.upscale_factor = upscale_factor + self.face_size = (face_size, face_size) + + # standard 5 landmarks for FFHQ faces with 1024 x 1024 + self.face_template = np.array([[686.77227723, 488.62376238], [586.77227723, 493.59405941], + [337.91089109, 488.38613861], [437.95049505, 493.51485149], + [513.58415842, 678.5049505]]) + self.face_template = self.face_template / (1024 // face_size) + # for estimation the 2D similarity transformation + self.similarity_trans = trans.SimilarityTransform() + + self.all_landmarks_5 = [] + self.all_landmarks_68 = [] + self.affine_matrices = [] + self.inverse_affine_matrices = [] + self.cropped_faces = [] + self.restored_faces = [] + self.save_png = True + + def init_dlib(self, detection_path, landmark5_path, landmark68_path): + """Initialize the dlib detectors and predictors.""" + self.face_detector = dlib.cnn_face_detection_model_v1(detection_path) + self.shape_predictor_5 = dlib.shape_predictor(landmark5_path) + self.shape_predictor_68 = dlib.shape_predictor(landmark68_path) + + def free_dlib_gpu_memory(self): + del self.face_detector + del self.shape_predictor_5 + del self.shape_predictor_68 + + def read_input_image(self, img_path): + # self.input_img is Numpy array, (h, w, c) with RGB order + self.input_img = dlib.load_rgb_image(img_path) + + def detect_faces(self, img_path, upsample_num_times=1, only_keep_largest=False): + """ + Args: + img_path (str): Image path. 
+ upsample_num_times (int): Upsamples the image before running the + face detector + + Returns: + int: Number of detected faces. + """ + self.read_input_image(img_path) + det_faces = self.face_detector(self.input_img, upsample_num_times) + if len(det_faces) == 0: + print('No face detected. Try to increase upsample_num_times.') + else: + if only_keep_largest: + print('Detect several faces and only keep the largest.') + face_areas = [] + for i in range(len(det_faces)): + face_area = (det_faces[i].rect.right() - det_faces[i].rect.left()) * ( + det_faces[i].rect.bottom() - det_faces[i].rect.top()) + face_areas.append(face_area) + largest_idx = face_areas.index(max(face_areas)) + self.det_faces = [det_faces[largest_idx]] + else: + self.det_faces = det_faces + return len(self.det_faces) + + def get_face_landmarks_5(self): + for face in self.det_faces: + shape = self.shape_predictor_5(self.input_img, face.rect) + landmark = np.array([[part.x, part.y] for part in shape.parts()]) + self.all_landmarks_5.append(landmark) + return len(self.all_landmarks_5) + + def get_face_landmarks_68(self): + """Get 68 densemarks for cropped images. + + Should only have one face at most in the cropped image. + """ + num_detected_face = 0 + for idx, face in enumerate(self.cropped_faces): + # face detection + det_face = self.face_detector(face, 1) # TODO: can we remove it? + if len(det_face) == 0: + print(f'Cannot find faces in cropped image with index {idx}.') + self.all_landmarks_68.append(None) + else: + if len(det_face) > 1: + print('Detect several faces in the cropped face. Use the ' + ' largest one. Note that it will also cause overlap ' + 'during paste_faces_to_input_image.') + face_areas = [] + for i in range(len(det_face)): + face_area = (det_face[i].rect.right() - det_face[i].rect.left()) * ( + det_face[i].rect.bottom() - det_face[i].rect.top()) + face_areas.append(face_area) + largest_idx = face_areas.index(max(face_areas)) + face_rect = det_face[largest_idx].rect + else: + face_rect = det_face[0].rect + shape = self.shape_predictor_68(face, face_rect) + landmark = np.array([[part.x, part.y] for part in shape.parts()]) + self.all_landmarks_68.append(landmark) + num_detected_face += 1 + + return num_detected_face + + def warp_crop_faces(self, save_cropped_path=None, save_inverse_affine_path=None): + """Get affine matrix, warp and cropped faces. + + Also get inverse affine matrix for post-processing. 
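+
+        Example (paths are illustrative; run detect_faces() and
+            get_face_landmarks_5() first so self.all_landmarks_5 is populated):
+            >>> helper.warp_crop_faces(save_cropped_path='out/face.png')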
+ """ + for idx, landmark in enumerate(self.all_landmarks_5): + # use 5 landmarks to get affine matrix + self.similarity_trans.estimate(landmark, self.face_template) + affine_matrix = self.similarity_trans.params[0:2, :] + self.affine_matrices.append(affine_matrix) + # warp and crop faces + cropped_face = cv2.warpAffine(self.input_img, affine_matrix, self.face_size) + self.cropped_faces.append(cropped_face) + # save the cropped face + if save_cropped_path is not None: + path, ext = os.path.splitext(save_cropped_path) + if self.save_png: + save_path = f'{path}_{idx:02d}.png' + else: + save_path = f'{path}_{idx:02d}{ext}' + + imwrite(cv2.cvtColor(cropped_face, cv2.COLOR_RGB2BGR), save_path) + + # get inverse affine matrix + self.similarity_trans.estimate(self.face_template, landmark * self.upscale_factor) + inverse_affine = self.similarity_trans.params[0:2, :] + self.inverse_affine_matrices.append(inverse_affine) + # save inverse affine matrices + if save_inverse_affine_path is not None: + path, _ = os.path.splitext(save_inverse_affine_path) + save_path = f'{path}_{idx:02d}.pth' + torch.save(inverse_affine, save_path) + + def add_restored_face(self, face): + self.restored_faces.append(face) + + def paste_faces_to_input_image(self, save_path): + # operate in the BGR order + input_img = cv2.cvtColor(self.input_img, cv2.COLOR_RGB2BGR) + h, w, _ = input_img.shape + h_up, w_up = h * self.upscale_factor, w * self.upscale_factor + # simply resize the background + upsample_img = cv2.resize(input_img, (w_up, h_up)) + assert len(self.restored_faces) == len( + self.inverse_affine_matrices), ('length of restored_faces and affine_matrices are different.') + for restored_face, inverse_affine in zip(self.restored_faces, self.inverse_affine_matrices): + inv_restored = cv2.warpAffine(restored_face, inverse_affine, (w_up, h_up)) + mask = np.ones((*self.face_size, 3), dtype=np.float32) + inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up)) + # remove the black borders + inv_mask_erosion = cv2.erode(inv_mask, np.ones((2 * self.upscale_factor, 2 * self.upscale_factor), + np.uint8)) + inv_restored_remove_border = inv_mask_erosion * inv_restored + total_face_area = np.sum(inv_mask_erosion) // 3 + # compute the fusion edge based on the area of face + w_edge = int(total_face_area**0.5) // 20 + erosion_radius = w_edge * 2 + inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8)) + blur_size = w_edge * 2 + inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0) + upsample_img = inv_soft_mask * inv_restored_remove_border + (1 - inv_soft_mask) * upsample_img + if self.save_png: + save_path = save_path.replace('.jpg', '.png').replace('.jpeg', '.png') + imwrite(upsample_img.astype(np.uint8), save_path) + + def clean_all(self): + self.all_landmarks_5 = [] + self.all_landmarks_68 = [] + self.restored_faces = [] + self.affine_matrices = [] + self.cropped_faces = [] + self.inverse_affine_matrices = [] diff --git a/basicsr/utils/file_client.py b/basicsr/utils/file_client.py new file mode 100644 index 0000000000000000000000000000000000000000..bb6286a6556c85a29589b054a532200340f4a745 --- /dev/null +++ b/basicsr/utils/file_client.py @@ -0,0 +1,167 @@ +# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py # noqa: E501 +from abc import ABCMeta, abstractmethod + + +class BaseStorageBackend(metaclass=ABCMeta): + """Abstract class of storage backends. 
+ + All backends need to implement two apis: ``get()`` and ``get_text()``. + ``get()`` reads the file as a byte stream and ``get_text()`` reads the file + as texts. + """ + + @abstractmethod + def get(self, filepath): + pass + + @abstractmethod + def get_text(self, filepath): + pass + + +class MemcachedBackend(BaseStorageBackend): + """Memcached storage backend. + + Attributes: + server_list_cfg (str): Config file for memcached server list. + client_cfg (str): Config file for memcached client. + sys_path (str | None): Additional path to be appended to `sys.path`. + Default: None. + """ + + def __init__(self, server_list_cfg, client_cfg, sys_path=None): + if sys_path is not None: + import sys + sys.path.append(sys_path) + try: + import mc + except ImportError: + raise ImportError('Please install memcached to enable MemcachedBackend.') + + self.server_list_cfg = server_list_cfg + self.client_cfg = client_cfg + self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg) + # mc.pyvector servers as a point which points to a memory cache + self._mc_buffer = mc.pyvector() + + def get(self, filepath): + filepath = str(filepath) + import mc + self._client.Get(filepath, self._mc_buffer) + value_buf = mc.ConvertBuffer(self._mc_buffer) + return value_buf + + def get_text(self, filepath): + raise NotImplementedError + + +class HardDiskBackend(BaseStorageBackend): + """Raw hard disks storage backend.""" + + def get(self, filepath): + filepath = str(filepath) + with open(filepath, 'rb') as f: + value_buf = f.read() + return value_buf + + def get_text(self, filepath): + filepath = str(filepath) + with open(filepath, 'r') as f: + value_buf = f.read() + return value_buf + + +class LmdbBackend(BaseStorageBackend): + """Lmdb storage backend. + + Args: + db_paths (str | list[str]): Lmdb database paths. + client_keys (str | list[str]): Lmdb client keys. Default: 'default'. + readonly (bool, optional): Lmdb environment parameter. If True, + disallow any write operations. Default: True. + lock (bool, optional): Lmdb environment parameter. If False, when + concurrent access occurs, do not lock the database. Default: False. + readahead (bool, optional): Lmdb environment parameter. If False, + disable the OS filesystem readahead mechanism, which may improve + random read performance when a database is larger than RAM. + Default: False. + + Attributes: + db_paths (list): Lmdb database path. + _client (list): A list of several lmdb envs. + """ + + def __init__(self, db_paths, client_keys='default', readonly=True, lock=False, readahead=False, **kwargs): + try: + import lmdb + except ImportError: + raise ImportError('Please install lmdb to enable LmdbBackend.') + + if isinstance(client_keys, str): + client_keys = [client_keys] + + if isinstance(db_paths, list): + self.db_paths = [str(v) for v in db_paths] + elif isinstance(db_paths, str): + self.db_paths = [str(db_paths)] + assert len(client_keys) == len(self.db_paths), ('client_keys and db_paths should have the same length, ' + f'but received {len(client_keys)} and {len(self.db_paths)}.') + + self._client = {} + for client, path in zip(client_keys, self.db_paths): + self._client[client] = lmdb.open(path, readonly=readonly, lock=lock, readahead=readahead, **kwargs) + + def get(self, filepath, client_key): + """Get values according to the filepath from one lmdb named client_key. + + Args: + filepath (str | obj:`Path`): Here, filepath is the lmdb key. + client_key (str): Used for distinguishing different lmdb envs. 
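+
+        Example (paths and keys are illustrative):
+            >>> backend = LmdbBackend(['train_gt.lmdb'], client_keys=['gt'])
+            >>> buf = backend.get('0001', 'gt')  # raw encoded image bytes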
+ """ + filepath = str(filepath) + assert client_key in self._client, (f'client_key {client_key} is not ' 'in lmdb clients.') + client = self._client[client_key] + with client.begin(write=False) as txn: + value_buf = txn.get(filepath.encode('ascii')) + return value_buf + + def get_text(self, filepath): + raise NotImplementedError + + +class FileClient(object): + """A general file client to access files in different backend. + + The client loads a file or text in a specified backend from its path + and return it as a binary file. it can also register other backend + accessor with a given name and backend class. + + Attributes: + backend (str): The storage backend type. Options are "disk", + "memcached" and "lmdb". + client (:obj:`BaseStorageBackend`): The backend object. + """ + + _backends = { + 'disk': HardDiskBackend, + 'memcached': MemcachedBackend, + 'lmdb': LmdbBackend, + } + + def __init__(self, backend='disk', **kwargs): + if backend not in self._backends: + raise ValueError(f'Backend {backend} is not supported. Currently supported ones' + f' are {list(self._backends.keys())}') + self.backend = backend + self.client = self._backends[backend](**kwargs) + + def get(self, filepath, client_key='default'): + # client_key is used only for lmdb, where different fileclients have + # different lmdb environments. + if self.backend == 'lmdb': + return self.client.get(filepath, client_key) + else: + return self.client.get(filepath) + + def get_text(self, filepath): + return self.client.get_text(filepath) diff --git a/basicsr/utils/flow_util.py b/basicsr/utils/flow_util.py new file mode 100644 index 0000000000000000000000000000000000000000..cb89d0e0f87651d9125eaecf7796e853044b487a --- /dev/null +++ b/basicsr/utils/flow_util.py @@ -0,0 +1,170 @@ +# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/video/optflow.py # noqa: E501 +import cv2 +import numpy as np +import os + + +def flowread(flow_path, quantize=False, concat_axis=0, *args, **kwargs): + """Read an optical flow map. + + Args: + flow_path (ndarray or str): Flow path. + quantize (bool): whether to read quantized pair, if set to True, + remaining args will be passed to :func:`dequantize_flow`. + concat_axis (int): The axis that dx and dy are concatenated, + can be either 0 or 1. Ignored if quantize is False. + + Returns: + ndarray: Optical flow represented as a (h, w, 2) numpy array + """ + if quantize: + assert concat_axis in [0, 1] + cat_flow = cv2.imread(flow_path, cv2.IMREAD_UNCHANGED) + if cat_flow.ndim != 2: + raise IOError(f'{flow_path} is not a valid quantized flow file, its dimension is {cat_flow.ndim}.') + assert cat_flow.shape[concat_axis] % 2 == 0 + dx, dy = np.split(cat_flow, 2, axis=concat_axis) + flow = dequantize_flow(dx, dy, *args, **kwargs) + else: + with open(flow_path, 'rb') as f: + try: + header = f.read(4).decode('utf-8') + except Exception: + raise IOError(f'Invalid flow file: {flow_path}') + else: + if header != 'PIEH': + raise IOError(f'Invalid flow file: {flow_path}, ' 'header does not contain PIEH') + + w = np.fromfile(f, np.int32, 1).squeeze() + h = np.fromfile(f, np.int32, 1).squeeze() + flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2)) + + return flow.astype(np.float32) + + +def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs): + """Write optical flow to file. + + If the flow is not quantized, it will be saved as a .flo file losslessly, + otherwise a jpeg image which is lossy but of much smaller size. 
(dx and dy + will be concatenated horizontally into a single image if quantize is True.) + + Args: + flow (ndarray): (h, w, 2) array of optical flow. + filename (str): Output filepath. + quantize (bool): Whether to quantize the flow and save it to 2 jpeg + images. If set to True, remaining args will be passed to + :func:`quantize_flow`. + concat_axis (int): The axis that dx and dy are concatenated, + can be either 0 or 1. Ignored if quantize is False. + """ + if not quantize: + with open(filename, 'wb') as f: + f.write('PIEH'.encode('utf-8')) + np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f) + flow = flow.astype(np.float32) + flow.tofile(f) + f.flush() + else: + assert concat_axis in [0, 1] + dx, dy = quantize_flow(flow, *args, **kwargs) + dxdy = np.concatenate((dx, dy), axis=concat_axis) + os.makedirs(os.path.dirname(filename), exist_ok=True) + cv2.imwrite(filename, dxdy) + + +def quantize_flow(flow, max_val=0.02, norm=True): + """Quantize flow to [0, 255]. + + After this step, the size of flow will be much smaller, and can be + dumped as jpeg images. + + Args: + flow (ndarray): (h, w, 2) array of optical flow. + max_val (float): Maximum value of flow, values beyond + [-max_val, max_val] will be truncated. + norm (bool): Whether to divide flow values by image width/height. + + Returns: + tuple[ndarray]: Quantized dx and dy. + """ + h, w, _ = flow.shape + dx = flow[..., 0] + dy = flow[..., 1] + if norm: + dx = dx / w # avoid inplace operations + dy = dy / h + # use 255 levels instead of 256 to make sure 0 is 0 after dequantization. + flow_comps = [quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy]] + return tuple(flow_comps) + + +def dequantize_flow(dx, dy, max_val=0.02, denorm=True): + """Recover from quantized flow. + + Args: + dx (ndarray): Quantized dx. + dy (ndarray): Quantized dy. + max_val (float): Maximum value used when quantizing. + denorm (bool): Whether to multiply flow values with width/height. + + Returns: + ndarray: Dequantized flow. + """ + assert dx.shape == dy.shape + assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1) + + dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]] + + if denorm: + dx *= dx.shape[1] + dy *= dx.shape[0] + flow = np.dstack((dx, dy)) + return flow + + +def quantize(arr, min_val, max_val, levels, dtype=np.int64): + """Quantize an array of (-inf, inf) to [0, levels-1]. + + Args: + arr (ndarray): Input array. + min_val (scalar): Minimum value to be clipped. + max_val (scalar): Maximum value to be clipped. + levels (int): Quantization levels. + dtype (np.type): The type of the quantized array. + + Returns: + tuple: Quantized array. + """ + if not (isinstance(levels, int) and levels > 1): + raise ValueError(f'levels must be a positive integer, but got {levels}') + if min_val >= max_val: + raise ValueError(f'min_val ({min_val}) must be smaller than max_val ({max_val})') + + arr = np.clip(arr, min_val, max_val) - min_val + quantized_arr = np.minimum(np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1) + + return quantized_arr + + +def dequantize(arr, min_val, max_val, levels, dtype=np.float64): + """Dequantize an array. + + Args: + arr (ndarray): Input array. + min_val (scalar): Minimum value to be clipped. + max_val (scalar): Maximum value to be clipped. + levels (int): Quantization levels. + dtype (np.type): The type of the dequantized array. + + Returns: + tuple: Dequantized array. 
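+
+    Example (approximately inverts quantize() above; outputs shown rounded):
+        >>> dequantize(np.array([0, 254], np.uint8), -0.02, 0.02, 255)  # ~[-0.0199, 0.0199]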
+ """ + if not (isinstance(levels, int) and levels > 1): + raise ValueError(f'levels must be a positive integer, but got {levels}') + if min_val >= max_val: + raise ValueError(f'min_val ({min_val}) must be smaller than max_val ({max_val})') + + dequantized_arr = (arr + 0.5).astype(dtype) * (max_val - min_val) / levels + min_val + + return dequantized_arr diff --git a/basicsr/utils/img_process_util.py b/basicsr/utils/img_process_util.py new file mode 100644 index 0000000000000000000000000000000000000000..52e02f09930dbf13bcd12bbe16b76e4fce52578e --- /dev/null +++ b/basicsr/utils/img_process_util.py @@ -0,0 +1,83 @@ +import cv2 +import numpy as np +import torch +from torch.nn import functional as F + + +def filter2D(img, kernel): + """PyTorch version of cv2.filter2D + + Args: + img (Tensor): (b, c, h, w) + kernel (Tensor): (b, k, k) + """ + k = kernel.size(-1) + b, c, h, w = img.size() + if k % 2 == 1: + img = F.pad(img, (k // 2, k // 2, k // 2, k // 2), mode='reflect') + else: + raise ValueError('Wrong kernel size') + + ph, pw = img.size()[-2:] + + if kernel.size(0) == 1: + # apply the same kernel to all batch images + img = img.view(b * c, 1, ph, pw) + kernel = kernel.view(1, 1, k, k) + return F.conv2d(img, kernel, padding=0).view(b, c, h, w) + else: + img = img.view(1, b * c, ph, pw) + kernel = kernel.view(b, 1, k, k).repeat(1, c, 1, 1).view(b * c, 1, k, k) + return F.conv2d(img, kernel, groups=b * c).view(b, c, h, w) + + +def usm_sharp(img, weight=0.5, radius=50, threshold=10): + """USM sharpening. + + Input image: I; Blurry image: B. + 1. sharp = I + weight * (I - B) + 2. Mask = 1 if abs(I - B) > threshold, else: 0 + 3. Blur mask: + 4. Out = Mask * sharp + (1 - Mask) * I + + + Args: + img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. + weight (float): Sharp weight. Default: 1. + radius (float): Kernel size of Gaussian blur. Default: 50. + threshold (int): + """ + if radius % 2 == 0: + radius += 1 + blur = cv2.GaussianBlur(img, (radius, radius), 0) + residual = img - blur + mask = np.abs(residual) * 255 > threshold + mask = mask.astype('float32') + soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) + + sharp = img + weight * residual + sharp = np.clip(sharp, 0, 1) + return soft_mask * sharp + (1 - soft_mask) * img + + +class USMSharp(torch.nn.Module): + + def __init__(self, radius=50, sigma=0): + super(USMSharp, self).__init__() + if radius % 2 == 0: + radius += 1 + self.radius = radius + kernel = cv2.getGaussianKernel(radius, sigma) + kernel = torch.FloatTensor(np.dot(kernel, kernel.transpose())).unsqueeze_(0) + self.register_buffer('kernel', kernel) + + def forward(self, img, weight=0.5, threshold=10): + blur = filter2D(img, self.kernel) + residual = img - blur + + mask = torch.abs(residual) * 255 > threshold + mask = mask.float() + soft_mask = filter2D(mask, self.kernel) + sharp = img + weight * residual + sharp = torch.clip(sharp, 0, 1) + return soft_mask * sharp + (1 - soft_mask) * img diff --git a/basicsr/utils/img_util.py b/basicsr/utils/img_util.py new file mode 100644 index 0000000000000000000000000000000000000000..921bcb0e33093b480924d75d27706dc38783554b --- /dev/null +++ b/basicsr/utils/img_util.py @@ -0,0 +1,227 @@ +import cv2 +import math +import numpy as np +import os +import torch +from torchvision.utils import make_grid + + +def img2tensor(imgs, bgr2rgb=True, float32=True): + """Numpy array to tensor. + + Args: + imgs (list[ndarray] | ndarray): Input images. + bgr2rgb (bool): Whether to change bgr to rgb. + float32 (bool): Whether to change to float32. 
+ + Returns: + list[tensor] | tensor: Tensor images. If returned results only have + one element, just return tensor. + """ + + def _totensor(img, bgr2rgb, float32): + if img.shape[2] == 3 and bgr2rgb: + if img.dtype == 'float64': + img = img.astype('float32') + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = torch.from_numpy(img.transpose(2, 0, 1)) + if float32: + img = img.float() + return img + + if isinstance(imgs, list): + return [_totensor(img, bgr2rgb, float32) for img in imgs] + else: + return _totensor(imgs, bgr2rgb, float32) + + +def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)): + """Convert torch Tensors into image numpy arrays. + + After clamping to [min, max], values will be normalized to [0, 1]. + + Args: + tensor (Tensor or list[Tensor]): Accept shapes: + 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); + 2) 3D Tensor of shape (3/1 x H x W); + 3) 2D Tensor of shape (H x W). + Tensor channel should be in RGB order. + rgb2bgr (bool): Whether to change rgb to bgr. + out_type (numpy type): output types. If ``np.uint8``, transform outputs + to uint8 type with range [0, 255]; otherwise, float type with + range [0, 1]. Default: ``np.uint8``. + min_max (tuple[int]): min and max values for clamp. + + Returns: + (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of + shape (H x W). The channel order is BGR. + """ + if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): + raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}') + + if torch.is_tensor(tensor): + tensor = [tensor] + result = [] + for _tensor in tensor: + _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max) + _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0]) + + n_dim = _tensor.dim() + if n_dim == 4: + img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy() + img_np = img_np.transpose(1, 2, 0) + if rgb2bgr: + img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) + elif n_dim == 3: + img_np = _tensor.numpy() + img_np = img_np.transpose(1, 2, 0) + if img_np.shape[2] == 1: # gray image + img_np = np.squeeze(img_np, axis=2) + else: + if rgb2bgr: + img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) + elif n_dim == 2: + img_np = _tensor.numpy() + else: + raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}') + if out_type == np.uint8: + # Unlike MATLAB, numpy.unit8() WILL NOT round by default. + img_np = (img_np * 255.0).round() + img_np = img_np.astype(out_type) + result.append(img_np) + if len(result) == 1: + result = result[0] + return result + + +def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)): + """This implementation is slightly faster than tensor2img. + It now only supports torch tensor with shape (1, c, h, w). + + Args: + tensor (Tensor): Now only support torch tensor with (1, c, h, w). + rgb2bgr (bool): Whether to change rgb to bgr. Default: True. + min_max (tuple[int]): min and max values for clamp. + """ + output = tensor.squeeze(0).detach().clamp_(*min_max).permute(1, 2, 0) + output = (output - min_max[0]) / (min_max[1] - min_max[0]) * 255 + output = output.type(torch.uint8).cpu().numpy() + if rgb2bgr: + output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) + return output + + +def imfrombytes(content, flag='color', float32=False): + """Read an image from bytes. + + Args: + content (bytes): Image bytes got from files or other streams. 
+ flag (str): Flags specifying the color type of a loaded image, + candidates are `color`, `grayscale` and `unchanged`. + float32 (bool): Whether to change to float32., If True, will also norm + to [0, 1]. Default: False. + + Returns: + ndarray: Loaded image array. + """ + img_np = np.frombuffer(content, np.uint8) + imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED} + img = cv2.imdecode(img_np, imread_flags[flag]) + if float32: + img = img.astype(np.float32) / 255. + return img + + +def imwrite(img, file_path, params=None, auto_mkdir=True): + """Write image to file. + + Args: + img (ndarray): Image array to be written. + file_path (str): Image file path. + params (None or list): Same as opencv's :func:`imwrite` interface. + auto_mkdir (bool): If the parent folder of `file_path` does not exist, + whether to create it automatically. + + Returns: + bool: Successful or not. + """ + if auto_mkdir: + dir_name = os.path.abspath(os.path.dirname(file_path)) + os.makedirs(dir_name, exist_ok=True) + ok = cv2.imwrite(file_path, img, params) + if not ok: + raise IOError('Failed in writing images.') + + +def crop_border(imgs, crop_border): + """Crop borders of images. + + Args: + imgs (list[ndarray] | ndarray): Images with shape (h, w, c). + crop_border (int): Crop border for each end of height and weight. + + Returns: + list[ndarray]: Cropped images. + """ + if crop_border == 0: + return imgs + else: + if isinstance(imgs, list): + return [v[crop_border:-crop_border, crop_border:-crop_border, ...] for v in imgs] + else: + return imgs[crop_border:-crop_border, crop_border:-crop_border, ...] + + +def tensor_lab2rgb(labs, illuminant="D65", observer="2"): + """ + Args: + lab : (B, C, H, W) + Returns: + tuple : (C, H, W) + """ + illuminants = \ + {"A": {'2': (1.098466069456375, 1, 0.3558228003436005), + '10': (1.111420406956693, 1, 0.3519978321919493)}, + "D50": {'2': (0.9642119944211994, 1, 0.8251882845188288), + '10': (0.9672062750333777, 1, 0.8142801513128616)}, + "D55": {'2': (0.956797052643698, 1, 0.9214805860173273), + '10': (0.9579665682254781, 1, 0.9092525159847462)}, + "D65": {'2': (0.95047, 1., 1.08883), # This was: `lab_ref_white` + '10': (0.94809667673716, 1, 1.0730513595166162)}, + "D75": {'2': (0.9497220898840717, 1, 1.226393520724154), + '10': (0.9441713925645873, 1, 1.2064272211720228)}, + "E": {'2': (1.0, 1.0, 1.0), + '10': (1.0, 1.0, 1.0)}} + xyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423], [0.212671, 0.715160, 0.072169], + [0.019334, 0.119193, 0.950227]]) + + rgb_from_xyz = np.array([[3.240481340, -0.96925495, 0.055646640], [-1.53715152, 1.875990000, -0.20404134], + [-0.49853633, 0.041555930, 1.057311070]]) + B, C, H, W = labs.shape + arrs = labs.permute((0, 2, 3, 1)).contiguous() # (B, 3, H, W) -> (B, H, W, 3) + L, a, b = arrs[:, :, :, 0:1], arrs[:, :, :, 1:2], arrs[:, :, :, 2:] + y = (L + 16.) / 116. + x = (a / 500.) + y + z = y - (b / 200.) + invalid = z.data < 0 + z[invalid] = 0 + xyz = torch.cat([x, y, z], dim=3) + mask = xyz.data > 0.2068966 + mask_xyz = xyz.clone() + mask_xyz[mask] = torch.pow(xyz[mask], 3.0) + mask_xyz[~mask] = (xyz[~mask] - 16.0 / 116.) 
/ 7.787 + xyz_ref_white = illuminants[illuminant][observer] + for i in range(C): + mask_xyz[:, :, :, i] = mask_xyz[:, :, :, i] * xyz_ref_white[i] + + rgb_trans = torch.mm(mask_xyz.view(-1, 3), torch.from_numpy(rgb_from_xyz).type_as(xyz)).view(B, H, W, C) + rgb = rgb_trans.permute((0, 3, 1, 2)).contiguous() + mask = rgb.data > 0.0031308 + mask_rgb = rgb.clone() + mask_rgb[mask] = 1.055 * torch.pow(rgb[mask], 1 / 2.4) - 0.055 + mask_rgb[~mask] = rgb[~mask] * 12.92 + neg_mask = mask_rgb.data < 0 + large_mask = mask_rgb.data > 1 + mask_rgb[neg_mask] = 0 + mask_rgb[large_mask] = 1 + return mask_rgb \ No newline at end of file diff --git a/basicsr/utils/lmdb_util.py b/basicsr/utils/lmdb_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e0a10f60ffca2e36ac5f5564aafd70e79d06a723 --- /dev/null +++ b/basicsr/utils/lmdb_util.py @@ -0,0 +1,196 @@ +import cv2 +import lmdb +import sys +from multiprocessing import Pool +from os import path as osp +from tqdm import tqdm + + +def make_lmdb_from_imgs(data_path, + lmdb_path, + img_path_list, + keys, + batch=5000, + compress_level=1, + multiprocessing_read=False, + n_thread=40, + map_size=None): + """Make lmdb from images. + + Contents of lmdb. The file structure is: + example.lmdb + ├── data.mdb + ├── lock.mdb + ├── meta_info.txt + + The data.mdb and lock.mdb are standard lmdb files and you can refer to + https://lmdb.readthedocs.io/en/release/ for more details. + + The meta_info.txt is a specified txt file to record the meta information + of our datasets. It will be automatically created when preparing + datasets by our provided dataset tools. + Each line in the txt file records 1)image name (with extension), + 2)image shape, and 3)compression level, separated by a white space. + + For example, the meta information could be: + `000_00000000.png (720,1280,3) 1`, which means: + 1) image name (with extension): 000_00000000.png; + 2) image shape: (720,1280,3); + 3) compression level: 1 + + We use the image name without extension as the lmdb key. + + If `multiprocessing_read` is True, it will read all the images to memory + using multiprocessing. Thus, your server needs to have enough memory. + + Args: + data_path (str): Data path for reading images. + lmdb_path (str): Lmdb save path. + img_path_list (str): Image path list. + keys (str): Used for lmdb keys. + batch (int): After processing batch images, lmdb commits. + Default: 5000. + compress_level (int): Compress level when encoding images. Default: 1. + multiprocessing_read (bool): Whether use multiprocessing to read all + the images to memory. Default: False. + n_thread (int): For multiprocessing. + map_size (int | None): Map size for lmdb env. If None, use the + estimated size from images. Default: None + """ + + assert len(img_path_list) == len(keys), ('img_path_list and keys should have the same length, ' + f'but got {len(img_path_list)} and {len(keys)}') + print(f'Create lmdb for {data_path}, save to {lmdb_path}...') + print(f'Totoal images: {len(img_path_list)}') + if not lmdb_path.endswith('.lmdb'): + raise ValueError("lmdb_path must end with '.lmdb'.") + if osp.exists(lmdb_path): + print(f'Folder {lmdb_path} already exists. 
Exit.') + sys.exit(1) + + if multiprocessing_read: + # read all the images to memory (multiprocessing) + dataset = {} # use dict to keep the order for multiprocessing + shapes = {} + print(f'Read images with multiprocessing, #thread: {n_thread} ...') + pbar = tqdm(total=len(img_path_list), unit='image') + + def callback(arg): + """get the image data and update pbar.""" + key, dataset[key], shapes[key] = arg + pbar.update(1) + pbar.set_description(f'Read {key}') + + pool = Pool(n_thread) + for path, key in zip(img_path_list, keys): + pool.apply_async(read_img_worker, args=(osp.join(data_path, path), key, compress_level), callback=callback) + pool.close() + pool.join() + pbar.close() + print(f'Finish reading {len(img_path_list)} images.') + + # create lmdb environment + if map_size is None: + # obtain data size for one image + img = cv2.imread(osp.join(data_path, img_path_list[0]), cv2.IMREAD_UNCHANGED) + _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]) + data_size_per_img = img_byte.nbytes + print('Data size per image is: ', data_size_per_img) + data_size = data_size_per_img * len(img_path_list) + map_size = data_size * 10 + + env = lmdb.open(lmdb_path, map_size=map_size) + + # write data to lmdb + pbar = tqdm(total=len(img_path_list), unit='chunk') + txn = env.begin(write=True) + txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w') + for idx, (path, key) in enumerate(zip(img_path_list, keys)): + pbar.update(1) + pbar.set_description(f'Write {key}') + key_byte = key.encode('ascii') + if multiprocessing_read: + img_byte = dataset[key] + h, w, c = shapes[key] + else: + _, img_byte, img_shape = read_img_worker(osp.join(data_path, path), key, compress_level) + h, w, c = img_shape + + txn.put(key_byte, img_byte) + # write meta information + txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n') + if idx % batch == 0: + txn.commit() + txn = env.begin(write=True) + pbar.close() + txn.commit() + env.close() + txt_file.close() + print('\nFinish writing lmdb.') + + +def read_img_worker(path, key, compress_level): + """Read image worker. + + Args: + path (str): Image path. + key (str): Image key. + compress_level (int): Compress level when encoding images. + + Returns: + str: Image key. + byte: Image byte. + tuple[int]: Image shape. + """ + + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) + if img.ndim == 2: + h, w = img.shape + c = 1 + else: + h, w, c = img.shape + _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]) + return (key, img_byte, (h, w, c)) + + +class LmdbMaker(): + """LMDB Maker. + + Args: + lmdb_path (str): Lmdb save path. + map_size (int): Map size for lmdb env. Default: 1024 ** 4, 1TB. + batch (int): After processing batch images, lmdb commits. + Default: 5000. + compress_level (int): Compress level when encoding images. Default: 1. + """ + + def __init__(self, lmdb_path, map_size=1024**4, batch=5000, compress_level=1): + if not lmdb_path.endswith('.lmdb'): + raise ValueError("lmdb_path must end with '.lmdb'.") + if osp.exists(lmdb_path): + print(f'Folder {lmdb_path} already exists. 
Exit.') + sys.exit(1) + + self.lmdb_path = lmdb_path + self.batch = batch + self.compress_level = compress_level + self.env = lmdb.open(lmdb_path, map_size=map_size) + self.txn = self.env.begin(write=True) + self.txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w') + self.counter = 0 + + def put(self, img_byte, key, img_shape): + self.counter += 1 + key_byte = key.encode('ascii') + self.txn.put(key_byte, img_byte) + # write meta information + h, w, c = img_shape + self.txt_file.write(f'{key}.png ({h},{w},{c}) {self.compress_level}\n') + if self.counter % self.batch == 0: + self.txn.commit() + self.txn = self.env.begin(write=True) + + def close(self): + self.txn.commit() + self.env.close() + self.txt_file.close() diff --git a/basicsr/utils/logger.py b/basicsr/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..2e1dd13b423a0b9a4cee6ffe0f1213c995068b16 --- /dev/null +++ b/basicsr/utils/logger.py @@ -0,0 +1,209 @@ +import datetime +import logging +import time + +from .dist_util import get_dist_info, master_only + +initialized_logger = {} + + +class AvgTimer(): + + def __init__(self, window=200): + self.window = window # average window + self.current_time = 0 + self.total_time = 0 + self.count = 0 + self.avg_time = 0 + self.start() + + def start(self): + self.start_time = time.time() + + def record(self): + self.count += 1 + self.current_time = time.time() - self.start_time + self.total_time += self.current_time + # calculate average time + self.avg_time = self.total_time / self.count + # reset + if self.count > self.window: + self.count = 0 + self.total_time = 0 + + def get_current_time(self): + return self.current_time + + def get_avg_time(self): + return self.avg_time + + +class MessageLogger(): + """Message logger for printing. + + Args: + opt (dict): Config. It contains the following keys: + name (str): Exp name. + logger (dict): Contains 'print_freq' (str) for logger interval. + train (dict): Contains 'total_iter' (int) for total iters. + use_tb_logger (bool): Use tensorboard logger. + start_iter (int): Start iter. Default: 1. + tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None. + """ + + def __init__(self, opt, start_iter=1, tb_logger=None): + self.exp_name = opt['name'] + self.interval = opt['logger']['print_freq'] + self.start_iter = start_iter + self.max_iters = opt['train']['total_iter'] + self.use_tb_logger = opt['logger']['use_tb_logger'] + self.tb_logger = tb_logger + self.start_time = time.time() + self.logger = get_root_logger() + + def reset_start_time(self): + self.start_time = time.time() + + @master_only + def __call__(self, log_vars): + """Format logging message. + + Args: + log_vars (dict): It contains the following keys: + epoch (int): Epoch number. + iter (int): Current iter. + lrs (list): List for learning rates. + + time (float): Iter time. + data_time (float): Data time for each iter. 
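+
+        For illustration, a typical ``log_vars`` from the training loop might
+        look like this (values invented; ``l_pix`` stands in for any loss
+        item)::
+
+            log_vars = {'epoch': 1, 'iter': 100, 'lrs': [1e-4],
+                        'time': 0.35, 'data_time': 0.02, 'l_pix': 0.012}
+            msg_logger(log_vars)  # msg_logger is a MessageLogger instance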
+ """ + # epoch, iter, learning rates + epoch = log_vars.pop('epoch') + current_iter = log_vars.pop('iter') + lrs = log_vars.pop('lrs') + + message = (f'[{self.exp_name[:5]}..][epoch:{epoch:3d}, iter:{current_iter:8,d}, lr:(') + for v in lrs: + message += f'{v:.3e},' + message += ')] ' + + # time and estimated time + if 'time' in log_vars.keys(): + iter_time = log_vars.pop('time') + data_time = log_vars.pop('data_time') + + total_time = time.time() - self.start_time + time_sec_avg = total_time / (current_iter - self.start_iter + 1) + eta_sec = time_sec_avg * (self.max_iters - current_iter - 1) + eta_str = str(datetime.timedelta(seconds=int(eta_sec))) + message += f'[eta: {eta_str}, ' + message += f'time (data): {iter_time:.3f} ({data_time:.3f})] ' + + # other items, especially losses + for k, v in log_vars.items(): + message += f'{k}: {v:.4e} ' + # tensorboard logger + if self.use_tb_logger and 'debug' not in self.exp_name: + if k.startswith('l_'): + self.tb_logger.add_scalar(f'losses/{k}', v, current_iter) + else: + self.tb_logger.add_scalar(k, v, current_iter) + self.logger.info(message) + + +@master_only +def init_tb_logger(log_dir): + from torch.utils.tensorboard import SummaryWriter + tb_logger = SummaryWriter(log_dir=log_dir) + return tb_logger + + +@master_only +def init_wandb_logger(opt): + """We now only use wandb to sync tensorboard log.""" + import wandb + logger = get_root_logger() + + project = opt['logger']['wandb']['project'] + resume_id = opt['logger']['wandb'].get('resume_id') + if resume_id: + wandb_id = resume_id + resume = 'allow' + logger.warning(f'Resume wandb logger with id={wandb_id}.') + else: + wandb_id = wandb.util.generate_id() + resume = 'never' + + wandb.init(id=wandb_id, resume=resume, name=opt['name'], config=opt, project=project, sync_tensorboard=True) + + logger.info(f'Use wandb logger with id={wandb_id}; project={project}.') + + +def get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=None): + """Get the root logger. + + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If `log_file` is specified, a FileHandler will + also be added. + + Args: + logger_name (str): root logger name. Default: 'basicsr'. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the root logger. + log_level (int): The root logger level. Note that only the process of + rank 0 is affected, while other processes will set the level to + "Error" and be silent most of the time. + + Returns: + logging.Logger: The root logger. + """ + logger = logging.getLogger(logger_name) + # if the logger has been initialized, just return it + if logger_name in initialized_logger: + return logger + + format_str = '%(asctime)s %(levelname)s: %(message)s' + stream_handler = logging.StreamHandler() + stream_handler.setFormatter(logging.Formatter(format_str)) + logger.addHandler(stream_handler) + logger.propagate = False + rank, _ = get_dist_info() + if rank != 0: + logger.setLevel('ERROR') + elif log_file is not None: + logger.setLevel(log_level) + # add file handler + file_handler = logging.FileHandler(log_file, 'w') + file_handler.setFormatter(logging.Formatter(format_str)) + file_handler.setLevel(log_level) + logger.addHandler(file_handler) + initialized_logger[logger_name] = True + return logger + + +def get_env_info(): + """Get environment information. + + Currently, only log the software version. 
+ """ + import torch + import torchvision + + from basicsr.version import __version__ + msg = r""" + ____ _ _____ ____ + / __ ) ____ _ _____ (_)_____/ ___/ / __ \ + / __ |/ __ `// ___// // ___/\__ \ / /_/ / + / /_/ // /_/ /(__ )/ // /__ ___/ // _, _/ + /_____/ \__,_//____//_/ \___//____//_/ |_| + ______ __ __ __ __ + / ____/____ ____ ____/ / / / __ __ _____ / /__ / / + / / __ / __ \ / __ \ / __ / / / / / / // ___// //_/ / / + / /_/ // /_/ // /_/ // /_/ / / /___/ /_/ // /__ / /< /_/ + \____/ \____/ \____/ \____/ /_____/\____/ \___//_/|_| (_) + """ + msg += ('\nVersion Information: ' + f'\n\tBasicSR: {__version__}' + f'\n\tPyTorch: {torch.__version__}' + f'\n\tTorchVision: {torchvision.__version__}') + return msg diff --git a/basicsr/utils/matlab_functions.py b/basicsr/utils/matlab_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..f9f1a83bc8beee468dd7c9ca734966e926fd9fde --- /dev/null +++ b/basicsr/utils/matlab_functions.py @@ -0,0 +1,359 @@ +import math +import numpy as np +import torch + + +def cubic(x): + """cubic function used for calculate_weights_indices.""" + absx = torch.abs(x) + absx2 = absx**2 + absx3 = absx**3 + return (1.5 * absx3 - 2.5 * absx2 + 1) * ( + (absx <= 1).type_as(absx)) + (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (((absx > 1) * + (absx <= 2)).type_as(absx)) + + +def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing): + """Calculate weights and indices, used for imresize function. + + Args: + in_length (int): Input length. + out_length (int): Output length. + scale (float): Scale factor. + kernel_width (int): Kernel width. + antialisaing (bool): Whether to apply anti-aliasing when downsampling. + """ + + if (scale < 1) and antialiasing: + # Use a modified kernel (larger kernel width) to simultaneously + # interpolate and antialias + kernel_width = kernel_width / scale + + # Output-space coordinates + x = torch.linspace(1, out_length, out_length) + + # Input-space coordinates. Calculate the inverse mapping such that 0.5 + # in output space maps to 0.5 in input space, and 0.5 + scale in output + # space maps to 1.5 in input space. + u = x / scale + 0.5 * (1 - 1 / scale) + + # What is the left-most pixel that can be involved in the computation? + left = torch.floor(u - kernel_width / 2) + + # What is the maximum number of pixels that can be involved in the + # computation? Note: it's OK to use an extra pixel here; if the + # corresponding weights are all zero, it will be eliminated at the end + # of this function. + p = math.ceil(kernel_width) + 2 + + # The indices of the input pixels involved in computing the k-th output + # pixel are in row k of the indices matrix. + indices = left.view(out_length, 1).expand(out_length, p) + torch.linspace(0, p - 1, p).view(1, p).expand( + out_length, p) + + # The weights used to compute the k-th output pixel are in row k of the + # weights matrix. + distance_to_center = u.view(out_length, 1).expand(out_length, p) - indices + + # apply cubic kernel + if (scale < 1) and antialiasing: + weights = scale * cubic(distance_to_center * scale) + else: + weights = cubic(distance_to_center) + + # Normalize the weights matrix so that each row sums to 1. + weights_sum = torch.sum(weights, 1).view(out_length, 1) + weights = weights / weights_sum.expand(out_length, p) + + # If a column in weights is all zero, get rid of it. only consider the + # first and last column. 
+ weights_zero_tmp = torch.sum((weights == 0), 0) + if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): + indices = indices.narrow(1, 1, p - 2) + weights = weights.narrow(1, 1, p - 2) + if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): + indices = indices.narrow(1, 0, p - 2) + weights = weights.narrow(1, 0, p - 2) + weights = weights.contiguous() + indices = indices.contiguous() + sym_len_s = -indices.min() + 1 + sym_len_e = indices.max() - in_length + indices = indices + sym_len_s - 1 + return weights, indices, int(sym_len_s), int(sym_len_e) + + +@torch.no_grad() +def imresize(img, scale, antialiasing=True): + """imresize function same as MATLAB. + + It now only supports bicubic. + The same scale applies for both height and width. + + Args: + img (Tensor | Numpy array): + Tensor: Input image with shape (c, h, w), [0, 1] range. + Numpy: Input image with shape (h, w, c), [0, 1] range. + scale (float): Scale factor. The same scale applies for both height + and width. + antialisaing (bool): Whether to apply anti-aliasing when downsampling. + Default: True. + + Returns: + Tensor: Output image with shape (c, h, w), [0, 1] range, w/o round. + """ + squeeze_flag = False + if type(img).__module__ == np.__name__: # numpy type + numpy_type = True + if img.ndim == 2: + img = img[:, :, None] + squeeze_flag = True + img = torch.from_numpy(img.transpose(2, 0, 1)).float() + else: + numpy_type = False + if img.ndim == 2: + img = img.unsqueeze(0) + squeeze_flag = True + + in_c, in_h, in_w = img.size() + out_h, out_w = math.ceil(in_h * scale), math.ceil(in_w * scale) + kernel_width = 4 + kernel = 'cubic' + + # get weights and indices + weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(in_h, out_h, scale, kernel, kernel_width, + antialiasing) + weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(in_w, out_w, scale, kernel, kernel_width, + antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_c, in_h + sym_len_hs + sym_len_he, in_w) + img_aug.narrow(1, sym_len_hs, in_h).copy_(img) + + sym_patch = img[:, :sym_len_hs, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, 0, sym_len_hs).copy_(sym_patch_inv) + + sym_patch = img[:, -sym_len_he:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, sym_len_hs + in_h, sym_len_he).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(in_c, out_h, in_w) + kernel_width = weights_h.size(1) + for i in range(out_h): + idx = int(indices_h[i][0]) + for j in range(in_c): + out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_h[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(in_c, out_h, in_w + sym_len_ws + sym_len_we) + out_1_aug.narrow(2, sym_len_ws, in_w).copy_(out_1) + + sym_patch = out_1[:, :, :sym_len_ws] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, 0, sym_len_ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, :, -sym_len_we:] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, sym_len_ws + in_w, sym_len_we).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(in_c, out_h, out_w) + kernel_width = weights_w.size(1) + for i in range(out_w): + idx = int(indices_w[i][0]) + 
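+        # each output column i is, per channel, a dot product of kernel_width
+        # neighbouring input columns with the precomputed bicubic weights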
for j in range(in_c): + out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_w[i]) + + if squeeze_flag: + out_2 = out_2.squeeze(0) + if numpy_type: + out_2 = out_2.numpy() + if not squeeze_flag: + out_2 = out_2.transpose(1, 2, 0) + + return out_2 + + +def rgb2ycbcr(img, y_only=False): + """Convert a RGB image to YCbCr image. + + This function produces the same results as Matlab's `rgb2ycbcr` function. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + y_only (bool): Whether to only return Y channel. Default: False. + + Returns: + ndarray: The converted YCbCr image. The output image has the same type + and range as input image. + """ + img_type = img.dtype + img = _convert_input_type_range(img) + if y_only: + out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0 + else: + out_img = np.matmul( + img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], [24.966, 112.0, -18.214]]) + [16, 128, 128] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def bgr2ycbcr(img, y_only=False): + """Convert a BGR image to YCbCr image. + + The bgr version of rgb2ycbcr. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + y_only (bool): Whether to only return Y channel. Default: False. + + Returns: + ndarray: The converted YCbCr image. The output image has the same type + and range as input image. + """ + img_type = img.dtype + img = _convert_input_type_range(img) + if y_only: + out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0 + else: + out_img = np.matmul( + img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def ycbcr2rgb(img): + """Convert a YCbCr image to RGB image. + + This function produces the same results as Matlab's ycbcr2rgb function. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + + Returns: + ndarray: The converted RGB image. The output image has the same type + and range as input image. 
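+
+    For illustration (random float32 data; uses ``rgb2ycbcr`` from this
+    module, so the round trip is approximate)::
+
+        >>> import numpy as np
+        >>> ycbcr = rgb2ycbcr(np.random.rand(8, 8, 3).astype(np.float32))
+        >>> rgb = ycbcr2rgb(ycbcr)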
+ """ + img_type = img.dtype + img = _convert_input_type_range(img) * 255 + out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071], + [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] # noqa: E126 + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def ycbcr2bgr(img): + """Convert a YCbCr image to BGR image. + + The bgr version of ycbcr2rgb. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + + Returns: + ndarray: The converted BGR image. The output image has the same type + and range as input image. + """ + img_type = img.dtype + img = _convert_input_type_range(img) * 255 + out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0.00791071, -0.00153632, 0], + [0, -0.00318811, 0.00625893]]) * 255.0 + [-276.836, 135.576, -222.921] # noqa: E126 + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def _convert_input_type_range(img): + """Convert the type and range of the input image. + + It converts the input image to np.float32 type and range of [0, 1]. + It is mainly used for pre-processing the input image in colorspace + conversion functions such as rgb2ycbcr and ycbcr2rgb. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + + Returns: + (ndarray): The converted image with type of np.float32 and range of + [0, 1]. + """ + img_type = img.dtype + img = img.astype(np.float32) + if img_type == np.float32: + pass + elif img_type == np.uint8: + img /= 255. + else: + raise TypeError(f'The img type should be np.float32 or np.uint8, but got {img_type}') + return img + + +def _convert_output_type_range(img, dst_type): + """Convert the type and range of the image according to dst_type. + + It converts the image to desired type and range. If `dst_type` is np.uint8, + images will be converted to np.uint8 type with range [0, 255]. If + `dst_type` is np.float32, it converts the image to np.float32 type with + range [0, 1]. + It is mainly used for post-processing images in colorspace conversion + functions such as rgb2ycbcr and ycbcr2rgb. + + Args: + img (ndarray): The image to be converted with np.float32 type and + range [0, 255]. + dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it + converts the image to np.uint8 type with range [0, 255]. If + dst_type is np.float32, it converts the image to np.float32 type + with range [0, 1]. + + Returns: + (ndarray): The converted image with desired type and range. + """ + if dst_type not in (np.uint8, np.float32): + raise TypeError(f'The dst_type should be np.float32 or np.uint8, but got {dst_type}') + if dst_type == np.uint8: + img = img.round() + else: + img /= 255. 
+ return img.astype(dst_type) diff --git a/basicsr/utils/misc.py b/basicsr/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..728fef857d0071875c82ffcbc8c74b6fbe029e22 --- /dev/null +++ b/basicsr/utils/misc.py @@ -0,0 +1,141 @@ +import numpy as np +import os +import random +import time +import torch +from os import path as osp + +from .dist_util import master_only + + +def set_random_seed(seed): + """Set random seeds.""" + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + +def get_time_str(): + return time.strftime('%Y%m%d_%H%M%S', time.localtime()) + + +def mkdir_and_rename(path): + """mkdirs. If path exists, rename it with timestamp and create a new one. + + Args: + path (str): Folder path. + """ + if osp.exists(path): + new_name = path + '_archived_' + get_time_str() + print(f'Path already exists. Rename it to {new_name}', flush=True) + os.rename(path, new_name) + os.makedirs(path, exist_ok=True) + + +@master_only +def make_exp_dirs(opt): + """Make dirs for experiments.""" + path_opt = opt['path'].copy() + if opt['is_train']: + mkdir_and_rename(path_opt.pop('experiments_root')) + else: + mkdir_and_rename(path_opt.pop('results_root')) + for key, path in path_opt.items(): + if ('strict_load' in key) or ('pretrain_network' in key) or ('resume' in key) or ('param_key' in key): + continue + else: + os.makedirs(path, exist_ok=True) + + +def scandir(dir_path, suffix=None, recursive=False, full_path=False): + """Scan a directory to find the interested files. + + Args: + dir_path (str): Path of the directory. + suffix (str | tuple(str), optional): File suffix that we are + interested in. Default: None. + recursive (bool, optional): If set to True, recursively scan the + directory. Default: False. + full_path (bool, optional): If set to True, include the dir_path. + Default: False. + + Returns: + A generator for all the interested files with relative paths. + """ + + if (suffix is not None) and not isinstance(suffix, (str, tuple)): + raise TypeError('"suffix" must be a string or tuple of strings') + + root = dir_path + + def _scandir(dir_path, suffix, recursive): + for entry in os.scandir(dir_path): + if not entry.name.startswith('.') and entry.is_file(): + if full_path: + return_path = entry.path + else: + return_path = osp.relpath(entry.path, root) + + if suffix is None: + yield return_path + elif return_path.endswith(suffix): + yield return_path + else: + if recursive: + yield from _scandir(entry.path, suffix=suffix, recursive=recursive) + else: + continue + + return _scandir(dir_path, suffix=suffix, recursive=recursive) + + +def check_resume(opt, resume_iter): + """Check resume states and pretrain_network paths. + + Args: + opt (dict): Options. + resume_iter (int): Resume iteration. 
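+
+    For illustration (paths invented): with ``resume_state`` set and a
+    ``network_g`` entry in the options, the call rewrites the pretrain path::
+
+        check_resume(opt, 10000)
+        # opt['path']['pretrain_network_g'] is now
+        # osp.join(opt['path']['models'], 'net_g_10000.pth')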
+ """ + if opt['path']['resume_state']: + # get all the networks + networks = [key for key in opt.keys() if key.startswith('network_')] + flag_pretrain = False + for network in networks: + if opt['path'].get(f'pretrain_{network}') is not None: + flag_pretrain = True + if flag_pretrain: + print('pretrain_network path will be ignored during resuming.') + # set pretrained model paths + for network in networks: + name = f'pretrain_{network}' + basename = network.replace('network_', '') + if opt['path'].get('ignore_resume_networks') is None or (network + not in opt['path']['ignore_resume_networks']): + opt['path'][name] = osp.join(opt['path']['models'], f'net_{basename}_{resume_iter}.pth') + print(f"Set {name} to {opt['path'][name]}") + + # change param_key to params in resume + param_keys = [key for key in opt['path'].keys() if key.startswith('param_key')] + for param_key in param_keys: + if opt['path'][param_key] == 'params_ema': + opt['path'][param_key] = 'params' + print(f'Set {param_key} to params') + + +def sizeof_fmt(size, suffix='B'): + """Get human readable file size. + + Args: + size (int): File size. + suffix (str): Suffix. Default: 'B'. + + Return: + str: Formatted file siz. + """ + for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: + if abs(size) < 1024.0: + return f'{size:3.1f} {unit}{suffix}' + size /= 1024.0 + return f'{size:3.1f} Y{suffix}' diff --git a/basicsr/utils/options.py b/basicsr/utils/options.py new file mode 100644 index 0000000000000000000000000000000000000000..0a9ab73dc05a1fbb25fb09ad57e710b5ec677a7d --- /dev/null +++ b/basicsr/utils/options.py @@ -0,0 +1,195 @@ +import argparse +import random +import torch +import yaml +from collections import OrderedDict +from os import path as osp + +from basicsr.utils import set_random_seed +from basicsr.utils.dist_util import get_dist_info, init_dist, master_only + + +def ordered_yaml(): + """Support OrderedDict for yaml. + + Returns: + yaml Loader and Dumper. + """ + try: + from yaml import CDumper as Dumper + from yaml import CLoader as Loader + except ImportError: + from yaml import Dumper, Loader + + _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG + + def dict_representer(dumper, data): + return dumper.represent_dict(data.items()) + + def dict_constructor(loader, node): + return OrderedDict(loader.construct_pairs(node)) + + Dumper.add_representer(OrderedDict, dict_representer) + Loader.add_constructor(_mapping_tag, dict_constructor) + return Loader, Dumper + + +def dict2str(opt, indent_level=1): + """dict to string for printing options. + + Args: + opt (dict): Option dict. + indent_level (int): Indent level. Default: 1. + + Return: + (str): Option string for printing. 
+ """ + msg = '\n' + for k, v in opt.items(): + if isinstance(v, dict): + msg += ' ' * (indent_level * 2) + k + ':[' + msg += dict2str(v, indent_level + 1) + msg += ' ' * (indent_level * 2) + ']\n' + else: + msg += ' ' * (indent_level * 2) + k + ': ' + str(v) + '\n' + return msg + + +def _postprocess_yml_value(value): + # None + if value == '~' or value.lower() == 'none': + return None + # bool + if value.lower() == 'true': + return True + elif value.lower() == 'false': + return False + # !!float number + if value.startswith('!!float'): + return float(value.replace('!!float', '')) + # number + if value.isdigit(): + return int(value) + elif value.replace('.', '', 1).isdigit() and value.count('.') < 2: + return float(value) + # list + if value.startswith('['): + return eval(value) + # str + return value + + +def parse_options(root_path, is_train=True): + parser = argparse.ArgumentParser() + parser.add_argument('-opt', type=str, required=True, help='Path to option YAML file.') + parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none', help='job launcher') + parser.add_argument('--auto_resume', action='store_true') + parser.add_argument('--debug', action='store_true') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument( + '--force_yml', nargs='+', default=None, help='Force to update yml files. Examples: train:ema_decay=0.999') + args = parser.parse_args() + + # parse yml to dict + with open(args.opt, mode='r') as f: + opt = yaml.load(f, Loader=ordered_yaml()[0]) + + # distributed settings + if args.launcher == 'none': + opt['dist'] = False + print('Disable distributed.', flush=True) + else: + opt['dist'] = True + if args.launcher == 'slurm' and 'dist_params' in opt: + init_dist(args.launcher, **opt['dist_params']) + else: + init_dist(args.launcher) + opt['rank'], opt['world_size'] = get_dist_info() + + # random seed + seed = opt.get('manual_seed') + if seed is None: + seed = random.randint(1, 10000) + opt['manual_seed'] = seed + set_random_seed(seed + opt['rank']) + + # force to update yml options + if args.force_yml is not None: + for entry in args.force_yml: + # now do not support creating new keys + keys, value = entry.split('=') + keys, value = keys.strip(), value.strip() + value = _postprocess_yml_value(value) + eval_str = 'opt' + for key in keys.split(':'): + eval_str += f'["{key}"]' + eval_str += '=value' + # using exec function + exec(eval_str) + + opt['auto_resume'] = args.auto_resume + opt['is_train'] = is_train + + # debug setting + if args.debug and not opt['name'].startswith('debug'): + opt['name'] = 'debug_' + opt['name'] + + if opt['num_gpu'] == 'auto': + opt['num_gpu'] = torch.cuda.device_count() + + # datasets + for phase, dataset in opt['datasets'].items(): + # for multiple datasets, e.g., val_1, val_2; test_1, test_2 + phase = phase.split('_')[0] + dataset['phase'] = phase + if 'scale' in opt: + dataset['scale'] = opt['scale'] + if dataset.get('dataroot_gt') is not None: + dataset['dataroot_gt'] = osp.expanduser(dataset['dataroot_gt']) + if dataset.get('dataroot_lq') is not None: + dataset['dataroot_lq'] = osp.expanduser(dataset['dataroot_lq']) + + # paths + for key, val in opt['path'].items(): + if (val is not None) and ('resume_state' in key or 'pretrain_network' in key): + opt['path'][key] = osp.expanduser(val) + + if is_train: + experiments_root = osp.join(root_path, 'experiments', opt['name']) + opt['path']['experiments_root'] = experiments_root + opt['path']['models'] = osp.join(experiments_root, 'models') + 
opt['path']['training_states'] = osp.join(experiments_root, 'training_states')
+        opt['path']['log'] = experiments_root
+        opt['path']['visualization'] = osp.join(experiments_root, 'visualization')
+
+        # change some options for debug mode
+        if 'debug' in opt['name']:
+            if 'val' in opt:
+                opt['val']['val_freq'] = 8
+            opt['logger']['print_freq'] = 1
+            opt['logger']['save_snapshot_freq'] = 4
+            opt['logger']['save_checkpoint_freq'] = 8
+    else:  # test
+        results_root = osp.join(root_path, 'results', opt['name'])
+        opt['path']['results_root'] = results_root
+        opt['path']['log'] = results_root
+        opt['path']['visualization'] = osp.join(results_root, 'visualization')
+
+    return opt, args
+
+
+@master_only
+def copy_opt_file(opt_file, experiments_root):
+    # copy the yml file to the experiment root and prepend the launch command
+    import sys
+    import time
+    from shutil import copyfile
+    cmd = ' '.join(sys.argv)
+    filename = osp.join(experiments_root, osp.basename(opt_file))
+    copyfile(opt_file, filename)
+
+    with open(filename, 'r+') as f:
+        lines = f.readlines()
+        lines.insert(0, f'# GENERATE TIME: {time.asctime()}\n# CMD:\n# {cmd}\n\n')
+        f.seek(0)
+        f.writelines(lines)
diff --git a/basicsr/utils/registry.py b/basicsr/utils/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..655753b3b9cbd0cfe73fe93a77cf1fcc3db6d827
--- /dev/null
+++ b/basicsr/utils/registry.py
@@ -0,0 +1,82 @@
+# Modified from: https://github.com/facebookresearch/fvcore/blob/master/fvcore/common/registry.py # noqa: E501
+
+
+class Registry():
+    """
+    The registry that provides name -> object mapping, to support third-party
+    users' custom modules.
+
+    To create a registry (e.g. a backbone registry):
+
+    .. code-block:: python
+
+        BACKBONE_REGISTRY = Registry('BACKBONE')
+
+    To register an object:
+
+    .. code-block:: python
+
+        @BACKBONE_REGISTRY.register()
+        class MyBackbone():
+            ...
+
+    Or:
+
+    .. code-block:: python
+
+        BACKBONE_REGISTRY.register(MyBackbone)
+    """
+
+    def __init__(self, name):
+        """
+        Args:
+            name (str): the name of this registry
+        """
+        self._name = name
+        self._obj_map = {}
+
+    def _do_register(self, name, obj):
+        assert (name not in self._obj_map), (f"An object named '{name}' was already registered "
+                                             f"in '{self._name}' registry!")
+        self._obj_map[name] = obj
+
+    def register(self, obj=None):
+        """
+        Register the given object under the name `obj.__name__`.
+        Can be used either as a decorator or as a plain function call.
+        See docstring of this class for usage.
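+
+        For illustration, the plain-function form paired with a lookup:
+
+        .. code-block:: python
+
+            BACKBONE_REGISTRY.register(MyBackbone)
+            assert 'MyBackbone' in BACKBONE_REGISTRY
+            cls = BACKBONE_REGISTRY.get('MyBackbone')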
+ """ + if obj is None: + # used as a decorator + def deco(func_or_class): + name = func_or_class.__name__ + self._do_register(name, func_or_class) + return func_or_class + + return deco + + # used as a function call + name = obj.__name__ + self._do_register(name, obj) + + def get(self, name): + ret = self._obj_map.get(name) + if ret is None: + raise KeyError(f"No object named '{name}' found in '{self._name}' registry!") + return ret + + def __contains__(self, name): + return name in self._obj_map + + def __iter__(self): + return iter(self._obj_map.items()) + + def keys(self): + return self._obj_map.keys() + + +DATASET_REGISTRY = Registry('dataset') +ARCH_REGISTRY = Registry('arch') +MODEL_REGISTRY = Registry('model') +LOSS_REGISTRY = Registry('loss') +METRIC_REGISTRY = Registry('metric') diff --git a/basicsr/version.py b/basicsr/version.py new file mode 100644 index 0000000000000000000000000000000000000000..ee93374887f8717fabd1b31d7edfbaadd7113166 --- /dev/null +++ b/basicsr/version.py @@ -0,0 +1,5 @@ +# GENERATED VERSION FILE +# TIME: Thu Feb 29 13:11:41 2024 +__version__ = '1.3.4.6' +__gitsha__ = '6fbe0b4' +version_info = (1, 3, 4, 6) diff --git a/cog.yaml b/cog.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d81ac738d16b86de393206aedbd0cd5acdc56288 --- /dev/null +++ b/cog.yaml @@ -0,0 +1,24 @@ +# Configuration for Cog ⚙️ +# Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md + +build: + gpu: true + system_packages: + - "libgl1-mesa-glx" + - "libglib2.0-0" + python_version: "3.11" + python_packages: + - torch==2.0.1 + - torchvision==0.15.2 + - lmdb==1.4.1 + - opencv_python==4.9.0.80 + - Pillow==10.1.0 + - PyYAML==6.0.1 + - scipy==1.11.4 + - timm==0.9.12 + - tqdm==4.65.0 + - scikit-image==0.22.0 + - ipython + run: + - pip install dlib +predict: "predict.py:Predictor" diff --git a/data_list/get_meta_file.py b/data_list/get_meta_file.py new file mode 100644 index 0000000000000000000000000000000000000000..efbdad0fdbe1ebe9608d04f914c94606d7d2d4f2 --- /dev/null +++ b/data_list/get_meta_file.py @@ -0,0 +1,29 @@ +from tqdm import tqdm +import argparse +import os + + +def find_and_save_images(data_path, output_name): + all_img_path = [] + + for root, dirs, files in os.walk(data_path): + for ext in ['png', 'jpg', 'jpeg', 'JPG', 'JPEG', 'PNG']: + all_img_path += [os.path.join(root, file) for file in files if file.lower().endswith(f'.{ext}')] + + with open(output_name, 'w') as f: + for path in tqdm(all_img_path): + f.write(path + '\n') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--output-name', type=str, required=True, default='dataset_name.txt', help='Path output file.') + parser.add_argument('--data-path', type=str, required=True, default='./', help='Path to dataset') + + args = parser.parse_args() + + print(f'Generating {args.output_name} from {args.data_path} ...') + + find_and_save_images(args.data_path, args.output_name) + + print('Done.') diff --git a/dd.py b/dd.py new file mode 100644 index 0000000000000000000000000000000000000000..e12df9e8ae7c008851b78c8c41def72bd2e3289e --- /dev/null +++ b/dd.py @@ -0,0 +1,8 @@ +import cv2 +from modelscope.outputs import OutputKeys +from modelscope.pipelines import pipeline +from modelscope.utils.constant import Tasks + +img_colorization = pipeline(Tasks.image_colorization, model='damo/cv_ddcolor_image-colorization') +result = img_colorization('https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/audrey_hepburn.jpg') +cv2.imwrite('result.png', 
result[OutputKeys.OUTPUT_IMG]) \ No newline at end of file diff --git a/gradio_app.py b/gradio_app.py new file mode 100644 index 0000000000000000000000000000000000000000..fd54d1c18d3a5d77ed3a37ec6db826dfe67fb673 --- /dev/null +++ b/gradio_app.py @@ -0,0 +1,124 @@ +import sys +sys.path.append('/DDColor') + +import argparse +import cv2 +import numpy as np +import os +from tqdm import tqdm +import torch +from basicsr.archs.ddcolor_arch import DDColor +import torch.nn.functional as F + +import gradio as gr +from gradio_imageslider import ImageSlider +import uuid +from PIL import Image + +model_path = 'modelscope/damo/cv_ddcolor_image-colorization/pytorch_model.pt' +input_size = 512 +model_size = 'large' + + +# Create Image Colorization Pipeline +class ImageColorizationPipeline(object): + + def __init__(self, model_path, input_size=256, model_size='large'): + + self.input_size = input_size + if torch.cuda.is_available(): + self.device = torch.device('cuda') + else: + self.device = torch.device('cpu') + + if model_size == 'tiny': + self.encoder_name = 'convnext-t' + else: + self.encoder_name = 'convnext-l' + + self.decoder_type = "MultiScaleColorDecoder" + + if self.decoder_type == 'MultiScaleColorDecoder': + self.model = DDColor( + encoder_name=self.encoder_name, + decoder_name='MultiScaleColorDecoder', + input_size=[self.input_size, self.input_size], + num_output_channels=2, + last_norm='Spectral', + do_normalize=False, + num_queries=100, + num_scales=3, + dec_layers=9, + ).to(self.device) + else: + self.model = DDColor( + encoder_name=self.encoder_name, + decoder_name='SingleColorDecoder', + input_size=[self.input_size, self.input_size], + num_output_channels=2, + last_norm='Spectral', + do_normalize=False, + num_queries=256, + ).to(self.device) + + self.model.load_state_dict( + torch.load(model_path, map_location=torch.device('cpu'))['params'], + strict=False) + self.model.eval() + + @torch.no_grad() + def process(self, img): + self.height, self.width = img.shape[:2] + img = (img / 255.0).astype(np.float32) + orig_l = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)[:, :, :1] # (h, w, 1) + + # resize rgb image -> lab -> get grey -> rgb + img = cv2.resize(img, (self.input_size, self.input_size)) + img_l = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)[:, :, :1] + img_gray_lab = np.concatenate((img_l, np.zeros_like(img_l), np.zeros_like(img_l)), axis=-1) + img_gray_rgb = cv2.cvtColor(img_gray_lab, cv2.COLOR_LAB2RGB) + + tensor_gray_rgb = torch.from_numpy(img_gray_rgb.transpose((2, 0, 1))).float().unsqueeze(0).to(self.device) + output_ab = self.model(tensor_gray_rgb).cpu() # (1, 2, self.height, self.width) + + # resize ab -> concat original l -> rgb + output_ab_resize = F.interpolate(output_ab, size=(self.height, self.width))[0].float().numpy().transpose(1, 2, 0) + output_lab = np.concatenate((orig_l, output_ab_resize), axis=-1) + output_bgr = cv2.cvtColor(output_lab, cv2.COLOR_LAB2BGR) + + output_img = (output_bgr * 255.0).round().astype(np.uint8) + + return output_img + + +# Initialize +colorizer = ImageColorizationPipeline(model_path=model_path, + input_size=input_size, + model_size=model_size) + + +# Create inference function for gradio app +def colorize(img): + image_out = colorizer.process(img) + # Generate a unique filename using UUID + #unique_imgfilename = str(uuid.uuid4()) + '.png' + unique_imgfilename = "result.png" + cv2.imwrite(unique_imgfilename, image_out) + return (img, unique_imgfilename,unique_imgfilename) + + +# Gradio demo using the Image-Slider custom component +with gr.Blocks() as demo: + with 
gr.Row(): + with gr.Column(): + bw_image = gr.Image(label='Black and White Input Image') + btn = gr.Button('Convert using DDColor') + with gr.Column(): + col_image_slider = ImageSlider(position=0.5, + label='Colored Image with Slider-view') + with gr.Column(): + dl = gr.File(label="Download the output image") + + + btn.click(colorize, bw_image, col_image_slider,dl) +demo.launch() \ No newline at end of file diff --git a/inference/colorization_pipeline.py b/inference/colorization_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc430e6e9df8e94bf107bf8a74514007036bd0d --- /dev/null +++ b/inference/colorization_pipeline.py @@ -0,0 +1,108 @@ +import argparse +import cv2 +import numpy as np +import os +from tqdm import tqdm +import torch +from basicsr.archs.ddcolor_arch import DDColor +import torch.nn.functional as F + + +class ImageColorizationPipeline(object): + + def __init__(self, model_path, input_size=256, model_size='large'): + + self.input_size = input_size + if torch.cuda.is_available(): + self.device = torch.device('cuda') + else: + self.device = torch.device('cpu') + + if model_size == 'tiny': + self.encoder_name = 'convnext-t' + else: + self.encoder_name = 'convnext-l' + + self.decoder_type = "MultiScaleColorDecoder" + + if self.decoder_type == 'MultiScaleColorDecoder': + self.model = DDColor( + encoder_name=self.encoder_name, + decoder_name='MultiScaleColorDecoder', + input_size=[self.input_size, self.input_size], + num_output_channels=2, + last_norm='Spectral', + do_normalize=False, + num_queries=100, + num_scales=3, + dec_layers=9, + ).to(self.device) + else: + self.model = DDColor( + encoder_name=self.encoder_name, + decoder_name='SingleColorDecoder', + input_size=[self.input_size, self.input_size], + num_output_channels=2, + last_norm='Spectral', + do_normalize=False, + num_queries=256, + ).to(self.device) + + self.model.load_state_dict( + torch.load(model_path, map_location=torch.device('cpu'))['params'], + strict=False) + self.model.eval() + + @torch.no_grad() + def process(self, img): + self.height, self.width = img.shape[:2] + # print(self.width, self.height) + # if self.width * self.height < 100000: + # self.input_size = 256 + + img = (img / 255.0).astype(np.float32) + orig_l = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)[:, :, :1] # (h, w, 1) + + # resize rgb image -> lab -> get grey -> rgb + img = cv2.resize(img, (self.input_size, self.input_size)) + img_l = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)[:, :, :1] + img_gray_lab = np.concatenate((img_l, np.zeros_like(img_l), np.zeros_like(img_l)), axis=-1) + img_gray_rgb = cv2.cvtColor(img_gray_lab, cv2.COLOR_LAB2RGB) + + tensor_gray_rgb = torch.from_numpy(img_gray_rgb.transpose((2, 0, 1))).float().unsqueeze(0).to(self.device) + output_ab = self.model(tensor_gray_rgb).cpu() # (1, 2, self.height, self.width) + + # resize ab -> concat original l -> rgb + output_ab_resize = F.interpolate(output_ab, size=(self.height, self.width))[0].float().numpy().transpose(1, 2, 0) + output_lab = np.concatenate((orig_l, output_ab_resize), axis=-1) + output_bgr = cv2.cvtColor(output_lab, cv2.COLOR_LAB2BGR) + + output_img = (output_bgr * 255.0).round().astype(np.uint8) + + return output_img + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--model_path', type=str, default='pretrain/net_g_200000.pth') + parser.add_argument('--input', type=str, default='figure/', help='input test image folder or video path') + parser.add_argument('--output', type=str, default='results', help='output folder or video 
path') + parser.add_argument('--input_size', type=int, default=512, help='input size for model') + parser.add_argument('--model_size', type=str, default='large', help='ddcolor model size') + args = parser.parse_args() + + print(f'Output path: {args.output}') + os.makedirs(args.output, exist_ok=True) + img_list = os.listdir(args.input) + assert len(img_list) > 0 + + colorizer = ImageColorizationPipeline(model_path=args.model_path, input_size=args.input_size, model_size=args.model_size) + + for name in tqdm(img_list): + img = cv2.imread(os.path.join(args.input, name)) + image_out = colorizer.process(img) + cv2.imwrite(os.path.join(args.output, name), image_out) + + +if __name__ == '__main__': + main() diff --git a/inference/colorization_pipeline_hf.py b/inference/colorization_pipeline_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..19d3f0a076003bc0e660e3c32c25cce41b842290 --- /dev/null +++ b/inference/colorization_pipeline_hf.py @@ -0,0 +1,65 @@ +import argparse +import cv2 +import os +from tqdm import tqdm +import torch +from basicsr.archs.ddcolor_arch import DDColor + +from huggingface_hub import PyTorchModelHubMixin, hf_hub_download +from inference.colorization_pipeline import ImageColorizationPipeline + + +class DDColorHF(DDColor, PyTorchModelHubMixin): + def __init__(self, config): + super().__init__(**config) + + +class ImageColorizationPipelineHF(ImageColorizationPipeline): + def __init__(self, model, input_size): + self.input_size = input_size + if torch.cuda.is_available(): + self.device = torch.device("cuda") + else: + self.device = torch.device("cpu") + + self.model = model.to(self.device) + self.model.eval() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--model_name", type=str, default="ddcolor_modelscope") + parser.add_argument( + "--input", + type=str, + default="figure/", + help="input test image folder or video path", + ) + parser.add_argument( + "--output", type=str, default="results", help="output folder or video path" + ) + parser.add_argument( + "--input_size", type=int, default=512, help="input size for model" + ) + + args = parser.parse_args() + + ddcolor_model = DDColorHF.from_pretrained(f"piddnad/{args.model_name}") + + print(f"Output path: {args.output}") + os.makedirs(args.output, exist_ok=True) + img_list = os.listdir(args.input) + assert len(img_list) > 0 + + colorizer = ImageColorizationPipelineHF( + model=ddcolor_model, input_size=args.input_size + ) + + for name in tqdm(img_list): + img = cv2.imread(os.path.join(args.input, name)) + image_out = colorizer.process(img) + cv2.imwrite(os.path.join(args.output, name), image_out) + + +if __name__ == "__main__": + main() diff --git a/modelscope/damo/cv_ddcolor_image-colorization/pytorch_model.pt b/modelscope/damo/cv_ddcolor_image-colorization/pytorch_model.pt new file mode 100644 index 0000000000000000000000000000000000000000..a78f0e3dc978c07a011a7d6005c0829de352e655 --- /dev/null +++ b/modelscope/damo/cv_ddcolor_image-colorization/pytorch_model.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17c460d7e55b32a598370621d77173be59e03c24b0823f06821db23a50c263ce +size 911950059 diff --git a/options/train/train_ddcolor.yml b/options/train/train_ddcolor.yml new file mode 100644 index 0000000000000000000000000000000000000000..bee3fcde799861761357874066128c1ac56890a2 --- /dev/null +++ b/options/train/train_ddcolor.yml @@ -0,0 +1,172 @@ +# general settings +name: train_ddcolor_l +model_type: ColorModel +scale: 1 +num_gpu: 4 +manual_seed: 0 
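+# note: parse_options() seeds each process with manual_seed + rank, so every
+# GPU gets a distinct but reproducible random stream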
+queue_size: 64 + +# dataset and data loader settings +datasets: + train: + name: ImageNet + type: LabDataset + dataroot_gt: / + meta_info_file: ['data_list/imagenet.txt'] + io_backend: + type: disk + + gt_size: 256 + + # augmentation config + ## flip & rotate90 + use_hflip: True + use_rot: False + + # cutmix / fmix + do_cutmix: False + cutmix_p: 0.5 + do_fmix: False + fmix_p: 0.5 + + # data loader + use_shuffle: true + num_worker_per_gpu: 8 + batch_size_per_gpu: 4 + dataset_enlarge_ratio: 1 + prefetch_mode: ~ + + # Uncomment these for validation + val: + name: ImageNet + type: LabDataset + # dataroot_gt: path_to_gt + dataroot_gt: / + meta_info_file: 'data_list/imagenet_val_5k.txt' + gt_size: 256 + io_backend: + type: disk + + # cutmix / fmix + do_cutmix: False + cutmix_p: 0.5 + do_fmix: False + fmix_p: 0.5 + +# network structures +network_g: + type: DDColor + + encoder_name: convnext-l + encoder_from_pretrain: True + + decoder_name: MultiScaleColorDecoder + num_queries: 100 + num_scales: 3 + dec_layers: 9 + + last_norm: Spectral + num_output_channels: 2 + do_normalize: False + +network_d: + type: DynamicUNetDiscriminator + nf: 64 + n_channels: 3 + + +# path +path: + pretrain_network_g: ~ + strict_load_g: true + resume_state: ~ + +# training settings +train: + color_enhance: True + color_enhance_factor: 1.2 + optim_g: + type: AdamW + lr: !!float 1e-4 + weight_decay: 0.01 + betas: [0.9, 0.99] + optim_d: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + + scheduler: + type: MultiStepLR + milestones: [80000, 120000, 160000, 200000, 240000, 280000, 320000, 360000] + gamma: 0.5 + + total_iter: 400000 + warmup_iter: -1 # no warm up + + # losses + pixel_opt: + type: L1Loss + loss_weight: 0.1 + reduction: mean + perceptual_opt: + type: PerceptualLoss + layer_weights: + 'conv1_1': 0.0625 + 'conv2_1': 0.125 + 'conv3_1': 0.25 + 'conv4_1': 0.5 + 'conv5_1': 1.0 + vgg_type: vgg16_bn + use_input_norm: true + range_norm: false + perceptual_weight: 5.0 + style_weight: 0 + criterion: l1 + gan_opt: + type: GANLoss + gan_type: vanilla + real_label_val: 1.0 + fake_label_val: 0.0 + loss_weight: 1.0 + colorfulness_opt: + type: ColorfulnessLoss + loss_weight: 0.5 + +# Uncomment these for validation +# validation settings +val: + val_freq: !!float 1e4 + save_img: False + pbar: True + + metrics: + fid: # metric name, can be arbitrary + type: calculate_fid + better: lower + cf: + type: calculate_cf + better: higher +# psnr: +# type: calculate_psnr +# crop_border: 4 +# test_y_channel: false + +# logging settings +logger: + print_freq: 100 + save_snapshot_freq: !!float 1e3 + save_snapshot_verbose: True + save_checkpoint_freq: !!float 1e4 + use_tb_logger: true + wandb: + project: ~ + resume_id: ~ + + +# dist training settings +dist_params: + backend: nccl + port: 29500 + +find_unused_parameters: true diff --git a/predict.py b/predict.py new file mode 100644 index 0000000000000000000000000000000000000000..6b8af6c98ea5a43eabf655934e39b9d997a09eb5 --- /dev/null +++ b/predict.py @@ -0,0 +1,117 @@ +# Prediction interface for Cog ⚙️ +# https://github.com/replicate/cog/blob/main/docs/python.md + +import cv2 +import numpy as np +from subprocess import call +import torch +import torch.nn.functional as F +from cog import BasePredictor, Input, Path + + +class Predictor(BasePredictor): + def setup(self) -> None: + """Load the model into memory to make running multiple predictions efficient""" + # download the weights to "checkpoints" + + from basicsr.archs.ddcolor_arch import DDColor + + class 
ImageColorizationPipeline(object): + def __init__(self, model_path, input_size=256, model_size="large"): + self.input_size = input_size + if torch.cuda.is_available(): + self.device = torch.device("cuda") + else: + self.device = torch.device("cpu") + + if model_size == "tiny": + self.encoder_name = "convnext-t" + else: + self.encoder_name = "convnext-l" + + self.decoder_type = "MultiScaleColorDecoder" + + self.model = DDColor( + encoder_name=self.encoder_name, + decoder_name="MultiScaleColorDecoder", + input_size=[self.input_size, self.input_size], + num_output_channels=2, + last_norm="Spectral", + do_normalize=False, + num_queries=100, + num_scales=3, + dec_layers=9, + ).to(self.device) + + self.model.load_state_dict( + torch.load(model_path, map_location=torch.device("cpu"))["params"], + strict=False, + ) + self.model.eval() + + @torch.no_grad() + def process(self, img): + self.height, self.width = img.shape[:2] + img = (img / 255.0).astype(np.float32) + orig_l = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)[:, :, :1] # (h, w, 1) + + # resize rgb image -> lab -> get grey -> rgb + img = cv2.resize(img, (self.input_size, self.input_size)) + img_l = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)[:, :, :1] + img_gray_lab = np.concatenate( + (img_l, np.zeros_like(img_l), np.zeros_like(img_l)), axis=-1 + ) + img_gray_rgb = cv2.cvtColor(img_gray_lab, cv2.COLOR_LAB2RGB) + + tensor_gray_rgb = ( + torch.from_numpy(img_gray_rgb.transpose((2, 0, 1))) + .float() + .unsqueeze(0) + .to(self.device) + ) + output_ab = self.model( + tensor_gray_rgb + ).cpu() # (1, 2, self.height, self.width) + + # resize ab -> concat original l -> rgb + output_ab_resize = ( + F.interpolate(output_ab, size=(self.height, self.width))[0] + .float() + .numpy() + .transpose(1, 2, 0) + ) + output_lab = np.concatenate((orig_l, output_ab_resize), axis=-1) + output_bgr = cv2.cvtColor(output_lab, cv2.COLOR_LAB2BGR) + + output_img = (output_bgr * 255.0).round().astype(np.uint8) + + return output_img + + self.colorizer = ImageColorizationPipeline( + model_path="checkpoints/ddcolor_modelscope.pth", + input_size=512, + model_size="large", + ) + self.colorizer_tiny = ImageColorizationPipeline( + model_path="checkpoints/ddcolor_paper_tiny.pth", + input_size=512, + model_size="tiny", + ) + + def predict( + self, + image: Path = Input(description="Grayscale input image."), + model_size: str = Input( + description="Choose the model size.", + choices=["large", "tiny"], + default="large", + ), + ) -> Path: + """Run a single prediction on the model""" + + img = cv2.imread(str(image)) + colorizer = self.colorizer_tiny if model_size == "tiny" else self.colorizer + image_out = colorizer.process(img) + out_path = "/tmp/out.png" + cv2.imwrite(out_path, image_out) + return Path(out_path) diff --git a/pretrain/put_pretrain_weights_here.txt b/pretrain/put_pretrain_weights_here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ab7d0291b069ab7ae18c4641c0ae1b2738b308a8 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,15 @@ +dlib==19.24.2 +lmdb==1.4.1 +numpy==1.24.3 +opencv_python==4.7.0.72 +Pillow==10.1.0 +PyYAML==6.0.1 +Requests==2.31.0 +scipy==1.9.1 +timm==0.9.2 +torch==2.2.0 +torchvision==0.17.0 +tqdm==4.65.0 +wandb==0.15.5 +scikit-image==0.22.0 +tensorboard==2.15.1 \ No newline at end of file diff --git a/result.png b/result.png new file mode 100644 index 
0000000000000000000000000000000000000000..7b731fc6ae5b01bc987ab9fccbfa8a74079091d9 Binary files /dev/null and b/result.png differ diff --git a/scripts/inference.sh b/scripts/inference.sh new file mode 100644 index 0000000000000000000000000000000000000000..45e6bff5ec4aa1567dd199fa54b38db0adda1445 --- /dev/null +++ b/scripts/inference.sh @@ -0,0 +1,4 @@ +CUDA_VISIBLE_DEVICES=0 \ +python3 inference/colorization_pipeline.py \ + --input ./assets/test_images --output ./colorize_output \ + --model_path modelscope/damo/cv_ddcolor_image-colorization/pytorch_model.pt \ No newline at end of file diff --git a/scripts/train.sh b/scripts/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..d46d19f4d79028c02165bbaf9d6ebb99d2b82ac4 --- /dev/null +++ b/scripts/train.sh @@ -0,0 +1,3 @@ +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +python3 -m torch.distributed.launch --nproc_per_node=4 --master_port=3721 basicsr/train.py \ + -opt options/train/train_ddcolor.yml --auto_resume --launcher pytorch diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..ac9d44dddc9fe811f338dd46814925030329a243 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,28 @@ +[flake8] +ignore = + # line break before binary operator (W503) + W503, + # line break after binary operator (W504) + W504, +max-line-length=120 + +[yapf] +based_on_style = pep8 +column_limit = 120 +blank_line_before_nested_class_or_def = true +split_before_expression_after_opening_paren = true + +[isort] +line_length = 120 +multi_line_output = 0 +known_standard_library = pkg_resources,setuptools +known_first_party = basicsr +known_third_party = PIL,cv2,lmdb,numpy,requests,scipy,skimage,torch,torchvision,tqdm,yaml +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY + +[codespell] +skip = .git,./docs/build,*.cfg +count = +quiet-level = 3 +ignore-words-list = gool diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..9e31e945ee4286e7125753934dcf16130c9811bc --- /dev/null +++ b/setup.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python + +from setuptools import find_packages, setup + +import os +import subprocess +import time +import torch +from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension + +version_file = 'basicsr/version.py' + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +def get_git_hash(): + + def _minimal_ext_cmd(cmd): + # construct minimal environment + env = {} + for k in ['SYSTEMROOT', 'PATH', 'HOME']: + v = os.environ.get(k) + if v is not None: + env[k] = v + # LANGUAGE is used on win32 + env['LANGUAGE'] = 'C' + env['LANG'] = 'C' + env['LC_ALL'] = 'C' + out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] + return out + + try: + out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) + sha = out.strip().decode('ascii') + except OSError: + sha = 'unknown' + + return sha + + +def get_hash(): + if os.path.exists('.git'): + sha = get_git_hash()[:7] + # currently ignore this + # elif os.path.exists(version_file): + # try: + # from basicsr.version import __version__ + # sha = __version__.split('+')[-1] + # except ImportError: + # raise ImportError('Unable to get git version') + else: + sha = 'unknown' + + return sha + + +def write_version_py(): + content = """# GENERATED VERSION FILE +# TIME: {} +__version__ = '{}' +__gitsha__ = '{}' +version_info = ({}) +""" + sha = get_hash() + with open('VERSION', 'r') as f: + SHORT_VERSION = f.read().strip() + 
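+    # e.g. SHORT_VERSION '1.3.4.6' becomes "1, 3, 4, 6"; any non-numeric part
+    # (such as an 'rc1' suffix) is kept as a quoted string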
VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')]) + + version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO) + with open(version_file, 'w') as f: + f.write(version_file_str) + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def make_cuda_ext(name, module, sources, sources_cuda=None): + if sources_cuda is None: + sources_cuda = [] + define_macros = [] + extra_compile_args = {'cxx': []} + + if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1': + define_macros += [('WITH_CUDA', None)] + extension = CUDAExtension + extra_compile_args['nvcc'] = [ + '-D__CUDA_NO_HALF_OPERATORS__', + '-D__CUDA_NO_HALF_CONVERSIONS__', + '-D__CUDA_NO_HALF2_OPERATORS__', + ] + sources += sources_cuda + else: + print(f'Compiling {name} without CUDA') + extension = CppExtension + + return extension( + name=f'{module}.{name}', + sources=[os.path.join(*module.split('.'), p) for p in sources], + define_macros=define_macros, + extra_compile_args=extra_compile_args) + + +def get_requirements(filename='requirements.txt'): + here = os.path.dirname(os.path.realpath(__file__)) + with open(os.path.join(here, filename), 'r') as f: + requires = [line.replace('\n', '') for line in f.readlines()] + return requires + + +if __name__ == '__main__': + cuda_ext = os.getenv('BASICSR_EXT') # whether compile cuda ext + if cuda_ext == 'True': + ext_modules = [ + make_cuda_ext( + name='deform_conv_ext', + module='basicsr.ops.dcn', + sources=['src/deform_conv_ext.cpp'], + sources_cuda=['src/deform_conv_cuda.cpp', 'src/deform_conv_cuda_kernel.cu']), + make_cuda_ext( + name='fused_act_ext', + module='basicsr.ops.fused_act', + sources=['src/fused_bias_act.cpp'], + sources_cuda=['src/fused_bias_act_kernel.cu']), + make_cuda_ext( + name='upfirdn2d_ext', + module='basicsr.ops.upfirdn2d', + sources=['src/upfirdn2d.cpp'], + sources_cuda=['src/upfirdn2d_kernel.cu']), + ] + else: + ext_modules = [] + + write_version_py() + setup( + name='basicsr', + version=get_version(), + description='Open Source Image and Video Super-Resolution Toolbox', + long_description=readme(), + long_description_content_type='text/markdown', + author='Xintao Wang', + author_email='xintao.wang@outlook.com', + keywords='computer vision, restoration, super resolution', + url='https://github.com/xinntao/BasicSR', + include_package_data=True, + packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')), + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + ], + license='Apache License 2.0', + setup_requires=['cython', 'numpy'], + install_requires=get_requirements(), + ext_modules=ext_modules, + cmdclass={'build_ext': BuildExtension}, + zip_safe=False)