diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/BuildExecutable.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/BuildExecutable.py
new file mode 100644
index 0000000000000000000000000000000000000000..0190cc86ff1539d0c4b4916c559b0f2163c279ed
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/BuildExecutable.py
@@ -0,0 +1,170 @@
+"""
+Compile a Python script into an executable that embeds CPython.
+Requires CPython to be built as a shared library ('libpythonX.Y').
+
+Basic usage:
+
+ python -m Cython.Build.BuildExecutable [ARGS] somefile.py
+"""
+
+from __future__ import absolute_import
+
+DEBUG = True
+
+import sys
+import os
+if sys.version_info < (3, 9):
+ from distutils import sysconfig as _sysconfig
+
+ class sysconfig(object):
+
+ @staticmethod
+ def get_path(name):
+ assert name == 'include'
+ return _sysconfig.get_python_inc()
+
+ get_config_var = staticmethod(_sysconfig.get_config_var)
+else:
+    # sysconfig can be trusted from CPython >= 3.8.7; we only switch to it from 3.9 on.
+ import sysconfig
+
+
+def get_config_var(name, default=''):
+ return sysconfig.get_config_var(name) or default
+
+INCDIR = sysconfig.get_path('include')
+LIBDIR1 = get_config_var('LIBDIR')
+LIBDIR2 = get_config_var('LIBPL')
+PYLIB = get_config_var('LIBRARY')
+PYLIB_DYN = get_config_var('LDLIBRARY')
+if PYLIB_DYN == PYLIB:
+ # no shared library
+ PYLIB_DYN = ''
+else:
+ PYLIB_DYN = os.path.splitext(PYLIB_DYN[3:])[0] # 'lib(XYZ).so' -> XYZ
+
+CC = get_config_var('CC', os.environ.get('CC', ''))
+CFLAGS = get_config_var('CFLAGS') + ' ' + os.environ.get('CFLAGS', '')
+LINKCC = get_config_var('LINKCC', os.environ.get('LINKCC', CC))
+LINKFORSHARED = get_config_var('LINKFORSHARED')
+LIBS = get_config_var('LIBS')
+SYSLIBS = get_config_var('SYSLIBS')
+EXE_EXT = get_config_var('EXE')  # use the '' default rather than a possible None
+
+
+def _debug(msg, *args):
+ if DEBUG:
+ if args:
+ msg = msg % args
+ sys.stderr.write(msg + '\n')
+
+
+def dump_config():
+ _debug('INCDIR: %s', INCDIR)
+ _debug('LIBDIR1: %s', LIBDIR1)
+ _debug('LIBDIR2: %s', LIBDIR2)
+ _debug('PYLIB: %s', PYLIB)
+ _debug('PYLIB_DYN: %s', PYLIB_DYN)
+ _debug('CC: %s', CC)
+ _debug('CFLAGS: %s', CFLAGS)
+ _debug('LINKCC: %s', LINKCC)
+ _debug('LINKFORSHARED: %s', LINKFORSHARED)
+ _debug('LIBS: %s', LIBS)
+ _debug('SYSLIBS: %s', SYSLIBS)
+ _debug('EXE_EXT: %s', EXE_EXT)
+
+
+def _parse_args(args):
+ cy_args = []
+ last_arg = None
+ for i, arg in enumerate(args):
+ if arg.startswith('-'):
+ cy_args.append(arg)
+ elif last_arg in ('-X', '--directive'):
+ cy_args.append(arg)
+ else:
+ input_file = arg
+ args = args[i+1:]
+ break
+ last_arg = arg
+ else:
+ raise ValueError('no input file provided')
+
+ return input_file, cy_args, args
+
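+# Example (illustrative): _parse_args(['-3', 'hello.py', '--flag'])
+# returns ('hello.py', ['-3'], ['--flag']): Cython options, then the script,
+# then arguments destined for the program itself.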
+
+def runcmd(cmd, shell=True):
+ if shell:
+ cmd = ' '.join(cmd)
+ _debug(cmd)
+ else:
+ _debug(' '.join(cmd))
+
+ import subprocess
+ returncode = subprocess.call(cmd, shell=shell)
+
+ if returncode:
+ sys.exit(returncode)
+
+
+def clink(basename):
+ runcmd([LINKCC, '-o', basename + EXE_EXT, basename+'.o', '-L'+LIBDIR1, '-L'+LIBDIR2]
+ + [PYLIB_DYN and ('-l'+PYLIB_DYN) or os.path.join(LIBDIR1, PYLIB)]
+ + LIBS.split() + SYSLIBS.split() + LINKFORSHARED.split())
+
+
+def ccompile(basename):
+ runcmd([CC, '-c', '-o', basename+'.o', basename+'.c', '-I' + INCDIR] + CFLAGS.split())
+
+
+def cycompile(input_file, options=()):
+ from ..Compiler import Version, CmdLine, Main
+ options, sources = CmdLine.parse_command_line(list(options or ()) + ['--embed', input_file])
+ _debug('Using Cython %s to compile %s', Version.version, input_file)
+ result = Main.compile(sources, options)
+ if result.num_errors > 0:
+ sys.exit(1)
+
+
+def exec_file(program_name, args=()):
+ runcmd([os.path.abspath(program_name)] + list(args), shell=False)
+
+
+def build(input_file, compiler_args=(), force=False):
+ """
+ Build an executable program from a Cython module.
+
+ Returns the name of the executable file.
+ """
+ basename = os.path.splitext(input_file)[0]
+ exe_file = basename + EXE_EXT
+ if not force and os.path.abspath(exe_file) == os.path.abspath(input_file):
+ raise ValueError("Input and output file names are the same, refusing to overwrite")
+ if (not force and os.path.exists(exe_file) and os.path.exists(input_file)
+ and os.path.getmtime(input_file) <= os.path.getmtime(exe_file)):
+ _debug("File is up to date, not regenerating %s", exe_file)
+ return exe_file
+ cycompile(input_file, compiler_args)
+ ccompile(basename)
+ clink(basename)
+ return exe_file
+
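+# Example (a sketch; assumes 'hello.py' exists and CPython was built with a
+# shared libpython to link against):
+#   exe = build('hello.py')        # cythonize, compile and link -> hello[.exe]
+#   exec_file(exe, ['--greet'])    # run the resulting binary with arguments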
+
+def build_and_run(args):
+ """
+ Build an executable program from a Cython module and run it.
+
+    Arguments after the module name will be passed verbatim to the program.
+ """
+ program_name, args = _build(args)
+ exec_file(program_name, args)
+
+
+def _build(args):
+ input_file, cy_args, args = _parse_args(args)
+ program_name = build(input_file, cy_args)
+ return program_name, args
+
+
+if __name__ == '__main__':
+ _build(sys.argv[1:])
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Cythonize.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Cythonize.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4beb21995f7b0ce0fe20e34ac54ee0dbe0a149d
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Cythonize.py
@@ -0,0 +1,255 @@
+from __future__ import absolute_import, print_function
+
+import os
+import shutil
+import tempfile
+
+from .Dependencies import cythonize, extended_iglob
+from ..Utils import is_package_dir
+from ..Compiler import Options
+
+try:
+ import multiprocessing
+ parallel_compiles = int(multiprocessing.cpu_count() * 1.5)
+except ImportError:
+ multiprocessing = None
+ parallel_compiles = 0
+
+
+class _FakePool(object):
+ def map_async(self, func, args):
+ try:
+ from itertools import imap
+ except ImportError:
+            imap = map
+ for _ in imap(func, args):
+ pass
+
+ def close(self):
+ pass
+
+ def terminate(self):
+ pass
+
+ def join(self):
+ pass
+
+
+def find_package_base(path):
+ base_dir, package_path = os.path.split(path)
+ while is_package_dir(base_dir):
+ base_dir, parent = os.path.split(base_dir)
+ package_path = '%s/%s' % (parent, package_path)
+ return base_dir, package_path
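+
+# Example (illustrative): for '/src/pkg/mod.py' where '/src/pkg' contains an
+# '__init__' file but '/src' does not, find_package_base() returns
+# ('/src', 'pkg/mod.py').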
+
+def cython_compile(path_pattern, options):
+ all_paths = map(os.path.abspath, extended_iglob(path_pattern))
+ _cython_compile_files(all_paths, options)
+
+def _cython_compile_files(all_paths, options):
+ pool = None
+ try:
+ for path in all_paths:
+ if options.build_inplace:
+ base_dir = path
+ while not os.path.isdir(base_dir) or is_package_dir(base_dir):
+ base_dir = os.path.dirname(base_dir)
+ else:
+ base_dir = None
+
+ if os.path.isdir(path):
+ # recursively compiling a package
+ paths = [os.path.join(path, '**', '*.{py,pyx}')]
+ else:
+ # assume it's a file(-like thing)
+ paths = [path]
+
+ ext_modules = cythonize(
+ paths,
+ nthreads=options.parallel,
+ exclude_failures=options.keep_going,
+ exclude=options.excludes,
+ compiler_directives=options.directives,
+ compile_time_env=options.compile_time_env,
+ force=options.force,
+ quiet=options.quiet,
+ depfile=options.depfile,
+ language=options.language,
+ **options.options)
+
+ if ext_modules and options.build:
+ if len(ext_modules) > 1 and options.parallel > 1:
+ if pool is None:
+ try:
+ pool = multiprocessing.Pool(options.parallel)
+ except OSError:
+ pool = _FakePool()
+ pool.map_async(run_distutils, [
+ (base_dir, [ext]) for ext in ext_modules])
+ else:
+ run_distutils((base_dir, ext_modules))
+ except:
+ if pool is not None:
+ pool.terminate()
+ raise
+ else:
+ if pool is not None:
+ pool.close()
+ pool.join()
+
+
+def run_distutils(args):
+ try:
+ from distutils.core import setup
+ except ImportError:
+ try:
+ from setuptools import setup
+ except ImportError:
+ raise ImportError("'distutils' is not available. Please install 'setuptools' for binary builds.")
+
+ base_dir, ext_modules = args
+ script_args = ['build_ext', '-i']
+ cwd = os.getcwd()
+ temp_dir = None
+ try:
+ if base_dir:
+ os.chdir(base_dir)
+ temp_dir = tempfile.mkdtemp(dir=base_dir)
+ script_args.extend(['--build-temp', temp_dir])
+ setup(
+ script_name='setup.py',
+ script_args=script_args,
+ ext_modules=ext_modules,
+ )
+ finally:
+ if base_dir:
+ os.chdir(cwd)
+ if temp_dir and os.path.isdir(temp_dir):
+ shutil.rmtree(temp_dir)
+
+
+def create_args_parser():
+ from argparse import ArgumentParser, RawDescriptionHelpFormatter
+ from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction
+
+ parser = ArgumentParser(
+ formatter_class=RawDescriptionHelpFormatter,
+ epilog="""\
+Environment variables:
+ CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless
+ of modification times and changes.
+ Environment variables accepted by setuptools are supported to configure the C compiler and build:
+ https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options"""
+ )
+
+ parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',
+ dest='directives', default={}, type=str,
+ action=ParseDirectivesAction,
+ help='set a compiler directive')
+ parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',
+ dest='compile_time_env', default={}, type=str,
+ action=ParseCompileTimeEnvAction,
+ help='set a compile time environment variable')
+ parser.add_argument('-s', '--option', metavar='NAME=VALUE',
+ dest='options', default={}, type=str,
+ action=ParseOptionsAction,
+ help='set a cythonize option')
+ parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,
+ help='use Python 2 syntax mode by default')
+ parser.add_argument('-3', dest='language_level', action='store_const', const=3,
+ help='use Python 3 syntax mode by default')
+ parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',
+ help='use Python 3 syntax mode by default')
+ parser.add_argument('-+', '--cplus', dest='language', action='store_const', const='c++', default=None,
+ help='Compile as C++ rather than C')
+ parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',
+ help='Produce a colorized HTML version of the source.')
+ parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',
+ help='Produce a colorized HTML version of the source '
+ 'which includes entire generated C/C++-code.')
+ parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',
+ action='append', default=[],
+ help='exclude certain file patterns from the compilation')
+
+ parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,
+ help='build extension modules using distutils/setuptools')
+ parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,
+ help='build extension modules in place using distutils/setuptools (implies -b)')
+ parser.add_argument('-j', '--parallel', dest='parallel', metavar='N',
+ type=int, default=parallel_compiles,
+                        help=('run builds in N parallel jobs (default: %d)' %
+                              (parallel_compiles or 1)))
+ parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,
+ help='force recompilation')
+ parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,
+ help='be less verbose during compilation')
+
+ parser.add_argument('--lenient', dest='lenient', action='store_true', default=None,
+ help='increase Python compatibility by ignoring some compile time errors')
+ parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None,
+ help='compile as much as possible, ignore compilation failures')
+ parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,
+ help='strip docstrings')
+ parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')
+ parser.add_argument('sources', nargs='*')
+ return parser
+
+
+def parse_args_raw(parser, args):
+ options, unknown = parser.parse_known_args(args)
+ sources = options.sources
+ # if positional arguments were interspersed
+ # some of them are in unknown
+ for option in unknown:
+ if option.startswith('-'):
+ parser.error("unknown option "+option)
+ else:
+ sources.append(option)
+ del options.sources
+ return (options, sources)
+
+
+def parse_args(args):
+ parser = create_args_parser()
+ options, args = parse_args_raw(parser, args)
+
+ if not args:
+ parser.error("no source files provided")
+ if options.build_inplace:
+ options.build = True
+ if multiprocessing is None:
+ options.parallel = 0
+ if options.language_level:
+ assert options.language_level in (2, 3, '3str')
+ options.options['language_level'] = options.language_level
+
+ if options.lenient:
+ # increase Python compatibility by ignoring compile time errors
+ Options.error_on_unknown_names = False
+ Options.error_on_uninitialized = False
+
+ if options.annotate:
+ Options.annotate = options.annotate
+
+ if options.no_docstrings:
+ Options.docstrings = False
+
+ return options, args
+
+
+def main(args=None):
+ options, paths = parse_args(args)
+
+ all_paths = []
+ for path in paths:
+ expanded_path = [os.path.abspath(p) for p in extended_iglob(path)]
+ if not expanded_path:
+ import sys
+ print("{}: No such file or directory: '{}'".format(sys.argv[0], path), file=sys.stderr)
+ sys.exit(1)
+ all_paths.extend(expanded_path)
+ _cython_compile_files(all_paths, options)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Dependencies.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..7de406516c00c5478726518ec4fc99b91f3b7286
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Dependencies.py
@@ -0,0 +1,1380 @@
+from __future__ import absolute_import, print_function
+
+import cython
+from .. import __version__
+
+import collections
+import contextlib
+import hashlib
+import os
+import shutil
+import subprocess
+import re, sys, time
+from glob import iglob
+from io import open as io_open
+from os.path import relpath as _relpath
+import zipfile
+
+try:
+ from collections.abc import Iterable
+except ImportError:
+ from collections import Iterable
+
+try:
+ import gzip
+ gzip_open = gzip.open
+ gzip_ext = '.gz'
+except ImportError:
+ gzip_open = open
+ gzip_ext = ''
+
+try:
+ import zlib
+ zipfile_compression_mode = zipfile.ZIP_DEFLATED
+except ImportError:
+ zipfile_compression_mode = zipfile.ZIP_STORED
+
+try:
+ import pythran
+except:
+ pythran = None
+
+from .. import Utils
+from ..Utils import (cached_function, cached_method, path_exists,
+ safe_makedirs, copy_file_to_dir_if_newer, is_package_dir, write_depfile)
+from ..Compiler import Errors
+from ..Compiler.Main import Context
+from ..Compiler.Options import (CompilationOptions, default_options,
+ get_directive_defaults)
+
+join_path = cached_function(os.path.join)
+copy_once_if_newer = cached_function(copy_file_to_dir_if_newer)
+safe_makedirs_once = cached_function(safe_makedirs)
+
+if sys.version_info[0] < 3:
+ # stupid Py2 distutils enforces str type in list of sources
+ _fs_encoding = sys.getfilesystemencoding()
+ if _fs_encoding is None:
+ _fs_encoding = sys.getdefaultencoding()
+ def encode_filename_in_py2(filename):
+ if not isinstance(filename, bytes):
+ return filename.encode(_fs_encoding)
+ return filename
+else:
+ def encode_filename_in_py2(filename):
+ return filename
+ basestring = str
+
+
+def _make_relative(file_paths, base=None):
+ if not base:
+ base = os.getcwd()
+ if base[-1] != os.path.sep:
+ base += os.path.sep
+ return [_relpath(path, base) if path.startswith(base) else path
+ for path in file_paths]
+
+
+def extended_iglob(pattern):
+ if '{' in pattern:
+ m = re.match('(.*){([^}]+)}(.*)', pattern)
+ if m:
+ before, switch, after = m.groups()
+ for case in switch.split(','):
+ for path in extended_iglob(before + case + after):
+ yield path
+ return
+
+ # We always accept '/' and also '\' on Windows,
+ # because '/' is generally common for relative paths.
+ if '**/' in pattern or os.sep == '\\' and '**\\' in pattern:
+ seen = set()
+ first, rest = re.split(r'\*\*[%s]' % ('/\\\\' if os.sep == '\\' else '/'), pattern, 1)
+ if first:
+ first = iglob(first + os.sep)
+ else:
+ first = ['']
+ for root in first:
+ for path in extended_iglob(join_path(root, rest)):
+ if path not in seen:
+ seen.add(path)
+ yield path
+ for path in extended_iglob(join_path(root, '*', '**', rest)):
+ if path not in seen:
+ seen.add(path)
+ yield path
+ else:
+ for path in iglob(pattern):
+ yield path
+
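+# Example patterns (illustrative):
+#   'src/{a,b}/*.pyx'  expands to 'src/a/*.pyx' and 'src/b/*.pyx'
+#   'src/**/*.pyx'     matches .pyx files in 'src' and all its subdirectories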
+
+def nonempty(it, error_msg="expected non-empty iterator"):
+ empty = True
+ for value in it:
+ empty = False
+ yield value
+ if empty:
+ raise ValueError(error_msg)
+
+
+@cached_function
+def file_hash(filename):
+ path = os.path.normpath(filename)
+ prefix = ('%d:%s' % (len(path), path)).encode("UTF-8")
+ m = hashlib.sha1(prefix)
+ with open(path, 'rb') as f:
+ data = f.read(65000)
+ while data:
+ m.update(data)
+ data = f.read(65000)
+ return m.hexdigest()
+
+
+def update_pythran_extension(ext):
+ if pythran is None:
+ raise RuntimeError("You first need to install Pythran to use the np_pythran directive.")
+ try:
+ pythran_ext = pythran.config.make_extension(python=True)
+ except TypeError: # older pythran version only
+ pythran_ext = pythran.config.make_extension()
+
+ ext.include_dirs.extend(pythran_ext['include_dirs'])
+ ext.extra_compile_args.extend(pythran_ext['extra_compile_args'])
+ ext.extra_link_args.extend(pythran_ext['extra_link_args'])
+ ext.define_macros.extend(pythran_ext['define_macros'])
+ ext.undef_macros.extend(pythran_ext['undef_macros'])
+ ext.library_dirs.extend(pythran_ext['library_dirs'])
+ ext.libraries.extend(pythran_ext['libraries'])
+ ext.language = 'c++'
+
+ # These options are not compatible with the way normal Cython extensions work
+ for bad_option in ["-fwhole-program", "-fvisibility=hidden"]:
+ try:
+ ext.extra_compile_args.remove(bad_option)
+ except ValueError:
+ pass
+
+
+def parse_list(s):
+ """
+ >>> parse_list("")
+ []
+ >>> parse_list("a")
+ ['a']
+ >>> parse_list("a b c")
+ ['a', 'b', 'c']
+ >>> parse_list("[a, b, c]")
+ ['a', 'b', 'c']
+ >>> parse_list('a " " b')
+ ['a', ' ', 'b']
+ >>> parse_list('[a, ",a", "a,", ",", ]')
+ ['a', ',a', 'a,', ',']
+ """
+ if len(s) >= 2 and s[0] == '[' and s[-1] == ']':
+ s = s[1:-1]
+ delimiter = ','
+ else:
+ delimiter = ' '
+ s, literals = strip_string_literals(s)
+ def unquote(literal):
+ literal = literal.strip()
+ if literal[0] in "'\"":
+ return literals[literal[1:-1]]
+ else:
+ return literal
+ return [unquote(item) for item in s.split(delimiter) if item.strip()]
+
+
+transitive_str = object()
+transitive_list = object()
+bool_or = object()
+
+distutils_settings = {
+ 'name': str,
+ 'sources': list,
+ 'define_macros': list,
+ 'undef_macros': list,
+ 'libraries': transitive_list,
+ 'library_dirs': transitive_list,
+ 'runtime_library_dirs': transitive_list,
+ 'include_dirs': transitive_list,
+ 'extra_objects': list,
+ 'extra_compile_args': transitive_list,
+ 'extra_link_args': transitive_list,
+ 'export_symbols': list,
+ 'depends': transitive_list,
+ 'language': transitive_str,
+ 'np_pythran': bool_or
+}
+
+
+def _legacy_strtobool(val):
+ # Used to be "distutils.util.strtobool", adapted for deprecation warnings.
+ if val == "True":
+ return True
+ elif val == "False":
+ return False
+
+ import warnings
+ warnings.warn("The 'np_python' option requires 'True' or 'False'", category=DeprecationWarning)
+ val = val.lower()
+ if val in ('y', 'yes', 't', 'true', 'on', '1'):
+ return True
+ elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+ return False
+ else:
+ raise ValueError("invalid truth value %r" % (val,))
+
+
+@cython.locals(start=cython.Py_ssize_t, end=cython.Py_ssize_t)
+def line_iter(source):
+ if isinstance(source, basestring):
+ start = 0
+ while True:
+ end = source.find('\n', start)
+ if end == -1:
+ yield source[start:]
+ return
+ yield source[start:end]
+ start = end+1
+ else:
+ for line in source:
+ yield line
+
+
+class DistutilsInfo(object):
+
+ def __init__(self, source=None, exn=None):
+ self.values = {}
+ if source is not None:
+ for line in line_iter(source):
+ line = line.lstrip()
+ if not line:
+ continue
+ if line[0] != '#':
+ break
+ line = line[1:].lstrip()
+ kind = next((k for k in ("distutils:","cython:") if line.startswith(k)), None)
+ if kind is not None:
+ key, _, value = [s.strip() for s in line[len(kind):].partition('=')]
+ type = distutils_settings.get(key, None)
+ if line.startswith("cython:") and type is None: continue
+ if type in (list, transitive_list):
+ value = parse_list(value)
+ if key == 'define_macros':
+ value = [tuple(macro.split('=', 1))
+ if '=' in macro else (macro, None)
+ for macro in value]
+ if type is bool_or:
+ value = _legacy_strtobool(value)
+ self.values[key] = value
+ elif exn is not None:
+ for key in distutils_settings:
+ if key in ('name', 'sources','np_pythran'):
+ continue
+ value = getattr(exn, key, None)
+ if value:
+ self.values[key] = value
+
+ def merge(self, other):
+ if other is None:
+ return self
+ for key, value in other.values.items():
+ type = distutils_settings[key]
+ if type is transitive_str and key not in self.values:
+ self.values[key] = value
+ elif type is transitive_list:
+ if key in self.values:
+ # Change a *copy* of the list (Trac #845)
+ all = self.values[key][:]
+ for v in value:
+ if v not in all:
+ all.append(v)
+ value = all
+ self.values[key] = value
+ elif type is bool_or:
+ self.values[key] = self.values.get(key, False) | value
+ return self
+
+ def subs(self, aliases):
+ if aliases is None:
+ return self
+ resolved = DistutilsInfo()
+ for key, value in self.values.items():
+ type = distutils_settings[key]
+ if type in [list, transitive_list]:
+ new_value_list = []
+ for v in value:
+ if v in aliases:
+ v = aliases[v]
+ if isinstance(v, list):
+ new_value_list += v
+ else:
+ new_value_list.append(v)
+ value = new_value_list
+ else:
+ if value in aliases:
+ value = aliases[value]
+ resolved.values[key] = value
+ return resolved
+
+ def apply(self, extension):
+ for key, value in self.values.items():
+ type = distutils_settings[key]
+ if type in [list, transitive_list]:
+ value = getattr(extension, key) + list(value)
+ setattr(extension, key, value)
+
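+# Example (illustrative): a source file beginning with the header comments
+#   # distutils: language = c++
+#   # distutils: libraries = m
+# produces DistutilsInfo(source).values == {'language': 'c++', 'libraries': ['m']}.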
+
+@cython.locals(start=cython.Py_ssize_t, q=cython.Py_ssize_t,
+ single_q=cython.Py_ssize_t, double_q=cython.Py_ssize_t,
+ hash_mark=cython.Py_ssize_t, end=cython.Py_ssize_t,
+ k=cython.Py_ssize_t, counter=cython.Py_ssize_t, quote_len=cython.Py_ssize_t)
+def strip_string_literals(code, prefix='__Pyx_L'):
+ """
+ Normalizes every string literal to be of the form '__Pyx_Lxxx',
+ returning the normalized code and a mapping of labels to
+ string literals.
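+
+    Example (doctest, illustrative):
+
+    >>> strip_string_literals("a = 'x'")
+    ("a = '__Pyx_L1_'", {'__Pyx_L1_': 'x'})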
+ """
+ new_code = []
+ literals = {}
+ counter = 0
+ start = q = 0
+ in_quote = False
+ hash_mark = single_q = double_q = -1
+ code_len = len(code)
+ quote_type = None
+ quote_len = -1
+
+ while True:
+ if hash_mark < q:
+ hash_mark = code.find('#', q)
+ if single_q < q:
+ single_q = code.find("'", q)
+ if double_q < q:
+ double_q = code.find('"', q)
+ q = min(single_q, double_q)
+ if q == -1:
+ q = max(single_q, double_q)
+
+ # We're done.
+ if q == -1 and hash_mark == -1:
+ new_code.append(code[start:])
+ break
+
+ # Try to close the quote.
+ elif in_quote:
+ if code[q-1] == u'\\':
+ k = 2
+ while q >= k and code[q-k] == u'\\':
+ k += 1
+ if k % 2 == 0:
+ q += 1
+ continue
+ if code[q] == quote_type and (
+ quote_len == 1 or (code_len > q + 2 and quote_type == code[q+1] == code[q+2])):
+ counter += 1
+ label = "%s%s_" % (prefix, counter)
+ literals[label] = code[start+quote_len:q]
+ full_quote = code[q:q+quote_len]
+ new_code.append(full_quote)
+ new_code.append(label)
+ new_code.append(full_quote)
+ q += quote_len
+ in_quote = False
+ start = q
+ else:
+ q += 1
+
+ # Process comment.
+ elif -1 != hash_mark and (hash_mark < q or q == -1):
+ new_code.append(code[start:hash_mark+1])
+ end = code.find('\n', hash_mark)
+ counter += 1
+ label = "%s%s_" % (prefix, counter)
+ if end == -1:
+ end_or_none = None
+ else:
+ end_or_none = end
+ literals[label] = code[hash_mark+1:end_or_none]
+ new_code.append(label)
+ if end == -1:
+ break
+ start = q = end
+
+ # Open the quote.
+ else:
+ if code_len >= q+3 and (code[q] == code[q+1] == code[q+2]):
+ quote_len = 3
+ else:
+ quote_len = 1
+ in_quote = True
+ quote_type = code[q]
+ new_code.append(code[start:q])
+ start = q
+ q += quote_len
+
+ return "".join(new_code), literals
+
+
+# We need to allow spaces to allow for conditional compilation like
+# IF ...:
+# cimport ...
+dependency_regex = re.compile(r"(?:^\s*from +([0-9a-zA-Z_.]+) +cimport)|"
+ r"(?:^\s*cimport +([0-9a-zA-Z_.]+(?: *, *[0-9a-zA-Z_.]+)*))|"
+ r"(?:^\s*cdef +extern +from +['\"]([^'\"]+)['\"])|"
+ r"(?:^\s*include +['\"]([^'\"]+)['\"])", re.M)
+dependency_after_from_regex = re.compile(
+ r"(?:^\s+\(([0-9a-zA-Z_., ]*)\)[#\n])|"
+ r"(?:^\s+([0-9a-zA-Z_., ]*)[#\n])",
+ re.M)
+
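+# Lines matched by dependency_regex, and the group that captures them
+# (illustrative; in practice the regex runs on source whose string literals
+# have already been replaced by labels):
+#   from libc.math cimport sin    -> group 1: 'libc.math'
+#   cimport numpy, cython         -> group 2: 'numpy, cython'
+#   cdef extern from "header.h":  -> group 3: 'header.h'
+#   include "utils.pxi"           -> group 4: 'utils.pxi'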
+
+def normalize_existing(base_path, rel_paths):
+ return normalize_existing0(os.path.dirname(base_path), tuple(set(rel_paths)))
+
+
+@cached_function
+def normalize_existing0(base_dir, rel_paths):
+ """
+ Given some base directory ``base_dir`` and a list of path names
+ ``rel_paths``, normalize each relative path name ``rel`` by
+ replacing it by ``os.path.join(base, rel)`` if that file exists.
+
+    Return a pair ``(normalized, needed_base)`` where ``normalized``
+    is the list of normalized file names and ``needed_base`` is
+ ``base_dir`` if we actually needed ``base_dir``. If no paths were
+ changed (for example, if all paths were already absolute), then
+ ``needed_base`` is ``None``.
+ """
+ normalized = []
+ needed_base = None
+ for rel in rel_paths:
+ if os.path.isabs(rel):
+ normalized.append(rel)
+ continue
+ path = join_path(base_dir, rel)
+ if path_exists(path):
+ normalized.append(os.path.normpath(path))
+ needed_base = base_dir
+ else:
+ normalized.append(rel)
+ return (normalized, needed_base)
+
+
+def resolve_depends(depends, include_dirs):
+ include_dirs = tuple(include_dirs)
+ resolved = []
+ for depend in depends:
+ path = resolve_depend(depend, include_dirs)
+ if path is not None:
+ resolved.append(path)
+ return resolved
+
+
+@cached_function
+def resolve_depend(depend, include_dirs):
+ if depend[0] == '<' and depend[-1] == '>':
+ return None
+ for dir in include_dirs:
+ path = join_path(dir, depend)
+ if path_exists(path):
+ return os.path.normpath(path)
+ return None
+
+
+@cached_function
+def package(filename):
+ dir = os.path.dirname(os.path.abspath(str(filename)))
+ if dir != filename and is_package_dir(dir):
+ return package(dir) + (os.path.basename(dir),)
+ else:
+ return ()
+
+
+@cached_function
+def fully_qualified_name(filename):
+ module = os.path.splitext(os.path.basename(filename))[0]
+ return '.'.join(package(filename) + (module,))
+
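+# Example (illustrative): for 'src/pkg/mod.pyx' where 'src/pkg' contains an
+# '__init__' file but 'src' does not, fully_qualified_name() returns 'pkg.mod'.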
+
+@cached_function
+def parse_dependencies(source_filename):
+ # Actual parsing is way too slow, so we use regular expressions.
+ # The only catch is that we must strip comments and string
+ # literals ahead of time.
+ with Utils.open_source_file(source_filename, error_handling='ignore') as fh:
+ source = fh.read()
+ distutils_info = DistutilsInfo(source)
+ source, literals = strip_string_literals(source)
+ source = source.replace('\\\n', ' ').replace('\t', ' ')
+
+ # TODO: pure mode
+ cimports = []
+ includes = []
+ externs = []
+ for m in dependency_regex.finditer(source):
+ cimport_from, cimport_list, extern, include = m.groups()
+ if cimport_from:
+ cimports.append(cimport_from)
+ m_after_from = dependency_after_from_regex.search(source, pos=m.end())
+ if m_after_from:
+ multiline, one_line = m_after_from.groups()
+ subimports = multiline or one_line
+ cimports.extend("{0}.{1}".format(cimport_from, s.strip())
+ for s in subimports.split(','))
+
+ elif cimport_list:
+ cimports.extend(x.strip() for x in cimport_list.split(","))
+ elif extern:
+ externs.append(literals[extern])
+ else:
+ includes.append(literals[include])
+ return cimports, includes, externs, distutils_info
+
+
+class DependencyTree(object):
+
+ def __init__(self, context, quiet=False):
+ self.context = context
+ self.quiet = quiet
+ self._transitive_cache = {}
+
+ def parse_dependencies(self, source_filename):
+ if path_exists(source_filename):
+ source_filename = os.path.normpath(source_filename)
+ return parse_dependencies(source_filename)
+
+ @cached_method
+ def included_files(self, filename):
+ # This is messy because included files are textually included, resolving
+ # cimports (but not includes) relative to the including file.
+ all = set()
+ for include in self.parse_dependencies(filename)[1]:
+ include_path = join_path(os.path.dirname(filename), include)
+ if not path_exists(include_path):
+ include_path = self.context.find_include_file(include, source_file_path=filename)
+ if include_path:
+ if '.' + os.path.sep in include_path:
+ include_path = os.path.normpath(include_path)
+ all.add(include_path)
+ all.update(self.included_files(include_path))
+ elif not self.quiet:
+ print(u"Unable to locate '%s' referenced from '%s'" % (filename, include))
+ return all
+
+ @cached_method
+ def cimports_externs_incdirs(self, filename):
+ # This is really ugly. Nested cimports are resolved with respect to the
+ # includer, but includes are resolved with respect to the includee.
+ cimports, includes, externs = self.parse_dependencies(filename)[:3]
+ cimports = set(cimports)
+ externs = set(externs)
+ incdirs = set()
+ for include in self.included_files(filename):
+ included_cimports, included_externs, included_incdirs = self.cimports_externs_incdirs(include)
+ cimports.update(included_cimports)
+ externs.update(included_externs)
+ incdirs.update(included_incdirs)
+ externs, incdir = normalize_existing(filename, externs)
+ if incdir:
+ incdirs.add(incdir)
+ return tuple(cimports), externs, incdirs
+
+ def cimports(self, filename):
+ return self.cimports_externs_incdirs(filename)[0]
+
+ def package(self, filename):
+ return package(filename)
+
+ def fully_qualified_name(self, filename):
+ return fully_qualified_name(filename)
+
+ @cached_method
+ def find_pxd(self, module, filename=None):
+ is_relative = module[0] == '.'
+ if is_relative and not filename:
+ raise NotImplementedError("New relative imports.")
+ if filename is not None:
+ module_path = module.split('.')
+ if is_relative:
+ module_path.pop(0) # just explicitly relative
+ package_path = list(self.package(filename))
+ while module_path and not module_path[0]:
+ try:
+ package_path.pop()
+ except IndexError:
+ return None # FIXME: error?
+ module_path.pop(0)
+ relative = '.'.join(package_path + module_path)
+ pxd = self.context.find_pxd_file(relative, source_file_path=filename)
+ if pxd:
+ return pxd
+ if is_relative:
+ return None # FIXME: error?
+ return self.context.find_pxd_file(module, source_file_path=filename)
+
+ @cached_method
+ def cimported_files(self, filename):
+ filename_root, filename_ext = os.path.splitext(filename)
+ if filename_ext in ('.pyx', '.py') and path_exists(filename_root + '.pxd'):
+ pxd_list = [filename_root + '.pxd']
+ else:
+ pxd_list = []
+        # 'from package cimport module' is reported by cimports() as the
+        # combined name 'package.module'; find_pxd() resolves which form exists.
+ for module in self.cimports(filename):
+ if module[:7] == 'cython.' or module == 'cython':
+ continue
+ pxd_file = self.find_pxd(module, filename)
+ if pxd_file is not None:
+ pxd_list.append(pxd_file)
+ return tuple(pxd_list)
+
+ @cached_method
+ def immediate_dependencies(self, filename):
+ all_deps = {filename}
+ all_deps.update(self.cimported_files(filename))
+ all_deps.update(self.included_files(filename))
+ return all_deps
+
+ def all_dependencies(self, filename):
+ return self.transitive_merge(filename, self.immediate_dependencies, set.union)
+
+ @cached_method
+ def timestamp(self, filename):
+ return os.path.getmtime(filename)
+
+ def extract_timestamp(self, filename):
+ return self.timestamp(filename), filename
+
+ def newest_dependency(self, filename):
+ return max([self.extract_timestamp(f) for f in self.all_dependencies(filename)])
+
+ def transitive_fingerprint(self, filename, module, compilation_options):
+ r"""
+ Return a fingerprint of a cython file that is about to be cythonized.
+
+ Fingerprints are looked up in future compilations. If the fingerprint
+ is found, the cythonization can be skipped. The fingerprint must
+ incorporate everything that has an influence on the generated code.
+ """
+ try:
+ m = hashlib.sha1(__version__.encode('UTF-8'))
+ m.update(file_hash(filename).encode('UTF-8'))
+ for x in sorted(self.all_dependencies(filename)):
+ if os.path.splitext(x)[1] not in ('.c', '.cpp', '.h'):
+ m.update(file_hash(x).encode('UTF-8'))
+ # Include the module attributes that change the compilation result
+ # in the fingerprint. We do not iterate over module.__dict__ and
+ # include almost everything here as users might extend Extension
+ # with arbitrary (random) attributes that would lead to cache
+ # misses.
+ m.update(str((
+ module.language,
+ getattr(module, 'py_limited_api', False),
+ getattr(module, 'np_pythran', False)
+ )).encode('UTF-8'))
+
+ m.update(compilation_options.get_fingerprint().encode('UTF-8'))
+ return m.hexdigest()
+ except IOError:
+ return None
+
+ def distutils_info0(self, filename):
+ info = self.parse_dependencies(filename)[3]
+ kwds = info.values
+ cimports, externs, incdirs = self.cimports_externs_incdirs(filename)
+ basedir = os.getcwd()
+ # Add dependencies on "cdef extern from ..." files
+ if externs:
+ externs = _make_relative(externs, basedir)
+ if 'depends' in kwds:
+ kwds['depends'] = list(set(kwds['depends']).union(externs))
+ else:
+ kwds['depends'] = list(externs)
+ # Add include_dirs to ensure that the C compiler will find the
+ # "cdef extern from ..." files
+ if incdirs:
+ include_dirs = list(kwds.get('include_dirs', []))
+ for inc in _make_relative(incdirs, basedir):
+ if inc not in include_dirs:
+ include_dirs.append(inc)
+ kwds['include_dirs'] = include_dirs
+ return info
+
+ def distutils_info(self, filename, aliases=None, base=None):
+ return (self.transitive_merge(filename, self.distutils_info0, DistutilsInfo.merge)
+ .subs(aliases)
+ .merge(base))
+
+ def transitive_merge(self, node, extract, merge):
+ try:
+ seen = self._transitive_cache[extract, merge]
+ except KeyError:
+ seen = self._transitive_cache[extract, merge] = {}
+ return self.transitive_merge_helper(
+ node, extract, merge, seen, {}, self.cimported_files)[0]
+
+ def transitive_merge_helper(self, node, extract, merge, seen, stack, outgoing):
+ if node in seen:
+ return seen[node], None
+ deps = extract(node)
+ if node in stack:
+ return deps, node
+ try:
+ stack[node] = len(stack)
+ loop = None
+ for next in outgoing(node):
+ sub_deps, sub_loop = self.transitive_merge_helper(next, extract, merge, seen, stack, outgoing)
+ if sub_loop is not None:
+ if loop is not None and stack[loop] < stack[sub_loop]:
+ pass
+ else:
+ loop = sub_loop
+ deps = merge(deps, sub_deps)
+ if loop == node:
+ loop = None
+ if loop is None:
+ seen[node] = deps
+ return deps, loop
+ finally:
+ del stack[node]
+
+
+_dep_tree = None
+
+def create_dependency_tree(ctx=None, quiet=False):
+ global _dep_tree
+ if _dep_tree is None:
+ if ctx is None:
+ ctx = Context(["."], get_directive_defaults(),
+ options=CompilationOptions(default_options))
+ _dep_tree = DependencyTree(ctx, quiet=quiet)
+ return _dep_tree
+
+
+# If this changes, change also docs/src/reference/compilation.rst
+# which mentions this function
+def default_create_extension(template, kwds):
+ if 'depends' in kwds:
+ include_dirs = kwds.get('include_dirs', []) + ["."]
+ depends = resolve_depends(kwds['depends'], include_dirs)
+ kwds['depends'] = sorted(set(depends + template.depends))
+
+ t = template.__class__
+ ext = t(**kwds)
+ metadata = dict(distutils=kwds, module_name=kwds['name'])
+ return (ext, metadata)
+
+
+# This may be useful for advanced users?
+def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None,
+ exclude_failures=False):
+ if language is not None:
+ print('Warning: passing language={0!r} to cythonize() is deprecated. '
+ 'Instead, put "# distutils: language={0}" in your .pyx or .pxd file(s)'.format(language))
+ if exclude is None:
+ exclude = []
+ if patterns is None:
+ return [], {}
+ elif isinstance(patterns, basestring) or not isinstance(patterns, Iterable):
+ patterns = [patterns]
+
+ from distutils.extension import Extension
+ if 'setuptools' in sys.modules:
+ # Support setuptools Extension instances as well.
+ extension_classes = (
+ Extension, # should normally be the same as 'setuptools.extension._Extension'
+ sys.modules['setuptools.extension']._Extension,
+ sys.modules['setuptools'].Extension,
+ )
+ else:
+ extension_classes = (Extension,)
+
+ explicit_modules = {m.name for m in patterns if isinstance(m, extension_classes)}
+ deps = create_dependency_tree(ctx, quiet=quiet)
+
+ to_exclude = set()
+ if not isinstance(exclude, list):
+ exclude = [exclude]
+ for pattern in exclude:
+ to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))
+
+ module_list = []
+ module_metadata = {}
+
+ # if no create_extension() function is defined, use a simple
+ # default function.
+ create_extension = ctx.options.create_extension or default_create_extension
+
+ seen = set()
+ for pattern in patterns:
+ if not isinstance(pattern, extension_classes):
+ pattern = encode_filename_in_py2(pattern)
+ if isinstance(pattern, str):
+ filepattern = pattern
+ template = Extension(pattern, []) # Fake Extension without sources
+ name = '*'
+ base = None
+ ext_language = language
+ elif isinstance(pattern, extension_classes):
+ cython_sources = [s for s in pattern.sources
+ if os.path.splitext(s)[1] in ('.py', '.pyx')]
+ if cython_sources:
+ filepattern = cython_sources[0]
+ if len(cython_sources) > 1:
+ print(u"Warning: Multiple cython sources found for extension '%s': %s\n"
+ u"See https://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html "
+ u"for sharing declarations among Cython files." % (pattern.name, cython_sources))
+ else:
+ # ignore non-cython modules
+ module_list.append(pattern)
+ continue
+ template = pattern
+ name = template.name
+ base = DistutilsInfo(exn=template)
+ ext_language = None # do not override whatever the Extension says
+ else:
+ msg = str("pattern is not of type str nor subclass of Extension (%s)"
+ " but of type %s and class %s" % (repr(Extension),
+ type(pattern),
+ pattern.__class__))
+ raise TypeError(msg)
+
+ for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern):
+ if os.path.abspath(file) in to_exclude:
+ continue
+ module_name = deps.fully_qualified_name(file)
+ if '*' in name:
+ if module_name in explicit_modules:
+ continue
+ elif name:
+ module_name = name
+
+ Utils.raise_error_if_module_name_forbidden(module_name)
+
+ if module_name not in seen:
+ try:
+ kwds = deps.distutils_info(file, aliases, base).values
+ except Exception:
+ if exclude_failures:
+ continue
+ raise
+ if base is not None:
+ for key, value in base.values.items():
+ if key not in kwds:
+ kwds[key] = value
+
+ kwds['name'] = module_name
+
+ sources = [file] + [m for m in template.sources if m != filepattern]
+ if 'sources' in kwds:
+ # allow users to add .c files etc.
+ for source in kwds['sources']:
+ source = encode_filename_in_py2(source)
+ if source not in sources:
+ sources.append(source)
+ kwds['sources'] = sources
+
+ if ext_language and 'language' not in kwds:
+ kwds['language'] = ext_language
+
+ np_pythran = kwds.pop('np_pythran', False)
+
+ # Create the new extension
+ m, metadata = create_extension(template, kwds)
+ m.np_pythran = np_pythran or getattr(m, 'np_pythran', False)
+ if m.np_pythran:
+ update_pythran_extension(m)
+ module_list.append(m)
+
+ # Store metadata (this will be written as JSON in the
+ # generated C file but otherwise has no purpose)
+ module_metadata[module_name] = metadata
+
+ if file not in m.sources:
+ # Old setuptools unconditionally replaces .pyx with .c/.cpp
+ target_file = os.path.splitext(file)[0] + ('.cpp' if m.language == 'c++' else '.c')
+ try:
+ m.sources.remove(target_file)
+ except ValueError:
+ # never seen this in the wild, but probably better to warn about this unexpected case
+ print(u"Warning: Cython source file not found in sources list, adding %s" % file)
+ m.sources.insert(0, file)
+ seen.add(name)
+ return module_list, module_metadata
+
+
+# This is the user-exposed entry point.
+def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=None, language=None,
+ exclude_failures=False, show_all_warnings=False, **options):
+ """
+ Compile a set of source modules into C/C++ files and return a list of distutils
+ Extension objects for them.
+
+ :param module_list: As module list, pass either a glob pattern, a list of glob
+ patterns or a list of Extension objects. The latter
+ allows you to configure the extensions separately
+ through the normal distutils options.
+ You can also pass Extension objects that have
+ glob patterns as their sources. Then, cythonize
+ will resolve the pattern and create a
+ copy of the Extension for every matching file.
+
+ :param exclude: When passing glob patterns as ``module_list``, you can exclude certain
+ module names explicitly by passing them into the ``exclude`` option.
+
+ :param nthreads: The number of concurrent builds for parallel compilation
+ (requires the ``multiprocessing`` module).
+
+ :param aliases: If you want to use compiler directives like ``# distutils: ...`` but
+ can only know at compile time (when running the ``setup.py``) which values
+ to use, you can use aliases and pass a dictionary mapping those aliases
+ to Python strings when calling :func:`cythonize`. As an example, say you
+ want to use the compiler
+ directive ``# distutils: include_dirs = ../static_libs/include/``
+ but this path isn't always fixed and you want to find it when running
+ the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``,
+ find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python
+ variable called ``foo`` as a string, and then call
+ ``cythonize(..., aliases={'MY_HEADERS': foo})``.
+
+ :param quiet: If True, Cython won't print error, warning, or status messages during the
+ compilation.
+
+ :param force: Forces the recompilation of the Cython modules, even if the timestamps
+ don't indicate that a recompilation is necessary.
+
+ :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this
+ will be determined at a per-file level based on compiler directives. This
+ affects only modules found based on file names. Extension instances passed
+ into :func:`cythonize` will not be changed. It is recommended to rather
+ use the compiler directive ``# distutils: language = c++`` than this option.
+
+ :param exclude_failures: For a broad 'try to compile' mode that ignores compilation
+ failures and simply excludes the failed extensions,
+ pass ``exclude_failures=True``. Note that this only
+ really makes sense for compiling ``.py`` files which can also
+ be used without compilation.
+
+ :param show_all_warnings: By default, not all Cython warnings are printed.
+ Set to true to show all warnings.
+
+    :param annotate: If ``True``, will produce an HTML file for each of the ``.pyx`` or ``.py``
+ files compiled. The HTML file gives an indication
+ of how much Python interaction there is in
+ each of the source code lines, compared to plain C code.
+ It also allows you to see the C/C++ code
+ generated for each line of Cython code. This report is invaluable when
+ optimizing a function for speed,
+        and for determining when to :ref:`release the GIL <nogil>`:
+ in general, a ``nogil`` block may contain only "white" code.
+ See examples in :ref:`determining_where_to_add_types` or
+ :ref:`primes`.
+
+
+    :param annotate-fullc: If ``True``, will produce a colorized HTML version of
+ the source which includes entire generated C/C++-code.
+
+
+ :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this:
+ ``compiler_directives={'embedsignature': True}``.
+ See :ref:`compiler-directives`.
+
+ :param depfile: produce depfiles for the sources if True.
+ """
+ if exclude is None:
+ exclude = []
+ if 'include_path' not in options:
+ options['include_path'] = ['.']
+ if 'common_utility_include_dir' in options:
+ safe_makedirs(options['common_utility_include_dir'])
+
+ depfile = options.pop('depfile', None)
+
+ if pythran is None:
+ pythran_options = None
+ else:
+ pythran_options = CompilationOptions(**options)
+ pythran_options.cplus = True
+ pythran_options.np_pythran = True
+
+ if force is None:
+ force = os.environ.get("CYTHON_FORCE_REGEN") == "1" # allow global overrides for build systems
+
+ c_options = CompilationOptions(**options)
+    cpp_options = CompilationOptions(**options)
+    cpp_options.cplus = True
+ ctx = Context.from_options(c_options)
+ options = c_options
+ module_list, module_metadata = create_extension_list(
+ module_list,
+ exclude=exclude,
+ ctx=ctx,
+ quiet=quiet,
+ exclude_failures=exclude_failures,
+ language=language,
+ aliases=aliases)
+
+ fix_windows_unicode_modules(module_list)
+
+ deps = create_dependency_tree(ctx, quiet=quiet)
+ build_dir = getattr(options, 'build_dir', None)
+
+ def copy_to_build_dir(filepath, root=os.getcwd()):
+ filepath_abs = os.path.abspath(filepath)
+ if os.path.isabs(filepath):
+ filepath = filepath_abs
+ if filepath_abs.startswith(root):
+ # distutil extension depends are relative to cwd
+ mod_dir = join_path(build_dir,
+ os.path.dirname(_relpath(filepath, root)))
+ copy_once_if_newer(filepath_abs, mod_dir)
+
+ modules_by_cfile = collections.defaultdict(list)
+ to_compile = []
+ for m in module_list:
+ if build_dir:
+ for dep in m.depends:
+ copy_to_build_dir(dep)
+
+ cy_sources = [
+ source for source in m.sources
+ if os.path.splitext(source)[1] in ('.pyx', '.py')]
+ if len(cy_sources) == 1:
+ # normal "special" case: believe the Extension module name to allow user overrides
+ full_module_name = m.name
+ else:
+ # infer FQMN from source files
+ full_module_name = None
+
+ new_sources = []
+ for source in m.sources:
+ base, ext = os.path.splitext(source)
+ if ext in ('.pyx', '.py'):
+ if m.np_pythran:
+ c_file = base + '.cpp'
+ options = pythran_options
+ elif m.language == 'c++':
+ c_file = base + '.cpp'
+ options = cpp_options
+ else:
+ c_file = base + '.c'
+ options = c_options
+
+ # setup for out of place build directory if enabled
+ if build_dir:
+ if os.path.isabs(c_file):
+ c_file = os.path.splitdrive(c_file)[1]
+ c_file = c_file.split(os.sep, 1)[1]
+ c_file = os.path.join(build_dir, c_file)
+ dir = os.path.dirname(c_file)
+ safe_makedirs_once(dir)
+
+ # write out the depfile, if requested
+ if depfile:
+ dependencies = deps.all_dependencies(source)
+ write_depfile(c_file, source, dependencies)
+
+ # Missing files and those generated by other Cython versions should always be recreated.
+ if Utils.file_generated_by_this_cython(c_file):
+ c_timestamp = os.path.getmtime(c_file)
+ else:
+ c_timestamp = -1
+
+ # Priority goes first to modified files, second to direct
+ # dependents, and finally to indirect dependents.
+ if c_timestamp < deps.timestamp(source):
+ dep_timestamp, dep = deps.timestamp(source), source
+ priority = 0
+ else:
+ dep_timestamp, dep = deps.newest_dependency(source)
+ priority = 2 - (dep in deps.immediate_dependencies(source))
+ if force or c_timestamp < dep_timestamp:
+ if not quiet and not force:
+ if source == dep:
+ print(u"Compiling %s because it changed." % Utils.decode_filename(source))
+ else:
+ print(u"Compiling %s because it depends on %s." % (
+ Utils.decode_filename(source),
+ Utils.decode_filename(dep),
+ ))
+ if not force and options.cache:
+ fingerprint = deps.transitive_fingerprint(source, m, options)
+ else:
+ fingerprint = None
+ to_compile.append((
+ priority, source, c_file, fingerprint, quiet,
+ options, not exclude_failures, module_metadata.get(m.name),
+ full_module_name, show_all_warnings))
+ new_sources.append(c_file)
+ modules_by_cfile[c_file].append(m)
+ else:
+ new_sources.append(source)
+ if build_dir:
+ copy_to_build_dir(source)
+ m.sources = new_sources
+
+ if options.cache:
+ if not os.path.exists(options.cache):
+ os.makedirs(options.cache)
+ to_compile.sort()
+ # Drop "priority" component of "to_compile" entries and add a
+ # simple progress indicator.
+ N = len(to_compile)
+ progress_fmt = "[{0:%d}/{1}] " % len(str(N))
+ for i in range(N):
+ progress = progress_fmt.format(i+1, N)
+ to_compile[i] = to_compile[i][1:] + (progress,)
+
+ if N <= 1:
+ nthreads = 0
+ if nthreads:
+ import multiprocessing
+ pool = multiprocessing.Pool(
+ nthreads, initializer=_init_multiprocessing_helper)
+ # This is a bit more involved than it should be, because KeyboardInterrupts
+ # break the multiprocessing workers when using a normal pool.map().
+ # See, for example:
+ # https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
+ try:
+ result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
+ pool.close()
+ while not result.ready():
+ try:
+ result.get(99999) # seconds
+ except multiprocessing.TimeoutError:
+ pass
+ except KeyboardInterrupt:
+ pool.terminate()
+ raise
+ pool.join()
+ else:
+ for args in to_compile:
+ cythonize_one(*args)
+
+ if exclude_failures:
+ failed_modules = set()
+ for c_file, modules in modules_by_cfile.items():
+ if not os.path.exists(c_file):
+ failed_modules.update(modules)
+ elif os.path.getsize(c_file) < 200:
+ f = io_open(c_file, 'r', encoding='iso8859-1')
+ try:
+ if f.read(len('#error ')) == '#error ':
+ # dead compilation result
+ failed_modules.update(modules)
+ finally:
+ f.close()
+ if failed_modules:
+ for module in failed_modules:
+ module_list.remove(module)
+ print(u"Failed compilations: %s" % ', '.join(sorted([
+ module.name for module in failed_modules])))
+
+ if options.cache:
+ cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
+ # cythonize() is often followed by the (non-Python-buffered)
+ # compiler output, flush now to avoid interleaving output.
+ sys.stdout.flush()
+ return module_list
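+
+# Example setup.py usage (a sketch; assumes a 'mymod.pyx' next to setup.py):
+#
+#   from setuptools import setup
+#   from Cython.Build import cythonize
+#
+#   setup(ext_modules=cythonize('mymod.pyx', nthreads=4, annotate=True))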
+
+
+def fix_windows_unicode_modules(module_list):
+ # Hack around a distutils 3.[5678] bug on Windows for unicode module names.
+ # https://bugs.python.org/issue39432
+ if sys.platform != "win32":
+ return
+ if sys.version_info < (3, 5) or sys.version_info >= (3, 8, 2):
+ return
+
+ def make_filtered_list(ignored_symbol, old_entries):
+ class FilteredExportSymbols(list):
+ # export_symbols for unicode filename cause link errors on Windows
+ # Cython doesn't need them (it already defines PyInit with the correct linkage)
+ # so use this class as a temporary fix to stop them from being generated
+ def __contains__(self, val):
+ # so distutils doesn't "helpfully" add PyInit_
+ return val == ignored_symbol or list.__contains__(self, val)
+
+ filtered_list = FilteredExportSymbols(old_entries)
+ if old_entries:
+ filtered_list.extend(name for name in old_entries if name != ignored_symbol)
+ return filtered_list
+
+ for m in module_list:
+ # TODO: use m.name.isascii() in Py3.7+
+ try:
+ m.name.encode("ascii")
+ continue
+ except UnicodeEncodeError:
+ pass
+ m.export_symbols = make_filtered_list(
+ "PyInit_" + m.name.rsplit(".", 1)[-1],
+ m.export_symbols,
+ )
+
+
+if os.environ.get('XML_RESULTS'):
+ compile_result_dir = os.environ['XML_RESULTS']
+ def record_results(func):
+ def with_record(*args):
+ t = time.time()
+ success = True
+ try:
+ try:
+ func(*args)
+ except:
+ success = False
+ finally:
+ t = time.time() - t
+ module = fully_qualified_name(args[0])
+ name = "cythonize." + module
+ failures = 1 - success
+ if success:
+ failure_item = ""
+ else:
+ failure_item = "failure"
+ output = open(os.path.join(compile_result_dir, name + ".xml"), "w")
+ output.write("""
+
+
+
+ %(failure_item)s
+
+
+ """.strip() % locals())
+ output.close()
+ return with_record
+else:
+ def record_results(func):
+ return func
+
+
+# TODO: Share context? Issue: pyx processing leaks into pxd module
+@record_results
+def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None,
+ raise_on_failure=True, embedded_metadata=None,
+ full_module_name=None, show_all_warnings=False,
+ progress=""):
+ from ..Compiler.Main import compile_single, default_options
+ from ..Compiler.Errors import CompileError, PyrexError
+
+ if fingerprint:
+ if not os.path.exists(options.cache):
+ safe_makedirs(options.cache)
+ # Cython-generated c files are highly compressible.
+ # (E.g. a compression ratio of about 10 for Sage).
+ fingerprint_file_base = join_path(
+ options.cache, "%s-%s" % (os.path.basename(c_file), fingerprint))
+ gz_fingerprint_file = fingerprint_file_base + gzip_ext
+ zip_fingerprint_file = fingerprint_file_base + '.zip'
+ if os.path.exists(gz_fingerprint_file) or os.path.exists(zip_fingerprint_file):
+ if not quiet:
+ print(u"%sFound compiled %s in cache" % (progress, pyx_file))
+ if os.path.exists(gz_fingerprint_file):
+ os.utime(gz_fingerprint_file, None)
+ with contextlib.closing(gzip_open(gz_fingerprint_file, 'rb')) as g:
+ with contextlib.closing(open(c_file, 'wb')) as f:
+ shutil.copyfileobj(g, f)
+ else:
+ os.utime(zip_fingerprint_file, None)
+ dirname = os.path.dirname(c_file)
+ with contextlib.closing(zipfile.ZipFile(zip_fingerprint_file)) as z:
+ for artifact in z.namelist():
+ z.extract(artifact, os.path.join(dirname, artifact))
+ return
+ if not quiet:
+ print(u"%sCythonizing %s" % (progress, Utils.decode_filename(pyx_file)))
+ if options is None:
+ options = CompilationOptions(default_options)
+ options.output_file = c_file
+ options.embedded_metadata = embedded_metadata
+
+ old_warning_level = Errors.LEVEL
+ if show_all_warnings:
+ Errors.LEVEL = 0
+
+ any_failures = 0
+ try:
+ result = compile_single(pyx_file, options, full_module_name=full_module_name)
+ if result.num_errors > 0:
+ any_failures = 1
+ except (EnvironmentError, PyrexError) as e:
+ sys.stderr.write('%s\n' % e)
+ any_failures = 1
+ # XXX
+ import traceback
+ traceback.print_exc()
+ except Exception:
+ if raise_on_failure:
+ raise
+ import traceback
+ traceback.print_exc()
+ any_failures = 1
+ finally:
+ if show_all_warnings:
+ Errors.LEVEL = old_warning_level
+
+ if any_failures:
+ if raise_on_failure:
+ raise CompileError(None, pyx_file)
+ elif os.path.exists(c_file):
+ os.remove(c_file)
+ elif fingerprint:
+ artifacts = list(filter(None, [
+ getattr(result, attr, None)
+ for attr in ('c_file', 'h_file', 'api_file', 'i_file')]))
+ if len(artifacts) == 1:
+ fingerprint_file = gz_fingerprint_file
+ with contextlib.closing(open(c_file, 'rb')) as f:
+ with contextlib.closing(gzip_open(fingerprint_file + '.tmp', 'wb')) as g:
+ shutil.copyfileobj(f, g)
+ else:
+ fingerprint_file = zip_fingerprint_file
+ with contextlib.closing(zipfile.ZipFile(
+ fingerprint_file + '.tmp', 'w', zipfile_compression_mode)) as zip:
+ for artifact in artifacts:
+ zip.write(artifact, os.path.basename(artifact))
+ os.rename(fingerprint_file + '.tmp', fingerprint_file)
+
+
+def cythonize_one_helper(m):
+ import traceback
+ try:
+ return cythonize_one(*m)
+ except Exception:
+ traceback.print_exc()
+ raise
+
+
+def _init_multiprocessing_helper():
+ # KeyboardInterrupt kills workers, so don't let them get it
+ import signal
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+
+def cleanup_cache(cache, target_size, ratio=.85):
+ try:
+ p = subprocess.Popen(['du', '-s', '-k', os.path.abspath(cache)], stdout=subprocess.PIPE)
+ stdout, _ = p.communicate()
+ res = p.wait()
+ if res == 0:
+ total_size = 1024 * int(stdout.strip().split()[0])
+ if total_size < target_size:
+ return
+ except (OSError, ValueError):
+ pass
+ total_size = 0
+ all = []
+ for file in os.listdir(cache):
+ path = join_path(cache, file)
+ s = os.stat(path)
+ total_size += s.st_size
+ all.append((s.st_atime, s.st_size, path))
+ if total_size > target_size:
+ for time, size, file in reversed(sorted(all)):
+ os.unlink(file)
+ total_size -= size
+ if total_size < target_size * ratio:
+ break
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Distutils.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Distutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3efcc0d7b5101f5b5fbacfaa47c9afe760dbaaa6
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Distutils.py
@@ -0,0 +1 @@
+from Cython.Distutils.build_ext import build_ext
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Inline.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Inline.py
new file mode 100644
index 0000000000000000000000000000000000000000..fac6a79b9c33388e470ad39d2194ffd966425443
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Inline.py
@@ -0,0 +1,367 @@
+from __future__ import absolute_import
+
+import hashlib
+import inspect
+import os
+import re
+import sys
+
+from distutils.core import Distribution, Extension
+from distutils.command.build_ext import build_ext
+
+import Cython
+from ..Compiler.Main import Context
+from ..Compiler.Options import (default_options, CompilationOptions,
+ get_directive_defaults)
+
+from ..Compiler.Visitor import CythonTransform, EnvTransform
+from ..Compiler.ParseTreeTransforms import SkipDeclarations
+from ..Compiler.TreeFragment import parse_from_strings
+from ..Compiler.StringEncoding import _unicode
+from .Dependencies import strip_string_literals, cythonize, cached_function
+from ..Compiler import Pipeline
+from ..Utils import get_cython_cache_dir
+import cython as cython_module
+
+
+IS_PY3 = sys.version_info >= (3,)
+
+# A utility function to convert user-supplied ASCII strings to unicode.
+if not IS_PY3:
+ def to_unicode(s):
+ if isinstance(s, bytes):
+ return s.decode('ascii')
+ else:
+ return s
+else:
+ to_unicode = lambda x: x
+
+
+if sys.version_info < (3, 5):
+ import imp
+ def load_dynamic(name, module_path):
+ return imp.load_dynamic(name, module_path)
+else:
+ import importlib.util
+ from importlib.machinery import ExtensionFileLoader
+
+ def load_dynamic(name, path):
+ spec = importlib.util.spec_from_file_location(name, loader=ExtensionFileLoader(name, path))
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+ return module
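+
+# Illustrative usage (a sketch; the module name and path are hypothetical):
+#
+#   mod = load_dynamic('my_ext', '/tmp/my_ext.cpython-39-x86_64-linux-gnu.so')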
+
+
+class UnboundSymbols(EnvTransform, SkipDeclarations):
+ def __init__(self):
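+ # Deliberately bypass EnvTransform.__init__ (hence super(EnvTransform, ...))
+ # so that, apparently, no compiler Context is required here.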
+ super(EnvTransform, self).__init__(context=None)
+ self.unbound = set()
+ def visit_NameNode(self, node):
+ if not self.current_env().lookup(node.name):
+ self.unbound.add(node.name)
+ return node
+ def __call__(self, node):
+ super(UnboundSymbols, self).__call__(node)
+ return self.unbound
+
+
+@cached_function
+def unbound_symbols(code, context=None):
+ code = to_unicode(code)
+ if context is None:
+ context = Context([], get_directive_defaults(),
+ options=CompilationOptions(default_options))
+ from ..Compiler.ParseTreeTransforms import AnalyseDeclarationsTransform
+ tree = parse_from_strings('(tree fragment)', code)
+ for phase in Pipeline.create_pipeline(context, 'pyx'):
+ if phase is None:
+ continue
+ tree = phase(tree)
+ if isinstance(phase, AnalyseDeclarationsTransform):
+ break
+ try:
+ import builtins
+ except ImportError:
+ import __builtin__ as builtins
+ return tuple(UnboundSymbols()(tree) - set(dir(builtins)))
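+
+# Illustrative example (assumed behaviour): names that are neither bound in
+# the fragment nor builtins are reported, e.g.
+#
+#   unbound_symbols("c = a + len(b)")   # -> a tuple containing 'a' and 'b'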
+
+
+def unsafe_type(arg, context=None):
+ py_type = type(arg)
+ if py_type is int:
+ return 'long'
+ else:
+ return safe_type(arg, context)
+
+
+def safe_type(arg, context=None):
+ py_type = type(arg)
+ if py_type in (list, tuple, dict, str):
+ return py_type.__name__
+ elif py_type is complex:
+ return 'double complex'
+ elif py_type is float:
+ return 'double'
+ elif py_type is bool:
+ return 'bint'
+ elif 'numpy' in sys.modules and isinstance(arg, sys.modules['numpy'].ndarray):
+ return 'numpy.ndarray[numpy.%s_t, ndim=%s]' % (arg.dtype.name, arg.ndim)
+ else:
+ for base_type in py_type.__mro__:
+ if base_type.__module__ in ('__builtin__', 'builtins'):
+ return 'object'
+ module = context.find_module(base_type.__module__, need_pxd=False)
+ if module:
+ entry = module.lookup(base_type.__name__)
+ if entry.is_type:
+ return '%s.%s' % (base_type.__module__, base_type.__name__)
+ return 'object'
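+
+# Illustrative examples (assumed behaviour of the mapping above):
+#
+#   safe_type(1.5)       # -> 'double'
+#   safe_type(True)      # -> 'bint'
+#   safe_type([1, 2])    # -> 'list'
+#   safe_type(object())  # -> 'object'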
+
+
+def _get_build_extension():
+ dist = Distribution()
+ # Ensure the build respects distutils configuration by parsing
+ # the configuration files
+ config_files = dist.find_config_files()
+ dist.parse_config_files(config_files)
+ build_extension = build_ext(dist)
+ build_extension.finalize_options()
+ return build_extension
+
+
+@cached_function
+def _create_context(cython_include_dirs):
+ return Context(
+ list(cython_include_dirs),
+ get_directive_defaults(),
+ options=CompilationOptions(default_options)
+ )
+
+
+_cython_inline_cache = {}
+_cython_inline_default_context = _create_context(('.',))
+
+
+def _populate_unbound(kwds, unbound_symbols, locals=None, globals=None):
+ for symbol in unbound_symbols:
+ if symbol not in kwds:
+ if locals is None or globals is None:
+ calling_frame = inspect.currentframe().f_back.f_back.f_back
+ if locals is None:
+ locals = calling_frame.f_locals
+ if globals is None:
+ globals = calling_frame.f_globals
+ if symbol in locals:
+ kwds[symbol] = locals[symbol]
+ elif symbol in globals:
+ kwds[symbol] = globals[symbol]
+ else:
+ print("Couldn't find %r" % symbol)
+
+
+def _inline_key(orig_code, arg_sigs, language_level):
+ key = orig_code, arg_sigs, sys.version_info, sys.executable, language_level, Cython.__version__
+ return hashlib.sha1(_unicode(key).encode('utf-8')).hexdigest()
+
+
+def cython_inline(code, get_type=unsafe_type,
+ lib_dir=os.path.join(get_cython_cache_dir(), 'inline'),
+ cython_include_dirs=None, cython_compiler_directives=None,
+ force=False, quiet=False, locals=None, globals=None, language_level=None, **kwds):
+
+ if get_type is None:
+ get_type = lambda x: 'object'
+ ctx = _create_context(tuple(cython_include_dirs)) if cython_include_dirs else _cython_inline_default_context
+
+ cython_compiler_directives = dict(cython_compiler_directives) if cython_compiler_directives else {}
+ if language_level is None and 'language_level' not in cython_compiler_directives:
+ language_level = '3str'
+ if language_level is not None:
+ cython_compiler_directives['language_level'] = language_level
+
+ key_hash = None
+
+ # Fast path if this has been called in this session.
+ _unbound_symbols = _cython_inline_cache.get(code)
+ if _unbound_symbols is not None:
+ _populate_unbound(kwds, _unbound_symbols, locals, globals)
+ args = sorted(kwds.items())
+ arg_sigs = tuple([(get_type(value, ctx), arg) for arg, value in args])
+ key_hash = _inline_key(code, arg_sigs, language_level)
+ invoke = _cython_inline_cache.get((code, arg_sigs, key_hash))
+ if invoke is not None:
+ arg_list = [arg[1] for arg in args]
+ return invoke(*arg_list)
+
+ orig_code = code
+ code = to_unicode(code)
+ code, literals = strip_string_literals(code)
+ code = strip_common_indent(code)
+ if locals is None:
+ locals = inspect.currentframe().f_back.f_back.f_locals
+ if globals is None:
+ globals = inspect.currentframe().f_back.f_back.f_globals
+ try:
+ _cython_inline_cache[orig_code] = _unbound_symbols = unbound_symbols(code)
+ _populate_unbound(kwds, _unbound_symbols, locals, globals)
+ except AssertionError:
+ if not quiet:
+ # Parsing from strings not fully supported (e.g. cimports).
+ print("Could not parse code as a string (to extract unbound symbols).")
+
+ cimports = []
+ for name, arg in list(kwds.items()):
+ if arg is cython_module:
+ cimports.append('\ncimport cython as %s' % name)
+ del kwds[name]
+ arg_names = sorted(kwds)
+ arg_sigs = tuple([(get_type(kwds[arg], ctx), arg) for arg in arg_names])
+ if key_hash is None:
+ key_hash = _inline_key(orig_code, arg_sigs, language_level)
+ module_name = "_cython_inline_" + key_hash
+
+ if module_name in sys.modules:
+ module = sys.modules[module_name]
+
+ else:
+ build_extension = None
+ if cython_inline.so_ext is None:
+ # Figure out and cache current extension suffix
+ build_extension = _get_build_extension()
+ cython_inline.so_ext = build_extension.get_ext_filename('')
+
+ lib_dir = os.path.abspath(lib_dir)
+ module_path = os.path.join(lib_dir, module_name + cython_inline.so_ext)
+
+ if not os.path.exists(lib_dir):
+ os.makedirs(lib_dir)
+ if force or not os.path.isfile(module_path):
+ cflags = []
+ define_macros = []
+ c_include_dirs = []
+ qualified = re.compile(r'([.\w]+)[.]')
+ for type, _ in arg_sigs:
+ m = qualified.match(type)
+ if m:
+ cimports.append('\ncimport %s' % m.groups()[0])
+ # one special case
+ if m.groups()[0] == 'numpy':
+ import numpy
+ c_include_dirs.append(numpy.get_include())
+ define_macros.append(("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"))
+ # cflags.append('-Wno-unused')
+ module_body, func_body = extract_func_code(code)
+ params = ', '.join(['%s %s' % a for a in arg_sigs])
+ module_code = """
+%(module_body)s
+%(cimports)s
+def __invoke(%(params)s):
+%(func_body)s
+ return locals()
+ """ % {'cimports': '\n'.join(cimports),
+ 'module_body': module_body,
+ 'params': params,
+ 'func_body': func_body }
+ for key, value in literals.items():
+ module_code = module_code.replace(key, value)
+ pyx_file = os.path.join(lib_dir, module_name + '.pyx')
+ with open(pyx_file, 'w') as fh:
+ fh.write(module_code)
+ extension = Extension(
+ name=module_name,
+ sources=[pyx_file],
+ include_dirs=c_include_dirs or None,
+ extra_compile_args=cflags or None,
+ define_macros=define_macros or None,
+ )
+ if build_extension is None:
+ build_extension = _get_build_extension()
+ build_extension.extensions = cythonize(
+ [extension],
+ include_path=cython_include_dirs or ['.'],
+ compiler_directives=cython_compiler_directives,
+ quiet=quiet)
+ build_extension.build_temp = os.path.dirname(pyx_file)
+ build_extension.build_lib = lib_dir
+ build_extension.run()
+
+ if sys.platform == 'win32' and sys.version_info >= (3, 8):
+ with os.add_dll_directory(os.path.abspath(lib_dir)):
+ module = load_dynamic(module_name, module_path)
+ else:
+ module = load_dynamic(module_name, module_path)
+
+ _cython_inline_cache[orig_code, arg_sigs, key_hash] = module.__invoke
+ arg_list = [kwds[arg] for arg in arg_names]
+ return module.__invoke(*arg_list)
+
+
+# Cached extension suffix used by cython_inline above. The initial None is
+# replaced by the real suffix on the first cython_inline() invocation.
+cython_inline.so_ext = None
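+
+# Illustrative usage (a sketch, not part of the original module): unbound
+# names in the code string are resolved from the caller's frame or passed as
+# keyword arguments, e.g.
+#
+#   cython_inline("return a + b", a=1, b=2)   # -> 3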
+
+_find_non_space = re.compile('[^ ]').search
+
+
+def strip_common_indent(code):
+ min_indent = None
+ lines = code.splitlines()
+ for line in lines:
+ match = _find_non_space(line)
+ if not match:
+ continue # blank
+ indent = match.start()
+ if line[indent] == '#':
+ continue # comment
+ if min_indent is None or min_indent > indent:
+ min_indent = indent
+ for ix, line in enumerate(lines):
+ match = _find_non_space(line)
+ if not match or not line or line[match.start()] == '#':
+ continue
+ lines[ix] = line[min_indent:]
+ return '\n'.join(lines)
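+
+# Illustrative example (assumed behaviour):
+#
+#   strip_common_indent("    a = 1\n    return a")   # -> "a = 1\nreturn a"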
+
+
+module_statement = re.compile(r'^((cdef +(extern|class))|cimport|(from .+ cimport)|(from .+ import +[*]))')
+def extract_func_code(code):
+ module = []
+ function = []
+ current = function
+ code = code.replace('\t', ' ')
+ lines = code.split('\n')
+ for line in lines:
+ if not line.startswith(' '):
+ if module_statement.match(line):
+ current = module
+ else:
+ current = function
+ current.append(line)
+ return '\n'.join(module), ' ' + '\n '.join(function)
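+
+# Illustrative example (assumed behaviour): module-level statements (cimports,
+# cdef extern/class blocks, star imports) go into the first element, all other
+# lines into the re-indented function body:
+#
+#   module, body = extract_func_code("cimport libc.math\nreturn 1")
+#   # module == "cimport libc.math"; body ends with "return 1"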
+
+
+def get_body(source):
+ ix = source.index(':')
+ if source[:5] == 'lambda':
+ return "return %s" % source[ix+1:]
+ else:
+ return source[ix+1:]
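+
+# Illustrative example (assumed behaviour): everything after the first ':' is
+# returned, with lambdas rewritten into an explicit return:
+#
+#   get_body("def f(x): return x + 1")   # -> " return x + 1"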
+
+
+# Lots to be done here... It would be especially cool if compiled functions
+# could invoke each other quickly.
+class RuntimeCompiledFunction(object):
+
+ def __init__(self, f):
+ self._f = f
+ self._body = get_body(inspect.getsource(f))
+
+ def __call__(self, *args, **kwds):
+ all = inspect.getcallargs(self._f, *args, **kwds)
+ if IS_PY3:
+ return cython_inline(self._body, locals=self._f.__globals__, globals=self._f.__globals__, **all)
+ else:
+ return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/IpythonMagic.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/IpythonMagic.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fa43c96d1eb6f7ed809a0953779002e561ca40e
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/IpythonMagic.py
@@ -0,0 +1,572 @@
+# -*- coding: utf-8 -*-
+"""
+=====================
+Cython related magics
+=====================
+
+Magic command interface for interactive work with Cython
+
+.. note::
+
+ The ``Cython`` package needs to be installed separately. It
+ can be obtained using ``pip``.
+
+Usage
+=====
+
+To enable the magics below, execute ``%load_ext cython``.
+
+``%%cython``
+
+{CYTHON_DOC}
+
+``%%cython_inline``
+
+{CYTHON_INLINE_DOC}
+
+``%%cython_pyximport``
+
+{CYTHON_PYXIMPORT_DOC}
+
+Author:
+* Brian Granger
+
+Code moved from IPython and adapted by:
+* Martín Gaitán
+
+Parts of this code were taken from Cython.inline.
+"""
+#-----------------------------------------------------------------------------
+# Copyright (C) 2010-2011, IPython Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file ipython-COPYING.rst, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from __future__ import absolute_import, print_function
+
+import io
+import os
+import re
+import sys
+import time
+import copy
+import distutils.errors
+import distutils.log
+import textwrap
+
+IO_ENCODING = sys.getfilesystemencoding()
+IS_PY2 = sys.version_info[0] < 3
+
+import hashlib
+from distutils.core import Distribution, Extension
+from distutils.command.build_ext import build_ext
+
+from IPython.core import display
+from IPython.core import magic_arguments
+from IPython.core.magic import Magics, magics_class, cell_magic
+try:
+ from IPython.paths import get_ipython_cache_dir
+except ImportError:
+ # older IPython version
+ from IPython.utils.path import get_ipython_cache_dir
+from IPython.utils.text import dedent
+
+from ..Shadow import __version__ as cython_version
+from ..Compiler.Errors import CompileError
+from .Inline import cython_inline, load_dynamic
+from .Dependencies import cythonize
+from ..Utils import captured_fd, print_captured
+
+
+PGO_CONFIG = {
+ 'gcc': {
+ 'gen': ['-fprofile-generate', '-fprofile-dir={TEMPDIR}'],
+ 'use': ['-fprofile-use', '-fprofile-correction', '-fprofile-dir={TEMPDIR}'],
+ },
+ # blind copy from 'configure' script in CPython 3.7
+ 'icc': {
+ 'gen': ['-prof-gen'],
+ 'use': ['-prof-use'],
+ }
+}
+PGO_CONFIG['mingw32'] = PGO_CONFIG['gcc']
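+
+# Illustrative sketch (not part of the original module): _add_pgo_flags fills
+# in the {TEMPDIR} placeholder when a profile step is applied, e.g.
+#
+#   [f.format(TEMPDIR='/tmp/pgo') for f in PGO_CONFIG['gcc']['gen']]
+#   # -> ['-fprofile-generate', '-fprofile-dir=/tmp/pgo']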
+
+
+if IS_PY2:
+ def encode_fs(name):
+ return name if isinstance(name, bytes) else name.encode(IO_ENCODING)
+else:
+ def encode_fs(name):
+ return name
+
+
+@magics_class
+class CythonMagics(Magics):
+
+ def __init__(self, shell):
+ super(CythonMagics, self).__init__(shell)
+ self._reloads = {}
+ self._code_cache = {}
+ self._pyximport_installed = False
+
+ def _import_all(self, module):
+ mdict = module.__dict__
+ if '__all__' in mdict:
+ keys = mdict['__all__']
+ else:
+ keys = [k for k in mdict if not k.startswith('_')]
+
+ for k in keys:
+ try:
+ self.shell.push({k: mdict[k]})
+ except KeyError:
+ msg = "'module' object has no attribute '%s'" % k
+ raise AttributeError(msg)
+
+ @cell_magic
+ def cython_inline(self, line, cell):
+ """Compile and run a Cython code cell using Cython.inline.
+
+ This magic simply passes the body of the cell to Cython.inline
+ and returns the result. If the variables `a` and `b` are defined
+ in the user's namespace, here is a simple example that returns
+ their sum::
+
+ %%cython_inline
+ return a+b
+
+ For most purposes, we recommend the usage of the `%%cython` magic.
+ """
+ # user_ns is the user's namespace (locals), user_global_ns the globals;
+ # at the interactive top level the two usually refer to the same dict.
+ locs = self.shell.user_ns
+ globs = self.shell.user_global_ns
+ return cython_inline(cell, locals=locs, globals=globs)
+
+ @cell_magic
+ def cython_pyximport(self, line, cell):
+ """Compile and import a Cython code cell using pyximport.
+
+ The contents of the cell are written to a `.pyx` file in the current
+ working directory, which is then imported using `pyximport`. This
+ magic requires a module name to be passed::
+
+ %%cython_pyximport modulename
+ def f(x):
+ return 2.0*x
+
+ The compiled module is then imported and all of its symbols are
+ injected into the user's namespace. For most purposes, we recommend
+ the usage of the `%%cython` magic.
+ """
+ module_name = line.strip()
+ if not module_name:
+ raise ValueError('module name must be given')
+ fname = module_name + '.pyx'
+ with io.open(fname, 'w', encoding='utf-8') as f:
+ f.write(cell)
+ if 'pyximport' not in sys.modules or not self._pyximport_installed:
+ import pyximport
+ pyximport.install()
+ self._pyximport_installed = True
+ if module_name in self._reloads:
+ module = self._reloads[module_name]
+ # Note: reloading extension modules is not actually supported
+ # (it would require PEP 489 re-initialisation support).
+ # It is unclear why the disabled reload() call below was ever
+ # expected to work; updating the user's globals is all we need.
+ #reload(module)
+ else:
+ __import__(module_name)
+ module = sys.modules[module_name]
+ self._reloads[module_name] = module
+ self._import_all(module)
+
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ '-a', '--annotate', action='store_const', const='default', dest='annotate',
+ help="Produce a colorized HTML version of the source."
+ )
+ @magic_arguments.argument(
+ '--annotate-fullc', action='store_const', const='fullc', dest='annotate',
+ help="Produce a colorized HTML version of the source "
+ "which includes entire generated C/C++-code."
+ )
+ @magic_arguments.argument(
+ '-+', '--cplus', action='store_true', default=False,
+ help="Output a C++ rather than C file."
+ )
+ @magic_arguments.argument(
+ '-3', dest='language_level', action='store_const', const=3, default=None,
+ help="Select Python 3 syntax."
+ )
+ @magic_arguments.argument(
+ '-2', dest='language_level', action='store_const', const=2, default=None,
+ help="Select Python 2 syntax."
+ )
+ @magic_arguments.argument(
+ '-f', '--force', action='store_true', default=False,
+ help="Force the compilation of a new module, even if the source has been "
+ "previously compiled."
+ )
+ @magic_arguments.argument(
+ '-c', '--compile-args', action='append', default=[],
+ help="Extra flags to pass to compiler via the `extra_compile_args` "
+ "Extension flag (can be specified multiple times)."
+ )
+ @magic_arguments.argument(
+ '--link-args', action='append', default=[],
+ help="Extra flags to pass to linker via the `extra_link_args` "
+ "Extension flag (can be specified multiple times)."
+ )
+ @magic_arguments.argument(
+ '-l', '--lib', action='append', default=[],
+ help="Add a library to link the extension against (can be specified "
+ "multiple times)."
+ )
+ @magic_arguments.argument(
+ '-n', '--name',
+ help="Specify a name for the Cython module."
+ )
+ @magic_arguments.argument(
+ '-L', dest='library_dirs', metavar='dir', action='append', default=[],
+ help="Add a path to the list of library directories (can be specified "
+ "multiple times)."
+ )
+ @magic_arguments.argument(
+ '-I', '--include', action='append', default=[],
+ help="Add a path to the list of include directories (can be specified "
+ "multiple times)."
+ )
+ @magic_arguments.argument(
+ '-S', '--src', action='append', default=[],
+ help="Add a path to the list of src files (can be specified "
+ "multiple times)."
+ )
+ @magic_arguments.argument(
+ '--pgo', dest='pgo', action='store_true', default=False,
+ help=("Enable profile guided optimisation in the C compiler. "
+ "Compiles the cell twice and executes it in between to generate a runtime profile.")
+ )
+ @magic_arguments.argument(
+ '--verbose', dest='quiet', action='store_false', default=True,
+ help=("Print debug information like generated .c/.cpp file location "
+ "and exact gcc/g++ command invoked.")
+ )
+ @cell_magic
+ def cython(self, line, cell):
+ """Compile and import everything from a Cython code cell.
+
+ The contents of the cell are written to a `.pyx` file in the
+ directory `IPYTHONDIR/cython` using a filename with the hash of the
+ code. This file is then cythonized and compiled. The resulting module
+ is imported and all of its symbols are injected into the user's
+ namespace. The usage is similar to that of `%%cython_pyximport` but
+ you don't have to pass a module name::
+
+ %%cython
+ def f(x):
+ return 2.0*x
+
+ To compile OpenMP codes, pass the required `--compile-args`
+ and `--link-args`. For example with gcc::
+
+ %%cython --compile-args=-fopenmp --link-args=-fopenmp
+ ...
+
+ To enable profile guided optimisation, pass the ``--pgo`` option.
+ Note that the cell itself needs to take care of establishing a suitable
+ profile when executed. This can be done by implementing the functions to
+ optimise, and then calling them directly in the same cell on some realistic
+ training data like this::
+
+ %%cython --pgo
+ def critical_function(data):
+ for item in data:
+ ...
+
+ # execute function several times to build profile
+ from somewhere import some_typical_data
+ for _ in range(100):
+ critical_function(some_typical_data)
+
+ In Python 3.5 and later, you can distinguish between the profile and
+ non-profile runs as follows::
+
+ if "_pgo_" in __name__:
+ ... # execute critical code here
+ """
+ args = magic_arguments.parse_argstring(self.cython, line)
+ code = cell if cell.endswith('\n') else cell + '\n'
+ lib_dir = os.path.join(get_ipython_cache_dir(), 'cython')
+ key = (code, line, sys.version_info, sys.executable, cython_version)
+
+ if not os.path.exists(lib_dir):
+ os.makedirs(lib_dir)
+
+ if args.pgo:
+ key += ('pgo',)
+ if args.force:
+ # Force a new module name by adding the current time to the
+ # key which is hashed to determine the module name.
+ key += (time.time(),)
+
+ if args.name:
+ module_name = str(args.name) # no-op in Py3
+ else:
+ module_name = "_cython_magic_" + hashlib.sha1(str(key).encode('utf-8')).hexdigest()
+ html_file = os.path.join(lib_dir, module_name + '.html')
+ module_path = os.path.join(lib_dir, module_name + self.so_ext)
+
+ have_module = os.path.isfile(module_path)
+ need_cythonize = args.pgo or not have_module
+
+ if args.annotate:
+ if not os.path.isfile(html_file):
+ need_cythonize = True
+
+ extension = None
+ if need_cythonize:
+ extensions = self._cythonize(module_name, code, lib_dir, args, quiet=args.quiet)
+ if extensions is None:
+ # Compilation failed and printed error message
+ return None
+ assert len(extensions) == 1
+ extension = extensions[0]
+ self._code_cache[key] = module_name
+
+ if args.pgo:
+ self._profile_pgo_wrapper(extension, lib_dir)
+
+ def print_compiler_output(stdout, stderr, where):
+ # On Windows, errors are printed to stdout; forward both streams to 'where'.
+ print_captured(stdout, where, u"Content of stdout:\n")
+ print_captured(stderr, where, u"Content of stderr:\n")
+
+ get_stderr = get_stdout = None
+ try:
+ with captured_fd(1) as get_stdout:
+ with captured_fd(2) as get_stderr:
+ self._build_extension(
+ extension, lib_dir, pgo_step_name='use' if args.pgo else None, quiet=args.quiet)
+ except (distutils.errors.CompileError, distutils.errors.LinkError):
+ # Build failed, print error message from compiler/linker
+ print_compiler_output(get_stdout(), get_stderr(), sys.stderr)
+ return None
+
+ # Build seems ok, but we might still want to show any warnings that occurred
+ print_compiler_output(get_stdout(), get_stderr(), sys.stdout)
+
+ module = load_dynamic(module_name, module_path)
+ self._import_all(module)
+
+ if args.annotate:
+ try:
+ with io.open(html_file, encoding='utf-8') as f:
+ annotated_html = f.read()
+ except IOError as e:
+ # File could not be opened. Most likely the user has a version
+ # of Cython before 0.15.1 (when `cythonize` learned the
+ # `force` keyword argument) and has already compiled this
+ # exact source without annotation.
+ print('Cython completed successfully but the annotated '
+ 'source could not be read.', file=sys.stderr)
+ print(e, file=sys.stderr)
+ else:
+ return display.HTML(self.clean_annotated_html(annotated_html))
+
+ def _profile_pgo_wrapper(self, extension, lib_dir):
+ """
+ Generate a .c file for a separate extension module that calls the
+ module init function of the original module. This makes sure that the
+ PGO profiler sees the correct .o file of the final module, but it still
+ allows us to import the module under a different name for profiling,
+ before recompiling it into the PGO optimised module. Overwriting and
+ reimporting the same shared library is not portable.
+ """
+ extension = copy.copy(extension) # shallow copy, do not modify sources in place!
+ module_name = extension.name
+ pgo_module_name = '_pgo_' + module_name
+ pgo_wrapper_c_file = os.path.join(lib_dir, pgo_module_name + '.c')
+ with io.open(pgo_wrapper_c_file, 'w', encoding='utf-8') as f:
+ f.write(textwrap.dedent(u"""
+ #include "Python.h"
+ #if PY_MAJOR_VERSION < 3
+ extern PyMODINIT_FUNC init%(module_name)s(void);
+ PyMODINIT_FUNC init%(pgo_module_name)s(void); /*proto*/
+ PyMODINIT_FUNC init%(pgo_module_name)s(void) {
+ PyObject *sys_modules;
+ init%(module_name)s(); if (PyErr_Occurred()) return;
+ sys_modules = PyImport_GetModuleDict(); /* borrowed, no exception, "never" fails */
+ if (sys_modules) {
+ PyObject *module = PyDict_GetItemString(sys_modules, "%(module_name)s"); if (!module) return;
+ PyDict_SetItemString(sys_modules, "%(pgo_module_name)s", module);
+ Py_DECREF(module);
+ }
+ }
+ #else
+ extern PyMODINIT_FUNC PyInit_%(module_name)s(void);
+ PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void); /*proto*/
+ PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void) {
+ return PyInit_%(module_name)s();
+ }
+ #endif
+ """ % {'module_name': module_name, 'pgo_module_name': pgo_module_name}))
+
+ extension.sources = extension.sources + [pgo_wrapper_c_file] # do not modify in place!
+ extension.name = pgo_module_name
+
+ self._build_extension(extension, lib_dir, pgo_step_name='gen')
+
+ # import and execute module code to generate profile
+ so_module_path = os.path.join(lib_dir, pgo_module_name + self.so_ext)
+ load_dynamic(pgo_module_name, so_module_path)
+
+ def _cythonize(self, module_name, code, lib_dir, args, quiet=True):
+ pyx_file = os.path.join(lib_dir, module_name + '.pyx')
+ pyx_file = encode_fs(pyx_file)
+
+ c_include_dirs = args.include
+ c_src_files = list(map(str, args.src))
+ if 'numpy' in code:
+ import numpy
+ c_include_dirs.append(numpy.get_include())
+ with io.open(pyx_file, 'w', encoding='utf-8') as f:
+ f.write(code)
+ extension = Extension(
+ name=module_name,
+ sources=[pyx_file] + c_src_files,
+ include_dirs=c_include_dirs,
+ library_dirs=args.library_dirs,
+ extra_compile_args=args.compile_args,
+ extra_link_args=args.link_args,
+ libraries=args.lib,
+ language='c++' if args.cplus else 'c',
+ )
+ try:
+ opts = dict(
+ quiet=quiet,
+ annotate=args.annotate,
+ force=True,
+ language_level=min(3, sys.version_info[0]),
+ )
+ if args.language_level is not None:
+ assert args.language_level in (2, 3)
+ opts['language_level'] = args.language_level
+ return cythonize([extension], **opts)
+ except CompileError:
+ return None
+
+ def _build_extension(self, extension, lib_dir, temp_dir=None, pgo_step_name=None, quiet=True):
+ build_extension = self._get_build_extension(
+ extension, lib_dir=lib_dir, temp_dir=temp_dir, pgo_step_name=pgo_step_name)
+ old_threshold = None
+ try:
+ if not quiet:
+ old_threshold = distutils.log.set_threshold(distutils.log.DEBUG)
+ build_extension.run()
+ finally:
+ if not quiet and old_threshold is not None:
+ distutils.log.set_threshold(old_threshold)
+
+ def _add_pgo_flags(self, build_extension, step_name, temp_dir):
+ compiler_type = build_extension.compiler.compiler_type
+ if compiler_type == 'unix':
+ compiler_cmd = build_extension.compiler.compiler_so
+ # TODO: we could try to call "[cmd] --version" for better insights
+ if not compiler_cmd:
+ pass
+ elif 'clang' in compiler_cmd or 'clang' in compiler_cmd[0]:
+ compiler_type = 'clang'
+ elif 'icc' in compiler_cmd or 'icc' in compiler_cmd[0]:
+ compiler_type = 'icc'
+ elif 'gcc' in compiler_cmd or 'gcc' in compiler_cmd[0]:
+ compiler_type = 'gcc'
+ elif 'g++' in compiler_cmd or 'g++' in compiler_cmd[0]:
+ compiler_type = 'gcc'
+ config = PGO_CONFIG.get(compiler_type)
+ orig_flags = []
+ if config and step_name in config:
+ flags = [f.format(TEMPDIR=temp_dir) for f in config[step_name]]
+ for extension in build_extension.extensions:
+ orig_flags.append((extension.extra_compile_args, extension.extra_link_args))
+ extension.extra_compile_args = extension.extra_compile_args + flags
+ extension.extra_link_args = extension.extra_link_args + flags
+ else:
+ print("No PGO %s configuration known for C compiler type '%s'" % (step_name, compiler_type),
+ file=sys.stderr)
+ return orig_flags
+
+ @property
+ def so_ext(self):
+ """The extension suffix for compiled modules."""
+ try:
+ return self._so_ext
+ except AttributeError:
+ self._so_ext = self._get_build_extension().get_ext_filename('')
+ return self._so_ext
+
+ def _clear_distutils_mkpath_cache(self):
+ """clear distutils mkpath cache
+
+ prevents distutils from skipping re-creation of dirs that have been removed
+ """
+ try:
+ from distutils.dir_util import _path_created
+ except ImportError:
+ pass
+ else:
+ _path_created.clear()
+
+ def _get_build_extension(self, extension=None, lib_dir=None, temp_dir=None,
+ pgo_step_name=None, _build_ext=build_ext):
+ self._clear_distutils_mkpath_cache()
+ dist = Distribution()
+ config_files = dist.find_config_files()
+ try:
+ config_files.remove('setup.cfg')
+ except ValueError:
+ pass
+ dist.parse_config_files(config_files)
+
+ if not temp_dir:
+ temp_dir = lib_dir
+ add_pgo_flags = self._add_pgo_flags
+
+ if pgo_step_name:
+ base_build_ext = _build_ext
+ class _build_ext(_build_ext):
+ def build_extensions(self):
+ add_pgo_flags(self, pgo_step_name, temp_dir)
+ base_build_ext.build_extensions(self)
+
+ build_extension = _build_ext(dist)
+ build_extension.finalize_options()
+ if temp_dir:
+ temp_dir = encode_fs(temp_dir)
+ build_extension.build_temp = temp_dir
+ if lib_dir:
+ lib_dir = encode_fs(lib_dir)
+ build_extension.build_lib = lib_dir
+ if extension is not None:
+ build_extension.extensions = [extension]
+ return build_extension
+
+ @staticmethod
+ def clean_annotated_html(html):
+ """Clean up the annotated HTML source.
+
+ Strips the link to the generated C or C++ file, which we do not
+ present to the user.
+ """
+ r = re.compile('Raw output: (.*)')
+ html = '\n'.join(l for l in html.splitlines() if not r.match(l))
+ return html
+
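+# Illustrative example (assumed behaviour):
+#
+#   CythonMagics.clean_annotated_html(u"<p>ok</p>\nRaw output: x.c")  # -> u"<p>ok</p>"
+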
+__doc__ = __doc__.format(
+ # rST doesn't see the -+ flag as part of an option list, so we
+ # hide it from the module-level docstring.
+ CYTHON_DOC=dedent(CythonMagics.cython.__doc__\
+ .replace('-+, --cplus', '--cplus ')),
+ CYTHON_INLINE_DOC=dedent(CythonMagics.cython_inline.__doc__),
+ CYTHON_PYXIMPORT_DOC=dedent(CythonMagics.cython_pyximport.__doc__),
+)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCyCache.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCyCache.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5108ab9ea531903a8d2fdc00b9189eeaaca8f0f
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCyCache.py
@@ -0,0 +1,121 @@
+import difflib
+import glob
+import gzip
+import os
+import sys
+import tempfile
+import unittest
+
+import Cython.Build.Dependencies
+import Cython.Utils
+from Cython.TestUtils import CythonTest
+
+
+class TestCyCache(CythonTest):
+
+ def setUp(self):
+ CythonTest.setUp(self)
+ self.temp_dir = tempfile.mkdtemp(
+ prefix='cycache-test',
+ dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None)
+ self.src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir)
+ self.cache_dir = tempfile.mkdtemp(prefix='cache', dir=self.temp_dir)
+
+ def cache_files(self, file_glob):
+ return glob.glob(os.path.join(self.cache_dir, file_glob))
+
+ def fresh_cythonize(self, *args, **kwargs):
+ Cython.Utils.clear_function_caches()
+ Cython.Build.Dependencies._dep_tree = None # discard method caches
+ Cython.Build.Dependencies.cythonize(*args, **kwargs)
+
+ def test_cycache_switch(self):
+ content1 = 'value = 1\n'
+ content2 = 'value = 2\n'
+ a_pyx = os.path.join(self.src_dir, 'a.pyx')
+ a_c = a_pyx[:-4] + '.c'
+
+ with open(a_pyx, 'w') as f:
+ f.write(content1)
+ self.fresh_cythonize(a_pyx, cache=self.cache_dir)
+ self.fresh_cythonize(a_pyx, cache=self.cache_dir)
+ self.assertEqual(1, len(self.cache_files('a.c*')))
+ with open(a_c) as f:
+ a_contents1 = f.read()
+ os.unlink(a_c)
+
+ with open(a_pyx, 'w') as f:
+ f.write(content2)
+ self.fresh_cythonize(a_pyx, cache=self.cache_dir)
+ with open(a_c) as f:
+ a_contents2 = f.read()
+ os.unlink(a_c)
+
+ self.assertNotEqual(a_contents1, a_contents2, 'C file not changed!')
+ self.assertEqual(2, len(self.cache_files('a.c*')))
+
+ with open(a_pyx, 'w') as f:
+ f.write(content1)
+ self.fresh_cythonize(a_pyx, cache=self.cache_dir)
+ self.assertEqual(2, len(self.cache_files('a.c*')))
+ with open(a_c) as f:
+ a_contents = f.read()
+ self.assertEqual(
+ a_contents, a_contents1,
+ msg='\n'.join(list(difflib.unified_diff(
+ a_contents.split('\n'), a_contents1.split('\n')))[:10]))
+
+ @unittest.skipIf(sys.version_info[:2] == (3, 12) and sys.platform == "win32",
+ "This test is mysteriously broken on Windows on the CI only "
+ "(https://github.com/cython/cython/issues/5825)")
+ def test_cycache_uses_cache(self):
+ a_pyx = os.path.join(self.src_dir, 'a.pyx')
+ a_c = a_pyx[:-4] + '.c'
+ with open(a_pyx, 'w') as f:
+ f.write('pass')
+ self.fresh_cythonize(a_pyx, cache=self.cache_dir)
+ a_cache = os.path.join(self.cache_dir, os.listdir(self.cache_dir)[0])
+ # Use a with-block so the gzip stream is flushed and closed before reuse.
+ with gzip.GzipFile(a_cache, 'wb') as gz_file:
+ gz_file.write('fake stuff'.encode('ascii'))
+ os.unlink(a_c)
+ self.fresh_cythonize(a_pyx, cache=self.cache_dir)
+ with open(a_c) as f:
+ a_contents = f.read()
+ self.assertEqual(a_contents, 'fake stuff',
+ 'Unexpected contents: %s...' % a_contents[:100])
+
+ def test_multi_file_output(self):
+ a_pyx = os.path.join(self.src_dir, 'a.pyx')
+ a_c = a_pyx[:-4] + '.c'
+ a_h = a_pyx[:-4] + '.h'
+ a_api_h = a_pyx[:-4] + '_api.h'
+ with open(a_pyx, 'w') as f:
+ f.write('cdef public api int foo(int x): return x\n')
+ self.fresh_cythonize(a_pyx, cache=self.cache_dir)
+ expected = [a_c, a_h, a_api_h]
+ for output in expected:
+ self.assertTrue(os.path.exists(output), output)
+ os.unlink(output)
+ self.fresh_cythonize(a_pyx, cache=self.cache_dir)
+ for output in expected:
+ self.assertTrue(os.path.exists(output), output)
+
+ def test_options_invalidation(self):
+ hash_pyx = os.path.join(self.src_dir, 'options.pyx')
+ hash_c = hash_pyx[:-len('.pyx')] + '.c'
+
+ with open(hash_pyx, 'w') as f:
+ f.write('pass')
+ self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False)
+ self.assertEqual(1, len(self.cache_files('options.c*')))
+
+ os.unlink(hash_c)
+ self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=True)
+ self.assertEqual(2, len(self.cache_files('options.c*')))
+
+ os.unlink(hash_c)
+ self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=False)
+ self.assertEqual(2, len(self.cache_files('options.c*')))
+
+ os.unlink(hash_c)
+ self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=True)
+ self.assertEqual(2, len(self.cache_files('options.c*')))
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCythonizeArgsParser.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCythonizeArgsParser.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5a682dd6440800dad3cc3e120a910f381f1e83b
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestCythonizeArgsParser.py
@@ -0,0 +1,482 @@
+from Cython.Build.Cythonize import (
+ create_args_parser, parse_args_raw, parse_args,
+ parallel_compiles
+)
+
+from Cython.Compiler import Options
+from Cython.Compiler.Tests.Utils import backup_Options, restore_Options, check_global_options
+
+from unittest import TestCase
+
+import sys
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO # doesn't accept 'str' in Py2
+
+
+class TestCythonizeArgsParser(TestCase):
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self.parse_args = lambda x, parser=create_args_parser(): parse_args_raw(parser, x)
+
+ def are_default(self, options, skip):
+ # Options not listed in 'skip' must still have their default values;
+ # the assertEqual(opt_name, "") calls fail on purpose so that the
+ # offending option name shows up in the test output.
+ empty_containers = ['directives', 'compile_time_env', 'options', 'excludes']
+ are_none = ['language_level', 'annotate', 'build', 'build_inplace', 'force',
+ 'quiet', 'lenient', 'keep_going', 'no_docstrings']
+ for opt_name in empty_containers:
+ if len(getattr(options, opt_name)) != 0 and (opt_name not in skip):
+ self.assertEqual(opt_name, "", msg="For option " + opt_name)
+ return False
+ for opt_name in are_none:
+ if (getattr(options, opt_name) is not None) and (opt_name not in skip):
+ self.assertEqual(opt_name, "", msg="For option " + opt_name)
+ return False
+ if options.parallel != parallel_compiles and ('parallel' not in skip):
+ return False
+ return True
+
+ # testing directives:
+ def test_directive_short(self):
+ options, args = self.parse_args(['-X', 'cdivision=True'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['directives']))
+ self.assertEqual(options.directives['cdivision'], True)
+
+ def test_directive_long(self):
+ options, args = self.parse_args(['--directive', 'cdivision=True'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['directives']))
+ self.assertEqual(options.directives['cdivision'], True)
+
+ def test_directive_multiple(self):
+ options, args = self.parse_args(['-X', 'cdivision=True', '-X', 'c_string_type=bytes'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['directives']))
+ self.assertEqual(options.directives['cdivision'], True)
+ self.assertEqual(options.directives['c_string_type'], 'bytes')
+
+ def test_directive_multiple_v2(self):
+ options, args = self.parse_args(['-X', 'cdivision=True,c_string_type=bytes'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['directives']))
+ self.assertEqual(options.directives['cdivision'], True)
+ self.assertEqual(options.directives['c_string_type'], 'bytes')
+
+ def test_directive_value_yes(self):
+ options, args = self.parse_args(['-X', 'cdivision=YeS'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['directives']))
+ self.assertEqual(options.directives['cdivision'], True)
+
+ def test_directive_value_no(self):
+ options, args = self.parse_args(['-X', 'cdivision=no'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['directives']))
+ self.assertEqual(options.directives['cdivision'], False)
+
+ def test_directive_value_invalid(self):
+ with self.assertRaises(ValueError) as context:
+ options, args = self.parse_args(['-X', 'cdivision=sadfasd'])
+
+ def test_directive_key_invalid(self):
+ with self.assertRaises(ValueError) as context:
+ options, args = self.parse_args(['-X', 'abracadabra'])
+
+ def test_directive_no_value(self):
+ with self.assertRaises(ValueError) as context:
+ options, args = self.parse_args(['-X', 'cdivision'])
+
+ def test_directives_types(self):
+ # Use a list of pairs rather than a dict: a dict literal would silently
+ # drop the repeated keys, leaving most variants untested.
+ directives = [
+ ('auto_pickle', True),
+ ('c_string_type', 'bytearray'),
+ ('c_string_type', 'bytes'),
+ ('c_string_type', 'str'),
+ ('c_string_type', 'unicode'),
+ ('c_string_encoding', 'ascii'),
+ ('language_level', 2),
+ ('language_level', 3),
+ ('language_level', '3str'),
+ ('set_initial_path', 'my_initial_path'),
+ ]
+ for key, value in directives:
+ cmd = '{key}={value}'.format(key=key, value=str(value))
+ options, args = self.parse_args(['-X', cmd])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['directives']), msg = "Error for option: "+cmd)
+ self.assertEqual(options.directives[key], value, msg = "Error for option: "+cmd)
+
+ def test_directives_wrong(self):
+ # As above, a list of pairs avoids silently collapsing duplicate keys.
+ directives = [
+ ('auto_pickle', 42), # for bool type
+ ('auto_pickle', 'NONONO'), # for bool type
+ ('c_string_type', 'bites'),
+ #('c_string_encoding', 'a'),
+ #('language_level', 4),
+ ]
+ for key, value in directives:
+ cmd = '{key}={value}'.format(key=key, value=str(value))
+ with self.assertRaises(ValueError, msg = "Error for option: "+cmd) as context:
+ options, args = self.parse_args(['-X', cmd])
+
+ def test_compile_time_env_short(self):
+ options, args = self.parse_args(['-E', 'MYSIZE=10'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['compile_time_env']))
+ self.assertEqual(options.compile_time_env['MYSIZE'], 10)
+
+ def test_compile_time_env_long(self):
+ options, args = self.parse_args(['--compile-time-env', 'MYSIZE=10'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['compile_time_env']))
+ self.assertEqual(options.compile_time_env['MYSIZE'], 10)
+
+ def test_compile_time_env_multiple(self):
+ options, args = self.parse_args(['-E', 'MYSIZE=10', '-E', 'ARRSIZE=11'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['compile_time_env']))
+ self.assertEqual(options.compile_time_env['MYSIZE'], 10)
+ self.assertEqual(options.compile_time_env['ARRSIZE'], 11)
+
+ def test_compile_time_env_multiple_v2(self):
+ options, args = self.parse_args(['-E', 'MYSIZE=10,ARRSIZE=11'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['compile_time_env']))
+ self.assertEqual(options.compile_time_env['MYSIZE'], 10)
+ self.assertEqual(options.compile_time_env['ARRSIZE'], 11)
+
+ #testing options
+ def test_option_short(self):
+ options, args = self.parse_args(['-s', 'docstrings=True'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], True)
+
+ def test_option_long(self):
+ options, args = self.parse_args(['--option', 'docstrings=True'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], True)
+
+ def test_option_multiple(self):
+ options, args = self.parse_args(['-s', 'docstrings=True', '-s', 'buffer_max_dims=8'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], True)
+ # '-s' values are parsed as booleans, so '8' is coerced to True.
+ self.assertEqual(options.options['buffer_max_dims'], True)
+
+ def test_option_multiple_v2(self):
+ options, args = self.parse_args(['-s', 'docstrings=True,buffer_max_dims=8'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], True)
+ # '-s' values are parsed as booleans, so '8' is coerced to True.
+ self.assertEqual(options.options['buffer_max_dims'], True)
+
+ def test_option_value_yes(self):
+ options, args = self.parse_args(['-s', 'docstrings=YeS'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], True)
+
+ def test_option_value_4242(self):
+ options, args = self.parse_args(['-s', 'docstrings=4242'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], True)
+
+ def test_option_value_0(self):
+ options, args = self.parse_args(['-s', 'docstrings=0'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], False)
+
+ def test_option_value_emptystr(self):
+ options, args = self.parse_args(['-s', 'docstrings='])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], True)
+
+ def test_option_value_a_str(self):
+ options, args = self.parse_args(['-s', 'docstrings=BB'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], True)
+
+ def test_option_value_no(self):
+ options, args = self.parse_args(['-s', 'docstrings=nO'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], False)
+
+ def test_option_no_value(self):
+ options, args = self.parse_args(['-s', 'docstrings'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['docstrings'], True)
+
+ def test_option_any_key(self):
+ options, args = self.parse_args(['-s', 'abracadabra'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['options']))
+ self.assertEqual(options.options['abracadabra'], True)
+
+ def test_language_level_2(self):
+ options, args = self.parse_args(['-2'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['language_level']))
+ self.assertEqual(options.language_level, 2)
+
+ def test_language_level_3(self):
+ options, args = self.parse_args(['-3'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['language_level']))
+ self.assertEqual(options.language_level, 3)
+
+ def test_language_level_3str(self):
+ options, args = self.parse_args(['--3str'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['language_level']))
+ self.assertEqual(options.language_level, '3str')
+
+ def test_annotate_short(self):
+ options, args = self.parse_args(['-a'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['annotate']))
+ self.assertEqual(options.annotate, 'default')
+
+ def test_annotate_long(self):
+ options, args = self.parse_args(['--annotate'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['annotate']))
+ self.assertEqual(options.annotate, 'default')
+
+ def test_annotate_fullc(self):
+ options, args = self.parse_args(['--annotate-fullc'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['annotate']))
+ self.assertEqual(options.annotate, 'fullc')
+
+ def test_annotate_and_positional(self):
+ options, args = self.parse_args(['-a', 'foo.pyx'])
+ self.assertEqual(args, ['foo.pyx'])
+ self.assertTrue(self.are_default(options, ['annotate']))
+ self.assertEqual(options.annotate, 'default')
+
+ def test_annotate_and_optional(self):
+ options, args = self.parse_args(['-a', '--3str'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['annotate', 'language_level']))
+ self.assertEqual(options.annotate, 'default')
+ self.assertEqual(options.language_level, '3str')
+
+ def test_exclude_short(self):
+ options, args = self.parse_args(['-x', '*.pyx'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['excludes']))
+ self.assertTrue('*.pyx' in options.excludes)
+
+ def test_exclude_long(self):
+ options, args = self.parse_args(['--exclude', '*.pyx'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['excludes']))
+ self.assertTrue('*.pyx' in options.excludes)
+
+ def test_exclude_multiple(self):
+ options, args = self.parse_args(['--exclude', '*.pyx', '--exclude', '*.py', ])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['excludes']))
+ self.assertEqual(options.excludes, ['*.pyx', '*.py'])
+
+ def test_build_short(self):
+ options, args = self.parse_args(['-b'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['build']))
+ self.assertEqual(options.build, True)
+
+ def test_build_long(self):
+ options, args = self.parse_args(['--build'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['build']))
+ self.assertEqual(options.build, True)
+
+ def test_inplace_short(self):
+ options, args = self.parse_args(['-i'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['build_inplace']))
+ self.assertEqual(options.build_inplace, True)
+
+ def test_inplace_long(self):
+ options, args = self.parse_args(['--inplace'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['build_inplace']))
+ self.assertEqual(options.build_inplace, True)
+
+ def test_parallel_short(self):
+ options, args = self.parse_args(['-j', '42'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['parallel']))
+ self.assertEqual(options.parallel, 42)
+
+ def test_parallel_long(self):
+ options, args = self.parse_args(['--parallel', '42'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['parallel']))
+ self.assertEqual(options.parallel, 42)
+
+ def test_force_short(self):
+ options, args = self.parse_args(['-f'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['force']))
+ self.assertEqual(options.force, True)
+
+ def test_force_long(self):
+ options, args = self.parse_args(['--force'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['force']))
+ self.assertEqual(options.force, True)
+
+ def test_quiet_short(self):
+ options, args = self.parse_args(['-q'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['quiet']))
+ self.assertEqual(options.quiet, True)
+
+ def test_quiet_long(self):
+ options, args = self.parse_args(['--quiet'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['quiet']))
+ self.assertEqual(options.quiet, True)
+
+ def test_lenient_long(self):
+ options, args = self.parse_args(['--lenient'])
+ self.assertTrue(self.are_default(options, ['lenient']))
+ self.assertFalse(args)
+ self.assertEqual(options.lenient, True)
+
+ def test_keep_going_short(self):
+ options, args = self.parse_args(['-k'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['keep_going']))
+ self.assertEqual(options.keep_going, True)
+
+ def test_keep_going_long(self):
+ options, args = self.parse_args(['--keep-going'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['keep_going']))
+ self.assertEqual(options.keep_going, True)
+
+ def test_no_docstrings_long(self):
+ options, args = self.parse_args(['--no-docstrings'])
+ self.assertFalse(args)
+ self.assertTrue(self.are_default(options, ['no_docstrings']))
+ self.assertEqual(options.no_docstrings, True)
+
+ def test_file_name(self):
+ options, args = self.parse_args(['file1.pyx', 'file2.pyx'])
+ self.assertEqual(len(args), 2)
+ self.assertEqual(args[0], 'file1.pyx')
+ self.assertEqual(args[1], 'file2.pyx')
+ self.assertTrue(self.are_default(options, []))
+
+ def test_option_first(self):
+ options, args = self.parse_args(['-i', 'file.pyx'])
+ self.assertEqual(args, ['file.pyx'])
+ self.assertEqual(options.build_inplace, True)
+ self.assertTrue(self.are_default(options, ['build_inplace']))
+
+ def test_file_inbetween(self):
+ options, args = self.parse_args(['-i', 'file.pyx', '-a'])
+ self.assertEqual(args, ['file.pyx'])
+ self.assertEqual(options.build_inplace, True)
+ self.assertEqual(options.annotate, 'default')
+ self.assertTrue(self.are_default(options, ['build_inplace', 'annotate']))
+
+ def test_option_trailing(self):
+ options, args = self.parse_args(['file.pyx', '-i'])
+ self.assertEqual(args, ['file.pyx'])
+ self.assertEqual(options.build_inplace, True)
+ self.assertTrue(self.are_default(options, ['build_inplace']))
+
+ def test_interspersed_positional(self):
+ options, sources = self.parse_args([
+ 'file1.pyx', '-a',
+ 'file2.pyx'
+ ])
+ self.assertEqual(sources, ['file1.pyx', 'file2.pyx'])
+ self.assertEqual(options.annotate, 'default')
+ self.assertTrue(self.are_default(options, ['annotate']))
+
+ def test_interspersed_positional2(self):
+ options, sources = self.parse_args([
+ 'file1.pyx', '-a',
+ 'file2.pyx', '-a', 'file3.pyx'
+ ])
+ self.assertEqual(sources, ['file1.pyx', 'file2.pyx', 'file3.pyx'])
+ self.assertEqual(options.annotate, 'default')
+ self.assertTrue(self.are_default(options, ['annotate']))
+
+ def test_interspersed_positional3(self):
+ options, sources = self.parse_args([
+ '-f', 'f1', 'f2', '-a',
+ 'f3', 'f4', '-a', 'f5'
+ ])
+ self.assertEqual(sources, ['f1', 'f2', 'f3', 'f4', 'f5'])
+ self.assertEqual(options.annotate, 'default')
+ self.assertEqual(options.force, True)
+ self.assertTrue(self.are_default(options, ['annotate', 'force']))
+
+ def test_wrong_option(self):
+ old_stderr = sys.stderr
+ stderr = sys.stderr = StringIO()
+ try:
+ self.assertRaises(SystemExit, self.parse_args,
+ ['--unknown-option']
+ )
+ finally:
+ sys.stderr = old_stderr
+ self.assertTrue(stderr.getvalue())
+
+
+class TestParseArgs(TestCase):
+ def setUp(self):
+ self._options_backup = backup_Options()
+
+ def tearDown(self):
+ restore_Options(self._options_backup)
+
+ def check_default_global_options(self, white_list=[]):
+ self.assertEqual(check_global_options(self._options_backup, white_list), "")
+
+ def test_build_set_for_inplace(self):
+ options, args = parse_args(['foo.pyx', '-i'])
+ self.assertEqual(options.build, True)
+ self.check_default_global_options()
+
+ def test_lenient(self):
+ options, sources = parse_args(['foo.pyx', '--lenient'])
+ self.assertEqual(sources, ['foo.pyx'])
+ self.assertEqual(Options.error_on_unknown_names, False)
+ self.assertEqual(Options.error_on_uninitialized, False)
+ self.check_default_global_options(['error_on_unknown_names', 'error_on_uninitialized'])
+
+ def test_annotate(self):
+ options, sources = parse_args(['foo.pyx', '--annotate'])
+ self.assertEqual(sources, ['foo.pyx'])
+ self.assertEqual(Options.annotate, 'default')
+ self.check_default_global_options(['annotate'])
+
+ def test_annotate_fullc(self):
+ options, sources = parse_args(['foo.pyx', '--annotate-fullc'])
+ self.assertEqual(sources, ['foo.pyx'])
+ self.assertEqual(Options.annotate, 'fullc')
+ self.check_default_global_options(['annotate'])
+
+ def test_no_docstrings(self):
+ options, sources = parse_args(['foo.pyx', '--no-docstrings'])
+ self.assertEqual(sources, ['foo.pyx'])
+ self.assertEqual(Options.docstrings, False)
+ self.check_default_global_options(['docstrings'])
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestDependencies.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestDependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3888117d846d67f91fec9cf14d8f852a5a65d82
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestDependencies.py
@@ -0,0 +1,142 @@
+import contextlib
+import os.path
+import sys
+import tempfile
+import unittest
+from io import open
+from os.path import join as pjoin
+
+from ..Dependencies import extended_iglob
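+
+# extended_iglob (exercised below) is assumed to extend plain globbing with
+# '{a,b}' alternatives and recursive '**/' patterns, e.g.
+# extended_iglob("a/*.{py,pyx}") or extended_iglob("**/*.pyx").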
+
+
+@contextlib.contextmanager
+def writable_file(dir_path, filename):
+ with open(pjoin(dir_path, filename), "w", encoding="utf8") as f:
+ yield f
+
+
+class TestGlobbing(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls._orig_dir = os.getcwd()
+ if sys.version_info[0] < 3:
+ temp_path = cls._tmpdir = tempfile.mkdtemp()
+ else:
+ cls._tmpdir = tempfile.TemporaryDirectory()
+ temp_path = cls._tmpdir.name
+ os.chdir(temp_path)
+
+ for dir1 in "abcd":
+ for dir1x in [dir1, dir1 + 'x']:
+ for dir2 in "xyz":
+ dir_path = pjoin(dir1x, dir2)
+ os.makedirs(dir_path)
+ with writable_file(dir_path, "file2_pyx.pyx") as f:
+ f.write(u'""" PYX """')
+ with writable_file(dir_path, "file2_py.py") as f:
+ f.write(u'""" PY """')
+
+ with writable_file(dir1x, "file1_pyx.pyx") as f:
+ f.write(u'""" PYX """')
+ with writable_file(dir1x, "file1_py.py") as f:
+ f.write(u'""" PY """')
+
+ @classmethod
+ def tearDownClass(cls):
+ os.chdir(cls._orig_dir)
+ if sys.version_info[0] < 3:
+ import shutil
+ shutil.rmtree(cls._tmpdir)
+ else:
+ cls._tmpdir.cleanup()
+
+ def files_equal(self, pattern, expected_files):
+ expected_files = sorted(expected_files)
+ # It's the user's choice whether '/' will appear on Windows.
+ matched_files = sorted(path.replace('/', os.sep) for path in extended_iglob(pattern))
+ self.assertListEqual(matched_files, expected_files) # /
+
+ # Special case for Windows: also support '\' in patterns.
+ if os.sep == '\\' and '/' in pattern:
+ matched_files = sorted(extended_iglob(pattern.replace('/', '\\')))
+ self.assertListEqual(matched_files, expected_files) # \
+
+ def test_extended_iglob_simple(self):
+ ax_files = [pjoin("a", "x", "file2_pyx.pyx"), pjoin("a", "x", "file2_py.py")]
+ self.files_equal("a/x/*", ax_files)
+ self.files_equal("a/x/*.c12", [])
+ self.files_equal("a/x/*.{py,pyx,c12}", ax_files)
+ self.files_equal("a/x/*.{py,pyx}", ax_files)
+ self.files_equal("a/x/*.{pyx}", ax_files[:1])
+ self.files_equal("a/x/*.pyx", ax_files[:1])
+ self.files_equal("a/x/*.{py}", ax_files[1:])
+ self.files_equal("a/x/*.py", ax_files[1:])
+
+ def test_extended_iglob_simple_star(self):
+ for basedir in "ad":
+ files = [
+ pjoin(basedir, dirname, filename)
+ for dirname in "xyz"
+ for filename in ["file2_pyx.pyx", "file2_py.py"]
+ ]
+ self.files_equal(basedir + "/*/*", files)
+ self.files_equal(basedir + "/*/*.c12", [])
+ self.files_equal(basedir + "/*/*.{py,pyx,c12}", files)
+ self.files_equal(basedir + "/*/*.{py,pyx}", files)
+ self.files_equal(basedir + "/*/*.{pyx}", files[::2])
+ self.files_equal(basedir + "/*/*.pyx", files[::2])
+ self.files_equal(basedir + "/*/*.{py}", files[1::2])
+ self.files_equal(basedir + "/*/*.py", files[1::2])
+
+ for subdir in "xy*":
+ files = [
+ pjoin(basedir, dirname, filename)
+ for dirname in "xyz"
+ if subdir in ('*', dirname)
+ for filename in ["file2_pyx.pyx", "file2_py.py"]
+ ]
+ path = basedir + '/' + subdir + '/'
+ self.files_equal(path + "*", files)
+ self.files_equal(path + "*.{py,pyx}", files)
+ self.files_equal(path + "*.{pyx}", files[::2])
+ self.files_equal(path + "*.pyx", files[::2])
+ self.files_equal(path + "*.{py}", files[1::2])
+ self.files_equal(path + "*.py", files[1::2])
+
+ def test_extended_iglob_double_star(self):
+ basedirs = os.listdir(".")
+ files = [
+ pjoin(basedir, dirname, filename)
+ for basedir in basedirs
+ for dirname in "xyz"
+ for filename in ["file2_pyx.pyx", "file2_py.py"]
+ ]
+ all_files = [
+ pjoin(basedir, filename)
+ for basedir in basedirs
+ for filename in ["file1_pyx.pyx", "file1_py.py"]
+ ] + files
+ self.files_equal("*/*/*", files)
+ self.files_equal("*/*/**/*", files)
+ self.files_equal("*/**/*.*", all_files)
+ self.files_equal("**/*.*", all_files)
+ self.files_equal("*/**/*.c12", [])
+ self.files_equal("**/*.c12", [])
+ self.files_equal("*/*/*.{py,pyx,c12}", files)
+ self.files_equal("*/*/**/*.{py,pyx,c12}", files)
+ self.files_equal("*/**/*/*.{py,pyx,c12}", files)
+ self.files_equal("**/*/*/*.{py,pyx,c12}", files)
+ self.files_equal("**/*.{py,pyx,c12}", all_files)
+ self.files_equal("*/*/*.{py,pyx}", files)
+ self.files_equal("**/*/*/*.{py,pyx}", files)
+ self.files_equal("*/**/*/*.{py,pyx}", files)
+ self.files_equal("**/*.{py,pyx}", all_files)
+ self.files_equal("*/*/*.{pyx}", files[::2])
+ self.files_equal("**/*.{pyx}", all_files[::2])
+ self.files_equal("*/**/*/*.pyx", files[::2])
+ self.files_equal("*/*/*.pyx", files[::2])
+ self.files_equal("**/*.pyx", all_files[::2])
+ self.files_equal("*/*/*.{py}", files[1::2])
+ self.files_equal("**/*.{py}", all_files[1::2])
+ self.files_equal("*/*/*.py", files[1::2])
+ self.files_equal("**/*.py", all_files[1::2])
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestInline.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestInline.py
new file mode 100644
index 0000000000000000000000000000000000000000..53346137052b7ea6d3f02a5407f2df266868c109
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestInline.py
@@ -0,0 +1,112 @@
+import os
+import tempfile
+import unittest
+from Cython.Shadow import inline
+from Cython.Build.Inline import safe_type
+from Cython.TestUtils import CythonTest
+
+try:
+ import numpy
+ has_numpy = True
+except ImportError:
+ has_numpy = False
+
+test_kwds = dict(force=True, quiet=True)
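+# force=True recompiles even if a cached build of the same code exists;
+# quiet=True suppresses compiler output. Both are cython_inline() options.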
+
+global_value = 100
+
+class TestInline(CythonTest):
+ def setUp(self):
+ CythonTest.setUp(self)
+ self._call_kwds = dict(test_kwds)
+ if os.path.isdir('TEST_TMP'):
+ lib_dir = os.path.join('TEST_TMP','inline')
+ else:
+ lib_dir = tempfile.mkdtemp(prefix='cython_inline_')
+ self._call_kwds['lib_dir'] = lib_dir
+
+ def test_simple(self):
+ self.assertEqual(inline("return 1+2", **self._call_kwds), 3)
+
+ def test_types(self):
+ self.assertEqual(inline("""
+ cimport cython
+ return cython.typeof(a), cython.typeof(b)
+ """, a=1.0, b=[], **self._call_kwds), ('double', 'list object'))
+
+ def test_locals(self):
+ a = 1
+ b = 2
+ self.assertEqual(inline("return a+b", **self._call_kwds), 3)
+
+ def test_globals(self):
+ self.assertEqual(inline("return global_value + 1", **self._call_kwds), global_value + 1)
+
+ def test_no_return(self):
+ self.assertEqual(inline("""
+ a = 1
+ cdef double b = 2
+ cdef c = []
+ """, **self._call_kwds), dict(a=1, b=2.0, c=[]))
+
+ def test_def_node(self):
+ foo = inline("def foo(x): return x * x", **self._call_kwds)['foo']
+ self.assertEqual(foo(7), 49)
+
+ def test_class_ref(self):
+ class Type(object):
+ pass
+ tp = inline("Type")['Type']
+ self.assertEqual(tp, Type)
+
+ def test_pure(self):
+ import cython as cy
+ b = inline("""
+ b = cy.declare(float, a)
+ c = cy.declare(cy.pointer(cy.float), &b)
+ return b
+ """, a=3, **self._call_kwds)
+ self.assertEqual(type(b), float)
+
+ def test_compiler_directives(self):
+ self.assertEqual(
+ inline('return sum(x)',
+ x=[1, 2, 3],
+ cython_compiler_directives={'boundscheck': False}),
+ 6
+ )
+
+ def test_lang_version(self):
+ # GH-3419. Caching for inline code didn't always respect compiler directives.
+ inline_divcode = "def f(int a, int b): return a/b"
+ self.assertEqual(
+ inline(inline_divcode, language_level=2)['f'](5,2),
+ 2
+ )
+ self.assertEqual(
+ inline(inline_divcode, language_level=3)['f'](5,2),
+ 2.5
+ )
+ self.assertEqual(
+ inline(inline_divcode, language_level=2)['f'](5,2),
+ 2
+ )
+
+ def test_repeated_use(self):
+ inline_mulcode = "def f(int a, int b): return a * b"
+ self.assertEqual(inline(inline_mulcode)['f'](5, 2), 10)
+ self.assertEqual(inline(inline_mulcode)['f'](5, 3), 15)
+ self.assertEqual(inline(inline_mulcode)['f'](6, 2), 12)
+ self.assertEqual(inline(inline_mulcode)['f'](5, 2), 10)
+
+ f = inline(inline_mulcode)['f']
+ self.assertEqual(f(5, 2), 10)
+ self.assertEqual(f(5, 3), 15)
+
+ @unittest.skipIf(not has_numpy, "NumPy is not available")
+ def test_numpy(self):
+ import numpy
+ a = numpy.ndarray((10, 20))
+ a[0,0] = 10
+ self.assertEqual(safe_type(a), 'numpy.ndarray[numpy.float64_t, ndim=2]')
+ self.assertEqual(inline("return a[0,0]", a=a, **self._call_kwds), 10.0)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestIpythonMagic.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestIpythonMagic.py
new file mode 100644
index 0000000000000000000000000000000000000000..65d801c6b72e883fc766f034836234973d987ffe
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestIpythonMagic.py
@@ -0,0 +1,295 @@
+# -*- coding: utf-8 -*-
+# tag: ipython
+
+"""Tests for the Cython magics extension."""
+
+from __future__ import absolute_import
+
+import os
+import io
+import sys
+from contextlib import contextmanager
+from unittest import skipIf
+
+from Cython.Build import IpythonMagic
+from Cython.TestUtils import CythonTest
+from Cython.Compiler.Annotate import AnnotationCCodeWriter
+
+try:
+ import IPython.testing.globalipapp
+except ImportError:
+ # Disable tests and fake helpers for initialisation below.
+ def skip_if_not_installed(_):
+ return None
+else:
+ def skip_if_not_installed(c):
+ return c
+
+# not using IPython's decorators here because they depend on "nose"
+skip_win32 = skipIf(sys.platform == 'win32', "Skip on Windows")
+skip_py27 = skipIf(sys.version_info[:2] == (2,7), "Disabled in Py2.7")
+
+try:
+ # disable IPython history thread before it gets started to avoid having to clean it up
+ from IPython.core.history import HistoryManager
+ HistoryManager.enabled = False
+except ImportError:
+ pass
+
+
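+# Redirects sys.stdout/sys.stderr into in-memory text buffers; the captured
+# strings are appended to the yielded list only once the with-block exits.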
+@contextmanager
+def capture_output():
+ backup = sys.stdout, sys.stderr
+ try:
+ replacement = [
+ io.TextIOWrapper(io.BytesIO(), encoding=sys.stdout.encoding),
+ io.TextIOWrapper(io.BytesIO(), encoding=sys.stderr.encoding),
+ ]
+ sys.stdout, sys.stderr = replacement
+ output = []
+ yield output
+ finally:
+ sys.stdout, sys.stderr = backup
+ for wrapper in replacement:
+ wrapper.seek(0) # rewind
+ output.append(wrapper.read())
+ wrapper.close()
+
+
+code = u"""\
+def f(x):
+ return 2*x
+"""
+
+cython3_code = u"""\
+def f(int x):
+ return 2 / x
+
+def call(x):
+ return f(*(x,))
+"""
+
+pgo_cython3_code = cython3_code + u"""\
+def main():
+ for _ in range(100): call(5)
+main()
+"""
+
+compile_error_code = u'''\
+cdef extern from *:
+ """
+ xxx a=1;
+ """
+ int a;
+def doit():
+ return a
+'''
+
+compile_warning_code = u'''\
+cdef extern from *:
+ """
+ #pragma message ( "CWarning" )
+ int a = 42;
+ """
+ int a;
+def doit():
+ return a
+'''
+
+
+@skip_if_not_installed
+class TestIPythonMagic(CythonTest):
+
+ @classmethod
+ def setUpClass(cls):
+ CythonTest.setUpClass()
+ cls._ip = IPython.testing.globalipapp.get_ipython()
+
+ def setUp(self):
+ CythonTest.setUp(self)
+ self._ip.extension_manager.load_extension('cython')
+
+ def test_cython_inline(self):
+ ip = self._ip
+ ip.ex('a=10; b=20')
+ result = ip.run_cell_magic('cython_inline', '', 'return a+b')
+ self.assertEqual(result, 30)
+
+ @skip_win32
+ def test_cython_pyximport(self):
+ ip = self._ip
+ module_name = '_test_cython_pyximport'
+ ip.run_cell_magic('cython_pyximport', module_name, code)
+ ip.ex('g = f(10)')
+ self.assertEqual(ip.user_ns['g'], 20.0)
+ ip.run_cell_magic('cython_pyximport', module_name, code)
+ ip.ex('h = f(-10)')
+ self.assertEqual(ip.user_ns['h'], -20.0)
+ try:
+ os.remove(module_name + '.pyx')
+ except OSError:
+ pass
+
+ def test_cython(self):
+ ip = self._ip
+ ip.run_cell_magic('cython', '', code)
+ ip.ex('g = f(10)')
+ self.assertEqual(ip.user_ns['g'], 20.0)
+
+ def test_cython_name(self):
+ # The Cython module named 'mymodule' defines the function f.
+ ip = self._ip
+ ip.run_cell_magic('cython', '--name=mymodule', code)
+ # This module can now be imported in the interactive namespace.
+ ip.ex('import mymodule; g = mymodule.f(10)')
+ self.assertEqual(ip.user_ns['g'], 20.0)
+
+ def test_cython_language_level(self):
+ # The Cython cell defines the functions f() and call().
+ ip = self._ip
+ ip.run_cell_magic('cython', '', cython3_code)
+ ip.ex('g = f(10); h = call(10)')
+ if sys.version_info[0] < 3:
+ self.assertEqual(ip.user_ns['g'], 2 // 10)
+ self.assertEqual(ip.user_ns['h'], 2 // 10)
+ else:
+ self.assertEqual(ip.user_ns['g'], 2.0 / 10.0)
+ self.assertEqual(ip.user_ns['h'], 2.0 / 10.0)
+
+ def test_cython3(self):
+ # The Cython cell defines the functions f() and call().
+ ip = self._ip
+ ip.run_cell_magic('cython', '-3', cython3_code)
+ ip.ex('g = f(10); h = call(10)')
+ self.assertEqual(ip.user_ns['g'], 2.0 / 10.0)
+ self.assertEqual(ip.user_ns['h'], 2.0 / 10.0)
+
+ def test_cython2(self):
+ # The Cython cell defines the functions f() and call().
+ ip = self._ip
+ ip.run_cell_magic('cython', '-2', cython3_code)
+ ip.ex('g = f(10); h = call(10)')
+ self.assertEqual(ip.user_ns['g'], 2 // 10)
+ self.assertEqual(ip.user_ns['h'], 2 // 10)
+
+ def test_cython_compile_error_shown(self):
+ ip = self._ip
+ with capture_output() as out:
+ ip.run_cell_magic('cython', '-3', compile_error_code)
+ captured_out, captured_err = out
+
+        # it could be that C-level output is captured by the distutils extension
+ # (and not by us) and is printed to stdout:
+ captured_all = captured_out + "\n" + captured_err
+ self.assertTrue("error" in captured_all, msg="error in " + captured_all)
+
+ def test_cython_link_error_shown(self):
+ ip = self._ip
+ with capture_output() as out:
+ ip.run_cell_magic('cython', '-3 -l=xxxxxxxx', code)
+ captured_out, captured_err = out
+
+        # it could be that C-level output is captured by the distutils extension
+ # (and not by us) and is printed to stdout:
+ captured_all = captured_out + "\n!" + captured_err
+ self.assertTrue("error" in captured_all, msg="error in " + captured_all)
+
+ def test_cython_warning_shown(self):
+ ip = self._ip
+ with capture_output() as out:
+            # Force a rebuild: after the first successful build no build step
+            # is performed, so no warning would be emitted.
+ ip.run_cell_magic('cython', '-3 -f', compile_warning_code)
+ captured_out, captured_err = out
+
+ # check that warning was printed to stdout even if build hasn't failed
+ self.assertTrue("CWarning" in captured_out)
+
+ @skip_py27 # Not strictly broken in Py2.7 but currently fails in CI due to C compiler issues.
+ @skip_win32
+ def test_cython3_pgo(self):
+ # The Cython cell defines the functions f() and call().
+ ip = self._ip
+ ip.run_cell_magic('cython', '-3 --pgo', pgo_cython3_code)
+ ip.ex('g = f(10); h = call(10); main()')
+ self.assertEqual(ip.user_ns['g'], 2.0 / 10.0)
+ self.assertEqual(ip.user_ns['h'], 2.0 / 10.0)
+
+ @skip_win32
+ def test_extlibs(self):
+ ip = self._ip
+ code = u"""
+from libc.math cimport sin
+x = sin(0.0)
+ """
+ ip.user_ns['x'] = 1
+ ip.run_cell_magic('cython', '-l m', code)
+ self.assertEqual(ip.user_ns['x'], 0)
+
+
+ def test_cython_verbose(self):
+ ip = self._ip
+ ip.run_cell_magic('cython', '--verbose', code)
+ ip.ex('g = f(10)')
+ self.assertEqual(ip.user_ns['g'], 20.0)
+
+ def test_cython_verbose_thresholds(self):
+ @contextmanager
+ def mock_distutils():
+ class MockLog:
+ DEBUG = 1
+ INFO = 2
+ thresholds = [INFO]
+
+ def set_threshold(self, val):
+ self.thresholds.append(val)
+ return self.thresholds[-2]
+
+
+ new_log = MockLog()
+ old_log = IpythonMagic.distutils.log
+ try:
+ IpythonMagic.distutils.log = new_log
+ yield new_log
+ finally:
+ IpythonMagic.distutils.log = old_log
+
+ ip = self._ip
+ with mock_distutils() as verbose_log:
+ ip.run_cell_magic('cython', '--verbose', code)
+ ip.ex('g = f(10)')
+ self.assertEqual(ip.user_ns['g'], 20.0)
+ self.assertEqual([verbose_log.INFO, verbose_log.DEBUG, verbose_log.INFO],
+ verbose_log.thresholds)
+
+ with mock_distutils() as normal_log:
+ ip.run_cell_magic('cython', '', code)
+ ip.ex('g = f(10)')
+ self.assertEqual(ip.user_ns['g'], 20.0)
+ self.assertEqual([normal_log.INFO], normal_log.thresholds)
+
+ def test_cython_no_annotate(self):
+ ip = self._ip
+ html = ip.run_cell_magic('cython', '', code)
+ self.assertTrue(html is None)
+
+ def test_cython_annotate(self):
+ ip = self._ip
+ html = ip.run_cell_magic('cython', '--annotate', code)
+ # somewhat brittle way to differentiate between annotated htmls
+ # with/without complete source code:
+ self.assertTrue(AnnotationCCodeWriter.COMPLETE_CODE_TITLE not in html.data)
+
+ def test_cython_annotate_default(self):
+ ip = self._ip
+ html = ip.run_cell_magic('cython', '-a', code)
+ # somewhat brittle way to differentiate between annotated htmls
+ # with/without complete source code:
+ self.assertTrue(AnnotationCCodeWriter.COMPLETE_CODE_TITLE not in html.data)
+
+ def test_cython_annotate_complete_c_code(self):
+ ip = self._ip
+ html = ip.run_cell_magic('cython', '--annotate-fullc', code)
+ # somewhat brittle way to differentiate between annotated htmls
+ # with/without complete source code:
+ self.assertTrue(AnnotationCCodeWriter.COMPLETE_CODE_TITLE in html.data)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestRecythonize.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestRecythonize.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb87018cb8770832852d50a210afbdec45d2fd36
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestRecythonize.py
@@ -0,0 +1,212 @@
+import shutil
+import os
+import tempfile
+import time
+
+import Cython.Build.Dependencies
+import Cython.Utils
+from Cython.TestUtils import CythonTest
+
+
+def fresh_cythonize(*args, **kwargs):
+ Cython.Utils.clear_function_caches()
+ Cython.Build.Dependencies._dep_tree = None # discard method caches
+ Cython.Build.Dependencies.cythonize(*args, **kwargs)
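+
+# cythonize() keeps per-process caches of dependency information; clearing
+# them lets each call below re-evaluate dependencies, so the tests can observe
+# recythonization triggered purely by on-disk changes.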
+
+class TestRecythonize(CythonTest):
+
+ def setUp(self):
+ CythonTest.setUp(self)
+ self.temp_dir = (
+ tempfile.mkdtemp(
+ prefix='recythonize-test',
+ dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None
+ )
+ )
+
+ def tearDown(self):
+ CythonTest.tearDown(self)
+ shutil.rmtree(self.temp_dir)
+
+ def test_recythonize_pyx_on_pxd_change(self):
+
+ src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir)
+
+ a_pxd = os.path.join(src_dir, 'a.pxd')
+ a_pyx = os.path.join(src_dir, 'a.pyx')
+ a_c = os.path.join(src_dir, 'a.c')
+ dep_tree = Cython.Build.Dependencies.create_dependency_tree()
+
+ with open(a_pxd, 'w') as f:
+ f.write('cdef int value\n')
+
+ with open(a_pyx, 'w') as f:
+ f.write('value = 1\n')
+
+
+ # The dependencies for "a.pyx" are "a.pxd" and "a.pyx".
+ self.assertEqual({a_pxd, a_pyx}, dep_tree.all_dependencies(a_pyx))
+
+ # Cythonize to create a.c
+ fresh_cythonize(a_pyx)
+
+ # Sleep to address coarse time-stamp precision.
+ time.sleep(1)
+
+ with open(a_c) as f:
+ a_c_contents1 = f.read()
+
+ with open(a_pxd, 'w') as f:
+ f.write('cdef double value\n')
+
+ fresh_cythonize(a_pyx)
+
+ with open(a_c) as f:
+ a_c_contents2 = f.read()
+
+ self.assertTrue("__pyx_v_1a_value = 1;" in a_c_contents1)
+ self.assertFalse("__pyx_v_1a_value = 1;" in a_c_contents2)
+ self.assertTrue("__pyx_v_1a_value = 1.0;" in a_c_contents2)
+ self.assertFalse("__pyx_v_1a_value = 1.0;" in a_c_contents1)
+
+
+ def test_recythonize_py_on_pxd_change(self):
+
+ src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir)
+
+ a_pxd = os.path.join(src_dir, 'a.pxd')
+ a_py = os.path.join(src_dir, 'a.py')
+ a_c = os.path.join(src_dir, 'a.c')
+ dep_tree = Cython.Build.Dependencies.create_dependency_tree()
+
+ with open(a_pxd, 'w') as f:
+ f.write('cdef int value\n')
+
+ with open(a_py, 'w') as f:
+ f.write('value = 1\n')
+
+
+ # The dependencies for "a.py" are "a.pxd" and "a.py".
+ self.assertEqual({a_pxd, a_py}, dep_tree.all_dependencies(a_py))
+
+ # Cythonize to create a.c
+ fresh_cythonize(a_py)
+
+ # Sleep to address coarse time-stamp precision.
+ time.sleep(1)
+
+ with open(a_c) as f:
+ a_c_contents1 = f.read()
+
+ with open(a_pxd, 'w') as f:
+ f.write('cdef double value\n')
+
+ fresh_cythonize(a_py)
+
+ with open(a_c) as f:
+ a_c_contents2 = f.read()
+
+
+ self.assertTrue("__pyx_v_1a_value = 1;" in a_c_contents1)
+ self.assertFalse("__pyx_v_1a_value = 1;" in a_c_contents2)
+ self.assertTrue("__pyx_v_1a_value = 1.0;" in a_c_contents2)
+ self.assertFalse("__pyx_v_1a_value = 1.0;" in a_c_contents1)
+
+ def test_recythonize_pyx_on_dep_pxd_change(self):
+ src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir)
+
+ a_pxd = os.path.join(src_dir, 'a.pxd')
+ a_pyx = os.path.join(src_dir, 'a.pyx')
+ b_pyx = os.path.join(src_dir, 'b.pyx')
+ b_c = os.path.join(src_dir, 'b.c')
+ dep_tree = Cython.Build.Dependencies.create_dependency_tree()
+
+ with open(a_pxd, 'w') as f:
+ f.write('cdef int value\n')
+
+ with open(a_pyx, 'w') as f:
+ f.write('value = 1\n')
+
+ with open(b_pyx, 'w') as f:
+ f.write('cimport a\n' + 'a.value = 2\n')
+
+
+ # The dependencies for "b.pyx" are "a.pxd" and "b.pyx".
+ self.assertEqual({a_pxd, b_pyx}, dep_tree.all_dependencies(b_pyx))
+
+
+ # Cythonize to create b.c
+ fresh_cythonize([a_pyx, b_pyx])
+
+ # Sleep to address coarse time-stamp precision.
+ time.sleep(1)
+
+ with open(b_c) as f:
+ b_c_contents1 = f.read()
+
+ with open(a_pxd, 'w') as f:
+ f.write('cdef double value\n')
+
+ fresh_cythonize([a_pyx, b_pyx])
+
+ with open(b_c) as f:
+ b_c_contents2 = f.read()
+
+
+
+ self.assertTrue("__pyx_v_1a_value = 2;" in b_c_contents1)
+ self.assertFalse("__pyx_v_1a_value = 2;" in b_c_contents2)
+ self.assertTrue("__pyx_v_1a_value = 2.0;" in b_c_contents2)
+ self.assertFalse("__pyx_v_1a_value = 2.0;" in b_c_contents1)
+
+
+
+ def test_recythonize_py_on_dep_pxd_change(self):
+
+ src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir)
+
+ a_pxd = os.path.join(src_dir, 'a.pxd')
+ a_pyx = os.path.join(src_dir, 'a.pyx')
+ b_pxd = os.path.join(src_dir, 'b.pxd')
+ b_py = os.path.join(src_dir, 'b.py')
+ b_c = os.path.join(src_dir, 'b.c')
+ dep_tree = Cython.Build.Dependencies.create_dependency_tree()
+
+ with open(a_pxd, 'w') as f:
+ f.write('cdef int value\n')
+
+ with open(a_pyx, 'w') as f:
+ f.write('value = 1\n')
+
+ with open(b_pxd, 'w') as f:
+ f.write('cimport a\n')
+
+ with open(b_py, 'w') as f:
+ f.write('a.value = 2\n')
+
+
+ # The dependencies for b.py are "a.pxd", "b.pxd" and "b.py".
+ self.assertEqual({a_pxd, b_pxd, b_py}, dep_tree.all_dependencies(b_py))
+
+
+ # Cythonize to create b.c
+ fresh_cythonize([a_pyx, b_py])
+
+ # Sleep to address coarse time-stamp precision.
+ time.sleep(1)
+
+ with open(b_c) as f:
+ b_c_contents1 = f.read()
+
+ with open(a_pxd, 'w') as f:
+ f.write('cdef double value\n')
+
+ fresh_cythonize([a_pyx, b_py])
+
+ with open(b_c) as f:
+ b_c_contents2 = f.read()
+
+ self.assertTrue("__pyx_v_1a_value = 2;" in b_c_contents1)
+ self.assertFalse("__pyx_v_1a_value = 2;" in b_c_contents2)
+ self.assertTrue("__pyx_v_1a_value = 2.0;" in b_c_contents2)
+ self.assertFalse("__pyx_v_1a_value = 2.0;" in b_c_contents1)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestStripLiterals.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestStripLiterals.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbe5c65a906b1759f17a026d63213d0c936ab66e
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/TestStripLiterals.py
@@ -0,0 +1,56 @@
+from Cython.Build.Dependencies import strip_string_literals
+
+from Cython.TestUtils import CythonTest
+
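+# strip_string_literals() replaces string literals and comment bodies with
+# numbered labels and returns (new_code, mapping), roughly (illustrative):
+#     strip_string_literals("x = 'a'  # hi", prefix="_L")
+#     -> ("x = '_L1_'  #_L2_", {'_L1_': 'a', '_L2_': ' hi'})
+# Substituting each label back (as t() does below) recovers the input.
+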
+class TestStripLiterals(CythonTest):
+
+ def t(self, before, expected):
+ actual, literals = strip_string_literals(before, prefix="_L")
+ self.assertEqual(expected, actual)
+ for key, value in literals.items():
+ actual = actual.replace(key, value)
+ self.assertEqual(before, actual)
+
+ def test_empty(self):
+ self.t("", "")
+
+ def test_single_quote(self):
+ self.t("'x'", "'_L1_'")
+
+ def test_double_quote(self):
+ self.t('"x"', '"_L1_"')
+
+ def test_nested_quotes(self):
+ self.t(""" '"' "'" """, """ '_L1_' "_L2_" """)
+
+ def test_triple_quote(self):
+ self.t(" '''a\n''' ", " '''_L1_''' ")
+
+ def test_backslash(self):
+ self.t(r"'a\'b'", "'_L1_'")
+ self.t(r"'a\\'", "'_L1_'")
+ self.t(r"'a\\\'b'", "'_L1_'")
+
+ def test_unicode(self):
+ self.t("u'abc'", "u'_L1_'")
+
+ def test_raw(self):
+ self.t(r"r'abc\\'", "r'_L1_'")
+
+ def test_raw_unicode(self):
+ self.t(r"ru'abc\\'", "ru'_L1_'")
+
+ def test_comment(self):
+ self.t("abc # foo", "abc #_L1_")
+
+ def test_comment_and_quote(self):
+ self.t("abc # 'x'", "abc #_L1_")
+ self.t("'abc#'", "'_L1_'")
+
+ def test_include(self):
+ self.t("include 'a.pxi' # something here",
+ "include '_L1_' #_L2_")
+
+ def test_extern(self):
+ self.t("cdef extern from 'a.h': # comment",
+ "cdef extern from '_L1_': #_L2_")
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa81adaff68e06d8e915a6afa375f62f7e5a8fad
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/Tests/__init__.py
@@ -0,0 +1 @@
+# empty file
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e4775fc3a94d0ff945d4ff3cb7a656d4f5fbe8a
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Build/__init__.py
@@ -0,0 +1,14 @@
+from .Dependencies import cythonize
+
+import sys
+if sys.version_info < (3, 7):
+ from .Distutils import build_ext
+del sys
+
+
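+# Module-level __getattr__ (PEP 562) is only honoured on Python 3.7+, hence
+# the eager import above for older interpreters.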
+def __getattr__(name):
+ if name == 'build_ext':
+ # Lazy import, fails if distutils is not available (in Python 3.12+).
+ from .Distutils import build_ext
+ return build_ext
+ raise AttributeError("module '%s' has no attribute '%s'" % (__name__, name))
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/CodeWriter.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/CodeWriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..f386da21ca7ff86b9fc84eee91b7ca610cb89288
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/CodeWriter.py
@@ -0,0 +1,820 @@
+"""
+Serializes a Cython code tree to Cython code. This is primarily useful for
+debugging and testing purposes.
+The output is in a strict format: no whitespace or comments from the input
+are preserved (nor could they be, as they are not present in the code tree).
+"""
+
+from __future__ import absolute_import, print_function
+
+from .Compiler.Visitor import TreeVisitor
+from .Compiler.ExprNodes import *
+from .Compiler.Nodes import CSimpleBaseTypeNode
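+
+# Minimal usage sketch (illustrative; assumes an already-parsed tree root,
+# e.g. from Cython.TestUtils.CythonTest.fragment() in the test suite):
+#
+#     lines = CodeWriter().write(tree_root).lines
+#     print(u"\n".join(lines))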
+
+
+class LinesResult(object):
+ def __init__(self):
+ self.lines = []
+ self.s = u""
+
+ def put(self, s):
+ self.s += s
+
+ def newline(self):
+ self.lines.append(self.s)
+ self.s = u""
+
+ def putline(self, s):
+ self.put(s)
+ self.newline()
+
+
+class DeclarationWriter(TreeVisitor):
+ """
+    A Cython code writer that is limited to declaration nodes.
+ """
+
+ indent_string = u" "
+
+ def __init__(self, result=None):
+ super(DeclarationWriter, self).__init__()
+ if result is None:
+ result = LinesResult()
+ self.result = result
+ self.numindents = 0
+ self.tempnames = {}
+ self.tempblockindex = 0
+
+ def write(self, tree):
+ self.visit(tree)
+ return self.result
+
+ def indent(self):
+ self.numindents += 1
+
+ def dedent(self):
+ self.numindents -= 1
+
+ def startline(self, s=u""):
+ self.result.put(self.indent_string * self.numindents + s)
+
+ def put(self, s):
+ self.result.put(s)
+
+ def putline(self, s):
+ self.result.putline(self.indent_string * self.numindents + s)
+
+ def endline(self, s=u""):
+ self.result.putline(s)
+
+ def line(self, s):
+ self.startline(s)
+ self.endline()
+
+ def comma_separated_list(self, items, output_rhs=False):
+ if len(items) > 0:
+ for item in items[:-1]:
+ self.visit(item)
+ if output_rhs and item.default is not None:
+ self.put(u" = ")
+ self.visit(item.default)
+ self.put(u", ")
+ self.visit(items[-1])
+ if output_rhs and items[-1].default is not None:
+ self.put(u" = ")
+ self.visit(items[-1].default)
+
+ def _visit_indented(self, node):
+ self.indent()
+ self.visit(node)
+ self.dedent()
+
+ def visit_Node(self, node):
+ raise AssertionError("Node not handled by serializer: %r" % node)
+
+ def visit_ModuleNode(self, node):
+ self.visitchildren(node)
+
+ def visit_StatListNode(self, node):
+ self.visitchildren(node)
+
+ def visit_CDefExternNode(self, node):
+ if node.include_file is None:
+ file = u'*'
+ else:
+ file = u'"%s"' % node.include_file
+ self.putline(u"cdef extern from %s:" % file)
+ self._visit_indented(node.body)
+
+ def visit_CPtrDeclaratorNode(self, node):
+ self.put('*')
+ self.visit(node.base)
+
+ def visit_CReferenceDeclaratorNode(self, node):
+ self.put('&')
+ self.visit(node.base)
+
+ def visit_CArrayDeclaratorNode(self, node):
+ self.visit(node.base)
+ self.put(u'[')
+ if node.dimension is not None:
+ self.visit(node.dimension)
+ self.put(u']')
+
+ def visit_CFuncDeclaratorNode(self, node):
+ # TODO: except, gil, etc.
+ self.visit(node.base)
+ self.put(u'(')
+ self.comma_separated_list(node.args)
+ self.endline(u')')
+
+ def visit_CNameDeclaratorNode(self, node):
+ self.put(node.name)
+
+ def visit_CSimpleBaseTypeNode(self, node):
+ # See Parsing.p_sign_and_longness
+ if node.is_basic_c_type:
+ self.put(("unsigned ", "", "signed ")[node.signed])
+ if node.longness < 0:
+ self.put("short " * -node.longness)
+ elif node.longness > 0:
+ self.put("long " * node.longness)
+ if node.name is not None:
+ self.put(node.name)
+
+ def visit_CComplexBaseTypeNode(self, node):
+ self.visit(node.base_type)
+ self.visit(node.declarator)
+
+ def visit_CNestedBaseTypeNode(self, node):
+ self.visit(node.base_type)
+ self.put(u'.')
+ self.put(node.name)
+
+ def visit_TemplatedTypeNode(self, node):
+ self.visit(node.base_type_node)
+ self.put(u'[')
+ self.comma_separated_list(node.positional_args + node.keyword_args.key_value_pairs)
+ self.put(u']')
+
+ def visit_CVarDefNode(self, node):
+ self.startline(u"cdef ")
+ self.visit(node.base_type)
+ self.put(u" ")
+ self.comma_separated_list(node.declarators, output_rhs=True)
+ self.endline()
+
+ def _visit_container_node(self, node, decl, extras, attributes):
+ # TODO: visibility
+ self.startline(decl)
+ if node.name:
+ self.put(u' ')
+ self.put(node.name)
+ if node.cname is not None:
+ self.put(u' "%s"' % node.cname)
+ if extras:
+ self.put(extras)
+ self.endline(':')
+ self.indent()
+ if not attributes:
+ self.putline('pass')
+ else:
+ for attribute in attributes:
+ self.visit(attribute)
+ self.dedent()
+
+ def visit_CStructOrUnionDefNode(self, node):
+ if node.typedef_flag:
+ decl = u'ctypedef '
+ else:
+ decl = u'cdef '
+ if node.visibility == 'public':
+ decl += u'public '
+ if node.packed:
+ decl += u'packed '
+ decl += node.kind
+ self._visit_container_node(node, decl, None, node.attributes)
+
+ def visit_CppClassNode(self, node):
+ extras = ""
+ if node.templates:
+ extras = u"[%s]" % ", ".join(node.templates)
+ if node.base_classes:
+ extras += "(%s)" % ", ".join(node.base_classes)
+ self._visit_container_node(node, u"cdef cppclass", extras, node.attributes)
+
+ def visit_CEnumDefNode(self, node):
+ self._visit_container_node(node, u"cdef enum", None, node.items)
+
+ def visit_CEnumDefItemNode(self, node):
+ self.startline(node.name)
+ if node.cname:
+ self.put(u' "%s"' % node.cname)
+ if node.value:
+ self.put(u" = ")
+ self.visit(node.value)
+ self.endline()
+
+ def visit_CClassDefNode(self, node):
+ assert not node.module_name
+ if node.decorators:
+ for decorator in node.decorators:
+ self.visit(decorator)
+ self.startline(u"cdef class ")
+ self.put(node.class_name)
+ if node.base_class_name:
+ self.put(u"(")
+ if node.base_class_module:
+ self.put(node.base_class_module)
+ self.put(u".")
+ self.put(node.base_class_name)
+ self.put(u")")
+ self.endline(u":")
+ self._visit_indented(node.body)
+
+ def visit_CTypeDefNode(self, node):
+ self.startline(u"ctypedef ")
+ self.visit(node.base_type)
+ self.put(u" ")
+ self.visit(node.declarator)
+ self.endline()
+
+ def visit_FuncDefNode(self, node):
+ # TODO: support cdef + cpdef functions
+ self.startline(u"def %s(" % node.name)
+ self.comma_separated_list(node.args)
+ self.endline(u"):")
+ self._visit_indented(node.body)
+
+ def visit_CFuncDefNode(self, node):
+ self.startline(u'cpdef ' if node.overridable else u'cdef ')
+ if node.modifiers:
+ self.put(' '.join(node.modifiers))
+ self.put(' ')
+ if node.visibility != 'private':
+ self.put(node.visibility)
+ self.put(u' ')
+ if node.api:
+ self.put(u'api ')
+
+ if node.base_type:
+ self.visit(node.base_type)
+ if node.base_type.name is not None:
+ self.put(u' ')
+
+ # visit the CFuncDeclaratorNode, but put a `:` at the end of line
+ self.visit(node.declarator.base)
+ self.put(u'(')
+ self.comma_separated_list(node.declarator.args)
+ self.endline(u'):')
+
+ self._visit_indented(node.body)
+
+ def visit_CArgDeclNode(self, node):
+ # For "CSimpleBaseTypeNode", the variable type may have been parsed as type.
+ # For other node types, the "name" is always None.
+ if not isinstance(node.base_type, CSimpleBaseTypeNode) or \
+ node.base_type.name is not None:
+ self.visit(node.base_type)
+
+ # If we printed something for "node.base_type", we may need to print an extra ' '.
+ #
+ # Special case: if "node.declarator" is a "CNameDeclaratorNode",
+ # its "name" might be an empty string, for example, for "cdef f(x)".
+ if node.declarator.declared_name():
+ self.put(u" ")
+ self.visit(node.declarator)
+ if node.default is not None:
+ self.put(u" = ")
+ self.visit(node.default)
+
+ def visit_CImportStatNode(self, node):
+ self.startline(u"cimport ")
+ self.put(node.module_name)
+ if node.as_name:
+ self.put(u" as ")
+ self.put(node.as_name)
+ self.endline()
+
+ def visit_FromCImportStatNode(self, node):
+ self.startline(u"from ")
+ self.put(node.module_name)
+ self.put(u" cimport ")
+ first = True
+ for pos, name, as_name, kind in node.imported_names:
+ assert kind is None
+ if first:
+ first = False
+ else:
+ self.put(u", ")
+ self.put(name)
+ if as_name:
+ self.put(u" as ")
+ self.put(as_name)
+ self.endline()
+
+ def visit_NameNode(self, node):
+ self.put(node.name)
+
+ def visit_DecoratorNode(self, node):
+ self.startline("@")
+ self.visit(node.decorator)
+ self.endline()
+
+ def visit_PassStatNode(self, node):
+ self.startline(u"pass")
+ self.endline()
+
+
+class StatementWriter(DeclarationWriter):
+ """
+ A Cython code writer for most language statement features.
+ """
+
+ def visit_SingleAssignmentNode(self, node):
+ self.startline()
+ self.visit(node.lhs)
+ self.put(u" = ")
+ self.visit(node.rhs)
+ self.endline()
+
+ def visit_CascadedAssignmentNode(self, node):
+ self.startline()
+ for lhs in node.lhs_list:
+ self.visit(lhs)
+ self.put(u" = ")
+ self.visit(node.rhs)
+ self.endline()
+
+ def visit_PrintStatNode(self, node):
+ self.startline(u"print ")
+ self.comma_separated_list(node.arg_tuple.args)
+ if not node.append_newline:
+ self.put(u",")
+ self.endline()
+
+ def visit_ForInStatNode(self, node):
+ self.startline(u"for ")
+ if node.target.is_sequence_constructor:
+ self.comma_separated_list(node.target.args)
+ else:
+ self.visit(node.target)
+ self.put(u" in ")
+ self.visit(node.iterator.sequence)
+ self.endline(u":")
+ self._visit_indented(node.body)
+ if node.else_clause is not None:
+ self.line(u"else:")
+ self._visit_indented(node.else_clause)
+
+ def visit_IfStatNode(self, node):
+ # The IfClauseNode is handled directly without a separate match
+        # for clarity.
+ self.startline(u"if ")
+ self.visit(node.if_clauses[0].condition)
+ self.endline(":")
+ self._visit_indented(node.if_clauses[0].body)
+ for clause in node.if_clauses[1:]:
+ self.startline("elif ")
+ self.visit(clause.condition)
+ self.endline(":")
+ self._visit_indented(clause.body)
+ if node.else_clause is not None:
+ self.line("else:")
+ self._visit_indented(node.else_clause)
+
+ def visit_WhileStatNode(self, node):
+ self.startline(u"while ")
+ self.visit(node.condition)
+ self.endline(u":")
+ self._visit_indented(node.body)
+ if node.else_clause is not None:
+ self.line("else:")
+ self._visit_indented(node.else_clause)
+
+ def visit_ContinueStatNode(self, node):
+ self.line(u"continue")
+
+ def visit_BreakStatNode(self, node):
+ self.line(u"break")
+
+ def visit_SequenceNode(self, node):
+ self.comma_separated_list(node.args) # Might need to discover whether we need () around tuples...hmm...
+
+ def visit_ExprStatNode(self, node):
+ self.startline()
+ self.visit(node.expr)
+ self.endline()
+
+ def visit_InPlaceAssignmentNode(self, node):
+ self.startline()
+ self.visit(node.lhs)
+ self.put(u" %s= " % node.operator)
+ self.visit(node.rhs)
+ self.endline()
+
+ def visit_WithStatNode(self, node):
+ self.startline()
+ self.put(u"with ")
+ self.visit(node.manager)
+ if node.target is not None:
+ self.put(u" as ")
+ self.visit(node.target)
+ self.endline(u":")
+ self._visit_indented(node.body)
+
+ def visit_TryFinallyStatNode(self, node):
+ self.line(u"try:")
+ self._visit_indented(node.body)
+ self.line(u"finally:")
+ self._visit_indented(node.finally_clause)
+
+ def visit_TryExceptStatNode(self, node):
+ self.line(u"try:")
+ self._visit_indented(node.body)
+ for x in node.except_clauses:
+ self.visit(x)
+ if node.else_clause is not None:
+ self.visit(node.else_clause)
+
+ def visit_ExceptClauseNode(self, node):
+ self.startline(u"except")
+ if node.pattern is not None:
+ self.put(u" ")
+ self.visit(node.pattern)
+ if node.target is not None:
+ self.put(u", ")
+ self.visit(node.target)
+ self.endline(":")
+ self._visit_indented(node.body)
+
+ def visit_ReturnStatNode(self, node):
+ self.startline("return")
+ if node.value is not None:
+ self.put(u" ")
+ self.visit(node.value)
+ self.endline()
+
+ def visit_ReraiseStatNode(self, node):
+ self.line("raise")
+
+ def visit_ImportNode(self, node):
+ self.put(u"(import %s)" % node.module_name.value)
+
+ def visit_TempsBlockNode(self, node):
+ """
+        Temporaries are output like "$1_1", where the first number is
+        an index of the TempsBlockNode and the second number is an index
+        of the temporary that the block allocates.
+ """
+ idx = 0
+ for handle in node.temps:
+ self.tempnames[handle] = "$%d_%d" % (self.tempblockindex, idx)
+ idx += 1
+ self.tempblockindex += 1
+ self.visit(node.body)
+
+ def visit_TempRefNode(self, node):
+ self.put(self.tempnames[node.handle])
+
+
+class ExpressionWriter(TreeVisitor):
+ """
+ A Cython code writer that is intentionally limited to expressions.
+ """
+
+ def __init__(self, result=None):
+ super(ExpressionWriter, self).__init__()
+ if result is None:
+ result = u""
+ self.result = result
+ self.precedence = [0]
+
+ def write(self, tree):
+ self.visit(tree)
+ return self.result
+
+ def put(self, s):
+ self.result += s
+
+ def remove(self, s):
+ if self.result.endswith(s):
+ self.result = self.result[:-len(s)]
+
+ def comma_separated_list(self, items):
+ if len(items) > 0:
+ for item in items[:-1]:
+ self.visit(item)
+ self.put(u", ")
+ self.visit(items[-1])
+
+ def visit_Node(self, node):
+ raise AssertionError("Node not handled by serializer: %r" % node)
+
+ def visit_IntNode(self, node):
+ self.put(node.value)
+
+ def visit_FloatNode(self, node):
+ self.put(node.value)
+
+ def visit_NoneNode(self, node):
+ self.put(u"None")
+
+ def visit_NameNode(self, node):
+ self.put(node.name)
+
+ def visit_EllipsisNode(self, node):
+ self.put(u"...")
+
+ def visit_BoolNode(self, node):
+ self.put(str(node.value))
+
+ def visit_ConstNode(self, node):
+ self.put(str(node.value))
+
+ def visit_ImagNode(self, node):
+ self.put(node.value)
+ self.put(u"j")
+
+ def emit_string(self, node, prefix=u""):
+ repr_val = repr(node.value)
+ if repr_val[0] in 'ub':
+ repr_val = repr_val[1:]
+ self.put(u"%s%s" % (prefix, repr_val))
+
+ def visit_BytesNode(self, node):
+ self.emit_string(node, u"b")
+
+ def visit_StringNode(self, node):
+ self.emit_string(node)
+
+ def visit_UnicodeNode(self, node):
+ self.emit_string(node, u"u")
+
+ def emit_sequence(self, node, parens=(u"", u"")):
+ open_paren, close_paren = parens
+ items = node.subexpr_nodes()
+ self.put(open_paren)
+ self.comma_separated_list(items)
+ self.put(close_paren)
+
+ def visit_ListNode(self, node):
+ self.emit_sequence(node, u"[]")
+
+ def visit_TupleNode(self, node):
+ self.emit_sequence(node, u"()")
+
+ def visit_SetNode(self, node):
+ if len(node.subexpr_nodes()) > 0:
+ self.emit_sequence(node, u"{}")
+ else:
+ self.put(u"set()")
+
+ def visit_DictNode(self, node):
+ self.emit_sequence(node, u"{}")
+
+ def visit_DictItemNode(self, node):
+ self.visit(node.key)
+ self.put(u": ")
+ self.visit(node.value)
+
+ unop_precedence = {
+ 'not': 3, '!': 3,
+ '+': 11, '-': 11, '~': 11,
+ }
+ binop_precedence = {
+ 'or': 1,
+ 'and': 2,
+ # unary: 'not': 3, '!': 3,
+ 'in': 4, 'not_in': 4, 'is': 4, 'is_not': 4, '<': 4, '<=': 4, '>': 4, '>=': 4, '!=': 4, '==': 4,
+ '|': 5,
+ '^': 6,
+ '&': 7,
+ '<<': 8, '>>': 8,
+ '+': 9, '-': 9,
+ '*': 10, '@': 10, '/': 10, '//': 10, '%': 10,
+ # unary: '+': 11, '-': 11, '~': 11
+ '**': 12,
+ }
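+
+    # Worked example (for orientation): serializing a Mul node over an Add
+    # node, i.e. (a + b) * c, enters '*' at precedence 10; the nested '+'
+    # has precedence 9 < 10, so operator_enter()/operator_exit() emit the
+    # parentheses around "a + b".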
+
+ def operator_enter(self, new_prec):
+ old_prec = self.precedence[-1]
+ if old_prec > new_prec:
+ self.put(u"(")
+ self.precedence.append(new_prec)
+
+ def operator_exit(self):
+ old_prec, new_prec = self.precedence[-2:]
+ if old_prec > new_prec:
+ self.put(u")")
+ self.precedence.pop()
+
+ def visit_NotNode(self, node):
+ op = 'not'
+ prec = self.unop_precedence[op]
+ self.operator_enter(prec)
+ self.put(u"not ")
+ self.visit(node.operand)
+ self.operator_exit()
+
+ def visit_UnopNode(self, node):
+ op = node.operator
+ prec = self.unop_precedence[op]
+ self.operator_enter(prec)
+ self.put(u"%s" % node.operator)
+ self.visit(node.operand)
+ self.operator_exit()
+
+ def visit_BinopNode(self, node):
+ op = node.operator
+ prec = self.binop_precedence.get(op, 0)
+ self.operator_enter(prec)
+ self.visit(node.operand1)
+ self.put(u" %s " % op.replace('_', ' '))
+ self.visit(node.operand2)
+ self.operator_exit()
+
+ def visit_BoolBinopNode(self, node):
+ self.visit_BinopNode(node)
+
+ def visit_PrimaryCmpNode(self, node):
+ self.visit_BinopNode(node)
+
+ def visit_IndexNode(self, node):
+ self.visit(node.base)
+ self.put(u"[")
+ if isinstance(node.index, TupleNode):
+ if node.index.subexpr_nodes():
+ self.emit_sequence(node.index)
+ else:
+ self.put(u"()")
+ else:
+ self.visit(node.index)
+ self.put(u"]")
+
+ def visit_SliceIndexNode(self, node):
+ self.visit(node.base)
+ self.put(u"[")
+ if node.start:
+ self.visit(node.start)
+ self.put(u":")
+ if node.stop:
+ self.visit(node.stop)
+ if node.slice:
+ self.put(u":")
+ self.visit(node.slice)
+ self.put(u"]")
+
+ def visit_SliceNode(self, node):
+ if not node.start.is_none:
+ self.visit(node.start)
+ self.put(u":")
+ if not node.stop.is_none:
+ self.visit(node.stop)
+ if not node.step.is_none:
+ self.put(u":")
+ self.visit(node.step)
+
+ def visit_CondExprNode(self, node):
+ self.visit(node.true_val)
+ self.put(u" if ")
+ self.visit(node.test)
+ self.put(u" else ")
+ self.visit(node.false_val)
+
+ def visit_AttributeNode(self, node):
+ self.visit(node.obj)
+ self.put(u".%s" % node.attribute)
+
+ def visit_SimpleCallNode(self, node):
+ self.visit(node.function)
+ self.put(u"(")
+ self.comma_separated_list(node.args)
+ self.put(")")
+
+ def emit_pos_args(self, node):
+ if node is None:
+ return
+ if isinstance(node, AddNode):
+ self.emit_pos_args(node.operand1)
+ self.emit_pos_args(node.operand2)
+ elif isinstance(node, TupleNode):
+ for expr in node.subexpr_nodes():
+ self.visit(expr)
+ self.put(u", ")
+ elif isinstance(node, AsTupleNode):
+ self.put("*")
+ self.visit(node.arg)
+ self.put(u", ")
+ else:
+ self.visit(node)
+ self.put(u", ")
+
+ def emit_kwd_args(self, node):
+ if node is None:
+ return
+ if isinstance(node, MergedDictNode):
+ for expr in node.subexpr_nodes():
+ self.emit_kwd_args(expr)
+ elif isinstance(node, DictNode):
+ for expr in node.subexpr_nodes():
+ self.put(u"%s=" % expr.key.value)
+ self.visit(expr.value)
+ self.put(u", ")
+ else:
+ self.put(u"**")
+ self.visit(node)
+ self.put(u", ")
+
+ def visit_GeneralCallNode(self, node):
+ self.visit(node.function)
+ self.put(u"(")
+ self.emit_pos_args(node.positional_args)
+ self.emit_kwd_args(node.keyword_args)
+ self.remove(u", ")
+ self.put(")")
+
+ def emit_comprehension(self, body, target,
+ sequence, condition,
+ parens=(u"", u"")):
+ open_paren, close_paren = parens
+ self.put(open_paren)
+ self.visit(body)
+ self.put(u" for ")
+ self.visit(target)
+ self.put(u" in ")
+ self.visit(sequence)
+ if condition:
+ self.put(u" if ")
+ self.visit(condition)
+ self.put(close_paren)
+
+ def visit_ComprehensionAppendNode(self, node):
+ self.visit(node.expr)
+
+ def visit_DictComprehensionAppendNode(self, node):
+ self.visit(node.key_expr)
+ self.put(u": ")
+ self.visit(node.value_expr)
+
+ def visit_ComprehensionNode(self, node):
+ tpmap = {'list': u"[]", 'dict': u"{}", 'set': u"{}"}
+ parens = tpmap[node.type.py_type_name()]
+ body = node.loop.body
+ target = node.loop.target
+ sequence = node.loop.iterator.sequence
+ condition = None
+ if hasattr(body, 'if_clauses'):
+ # type(body) is Nodes.IfStatNode
+ condition = body.if_clauses[0].condition
+ body = body.if_clauses[0].body
+ self.emit_comprehension(body, target, sequence, condition, parens)
+
+ def visit_GeneratorExpressionNode(self, node):
+ body = node.loop.body
+ target = node.loop.target
+ sequence = node.loop.iterator.sequence
+ condition = None
+ if hasattr(body, 'if_clauses'):
+ # type(body) is Nodes.IfStatNode
+ condition = body.if_clauses[0].condition
+ body = body.if_clauses[0].body.expr.arg
+ elif hasattr(body, 'expr'):
+ # type(body) is Nodes.ExprStatNode
+ body = body.expr.arg
+ self.emit_comprehension(body, target, sequence, condition, u"()")
+
+
+class PxdWriter(DeclarationWriter, ExpressionWriter):
+ """
+ A Cython code writer for everything supported in pxd files.
+ (currently unused)
+ """
+
+ def __call__(self, node):
+ print(u'\n'.join(self.write(node).lines))
+ return node
+
+ def visit_CFuncDefNode(self, node):
+ if node.overridable:
+ self.startline(u'cpdef ')
+ else:
+ self.startline(u'cdef ')
+ if node.modifiers:
+ self.put(' '.join(node.modifiers))
+ self.put(' ')
+ if node.visibility != 'private':
+ self.put(node.visibility)
+ self.put(u' ')
+ if node.api:
+ self.put(u'api ')
+ self.visit(node.declarator)
+
+ def visit_StatNode(self, node):
+ pass
+
+
+class CodeWriter(StatementWriter, ExpressionWriter):
+ """
+ A complete Cython code writer.
+ """
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AnalysedTreeTransforms.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AnalysedTreeTransforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4941606ef6f4c1072b5c68dcabe328ac6eaad3a
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AnalysedTreeTransforms.py
@@ -0,0 +1,99 @@
+from __future__ import absolute_import
+
+from .Visitor import ScopeTrackingTransform
+from .Nodes import StatListNode, SingleAssignmentNode, CFuncDefNode, DefNode
+from .ExprNodes import DictNode, DictItemNode, NameNode, UnicodeNode
+from .PyrexTypes import py_object_type
+from .StringEncoding import EncodedString
+from . import Symtab
+
+class AutoTestDictTransform(ScopeTrackingTransform):
+ # Handles autotestdict directive
+
+ excludelist = ['__cinit__', '__dealloc__', '__richcmp__',
+ '__nonzero__', '__bool__',
+ '__len__', '__contains__']
+
+ def visit_ModuleNode(self, node):
+ if node.is_pxd:
+ return node
+ self.scope_type = 'module'
+ self.scope_node = node
+
+ if not self.current_directives['autotestdict']:
+ return node
+ self.all_docstrings = self.current_directives['autotestdict.all']
+ self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef']
+
+ assert isinstance(node.body, StatListNode)
+
+ # First see if __test__ is already created
+ if u'__test__' in node.scope.entries:
+ # Do nothing
+ return node
+
+ pos = node.pos
+
+ self.tests = []
+ self.testspos = node.pos
+
+ test_dict_entry = node.scope.declare_var(EncodedString(u'__test__'),
+ py_object_type,
+ pos,
+ visibility='public')
+ create_test_dict_assignment = SingleAssignmentNode(pos,
+ lhs=NameNode(pos, name=EncodedString(u'__test__'),
+ entry=test_dict_entry),
+ rhs=DictNode(pos, key_value_pairs=self.tests))
+ self.visitchildren(node)
+ node.body.stats.append(create_test_dict_assignment)
+ return node
+
+ def add_test(self, testpos, path, doctest):
+ pos = self.testspos
+ keystr = u'%s (line %d)' % (path, testpos[1])
+ key = UnicodeNode(pos, value=EncodedString(keystr))
+ value = UnicodeNode(pos, value=doctest)
+ self.tests.append(DictItemNode(pos, key=key, value=value))
+
+ def visit_ExprNode(self, node):
+        # expressions cannot contain functions, and lambda expressions
+        # do not have a docstring
+ return node
+
+ def visit_FuncDefNode(self, node):
+ if not node.doc or (isinstance(node, DefNode) and node.fused_py_func):
+ return node
+ if not self.cdef_docstrings:
+ if isinstance(node, CFuncDefNode) and not node.py_func:
+ return node
+ if not self.all_docstrings and '>>>' not in node.doc:
+ return node
+
+ pos = self.testspos
+ if self.scope_type == 'module':
+ path = node.entry.name
+ elif self.scope_type in ('pyclass', 'cclass'):
+ if isinstance(node, CFuncDefNode):
+ if node.py_func is not None:
+ name = node.py_func.name
+ else:
+ name = node.entry.name
+ else:
+ name = node.name
+ if self.scope_type == 'cclass' and name in self.excludelist:
+ return node
+ if self.scope_type == 'pyclass':
+ class_name = self.scope_node.name
+ else:
+ class_name = self.scope_node.class_name
+ if isinstance(node.entry.scope, Symtab.PropertyScope):
+ path = "%s.%s.%s" % (class_name, node.entry.scope.name,
+ node.entry.name)
+ else:
+ path = "%s.%s" % (class_name, node.entry.name)
+ else:
+ assert False
+ self.add_test(node.pos, path, node.doc)
+ return node
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Annotate.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Annotate.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e8d2c4a8d822c7b5e9fe8d4467716670d8086c2
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Annotate.py
@@ -0,0 +1,341 @@
+# Note: Work in progress
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import re
+import codecs
+import textwrap
+from datetime import datetime
+from functools import partial
+from collections import defaultdict
+from xml.sax.saxutils import escape as html_escape
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO # does not support writing 'str' in Py2
+
+from . import Version
+from .Code import CCodeWriter
+from .. import Utils
+
+
+class AnnotationCCodeWriter(CCodeWriter):
+
+ # also used as marker for detection of complete code emission in tests
+ COMPLETE_CODE_TITLE = "Complete cythonized code"
+
+ def __init__(self, create_from=None, buffer=None, copy_formatting=True, show_entire_c_code=False, source_desc=None):
+ CCodeWriter.__init__(self, create_from, buffer, copy_formatting=copy_formatting)
+ self.show_entire_c_code = show_entire_c_code
+ if create_from is None:
+ self.annotation_buffer = StringIO()
+ self.last_annotated_pos = None
+ # annotations[filename][line] -> [(column, AnnotationItem)*]
+ self.annotations = defaultdict(partial(defaultdict, list))
+ # code[filename][line] -> str
+ self.code = defaultdict(partial(defaultdict, str))
+ # scopes[filename][line] -> set(scopes)
+ self.scopes = defaultdict(partial(defaultdict, set))
+ else:
+ # When creating an insertion point, keep references to the same database
+ self.annotation_buffer = create_from.annotation_buffer
+ self.annotations = create_from.annotations
+ self.code = create_from.code
+ self.scopes = create_from.scopes
+ self.last_annotated_pos = create_from.last_annotated_pos
+
+ def create_new(self, create_from, buffer, copy_formatting):
+ return AnnotationCCodeWriter(create_from, buffer, copy_formatting)
+
+ def _write_to_buffer(self, s):
+ self.buffer.write(s)
+ self.annotation_buffer.write(s)
+
+ def mark_pos(self, pos, trace=True):
+ if pos is not None:
+ CCodeWriter.mark_pos(self, pos, trace)
+ if self.funcstate and self.funcstate.scope:
+ # lambdas and genexprs can result in multiple scopes per line => keep them in a set
+ self.scopes[pos[0].filename][pos[1]].add(self.funcstate.scope)
+ if self.last_annotated_pos:
+ source_desc, line, _ = self.last_annotated_pos
+ pos_code = self.code[source_desc.filename]
+ pos_code[line] += self.annotation_buffer.getvalue()
+ self.annotation_buffer = StringIO()
+ self.last_annotated_pos = pos
+
+ def annotate(self, pos, item):
+ self.annotations[pos[0].filename][pos[1]].append((pos[2], item))
+
+ def _css(self):
+ """css template will later allow to choose a colormap"""
+ css = [self._css_template]
+ for i in range(255):
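+            # Decreasing blue channel: higher-scoring lines get a more
+            # saturated yellow background.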
+ color = u"FFFF%02x" % int(255.0 // (1.0 + i/10.0))
+ css.append('.cython.score-%d {background-color: #%s;}' % (i, color))
+ try:
+ from pygments.formatters import HtmlFormatter
+ except ImportError:
+ pass
+ else:
+ css.append(HtmlFormatter().get_style_defs('.cython'))
+ return '\n'.join(css)
+
+ _css_template = textwrap.dedent("""
+ body.cython { font-family: courier; font-size: 12; }
+
+ .cython.tag { }
+ .cython.line { color: #000000; margin: 0em }
+ .cython.code { font-size: 9; color: #444444; display: none; margin: 0px 0px 0px 8px; border-left: 8px none; }
+
+ .cython.line .run { background-color: #B0FFB0; }
+ .cython.line .mis { background-color: #FFB0B0; }
+ .cython.code.run { border-left: 8px solid #B0FFB0; }
+ .cython.code.mis { border-left: 8px solid #FFB0B0; }
+
+ .cython.code .py_c_api { color: red; }
+ .cython.code .py_macro_api { color: #FF7000; }
+ .cython.code .pyx_c_api { color: #FF3000; }
+ .cython.code .pyx_macro_api { color: #FF7000; }
+ .cython.code .refnanny { color: #FFA000; }
+ .cython.code .trace { color: #FFA000; }
+ .cython.code .error_goto { color: #FFA000; }
+
+ .cython.code .coerce { color: #008000; border: 1px dotted #008000 }
+ .cython.code .py_attr { color: #FF0000; font-weight: bold; }
+ .cython.code .c_attr { color: #0000FF; }
+ .cython.code .py_call { color: #FF0000; font-weight: bold; }
+ .cython.code .c_call { color: #0000FF; }
+ """)
+
+ # on-click toggle function to show/hide C source code
+ _onclick_attr = ' onclick="{0}"'.format((
+ "(function(s){"
+ " s.display = s.display === 'block' ? 'none' : 'block'"
+ "})(this.nextElementSibling.style)"
+ ).replace(' ', '') # poor dev's JS minification
+ )
+
+ def save_annotation(self, source_filename, target_filename, coverage_xml=None):
+ with Utils.open_source_file(source_filename) as f:
+ code = f.read()
+ generated_code = self.code.get(source_filename, {})
+ c_file = Utils.decode_filename(os.path.basename(target_filename))
+ html_filename = os.path.splitext(target_filename)[0] + ".html"
+
+ with codecs.open(html_filename, "w", encoding="UTF-8") as out_buffer:
+ out_buffer.write(self._save_annotation(code, generated_code, c_file, source_filename, coverage_xml))
+
+ def _save_annotation_header(self, c_file, source_filename, coverage_timestamp=None):
+ coverage_info = ''
+ if coverage_timestamp:
+ coverage_info = u' with coverage data from {timestamp}'.format(
+ timestamp=datetime.fromtimestamp(int(coverage_timestamp) // 1000))
+
+ outlist = [
+            textwrap.dedent(u'''\
+            <!DOCTYPE html>
+            <!-- Generated by Cython {watermark} -->
+            <html>
+            <head>
+                <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+                <title>Cython: {filename}</title>
+                <style type="text/css">
+                {css}
+                </style>
+            </head>
+            <body class="cython">
+            <p><span style="border-bottom: solid 1px grey;">Generated by Cython {watermark}</span>{more_info}</p>
+            <p>
+                <span style="background-color: #FFFF00">Yellow lines</span> hint at Python interaction.<br />
+                Click on a line that starts with a "<code>+</code>" to see the C code that Cython generated for it.
+            </p>
+            ''').format(css=self._css(), watermark=Version.watermark,
+                        filename=os.path.basename(source_filename) if source_filename else '',
+                        more_info=coverage_info)
+        ]
+        if c_file:
+            outlist.append(u'<p>Raw output: <a href="%s">%s</a></p>\n' % (c_file, c_file))
+ return outlist
+
+ def _save_annotation_footer(self):
+        return (u'</body></html>\n',)
+
+ def _save_annotation(self, code, generated_code, c_file=None, source_filename=None, coverage_xml=None):
+ """
+ lines : original cython source code split by lines
+ generated_code : generated c code keyed by line number in original file
+ target filename : name of the file in which to store the generated html
+ c_file : filename in which the c_code has been written
+ """
+ if coverage_xml is not None and source_filename:
+ coverage_timestamp = coverage_xml.get('timestamp', '').strip()
+ covered_lines = self._get_line_coverage(coverage_xml, source_filename)
+ else:
+ coverage_timestamp = covered_lines = None
+ annotation_items = dict(self.annotations[source_filename])
+ scopes = dict(self.scopes[source_filename])
+
+ outlist = []
+ outlist.extend(self._save_annotation_header(c_file, source_filename, coverage_timestamp))
+ outlist.extend(self._save_annotation_body(code, generated_code, annotation_items, scopes, covered_lines))
+ outlist.extend(self._save_annotation_footer())
+ return ''.join(outlist)
+
+ def _get_line_coverage(self, coverage_xml, source_filename):
+ coverage_data = None
+ for entry in coverage_xml.iterfind('.//class'):
+ if not entry.get('filename'):
+ continue
+ if (entry.get('filename') == source_filename or
+ os.path.abspath(entry.get('filename')) == source_filename):
+ coverage_data = entry
+ break
+ elif source_filename.endswith(entry.get('filename')):
+ coverage_data = entry # but we might still find a better match...
+ if coverage_data is None:
+ return None
+ return dict(
+ (int(line.get('number')), int(line.get('hits')))
+ for line in coverage_data.iterfind('lines/line')
+ )
+
+ def _htmlify_code(self, code, language):
+ try:
+ from pygments import highlight
+ from pygments.lexers import CythonLexer, CppLexer
+ from pygments.formatters import HtmlFormatter
+ except ImportError:
+ # no Pygments, just escape the code
+ return html_escape(code)
+
+ if language == "cython":
+ lexer = CythonLexer(stripnl=False, stripall=False)
+ elif language == "c/cpp":
+ lexer = CppLexer(stripnl=False, stripall=False)
+ else:
+ # unknown language, use fallback
+ return html_escape(code)
+ html_code = highlight(
+ code, lexer,
+ HtmlFormatter(nowrap=True))
+ return html_code
+
+ def _save_annotation_body(self, cython_code, generated_code, annotation_items, scopes, covered_lines=None):
+        outlist = [u'<div class="cython">']
+ pos_comment_marker = u'/* \N{HORIZONTAL ELLIPSIS} */\n'
+ new_calls_map = dict(
+ (name, 0) for name in
+ 'refnanny trace py_macro_api py_c_api pyx_macro_api pyx_c_api error_goto'.split()
+ ).copy
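+        # The bound .copy means new_calls_map() returns a fresh zeroed counter
+        # dict for each source line.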
+
+ self.mark_pos(None)
+
+ def annotate(match):
+ group_name = match.lastgroup
+ calls[group_name] += 1
+ return u"
%s" % (
+ group_name, match.group(group_name))
+
+ lines = self._htmlify_code(cython_code, "cython").splitlines()
+ lineno_width = len(str(len(lines)))
+ if not covered_lines:
+ covered_lines = None
+
+ for k, line in enumerate(lines, 1):
+ try:
+ c_code = generated_code[k]
+ except KeyError:
+ c_code = ''
+ else:
+ c_code = _replace_pos_comment(pos_comment_marker, c_code)
+ if c_code.startswith(pos_comment_marker):
+ c_code = c_code[len(pos_comment_marker):]
+ c_code = html_escape(c_code)
+
+ calls = new_calls_map()
+ c_code = _parse_code(annotate, c_code)
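+            # Full Python C-API calls weigh most (5x), Cython helper calls 2x,
+            # macro-level calls once each.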
+ score = (5 * calls['py_c_api'] + 2 * calls['pyx_c_api'] +
+ calls['py_macro_api'] + calls['pyx_macro_api'])
+
+ if c_code:
+ onclick = self._onclick_attr
+ expandsymbol = '+'
+ else:
+ onclick = ''
+                expandsymbol = '&#xA0;'  # non-breaking space
+
+ covered = ''
+ if covered_lines is not None and k in covered_lines:
+ hits = covered_lines[k]
+ if hits is not None:
+ covered = 'run' if hits else 'mis'
+
+ outlist.append(
+                u'<pre class="cython line score-{score}"{onclick}>'
+                # generate line number with expand symbol in front,
+                # and the right number of digits
+                u'{expandsymbol}<span class="{covered}">{line:0{lineno_width}d}</span>: {code}</pre>\n'.format(
+                    score=score,
+                    expandsymbol=expandsymbol,
+                    covered=covered,
+                    lineno_width=lineno_width,
+                    line=k,
+                    code=line.rstrip(),
+                    onclick=onclick,
+                ))
+            if c_code:
+                outlist.append(u"<pre class='cython code score-{score} {covered}'>{code}</pre>".format(
+                    score=score, covered=covered, code=c_code))
+        outlist.append(u"</div>")
+
+ # now the whole c-code if needed:
+ if self.show_entire_c_code:
+            outlist.append(u'<p><div class="cython">')
+            onclick_title = u"<pre class='cython line'{onclick}>+ {title}</pre>\n"
+            outlist.append(onclick_title.format(
+                onclick=self._onclick_attr,
+                title=AnnotationCCodeWriter.COMPLETE_CODE_TITLE,
+            ))
+            complete_code_as_html = self._htmlify_code(self.buffer.getvalue(), "c/cpp")
+            outlist.append(u"<pre class='cython code'>{code}</pre>".format(code=complete_code_as_html))
+            outlist.append(u"</div></p>")
+
+ return outlist
+
+
+_parse_code = re.compile((
+    br'(?P<refnanny>__Pyx_X?(?:GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)|'
+    br'(?P<trace>__Pyx_Trace[A-Za-z]+)|'
+    br'(?:'
+    br'(?P<pyx_macro_api>__Pyx_[A-Z][A-Z_]+)|'
+    br'(?P<pyx_c_api>(?:__Pyx_[A-Z][a-z_][A-Za-z_]*)|__pyx_convert_[A-Za-z_]*)|'
+    br'(?P<py_macro_api>Py[A-Z][a-z]+_[A-Z][A-Z_]+)|'
+    br'(?P<py_c_api>Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)'
+    br')(?=\()|'  # look-ahead to exclude subsequent '(' from replacement
+    br'(?P<error_goto>(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))'
+).decode('ascii')).sub
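+
+# Usage sketch (added for illustration; not part of the original source):
+# feeding a generated C line through the substitution above tags known API
+# calls and error gotos, e.g.
+#   _parse_code(annotate, "__pyx_t_1 = PyNumber_Add(__pyx_v_a, __pyx_v_b);"
+#                         " if (!__pyx_t_1) __PYX_ERR(0, 3, __pyx_L1_error)")
+# counts one 'py_c_api' hit (PyNumber_Add) and one 'error_goto' hit
+# (__PYX_ERR...), giving the line a score of 5 under the formula
+# 5*py_c_api + 2*pyx_c_api + py_macro_api + pyx_macro_api.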
+
+
+_replace_pos_comment = re.compile(
+ # this matches what Cython generates as code line marker comment
+ br'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n'.decode('ascii'),
+ re.M
+).sub
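+
+# For illustration (added; not in the original source): the comments removed
+# by _replace_pos_comment are the source-position markers Cython writes before
+# each generated C section, e.g.
+#
+#   /* "mymodule.pyx":42
+#    * x = a + b             # <<<<<<<<<<<<<<
+#    */
+#
+# They are collapsed to the short pos_comment_marker so the annotation view
+# can strip them from the per-line C code.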
+
+
+class AnnotationItem(object):
+
+ def __init__(self, style, text, tag="", size=0):
+ self.style = style
+ self.text = text
+ self.tag = tag
+ self.size = size
+
+ def start(self):
+ return u"%s" % (self.style, self.text, self.tag)
+
+ def end(self):
+ return self.size, u""
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AutoDocTransforms.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AutoDocTransforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..c74aab7b7c1ae1315ef3848732126423258ab5e6
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/AutoDocTransforms.py
@@ -0,0 +1,318 @@
+from __future__ import absolute_import, print_function
+
+from .Visitor import CythonTransform
+from .StringEncoding import EncodedString
+from . import Options
+from . import PyrexTypes
+from ..CodeWriter import ExpressionWriter
+from .Errors import warning
+
+
+class AnnotationWriter(ExpressionWriter):
+ """
+ A Cython code writer for Python expressions in argument/variable annotations.
+ """
+ def __init__(self, description=None):
+ """description is optional. If specified it is used in
+ warning messages for the nodes that don't convert to string properly.
+ If not specified then no messages are generated.
+ """
+ ExpressionWriter.__init__(self)
+ self.description = description
+ self.incomplete = False
+
+ def visit_Node(self, node):
+ self.put(u"??>")
+ self.incomplete = True
+ if self.description:
+ warning(node.pos,
+ "Failed to convert code to string representation in {0}".format(
+ self.description), level=1)
+
+ def visit_LambdaNode(self, node):
+ # XXX Should we do better?
+ self.put("")
+ self.incomplete = True
+ if self.description:
+ warning(node.pos,
+ "Failed to convert lambda to string representation in {0}".format(
+ self.description), level=1)
+
+ def visit_UnicodeNode(self, node):
+ # Discard Unicode prefix in annotations. Any tool looking at them
+ # would probably expect Py3 string semantics.
+ self.emit_string(node, "")
+
+ def visit_AnnotationNode(self, node):
+ self.put(node.string.unicode_value)
+
+
+class EmbedSignature(CythonTransform):
+
+ def __init__(self, context):
+ super(EmbedSignature, self).__init__(context)
+ self.class_name = None
+ self.class_node = None
+
+ def _fmt_expr(self, node):
+ writer = ExpressionWriter()
+ result = writer.write(node)
+ # print(type(node).__name__, '-->', result)
+ return result
+
+ def _fmt_annotation(self, node):
+ writer = AnnotationWriter()
+ result = writer.write(node)
+ # print(type(node).__name__, '-->', result)
+ return result
+
+ def _setup_format(self):
+ signature_format = self.current_directives['embedsignature.format']
+ self.is_format_c = signature_format == 'c'
+ self.is_format_python = signature_format == 'python'
+ self.is_format_clinic = signature_format == 'clinic'
+
+ def _fmt_arg(self, arg):
+ arg_doc = arg.name
+ annotation = None
+ defaultval = None
+ if arg.is_self_arg:
+ if self.is_format_clinic:
+ arg_doc = '$self'
+ elif arg.is_type_arg:
+ if self.is_format_clinic:
+ arg_doc = '$type'
+ elif self.is_format_c:
+ if arg.type is not PyrexTypes.py_object_type:
+ arg_doc = arg.type.declaration_code(arg.name, for_display=1)
+ elif self.is_format_python:
+ if not arg.annotation:
+ annotation = self._fmt_type(arg.type)
+ if arg.annotation:
+ if not self.is_format_clinic:
+ annotation = self._fmt_annotation(arg.annotation)
+ if arg.default:
+ defaultval = self._fmt_expr(arg.default)
+ if annotation:
+ arg_doc = arg_doc + (': %s' % annotation)
+ if defaultval:
+ arg_doc = arg_doc + (' = %s' % defaultval)
+ elif defaultval:
+ arg_doc = arg_doc + ('=%s' % defaultval)
+ return arg_doc
+
+ def _fmt_star_arg(self, arg):
+ arg_doc = arg.name
+ if arg.annotation:
+ if not self.is_format_clinic:
+ annotation = self._fmt_annotation(arg.annotation)
+ arg_doc = arg_doc + (': %s' % annotation)
+ return arg_doc
+
+ def _fmt_arglist(self, args,
+ npoargs=0, npargs=0, pargs=None,
+ nkargs=0, kargs=None,
+ hide_self=False):
+ arglist = []
+ for arg in args:
+ if not hide_self or not arg.entry.is_self_arg:
+ arg_doc = self._fmt_arg(arg)
+ arglist.append(arg_doc)
+ if pargs:
+ arg_doc = self._fmt_star_arg(pargs)
+ arglist.insert(npargs + npoargs, '*%s' % arg_doc)
+ elif nkargs:
+ arglist.insert(npargs + npoargs, '*')
+ if npoargs:
+ arglist.insert(npoargs, '/')
+ if kargs:
+ arg_doc = self._fmt_star_arg(kargs)
+ arglist.append('**%s' % arg_doc)
+ return arglist
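+
+    # Illustrative example (added; not in the original source): for
+    #   def f(a, b=1, /, c=2, *args, d, **kw)
+    # i.e. npoargs=2, npargs=1, nkargs=1, the list built above reads
+    #   ['a', 'b=1', '/', 'c=2', '*args', 'd', '**kw']
+    # -- '/' is inserted after the positional-only block and '*args' (or a
+    # bare '*') before any keyword-only arguments.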
+
+ def _fmt_type(self, type):
+ if type is PyrexTypes.py_object_type:
+ return None
+ elif self.is_format_c:
+ code = type.declaration_code("", for_display=1)
+ return code
+ elif self.is_format_python:
+ annotation = None
+ if type.is_string:
+ annotation = self.current_directives['c_string_type']
+ elif type.is_numeric:
+ annotation = type.py_type_name()
+ if annotation is None:
+ code = type.declaration_code('', for_display=1)
+ annotation = code.replace(' ', '_').replace('*', 'p')
+ return annotation
+ return None
+
+ def _fmt_signature(self, cls_name, func_name, args,
+ npoargs=0, npargs=0, pargs=None,
+ nkargs=0, kargs=None,
+ return_expr=None, return_type=None,
+ hide_self=False):
+ arglist = self._fmt_arglist(
+ args, npoargs, npargs, pargs, nkargs, kargs,
+ hide_self=hide_self,
+ )
+ arglist_doc = ', '.join(arglist)
+ func_doc = '%s(%s)' % (func_name, arglist_doc)
+ if self.is_format_c and cls_name:
+ func_doc = '%s.%s' % (cls_name, func_doc)
+ if not self.is_format_clinic:
+ ret_doc = None
+ if return_expr:
+ ret_doc = self._fmt_annotation(return_expr)
+ elif return_type:
+ ret_doc = self._fmt_type(return_type)
+ if ret_doc:
+ func_doc = '%s -> %s' % (func_doc, ret_doc)
+ return func_doc
+
+ def _embed_signature(self, signature, node_doc):
+ if self.is_format_clinic and self.current_directives['binding']:
+ return node_doc
+ if node_doc:
+ if self.is_format_clinic:
+ docfmt = "%s\n--\n\n%s"
+ else:
+ docfmt = "%s\n%s"
+ return docfmt % (signature, node_doc)
+ else:
+ if self.is_format_clinic:
+ docfmt = "%s\n--\n\n"
+ else:
+ docfmt = "%s"
+ return docfmt % signature
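+
+    # Example of the resulting docstrings (added for illustration): with
+    # signature "f(x)" and existing doc "Add one.", the default formats give
+    # "f(x)\nAdd one.", while the 'clinic' format gives "f(x)\n--\n\nAdd one.";
+    # the "--" line is the Argument Clinic convention that lets CPython's
+    # inspect machinery recognise and strip the signature line.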
+
+ def __call__(self, node):
+ if not Options.docstrings:
+ return node
+ else:
+ return super(EmbedSignature, self).__call__(node)
+
+ def visit_ClassDefNode(self, node):
+ oldname = self.class_name
+ oldclass = self.class_node
+ self.class_node = node
+ try:
+ # PyClassDefNode
+ self.class_name = node.name
+ except AttributeError:
+ # CClassDefNode
+ self.class_name = node.class_name
+ self.visitchildren(node)
+ self.class_name = oldname
+ self.class_node = oldclass
+ return node
+
+ def visit_LambdaNode(self, node):
+        # lambda expressions do not have signatures or inner functions
+ return node
+
+ def visit_DefNode(self, node):
+ if not self.current_directives['embedsignature']:
+ return node
+ self._setup_format()
+
+ is_constructor = False
+ hide_self = False
+ if node.entry.is_special:
+ is_constructor = self.class_node and node.name == '__init__'
+ if not is_constructor:
+ return node
+ class_name = None
+ func_name = node.name
+ if self.is_format_c:
+ func_name = self.class_name
+ hide_self = True
+ else:
+ class_name, func_name = self.class_name, node.name
+
+ npoargs = getattr(node, 'num_posonly_args', 0)
+ nkargs = getattr(node, 'num_kwonly_args', 0)
+ npargs = len(node.args) - nkargs - npoargs
+ signature = self._fmt_signature(
+ class_name, func_name, node.args,
+ npoargs, npargs, node.star_arg,
+ nkargs, node.starstar_arg,
+ return_expr=node.return_type_annotation,
+ return_type=None, hide_self=hide_self)
+ if signature:
+ if is_constructor and self.is_format_c:
+ doc_holder = self.class_node.entry.type.scope
+ else:
+ doc_holder = node.entry
+ if doc_holder.doc is not None:
+ old_doc = doc_holder.doc
+ elif not is_constructor and getattr(node, 'py_func', None) is not None:
+ old_doc = node.py_func.entry.doc
+ else:
+ old_doc = None
+ new_doc = self._embed_signature(signature, old_doc)
+ doc_holder.doc = EncodedString(new_doc)
+ if not is_constructor and getattr(node, 'py_func', None) is not None:
+ node.py_func.entry.doc = EncodedString(new_doc)
+ return node
+
+ def visit_CFuncDefNode(self, node):
+ if not node.overridable: # not cpdef FOO(...):
+ return node
+ if not self.current_directives['embedsignature']:
+ return node
+ self._setup_format()
+
+ signature = self._fmt_signature(
+ self.class_name, node.declarator.base.name,
+ node.declarator.args,
+ return_type=node.return_type)
+ if signature:
+ if node.entry.doc is not None:
+ old_doc = node.entry.doc
+ elif getattr(node, 'py_func', None) is not None:
+ old_doc = node.py_func.entry.doc
+ else:
+ old_doc = None
+ new_doc = self._embed_signature(signature, old_doc)
+ node.entry.doc = EncodedString(new_doc)
+ py_func = getattr(node, 'py_func', None)
+ if py_func is not None:
+ py_func.entry.doc = EncodedString(new_doc)
+ return node
+
+ def visit_PropertyNode(self, node):
+ if not self.current_directives['embedsignature']:
+ return node
+ self._setup_format()
+
+ entry = node.entry
+ body = node.body
+ prop_name = entry.name
+ type_name = None
+ if entry.visibility == 'public':
+ if self.is_format_c:
+ # property synthesised from a cdef public attribute
+ type_name = entry.type.declaration_code("", for_display=1)
+ if not entry.type.is_pyobject:
+ type_name = "'%s'" % type_name
+ elif entry.type.is_extension_type:
+ type_name = entry.type.module_name + '.' + type_name
+ elif self.is_format_python:
+ type_name = self._fmt_type(entry.type)
+ if type_name is None:
+ for stat in body.stats:
+ if stat.name != '__get__':
+ continue
+ if self.is_format_c:
+ prop_name = '%s.%s' % (self.class_name, prop_name)
+ ret_annotation = stat.return_type_annotation
+ if ret_annotation:
+ type_name = self._fmt_annotation(ret_annotation)
+        if type_name is not None:
+ signature = '%s: %s' % (prop_name, type_name)
+ new_doc = self._embed_signature(signature, entry.doc)
+ if not self.is_format_clinic:
+ entry.doc = EncodedString(new_doc)
+ return node
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Buffer.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Buffer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e86e1e9c24d206d537d682b7c0b67c1b0fabf96c
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Buffer.py
@@ -0,0 +1,749 @@
+from __future__ import absolute_import
+
+from .Visitor import CythonTransform
+from .ModuleNode import ModuleNode
+from .Errors import CompileError
+from .UtilityCode import CythonUtilityCode
+from .Code import UtilityCode, TempitaUtilityCode
+
+from . import Options
+from . import Interpreter
+from . import PyrexTypes
+from . import Naming
+from . import Symtab
+
+def dedent(text, reindent=0):
+ from textwrap import dedent
+ text = dedent(text)
+ if reindent > 0:
+ indent = " " * reindent
+ text = '\n'.join([indent + x for x in text.split('\n')])
+ return text
+
+class IntroduceBufferAuxiliaryVars(CythonTransform):
+
+ #
+ # Entry point
+ #
+
+ buffers_exists = False
+ using_memoryview = False
+
+ def __call__(self, node):
+ assert isinstance(node, ModuleNode)
+ self.max_ndim = 0
+ result = super(IntroduceBufferAuxiliaryVars, self).__call__(node)
+ if self.buffers_exists:
+ use_bufstruct_declare_code(node.scope)
+ use_py2_buffer_functions(node.scope)
+
+ return result
+
+
+ #
+ # Basic operations for transforms
+ #
+ def handle_scope(self, node, scope):
+ # For all buffers, insert extra variables in the scope.
+ # The variables are also accessible from the buffer_info
+ # on the buffer entry
+ scope_items = scope.entries.items()
+ bufvars = [entry for name, entry in scope_items if entry.type.is_buffer]
+ if len(bufvars) > 0:
+ bufvars.sort(key=lambda entry: entry.name)
+ self.buffers_exists = True
+
+ memviewslicevars = [entry for name, entry in scope_items if entry.type.is_memoryviewslice]
+ if len(memviewslicevars) > 0:
+ self.buffers_exists = True
+
+
+ for (name, entry) in scope_items:
+ if name == 'memoryview' and isinstance(entry.utility_code_definition, CythonUtilityCode):
+ self.using_memoryview = True
+ break
+ del scope_items
+
+ if isinstance(node, ModuleNode) and len(bufvars) > 0:
+ # for now...note that pos is wrong
+ raise CompileError(node.pos, "Buffer vars not allowed in module scope")
+ for entry in bufvars:
+ if entry.type.dtype.is_ptr:
+ raise CompileError(node.pos, "Buffers with pointer types not yet supported.")
+
+ name = entry.name
+ buftype = entry.type
+ if buftype.ndim > Options.buffer_max_dims:
+ raise CompileError(node.pos,
+ "Buffer ndims exceeds Options.buffer_max_dims = %d" % Options.buffer_max_dims)
+ if buftype.ndim > self.max_ndim:
+ self.max_ndim = buftype.ndim
+
+ # Declare auxiliary vars
+ def decvar(type, prefix):
+ cname = scope.mangle(prefix, name)
+ aux_var = scope.declare_var(name=None, cname=cname,
+ type=type, pos=node.pos)
+ if entry.is_arg:
+ aux_var.used = True # otherwise, NameNode will mark whether it is used
+
+ return aux_var
+
+ auxvars = ((PyrexTypes.c_pyx_buffer_nd_type, Naming.pybuffernd_prefix),
+ (PyrexTypes.c_pyx_buffer_type, Naming.pybufferstruct_prefix))
+ pybuffernd, rcbuffer = [decvar(type, prefix) for (type, prefix) in auxvars]
+
+ entry.buffer_aux = Symtab.BufferAux(pybuffernd, rcbuffer)
+
+ scope.buffer_entries = bufvars
+ self.scope = scope
+
+ def visit_ModuleNode(self, node):
+ self.handle_scope(node, node.scope)
+ self.visitchildren(node)
+ return node
+
+ def visit_FuncDefNode(self, node):
+ self.handle_scope(node, node.local_scope)
+ self.visitchildren(node)
+ return node
+
+#
+# Analysis
+#
+buffer_options = ("dtype", "ndim", "mode", "negative_indices", "cast") # ordered!
+buffer_defaults = {"ndim": 1, "mode": "full", "negative_indices": True, "cast": False}
+buffer_positional_options_count = 1 # anything beyond this needs keyword argument
+
+ERR_BUF_OPTION_UNKNOWN = '"%s" is not a buffer option'
+ERR_BUF_TOO_MANY = 'Too many buffer options'
+ERR_BUF_DUP = '"%s" buffer option already supplied'
+ERR_BUF_MISSING = '"%s" missing'
+ERR_BUF_MODE = 'Only allowed buffer modes are: "c", "fortran", "full", "strided" (as a compile-time string)'
+ERR_BUF_NDIM = 'ndim must be a non-negative integer'
+ERR_BUF_DTYPE = 'dtype must be "object", numeric type or a struct'
+ERR_BUF_BOOL = '"%s" must be a boolean'
+
+def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, need_complete=True):
+ """
+ Must be called during type analysis, as analyse is called
+ on the dtype argument.
+
+ posargs and dictargs should consist of a list and a dict
+ of tuples (value, pos). Defaults should be a dict of values.
+
+    Returns a dict containing all the options a buffer can have and
+    their values (with the positions stripped).
+ """
+ if defaults is None:
+ defaults = buffer_defaults
+
+ posargs, dictargs = Interpreter.interpret_compiletime_options(
+ posargs, dictargs, type_env=env, type_args=(0, 'dtype'))
+
+ if len(posargs) > buffer_positional_options_count:
+ raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY)
+
+ options = {}
+ for name, (value, pos) in dictargs.items():
+ if name not in buffer_options:
+ raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
+ options[name] = value
+
+ for name, (value, pos) in zip(buffer_options, posargs):
+ if name not in buffer_options:
+ raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
+ if name in options:
+ raise CompileError(pos, ERR_BUF_DUP % name)
+ options[name] = value
+
+ # Check that they are all there and copy defaults
+ for name in buffer_options:
+ if name not in options:
+ try:
+ options[name] = defaults[name]
+ except KeyError:
+ if need_complete:
+ raise CompileError(globalpos, ERR_BUF_MISSING % name)
+
+ dtype = options.get("dtype")
+ if dtype and dtype.is_extension_type:
+ raise CompileError(globalpos, ERR_BUF_DTYPE)
+
+ ndim = options.get("ndim")
+ if ndim and (not isinstance(ndim, int) or ndim < 0):
+ raise CompileError(globalpos, ERR_BUF_NDIM)
+
+ mode = options.get("mode")
+    if mode and mode not in ('full', 'strided', 'c', 'fortran'):
+ raise CompileError(globalpos, ERR_BUF_MODE)
+
+ def assert_bool(name):
+ x = options.get(name)
+ if not isinstance(x, bool):
+ raise CompileError(globalpos, ERR_BUF_BOOL % name)
+
+ assert_bool('negative_indices')
+ assert_bool('cast')
+
+ return options
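+
+# Illustrative sketch (added; the variable name is hypothetical): for a
+# declaration like
+#   cdef object[int, ndim=2, mode="c"] buf
+# the analysis receives the dtype as the single positional option plus the
+# keyword options, and returns the completed dict, e.g.
+#   {'dtype': <int>, 'ndim': 2, 'mode': 'c',
+#    'negative_indices': True, 'cast': False}
+# where the last two values come from buffer_defaults.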
+
+
+#
+# Code generation
+#
+
+class BufferEntry(object):
+ def __init__(self, entry):
+ self.entry = entry
+ self.type = entry.type
+ self.cname = entry.buffer_aux.buflocal_nd_var.cname
+ self.buf_ptr = "%s.rcbuffer->pybuffer.buf" % self.cname
+ self.buf_ptr_type = entry.type.buffer_ptr_type
+ self.init_attributes()
+
+ def init_attributes(self):
+ self.shape = self.get_buf_shapevars()
+ self.strides = self.get_buf_stridevars()
+ self.suboffsets = self.get_buf_suboffsetvars()
+
+ def get_buf_suboffsetvars(self):
+ return self._for_all_ndim("%s.diminfo[%d].suboffsets")
+
+ def get_buf_stridevars(self):
+ return self._for_all_ndim("%s.diminfo[%d].strides")
+
+ def get_buf_shapevars(self):
+ return self._for_all_ndim("%s.diminfo[%d].shape")
+
+ def _for_all_ndim(self, s):
+ return [s % (self.cname, i) for i in range(self.type.ndim)]
+
+ def generate_buffer_lookup_code(self, code, index_cnames):
+ # Create buffer lookup and return it
+ # This is done via utility macros/inline functions, which vary
+ # according to the access mode used.
+ params = []
+ nd = self.type.ndim
+ mode = self.type.mode
+ if mode == 'full':
+ for i, s, o in zip(index_cnames,
+ self.get_buf_stridevars(),
+ self.get_buf_suboffsetvars()):
+ params.append(i)
+ params.append(s)
+ params.append(o)
+ funcname = "__Pyx_BufPtrFull%dd" % nd
+ funcgen = buf_lookup_full_code
+ else:
+ if mode == 'strided':
+ funcname = "__Pyx_BufPtrStrided%dd" % nd
+ funcgen = buf_lookup_strided_code
+ elif mode == 'c':
+ funcname = "__Pyx_BufPtrCContig%dd" % nd
+ funcgen = buf_lookup_c_code
+ elif mode == 'fortran':
+ funcname = "__Pyx_BufPtrFortranContig%dd" % nd
+ funcgen = buf_lookup_fortran_code
+ else:
+ assert False
+ for i, s in zip(index_cnames, self.get_buf_stridevars()):
+ params.append(i)
+ params.append(s)
+
+ # Make sure the utility code is available
+ if funcname not in code.globalstate.utility_codes:
+ code.globalstate.utility_codes.add(funcname)
+ protocode = code.globalstate['utility_code_proto']
+ defcode = code.globalstate['utility_code_def']
+ funcgen(protocode, defcode, name=funcname, nd=nd)
+
+ buf_ptr_type_code = self.buf_ptr_type.empty_declaration_code()
+ ptrcode = "%s(%s, %s, %s)" % (funcname, buf_ptr_type_code, self.buf_ptr,
+ ", ".join(params))
+ return ptrcode
+
+
+def get_flags(buffer_aux, buffer_type):
+ flags = 'PyBUF_FORMAT'
+ mode = buffer_type.mode
+ if mode == 'full':
+ flags += '| PyBUF_INDIRECT'
+ elif mode == 'strided':
+ flags += '| PyBUF_STRIDES'
+ elif mode == 'c':
+ flags += '| PyBUF_C_CONTIGUOUS'
+ elif mode == 'fortran':
+ flags += '| PyBUF_F_CONTIGUOUS'
+ else:
+ assert False
+ if buffer_aux.writable_needed: flags += "| PyBUF_WRITABLE"
+ return flags
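+
+# For illustration (added; not in the original source): a writable "c" mode
+# buffer yields the flag string
+#   "PyBUF_FORMAT| PyBUF_C_CONTIGUOUS| PyBUF_WRITABLE"
+# which is pasted verbatim into the generated getbuffer call below.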
+
+def used_buffer_aux_vars(entry):
+ buffer_aux = entry.buffer_aux
+ buffer_aux.buflocal_nd_var.used = True
+ buffer_aux.rcbuf_var.used = True
+
+def put_unpack_buffer_aux_into_scope(buf_entry, code):
+ # Generate code to copy the needed struct info into local
+ # variables.
+ buffer_aux, mode = buf_entry.buffer_aux, buf_entry.type.mode
+ pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
+
+ fldnames = ['strides', 'shape']
+ if mode == 'full':
+ fldnames.append('suboffsets')
+
+ ln = []
+ for i in range(buf_entry.type.ndim):
+ for fldname in fldnames:
+ ln.append("%s.diminfo[%d].%s = %s.rcbuffer->pybuffer.%s[%d];" % (
+ pybuffernd_struct, i, fldname,
+ pybuffernd_struct, fldname, i,
+ ))
+ code.putln(' '.join(ln))
+
+def put_init_vars(entry, code):
+ bufaux = entry.buffer_aux
+ pybuffernd_struct = bufaux.buflocal_nd_var.cname
+ pybuffer_struct = bufaux.rcbuf_var.cname
+ # init pybuffer_struct
+ code.putln("%s.pybuffer.buf = NULL;" % pybuffer_struct)
+ code.putln("%s.refcount = 0;" % pybuffer_struct)
+ # init the buffer object
+ # code.put_init_var_to_py_none(entry)
+ # init the pybuffernd_struct
+ code.putln("%s.data = NULL;" % pybuffernd_struct)
+ code.putln("%s.rcbuffer = &%s;" % (pybuffernd_struct, pybuffer_struct))
+
+
+def put_acquire_arg_buffer(entry, code, pos):
+ buffer_aux = entry.buffer_aux
+ getbuffer = get_getbuffer_call(code, entry.cname, buffer_aux, entry.type)
+
+ # Acquire any new buffer
+ code.putln("{")
+ code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
+ code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
+ code.putln("}")
+ # An exception raised in arg parsing cannot be caught, so no
+ # need to care about the buffer then.
+ put_unpack_buffer_aux_into_scope(entry, code)
+
+
+def put_release_buffer_code(code, entry):
+ code.globalstate.use_utility_code(acquire_utility_code)
+ code.putln("__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);" % entry.buffer_aux.buflocal_nd_var.cname)
+
+
+def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
+ ndim = buffer_type.ndim
+ cast = int(buffer_type.cast)
+ flags = get_flags(buffer_aux, buffer_type)
+ pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
+
+ dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
+
+ code.globalstate.use_utility_code(acquire_utility_code)
+ return ("__Pyx_GetBufferAndValidate(&%(pybuffernd_struct)s.rcbuffer->pybuffer, "
+ "(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
+ "%(cast)d, __pyx_stack)" % locals())
+
+
+def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
+ is_initialized, pos, code):
+ """
+    Generate code for reassigning a buffer variable. This only deals with getting
+    the buffer auxiliary structure and variables set up correctly; the assignment
+    itself and the refcounting are the responsibility of the caller.
+
+ However, the assignment operation may throw an exception so that the reassignment
+ never happens.
+
+ Depending on the circumstances there are two possible outcomes:
+ - Old buffer released, new acquired, rhs assigned to lhs
+    - Old buffer released, new acquired which fails, reacquire old lhs buffer
+ (which may or may not succeed).
+ """
+
+ buffer_aux, buffer_type = buf_entry.buffer_aux, buf_entry.type
+ pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
+ flags = get_flags(buffer_aux, buffer_type)
+
+ code.putln("{") # Set up necessary stack for getbuffer
+ code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
+
+ getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
+
+ if is_initialized:
+ # Release any existing buffer
+ code.putln('__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);' % pybuffernd_struct)
+ # Acquire
+ retcode_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = %s;" % (retcode_cname, getbuffer % rhs_cname))
+ code.putln('if (%s) {' % (code.unlikely("%s < 0" % retcode_cname)))
+ # If acquisition failed, attempt to reacquire the old buffer
+ # before raising the exception. A failure of reacquisition
+ # will cause the reacquisition exception to be reported, one
+ # can consider working around this later.
+ exc_temps = tuple(code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=False)
+ for _ in range(3))
+ code.putln('PyErr_Fetch(&%s, &%s, &%s);' % exc_temps)
+ code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % lhs_cname)))
+ code.putln('Py_XDECREF(%s); Py_XDECREF(%s); Py_XDECREF(%s);' % exc_temps) # Do not refnanny these!
+ code.globalstate.use_utility_code(raise_buffer_fallback_code)
+ code.putln('__Pyx_RaiseBufferFallbackError();')
+ code.putln('} else {')
+ code.putln('PyErr_Restore(%s, %s, %s);' % exc_temps)
+ code.putln('}')
+ code.putln('%s = %s = %s = 0;' % exc_temps)
+ for t in exc_temps:
+ code.funcstate.release_temp(t)
+ code.putln('}')
+ # Unpack indices
+ put_unpack_buffer_aux_into_scope(buf_entry, code)
+ code.putln(code.error_goto_if_neg(retcode_cname, pos))
+ code.funcstate.release_temp(retcode_cname)
+ else:
+ # Our entry had no previous value, so set to None when acquisition fails.
+ # In this case, auxiliary vars should be set up right in initialization to a zero-buffer,
+ # so it suffices to set the buf field to NULL.
+ code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % rhs_cname)))
+ code.putln('%s = %s; __Pyx_INCREF(Py_None); %s.rcbuffer->pybuffer.buf = NULL;' %
+ (lhs_cname,
+ PyrexTypes.typecast(buffer_type, PyrexTypes.py_object_type, "Py_None"),
+ pybuffernd_struct))
+ code.putln(code.error_goto(pos))
+ code.put('} else {')
+ # Unpack indices
+ put_unpack_buffer_aux_into_scope(buf_entry, code)
+ code.putln('}')
+
+ code.putln("}") # Release stack
+
+
+def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives,
+ pos, code, negative_indices, in_nogil_context):
+ """
+ Generates code to process indices and calculate an offset into
+ a buffer. Returns a C string which gives a pointer which can be
+    read from or written to at will (it is an expression, so the caller should
+    store it in a temporary if it is used more than once).
+
+    As the bounds checking can have any number of combinations of unsigned
+    arguments, smart optimizations etc., we insert it directly in the function
+    body. The lookup, however, is delegated to an inline function that is
+    instantiated once per ndim (lookups with suboffsets tend to get quite complicated).
+
+ entry is a BufferEntry
+ """
+ negative_indices = directives['wraparound'] and negative_indices
+
+ if directives['boundscheck']:
+ # Check bounds and fix negative indices.
+ # We allocate a temporary which is initialized to -1, meaning OK (!).
+ # If an error occurs, the temp is set to the index dimension the
+ # error is occurring at.
+ failed_dim_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = -1;" % failed_dim_temp)
+ for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames, entry.get_buf_shapevars())):
+ if signed != 0:
+ # not unsigned, deal with negative index
+ code.putln("if (%s < 0) {" % cname)
+ if negative_indices:
+ code.putln("%s += %s;" % (cname, shape))
+ code.putln("if (%s) %s = %d;" % (
+ code.unlikely("%s < 0" % cname),
+ failed_dim_temp, dim))
+ else:
+ code.putln("%s = %d;" % (failed_dim_temp, dim))
+ code.put("} else ")
+ # check bounds in positive direction
+ if signed != 0:
+ cast = ""
+ else:
+ cast = "(size_t)"
+ code.putln("if (%s) %s = %d;" % (
+ code.unlikely("%s >= %s%s" % (cname, cast, shape)),
+ failed_dim_temp, dim))
+
+ if in_nogil_context:
+ code.globalstate.use_utility_code(raise_indexerror_nogil)
+ func = '__Pyx_RaiseBufferIndexErrorNogil'
+ else:
+ code.globalstate.use_utility_code(raise_indexerror_code)
+ func = '__Pyx_RaiseBufferIndexError'
+
+ code.putln("if (%s) {" % code.unlikely("%s != -1" % failed_dim_temp))
+ code.putln('%s(%s);' % (func, failed_dim_temp))
+ code.putln(code.error_goto(pos))
+ code.putln('}')
+ code.funcstate.release_temp(failed_dim_temp)
+ elif negative_indices:
+ # Only fix negative indices.
+ for signed, cname, shape in zip(index_signeds, index_cnames, entry.get_buf_shapevars()):
+ if signed != 0:
+ code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape))
+
+ return entry.generate_buffer_lookup_code(code, index_cnames)
+
+
+def use_bufstruct_declare_code(env):
+ env.use_utility_code(buffer_struct_declare_code)
+
+
+def buf_lookup_full_code(proto, defin, name, nd):
+ """
+ Generates a buffer lookup function for the right number
+ of dimensions. The function gives back a void* at the right location.
+ """
+ # _i_ndex, _s_tride, sub_o_ffset
+ macroargs = ", ".join(["i%d, s%d, o%d" % (i, i, i) for i in range(nd)])
+ proto.putln("#define %s(type, buf, %s) (type)(%s_imp(buf, %s))" % (name, macroargs, name, macroargs))
+
+ funcargs = ", ".join(["Py_ssize_t i%d, Py_ssize_t s%d, Py_ssize_t o%d" % (i, i, i) for i in range(nd)])
+ proto.putln("static CYTHON_INLINE void* %s_imp(void* buf, %s);" % (name, funcargs))
+ defin.putln(dedent("""
+ static CYTHON_INLINE void* %s_imp(void* buf, %s) {
+ char* ptr = (char*)buf;
+ """) % (name, funcargs) + "".join([dedent("""\
+ ptr += s%d * i%d;
+ if (o%d >= 0) ptr = *((char**)ptr) + o%d;
+ """) % (i, i, i, i) for i in range(nd)]
+ ) + "\nreturn ptr;\n}")
+
+
+def buf_lookup_strided_code(proto, defin, name, nd):
+ """
+ Generates a buffer lookup function for the right number
+ of dimensions. The function gives back a void* at the right location.
+ """
+ # _i_ndex, _s_tride
+ args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
+ offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd)])
+ proto.putln("#define %s(type, buf, %s) (type)((char*)buf + %s)" % (name, args, offset))
+
+
+def buf_lookup_c_code(proto, defin, name, nd):
+ """
+    Similar to strided lookup, but can assume that the last dimension
+    needs no stride multiplication, since its stride is 1 for a
+    C-contiguous buffer.
+ Still we keep the same signature for now.
+ """
+ if nd == 1:
+ proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
+ else:
+ args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
+ offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd - 1)])
+ proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, nd - 1))
+
+
+def buf_lookup_fortran_code(proto, defin, name, nd):
+ """
+ Like C lookup, but the first index is optimized instead.
+ """
+ if nd == 1:
+ proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
+ else:
+ args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
+ offset = " + ".join(["i%d * s%d" % (i, i) for i in range(1, nd)])
+ proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, 0))
+
+
+def use_py2_buffer_functions(env):
+ env.use_utility_code(GetAndReleaseBufferUtilityCode())
+
+
+class GetAndReleaseBufferUtilityCode(object):
+ # Emulation of PyObject_GetBuffer and PyBuffer_Release for Python 2.
+    # For >= 2.6 we do double mode -- use the new buffer interface on objects
+    # which have the right tp_flags set, and fall back to emulation otherwise.
+
+ requires = None
+ is_cython_utility = False
+
+ def __init__(self):
+ pass
+
+ def __eq__(self, other):
+ return isinstance(other, GetAndReleaseBufferUtilityCode)
+
+ def __hash__(self):
+ return 24342342
+
+ def get_tree(self, **kwargs): pass
+
+ def put_code(self, output):
+ code = output['utility_code_def']
+ proto_code = output['utility_code_proto']
+ env = output.module_node.scope
+ cython_scope = env.context.cython_scope
+
+ # Search all types for __getbuffer__ overloads
+ types = []
+ visited_scopes = set()
+ def find_buffer_types(scope):
+ if scope in visited_scopes:
+ return
+ visited_scopes.add(scope)
+ for m in scope.cimported_modules:
+ find_buffer_types(m)
+ for e in scope.type_entries:
+ if isinstance(e.utility_code_definition, CythonUtilityCode):
+ continue
+ t = e.type
+ if t.is_extension_type:
+ if scope is cython_scope and not e.used:
+ continue
+ release = get = None
+ for x in t.scope.pyfunc_entries:
+ if x.name == u"__getbuffer__": get = x.func_cname
+ elif x.name == u"__releasebuffer__": release = x.func_cname
+ if get:
+ types.append((t.typeptr_cname, get, release))
+
+ find_buffer_types(env)
+
+ util_code = TempitaUtilityCode.load(
+ "GetAndReleaseBuffer", from_file="Buffer.c",
+ context=dict(types=types))
+
+ proto = util_code.format_code(util_code.proto)
+ impl = util_code.format_code(
+ util_code.inject_string_constants(util_code.impl, output)[1])
+
+ proto_code.putln(proto)
+ code.putln(impl)
+
+
+def mangle_dtype_name(dtype):
+ # Use prefixes to separate user defined types from builtins
+ # (consider "typedef float unsigned_int")
+ if dtype.is_pyobject:
+ return "object"
+ elif dtype.is_ptr:
+ return "ptr"
+ else:
+ if dtype.is_typedef or dtype.is_struct_or_union:
+ prefix = "nn_"
+ else:
+ prefix = ""
+ return prefix + dtype.specialization_name()
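+
+# Examples (added for illustration): mangle_dtype_name() maps object dtypes to
+# "object", pointer dtypes to "ptr", and plain C types to their specialization
+# name (e.g. "int"), with typedefs and structs/unions prefixed by "nn_" -- so
+# the per-dtype globals emitted below are named e.g. "__Pyx_TypeInfo_int" or
+# "__Pyx_StructFields_nn_mystruct".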
+
+def get_type_information_cname(code, dtype, maxdepth=None):
+ """
+ Output the run-time type information (__Pyx_TypeInfo) for given dtype,
+ and return the name of the type info struct.
+
+ Structs with two floats of the same size are encoded as complex numbers.
+    One can distinguish complex numbers declared as a struct from natively
+    encoded ones by checking whether the 'fields' field of the type info
+    is filled in.
+ """
+ namesuffix = mangle_dtype_name(dtype)
+ name = "__Pyx_TypeInfo_%s" % namesuffix
+ structinfo_name = "__Pyx_StructFields_%s" % namesuffix
+
+ if dtype.is_error: return ""
+
+ # It's critical that walking the type info doesn't use more stack
+ # depth than dtype.struct_nesting_depth() returns, so use an assertion for this
+ if maxdepth is None: maxdepth = dtype.struct_nesting_depth()
+ if maxdepth <= 0:
+ assert False
+
+ if name not in code.globalstate.utility_codes:
+ code.globalstate.utility_codes.add(name)
+ typecode = code.globalstate['typeinfo']
+
+ arraysizes = []
+ if dtype.is_array:
+ while dtype.is_array:
+ arraysizes.append(dtype.size)
+ dtype = dtype.base_type
+
+ complex_possible = dtype.is_struct_or_union and dtype.can_be_complex()
+
+ declcode = dtype.empty_declaration_code()
+ if dtype.is_simple_buffer_dtype():
+ structinfo_name = "NULL"
+ elif dtype.is_struct:
+ struct_scope = dtype.scope
+ if dtype.is_cv_qualified:
+ struct_scope = struct_scope.base_type_scope
+ # Must pre-call all used types in order not to recurse during utility code writing.
+ fields = struct_scope.var_entries
+ assert len(fields) > 0
+ types = [get_type_information_cname(code, f.type, maxdepth - 1)
+ for f in fields]
+ typecode.putln("static __Pyx_StructField %s[] = {" % structinfo_name, safe=True)
+
+ if dtype.is_cv_qualified:
+ # roughly speaking, remove "const" from struct_type
+ struct_type = dtype.cv_base_type.empty_declaration_code()
+ else:
+ struct_type = dtype.empty_declaration_code()
+
+ for f, typeinfo in zip(fields, types):
+ typecode.putln(' {&%s, "%s", offsetof(%s, %s)},' %
+ (typeinfo, f.name, struct_type, f.cname), safe=True)
+
+ typecode.putln(' {NULL, NULL, 0}', safe=True)
+ typecode.putln("};", safe=True)
+ else:
+ assert False
+
+ rep = str(dtype)
+
+ flags = "0"
+ is_unsigned = "0"
+ if dtype is PyrexTypes.c_char_type:
+ is_unsigned = "__PYX_IS_UNSIGNED(%s)" % declcode
+ typegroup = "'H'"
+ elif dtype.is_int:
+ is_unsigned = "__PYX_IS_UNSIGNED(%s)" % declcode
+ typegroup = "%s ? 'U' : 'I'" % is_unsigned
+ elif complex_possible or dtype.is_complex:
+ typegroup = "'C'"
+ elif dtype.is_float:
+ typegroup = "'R'"
+ elif dtype.is_struct:
+ typegroup = "'S'"
+ if dtype.packed:
+ flags = "__PYX_BUF_FLAGS_PACKED_STRUCT"
+ elif dtype.is_pyobject:
+ typegroup = "'O'"
+ else:
+ assert False, dtype
+
+ typeinfo = ('static __Pyx_TypeInfo %s = '
+ '{ "%s", %s, sizeof(%s), { %s }, %s, %s, %s, %s };')
+ tup = (name, rep, structinfo_name, declcode,
+ ', '.join([str(x) for x in arraysizes]) or '0', len(arraysizes),
+ typegroup, is_unsigned, flags)
+ typecode.putln(typeinfo % tup, safe=True)
+
+ return name
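+
+# Reference for the typegroup codes emitted above (added): 'H' char,
+# 'I'/'U' signed/unsigned integer, 'R' float, 'C' complex, 'S' struct
+# (plus __PYX_BUF_FLAGS_PACKED_STRUCT when packed), 'O' Python object.
+# The runtime buffer-format checking utility code compares these groups
+# against the Py_buffer format string when a buffer is acquired.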
+
+def load_buffer_utility(util_code_name, context=None, **kwargs):
+ if context is None:
+ return UtilityCode.load(util_code_name, "Buffer.c", **kwargs)
+ else:
+ return TempitaUtilityCode.load(util_code_name, "Buffer.c", context=context, **kwargs)
+
+context = dict(max_dims=Options.buffer_max_dims)
+buffer_struct_declare_code = load_buffer_utility("BufferStructDeclare", context=context)
+buffer_formats_declare_code = load_buffer_utility("BufferFormatStructs")
+
+# Utility function to set the right exception
+# The caller should immediately goto_error
+raise_indexerror_code = load_buffer_utility("BufferIndexError")
+raise_indexerror_nogil = load_buffer_utility("BufferIndexErrorNogil")
+raise_buffer_fallback_code = load_buffer_utility("BufferFallbackError")
+
+acquire_utility_code = load_buffer_utility("BufferGetAndValidate", context=context)
+buffer_format_check_code = load_buffer_utility("BufferFormatCheck", context=context)
+
+# See utility code BufferFormatFromTypeInfo
+_typeinfo_to_format_code = load_buffer_utility("TypeInfoToFormat")
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Builtin.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Builtin.py
new file mode 100644
index 0000000000000000000000000000000000000000..46dea9282bead154aef485374c71cce4dc8cb139
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Builtin.py
@@ -0,0 +1,644 @@
+#
+# Builtin Definitions
+#
+
+from __future__ import absolute_import
+
+from .StringEncoding import EncodedString
+from .Symtab import BuiltinScope, StructOrUnionScope, ModuleScope, Entry
+from .Code import UtilityCode, TempitaUtilityCode
+from .TypeSlots import Signature
+from . import PyrexTypes
+
+
+# C-level implementations of builtin types, functions and methods
+
+iter_next_utility_code = UtilityCode.load("IterNext", "ObjectHandling.c")
+getattr_utility_code = UtilityCode.load("GetAttr", "ObjectHandling.c")
+getattr3_utility_code = UtilityCode.load("GetAttr3", "Builtins.c")
+pyexec_utility_code = UtilityCode.load("PyExec", "Builtins.c")
+pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c")
+globals_utility_code = UtilityCode.load("Globals", "Builtins.c")
+
+builtin_utility_code = {
+ 'StopAsyncIteration': UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"),
+}
+
+
+# mapping from builtins to their C-level equivalents
+
+class _BuiltinOverride(object):
+ def __init__(self, py_name, args, ret_type, cname, py_equiv="*",
+ utility_code=None, sig=None, func_type=None,
+ is_strict_signature=False, builtin_return_type=None,
+ nogil=None):
+ self.py_name, self.cname, self.py_equiv = py_name, cname, py_equiv
+ self.args, self.ret_type = args, ret_type
+ self.func_type, self.sig = func_type, sig
+ self.builtin_return_type = builtin_return_type
+ self.is_strict_signature = is_strict_signature
+ self.utility_code = utility_code
+ self.nogil = nogil
+
+ def build_func_type(self, sig=None, self_arg=None):
+ if sig is None:
+ sig = Signature(self.args, self.ret_type, nogil=self.nogil)
+ sig.exception_check = False # not needed for the current builtins
+ func_type = sig.function_type(self_arg)
+ if self.is_strict_signature:
+ func_type.is_strict_signature = True
+ if self.builtin_return_type:
+ func_type.return_type = builtin_types[self.builtin_return_type]
+ return func_type
+
+
+class BuiltinAttribute(object):
+ def __init__(self, py_name, cname=None, field_type=None, field_type_name=None):
+ self.py_name = py_name
+ self.cname = cname or py_name
+ self.field_type_name = field_type_name # can't do the lookup before the type is declared!
+ self.field_type = field_type
+
+ def declare_in_type(self, self_type):
+ if self.field_type_name is not None:
+ # lazy type lookup
+ field_type = builtin_scope.lookup(self.field_type_name).type
+ else:
+ field_type = self.field_type or PyrexTypes.py_object_type
+ entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private')
+ entry.is_variable = True
+
+
+class BuiltinFunction(_BuiltinOverride):
+ def declare_in_scope(self, scope):
+ func_type, sig = self.func_type, self.sig
+ if func_type is None:
+ func_type = self.build_func_type(sig)
+ scope.declare_builtin_cfunction(self.py_name, func_type, self.cname,
+ self.py_equiv, self.utility_code)
+
+
+class BuiltinMethod(_BuiltinOverride):
+ def declare_in_type(self, self_type):
+ method_type, sig = self.func_type, self.sig
+ if method_type is None:
+ # override 'self' type (first argument)
+ self_arg = PyrexTypes.CFuncTypeArg("", self_type, None)
+ self_arg.not_none = True
+ self_arg.accept_builtin_subtypes = True
+ method_type = self.build_func_type(sig, self_arg)
+ self_type.scope.declare_builtin_cfunction(
+ self.py_name, method_type, self.cname, utility_code=self.utility_code)
+
+
+class BuiltinProperty(object):
+ # read only for now
+ def __init__(self, py_name, property_type, call_cname,
+ exception_value=None, exception_check=None, utility_code=None):
+ self.py_name = py_name
+ self.property_type = property_type
+ self.call_cname = call_cname
+ self.utility_code = utility_code
+ self.exception_value = exception_value
+ self.exception_check = exception_check
+
+ def declare_in_type(self, self_type):
+ self_type.scope.declare_cproperty(
+ self.py_name,
+ self.property_type,
+ self.call_cname,
+ exception_value=self.exception_value,
+ exception_check=self.exception_check,
+ utility_code=self.utility_code
+ )
+
+
+builtin_function_table = [
+ # name, args, return, C API func, py equiv = "*"
+ BuiltinFunction('abs', "d", "d", "fabs",
+ is_strict_signature=True, nogil=True),
+ BuiltinFunction('abs', "f", "f", "fabsf",
+ is_strict_signature=True, nogil=True),
+ BuiltinFunction('abs', "i", "i", "abs",
+ is_strict_signature=True, nogil=True),
+ BuiltinFunction('abs', "l", "l", "labs",
+ is_strict_signature=True, nogil=True),
+ BuiltinFunction('abs', None, None, "__Pyx_abs_longlong",
+ utility_code = UtilityCode.load("abs_longlong", "Builtins.c"),
+ func_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_longlong_type, [
+ PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None)
+ ],
+ is_strict_signature = True, nogil=True)),
+ ] + list(
+ BuiltinFunction('abs', None, None, "/*abs_{0}*/".format(t.specialization_name()),
+ func_type = PyrexTypes.CFuncType(
+ t,
+ [PyrexTypes.CFuncTypeArg("arg", t, None)],
+ is_strict_signature = True, nogil=True))
+ for t in (PyrexTypes.c_uint_type, PyrexTypes.c_ulong_type, PyrexTypes.c_ulonglong_type)
+ ) + list(
+ BuiltinFunction('abs', None, None, "__Pyx_c_abs{0}".format(t.funcsuffix),
+ func_type = PyrexTypes.CFuncType(
+ t.real_type, [
+ PyrexTypes.CFuncTypeArg("arg", t, None)
+ ],
+ is_strict_signature = True, nogil=True))
+ for t in (PyrexTypes.c_float_complex_type,
+ PyrexTypes.c_double_complex_type,
+ PyrexTypes.c_longdouble_complex_type)
+ ) + [
+ BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute",
+ utility_code=UtilityCode.load("py_abs", "Builtins.c")),
+ #('all', "", "", ""),
+ #('any', "", "", ""),
+ #('ascii', "", "", ""),
+ #('bin', "", "", ""),
+ BuiltinFunction('callable', "O", "b", "__Pyx_PyCallable_Check",
+ utility_code = UtilityCode.load("CallableCheck", "ObjectHandling.c")),
+ #('chr', "", "", ""),
+ #('cmp', "", "", "", ""), # int PyObject_Cmp(PyObject *o1, PyObject *o2, int *result)
+ #('compile', "", "", ""), # PyObject* Py_CompileString( char *str, char *filename, int start)
+ BuiltinFunction('delattr', "OO", "r", "PyObject_DelAttr"),
+ BuiltinFunction('dir', "O", "O", "PyObject_Dir"),
+ BuiltinFunction('divmod', "OO", "O", "PyNumber_Divmod"),
+ BuiltinFunction('exec', "O", "O", "__Pyx_PyExecGlobals",
+ utility_code = pyexec_globals_utility_code),
+ BuiltinFunction('exec', "OO", "O", "__Pyx_PyExec2",
+ utility_code = pyexec_utility_code),
+ BuiltinFunction('exec', "OOO", "O", "__Pyx_PyExec3",
+ utility_code = pyexec_utility_code),
+ #('eval', "", "", ""),
+ #('execfile', "", "", ""),
+ #('filter', "", "", ""),
+ BuiltinFunction('getattr3', "OOO", "O", "__Pyx_GetAttr3", "getattr",
+ utility_code=getattr3_utility_code), # Pyrex legacy
+ BuiltinFunction('getattr', "OOO", "O", "__Pyx_GetAttr3",
+ utility_code=getattr3_utility_code),
+ BuiltinFunction('getattr', "OO", "O", "__Pyx_GetAttr",
+ utility_code=getattr_utility_code),
+ BuiltinFunction('hasattr', "OO", "b", "__Pyx_HasAttr",
+ utility_code = UtilityCode.load("HasAttr", "Builtins.c")),
+ BuiltinFunction('hash', "O", "h", "PyObject_Hash"),
+ #('hex', "", "", ""),
+ #('id', "", "", ""),
+ #('input', "", "", ""),
+ BuiltinFunction('intern', "O", "O", "__Pyx_Intern",
+ utility_code = UtilityCode.load("Intern", "Builtins.c")),
+ BuiltinFunction('isinstance', "OO", "b", "PyObject_IsInstance"),
+ BuiltinFunction('issubclass', "OO", "b", "PyObject_IsSubclass"),
+ BuiltinFunction('iter', "OO", "O", "PyCallIter_New"),
+ BuiltinFunction('iter', "O", "O", "PyObject_GetIter"),
+ BuiltinFunction('len', "O", "z", "PyObject_Length"),
+ BuiltinFunction('locals', "", "O", "__pyx_locals"),
+ #('map', "", "", ""),
+ #('max', "", "", ""),
+ #('min', "", "", ""),
+ BuiltinFunction('next', "O", "O", "__Pyx_PyIter_Next",
+ utility_code = iter_next_utility_code), # not available in Py2 => implemented here
+ BuiltinFunction('next', "OO", "O", "__Pyx_PyIter_Next2",
+ utility_code = iter_next_utility_code), # not available in Py2 => implemented here
+ #('oct', "", "", ""),
+ #('open', "ss", "O", "PyFile_FromString"), # not in Py3
+] + [
+ BuiltinFunction('ord', None, None, "__Pyx_long_cast",
+ func_type=PyrexTypes.CFuncType(
+ PyrexTypes.c_long_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
+ is_strict_signature=True))
+ for c_type in [PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type]
+] + [
+ BuiltinFunction('ord', None, None, "__Pyx_uchar_cast",
+ func_type=PyrexTypes.CFuncType(
+ PyrexTypes.c_uchar_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
+ is_strict_signature=True))
+ for c_type in [PyrexTypes.c_char_type, PyrexTypes.c_schar_type, PyrexTypes.c_uchar_type]
+] + [
+ BuiltinFunction('ord', None, None, "__Pyx_PyObject_Ord",
+ utility_code=UtilityCode.load_cached("object_ord", "Builtins.c"),
+ func_type=PyrexTypes.CFuncType(
+ PyrexTypes.c_long_type, [
+ PyrexTypes.CFuncTypeArg("c", PyrexTypes.py_object_type, None)
+ ],
+ exception_value="(long)(Py_UCS4)-1")),
+ BuiltinFunction('pow', "OOO", "O", "PyNumber_Power"),
+ BuiltinFunction('pow', "OO", "O", "__Pyx_PyNumber_Power2",
+ utility_code = UtilityCode.load("pow2", "Builtins.c")),
+ #('range', "", "", ""),
+ #('raw_input', "", "", ""),
+ #('reduce', "", "", ""),
+ BuiltinFunction('reload', "O", "O", "PyImport_ReloadModule"),
+ BuiltinFunction('repr', "O", "O", "PyObject_Repr"), # , builtin_return_type='str'), # add in Cython 3.1
+ #('round', "", "", ""),
+ BuiltinFunction('setattr', "OOO", "r", "PyObject_SetAttr"),
+ #('sum', "", "", ""),
+ #('sorted', "", "", ""),
+ #('type', "O", "O", "PyObject_Type"),
+ BuiltinFunction('unichr', "i", "O", "PyUnicode_FromOrdinal", builtin_return_type='unicode'),
+ #('unicode', "", "", ""),
+ #('vars', "", "", ""),
+ #('zip', "", "", ""),
+ # Can't do these easily until we have builtin type entries.
+ #('typecheck', "OO", "i", "PyObject_TypeCheck", False),
+ #('issubtype', "OO", "i", "PyType_IsSubtype", False),
+
+ # Put in namespace append optimization.
+ BuiltinFunction('__Pyx_PyObject_Append', "OO", "O", "__Pyx_PyObject_Append"),
+
+ # This is conditionally looked up based on a compiler directive.
+ BuiltinFunction('__Pyx_Globals', "", "O", "__Pyx_Globals",
+ utility_code=globals_utility_code),
+]
+
+
+# Builtin types
+# bool
+# buffer
+# classmethod
+# dict
+# enumerate
+# file
+# float
+# int
+# list
+# long
+# object
+# property
+# slice
+# staticmethod
+# super
+# str
+# tuple
+# type
+# xrange
+
+builtin_types_table = [
+
+ ("type", "PyType_Type", []),
+
+# This conflicts with the C++ bool type, and unfortunately
+# C++ is too liberal about PyObject* <-> bool conversions,
+# resulting in unintuitive runtime behavior and segfaults.
+# ("bool", "PyBool_Type", []),
+
+ ("int", "PyInt_Type", []),
+ ("long", "PyLong_Type", []),
+ ("float", "PyFloat_Type", []),
+
+ ("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'),
+ BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type),
+ BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type),
+ ]),
+
+ ("basestring", "PyBaseString_Type", [
+ BuiltinMethod("join", "TO", "T", "__Pyx_PyBaseString_Join",
+ utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
+ BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply",
+ utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")),
+ ]),
+ ("bytearray", "PyByteArray_Type", [
+ BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply",
+ utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")),
+ ]),
+ ("bytes", "PyBytes_Type", [BuiltinMethod("join", "TO", "O", "__Pyx_PyBytes_Join",
+ utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
+ BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply",
+ utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")),
+ ]),
+ ("str", "PyString_Type", [BuiltinMethod("join", "TO", "O", "__Pyx_PyString_Join",
+ builtin_return_type='basestring',
+ utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
+ BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply",
+ utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")),
+ ]),
+ ("unicode", "PyUnicode_Type", [BuiltinMethod("__contains__", "TO", "b", "PyUnicode_Contains"),
+ BuiltinMethod("join", "TO", "T", "PyUnicode_Join"),
+ BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply",
+ utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")),
+ ]),
+
+ ("tuple", "PyTuple_Type", [BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply",
+ utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")),
+ ]),
+
+ ("list", "PyList_Type", [BuiltinMethod("insert", "TzO", "r", "PyList_Insert"),
+ BuiltinMethod("reverse", "T", "r", "PyList_Reverse"),
+ BuiltinMethod("append", "TO", "r", "__Pyx_PyList_Append",
+ utility_code=UtilityCode.load("ListAppend", "Optimize.c")),
+ BuiltinMethod("extend", "TO", "r", "__Pyx_PyList_Extend",
+ utility_code=UtilityCode.load("ListExtend", "Optimize.c")),
+ BuiltinMethod("__mul__", "Tz", "T", "__Pyx_PySequence_Multiply",
+ utility_code=UtilityCode.load("PySequenceMultiply", "ObjectHandling.c")),
+ ]),
+
+ ("dict", "PyDict_Type", [BuiltinMethod("__contains__", "TO", "b", "PyDict_Contains"),
+ BuiltinMethod("has_key", "TO", "b", "PyDict_Contains"),
+ BuiltinMethod("items", "T", "O", "__Pyx_PyDict_Items",
+ utility_code=UtilityCode.load("py_dict_items", "Builtins.c")),
+ BuiltinMethod("keys", "T", "O", "__Pyx_PyDict_Keys",
+ utility_code=UtilityCode.load("py_dict_keys", "Builtins.c")),
+ BuiltinMethod("values", "T", "O", "__Pyx_PyDict_Values",
+ utility_code=UtilityCode.load("py_dict_values", "Builtins.c")),
+ BuiltinMethod("iteritems", "T", "O", "__Pyx_PyDict_IterItems",
+ utility_code=UtilityCode.load("py_dict_iteritems", "Builtins.c")),
+ BuiltinMethod("iterkeys", "T", "O", "__Pyx_PyDict_IterKeys",
+ utility_code=UtilityCode.load("py_dict_iterkeys", "Builtins.c")),
+ BuiltinMethod("itervalues", "T", "O", "__Pyx_PyDict_IterValues",
+ utility_code=UtilityCode.load("py_dict_itervalues", "Builtins.c")),
+ BuiltinMethod("viewitems", "T", "O", "__Pyx_PyDict_ViewItems",
+ utility_code=UtilityCode.load("py_dict_viewitems", "Builtins.c")),
+ BuiltinMethod("viewkeys", "T", "O", "__Pyx_PyDict_ViewKeys",
+ utility_code=UtilityCode.load("py_dict_viewkeys", "Builtins.c")),
+ BuiltinMethod("viewvalues", "T", "O", "__Pyx_PyDict_ViewValues",
+ utility_code=UtilityCode.load("py_dict_viewvalues", "Builtins.c")),
+ BuiltinMethod("clear", "T", "r", "__Pyx_PyDict_Clear",
+ utility_code=UtilityCode.load("py_dict_clear", "Optimize.c")),
+ BuiltinMethod("copy", "T", "T", "PyDict_Copy")]),
+
+ ("slice", "PySlice_Type", [BuiltinAttribute('start'),
+ BuiltinAttribute('stop'),
+ BuiltinAttribute('step'),
+ ]),
+# ("file", "PyFile_Type", []), # not in Py3
+
+ ("set", "PySet_Type", [BuiltinMethod("clear", "T", "r", "PySet_Clear"),
+ # discard() and remove() have a special treatment for unhashable values
+ BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard",
+ utility_code=UtilityCode.load("py_set_discard", "Optimize.c")),
+ BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove",
+ utility_code=UtilityCode.load("py_set_remove", "Optimize.c")),
+ # update is actually variadic (see Github issue #1645)
+# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update",
+# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")),
+ BuiltinMethod("add", "TO", "r", "PySet_Add"),
+ BuiltinMethod("pop", "T", "O", "PySet_Pop")]),
+ ("frozenset", "PyFrozenSet_Type", []),
+ ("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []),
+ ("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []),
+ ("memoryview", "PyMemoryView_Type", [
+ # TODO - format would be nice, but hard to get
+ # __len__ can be accessed through a direct lookup of the buffer (but probably in Optimize.c)
+ # error checking would ideally be limited api only
+ BuiltinProperty("ndim", PyrexTypes.c_int_type, '__Pyx_PyMemoryView_Get_ndim',
+ exception_value="-1", exception_check=True,
+ utility_code=TempitaUtilityCode.load_cached(
+ "memoryview_get_from_buffer", "Builtins.c",
+ context=dict(name="ndim")
+ )
+ ),
+ BuiltinProperty("readonly", PyrexTypes.c_bint_type, '__Pyx_PyMemoryView_Get_readonly',
+ exception_value="-1", exception_check=True,
+ utility_code=TempitaUtilityCode.load_cached(
+ "memoryview_get_from_buffer", "Builtins.c",
+ context=dict(name="readonly")
+ )
+ ),
+ BuiltinProperty("itemsize", PyrexTypes.c_py_ssize_t_type, '__Pyx_PyMemoryView_Get_itemsize',
+ exception_value="-1", exception_check=True,
+ utility_code=TempitaUtilityCode.load_cached(
+ "memoryview_get_from_buffer", "Builtins.c",
+ context=dict(name="itemsize")
+ )
+ )]
+ )
+]
+
+
+types_that_construct_their_instance = frozenset({
+ # some builtin types do not always return an instance of
+ # themselves - these do:
+ 'type', 'bool', 'long', 'float', 'complex',
+ 'bytes', 'unicode', 'bytearray',
+ 'tuple', 'list', 'dict', 'set', 'frozenset',
+ # 'str', # only in Py3.x
+ # 'file', # only in Py2.x
+ 'memoryview'
+})
+
+
+builtin_structs_table = [
+ ('Py_buffer', 'Py_buffer',
+ [("buf", PyrexTypes.c_void_ptr_type),
+ ("obj", PyrexTypes.py_object_type),
+ ("len", PyrexTypes.c_py_ssize_t_type),
+ ("itemsize", PyrexTypes.c_py_ssize_t_type),
+ ("readonly", PyrexTypes.c_bint_type),
+ ("ndim", PyrexTypes.c_int_type),
+ ("format", PyrexTypes.c_char_ptr_type),
+ ("shape", PyrexTypes.c_py_ssize_t_ptr_type),
+ ("strides", PyrexTypes.c_py_ssize_t_ptr_type),
+ ("suboffsets", PyrexTypes.c_py_ssize_t_ptr_type),
+ ("smalltable", PyrexTypes.CArrayType(PyrexTypes.c_py_ssize_t_type, 2)),
+ ("internal", PyrexTypes.c_void_ptr_type),
+ ]),
+ ('Py_complex', 'Py_complex',
+ [('real', PyrexTypes.c_double_type),
+ ('imag', PyrexTypes.c_double_type),
+ ])
+]
+
+# set up builtin scope
+
+builtin_scope = BuiltinScope()
+
+def init_builtin_funcs():
+ for bf in builtin_function_table:
+ bf.declare_in_scope(builtin_scope)
+
+builtin_types = {}
+
+def init_builtin_types():
+ global builtin_types
+ for name, cname, methods in builtin_types_table:
+ utility = builtin_utility_code.get(name)
+ if name == 'frozenset':
+ objstruct_cname = 'PySetObject'
+ elif name == 'bytearray':
+ objstruct_cname = 'PyByteArrayObject'
+ elif name == 'bool':
+ objstruct_cname = None
+ elif name == 'Exception':
+ objstruct_cname = "PyBaseExceptionObject"
+ elif name == 'StopAsyncIteration':
+ objstruct_cname = "PyBaseExceptionObject"
+ else:
+ objstruct_cname = 'Py%sObject' % name.capitalize()
+ type_class = PyrexTypes.BuiltinObjectType
+ if name in ['dict', 'list', 'set', 'frozenset']:
+ type_class = PyrexTypes.BuiltinTypeConstructorObjectType
+ elif name == 'tuple':
+ type_class = PyrexTypes.PythonTupleTypeConstructor
+ the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname,
+ type_class=type_class)
+ builtin_types[name] = the_type
+ for method in methods:
+ method.declare_in_type(the_type)
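+
+# For illustration (added): the default derivation above yields e.g.
+# 'dict' -> "PyDictObject" and 'list' -> "PyListObject"; the special cases
+# exist where capitalize() would guess wrong ('bytearray' is
+# "PyByteArrayObject", and 'frozenset' shares "PySetObject" with 'set').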
+
+def init_builtin_structs():
+ for name, cname, attribute_types in builtin_structs_table:
+ scope = StructOrUnionScope(name)
+ for attribute_name, attribute_type in attribute_types:
+ scope.declare_var(attribute_name, attribute_type, None,
+ attribute_name, allow_pyobject=True)
+ builtin_scope.declare_struct_or_union(
+ name, "struct", scope, 1, None, cname = cname)
+
+
+def init_builtins():
+ #Errors.init_thread() # hopefully not needed - we should not emit warnings ourselves
+ init_builtin_structs()
+ init_builtin_types()
+ init_builtin_funcs()
+
+ entry = builtin_scope.declare_var(
+ '__debug__', PyrexTypes.c_const_type(PyrexTypes.c_bint_type),
+ pos=None, cname='__pyx_assertions_enabled()', is_cdef=True)
+ entry.utility_code = UtilityCode.load_cached("AssertionsEnabled", "Exceptions.c")
+
+ global type_type, list_type, tuple_type, dict_type, set_type, frozenset_type, slice_type
+ global bytes_type, str_type, unicode_type, basestring_type, bytearray_type
+ global float_type, int_type, long_type, bool_type, complex_type
+ global memoryview_type, py_buffer_type
+ global sequence_types
+ type_type = builtin_scope.lookup('type').type
+ list_type = builtin_scope.lookup('list').type
+ tuple_type = builtin_scope.lookup('tuple').type
+ dict_type = builtin_scope.lookup('dict').type
+ set_type = builtin_scope.lookup('set').type
+ frozenset_type = builtin_scope.lookup('frozenset').type
+ slice_type = builtin_scope.lookup('slice').type
+
+ bytes_type = builtin_scope.lookup('bytes').type
+ str_type = builtin_scope.lookup('str').type
+ unicode_type = builtin_scope.lookup('unicode').type
+ basestring_type = builtin_scope.lookup('basestring').type
+ bytearray_type = builtin_scope.lookup('bytearray').type
+ memoryview_type = builtin_scope.lookup('memoryview').type
+
+ float_type = builtin_scope.lookup('float').type
+ int_type = builtin_scope.lookup('int').type
+ long_type = builtin_scope.lookup('long').type
+ bool_type = builtin_scope.lookup('bool').type
+ complex_type = builtin_scope.lookup('complex').type
+
+ sequence_types = (
+ list_type,
+ tuple_type,
+ bytes_type,
+ str_type,
+ unicode_type,
+ basestring_type,
+ bytearray_type,
+ memoryview_type,
+ )
+
+ # Set up type inference links between equivalent Python/C types
+ bool_type.equivalent_type = PyrexTypes.c_bint_type
+ PyrexTypes.c_bint_type.equivalent_type = bool_type
+
+ float_type.equivalent_type = PyrexTypes.c_double_type
+ PyrexTypes.c_double_type.equivalent_type = float_type
+
+ complex_type.equivalent_type = PyrexTypes.c_double_complex_type
+ PyrexTypes.c_double_complex_type.equivalent_type = complex_type
+
+ py_buffer_type = builtin_scope.lookup('Py_buffer').type
+
+
+init_builtins()
+
+##############################
+# Support for a few standard library modules that Cython understands (currently typing, dataclasses and functools)
+##############################
+_known_module_scopes = {}
+
+def get_known_standard_library_module_scope(module_name):
+ mod = _known_module_scopes.get(module_name)
+ if mod:
+ return mod
+
+ if module_name == "typing":
+ mod = ModuleScope(module_name, None, None)
+ for name, tp in [
+ ('Dict', dict_type),
+ ('List', list_type),
+ ('Tuple', tuple_type),
+ ('Set', set_type),
+ ('FrozenSet', frozenset_type),
+ ]:
+ name = EncodedString(name)
+ entry = mod.declare_type(name, tp, pos = None)
+ var_entry = Entry(name, None, PyrexTypes.py_object_type)
+ var_entry.is_pyglobal = True
+ var_entry.is_variable = True
+ var_entry.scope = mod
+ entry.as_variable = var_entry
+ entry.known_standard_library_import = "%s.%s" % (module_name, name)
+
+ for name in ['ClassVar', 'Optional']:
+ name = EncodedString(name)
+ indexed_type = PyrexTypes.SpecialPythonTypeConstructor(EncodedString("typing."+name))
+ entry = mod.declare_type(name, indexed_type, pos = None)
+ var_entry = Entry(name, None, PyrexTypes.py_object_type)
+ var_entry.is_pyglobal = True
+ var_entry.is_variable = True
+ var_entry.scope = mod
+ entry.as_variable = var_entry
+ entry.known_standard_library_import = "%s.%s" % (module_name, name)
+ _known_module_scopes[module_name] = mod
+ elif module_name == "dataclasses":
+ mod = ModuleScope(module_name, None, None)
+ indexed_type = PyrexTypes.SpecialPythonTypeConstructor(EncodedString("dataclasses.InitVar"))
+ initvar_string = EncodedString("InitVar")
+ entry = mod.declare_type(initvar_string, indexed_type, pos = None)
+ var_entry = Entry(initvar_string, None, PyrexTypes.py_object_type)
+ var_entry.is_pyglobal = True
+ var_entry.scope = mod
+ entry.as_variable = var_entry
+ entry.known_standard_library_import = "%s.InitVar" % module_name
+ for name in ["dataclass", "field"]:
+ mod.declare_var(EncodedString(name), PyrexTypes.py_object_type, pos=None)
+ _known_module_scopes[module_name] = mod
+ elif module_name == "functools":
+ mod = ModuleScope(module_name, None, None)
+ for name in ["total_ordering"]:
+ mod.declare_var(EncodedString(name), PyrexTypes.py_object_type, pos=None)
+ _known_module_scopes[module_name] = mod
+
+ return mod
+
+
+def get_known_standard_library_entry(qualified_name):
+ name_parts = qualified_name.split(".")
+ module_name = EncodedString(name_parts[0])
+ rest = name_parts[1:]
+
+ if len(rest) > 1: # for now, we don't know how to deal with any nested modules
+ return None
+
+ mod = get_known_standard_library_module_scope(module_name)
+
+ # eventually handle more sophisticated multiple lookups if needed
+ if mod and rest:
+ return mod.lookup_here(rest[0])
+ return None
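+# Hedged example of the two lookups above:
+#
+#     mod = get_known_standard_library_module_scope("typing")
+#     entry = get_known_standard_library_entry("typing.Optional")
+#     # entry.name == "Optional"; nested names like "a.b.c" return None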
+
+
+def exprnode_to_known_standard_library_name(node, env):
+ qualified_name_parts = []
+ known_name = None
+ while node.is_attribute:
+ qualified_name_parts.append(node.attribute)
+ node = node.obj
+ if node.is_name:
+ entry = env.lookup(node.name)
+ if entry and entry.known_standard_library_import:
+ if get_known_standard_library_entry(
+ entry.known_standard_library_import):
+ known_name = entry.known_standard_library_import
+ else:
+ standard_env = get_known_standard_library_module_scope(
+ entry.known_standard_library_import)
+ if standard_env:
+ qualified_name_parts.append(standard_env.name)
+ known_name = ".".join(reversed(qualified_name_parts))
+ return known_name
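+# Hedged walk-through of the resolution above: for an attribute node for
+# 'typing.List' (after 'import typing'), the loop collects ['List'], finds
+# the 'typing' module entry, and returns "typing.List".  For
+# 'from typing import List', the looked-up name entry already carries
+# known_standard_library_import and is returned directly.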
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CmdLine.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CmdLine.py
new file mode 100644
index 0000000000000000000000000000000000000000..776636c3234fb04cbd2d3a50ae092de9e2c900ec
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CmdLine.py
@@ -0,0 +1,251 @@
+#
+# Cython - Command Line Parsing
+#
+
+from __future__ import absolute_import
+
+import sys
+import os
+from argparse import ArgumentParser, Action, SUPPRESS
+from . import Options
+
+
+if sys.version_info < (3, 3):
+ # TODO: This workaround can be removed in Cython 3.1
+ FileNotFoundError = IOError
+
+
+class ParseDirectivesAction(Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ old_directives = dict(getattr(namespace, self.dest,
+ Options.get_directive_defaults()))
+ directives = Options.parse_directive_list(
+ values, relaxed_bool=True, current_settings=old_directives)
+ setattr(namespace, self.dest, directives)
+
+
+class ParseOptionsAction(Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ options = dict(getattr(namespace, self.dest, {}))
+ for opt in values.split(','):
+ if '=' in opt:
+ n, v = opt.split('=', 1)
+ v = v.lower() not in ('false', 'f', '0', 'no')
+ else:
+ n, v = opt, True
+ options[n] = v
+ setattr(namespace, self.dest, options)
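+    # Hedged illustration: a value string "a=false,b=1,c" parses to
+    # {'a': False, 'b': True, 'c': True} under the rules above.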
+
+
+class ParseCompileTimeEnvAction(Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ old_env = dict(getattr(namespace, self.dest, {}))
+ new_env = Options.parse_compile_time_env(values, current_settings=old_env)
+ setattr(namespace, self.dest, new_env)
+
+
+class ActivateAllWarningsAction(Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ directives = getattr(namespace, 'compiler_directives', {})
+ directives.update(Options.extra_warnings)
+ namespace.compiler_directives = directives
+
+
+class SetLenientAction(Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ namespace.error_on_unknown_names = False
+ namespace.error_on_uninitialized = False
+
+
+class SetGDBDebugAction(Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ namespace.gdb_debug = True
+ namespace.output_dir = os.curdir
+
+
+class SetGDBDebugOutputAction(Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ namespace.gdb_debug = True
+ namespace.output_dir = values
+
+
+class SetAnnotateCoverageAction(Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ namespace.annotate = True
+ namespace.annotate_coverage_xml = values
+
+
+def create_cython_argparser():
+ description = "Cython (https://cython.org/) is a compiler for code written in the "\
+ "Cython language. Cython is based on Pyrex by Greg Ewing."
+
+ parser = ArgumentParser(description=description, argument_default=SUPPRESS)
+
+ parser.add_argument("-V", "--version", dest='show_version', action='store_const', const=1,
+ help='Display version number of cython compiler')
+ parser.add_argument("-l", "--create-listing", dest='use_listing_file', action='store_const', const=1,
+ help='Write error messages to a listing file')
+ parser.add_argument("-I", "--include-dir", dest='include_path', action='append',
+ help='Search for include files in named directory '
+ '(multiple include directories are allowed).')
+ parser.add_argument("-o", "--output-file", dest='output_file', action='store', type=str,
+ help='Specify name of generated C file')
+ parser.add_argument("-t", "--timestamps", dest='timestamps', action='store_const', const=1,
+ help='Only compile newer source files')
+ parser.add_argument("-f", "--force", dest='timestamps', action='store_const', const=0,
+ help='Compile all source files (overrides implied -t)')
+ parser.add_argument("-v", "--verbose", dest='verbose', action='count',
+                      help='Be verbose; print file names when compiling multiple files')
+ parser.add_argument("-p", "--embed-positions", dest='embed_pos_in_docstring', action='store_const', const=1,
+                      help='If specified, the position in the Cython file of each '
+                      'function definition is embedded in its docstring.')
+ parser.add_argument("--cleanup", dest='generate_cleanup_code', action='store', type=int,
+ help='Release interned objects on python exit, for memory debugging. '
+ 'Level indicates aggressiveness, default 0 releases nothing.')
+ parser.add_argument("-w", "--working", dest='working_path', action='store', type=str,
+ help='Sets the working directory for Cython (the directory modules are searched from)')
+ parser.add_argument("--gdb", action=SetGDBDebugAction, nargs=0,
+ help='Output debug information for cygdb')
+ parser.add_argument("--gdb-outdir", action=SetGDBDebugOutputAction, type=str,
+ help='Specify gdb debug information output directory. Implies --gdb.')
+ parser.add_argument("-D", "--no-docstrings", dest='docstrings', action='store_false',
+ help='Strip docstrings from the compiled module.')
+ parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',
+ help='Produce a colorized HTML version of the source.')
+ parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',
+ help='Produce a colorized HTML version of the source '
+ 'which includes entire generated C/C++-code.')
+ parser.add_argument("--annotate-coverage", dest='annotate_coverage_xml', action=SetAnnotateCoverageAction, type=str,
+ help='Annotate and include coverage information from cov.xml.')
+ parser.add_argument("--line-directives", dest='emit_linenums', action='store_true',
+ help='Produce #line directives pointing to the .pyx source')
+ parser.add_argument("-+", "--cplus", dest='cplus', action='store_const', const=1,
+ help='Output a C++ rather than C file.')
+ parser.add_argument('--embed', action='store_const', const='main',
+ help='Generate a main() function that embeds the Python interpreter. '
+                           'Pass --embed=<method_name> for a name other than main().')
+ parser.add_argument('-2', dest='language_level', action='store_const', const=2,
+ help='Compile based on Python-2 syntax and code semantics.')
+ parser.add_argument('-3', dest='language_level', action='store_const', const=3,
+ help='Compile based on Python-3 syntax and code semantics.')
+ parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',
+ help='Compile based on Python-3 syntax and code semantics without '
+ 'assuming unicode by default for string literals under Python 2.')
+ parser.add_argument("--lenient", action=SetLenientAction, nargs=0,
+ help='Change some compile time errors to runtime errors to '
+ 'improve Python compatibility')
+ parser.add_argument("--capi-reexport-cincludes", dest='capi_reexport_cincludes', action='store_true',
+ help='Add cincluded headers to any auto-generated header files.')
+ parser.add_argument("--fast-fail", dest='fast_fail', action='store_true',
+ help='Abort the compilation on the first error')
+ parser.add_argument("-Werror", "--warning-errors", dest='warning_errors', action='store_true',
+ help='Make all warnings into errors')
+ parser.add_argument("-Wextra", "--warning-extra", action=ActivateAllWarningsAction, nargs=0,
+ help='Enable extra warnings')
+
+ parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',
+ dest='compiler_directives', type=str,
+ action=ParseDirectivesAction,
+ help='Overrides a compiler directive')
+ parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',
+ dest='compile_time_env', type=str,
+ action=ParseCompileTimeEnvAction,
+ help='Provides compile time env like DEF would do.')
+ parser.add_argument("--module-name",
+ dest='module_name', type=str, action='store',
+ help='Fully qualified module name. If not given, is '
+ 'deduced from the import path if source file is in '
+ 'a package, or equals the filename otherwise.')
+    parser.add_argument('-M', '--depfile', action='store_true', help='Produce depfiles for the sources')
+ parser.add_argument('sources', nargs='*', default=[])
+
+ # TODO: add help
+ parser.add_argument("-z", "--pre-import", dest='pre_import', action='store', type=str, help=SUPPRESS)
+ parser.add_argument("--convert-range", dest='convert_range', action='store_true', help=SUPPRESS)
+ parser.add_argument("--no-c-in-traceback", dest='c_line_in_traceback', action='store_false', help=SUPPRESS)
+ parser.add_argument("--cimport-from-pyx", dest='cimport_from_pyx', action='store_true', help=SUPPRESS)
+ parser.add_argument("--old-style-globals", dest='old_style_globals', action='store_true', help=SUPPRESS)
+
+ # debug stuff:
+ from . import DebugFlags
+ for name in vars(DebugFlags):
+ if name.startswith("debug"):
+ option_name = name.replace('_', '-')
+ parser.add_argument("--" + option_name, action='store_true', help=SUPPRESS)
+
+ return parser
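+# Hedged usage sketch of the parser built above:
+#
+#     parser = create_cython_argparser()
+#     args = parser.parse_args(['-3', '--annotate', 'mod.pyx'])
+#     # args.language_level == 3; args.annotate == 'default'
+#     # args.sources == ['mod.pyx']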
+
+
+def parse_command_line_raw(parser, args):
+ # special handling for --embed and --embed=xxxx as they aren't correctly parsed
+ def filter_out_embed_options(args):
+ with_embed, without_embed = [], []
+ for x in args:
+ if x == '--embed' or x.startswith('--embed='):
+ with_embed.append(x)
+ else:
+ without_embed.append(x)
+ return with_embed, without_embed
+
+ with_embed, args_without_embed = filter_out_embed_options(args)
+
+ arguments, unknown = parser.parse_known_args(args_without_embed)
+
+ sources = arguments.sources
+ del arguments.sources
+
+    # entries in 'unknown' can be debug flags, embed options, input files, or genuinely unknown arguments
+ for option in unknown:
+ if option.startswith('-'):
+ parser.error("unknown option " + option)
+ else:
+ sources.append(option)
+
+    # the embed options filtered out above must be applied by hand:
+ for x in with_embed:
+ if x == '--embed':
+ name = 'main' # default value
+ else:
+ name = x[len('--embed='):]
+ setattr(arguments, 'embed', name)
+
+ return arguments, sources
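+# Hedged illustration: ['--embed=run', 'x.pyx'] yields arguments.embed == 'run'
+# and sources == ['x.pyx']; a bare '--embed' defaults the name to 'main'.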
+
+
+def parse_command_line(args):
+ parser = create_cython_argparser()
+ arguments, sources = parse_command_line_raw(parser, args)
+
+ work_dir = getattr(arguments, 'working_path', '')
+ for source in sources:
+ if work_dir and not os.path.isabs(source):
+ source = os.path.join(work_dir, source)
+ if not os.path.exists(source):
+ import errno
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), source)
+
+ options = Options.CompilationOptions(Options.default_options)
+ for name, value in vars(arguments).items():
+ if name.startswith('debug'):
+ from . import DebugFlags
+ if name in dir(DebugFlags):
+ setattr(DebugFlags, name, value)
+ else:
+ parser.error("Unknown debug flag: %s\n" % name)
+ elif hasattr(Options, name):
+ setattr(Options, name, value)
+ else:
+ setattr(options, name, value)
+
+ if options.use_listing_file and len(sources) > 1:
+ parser.error("cython: Only one source file allowed when using -o\n")
+ if len(sources) == 0 and not options.show_version:
+ parser.error("cython: Need at least one source file\n")
+ if Options.embed and len(sources) > 1:
+ parser.error("cython: Only one source file allowed when using --embed\n")
+ if options.module_name:
+ if options.timestamps:
+ parser.error("cython: Cannot use --module-name with --timestamps\n")
+ if len(sources) > 1:
+ parser.error("cython: Only one source file allowed when using --module-name\n")
+ return options, sources
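+# Hedged usage sketch ('hello.py' is assumed to exist, since sources are
+# existence-checked above):
+#
+#     options, sources = parse_command_line(['--embed', 'hello.py'])
+#     # options is an Options.CompilationOptions; sources == ['hello.py']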
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..2e86cbf140c7935e9e8d7e6560328edb1c271b8e
Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.cp39-win_amd64.pyd differ
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.pxd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..e915c6fea3534b4fed1e4df99093f4c3a4721082
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.pxd
@@ -0,0 +1,131 @@
+# cython: language_level=3
+
+cimport cython
+from ..StringIOTree cimport StringIOTree
+
+
+cdef class UtilityCodeBase(object):
+ cpdef format_code(self, code_string, replace_empty_lines=*)
+
+
+cdef class UtilityCode(UtilityCodeBase):
+ cdef public object name
+ cdef public object proto
+ cdef public object impl
+ cdef public object init
+ cdef public object cleanup
+ cdef public object proto_block
+ cdef public object requires
+ cdef public dict _cache
+ cdef public list specialize_list
+ cdef public object file
+
+ cpdef none_or_sub(self, s, context)
+
+
+cdef class FunctionState:
+ cdef public set names_taken
+ cdef public object owner
+ cdef public object scope
+
+ cdef public object error_label
+ cdef public size_t label_counter
+ cdef public set labels_used
+ cdef public object return_label
+ cdef public object continue_label
+ cdef public object break_label
+ cdef public list yield_labels
+
+ cdef public object return_from_error_cleanup_label # not used in __init__ ?
+
+ cdef public object exc_vars
+ cdef public object current_except
+ cdef public bint in_try_finally
+ cdef public bint can_trace
+ cdef public bint gil_owned
+
+ cdef public list temps_allocated
+ cdef public dict temps_free
+ cdef public dict temps_used_type
+ cdef public set zombie_temps
+ cdef public size_t temp_counter
+ cdef public list collect_temps_stack
+
+ cdef public object closure_temps
+ cdef public bint should_declare_error_indicator
+ cdef public bint uses_error_indicator
+ cdef public bint error_without_exception
+
+ cdef public bint needs_refnanny
+
+ @cython.locals(n=size_t)
+ cpdef new_label(self, name=*)
+ cpdef tuple get_loop_labels(self)
+ cpdef set_loop_labels(self, labels)
+ cpdef tuple get_all_labels(self)
+ cpdef set_all_labels(self, labels)
+ cpdef start_collecting_temps(self)
+ cpdef stop_collecting_temps(self)
+
+ cpdef list temps_in_use(self)
+
+cdef class IntConst:
+ cdef public object cname
+ cdef public object value
+ cdef public bint is_long
+
+cdef class PyObjectConst:
+ cdef public object cname
+ cdef public object type
+
+cdef class StringConst:
+ cdef public object cname
+ cdef public object text
+ cdef public object escaped_value
+ cdef public dict py_strings
+ cdef public list py_versions
+
+ @cython.locals(intern=bint, is_str=bint, is_unicode=bint)
+ cpdef get_py_string_const(self, encoding, identifier=*, is_str=*, py3str_cstring=*)
+
+## cdef class PyStringConst:
+## cdef public object cname
+## cdef public object encoding
+## cdef public bint is_str
+## cdef public bint is_unicode
+## cdef public bint intern
+
+#class GlobalState(object):
+
+#def funccontext_property(name):
+
+cdef class CCodeWriter(object):
+ cdef readonly StringIOTree buffer
+ cdef readonly list pyclass_stack
+ cdef readonly object globalstate
+ cdef readonly object funcstate
+ cdef object code_config
+ cdef object last_pos
+ cdef object last_marked_pos
+ cdef Py_ssize_t level
+ cdef public Py_ssize_t call_level # debug-only, see Nodes.py
+ cdef bint bol
+
+ cpdef write(self, s)
+ @cython.final
+ cdef _write_lines(self, s)
+ cpdef _write_to_buffer(self, s)
+ cpdef put(self, code)
+ cpdef put_safe(self, code)
+ cpdef putln(self, code=*, bint safe=*)
+ @cython.final
+ cdef increase_indent(self)
+ @cython.final
+ cdef decrease_indent(self)
+ @cython.final
+ cdef indent(self)
+
+
+cdef class PyrexCodeWriter:
+ cdef public object f
+ cdef public Py_ssize_t level
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bc39ef7d8bbca7253a50bded9920b5ff1146a96
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Code.py
@@ -0,0 +1,2725 @@
+# cython: language_level=3str
+# cython: auto_pickle=False
+#
+# Code output module
+#
+
+from __future__ import absolute_import
+
+import cython
+cython.declare(os=object, re=object, operator=object, textwrap=object,
+ Template=object, Naming=object, Options=object, StringEncoding=object,
+ Utils=object, SourceDescriptor=object, StringIOTree=object,
+ DebugFlags=object, basestring=object, defaultdict=object,
+ closing=object, partial=object)
+
+import hashlib
+import operator
+import os
+import re
+import shutil
+import textwrap
+from string import Template
+from functools import partial
+from contextlib import closing, contextmanager
+from collections import defaultdict
+
+from . import Naming
+from . import Options
+from . import DebugFlags
+from . import StringEncoding
+from .. import Utils
+from .Scanning import SourceDescriptor
+from ..StringIOTree import StringIOTree
+
+try:
+ from __builtin__ import basestring
+except ImportError:
+ from builtins import str as basestring
+
+
+non_portable_builtins_map = {
+ # builtins that have different names in different Python versions
+ 'bytes' : ('PY_MAJOR_VERSION < 3', 'str'),
+ 'unicode' : ('PY_MAJOR_VERSION >= 3', 'str'),
+ 'basestring' : ('PY_MAJOR_VERSION >= 3', 'str'),
+ 'xrange' : ('PY_MAJOR_VERSION >= 3', 'range'),
+ 'raw_input' : ('PY_MAJOR_VERSION >= 3', 'input'),
+}
+
+ctypedef_builtins_map = {
+ # types of builtins in "ctypedef class" statements which we don't
+ # import either because the names conflict with C types or because
+ # the type simply is not exposed.
+ 'py_int' : '&PyInt_Type',
+ 'py_long' : '&PyLong_Type',
+ 'py_float' : '&PyFloat_Type',
+ 'wrapper_descriptor' : '&PyWrapperDescr_Type',
+}
+
+basicsize_builtins_map = {
+ # builtins whose type has a different tp_basicsize than sizeof(...)
+ 'PyTypeObject': 'PyHeapTypeObject',
+}
+
+uncachable_builtins = [
+ # Global/builtin names that cannot be cached because they may or may not
+ # be available at import time, for various reasons:
+ ## - Py3.7+
+ 'breakpoint', # might deserve an implementation in Cython
+ ## - Py3.4+
+ '__loader__',
+ '__spec__',
+ ## - Py3+
+ 'BlockingIOError',
+ 'BrokenPipeError',
+ 'ChildProcessError',
+ 'ConnectionAbortedError',
+ 'ConnectionError',
+ 'ConnectionRefusedError',
+ 'ConnectionResetError',
+ 'FileExistsError',
+ 'FileNotFoundError',
+ 'InterruptedError',
+ 'IsADirectoryError',
+ 'ModuleNotFoundError',
+ 'NotADirectoryError',
+ 'PermissionError',
+ 'ProcessLookupError',
+ 'RecursionError',
+ 'ResourceWarning',
+ #'StopAsyncIteration', # backported
+ 'TimeoutError',
+ '__build_class__',
+ 'ascii', # might deserve an implementation in Cython
+ #'exec', # implemented in Cython
+ ## - platform specific
+ 'WindowsError',
+ ## - others
+ '_', # e.g. used by gettext
+]
+
+special_py_methods = cython.declare(frozenset, frozenset((
+ '__cinit__', '__dealloc__', '__richcmp__', '__next__',
+ '__await__', '__aiter__', '__anext__',
+ '__getreadbuffer__', '__getwritebuffer__', '__getsegcount__',
+ '__getcharbuffer__', '__getbuffer__', '__releasebuffer__',
+)))
+
+modifier_output_mapper = {
+ 'inline': 'CYTHON_INLINE'
+}.get
+
+
+class IncludeCode(object):
+ """
+ An include file and/or verbatim C code to be included in the
+ generated sources.
+ """
+ # attributes:
+ #
+ # pieces {order: unicode}: pieces of C code to be generated.
+ # For the included file, the key "order" is zero.
+ # For verbatim include code, the "order" is the "order"
+ # attribute of the original IncludeCode where this piece
+ # of C code was first added. This is needed to prevent
+ # duplication if the same include code is found through
+ # multiple cimports.
+ # location int: where to put this include in the C sources, one
+ # of the constants INITIAL, EARLY, LATE
+ # order int: sorting order (automatically set by increasing counter)
+
+ # Constants for location. If the same include occurs with different
+    # locations, the earliest one takes precedence.
+ INITIAL = 0
+ EARLY = 1
+ LATE = 2
+
+ counter = 1 # Counter for "order"
+
+ def __init__(self, include=None, verbatim=None, late=True, initial=False):
+ self.order = self.counter
+ type(self).counter += 1
+ self.pieces = {}
+
+ if include:
+ if include[0] == '<' and include[-1] == '>':
+ self.pieces[0] = u'#include {0}'.format(include)
+ late = False # system include is never late
+ else:
+ self.pieces[0] = u'#include "{0}"'.format(include)
+
+ if verbatim:
+ self.pieces[self.order] = verbatim
+
+ if initial:
+ self.location = self.INITIAL
+ elif late:
+ self.location = self.LATE
+ else:
+ self.location = self.EARLY
+
+ def dict_update(self, d, key):
+ """
+ Insert `self` in dict `d` with key `key`. If that key already
+ exists, update the attributes of the existing value with `self`.
+ """
+ if key in d:
+ other = d[key]
+ other.location = min(self.location, other.location)
+ other.pieces.update(self.pieces)
+ else:
+ d[key] = self
+
+ def sortkey(self):
+ return self.order
+
+ def mainpiece(self):
+ """
+ Return the main piece of C code, corresponding to the include
+ file. If there was no include file, return None.
+ """
+ return self.pieces.get(0)
+
+ def write(self, code):
+ # Write values of self.pieces dict, sorted by the keys
+ for k in sorted(self.pieces):
+ code.putln(self.pieces[k])
+
+
+def get_utility_dir():
+    # make this a function rather than a global variable:
+ # http://trac.cython.org/cython_trac/ticket/475
+ Cython_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ return os.path.join(Cython_dir, "Utility")
+
+read_utilities_hook = None
+"""
+Override the hook for reading a utilities file that contains code fragments used
+by the codegen.
+
+The hook function takes the path of the utilities file and returns a list
+of strings, one per line.
+
+The default behavior is to open a file relative to get_utility_dir().
+"""
+
+def read_utilities_from_utility_dir(path):
+ """
+    Read all lines of the file at the given path, interpreted relative
+    to get_utility_dir().
+ """
+ filename = os.path.join(get_utility_dir(), path)
+ with closing(Utils.open_source_file(filename, encoding='UTF-8')) as f:
+ return f.readlines()
+
+# by default, read utilities from the utility directory.
+read_utilities_hook = read_utilities_from_utility_dir
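+# Hedged sketch of overriding the hook, e.g. to serve utilities from an
+# in-memory mapping ('UTILITY_SOURCES' is an assumed, illustrative name):
+#
+#     UTILITY_SOURCES = {"Builtins.c": "..."}
+#
+#     def read_utilities_from_memory(path):
+#         return UTILITY_SOURCES[path].splitlines(True)  # keep line endings
+#
+#     read_utilities_hook = read_utilities_from_memory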
+
+class UtilityCodeBase(object):
+ """
+ Support for loading utility code from a file.
+
+ Code sections in the file can be specified as follows:
+
+ ##### MyUtility.proto #####
+
+ [proto declarations]
+
+ ##### MyUtility.init #####
+
+ [code run at module initialization]
+
+ ##### MyUtility #####
+ #@requires: MyOtherUtility
+ #@substitute: naming
+
+ [definitions]
+
+ ##### MyUtility #####
+ #@substitute: tempita
+
+ [requires tempita substitution
+ - context can't be specified here though so only
+ tempita utility that requires no external context
+ will benefit from this tag
+ - only necessary when @required from non-tempita code]
+
+    for prototypes and implementation respectively. For non-Python/Cython
+    files, forward slashes ('//') are used as the comment character instead.
+    5 to 30 comment characters may be used on either side.
+
+ If the @cname decorator is not used and this is a CythonUtilityCode,
+ one should pass in the 'name' keyword argument to be used for name
+ mangling of such entries.
+ """
+
+ is_cython_utility = False
+ _utility_cache = {}
+
+ @classmethod
+ def _add_utility(cls, utility, type, lines, begin_lineno, tags=None):
+ if utility is None:
+ return
+
+ code = '\n'.join(lines)
+ if tags and 'substitute' in tags and 'naming' in tags['substitute']:
+ try:
+ code = Template(code).substitute(vars(Naming))
+ except (KeyError, ValueError) as e:
+ raise RuntimeError("Error parsing templated utility code of type '%s' at line %d: %s" % (
+ type, begin_lineno, e))
+
+ # remember correct line numbers at least until after templating
+ code = '\n' * begin_lineno + code
+
+ if type == 'proto':
+ utility[0] = code
+ elif type == 'impl':
+ utility[1] = code
+ else:
+ all_tags = utility[2]
+ all_tags[type] = code
+
+ if tags:
+ all_tags = utility[2]
+ for name, values in tags.items():
+ all_tags.setdefault(name, set()).update(values)
+
+ @classmethod
+ def load_utilities_from_file(cls, path):
+ utilities = cls._utility_cache.get(path)
+ if utilities:
+ return utilities
+
+ _, ext = os.path.splitext(path)
+ if ext in ('.pyx', '.py', '.pxd', '.pxi'):
+ comment = '#'
+ strip_comments = partial(re.compile(r'^\s*#(?!\s*cython\s*:).*').sub, '')
+ rstrip = StringEncoding._unicode.rstrip
+ else:
+ comment = '/'
+ strip_comments = partial(re.compile(r'^\s*//.*|/\*[^*]*\*/').sub, '')
+ rstrip = partial(re.compile(r'\s+(\\?)$').sub, r'\1')
+ match_special = re.compile(
+            (r'^%(C)s{5,30}\s*(?P<name>(?:\w|\.)+)\s*%(C)s{5,30}|'
+             r'^%(C)s+@(?P<tag>\w+)\s*:\s*(?P<value>(?:\w|[.:])+)') %
+ {'C': comment}).match
+ match_type = re.compile(r'(.+)[.](proto(?:[.]\S+)?|impl|init|cleanup)$').match
+
+ all_lines = read_utilities_hook(path)
+
+ utilities = defaultdict(lambda: [None, None, {}])
+ lines = []
+ tags = defaultdict(set)
+ utility = type = None
+ begin_lineno = 0
+
+ for lineno, line in enumerate(all_lines):
+ m = match_special(line)
+ if m:
+ if m.group('name'):
+ cls._add_utility(utility, type, lines, begin_lineno, tags)
+
+ begin_lineno = lineno + 1
+ del lines[:]
+ tags.clear()
+
+ name = m.group('name')
+ mtype = match_type(name)
+ if mtype:
+ name, type = mtype.groups()
+ else:
+ type = 'impl'
+ utility = utilities[name]
+ else:
+ tags[m.group('tag')].add(m.group('value'))
+ lines.append('') # keep line number correct
+ else:
+ lines.append(rstrip(strip_comments(line)))
+
+ if utility is None:
+ raise ValueError("Empty utility code file")
+
+ # Don't forget to add the last utility code
+ cls._add_utility(utility, type, lines, begin_lineno, tags)
+
+ utilities = dict(utilities) # un-defaultdict-ify
+ cls._utility_cache[path] = utilities
+ return utilities
+
+ @classmethod
+ def load(cls, util_code_name, from_file, **kwargs):
+ """
+ Load utility code from a file specified by from_file (relative to
+ Cython/Utility) and name util_code_name.
+ """
+
+ if '::' in util_code_name:
+ from_file, util_code_name = util_code_name.rsplit('::', 1)
+ assert from_file
+ utilities = cls.load_utilities_from_file(from_file)
+ proto, impl, tags = utilities[util_code_name]
+
+ if tags:
+ if "substitute" in tags and "tempita" in tags["substitute"]:
+ if not issubclass(cls, TempitaUtilityCode):
+ return TempitaUtilityCode.load(util_code_name, from_file, **kwargs)
+ orig_kwargs = kwargs.copy()
+ for name, values in tags.items():
+ if name in kwargs:
+ continue
+                # only pass lists when we have to: most arguments expect one value or None
+ if name == 'requires':
+ if orig_kwargs:
+ values = [cls.load(dep, from_file, **orig_kwargs)
+ for dep in sorted(values)]
+ else:
+ # dependencies are rarely unique, so use load_cached() when we can
+ values = [cls.load_cached(dep, from_file)
+ for dep in sorted(values)]
+ elif name == 'substitute':
+ # don't want to pass "naming" or "tempita" to the constructor
+ # since these will have been handled
+ values = values - {'naming', 'tempita'}
+ if not values:
+ continue
+ elif not values:
+ values = None
+ elif len(values) == 1:
+ values = list(values)[0]
+ kwargs[name] = values
+
+ if proto is not None:
+ kwargs['proto'] = proto
+ if impl is not None:
+ kwargs['impl'] = impl
+
+ if 'name' not in kwargs:
+ kwargs['name'] = util_code_name
+
+ if 'file' not in kwargs and from_file:
+ kwargs['file'] = from_file
+ return cls(**kwargs)
+
+ @classmethod
+ def load_cached(cls, utility_code_name, from_file, __cache={}):
+ """
+ Calls .load(), but using a per-type cache based on utility name and file name.
+ """
+ key = (utility_code_name, from_file, cls)
+ try:
+ return __cache[key]
+ except KeyError:
+ pass
+ code = __cache[key] = cls.load(utility_code_name, from_file)
+ return code
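+    # Hedged illustration: repeated load_cached("TypeConversions",
+    # "TypeConversion.c") calls hand back one shared instance; the key
+    # includes cls, so e.g. TempitaUtilityCode caches separately.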
+
+ @classmethod
+ def load_as_string(cls, util_code_name, from_file, **kwargs):
+ """
+ Load a utility code as a string. Returns (proto, implementation)
+ """
+ util = cls.load(util_code_name, from_file, **kwargs)
+ proto, impl = util.proto, util.impl
+ return util.format_code(proto), util.format_code(impl)
+
+ def format_code(self, code_string, replace_empty_lines=re.compile(r'\n\n+').sub):
+ """
+ Format a code section for output.
+ """
+ if code_string:
+ code_string = replace_empty_lines('\n', code_string.strip()) + '\n\n'
+ return code_string
+
+ def __str__(self):
+ return "<%s(%s)>" % (type(self).__name__, self.name)
+
+ def get_tree(self, **kwargs):
+ return None
+
+ def __deepcopy__(self, memodict=None):
+ # No need to deep-copy utility code since it's essentially immutable.
+ return self
+
+
+class UtilityCode(UtilityCodeBase):
+ """
+ Stores utility code to add during code generation.
+
+ See GlobalState.put_utility_code.
+
+ hashes/equals by instance
+
+ proto C prototypes
+ impl implementation code
+ init code to call on module initialization
+ requires utility code dependencies
+ proto_block the place in the resulting file where the prototype should
+ end up
+ name name of the utility code (or None)
+ file filename of the utility code file this utility was loaded
+ from (or None)
+ """
+
+ def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None,
+ proto_block='utility_code_proto', name=None, file=None):
+ # proto_block: Which code block to dump prototype in. See GlobalState.
+ self.proto = proto
+ self.impl = impl
+ self.init = init
+ self.cleanup = cleanup
+ self.requires = requires
+ self._cache = {}
+ self.specialize_list = []
+ self.proto_block = proto_block
+ self.name = name
+ self.file = file
+
+ def __hash__(self):
+ return hash((self.proto, self.impl))
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ self_type, other_type = type(self), type(other)
+ if self_type is not other_type and not (isinstance(other, self_type) or isinstance(self, other_type)):
+ return False
+
+ self_proto = getattr(self, 'proto', None)
+ other_proto = getattr(other, 'proto', None)
+ return (self_proto, self.impl) == (other_proto, other.impl)
+
+ def none_or_sub(self, s, context):
+ """
+ Format a string in this utility code with context. If None, do nothing.
+ """
+ if s is None:
+ return None
+ return s % context
+
+ def specialize(self, pyrex_type=None, **data):
+ name = self.name
+ if pyrex_type is not None:
+ data['type'] = pyrex_type.empty_declaration_code()
+ data['type_name'] = pyrex_type.specialization_name()
+ name = "%s[%s]" % (name, data['type_name'])
+ # Dicts aren't hashable...
+ key = tuple(sorted(data.items()))
+ try:
+ return self._cache[key]
+ except KeyError:
+ if self.requires is None:
+ requires = None
+ else:
+ requires = [r.specialize(data) for r in self.requires]
+
+ s = self._cache[key] = UtilityCode(
+ self.none_or_sub(self.proto, data),
+ self.none_or_sub(self.impl, data),
+ self.none_or_sub(self.init, data),
+ self.none_or_sub(self.cleanup, data),
+ requires,
+ self.proto_block,
+ name,
+ )
+
+ self.specialize_list.append(s)
+ return s
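+    # Hedged example: for a utility whose proto/impl contain '%(type)s'
+    # placeholders, util.specialize(some_type) substitutes the type's
+    # declaration code and caches one UtilityCode per argument set.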
+
+ def inject_string_constants(self, impl, output):
+ """Replace 'PYIDENT("xyz")' by a constant Python identifier cname.
+ """
+ if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl:
+ return False, impl
+
+ replacements = {}
+ def externalise(matchobj):
+ key = matchobj.groups()
+ try:
+ cname = replacements[key]
+ except KeyError:
+ str_type, name = key
+ cname = replacements[key] = output.get_py_string_const(
+ StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname
+ return cname
+
+ impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl)
+ assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl
+ return True, impl
+
+ def inject_unbound_methods(self, impl, output):
+ """Replace 'UNBOUND_METHOD(type, "name")' by a constant Python identifier cname.
+ """
+ if 'CALL_UNBOUND_METHOD(' not in impl:
+ return False, impl
+
+ def externalise(matchobj):
+ type_cname, method_name, obj_cname, args = matchobj.groups()
+ args = [arg.strip() for arg in args[1:].split(',')] if args else []
+ assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args)
+ return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args)
+
+ impl = re.sub(
+ r'CALL_UNBOUND_METHOD\('
+ r'([a-zA-Z_]+),' # type cname
+ r'\s*"([^"]+)",' # method name
+ r'\s*([^),]+)' # object cname
+ r'((?:,[^),]+)*)' # args*
+ r'\)', externalise, impl)
+ assert 'CALL_UNBOUND_METHOD(' not in impl
+
+ return True, impl
+
+ def wrap_c_strings(self, impl):
+ """Replace CSTRING('''xyz''') by a C compatible string
+ """
+ if 'CSTRING(' not in impl:
+ return impl
+
+ def split_string(matchobj):
+ content = matchobj.group(1).replace('"', '\042')
+ return ''.join(
+ '"%s\\n"\n' % line if not line.endswith('\\') or line.endswith('\\\\') else '"%s"\n' % line[:-1]
+ for line in content.splitlines())
+
+ impl = re.sub(r'CSTRING\(\s*"""([^"]*(?:"[^"]+)*)"""\s*\)', split_string, impl)
+ assert 'CSTRING(' not in impl
+ return impl
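+    # Hedged illustration: a CSTRING('''...''') block with the two lines
+    # 'ab' and 'cd' expands to the adjacent C literals "ab\n" "cd\n"; a
+    # line ending in a single backslash is joined to the next line instead.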
+
+ def put_code(self, output):
+ if self.requires:
+ for dependency in self.requires:
+ output.use_utility_code(dependency)
+ if self.proto:
+ writer = output[self.proto_block]
+ writer.putln("/* %s.proto */" % self.name)
+ writer.put_or_include(
+ self.format_code(self.proto), '%s_proto' % self.name)
+ if self.impl:
+ impl = self.format_code(self.wrap_c_strings(self.impl))
+ is_specialised1, impl = self.inject_string_constants(impl, output)
+ is_specialised2, impl = self.inject_unbound_methods(impl, output)
+ writer = output['utility_code_def']
+ writer.putln("/* %s */" % self.name)
+ if not (is_specialised1 or is_specialised2):
+ # no module specific adaptations => can be reused
+ writer.put_or_include(impl, '%s_impl' % self.name)
+ else:
+ writer.put(impl)
+ if self.init:
+ writer = output['init_globals']
+ writer.putln("/* %s.init */" % self.name)
+ if isinstance(self.init, basestring):
+ writer.put(self.format_code(self.init))
+ else:
+ self.init(writer, output.module_pos)
+ # 'init' code can end with an 'if' statement for an error condition like:
+ # if (check_ok()) ; else
+ writer.putln(writer.error_goto_if_PyErr(output.module_pos))
+ writer.putln()
+ if self.cleanup and Options.generate_cleanup_code:
+ writer = output['cleanup_globals']
+ writer.putln("/* %s.cleanup */" % self.name)
+ if isinstance(self.cleanup, basestring):
+ writer.put_or_include(
+ self.format_code(self.cleanup),
+ '%s_cleanup' % self.name)
+ else:
+ self.cleanup(writer, output.module_pos)
+
+
+def sub_tempita(s, context, file=None, name=None):
+ "Run tempita on string s with given context."
+ if not s:
+ return None
+
+ if file:
+ context['__name'] = "%s:%s" % (file, name)
+ elif name:
+ context['__name'] = name
+
+ from ..Tempita import sub
+ return sub(s, **context)
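+# Hedged example of the substitution:
+#
+#     sub_tempita("static {{type}} {{name}};", {'type': 'int', 'name': 'value'})
+#     # -> "static int value;"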
+
+
+class TempitaUtilityCode(UtilityCode):
+ def __init__(self, name=None, proto=None, impl=None, init=None, file=None, context=None, **kwargs):
+ if context is None:
+ context = {}
+ proto = sub_tempita(proto, context, file, name)
+ impl = sub_tempita(impl, context, file, name)
+ init = sub_tempita(init, context, file, name)
+ super(TempitaUtilityCode, self).__init__(
+ proto, impl, init=init, name=name, file=file, **kwargs)
+
+ @classmethod
+ def load_cached(cls, utility_code_name, from_file=None, context=None, __cache={}):
+ context_key = tuple(sorted(context.items())) if context else None
+ assert hash(context_key) is not None # raise TypeError if not hashable
+ key = (cls, from_file, utility_code_name, context_key)
+ try:
+ return __cache[key]
+ except KeyError:
+ pass
+ code = __cache[key] = cls.load(utility_code_name, from_file, context=context)
+ return code
+
+ def none_or_sub(self, s, context):
+ """
+ Format a string in this utility code with context. If None, do nothing.
+ """
+ if s is None:
+ return None
+ return sub_tempita(s, context, self.file, self.name)
+
+
+class LazyUtilityCode(UtilityCodeBase):
+ """
+ Utility code that calls a callback with the root code writer when
+ available. Useful when you only have 'env' but not 'code'.
+ """
+ __name__ = ''
+ requires = None
+
+ def __init__(self, callback):
+ self.callback = callback
+
+ def put_code(self, globalstate):
+ utility = self.callback(globalstate.rootwriter)
+ globalstate.use_utility_code(utility)
+
+
+class FunctionState(object):
+ # return_label string function return point label
+ # error_label string error catch point label
+ # error_without_exception boolean Can go to the error label without an exception (e.g. __next__ can return NULL)
+ # continue_label string loop continue point label
+ # break_label string loop break point label
+ # return_from_error_cleanup_label string
+ # label_counter integer counter for naming labels
+ # in_try_finally boolean inside try of try...finally
+ # exc_vars (string * 3) exception variables for reraise, or None
+ # can_trace boolean line tracing is supported in the current context
+ # scope Scope the scope object of the current function
+
+ # Not used for now, perhaps later
+ def __init__(self, owner, names_taken=set(), scope=None):
+ self.names_taken = names_taken
+ self.owner = owner
+ self.scope = scope
+
+ self.error_label = None
+ self.label_counter = 0
+ self.labels_used = set()
+ self.return_label = self.new_label()
+ self.new_error_label()
+ self.continue_label = None
+ self.break_label = None
+ self.yield_labels = []
+
+ self.in_try_finally = 0
+ self.exc_vars = None
+ self.current_except = None
+ self.can_trace = False
+ self.gil_owned = True
+
+ self.temps_allocated = [] # of (name, type, manage_ref, static)
+ self.temps_free = {} # (type, manage_ref) -> list of free vars with same type/managed status
+ self.temps_used_type = {} # name -> (type, manage_ref)
+ self.zombie_temps = set() # temps that must not be reused after release
+ self.temp_counter = 0
+ self.closure_temps = None
+
+ # This is used to collect temporaries, useful to find out which temps
+ # need to be privatized in parallel sections
+ self.collect_temps_stack = []
+
+ # This is used for the error indicator, which needs to be local to the
+ # function. It used to be global, which relies on the GIL being held.
+ # However, exceptions may need to be propagated through 'nogil'
+ # sections, in which case we introduce a race condition.
+ self.should_declare_error_indicator = False
+ self.uses_error_indicator = False
+
+ self.error_without_exception = False
+
+ self.needs_refnanny = False
+
+ # safety checks
+
+ def validate_exit(self):
+ # validate that all allocated temps have been freed
+ if self.temps_allocated:
+ leftovers = self.temps_in_use()
+ if leftovers:
+ msg = "TEMPGUARD: Temps left over at end of '%s': %s" % (self.scope.name, ', '.join([
+ '%s [%s]' % (name, ctype)
+ for name, ctype, is_pytemp in sorted(leftovers)]),
+ )
+ #print(msg)
+ raise RuntimeError(msg)
+
+ # labels
+
+ def new_label(self, name=None):
+ n = self.label_counter
+ self.label_counter = n + 1
+ label = "%s%d" % (Naming.label_prefix, n)
+ if name is not None:
+ label += '_' + name
+ return label
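+    # Hedged illustration: with Naming.label_prefix '__pyx_L', the labels
+    # produced here look like '__pyx_L1_error' or '__pyx_L2_continue'
+    # (the exact number depends on the running counter).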
+
+ def new_yield_label(self, expr_type='yield'):
+ label = self.new_label('resume_from_%s' % expr_type)
+ num_and_label = (len(self.yield_labels) + 1, label)
+ self.yield_labels.append(num_and_label)
+ return num_and_label
+
+ def new_error_label(self, prefix=""):
+ old_err_lbl = self.error_label
+ self.error_label = self.new_label(prefix + 'error')
+ return old_err_lbl
+
+ def get_loop_labels(self):
+ return (
+ self.continue_label,
+ self.break_label)
+
+ def set_loop_labels(self, labels):
+ (self.continue_label,
+ self.break_label) = labels
+
+ def new_loop_labels(self, prefix=""):
+ old_labels = self.get_loop_labels()
+ self.set_loop_labels(
+ (self.new_label(prefix + "continue"),
+ self.new_label(prefix + "break")))
+ return old_labels
+
+ def get_all_labels(self):
+ return (
+ self.continue_label,
+ self.break_label,
+ self.return_label,
+ self.error_label)
+
+ def set_all_labels(self, labels):
+ (self.continue_label,
+ self.break_label,
+ self.return_label,
+ self.error_label) = labels
+
+ def all_new_labels(self):
+ old_labels = self.get_all_labels()
+ new_labels = []
+ for old_label, name in zip(old_labels, ['continue', 'break', 'return', 'error']):
+ if old_label:
+ new_labels.append(self.new_label(name))
+ else:
+ new_labels.append(old_label)
+ self.set_all_labels(new_labels)
+ return old_labels
+
+ def use_label(self, lbl):
+ self.labels_used.add(lbl)
+
+ def label_used(self, lbl):
+ return lbl in self.labels_used
+
+ # temp handling
+
+ def allocate_temp(self, type, manage_ref, static=False, reusable=True):
+ """
+ Allocates a temporary (which may create a new one or get a previously
+ allocated and released one of the same type). Type is simply registered
+ and handed back, but will usually be a PyrexType.
+
+ If type.needs_refcounting, manage_ref comes into play. If manage_ref is set to
+ True, the temp will be decref-ed on return statements and in exception
+ handling clauses. Otherwise the caller has to deal with any reference
+ counting of the variable.
+
+ If not type.needs_refcounting, then manage_ref will be ignored, but it
+ still has to be passed. It is recommended to pass False by convention
+ if it is known that type will never be a reference counted type.
+
+ static=True marks the temporary declaration with "static".
+        This is only used when allocating backing store for module-level
+        C array literals.
+
+        If reusable=False, the temp will not be reused after release.
+
+ A C string referring to the variable is returned.
+ """
+ if type.is_cv_qualified and not type.is_reference:
+ type = type.cv_base_type
+ elif type.is_reference and not type.is_fake_reference:
+ type = type.ref_base_type
+ elif type.is_cfunction:
+ from . import PyrexTypes
+ type = PyrexTypes.c_ptr_type(type) # A function itself isn't an l-value
+ elif type.is_cpp_class and not type.is_fake_reference and self.scope.directives['cpp_locals']:
+ self.scope.use_utility_code(UtilityCode.load_cached("OptionalLocals", "CppSupport.cpp"))
+ if not type.needs_refcounting:
+ # Make manage_ref canonical, so that manage_ref will always mean
+ # a decref is needed.
+ manage_ref = False
+
+ freelist = self.temps_free.get((type, manage_ref))
+ if reusable and freelist is not None and freelist[0]:
+ result = freelist[0].pop()
+ freelist[1].remove(result)
+ else:
+ while True:
+ self.temp_counter += 1
+ result = "%s%d" % (Naming.codewriter_temp_prefix, self.temp_counter)
+ if result not in self.names_taken: break
+ self.temps_allocated.append((result, type, manage_ref, static))
+ if not reusable:
+ self.zombie_temps.add(result)
+ self.temps_used_type[result] = (type, manage_ref)
+ if DebugFlags.debug_temp_code_comments:
+ self.owner.putln("/* %s allocated (%s)%s */" % (result, type, "" if reusable else " - zombie"))
+
+ if self.collect_temps_stack:
+ self.collect_temps_stack[-1].add((result, type))
+
+ return result
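+    # Hedged illustration: allocate_temp(py_object_type, manage_ref=True)
+    # hands back a cname like '__pyx_t_3' (Naming.codewriter_temp_prefix
+    # plus the counter) and records it for release_temp() and reuse.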
+
+ def release_temp(self, name):
+ """
+ Releases a temporary so that it can be reused by other code needing
+ a temp of the same type.
+ """
+ type, manage_ref = self.temps_used_type[name]
+ freelist = self.temps_free.get((type, manage_ref))
+ if freelist is None:
+ freelist = ([], set()) # keep order in list and make lookups in set fast
+ self.temps_free[(type, manage_ref)] = freelist
+ if name in freelist[1]:
+ raise RuntimeError("Temp %s freed twice!" % name)
+ if name not in self.zombie_temps:
+ freelist[0].append(name)
+ freelist[1].add(name)
+ if DebugFlags.debug_temp_code_comments:
+ self.owner.putln("/* %s released %s*/" % (
+ name, " - zombie" if name in self.zombie_temps else ""))
+
+ def temps_in_use(self):
+ """Return a list of (cname,type,manage_ref) tuples of temp names and their type
+ that are currently in use.
+ """
+ used = []
+ for name, type, manage_ref, static in self.temps_allocated:
+ freelist = self.temps_free.get((type, manage_ref))
+ if freelist is None or name not in freelist[1]:
+ used.append((name, type, manage_ref and type.needs_refcounting))
+ return used
+
+ def temps_holding_reference(self):
+ """Return a list of (cname,type) tuples of temp names and their type
+ that are currently in use. This includes only temps
+ with a reference counted type which owns its reference.
+ """
+ return [(name, type)
+ for name, type, manage_ref in self.temps_in_use()
+ if manage_ref and type.needs_refcounting]
+
+ def all_managed_temps(self):
+ """Return a list of (cname, type) tuples of refcount-managed Python objects.
+ """
+ return [(cname, type)
+ for cname, type, manage_ref, static in self.temps_allocated
+ if manage_ref]
+
+ def all_free_managed_temps(self):
+ """Return a list of (cname, type) tuples of refcount-managed Python
+ objects that are not currently in use. This is used by
+ try-except and try-finally blocks to clean up temps in the
+ error case.
+ """
+ return sorted([ # Enforce deterministic order.
+ (cname, type)
+ for (type, manage_ref), freelist in self.temps_free.items() if manage_ref
+ for cname in freelist[0]
+ ])
+
+ def start_collecting_temps(self):
+ """
+ Useful to find out which temps were used in a code block
+ """
+ self.collect_temps_stack.append(set())
+
+ def stop_collecting_temps(self):
+ return self.collect_temps_stack.pop()
+
+ def init_closure_temps(self, scope):
+ self.closure_temps = ClosureTempAllocator(scope)
+
+
+class NumConst(object):
+ """Global info about a Python number constant held by GlobalState.
+
+ cname string
+ value string
+ py_type string int, long, float
+ value_code string evaluation code if different from value
+ """
+
+ def __init__(self, cname, value, py_type, value_code=None):
+ self.cname = cname
+ self.value = value
+ self.py_type = py_type
+ self.value_code = value_code or value
+
+
+class PyObjectConst(object):
+ """Global info about a generic constant held by GlobalState.
+ """
+ # cname string
+ # type PyrexType
+
+ def __init__(self, cname, type):
+ self.cname = cname
+ self.type = type
+
+
+cython.declare(possible_unicode_identifier=object, possible_bytes_identifier=object,
+ replace_identifier=object, find_alphanums=object)
+possible_unicode_identifier = re.compile(br"(?![0-9])\w+$".decode('ascii'), re.U).match
+possible_bytes_identifier = re.compile(r"(?![0-9])\w+$".encode('ASCII')).match
+replace_identifier = re.compile(r'[^a-zA-Z0-9_]+').sub
+find_alphanums = re.compile('([a-zA-Z0-9]+)').findall
+
+class StringConst(object):
+ """Global info about a C string constant held by GlobalState.
+ """
+ # cname string
+ # text EncodedString or BytesLiteral
+ # py_strings {(identifier, encoding) : PyStringConst}
+
+ def __init__(self, cname, text, byte_string):
+ self.cname = cname
+ self.text = text
+ self.escaped_value = StringEncoding.escape_byte_string(byte_string)
+ self.py_strings = None
+ self.py_versions = []
+
+ def add_py_version(self, version):
+ if not version:
+ self.py_versions = [2, 3]
+ elif version not in self.py_versions:
+ self.py_versions.append(version)
+
+ def get_py_string_const(self, encoding, identifier=None,
+ is_str=False, py3str_cstring=None):
+ py_strings = self.py_strings
+ text = self.text
+
+ is_str = bool(identifier or is_str)
+ is_unicode = encoding is None and not is_str
+
+ if encoding is None:
+ # unicode string
+ encoding_key = None
+ else:
+ # bytes or str
+ encoding = encoding.lower()
+ if encoding in ('utf8', 'utf-8', 'ascii', 'usascii', 'us-ascii'):
+ encoding = None
+ encoding_key = None
+ else:
+ encoding_key = ''.join(find_alphanums(encoding))
+
+ key = (is_str, is_unicode, encoding_key, py3str_cstring)
+ if py_strings is not None:
+ try:
+ return py_strings[key]
+ except KeyError:
+ pass
+ else:
+ self.py_strings = {}
+
+ if identifier:
+ intern = True
+ elif identifier is None:
+ if isinstance(text, bytes):
+ intern = bool(possible_bytes_identifier(text))
+ else:
+ intern = bool(possible_unicode_identifier(text))
+ else:
+ intern = False
+ if intern:
+ prefix = Naming.interned_prefixes['str']
+ else:
+ prefix = Naming.py_const_prefix
+
+ if encoding_key:
+ encoding_prefix = '_%s' % encoding_key
+ else:
+ encoding_prefix = ''
+
+ pystring_cname = "%s%s%s_%s" % (
+ prefix,
+ (is_str and 's') or (is_unicode and 'u') or 'b',
+ encoding_prefix,
+ self.cname[len(Naming.const_prefix):])
+
+ py_string = PyStringConst(
+ pystring_cname, encoding, is_unicode, is_str, py3str_cstring, intern)
+ self.py_strings[key] = py_string
+ return py_string
+
+class PyStringConst(object):
+ """Global info about a Python string constant held by GlobalState.
+ """
+ # cname string
+ # py3str_cstring string
+ # encoding string
+ # intern boolean
+ # is_unicode boolean
+ # is_str boolean
+
+ def __init__(self, cname, encoding, is_unicode, is_str=False,
+ py3str_cstring=None, intern=False):
+ self.cname = cname
+ self.py3str_cstring = py3str_cstring
+ self.encoding = encoding
+ self.is_str = is_str
+ self.is_unicode = is_unicode
+ self.intern = intern
+
+ def __lt__(self, other):
+ return self.cname < other.cname
+
+
+class GlobalState(object):
+ # filename_table {string : int} for finding filename table indexes
+ # filename_list [string] filenames in filename table order
+ # input_file_contents dict contents (=list of lines) of any file that was used as input
+ # to create this output C code. This is
+ # used to annotate the comments.
+ #
+ # utility_codes set IDs of used utility code (to avoid reinsertion)
+ #
+ # declared_cnames {string:Entry} used in a transition phase to merge pxd-declared
+ # constants etc. into the pyx-declared ones (i.e,
+ # check if constants are already added).
+ # In time, hopefully the literals etc. will be
+ # supplied directly instead.
+ #
+ # const_cnames_used dict global counter for unique constant identifiers
+ #
+
+ # parts {string:CCodeWriter}
+
+
+ # interned_strings
+ # consts
+ # interned_nums
+
+ # directives set Temporary variable used to track
+ # the current set of directives in the code generation
+ # process.
+
+ directives = {}
+
+ code_layout = [
+ 'h_code',
+ 'filename_table',
+ 'utility_code_proto_before_types',
+        'numeric_typedefs', # Let these detailed individual parts stay!
+ 'complex_type_declarations', # as the proper solution is to make a full DAG...
+ 'type_declarations', # More coarse-grained blocks would simply hide
+ 'utility_code_proto', # the ugliness, not fix it
+ 'module_declarations',
+ 'typeinfo',
+ 'before_global_var',
+ 'global_var',
+ 'string_decls',
+ 'decls',
+ 'late_includes',
+ 'module_state',
+ 'module_state_clear',
+ 'module_state_traverse',
+ 'module_state_defines', # redefines names used in module_state/_clear/_traverse
+ 'module_code', # user code goes here
+ 'pystring_table',
+ 'cached_builtins',
+ 'cached_constants',
+ 'init_constants',
+ 'init_globals', # (utility code called at init-time)
+ 'init_module',
+ 'cleanup_globals',
+ 'cleanup_module',
+ 'main_method',
+ 'utility_code_pragmas', # silence some irrelevant warnings in utility code
+ 'utility_code_def',
+ 'utility_code_pragmas_end', # clean-up the utility_code_pragmas
+ 'end'
+ ]
+
+ # h files can only have a much smaller list of sections
+ h_code_layout = [
+ 'h_code',
+ 'utility_code_proto_before_types',
+ 'type_declarations',
+ 'utility_code_proto',
+ 'end'
+ ]
+
+ def __init__(self, writer, module_node, code_config, common_utility_include_dir=None):
+ self.filename_table = {}
+ self.filename_list = []
+ self.input_file_contents = {}
+ self.utility_codes = set()
+ self.declared_cnames = {}
+ self.in_utility_code_generation = False
+ self.code_config = code_config
+ self.common_utility_include_dir = common_utility_include_dir
+ self.parts = {}
+ self.module_node = module_node # because some utility code generation needs it
+        # (generating backwards-compatible Get/ReleaseBuffer)
+
+ self.const_cnames_used = {}
+ self.string_const_index = {}
+ self.dedup_const_index = {}
+ self.pyunicode_ptr_const_index = {}
+ self.num_const_index = {}
+ self.py_constants = []
+ self.cached_cmethods = {}
+ self.initialised_constants = set()
+
+ writer.set_global_state(self)
+ self.rootwriter = writer
+
+ def initialize_main_c_code(self):
+ rootwriter = self.rootwriter
+ for i, part in enumerate(self.code_layout):
+ w = self.parts[part] = rootwriter.insertion_point()
+ if i > 0:
+ w.putln("/* #### Code section: %s ### */" % part)
+
+ if not Options.cache_builtins:
+ del self.parts['cached_builtins']
+ else:
+ w = self.parts['cached_builtins']
+ w.enter_cfunc_scope()
+ w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {")
+
+ w = self.parts['cached_constants']
+ w.enter_cfunc_scope()
+ w.putln("")
+ w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {")
+ w.put_declare_refcount_context()
+ w.put_setup_refcount_context(StringEncoding.EncodedString("__Pyx_InitCachedConstants"))
+
+ w = self.parts['init_globals']
+ w.enter_cfunc_scope()
+ w.putln("")
+ w.putln("static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {")
+
+ w = self.parts['init_constants']
+ w.enter_cfunc_scope()
+ w.putln("")
+ w.putln("static CYTHON_SMALL_CODE int __Pyx_InitConstants(void) {")
+
+ if not Options.generate_cleanup_code:
+ del self.parts['cleanup_globals']
+ else:
+ w = self.parts['cleanup_globals']
+ w.enter_cfunc_scope()
+ w.putln("")
+ w.putln("static CYTHON_SMALL_CODE void __Pyx_CleanupGlobals(void) {")
+
+ code = self.parts['utility_code_proto']
+ code.putln("")
+ code.putln("/* --- Runtime support code (head) --- */")
+
+ code = self.parts['utility_code_def']
+ if self.code_config.emit_linenums:
+ code.write('\n#line 1 "cython_utility"\n')
+ code.putln("")
+ code.putln("/* --- Runtime support code --- */")
+
+ def initialize_main_h_code(self):
+ rootwriter = self.rootwriter
+ for part in self.h_code_layout:
+ self.parts[part] = rootwriter.insertion_point()
+
+ def finalize_main_c_code(self):
+ self.close_global_decls()
+
+ #
+ # utility_code_def
+ #
+ code = self.parts['utility_code_def']
+ util = TempitaUtilityCode.load_cached("TypeConversions", "TypeConversion.c")
+ code.put(util.format_code(util.impl))
+ code.putln("")
+
+ #
+ # utility code pragmas
+ #
+ code = self.parts['utility_code_pragmas']
+ util = UtilityCode.load_cached("UtilityCodePragmas", "ModuleSetupCode.c")
+ code.putln(util.format_code(util.impl))
+ code.putln("")
+ code = self.parts['utility_code_pragmas_end']
+ util = UtilityCode.load_cached("UtilityCodePragmasEnd", "ModuleSetupCode.c")
+ code.putln(util.format_code(util.impl))
+ code.putln("")
+
+ def __getitem__(self, key):
+ return self.parts[key]
+
+ #
+ # Global constants, interned objects, etc.
+ #
+ def close_global_decls(self):
+        # This is called when it is known that no more global declarations
+        # will be declared.
+ self.generate_const_declarations()
+ if Options.cache_builtins:
+ w = self.parts['cached_builtins']
+ w.putln("return 0;")
+ if w.label_used(w.error_label):
+ w.put_label(w.error_label)
+ w.putln("return -1;")
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ w = self.parts['cached_constants']
+ w.put_finish_refcount_context()
+ w.putln("return 0;")
+ if w.label_used(w.error_label):
+ w.put_label(w.error_label)
+ w.put_finish_refcount_context()
+ w.putln("return -1;")
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ for part in ['init_globals', 'init_constants']:
+ w = self.parts[part]
+ w.putln("return 0;")
+ if w.label_used(w.error_label):
+ w.put_label(w.error_label)
+ w.putln("return -1;")
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ if Options.generate_cleanup_code:
+ w = self.parts['cleanup_globals']
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ if Options.generate_cleanup_code:
+ w = self.parts['cleanup_module']
+ w.putln("}")
+ w.exit_cfunc_scope()
+
+ def put_pyobject_decl(self, entry):
+ self['global_var'].putln("static PyObject *%s;" % entry.cname)
+
+ # constant handling at code generation time
+
+ def get_cached_constants_writer(self, target=None):
+ if target is not None:
+ if target in self.initialised_constants:
+ # Return None on second/later calls to prevent duplicate creation code.
+ return None
+ self.initialised_constants.add(target)
+ return self.parts['cached_constants']
+
+ def get_int_const(self, str_value, longness=False):
+ py_type = longness and 'long' or 'int'
+ try:
+ c = self.num_const_index[(str_value, py_type)]
+ except KeyError:
+ c = self.new_num_const(str_value, py_type)
+ return c
+
+ def get_float_const(self, str_value, value_code):
+ try:
+ c = self.num_const_index[(str_value, 'float')]
+ except KeyError:
+ c = self.new_num_const(str_value, 'float', value_code)
+ return c
+
+ def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
+ if dedup_key is not None:
+ const = self.dedup_const_index.get(dedup_key)
+ if const is not None:
+ return const
+ # create a new Python object constant
+ const = self.new_py_const(type, prefix)
+ if (cleanup_level is not None
+ and cleanup_level <= Options.generate_cleanup_code
+ # Note that this function is used for all argument defaults
+ # which aren't just Python objects
+ and type.needs_refcounting):
+ cleanup_writer = self.parts['cleanup_globals']
+ cleanup_writer.putln('Py_CLEAR(%s);' % const.cname)
+ if dedup_key is not None:
+ self.dedup_const_index[dedup_key] = const
+ return const
+
+ def get_string_const(self, text, py_version=None):
+ # return a C string constant, creating a new one if necessary
+ if text.is_unicode:
+ byte_string = text.utf8encode()
+ else:
+ byte_string = text.byteencode()
+ try:
+ c = self.string_const_index[byte_string]
+ except KeyError:
+ c = self.new_string_const(text, byte_string)
+ c.add_py_version(py_version)
+ return c
+
+ def get_pyunicode_ptr_const(self, text):
+ # return a Py_UNICODE[] constant, creating a new one if necessary
+ assert text.is_unicode
+ try:
+ c = self.pyunicode_ptr_const_index[text]
+ except KeyError:
+ c = self.pyunicode_ptr_const_index[text] = self.new_const_cname()
+ return c
+
+ def get_py_string_const(self, text, identifier=None,
+ is_str=False, unicode_value=None):
+ # return a Python string constant, creating a new one if necessary
+ py3str_cstring = None
+ if is_str and unicode_value is not None \
+ and unicode_value.utf8encode() != text.byteencode():
+ py3str_cstring = self.get_string_const(unicode_value, py_version=3)
+ c_string = self.get_string_const(text, py_version=2)
+ else:
+ c_string = self.get_string_const(text)
+ py_string = c_string.get_py_string_const(
+ text.encoding, identifier, is_str, py3str_cstring)
+ return py_string
+
+ def get_interned_identifier(self, text):
+ return self.get_py_string_const(text, identifier=True)
+
+ def new_string_const(self, text, byte_string):
+ cname = self.new_string_const_cname(byte_string)
+ c = StringConst(cname, text, byte_string)
+ self.string_const_index[byte_string] = c
+ return c
+
+ def new_num_const(self, value, py_type, value_code=None):
+ cname = self.new_num_const_cname(value, py_type)
+ c = NumConst(cname, value, py_type, value_code)
+ self.num_const_index[(value, py_type)] = c
+ return c
+
+ def new_py_const(self, type, prefix=''):
+ cname = self.new_const_cname(prefix)
+ c = PyObjectConst(cname, type)
+ self.py_constants.append(c)
+ return c
+
+ def new_string_const_cname(self, bytes_value):
+ # Create a new globally-unique nice name for a C string constant.
+ value = bytes_value.decode('ASCII', 'ignore')
+ return self.new_const_cname(value=value)
+
+ def unique_const_cname(self, format_str): # type: (str) -> str
+ used = self.const_cnames_used
+ cname = value = format_str.format(sep='', counter='')
+ while cname in used:
+ counter = used[value] = used[value] + 1
+ cname = format_str.format(sep='_', counter=counter)
+ used[cname] = 1
+ return cname
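+
+    # Illustration (added for exposition, not part of the original source):
+    # repeated requests for the same pattern yield distinct names because the
+    # counter in const_cnames_used advances, e.g. (hypothetical GlobalState "gs"):
+    #
+    #     gs.unique_const_cname("n{sep}{counter}")  # -> "n"
+    #     gs.unique_const_cname("n{sep}{counter}")  # -> "n_2"
+    #     gs.unique_const_cname("n{sep}{counter}")  # -> "n_3"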
+
+ def new_num_const_cname(self, value, py_type): # type: (str, str) -> str
+ if py_type == 'long':
+ value += 'L'
+ py_type = 'int'
+ prefix = Naming.interned_prefixes[py_type]
+
+ value = value.replace('.', '_').replace('+', '_').replace('-', 'neg_')
+ if len(value) > 42:
+            # update tests/run/large_integer_T5290.py in case this limit is changed
+ cname = self.unique_const_cname(
+ prefix + "large{counter}_" + value[:18] + "_xxx_" + value[-18:])
+ else:
+ cname = "%s%s" % (prefix, value)
+ return cname
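+
+    # Illustration (added for exposition): assuming the usual interned prefixes
+    # from Naming (e.g. '__pyx_int_' and '__pyx_float_'), this yields names like
+    #
+    #     new_num_const_cname('42', 'int')     # -> "__pyx_int_42"
+    #     new_num_const_cname('-1', 'int')     # -> "__pyx_int_neg_1"
+    #     new_num_const_cname('2.5', 'float')  # -> "__pyx_float_2_5"
+    #
+    # Values longer than 42 characters are shortened to a unique
+    # "large{counter}_<head>_xxx_<tail>" form instead.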
+
+ def new_const_cname(self, prefix='', value=''):
+ value = replace_identifier('_', value)[:32].strip('_')
+ name_suffix = self.unique_const_cname(value + "{sep}{counter}")
+ if prefix:
+ prefix = Naming.interned_prefixes[prefix]
+ else:
+ prefix = Naming.const_prefix
+ return "%s%s" % (prefix, name_suffix)
+
+ def get_cached_unbound_method(self, type_cname, method_name):
+ key = (type_cname, method_name)
+ try:
+ cname = self.cached_cmethods[key]
+ except KeyError:
+ cname = self.cached_cmethods[key] = self.new_const_cname(
+ 'umethod', '%s_%s' % (type_cname, method_name))
+ return cname
+
+ def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames):
+ # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ...
+ utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames)
+ self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c"))
+ cache_cname = self.get_cached_unbound_method(type_cname, method_name)
+ args = [obj_cname] + arg_cnames
+ return "__Pyx_%s(&%s, %s)" % (
+ utility_code_name,
+ cache_cname,
+ ', '.join(args),
+ )
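+
+    # Illustration (added for exposition, names are hypothetical): a dict
+    # lookup with two extra arguments expands roughly to
+    #
+    #     cached_unbound_method_call_code('o', 'PyDict_Type', 'get', ['k', 'd'])
+    #     # -> '__Pyx_CallUnboundCMethod2(&__pyx_umethod_PyDict_Type_get, o, k, d)'
+    #
+    # assuming '__pyx_umethod_' is the interned prefix for 'umethod' in Naming.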
+
+ def add_cached_builtin_decl(self, entry):
+ if entry.is_builtin and entry.is_const:
+ if self.should_declare(entry.cname, entry):
+ self.put_pyobject_decl(entry)
+ w = self.parts['cached_builtins']
+ condition = None
+ if entry.name in non_portable_builtins_map:
+ condition, replacement = non_portable_builtins_map[entry.name]
+ w.putln('#if %s' % condition)
+ self.put_cached_builtin_init(
+ entry.pos, StringEncoding.EncodedString(replacement),
+ entry.cname)
+ w.putln('#else')
+ self.put_cached_builtin_init(
+ entry.pos, StringEncoding.EncodedString(entry.name),
+ entry.cname)
+ if condition:
+ w.putln('#endif')
+
+ def put_cached_builtin_init(self, pos, name, cname):
+ w = self.parts['cached_builtins']
+ interned_cname = self.get_interned_identifier(name).cname
+ self.use_utility_code(
+ UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
+ w.putln('%s = __Pyx_GetBuiltinName(%s); if (!%s) %s' % (
+ cname,
+ interned_cname,
+ cname,
+ w.error_goto(pos)))
+
+ def generate_const_declarations(self):
+ self.generate_cached_methods_decls()
+ self.generate_string_constants()
+ self.generate_num_constants()
+ self.generate_object_constant_decls()
+
+ def generate_object_constant_decls(self):
+ consts = [(len(c.cname), c.cname, c)
+ for c in self.py_constants]
+ consts.sort()
+ for _, cname, c in consts:
+ self.parts['module_state'].putln("%s;" % c.type.declaration_code(cname))
+ self.parts['module_state_defines'].putln(
+ "#define %s %s->%s" % (cname, Naming.modulestateglobal_cname, cname))
+ if not c.type.needs_refcounting:
+ # Note that py_constants is used for all argument defaults
+ # which aren't necessarily PyObjects, so aren't appropriate
+ # to clear.
+ continue
+ self.parts['module_state_clear'].putln(
+ "Py_CLEAR(clear_module_state->%s);" % cname)
+ self.parts['module_state_traverse'].putln(
+ "Py_VISIT(traverse_module_state->%s);" % cname)
+
+ def generate_cached_methods_decls(self):
+ if not self.cached_cmethods:
+ return
+
+ decl = self.parts['decls']
+ init = self.parts['init_constants']
+ cnames = []
+ for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()):
+ cnames.append(cname)
+ method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname
+ decl.putln('static __Pyx_CachedCFunction %s = {0, 0, 0, 0, 0};' % (
+ cname))
+ # split type reference storage as it might not be static
+ init.putln('%s.type = (PyObject*)&%s;' % (
+ cname, type_cname))
+        # the method name string isn't static in the Limited API
+ init.putln('%s.method_name = &%s;' % (
+ cname, method_name_cname))
+
+ if Options.generate_cleanup_code:
+ cleanup = self.parts['cleanup_globals']
+ for cname in cnames:
+ cleanup.putln("Py_CLEAR(%s.method);" % cname)
+
+ def generate_string_constants(self):
+ c_consts = [(len(c.cname), c.cname, c) for c in self.string_const_index.values()]
+ c_consts.sort()
+ py_strings = []
+
+ decls_writer = self.parts['string_decls']
+ for _, cname, c in c_consts:
+ conditional = False
+ if c.py_versions and (2 not in c.py_versions or 3 not in c.py_versions):
+ conditional = True
+ decls_writer.putln("#if PY_MAJOR_VERSION %s 3" % (
+ (2 in c.py_versions) and '<' or '>='))
+ decls_writer.putln('static const char %s[] = "%s";' % (
+ cname, StringEncoding.split_string_literal(c.escaped_value)))
+ if conditional:
+ decls_writer.putln("#endif")
+ if c.py_strings is not None:
+ for py_string in c.py_strings.values():
+ py_strings.append((c.cname, len(py_string.cname), py_string))
+
+ for c, cname in sorted(self.pyunicode_ptr_const_index.items()):
+ utf16_array, utf32_array = StringEncoding.encode_pyunicode_string(c)
+ if utf16_array:
+ # Narrow and wide representations differ
+ decls_writer.putln("#ifdef Py_UNICODE_WIDE")
+ decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf32_array))
+ if utf16_array:
+ decls_writer.putln("#else")
+ decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf16_array))
+ decls_writer.putln("#endif")
+
+ init_constants = self.parts['init_constants']
+ if py_strings:
+ self.use_utility_code(UtilityCode.load_cached("InitStrings", "StringTools.c"))
+ py_strings.sort()
+ w = self.parts['pystring_table']
+ w.putln("")
+ w.putln("static int __Pyx_CreateStringTabAndInitStrings(void) {")
+ # the stringtab is a function local rather than a global to
+ # ensure that it doesn't conflict with module state
+ w.putln("__Pyx_StringTabEntry %s[] = {" % Naming.stringtab_cname)
+ for py_string_args in py_strings:
+ c_cname, _, py_string = py_string_args
+ if not py_string.is_str or not py_string.encoding or \
+ py_string.encoding in ('ASCII', 'USASCII', 'US-ASCII',
+ 'UTF8', 'UTF-8'):
+ encoding = '0'
+ else:
+ encoding = '"%s"' % py_string.encoding.lower()
+
+ self.parts['module_state'].putln("PyObject *%s;" % py_string.cname)
+ self.parts['module_state_defines'].putln("#define %s %s->%s" % (
+ py_string.cname,
+ Naming.modulestateglobal_cname,
+ py_string.cname))
+ self.parts['module_state_clear'].putln("Py_CLEAR(clear_module_state->%s);" %
+ py_string.cname)
+ self.parts['module_state_traverse'].putln("Py_VISIT(traverse_module_state->%s);" %
+ py_string.cname)
+ if py_string.py3str_cstring:
+ w.putln("#if PY_MAJOR_VERSION >= 3")
+ w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
+ py_string.cname,
+ py_string.py3str_cstring.cname,
+ py_string.py3str_cstring.cname,
+ '0', 1, 0,
+ py_string.intern
+ ))
+ w.putln("#else")
+ w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
+ py_string.cname,
+ c_cname,
+ c_cname,
+ encoding,
+ py_string.is_unicode,
+ py_string.is_str,
+ py_string.intern
+ ))
+ if py_string.py3str_cstring:
+ w.putln("#endif")
+ w.putln("{0, 0, 0, 0, 0, 0, 0}")
+ w.putln("};")
+ w.putln("return __Pyx_InitStrings(%s);" % Naming.stringtab_cname)
+ w.putln("}")
+
+ init_constants.putln(
+ "if (__Pyx_CreateStringTabAndInitStrings() < 0) %s;" %
+ init_constants.error_goto(self.module_pos))
+
+ def generate_num_constants(self):
+ consts = [(c.py_type, c.value[0] == '-', len(c.value), c.value, c.value_code, c)
+ for c in self.num_const_index.values()]
+ consts.sort()
+ init_constants = self.parts['init_constants']
+ for py_type, _, _, value, value_code, c in consts:
+ cname = c.cname
+ self.parts['module_state'].putln("PyObject *%s;" % cname)
+ self.parts['module_state_defines'].putln("#define %s %s->%s" % (
+ cname, Naming.modulestateglobal_cname, cname))
+ self.parts['module_state_clear'].putln(
+ "Py_CLEAR(clear_module_state->%s);" % cname)
+ self.parts['module_state_traverse'].putln(
+ "Py_VISIT(traverse_module_state->%s);" % cname)
+ if py_type == 'float':
+ function = 'PyFloat_FromDouble(%s)'
+ elif py_type == 'long':
+ function = 'PyLong_FromString((char *)"%s", 0, 0)'
+ elif Utils.long_literal(value):
+ function = 'PyInt_FromString((char *)"%s", 0, 0)'
+ elif len(value.lstrip('-')) > 4:
+ function = "PyInt_FromLong(%sL)"
+ else:
+ function = "PyInt_FromLong(%s)"
+ init_constants.putln('%s = %s; %s' % (
+ cname, function % value_code,
+ init_constants.error_goto_if_null(cname, self.module_pos)))
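+
+    # Illustration (added for exposition): for a small int constant this emits
+    # an init line roughly like (error-goto details depend on the module position):
+    #
+    #     __pyx_int_42 = PyInt_FromLong(42); if (unlikely(!__pyx_int_42)) __PYX_ERR(...)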
+
+ # The functions below are there in a transition phase only
+ # and will be deprecated. They are called from Nodes.BlockNode.
+ # The copy&paste duplication is intentional in order to be able
+ # to see quickly how BlockNode worked, until this is replaced.
+
+ def should_declare(self, cname, entry):
+ if cname in self.declared_cnames:
+ other = self.declared_cnames[cname]
+ assert str(entry.type) == str(other.type)
+ assert entry.init == other.init
+ return False
+ else:
+ self.declared_cnames[cname] = entry
+ return True
+
+ #
+ # File name state
+ #
+
+ def lookup_filename(self, source_desc):
+ entry = source_desc.get_filenametable_entry()
+ try:
+ index = self.filename_table[entry]
+ except KeyError:
+ index = len(self.filename_list)
+ self.filename_list.append(source_desc)
+ self.filename_table[entry] = index
+ return index
+
+ def commented_file_contents(self, source_desc):
+ try:
+ return self.input_file_contents[source_desc]
+ except KeyError:
+ pass
+ source_file = source_desc.get_lines(encoding='ASCII',
+ error_handling='ignore')
+ try:
+ F = [u' * ' + line.rstrip().replace(
+ u'*/', u'*[inserted by cython to avoid comment closer]/'
+ ).replace(
+ u'/*', u'/[inserted by cython to avoid comment start]*'
+ )
+ for line in source_file]
+ finally:
+ if hasattr(source_file, 'close'):
+ source_file.close()
+ if not F: F.append(u'')
+ self.input_file_contents[source_desc] = F
+ return F
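+
+    # Illustration (added for exposition): a source line containing C comment
+    # markers is defused before being embedded in a C comment, e.g.
+    #
+    #     "a = 1  /* note */"
+    #
+    # becomes
+    #
+    #     " * a = 1  /[inserted by cython to avoid comment start]* note *[inserted by cython to avoid comment closer]/"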
+
+ #
+ # Utility code state
+ #
+
+ def use_utility_code(self, utility_code):
+ """
+ Adds code to the C file. utility_code should
+ a) implement __eq__/__hash__ for the purpose of knowing whether the same
+ code has already been included
+ b) implement put_code, which takes a globalstate instance
+
+ See UtilityCode.
+ """
+ if utility_code and utility_code not in self.utility_codes:
+ self.utility_codes.add(utility_code)
+ utility_code.put_code(self)
+
+ def use_entry_utility_code(self, entry):
+ if entry is None:
+ return
+ if entry.utility_code:
+ self.use_utility_code(entry.utility_code)
+ if entry.utility_code_definition:
+ self.use_utility_code(entry.utility_code_definition)
+
+
+def funccontext_property(func):
+ name = func.__name__
+ attribute_of = operator.attrgetter(name)
+ def get(self):
+ return attribute_of(self.funcstate)
+ def set(self, value):
+ setattr(self.funcstate, name, value)
+ return property(get, set)
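+
+# Illustration (added for exposition): used as a decorator,
+#
+#     @funccontext_property
+#     def error_label(self): pass
+#
+# makes "writer.error_label" read and write "writer.funcstate.error_label".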
+
+
+class CCodeConfig(object):
+ # emit_linenums boolean write #line pragmas?
+ # emit_code_comments boolean copy the original code into C comments?
+ # c_line_in_traceback boolean append the c file and line number to the traceback for exceptions?
+
+ def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True):
+ self.emit_code_comments = emit_code_comments
+ self.emit_linenums = emit_linenums
+ self.c_line_in_traceback = c_line_in_traceback
+
+
+class CCodeWriter(object):
+ """
+ Utility class to output C code.
+
+    When creating an insertion point one must take care about the state that is
+ kept:
+ - formatting state (level, bol) is cloned and used in insertion points
+ as well
+ - labels, temps, exc_vars: One must construct a scope in which these can
+ exist by calling enter_cfunc_scope/exit_cfunc_scope (these are for
+ sanity checking and forward compatibility). Created insertion points
+      lose this scope and cannot access it.
+ - marker: Not copied to insertion point
+ - filename_table, filename_list, input_file_contents: All codewriters
+ coming from the same root share the same instances simultaneously.
+ """
+
+ # f file output file
+ # buffer StringIOTree
+
+ # level int indentation level
+ # bol bool beginning of line?
+ # marker string comment to emit before next line
+ # funcstate FunctionState contains state local to a C function used for code
+ # generation (labels and temps state etc.)
+ # globalstate GlobalState contains state global for a C file (input file info,
+ # utility code, declared constants etc.)
+ # pyclass_stack list used during recursive code generation to pass information
+ # about the current class one is in
+ # code_config CCodeConfig configuration options for the C code writer
+
+ @cython.locals(create_from='CCodeWriter')
+ def __init__(self, create_from=None, buffer=None, copy_formatting=False):
+ if buffer is None: buffer = StringIOTree()
+ self.buffer = buffer
+ self.last_pos = None
+ self.last_marked_pos = None
+ self.pyclass_stack = []
+
+ self.funcstate = None
+ self.globalstate = None
+ self.code_config = None
+ self.level = 0
+ self.call_level = 0
+ self.bol = 1
+
+ if create_from is not None:
+ # Use same global state
+ self.set_global_state(create_from.globalstate)
+ self.funcstate = create_from.funcstate
+ # Clone formatting state
+ if copy_formatting:
+ self.level = create_from.level
+ self.bol = create_from.bol
+ self.call_level = create_from.call_level
+ self.last_pos = create_from.last_pos
+ self.last_marked_pos = create_from.last_marked_pos
+
+ def create_new(self, create_from, buffer, copy_formatting):
+ # polymorphic constructor -- very slightly more versatile
+ # than using __class__
+ result = CCodeWriter(create_from, buffer, copy_formatting)
+ return result
+
+ def set_global_state(self, global_state):
+ assert self.globalstate is None # prevent overwriting once it's set
+ self.globalstate = global_state
+ self.code_config = global_state.code_config
+
+ def copyto(self, f):
+ self.buffer.copyto(f)
+
+ def getvalue(self):
+ return self.buffer.getvalue()
+
+ def write(self, s):
+ if '\n' in s:
+ self._write_lines(s)
+ else:
+ self._write_to_buffer(s)
+
+ def _write_lines(self, s):
+ # Cygdb needs to know which Cython source line corresponds to which C line.
+ # Therefore, we write this information into "self.buffer.markers" and then write it from there
+ # into cython_debug/cython_debug_info_* (see ModuleNode._serialize_lineno_map).
+ filename_line = self.last_marked_pos[:2] if self.last_marked_pos else (None, 0)
+ self.buffer.markers.extend([filename_line] * s.count('\n'))
+
+ self._write_to_buffer(s)
+
+ def _write_to_buffer(self, s):
+ self.buffer.write(s)
+
+ def insertion_point(self):
+ other = self.create_new(create_from=self, buffer=self.buffer.insertion_point(), copy_formatting=True)
+ return other
+
+ def new_writer(self):
+ """
+ Creates a new CCodeWriter connected to the same global state, which
+ can later be inserted using insert.
+ """
+ return CCodeWriter(create_from=self)
+
+ def insert(self, writer):
+ """
+ Inserts the contents of another code writer (created with
+ the same global state) in the current location.
+
+ It is ok to write to the inserted writer also after insertion.
+ """
+ assert writer.globalstate is self.globalstate
+ self.buffer.insert(writer.buffer)
+
+ # Properties delegated to function scope
+ @funccontext_property
+ def label_counter(self): pass
+ @funccontext_property
+ def return_label(self): pass
+ @funccontext_property
+ def error_label(self): pass
+ @funccontext_property
+ def labels_used(self): pass
+ @funccontext_property
+ def continue_label(self): pass
+ @funccontext_property
+ def break_label(self): pass
+ @funccontext_property
+ def return_from_error_cleanup_label(self): pass
+ @funccontext_property
+ def yield_labels(self): pass
+
+ def label_interceptor(self, new_labels, orig_labels, skip_to_label=None, pos=None, trace=True):
+ """
+ Helper for generating multiple label interceptor code blocks.
+
+ @param new_labels: the new labels that should be intercepted
+ @param orig_labels: the original labels that we should dispatch to after the interception
+ @param skip_to_label: a label to skip to before starting the code blocks
+ @param pos: the node position to mark for each interceptor block
+        @param trace: whether to add a trace line for the pos marker
+ """
+ for label, orig_label in zip(new_labels, orig_labels):
+ if not self.label_used(label):
+ continue
+ if skip_to_label:
+ # jump over the whole interception block
+ self.put_goto(skip_to_label)
+ skip_to_label = None
+
+ if pos is not None:
+ self.mark_pos(pos, trace=trace)
+ self.put_label(label)
+ yield (label, orig_label)
+ self.put_goto(orig_label)
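+
+    # Usage sketch (added for exposition): the caller writes the body of each
+    # interceptor block inside the loop, e.g.
+    #
+    #     for _, _ in code.label_interceptor(new_labels, old_labels):
+    #         code.putln("/* extra cleanup for this label */")
+    #
+    # The generator emits the intercepted label before yielding and the goto
+    # to the original label when the loop resumes it.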
+
+ # Functions delegated to function scope
+ def new_label(self, name=None): return self.funcstate.new_label(name)
+ def new_error_label(self, *args): return self.funcstate.new_error_label(*args)
+ def new_yield_label(self, *args): return self.funcstate.new_yield_label(*args)
+ def get_loop_labels(self): return self.funcstate.get_loop_labels()
+ def set_loop_labels(self, labels): return self.funcstate.set_loop_labels(labels)
+ def new_loop_labels(self, *args): return self.funcstate.new_loop_labels(*args)
+ def get_all_labels(self): return self.funcstate.get_all_labels()
+ def set_all_labels(self, labels): return self.funcstate.set_all_labels(labels)
+ def all_new_labels(self): return self.funcstate.all_new_labels()
+ def use_label(self, lbl): return self.funcstate.use_label(lbl)
+ def label_used(self, lbl): return self.funcstate.label_used(lbl)
+
+
+ def enter_cfunc_scope(self, scope=None):
+ self.funcstate = FunctionState(self, scope=scope)
+
+ def exit_cfunc_scope(self):
+ self.funcstate.validate_exit()
+ self.funcstate = None
+
+ # constant handling
+
+ def get_py_int(self, str_value, longness):
+ return self.globalstate.get_int_const(str_value, longness).cname
+
+ def get_py_float(self, str_value, value_code):
+ return self.globalstate.get_float_const(str_value, value_code).cname
+
+ def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
+ return self.globalstate.get_py_const(type, prefix, cleanup_level, dedup_key).cname
+
+ def get_string_const(self, text):
+ return self.globalstate.get_string_const(text).cname
+
+ def get_pyunicode_ptr_const(self, text):
+ return self.globalstate.get_pyunicode_ptr_const(text)
+
+ def get_py_string_const(self, text, identifier=None,
+ is_str=False, unicode_value=None):
+ return self.globalstate.get_py_string_const(
+ text, identifier, is_str, unicode_value).cname
+
+ def get_argument_default_const(self, type):
+ return self.globalstate.get_py_const(type).cname
+
+ def intern(self, text):
+ return self.get_py_string_const(text)
+
+ def intern_identifier(self, text):
+ return self.get_py_string_const(text, identifier=True)
+
+ def get_cached_constants_writer(self, target=None):
+ return self.globalstate.get_cached_constants_writer(target)
+
+ # code generation
+
+ def putln(self, code="", safe=False):
+ if self.last_pos and self.bol:
+ self.emit_marker()
+ if self.code_config.emit_linenums and self.last_marked_pos:
+ source_desc, line, _ = self.last_marked_pos
+ self._write_lines('\n#line %s "%s"\n' % (line, source_desc.get_escaped_description()))
+ if code:
+ if safe:
+ self.put_safe(code)
+ else:
+ self.put(code)
+ self._write_lines("\n")
+ self.bol = 1
+
+ def mark_pos(self, pos, trace=True):
+ if pos is None:
+ return
+ if self.last_marked_pos and self.last_marked_pos[:2] == pos[:2]:
+ return
+ self.last_pos = (pos, trace)
+
+ def emit_marker(self):
+ pos, trace = self.last_pos
+ self.last_marked_pos = pos
+ self.last_pos = None
+ self._write_lines("\n")
+ if self.code_config.emit_code_comments:
+ self.indent()
+ self._write_lines("/* %s */\n" % self._build_marker(pos))
+ if trace and self.funcstate and self.funcstate.can_trace and self.globalstate.directives['linetrace']:
+ self.indent()
+ self._write_lines('__Pyx_TraceLine(%d,%d,%s)\n' % (
+ pos[1], not self.funcstate.gil_owned, self.error_goto(pos)))
+
+ def _build_marker(self, pos):
+ source_desc, line, col = pos
+ assert isinstance(source_desc, SourceDescriptor)
+ contents = self.globalstate.commented_file_contents(source_desc)
+ lines = contents[max(0, line-3):line] # line numbers start at 1
+ lines[-1] += u' # <<<<<<<<<<<<<<'
+ lines += contents[line:line+2]
+ return u'"%s":%d\n%s\n' % (source_desc.get_escaped_description(), line, u'\n'.join(lines))
+
+ def put_safe(self, code):
+ # put code, but ignore {}
+ self.write(code)
+ self.bol = 0
+
+ def put_or_include(self, code, name):
+ include_dir = self.globalstate.common_utility_include_dir
+ if include_dir and len(code) > 1024:
+ include_file = "%s_%s.h" % (
+ name, hashlib.sha1(code.encode('utf8')).hexdigest())
+ path = os.path.join(include_dir, include_file)
+ if not os.path.exists(path):
+ tmp_path = '%s.tmp%s' % (path, os.getpid())
+ with closing(Utils.open_new_file(tmp_path)) as f:
+ f.write(code)
+ shutil.move(tmp_path, path)
+ code = '#include "%s"\n' % path
+ self.put(code)
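+
+    # Illustration (added for exposition): with a shared include directory set
+    # and code longer than 1024 characters, the snippet is written once to a
+    # content-addressed header, e.g. "TypeConversions_<sha1>.h" (name
+    # hypothetical), and replaced by an '#include' line. The write-to-temp-file
+    # plus shutil.move keeps parallel builds from seeing a half-written header.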
+
+ def put(self, code):
+ fix_indent = False
+ if "{" in code:
+ dl = code.count("{")
+ else:
+ dl = 0
+ if "}" in code:
+ dl -= code.count("}")
+ if dl < 0:
+ self.level += dl
+ elif dl == 0 and code[0] == "}":
+ # special cases like "} else {" need a temporary dedent
+ fix_indent = True
+ self.level -= 1
+ if self.bol:
+ self.indent()
+ self.write(code)
+ self.bol = 0
+ if dl > 0:
+ self.level += dl
+ elif fix_indent:
+ self.level += 1
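+
+    # Illustration (added for exposition): the brace counting adjusts the
+    # indentation level around the written code, e.g.
+    #
+    #     self.put("if (x) {")   # written at the current level, then level += 1
+    #     self.put("}")          # level -= 1 first, then written
+    #     self.put("} else {")   # balanced braces starting with '}': indented
+    #                            # one level less, level unchanged afterwards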
+
+ def putln_tempita(self, code, **context):
+ from ..Tempita import sub
+ self.putln(sub(code, **context))
+
+ def put_tempita(self, code, **context):
+ from ..Tempita import sub
+ self.put(sub(code, **context))
+
+ def increase_indent(self):
+ self.level += 1
+
+ def decrease_indent(self):
+ self.level -= 1
+
+ def begin_block(self):
+ self.putln("{")
+ self.increase_indent()
+
+ def end_block(self):
+ self.decrease_indent()
+ self.putln("}")
+
+ def indent(self):
+ self._write_to_buffer(" " * self.level)
+
+ def get_py_version_hex(self, pyversion):
+ return "0x%02X%02X%02X%02X" % (tuple(pyversion) + (0,0,0,0))[:4]
+
+ def put_label(self, lbl):
+ if lbl in self.funcstate.labels_used:
+ self.putln("%s:;" % lbl)
+
+ def put_goto(self, lbl):
+ self.funcstate.use_label(lbl)
+ self.putln("goto %s;" % lbl)
+
+ def put_var_declaration(self, entry, storage_class="",
+ dll_linkage=None, definition=True):
+ #print "Code.put_var_declaration:", entry.name, "definition =", definition ###
+ if entry.visibility == 'private' and not (definition or entry.defined_in_pxd):
+ #print "...private and not definition, skipping", entry.cname ###
+ return
+ if entry.visibility == "private" and not entry.used:
+ #print "...private and not used, skipping", entry.cname ###
+ return
+ if not entry.cf_used:
+ self.put('CYTHON_UNUSED ')
+ if storage_class:
+ self.put("%s " % storage_class)
+ if entry.is_cpp_optional:
+ self.put(entry.type.cpp_optional_declaration_code(
+ entry.cname, dll_linkage=dll_linkage))
+ else:
+ self.put(entry.type.declaration_code(
+ entry.cname, dll_linkage=dll_linkage))
+ if entry.init is not None:
+ self.put_safe(" = %s" % entry.type.literal_code(entry.init))
+ elif entry.type.is_pyobject:
+ self.put(" = NULL")
+ self.putln(";")
+ self.funcstate.scope.use_entry_utility_code(entry)
+
+ def put_temp_declarations(self, func_context):
+ for name, type, manage_ref, static in func_context.temps_allocated:
+ if type.is_cpp_class and not type.is_fake_reference and func_context.scope.directives['cpp_locals']:
+ decl = type.cpp_optional_declaration_code(name)
+ else:
+ decl = type.declaration_code(name)
+ if type.is_pyobject:
+ self.putln("%s = NULL;" % decl)
+ elif type.is_memoryviewslice:
+ self.putln("%s = %s;" % (decl, type.literal_code(type.default_value)))
+ else:
+ self.putln("%s%s;" % (static and "static " or "", decl))
+
+ if func_context.should_declare_error_indicator:
+ if self.funcstate.uses_error_indicator:
+ unused = ''
+ else:
+ unused = 'CYTHON_UNUSED '
+ # Initialize these variables to silence compiler warnings
+ self.putln("%sint %s = 0;" % (unused, Naming.lineno_cname))
+ self.putln("%sconst char *%s = NULL;" % (unused, Naming.filename_cname))
+ self.putln("%sint %s = 0;" % (unused, Naming.clineno_cname))
+
+ def put_generated_by(self):
+ self.putln(Utils.GENERATED_BY_MARKER)
+ self.putln("")
+
+ def put_h_guard(self, guard):
+ self.putln("#ifndef %s" % guard)
+ self.putln("#define %s" % guard)
+
+ def unlikely(self, cond):
+ if Options.gcc_branch_hints:
+ return 'unlikely(%s)' % cond
+ else:
+ return cond
+
+ def build_function_modifiers(self, modifiers, mapper=modifier_output_mapper):
+ if not modifiers:
+ return ''
+ return '%s ' % ' '.join([mapper(m,m) for m in modifiers])
+
+ # Python objects and reference counting
+
+ def entry_as_pyobject(self, entry):
+ type = entry.type
+ if (not entry.is_self_arg and not entry.type.is_complete()
+ or entry.type.is_extension_type):
+ return "(PyObject *)" + entry.cname
+ else:
+ return entry.cname
+
+ def as_pyobject(self, cname, type):
+ from .PyrexTypes import py_object_type, typecast
+ return typecast(py_object_type, type, cname)
+
+ def put_gotref(self, cname, type):
+ type.generate_gotref(self, cname)
+
+ def put_giveref(self, cname, type):
+ type.generate_giveref(self, cname)
+
+ def put_xgiveref(self, cname, type):
+ type.generate_xgiveref(self, cname)
+
+ def put_xgotref(self, cname, type):
+ type.generate_xgotref(self, cname)
+
+ def put_incref(self, cname, type, nanny=True):
+        # Note: the original put_Memslice_Incref/Decref also added some utility code.
+        # This is unnecessary since the relevant utility code is loaded anyway if a
+        # memoryview is used, and so it has been removed. However, it's potentially
+        # a feature that might be useful here.
+ type.generate_incref(self, cname, nanny=nanny)
+
+ def put_xincref(self, cname, type, nanny=True):
+ type.generate_xincref(self, cname, nanny=nanny)
+
+ def put_decref(self, cname, type, nanny=True, have_gil=True):
+ type.generate_decref(self, cname, nanny=nanny, have_gil=have_gil)
+
+ def put_xdecref(self, cname, type, nanny=True, have_gil=True):
+ type.generate_xdecref(self, cname, nanny=nanny, have_gil=have_gil)
+
+ def put_decref_clear(self, cname, type, clear_before_decref=False, nanny=True, have_gil=True):
+ type.generate_decref_clear(self, cname, clear_before_decref=clear_before_decref,
+ nanny=nanny, have_gil=have_gil)
+
+ def put_xdecref_clear(self, cname, type, clear_before_decref=False, nanny=True, have_gil=True):
+ type.generate_xdecref_clear(self, cname, clear_before_decref=clear_before_decref,
+ nanny=nanny, have_gil=have_gil)
+
+ def put_decref_set(self, cname, type, rhs_cname):
+ type.generate_decref_set(self, cname, rhs_cname)
+
+ def put_xdecref_set(self, cname, type, rhs_cname):
+ type.generate_xdecref_set(self, cname, rhs_cname)
+
+ def put_incref_memoryviewslice(self, slice_cname, type, have_gil):
+ # TODO ideally this would just be merged into "put_incref"
+ type.generate_incref_memoryviewslice(self, slice_cname, have_gil=have_gil)
+
+ def put_var_incref_memoryviewslice(self, entry, have_gil):
+ self.put_incref_memoryviewslice(entry.cname, entry.type, have_gil=have_gil)
+
+ def put_var_gotref(self, entry):
+ self.put_gotref(entry.cname, entry.type)
+
+ def put_var_giveref(self, entry):
+ self.put_giveref(entry.cname, entry.type)
+
+ def put_var_xgotref(self, entry):
+ self.put_xgotref(entry.cname, entry.type)
+
+ def put_var_xgiveref(self, entry):
+ self.put_xgiveref(entry.cname, entry.type)
+
+ def put_var_incref(self, entry, **kwds):
+ self.put_incref(entry.cname, entry.type, **kwds)
+
+ def put_var_xincref(self, entry, **kwds):
+ self.put_xincref(entry.cname, entry.type, **kwds)
+
+ def put_var_decref(self, entry, **kwds):
+ self.put_decref(entry.cname, entry.type, **kwds)
+
+ def put_var_xdecref(self, entry, **kwds):
+ self.put_xdecref(entry.cname, entry.type, **kwds)
+
+ def put_var_decref_clear(self, entry, **kwds):
+ self.put_decref_clear(entry.cname, entry.type, clear_before_decref=entry.in_closure, **kwds)
+
+ def put_var_decref_set(self, entry, rhs_cname, **kwds):
+ self.put_decref_set(entry.cname, entry.type, rhs_cname, **kwds)
+
+ def put_var_xdecref_set(self, entry, rhs_cname, **kwds):
+ self.put_xdecref_set(entry.cname, entry.type, rhs_cname, **kwds)
+
+ def put_var_xdecref_clear(self, entry, **kwds):
+ self.put_xdecref_clear(entry.cname, entry.type, clear_before_decref=entry.in_closure, **kwds)
+
+ def put_var_decrefs(self, entries, used_only = 0):
+ for entry in entries:
+ if not used_only or entry.used:
+ if entry.xdecref_cleanup:
+ self.put_var_xdecref(entry)
+ else:
+ self.put_var_decref(entry)
+
+ def put_var_xdecrefs(self, entries):
+ for entry in entries:
+ self.put_var_xdecref(entry)
+
+ def put_var_xdecrefs_clear(self, entries):
+ for entry in entries:
+ self.put_var_xdecref_clear(entry)
+
+ def put_init_to_py_none(self, cname, type, nanny=True):
+ from .PyrexTypes import py_object_type, typecast
+ py_none = typecast(type, py_object_type, "Py_None")
+ if nanny:
+ self.putln("%s = %s; __Pyx_INCREF(Py_None);" % (cname, py_none))
+ else:
+ self.putln("%s = %s; Py_INCREF(Py_None);" % (cname, py_none))
+
+ def put_init_var_to_py_none(self, entry, template = "%s", nanny=True):
+ code = template % entry.cname
+ #if entry.type.is_extension_type:
+ # code = "((PyObject*)%s)" % code
+ self.put_init_to_py_none(code, entry.type, nanny)
+ if entry.in_closure:
+ self.put_giveref('Py_None')
+
+ def put_pymethoddef(self, entry, term, allow_skip=True, wrapper_code_writer=None):
+ is_reverse_number_slot = False
+ if entry.is_special or entry.name == '__getattribute__':
+ from . import TypeSlots
+ is_reverse_number_slot = True
+ if entry.name not in special_py_methods and not TypeSlots.is_reverse_number_slot(entry.name):
+ if entry.name == '__getattr__' and not self.globalstate.directives['fast_getattr']:
+ pass
+ # Python's typeobject.c will automatically fill in our slot
+ # in add_operators() (called by PyType_Ready) with a value
+ # that's better than ours.
+ elif allow_skip:
+ return
+
+ method_flags = entry.signature.method_flags()
+ if not method_flags:
+ return
+ if entry.is_special:
+ method_flags += [TypeSlots.method_coexist]
+ func_ptr = wrapper_code_writer.put_pymethoddef_wrapper(entry) if wrapper_code_writer else entry.func_cname
+ # Add required casts, but try not to shadow real warnings.
+ cast = entry.signature.method_function_type()
+ if cast != 'PyCFunction':
+ func_ptr = '(void*)(%s)%s' % (cast, func_ptr)
+ entry_name = entry.name.as_c_string_literal()
+ if is_reverse_number_slot:
+ # Unlike most special functions, reverse number operator slots are actually generated here
+ # (to ensure that they can be looked up). However, they're sometimes guarded by the preprocessor
+ # so a bit of extra logic is needed
+ slot = TypeSlots.get_slot_table(self.globalstate.directives).get_slot_by_method_name(entry.name)
+ preproc_guard = slot.preprocessor_guard_code()
+ if preproc_guard:
+ self.putln(preproc_guard)
+ self.putln(
+ '{%s, (PyCFunction)%s, %s, %s}%s' % (
+ entry_name,
+ func_ptr,
+ "|".join(method_flags),
+ entry.doc_cname if entry.doc else '0',
+ term))
+ if is_reverse_number_slot and preproc_guard:
+ self.putln("#endif")
+
+ def put_pymethoddef_wrapper(self, entry):
+ func_cname = entry.func_cname
+ if entry.is_special:
+ method_flags = entry.signature.method_flags() or []
+ from .TypeSlots import method_noargs
+ if method_noargs in method_flags:
+ # Special NOARGS methods really take no arguments besides 'self', but PyCFunction expects one.
+ func_cname = Naming.method_wrapper_prefix + func_cname
+ self.putln("static PyObject *%s(PyObject *self, CYTHON_UNUSED PyObject *arg) {" % func_cname)
+ func_call = "%s(self)" % entry.func_cname
+ if entry.name == "__next__":
+ self.putln("PyObject *res = %s;" % func_call)
+ # tp_iternext can return NULL without an exception
+ self.putln("if (!res && !PyErr_Occurred()) { PyErr_SetNone(PyExc_StopIteration); }")
+ self.putln("return res;")
+ else:
+ self.putln("return %s;" % func_call)
+ self.putln("}")
+ return func_cname
+
+ # GIL methods
+
+ def use_fast_gil_utility_code(self):
+ if self.globalstate.directives['fast_gil']:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
+ else:
+ self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
+
+ def put_ensure_gil(self, declare_gilstate=True, variable=None):
+ """
+ Acquire the GIL. The generated code is safe even when no PyThreadState
+ has been allocated for this thread (for threads not initialized by
+ using the Python API). Additionally, the code generated by this method
+ may be called recursively.
+ """
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
+ self.use_fast_gil_utility_code()
+ self.putln("#ifdef WITH_THREAD")
+ if not variable:
+ variable = '__pyx_gilstate_save'
+ if declare_gilstate:
+ self.put("PyGILState_STATE ")
+ self.putln("%s = __Pyx_PyGILState_Ensure();" % variable)
+ self.putln("#endif")
+
+ def put_release_ensured_gil(self, variable=None):
+ """
+ Releases the GIL, corresponds to `put_ensure_gil`.
+ """
+ self.use_fast_gil_utility_code()
+ if not variable:
+ variable = '__pyx_gilstate_save'
+ self.putln("#ifdef WITH_THREAD")
+ self.putln("__Pyx_PyGILState_Release(%s);" % variable)
+ self.putln("#endif")
+
+ def put_acquire_gil(self, variable=None, unknown_gil_state=True):
+ """
+ Acquire the GIL. The thread's thread state must have been initialized
+ by a previous `put_release_gil`
+ """
+ self.use_fast_gil_utility_code()
+ self.putln("#ifdef WITH_THREAD")
+ self.putln("__Pyx_FastGIL_Forget();")
+ if variable:
+ self.putln('_save = %s;' % variable)
+ if unknown_gil_state:
+ self.putln("if (_save) {")
+ self.putln("Py_BLOCK_THREADS")
+ if unknown_gil_state:
+ self.putln("}")
+ self.putln("#endif")
+
+ def put_release_gil(self, variable=None, unknown_gil_state=True):
+ "Release the GIL, corresponds to `put_acquire_gil`."
+ self.use_fast_gil_utility_code()
+ self.putln("#ifdef WITH_THREAD")
+ self.putln("PyThreadState *_save;")
+ self.putln("_save = NULL;")
+ if unknown_gil_state:
+ # we don't *know* that we don't have the GIL (since we may be inside a nogil function,
+ # and Py_UNBLOCK_THREADS is unsafe without the GIL)
+ self.putln("if (PyGILState_Check()) {")
+ self.putln("Py_UNBLOCK_THREADS")
+ if unknown_gil_state:
+ self.putln("}")
+ if variable:
+ self.putln('%s = _save;' % variable)
+ self.putln("__Pyx_FastGIL_Remember();")
+ self.putln("#endif")
+
+ def declare_gilstate(self):
+ self.putln("#ifdef WITH_THREAD")
+ self.putln("PyGILState_STATE __pyx_gilstate_save;")
+ self.putln("#endif")
+
+ # error handling
+
+ def put_error_if_neg(self, pos, value):
+        # TODO this path is almost _never_ taken, yet this macro makes it slower!
+ # return self.putln("if (unlikely(%s < 0)) %s" % (value, self.error_goto(pos)))
+ return self.putln("if (%s < 0) %s" % (value, self.error_goto(pos)))
+
+ def put_error_if_unbound(self, pos, entry, in_nogil_context=False, unbound_check_code=None):
+ if entry.from_closure:
+ func = '__Pyx_RaiseClosureNameError'
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseClosureNameError", "ObjectHandling.c"))
+ elif entry.type.is_memoryviewslice and in_nogil_context:
+ func = '__Pyx_RaiseUnboundMemoryviewSliceNogil'
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseUnboundMemoryviewSliceNogil", "ObjectHandling.c"))
+ elif entry.type.is_cpp_class and entry.is_cglobal:
+ func = '__Pyx_RaiseCppGlobalNameError'
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseCppGlobalNameError", "ObjectHandling.c"))
+ elif entry.type.is_cpp_class and entry.is_variable and not entry.is_member and entry.scope.is_c_class_scope:
+            # there doesn't seem to be a good way to detect an instance-attribute of a C class
+ # (is_member is only set for class attributes)
+ func = '__Pyx_RaiseCppAttributeError'
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseCppAttributeError", "ObjectHandling.c"))
+ else:
+ func = '__Pyx_RaiseUnboundLocalError'
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseUnboundLocalError", "ObjectHandling.c"))
+
+ if not unbound_check_code:
+ unbound_check_code = entry.type.check_for_null_code(entry.cname)
+ self.putln('if (unlikely(!%s)) { %s("%s"); %s }' % (
+ unbound_check_code,
+ func,
+ entry.name,
+ self.error_goto(pos)))
+
+ def set_error_info(self, pos, used=False):
+ self.funcstate.should_declare_error_indicator = True
+ if used:
+ self.funcstate.uses_error_indicator = True
+ return "__PYX_MARK_ERR_POS(%s, %s)" % (
+ self.lookup_filename(pos[0]),
+ pos[1])
+
+ def error_goto(self, pos, used=True):
+ lbl = self.funcstate.error_label
+ self.funcstate.use_label(lbl)
+ if pos is None:
+ return 'goto %s;' % lbl
+ self.funcstate.should_declare_error_indicator = True
+ if used:
+ self.funcstate.uses_error_indicator = True
+ return "__PYX_ERR(%s, %s, %s)" % (
+ self.lookup_filename(pos[0]),
+ pos[1],
+ lbl)
+
+ def error_goto_if(self, cond, pos):
+ return "if (%s) %s" % (self.unlikely(cond), self.error_goto(pos))
+
+ def error_goto_if_null(self, cname, pos):
+ return self.error_goto_if("!%s" % cname, pos)
+
+ def error_goto_if_neg(self, cname, pos):
+ # Add extra parentheses to silence clang warnings about constant conditions.
+ return self.error_goto_if("(%s < 0)" % cname, pos)
+
+ def error_goto_if_PyErr(self, pos):
+ return self.error_goto_if("PyErr_Occurred()", pos)
+
+ def lookup_filename(self, filename):
+ return self.globalstate.lookup_filename(filename)
+
+ def put_declare_refcount_context(self):
+ self.putln('__Pyx_RefNannyDeclarations')
+
+ def put_setup_refcount_context(self, name, acquire_gil=False):
+ name = name.as_c_string_literal() # handle unicode names
+ if acquire_gil:
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
+ self.putln('__Pyx_RefNannySetupContext(%s, %d);' % (name, acquire_gil and 1 or 0))
+
+ def put_finish_refcount_context(self, nogil=False):
+ self.putln("__Pyx_RefNannyFinishContextNogil()" if nogil else "__Pyx_RefNannyFinishContext();")
+
+ def put_add_traceback(self, qualified_name, include_cline=True):
+ """
+ Build a Python traceback for propagating exceptions.
+
+ qualified_name should be the qualified name of the function.
+ """
+ qualified_name = qualified_name.as_c_string_literal() # handle unicode names
+ format_tuple = (
+ qualified_name,
+ Naming.clineno_cname if include_cline else 0,
+ Naming.lineno_cname,
+ Naming.filename_cname,
+ )
+
+ self.funcstate.uses_error_indicator = True
+ self.putln('__Pyx_AddTraceback(%s, %s, %s, %s);' % format_tuple)
+
+ def put_unraisable(self, qualified_name, nogil=False):
+ """
+ Generate code to print a Python warning for an unraisable exception.
+
+ qualified_name should be the qualified name of the function.
+ """
+ format_tuple = (
+ qualified_name,
+ Naming.clineno_cname,
+ Naming.lineno_cname,
+ Naming.filename_cname,
+ self.globalstate.directives['unraisable_tracebacks'],
+ nogil,
+ )
+ self.funcstate.uses_error_indicator = True
+ self.putln('__Pyx_WriteUnraisable("%s", %s, %s, %s, %d, %d);' % format_tuple)
+ self.globalstate.use_utility_code(
+ UtilityCode.load_cached("WriteUnraisableException", "Exceptions.c"))
+
+ def put_trace_declarations(self):
+ self.putln('__Pyx_TraceDeclarations')
+
+ def put_trace_frame_init(self, codeobj=None):
+ if codeobj:
+ self.putln('__Pyx_TraceFrameInit(%s)' % codeobj)
+
+ def put_trace_call(self, name, pos, nogil=False):
+ self.putln('__Pyx_TraceCall("%s", %s[%s], %s, %d, %s);' % (
+ name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1], nogil, self.error_goto(pos)))
+
+ def put_trace_exception(self):
+ self.putln("__Pyx_TraceException();")
+
+ def put_trace_return(self, retvalue_cname, nogil=False):
+ self.putln("__Pyx_TraceReturn(%s, %d);" % (retvalue_cname, nogil))
+
+ def putln_openmp(self, string):
+ self.putln("#ifdef _OPENMP")
+ self.putln(string)
+ self.putln("#endif /* _OPENMP */")
+
+ def undef_builtin_expect(self, cond):
+ """
+        Redefine the macros likely() and unlikely() to no-ops, depending on
+        condition 'cond'.
+ """
+ self.putln("#if %s" % cond)
+ self.putln(" #undef likely")
+ self.putln(" #undef unlikely")
+ self.putln(" #define likely(x) (x)")
+ self.putln(" #define unlikely(x) (x)")
+ self.putln("#endif")
+
+ def redef_builtin_expect(self, cond):
+ self.putln("#if %s" % cond)
+ self.putln(" #undef likely")
+ self.putln(" #undef unlikely")
+ self.putln(" #define likely(x) __builtin_expect(!!(x), 1)")
+ self.putln(" #define unlikely(x) __builtin_expect(!!(x), 0)")
+ self.putln("#endif")
+
+
+class PyrexCodeWriter(object):
+ # f file output file
+ # level int indentation level
+
+ def __init__(self, outfile_name):
+ self.f = Utils.open_new_file(outfile_name)
+ self.level = 0
+
+ def putln(self, code):
+ self.f.write("%s%s\n" % (" " * self.level, code))
+
+ def indent(self):
+ self.level += 1
+
+ def dedent(self):
+ self.level -= 1
+
+
+class PyxCodeWriter(object):
+ """
+ Can be used for writing out some Cython code.
+ """
+
+ def __init__(self, buffer=None, indent_level=0, context=None, encoding='ascii'):
+ self.buffer = buffer or StringIOTree()
+ self.level = indent_level
+ self.original_level = indent_level
+ self.context = context
+ self.encoding = encoding
+
+ def indent(self, levels=1):
+ self.level += levels
+ return True
+
+ def dedent(self, levels=1):
+ self.level -= levels
+
+ @contextmanager
+ def indenter(self, line):
+ """
+ with pyx_code.indenter("for i in range(10):"):
+ pyx_code.putln("print i")
+ """
+ self.putln(line)
+ self.indent()
+ yield
+ self.dedent()
+
+ def empty(self):
+ return self.buffer.empty()
+
+ def getvalue(self):
+ result = self.buffer.getvalue()
+ if isinstance(result, bytes):
+ result = result.decode(self.encoding)
+ return result
+
+ def putln(self, line, context=None):
+ context = context or self.context
+ if context:
+ line = sub_tempita(line, context)
+ self._putln(line)
+
+ def _putln(self, line):
+ self.buffer.write(u"%s%s\n" % (self.level * u" ", line))
+
+ def put_chunk(self, chunk, context=None):
+ context = context or self.context
+ if context:
+ chunk = sub_tempita(chunk, context)
+
+ chunk = textwrap.dedent(chunk)
+ for line in chunk.splitlines():
+ self._putln(line)
+
+ def insertion_point(self):
+ return type(self)(self.buffer.insertion_point(), self.level, self.context)
+
+ def reset(self):
+        # Resets the buffer so that nothing gets written. Most useful
+        # for abandoning all work in a specific insertion point.
+ self.buffer.reset()
+ self.level = self.original_level
+
+ def named_insertion_point(self, name):
+ setattr(self, name, self.insertion_point())
+
+
+class ClosureTempAllocator(object):
+ def __init__(self, klass):
+ self.klass = klass
+ self.temps_allocated = {}
+ self.temps_free = {}
+ self.temps_count = 0
+
+ def reset(self):
+ for type, cnames in self.temps_allocated.items():
+ self.temps_free[type] = list(cnames)
+
+ def allocate_temp(self, type):
+ if type not in self.temps_allocated:
+ self.temps_allocated[type] = []
+ self.temps_free[type] = []
+ elif self.temps_free[type]:
+ return self.temps_free[type].pop(0)
+ cname = '%s%d' % (Naming.codewriter_temp_prefix, self.temps_count)
+ self.klass.declare_var(pos=None, name=cname, cname=cname, type=type, is_cdef=True)
+ self.temps_allocated[type].append(cname)
+ self.temps_count += 1
+ return cname
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CodeGeneration.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CodeGeneration.py
new file mode 100644
index 0000000000000000000000000000000000000000..e64049c7f5d88a2ab52c26bd74948f6be8a0e333
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CodeGeneration.py
@@ -0,0 +1,35 @@
+from __future__ import absolute_import
+
+from .Visitor import VisitorTransform
+from .Nodes import StatListNode
+
+
+class ExtractPxdCode(VisitorTransform):
+ """
+ Finds nodes in a pxd file that should generate code, and
+ returns them in a StatListNode.
+
+ The result is a tuple (StatListNode, ModuleScope), i.e.
+ everything that is needed from the pxd after it is processed.
+
+ A purer approach would be to separately compile the pxd code,
+ but the result would have to be slightly more sophisticated
+ than pure strings (functions + wanted interned strings +
+ wanted utility code + wanted cached objects) so for now this
+ approach is taken.
+ """
+
+ def __call__(self, root):
+ self.funcs = []
+ self.visitchildren(root)
+ return (StatListNode(root.pos, stats=self.funcs), root.scope)
+
+ def visit_FuncDefNode(self, node):
+ self.funcs.append(node)
+ # Do not visit children, nested funcdefnodes will
+ # also be moved by this action...
+ return node
+
+ def visit_Node(self, node):
+ self.visitchildren(node)
+ return node
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CythonScope.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CythonScope.py
new file mode 100644
index 0000000000000000000000000000000000000000..f73be007086bed44b46312ad27687ceaa19c4ed8
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/CythonScope.py
@@ -0,0 +1,181 @@
+from __future__ import absolute_import
+
+from .Symtab import ModuleScope
+from .PyrexTypes import *
+from .UtilityCode import CythonUtilityCode
+from .Errors import error
+from .Scanning import StringSourceDescriptor
+from . import MemoryView
+from .StringEncoding import EncodedString
+
+
+class CythonScope(ModuleScope):
+ is_cython_builtin = 1
+ _cythonscope_initialized = False
+
+ def __init__(self, context):
+ ModuleScope.__init__(self, u'cython', None, None)
+ self.pxd_file_loaded = True
+ self.populate_cython_scope()
+ # The Main.Context object
+ self.context = context
+
+ for fused_type in (cy_integral_type, cy_floating_type, cy_numeric_type):
+ entry = self.declare_typedef(fused_type.name,
+ fused_type,
+ None,
+ cname='')
+ entry.in_cinclude = True
+
+ def is_cpp(self):
+ # Allow C++ utility code in C++ contexts.
+ return self.context.cpp
+
+ def lookup_type(self, name):
+ # This function should go away when types are all first-level objects.
+ type = parse_basic_type(name)
+ if type:
+ return type
+
+ return super(CythonScope, self).lookup_type(name)
+
+ def lookup(self, name):
+ entry = super(CythonScope, self).lookup(name)
+
+ if entry is None and not self._cythonscope_initialized:
+ self.load_cythonscope()
+ entry = super(CythonScope, self).lookup(name)
+
+ return entry
+
+ def find_module(self, module_name, pos):
+ error("cython.%s is not available" % module_name, pos)
+
+ def find_submodule(self, module_name, as_package=False):
+ entry = self.entries.get(module_name, None)
+ if not entry:
+ self.load_cythonscope()
+ entry = self.entries.get(module_name, None)
+
+ if entry and entry.as_module:
+ return entry.as_module
+ else:
+ # TODO: fix find_submodule control flow so that we're not
+ # expected to create a submodule here (to protect CythonScope's
+ # possible immutability). Hack ourselves out of the situation
+ # for now.
+ raise error((StringSourceDescriptor(u"cython", u""), 0, 0),
+ "cython.%s is not available" % module_name)
+
+ def lookup_qualified_name(self, qname):
+ # ExprNode.as_cython_attribute generates qnames and we untangle it here...
+ name_path = qname.split(u'.')
+ scope = self
+ while len(name_path) > 1:
+ scope = scope.lookup_here(name_path[0])
+ if scope:
+ scope = scope.as_module
+ del name_path[0]
+ if scope is None:
+ return None
+ else:
+ return scope.lookup_here(name_path[0])
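+
+    # Illustration (added for exposition): lookup_qualified_name(u'view.array')
+    # first resolves 'view' via lookup_here() and follows its as_module scope,
+    # then looks up 'array' in that scope; if 'view' is missing, None is returned.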
+
+ def populate_cython_scope(self):
+ # These are used to optimize isinstance in FinalOptimizePhase
+ type_object = self.declare_typedef(
+ 'PyTypeObject',
+ base_type = c_void_type,
+ pos = None,
+ cname = 'PyTypeObject')
+ type_object.is_void = True
+ type_object_type = type_object.type
+
+ self.declare_cfunction(
+ 'PyObject_TypeCheck',
+ CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
+ CFuncTypeArg("t", c_ptr_type(type_object_type), None)]),
+ pos = None,
+ defining = 1,
+ cname = 'PyObject_TypeCheck')
+
+ def load_cythonscope(self):
+ """
+ Creates some entries for testing purposes and entries for
+ cython.array() and for cython.view.*.
+ """
+ if self._cythonscope_initialized:
+ return
+
+ self._cythonscope_initialized = True
+ cython_testscope_utility_code.declare_in_scope(
+ self, cython_scope=self)
+ cython_test_extclass_utility_code.declare_in_scope(
+ self, cython_scope=self)
+
+ #
+ # The view sub-scope
+ #
+ self.viewscope = viewscope = ModuleScope(u'view', self, None)
+ self.declare_module('view', viewscope, None).as_module = viewscope
+ viewscope.is_cython_builtin = True
+ viewscope.pxd_file_loaded = True
+
+ cythonview_testscope_utility_code.declare_in_scope(
+ viewscope, cython_scope=self)
+
+ view_utility_scope = MemoryView.view_utility_code.declare_in_scope(
+ self.viewscope, cython_scope=self,
+ allowlist=MemoryView.view_utility_allowlist)
+
+        # Mark the types as cython_builtin_type so that they can be
+        # subclassed without Cython attempting to import cython.view.
+ ext_types = [ entry.type
+ for entry in view_utility_scope.entries.values()
+ if entry.type.is_extension_type ]
+ for ext_type in ext_types:
+ ext_type.is_cython_builtin_type = 1
+
+ # self.entries["array"] = view_utility_scope.entries.pop("array")
+
+ # dataclasses scope
+ dc_str = EncodedString(u'dataclasses')
+ dataclassesscope = ModuleScope(dc_str, self, context=None)
+ self.declare_module(dc_str, dataclassesscope, pos=None).as_module = dataclassesscope
+ dataclassesscope.is_cython_builtin = True
+ dataclassesscope.pxd_file_loaded = True
+ # doesn't actually have any contents
+
+
+def create_cython_scope(context):
+    # One could probably make this a singleton, but it is not yet clear
+    # whether any code mutates it (which would prevent reusing it across
+    # different contexts)
+ return CythonScope(context)
+
+# Load test utilities for the cython scope
+
+def load_testscope_utility(cy_util_name, **kwargs):
+ return CythonUtilityCode.load(cy_util_name, "TestCythonScope.pyx", **kwargs)
+
+
+undecorated_methods_protos = UtilityCode(proto=u"""
+    /* These methods are undecorated and therefore have no prototype */
+ static PyObject *__pyx_TestClass_cdef_method(
+ struct __pyx_TestClass_obj *self, int value);
+ static PyObject *__pyx_TestClass_cpdef_method(
+ struct __pyx_TestClass_obj *self, int value, int skip_dispatch);
+ static PyObject *__pyx_TestClass_def_method(
+ PyObject *self, PyObject *value);
+""")
+
+cython_testscope_utility_code = load_testscope_utility("TestScope")
+
+test_cython_utility_dep = load_testscope_utility("TestDep")
+
+cython_test_extclass_utility_code = \
+ load_testscope_utility("TestClass", name="TestClass",
+ requires=[undecorated_methods_protos,
+ test_cython_utility_dep])
+
+cythonview_testscope_utility_code = load_testscope_utility("View.TestScope")
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Dataclass.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Dataclass.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b41bf9e6a0c709e80c995548170fa6707e6bdbd
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Dataclass.py
@@ -0,0 +1,839 @@
+# functions to transform a c class into a dataclass
+
+from collections import OrderedDict
+from textwrap import dedent
+import operator
+
+from . import ExprNodes
+from . import Nodes
+from . import PyrexTypes
+from . import Builtin
+from . import Naming
+from .Errors import error, warning
+from .Code import UtilityCode, TempitaUtilityCode, PyxCodeWriter
+from .Visitor import VisitorTransform
+from .StringEncoding import EncodedString
+from .TreeFragment import TreeFragment
+from .ParseTreeTransforms import NormalizeTree, SkipDeclarations
+from .Options import copy_inherited_directives
+
+_dataclass_loader_utilitycode = None
+
+def make_dataclasses_module_callnode(pos):
+ global _dataclass_loader_utilitycode
+ if not _dataclass_loader_utilitycode:
+ python_utility_code = UtilityCode.load_cached("Dataclasses_fallback", "Dataclasses.py")
+ python_utility_code = EncodedString(python_utility_code.impl)
+ _dataclass_loader_utilitycode = TempitaUtilityCode.load(
+ "SpecificModuleLoader", "Dataclasses.c",
+ context={'cname': "dataclasses", 'py_code': python_utility_code.as_c_string_literal()})
+ return ExprNodes.PythonCapiCallNode(
+ pos, "__Pyx_Load_dataclasses_Module",
+ PyrexTypes.CFuncType(PyrexTypes.py_object_type, []),
+ utility_code=_dataclass_loader_utilitycode,
+ args=[],
+ )
+
+def make_dataclass_call_helper(pos, callable, kwds):
+ utility_code = UtilityCode.load_cached("DataclassesCallHelper", "Dataclasses.c")
+ func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("callable", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("kwds", PyrexTypes.py_object_type, None)
+ ],
+ )
+ return ExprNodes.PythonCapiCallNode(
+ pos,
+ function_name="__Pyx_DataclassesCallHelper",
+ func_type=func_type,
+ utility_code=utility_code,
+ args=[callable, kwds],
+ )
+
+
+class RemoveAssignmentsToNames(VisitorTransform, SkipDeclarations):
+ """
+ Cython (and Python) normally treats
+
+ class A:
+ x = 1
+
+    as generating a class attribute. However, for dataclasses the `= 1` should be interpreted
+    as a default value used to initialize an instance attribute.
+    This transform therefore removes the `x = 1` assignment so that the class attribute isn't
+    generated, while recording what it has removed so that it can be used in the initialization.
+ """
+ def __init__(self, names):
+ super(RemoveAssignmentsToNames, self).__init__()
+ self.names = names
+ self.removed_assignments = {}
+
+ def visit_CClassNode(self, node):
+ self.visitchildren(node)
+ return node
+
+ def visit_PyClassNode(self, node):
+ return node # go no further
+
+ def visit_FuncDefNode(self, node):
+ return node # go no further
+
+ def visit_SingleAssignmentNode(self, node):
+ if node.lhs.is_name and node.lhs.name in self.names:
+ if node.lhs.name in self.removed_assignments:
+ warning(node.pos, ("Multiple assignments for '%s' in dataclass; "
+ "using most recent") % node.lhs.name, 1)
+ self.removed_assignments[node.lhs.name] = node.rhs
+ return []
+ return node
+
+ # I believe cascaded assignment is always a syntax error with annotations
+ # so there's no need to define visit_CascadedAssignmentNode
+
+ def visit_Node(self, node):
+ self.visitchildren(node)
+ return node
+
+
+class TemplateCode(object):
+ """
+    Adds to PyxCodeWriter the ability to keep track of placeholder argument names.
+
+ Also adds extra_stats which are nodes bundled at the end when this
+ is converted to a tree.
+ """
+ _placeholder_count = 0
+
+ def __init__(self, writer=None, placeholders=None, extra_stats=None):
+ self.writer = PyxCodeWriter() if writer is None else writer
+ self.placeholders = {} if placeholders is None else placeholders
+ self.extra_stats = [] if extra_stats is None else extra_stats
+
+ def add_code_line(self, code_line):
+ self.writer.putln(code_line)
+
+ def add_code_lines(self, code_lines):
+ for line in code_lines:
+ self.writer.putln(line)
+
+ def reset(self):
+ # don't attempt to reset placeholders - it really doesn't matter if
+ # we have unused placeholders
+ self.writer.reset()
+
+ def empty(self):
+ return self.writer.empty()
+
+ def indenter(self):
+ return self.writer.indenter()
+
+ def new_placeholder(self, field_names, value):
+ name = self._new_placeholder_name(field_names)
+ self.placeholders[name] = value
+ return name
+
+ def add_extra_statements(self, statements):
+ if self.extra_stats is None:
+ assert False, "Can only use add_extra_statements on top-level writer"
+ self.extra_stats.extend(statements)
+
+ def _new_placeholder_name(self, field_names):
+ while True:
+ name = "DATACLASS_PLACEHOLDER_%d" % self._placeholder_count
+ if (name not in self.placeholders
+ and name not in field_names):
+ # make sure name isn't already used and doesn't
+ # conflict with a variable name (which is unlikely but possible)
+ break
+ self._placeholder_count += 1
+ return name
+
+ def generate_tree(self, level='c_class'):
+ stat_list_node = TreeFragment(
+ self.writer.getvalue(),
+ level=level,
+ pipeline=[NormalizeTree(None)],
+ ).substitute(self.placeholders)
+
+ stat_list_node.stats += self.extra_stats
+ return stat_list_node
+
+ def insertion_point(self):
+ new_writer = self.writer.insertion_point()
+ return TemplateCode(
+ writer=new_writer,
+ placeholders=self.placeholders,
+ extra_stats=self.extra_stats
+ )
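+
+# A minimal usage sketch (hypothetical, for illustration only):
+#
+#     code = TemplateCode()
+#     ph = code.new_placeholder({}, default_value_node)
+#     code.add_code_line(u"x = %s" % ph)
+#     tree = code.generate_tree(level='c_class')
+#
+# "default_value_node" stands for any ExprNode; generate_tree() substitutes
+# it for the placeholder name when building the parse tree.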
+
+
+class _MISSING_TYPE(object):
+ pass
+MISSING = _MISSING_TYPE()
+
+
+class Field(object):
+ """
+ Field is based on the dataclasses.field class from the standard library module.
+ It is used internally during the generation of Cython dataclasses to keep track
+ of the settings for individual attributes.
+
+    Attributes of this class are stored as nodes so they can be used in code construction
+    more readily (e.g. we store a BoolNode rather than a bool)
+ """
+ default = MISSING
+ default_factory = MISSING
+ private = False
+
+ literal_keys = ("repr", "hash", "init", "compare", "metadata")
+
+ # default values are defined by the CPython dataclasses.field
+ def __init__(self, pos, default=MISSING, default_factory=MISSING,
+ repr=None, hash=None, init=None,
+ compare=None, metadata=None,
+ is_initvar=False, is_classvar=False,
+ **additional_kwds):
+ if default is not MISSING:
+ self.default = default
+ if default_factory is not MISSING:
+ self.default_factory = default_factory
+ self.repr = repr or ExprNodes.BoolNode(pos, value=True)
+ self.hash = hash or ExprNodes.NoneNode(pos)
+ self.init = init or ExprNodes.BoolNode(pos, value=True)
+ self.compare = compare or ExprNodes.BoolNode(pos, value=True)
+ self.metadata = metadata or ExprNodes.NoneNode(pos)
+ self.is_initvar = is_initvar
+ self.is_classvar = is_classvar
+
+ for k, v in additional_kwds.items():
+ # There should not be any additional keywords!
+ error(v.pos, "cython.dataclasses.field() got an unexpected keyword argument '%s'" % k)
+
+ for field_name in self.literal_keys:
+ field_value = getattr(self, field_name)
+ if not field_value.is_literal:
+ error(field_value.pos,
+ "cython.dataclasses.field parameter '%s' must be a literal value" % field_name)
+
+ def iterate_record_node_arguments(self):
+ for key in (self.literal_keys + ('default', 'default_factory')):
+ value = getattr(self, key)
+ if value is not MISSING:
+ yield key, value
+
+
+def process_class_get_fields(node):
+ var_entries = node.scope.var_entries
+ # order of definition is used in the dataclass
+ var_entries = sorted(var_entries, key=operator.attrgetter('pos'))
+ var_names = [entry.name for entry in var_entries]
+
+ # don't treat `x = 1` as an assignment of a class attribute within the dataclass
+ transform = RemoveAssignmentsToNames(var_names)
+ transform(node)
+ default_value_assignments = transform.removed_assignments
+
+ base_type = node.base_type
+ fields = OrderedDict()
+ while base_type:
+ if base_type.is_external or not base_type.scope.implemented:
+ warning(node.pos, "Cannot reliably handle Cython dataclasses with base types "
+ "in external modules since it is not possible to tell what fields they have", 2)
+ if base_type.dataclass_fields:
+ fields = base_type.dataclass_fields.copy()
+ break
+ base_type = base_type.base_type
+
+ for entry in var_entries:
+ name = entry.name
+ is_initvar = entry.declared_with_pytyping_modifier("dataclasses.InitVar")
+ # TODO - classvars aren't included in "var_entries" so are missed here
+ # and thus this code is never triggered
+ is_classvar = entry.declared_with_pytyping_modifier("typing.ClassVar")
+ if name in default_value_assignments:
+ assignment = default_value_assignments[name]
+ if (isinstance(assignment, ExprNodes.CallNode) and (
+ assignment.function.as_cython_attribute() == "dataclasses.field" or
+ Builtin.exprnode_to_known_standard_library_name(
+ assignment.function, node.scope) == "dataclasses.field")):
+ # I believe most of this is well-enforced when it's treated as a directive
+ # but it doesn't hurt to make sure
+ valid_general_call = (isinstance(assignment, ExprNodes.GeneralCallNode)
+ and isinstance(assignment.positional_args, ExprNodes.TupleNode)
+ and not assignment.positional_args.args
+ and (assignment.keyword_args is None or isinstance(assignment.keyword_args, ExprNodes.DictNode)))
+ valid_simple_call = (isinstance(assignment, ExprNodes.SimpleCallNode) and not assignment.args)
+ if not (valid_general_call or valid_simple_call):
+ error(assignment.pos, "Call to 'cython.dataclasses.field' must only consist "
+ "of compile-time keyword arguments")
+ continue
+ keyword_args = assignment.keyword_args.as_python_dict() if valid_general_call and assignment.keyword_args else {}
+ if 'default' in keyword_args and 'default_factory' in keyword_args:
+ error(assignment.pos, "cannot specify both default and default_factory")
+ continue
+ field = Field(node.pos, **keyword_args)
+ else:
+ if assignment.type in [Builtin.list_type, Builtin.dict_type, Builtin.set_type]:
+ # The standard library module generates a TypeError at runtime
+ # in this situation.
+ # Error message is copied from CPython
+ error(assignment.pos, "mutable default for field {1} is not allowed: "
+ "use default_factory".format(assignment.type.name, name))
+
+ field = Field(node.pos, default=assignment)
+ else:
+ field = Field(node.pos)
+ field.is_initvar = is_initvar
+ field.is_classvar = is_classvar
+ if entry.visibility == "private":
+ field.private = True
+ fields[name] = field
+ node.entry.type.dataclass_fields = fields
+ return fields
+
+
+def handle_cclass_dataclass(node, dataclass_args, analyse_decs_transform):
+ # default argument values from https://docs.python.org/3/library/dataclasses.html
+ kwargs = dict(init=True, repr=True, eq=True,
+ order=False, unsafe_hash=False,
+ frozen=False, kw_only=False)
+ if dataclass_args is not None:
+ if dataclass_args[0]:
+ error(node.pos, "cython.dataclasses.dataclass takes no positional arguments")
+ for k, v in dataclass_args[1].items():
+ if k not in kwargs:
+ error(node.pos,
+ "cython.dataclasses.dataclass() got an unexpected keyword argument '%s'" % k)
+ if not isinstance(v, ExprNodes.BoolNode):
+ error(node.pos,
+ "Arguments passed to cython.dataclasses.dataclass must be True or False")
+ kwargs[k] = v.value
+
+ kw_only = kwargs['kw_only']
+
+ fields = process_class_get_fields(node)
+
+ dataclass_module = make_dataclasses_module_callnode(node.pos)
+
+ # create __dataclass_params__ attribute. I try to use the exact
+ # `_DataclassParams` class defined in the standard library module if at all possible
+ # for maximum duck-typing compatibility.
+ dataclass_params_func = ExprNodes.AttributeNode(node.pos, obj=dataclass_module,
+ attribute=EncodedString("_DataclassParams"))
+ dataclass_params_keywords = ExprNodes.DictNode.from_pairs(
+ node.pos,
+ [ (ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)),
+ ExprNodes.BoolNode(node.pos, value=v))
+ for k, v in kwargs.items() ] +
+ [ (ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)),
+ ExprNodes.BoolNode(node.pos, value=v))
+ for k, v in [('kw_only', kw_only), ('match_args', False),
+ ('slots', False), ('weakref_slot', False)]
+ ])
+ dataclass_params = make_dataclass_call_helper(
+ node.pos, dataclass_params_func, dataclass_params_keywords)
+ dataclass_params_assignment = Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs = ExprNodes.NameNode(node.pos, name=EncodedString("__dataclass_params__")),
+ rhs = dataclass_params)
+
+ dataclass_fields_stats = _set_up_dataclass_fields(node, fields, dataclass_module)
+
+ stats = Nodes.StatListNode(node.pos,
+ stats=[dataclass_params_assignment] + dataclass_fields_stats)
+
+ code = TemplateCode()
+ generate_init_code(code, kwargs['init'], node, fields, kw_only)
+ generate_repr_code(code, kwargs['repr'], node, fields)
+ generate_eq_code(code, kwargs['eq'], node, fields)
+ generate_order_code(code, kwargs['order'], node, fields)
+ generate_hash_code(code, kwargs['unsafe_hash'], kwargs['eq'], kwargs['frozen'], node, fields)
+
+ stats.stats += code.generate_tree().stats
+
+ # turn off annotation typing, so all arguments to __init__ are accepted as
+ # generic objects and thus can accept _HAS_DEFAULT_FACTORY.
+ # Type conversion comes later
+ comp_directives = Nodes.CompilerDirectivesNode(node.pos,
+ directives=copy_inherited_directives(node.scope.directives, annotation_typing=False),
+ body=stats)
+
+ comp_directives.analyse_declarations(node.scope)
+ # probably already in this scope, but it doesn't hurt to make sure
+ analyse_decs_transform.enter_scope(node, node.scope)
+ analyse_decs_transform.visit(comp_directives)
+ analyse_decs_transform.exit_scope()
+
+ node.body.stats.append(comp_directives)
+
+
+def generate_init_code(code, init, node, fields, kw_only):
+ """
+ Notes on CPython generated "__init__":
+ * Implemented in `_init_fn`.
+ * The use of the `dataclasses._HAS_DEFAULT_FACTORY` sentinel value as
+ the default argument for fields that need constructing with a factory
+ function is copied from the CPython implementation. (`None` isn't
+ suitable because it could also be a value for the user to pass.)
+ There's no real reason why it needs importing from the dataclasses module
+ though - it could equally be a value generated by Cython when the module loads.
+ * seen_default and the associated error message are copied directly from Python
+ * Call to user-defined __post_init__ function (if it exists) is copied from
+ CPython.
+
+    Cython behaviour deviates a little here (to be decided if this is right...):
+    because the class variable from the assignment does not exist, Cython fields will
+    return None (or whatever their type default is) if not initialized, while Python
+    dataclasses will fall back to looking up the class variable.
+ """
+ if not init or node.scope.lookup_here("__init__"):
+ return
+
+    # selfname behaviour copied from the CPython module
+ selfname = "__dataclass_self__" if "self" in fields else "self"
+ args = [selfname]
+
+ if kw_only:
+ args.append("*")
+
+ function_start_point = code.insertion_point()
+ code = code.insertion_point()
+
+ # create a temp to get _HAS_DEFAULT_FACTORY
+ dataclass_module = make_dataclasses_module_callnode(node.pos)
+ has_default_factory = ExprNodes.AttributeNode(
+ node.pos,
+ obj=dataclass_module,
+ attribute=EncodedString("_HAS_DEFAULT_FACTORY")
+ )
+
+ default_factory_placeholder = code.new_placeholder(fields, has_default_factory)
+
+ seen_default = False
+ for name, field in fields.items():
+ entry = node.scope.lookup(name)
+ if entry.annotation:
+ annotation = u": %s" % entry.annotation.string.value
+ else:
+ annotation = u""
+ assignment = u''
+ if field.default is not MISSING or field.default_factory is not MISSING:
+ seen_default = True
+ if field.default_factory is not MISSING:
+ ph_name = default_factory_placeholder
+ else:
+ ph_name = code.new_placeholder(fields, field.default) # 'default' should be a node
+ assignment = u" = %s" % ph_name
+ elif seen_default and not kw_only and field.init.value:
+ error(entry.pos, ("non-default argument '%s' follows default argument "
+ "in dataclass __init__") % name)
+ code.reset()
+ return
+
+ if field.init.value:
+ args.append(u"%s%s%s" % (name, annotation, assignment))
+
+ if field.is_initvar:
+ continue
+ elif field.default_factory is MISSING:
+ if field.init.value:
+ code.add_code_line(u" %s.%s = %s" % (selfname, name, name))
+ elif assignment:
+ # not an argument to the function, but is still initialized
+ code.add_code_line(u" %s.%s%s" % (selfname, name, assignment))
+ else:
+ ph_name = code.new_placeholder(fields, field.default_factory)
+ if field.init.value:
+ # close to:
+ # def __init__(self, name=_PLACEHOLDER_VALUE):
+ # self.name = name_default_factory() if name is _PLACEHOLDER_VALUE else name
+ code.add_code_line(u" %s.%s = %s() if %s is %s else %s" % (
+ selfname, name, ph_name, name, default_factory_placeholder, name))
+ else:
+ # still need to use the default factory to initialize
+ code.add_code_line(u" %s.%s = %s()" % (
+ selfname, name, ph_name))
+
+ if node.scope.lookup("__post_init__"):
+ post_init_vars = ", ".join(name for name, field in fields.items()
+ if field.is_initvar)
+ code.add_code_line(" %s.__post_init__(%s)" % (selfname, post_init_vars))
+
+ if code.empty():
+ code.add_code_line(" pass")
+
+ args = u", ".join(args)
+ function_start_point.add_code_line(u"def __init__(%s):" % args)
+
+
+def generate_repr_code(code, repr, node, fields):
+ """
+ The core of the CPython implementation is just:
+ ['return self.__class__.__qualname__ + f"(' +
+ ', '.join([f"{f.name}={{self.{f.name}!r}}"
+ for f in fields]) +
+ ')"'],
+
+    The only notable difference here is that self.__class__.__qualname__ is looked up
+    with a getattr fallback to type(self).__name__, because Cython currently supports
+    Python 2.
+
+ However, it also has some guards for recursive repr invocations. In the standard
+ library implementation they're done with a wrapper decorator that captures a set
+ (with the set keyed by id and thread). Here we create a set as a thread local
+ variable and key only by id.
+ """
+ if not repr or node.scope.lookup("__repr__"):
+ return
+
+ # The recursive guard is likely a little costly, so skip it if possible.
+    # is_gc_simple defines whether it can contain recursive objects
+ needs_recursive_guard = False
+ for name in fields.keys():
+ entry = node.scope.lookup(name)
+ type_ = entry.type
+ if type_.is_memoryviewslice:
+ type_ = type_.dtype
+ if not type_.is_pyobject:
+ continue # no GC
+ if not type_.is_gc_simple:
+ needs_recursive_guard = True
+ break
+
+ if needs_recursive_guard:
+ code.add_code_line("__pyx_recursive_repr_guard = __import__('threading').local()")
+ code.add_code_line("__pyx_recursive_repr_guard.running = set()")
+ code.add_code_line("def __repr__(self):")
+ if needs_recursive_guard:
+ code.add_code_line(" key = id(self)")
+ code.add_code_line(" guard_set = self.__pyx_recursive_repr_guard.running")
+ code.add_code_line(" if key in guard_set: return '...'")
+ code.add_code_line(" guard_set.add(key)")
+ code.add_code_line(" try:")
+ strs = [u"%s={self.%s!r}" % (name, name)
+ for name, field in fields.items()
+ if field.repr.value and not field.is_initvar]
+ format_string = u", ".join(strs)
+
+ code.add_code_line(u' name = getattr(type(self), "__qualname__", type(self).__name__)')
+ code.add_code_line(u" return f'{name}(%s)'" % format_string)
+ if needs_recursive_guard:
+ code.add_code_line(" finally:")
+ code.add_code_line(" guard_set.remove(key)")
+
+
+def generate_cmp_code(code, op, funcname, node, fields):
+ if node.scope.lookup_here(funcname):
+ return
+
+ names = [name for name, field in fields.items() if (field.compare.value and not field.is_initvar)]
+
+ code.add_code_lines([
+ "def %s(self, other):" % funcname,
+ " if other.__class__ is not self.__class__:"
+ " return NotImplemented",
+ #
+ " cdef %s other_cast" % node.class_name,
+ " other_cast = <%s>other" % node.class_name,
+ ])
+
+ # The Python implementation of dataclasses.py does a tuple comparison
+ # (roughly):
+ # return self._attributes_to_tuple() {op} other._attributes_to_tuple()
+ #
+ # For the Cython implementation a tuple comparison isn't an option because
+ # not all attributes can be converted to Python objects and stored in a tuple
+ #
+ # TODO - better diagnostics of whether the types support comparison before
+ # generating the code. Plus, do we want to convert C structs to dicts and
+ # compare them that way (I think not, but it might be in demand)?
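+    # For example (a sketch), for fields (a, b) and op '<' the generated body
+    # is roughly:
+    #     if self.a < other_cast.a: return True
+    #     if self.a != other_cast.a: return False
+    #     if self.b < other_cast.b: return True
+    #     if self.b != other_cast.b: return False
+    #     return False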
+ checks = []
+ op_without_equals = op.replace('=', '')
+
+ for name in names:
+ if op != '==':
+ # tuple comparison rules - early elements take precedence
+ code.add_code_line(" if self.%s %s other_cast.%s: return True" % (
+ name, op_without_equals, name))
+ code.add_code_line(" if self.%s != other_cast.%s: return False" % (
+ name, name))
+ if "=" in op:
+ code.add_code_line(" return True") # "() == ()" is True
+ else:
+ code.add_code_line(" return False")
+
+
+def generate_eq_code(code, eq, node, fields):
+ if not eq:
+ return
+ generate_cmp_code(code, "==", "__eq__", node, fields)
+
+
+def generate_order_code(code, order, node, fields):
+ if not order:
+ return
+
+ for op, name in [("<", "__lt__"),
+ ("<=", "__le__"),
+ (">", "__gt__"),
+ (">=", "__ge__")]:
+ generate_cmp_code(code, op, name, node, fields)
+
+
+def generate_hash_code(code, unsafe_hash, eq, frozen, node, fields):
+ """
+ Copied from CPython implementation - the intention is to follow this as far as
+ is possible:
+    #    +------------------- unsafe_hash= parameter
+    #    |       +----------- eq= parameter
+    #    |       |       +--- frozen= parameter
+    #    |       |       |
+    #    v       v       v    |        |        |
+    #                         |   no   |  yes   |  <--- class has explicitly defined __hash__
+    #    +=======+=======+=======+========+========+
+    #    | False | False | False |        |        | No __eq__, use the base class __hash__
+    #    +-------+-------+-------+--------+--------+
+    #    | False | False | True  |        |        | No __eq__, use the base class __hash__
+    #    +-------+-------+-------+--------+--------+
+    #    | False | True  | False | None   |        | <-- the default, not hashable
+    #    +-------+-------+-------+--------+--------+
+    #    | False | True  | True  | add    |        | Frozen, so hashable, allows override
+    #    +-------+-------+-------+--------+--------+
+    #    | True  | False | False | add    | raise  | Has no __eq__, but hashable
+    #    +-------+-------+-------+--------+--------+
+    #    | True  | False | True  | add    | raise  | Has no __eq__, but hashable
+    #    +-------+-------+-------+--------+--------+
+    #    | True  | True  | False | add    | raise  | Not frozen, but hashable
+    #    +-------+-------+-------+--------+--------+
+    #    | True  | True  | True  | add    | raise  | Frozen, so hashable
+    #    +=======+=======+=======+========+========+
+    #    For boxes that are blank, __hash__ is untouched and therefore
+    #    inherited from the base class. If the base is object, then
+    #    id-based hashing is used.
+
+ The Python implementation creates a tuple of all the fields, then hashes them.
+ This implementation creates a tuple of all the hashes of all the fields and hashes that.
+    The reason for this slight difference is to avoid to-Python conversions for anything
+    that Cython knows how to hash directly (it doesn't look like this currently applies
+    to anything, though...).
+ """
+
+ hash_entry = node.scope.lookup_here("__hash__")
+ if hash_entry:
+ # TODO ideally assignment of __hash__ to None shouldn't trigger this
+ # but difficult to get the right information here
+ if unsafe_hash:
+ # error message taken from CPython dataclasses module
+ error(node.pos, "Cannot overwrite attribute __hash__ in class %s" % node.class_name)
+ return
+
+ if not unsafe_hash:
+ if not eq:
+ return
+ if not frozen:
+ code.add_extra_statements([
+ Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs=ExprNodes.NameNode(node.pos, name=EncodedString("__hash__")),
+ rhs=ExprNodes.NoneNode(node.pos),
+ )
+ ])
+ return
+
+ names = [
+ name for name, field in fields.items()
+ if not field.is_initvar and (
+ field.compare.value if field.hash.value is None else field.hash.value)
+ ]
+
+ # make a tuple of the hashes
+ hash_tuple_items = u", ".join(u"self.%s" % name for name in names)
+ if hash_tuple_items:
+ hash_tuple_items += u"," # ensure that one arg form is a tuple
+
+ # if we're here we want to generate a hash
+ code.add_code_lines([
+ "def __hash__(self):",
+ " return hash((%s))" % hash_tuple_items,
+ ])
+
+
+def get_field_type(pos, entry):
+ """
+    Returns the node used to set the .type attribute of a field.
+
+    Returns the annotation if possible (since this is what the dataclasses
+    module does). If not (for example, for attributes defined with cdef),
+    it creates a string fallback.
+ """
+ if entry.annotation:
+ # Right now it doesn't look like cdef classes generate an
+ # __annotations__ dict, therefore it's safe to just return
+ # entry.annotation
+ # (TODO: remove .string if we ditch PEP563)
+ return entry.annotation.string
+    # If they do in future then we may need to look the annotation up in that
+    # dict instead of duplicating the node. The code below should do this:
+ #class_name_node = ExprNodes.NameNode(pos, name=entry.scope.name)
+ #annotations = ExprNodes.AttributeNode(
+ # pos, obj=class_name_node,
+ # attribute=EncodedString("__annotations__")
+ #)
+ #return ExprNodes.IndexNode(
+ # pos, base=annotations,
+ # index=ExprNodes.StringNode(pos, value=entry.name)
+ #)
+ else:
+ # it's slightly unclear what the best option is here - we could
+ # try to return PyType_Type. This case should only happen with
+        # attributes defined with cdef so Cython is free to make its own
+ # decision
+ s = EncodedString(entry.type.declaration_code("", for_display=1))
+ return ExprNodes.StringNode(pos, value=s)
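+
+# For example, an attribute declared as "cdef int x" has no annotation, so
+# get_field_type() falls back to a StringNode whose value is the display
+# form of the C type (u"int" in this sketch).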
+
+
+class FieldRecordNode(ExprNodes.ExprNode):
+ """
+ __dataclass_fields__ contains a bunch of field objects recording how each field
+ of the dataclass was initialized (mainly corresponding to the arguments passed to
+ the "field" function). This node is used for the attributes of these field objects.
+
+ If possible, coerces `arg` to a Python object.
+ Otherwise, generates a sensible backup string.
+ """
+ subexprs = ['arg']
+
+ def __init__(self, pos, arg):
+ super(FieldRecordNode, self).__init__(pos, arg=arg)
+
+ def analyse_types(self, env):
+ self.arg.analyse_types(env)
+ self.type = self.arg.type
+ return self
+
+ def coerce_to_pyobject(self, env):
+ if self.arg.type.can_coerce_to_pyobject(env):
+ return self.arg.coerce_to_pyobject(env)
+ else:
+ # A string representation of the code that gave the field seems like a reasonable
+ # fallback. This'll mostly happen for "default" and "default_factory" where the
+ # type may be a C-type that can't be converted to Python.
+ return self._make_string()
+
+ def _make_string(self):
+ from .AutoDocTransforms import AnnotationWriter
+ writer = AnnotationWriter(description="Dataclass field")
+ string = writer.write(self.arg)
+ return ExprNodes.StringNode(self.pos, value=EncodedString(string))
+
+ def generate_evaluation_code(self, code):
+ return self.arg.generate_evaluation_code(code)
+
+
+def _set_up_dataclass_fields(node, fields, dataclass_module):
+ # For defaults and default_factories containing things like lambda,
+ # they're already declared in the class scope, and it creates a big
+ # problem if multiple copies are floating around in both the __init__
+ # function, and in the __dataclass_fields__ structure.
+ # Therefore, create module-level constants holding these values and
+ # pass those around instead
+ #
+ # If possible we use the `Field` class defined in the standard library
+ # module so that the information stored here is as close to a regular
+ # dataclass as is possible.
+ variables_assignment_stats = []
+ for name, field in fields.items():
+ if field.private:
+ continue # doesn't appear in the public interface
+ for attrname in [ "default", "default_factory" ]:
+ field_default = getattr(field, attrname)
+ if field_default is MISSING or field_default.is_literal or field_default.is_name:
+ # some simple cases where we don't need to set up
+ # the variable as a module-level constant
+ continue
+ global_scope = node.scope.global_scope()
+ module_field_name = global_scope.mangle(
+ global_scope.mangle(Naming.dataclass_field_default_cname, node.class_name),
+ name)
+ # create an entry in the global scope for this variable to live
+ field_node = ExprNodes.NameNode(field_default.pos, name=EncodedString(module_field_name))
+ field_node.entry = global_scope.declare_var(
+ field_node.name, type=field_default.type or PyrexTypes.unspecified_type,
+ pos=field_default.pos, cname=field_node.name, is_cdef=True,
+ # TODO: do we need to set 'pytyping_modifiers' here?
+ )
+ # replace the field so that future users just receive the namenode
+ setattr(field, attrname, field_node)
+
+ variables_assignment_stats.append(
+ Nodes.SingleAssignmentNode(field_default.pos, lhs=field_node, rhs=field_default))
+
+ placeholders = {}
+ field_func = ExprNodes.AttributeNode(node.pos, obj=dataclass_module,
+ attribute=EncodedString("field"))
+ dc_fields = ExprNodes.DictNode(node.pos, key_value_pairs=[])
+ dc_fields_namevalue_assignments = []
+
+ for name, field in fields.items():
+ if field.private:
+ continue # doesn't appear in the public interface
+ type_placeholder_name = "PLACEHOLDER_%s" % name
+ placeholders[type_placeholder_name] = get_field_type(
+ node.pos, node.scope.entries[name]
+ )
+
+        # defining these makes the fields introspect more like a Python dataclass
+ field_type_placeholder_name = "PLACEHOLDER_FIELD_TYPE_%s" % name
+ if field.is_initvar:
+ placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode(
+ node.pos, obj=dataclass_module,
+ attribute=EncodedString("_FIELD_INITVAR")
+ )
+ elif field.is_classvar:
+ # TODO - currently this isn't triggered
+ placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode(
+ node.pos, obj=dataclass_module,
+ attribute=EncodedString("_FIELD_CLASSVAR")
+ )
+ else:
+ placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode(
+ node.pos, obj=dataclass_module,
+ attribute=EncodedString("_FIELD")
+ )
+
+ dc_field_keywords = ExprNodes.DictNode.from_pairs(
+ node.pos,
+ [(ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)),
+ FieldRecordNode(node.pos, arg=v))
+             for k, v in field.iterate_record_node_arguments()]
+        )
+ dc_field_call = make_dataclass_call_helper(
+ node.pos, field_func, dc_field_keywords
+ )
+ dc_fields.key_value_pairs.append(
+ ExprNodes.DictItemNode(
+ node.pos,
+ key=ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(name)),
+ value=dc_field_call))
+ dc_fields_namevalue_assignments.append(
+ dedent(u"""\
+ __dataclass_fields__[{0!r}].name = {0!r}
+ __dataclass_fields__[{0!r}].type = {1}
+ __dataclass_fields__[{0!r}]._field_type = {2}
+ """).format(name, type_placeholder_name, field_type_placeholder_name))
+
+ dataclass_fields_assignment = \
+ Nodes.SingleAssignmentNode(node.pos,
+ lhs = ExprNodes.NameNode(node.pos,
+ name=EncodedString("__dataclass_fields__")),
+ rhs = dc_fields)
+
+ dc_fields_namevalue_assignments = u"\n".join(dc_fields_namevalue_assignments)
+ dc_fields_namevalue_assignments = TreeFragment(dc_fields_namevalue_assignments,
+ level="c_class",
+ pipeline=[NormalizeTree(None)])
+ dc_fields_namevalue_assignments = dc_fields_namevalue_assignments.substitute(placeholders)
+
+ return (variables_assignment_stats
+ + [dataclass_fields_assignment]
+ + dc_fields_namevalue_assignments.stats)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/DebugFlags.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/DebugFlags.py
new file mode 100644
index 0000000000000000000000000000000000000000..e830ab1849cf506ec10ab38ebd850a0a398c0431
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/DebugFlags.py
@@ -0,0 +1,21 @@
+# Can be enabled at the command line with --debug-xxx.
+
+debug_disposal_code = 0
+debug_temp_alloc = 0
+debug_coercion = 0
+
+# Write comments into the C code that show where temporary variables
+# are allocated and released.
+debug_temp_code_comments = 0
+
+# Write a call trace of the code generation phase into the C code.
+debug_trace_code_generation = 0
+
+# Do not replace exceptions with user-friendly error messages.
+debug_no_exception_intercept = 0
+
+# Print a message each time a new stage in the pipeline is entered.
+debug_verbose_pipeline = 0
+
+# Raise an exception when an error is encountered.
+debug_exception_on_error = 0
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Errors.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3be0fd8b0216f57c46914f8fc421cb181582590
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Errors.py
@@ -0,0 +1,300 @@
+#
+# Errors
+#
+
+from __future__ import absolute_import
+
+try:
+ from __builtin__ import basestring as any_string_type
+except ImportError:
+ any_string_type = (bytes, str)
+
+import sys
+from contextlib import contextmanager
+
+try:
+ from threading import local as _threadlocal
+except ImportError:
+ class _threadlocal(object): pass
+
+threadlocal = _threadlocal()
+
+from ..Utils import open_new_file
+from . import DebugFlags
+from . import Options
+
+
+class PyrexError(Exception):
+ pass
+
+
+class PyrexWarning(Exception):
+ pass
+
+class CannotSpecialize(PyrexError):
+ pass
+
+def context(position):
+ source = position[0]
+ assert not (isinstance(source, any_string_type)), (
+ "Please replace filename strings with Scanning.FileSourceDescriptor instances %r" % source)
+ try:
+ F = source.get_lines()
+ except UnicodeDecodeError:
+ # file has an encoding problem
+ s = u"[unprintable code]\n"
+ else:
+ s = u''.join(F[max(0, position[1]-6):position[1]])
+ s = u'...\n%s%s^\n' % (s, u' '*(position[2]))
+ s = u'%s\n%s%s\n' % (u'-'*60, s, u'-'*60)
+ return s
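+
+# For a position on line 3, column 5 of a source file, the returned string
+# looks roughly like (a sketch):
+#
+#   ------------------------------------------------------------
+#   ...
+#   <up to the five preceding source lines>
+#   <the offending source line>
+#        ^
+#   ------------------------------------------------------------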
+
+def format_position(position):
+ if position:
+ return u"%s:%d:%d: " % (position[0].get_error_description(),
+ position[1], position[2])
+ return u''
+
+def format_error(message, position):
+ if position:
+ pos_str = format_position(position)
+ cont = context(position)
+ message = u'\nError compiling Cython file:\n%s\n%s%s' % (cont, pos_str, message or u'')
+ return message
+
+class CompileError(PyrexError):
+
+ def __init__(self, position = None, message = u""):
+ self.position = position
+ self.message_only = message
+ self.formatted_message = format_error(message, position)
+ self.reported = False
+ Exception.__init__(self, self.formatted_message)
+ # Python Exception subclass pickling is broken,
+ # see https://bugs.python.org/issue1692335
+ self.args = (position, message)
+
+ def __str__(self):
+ return self.formatted_message
+
+class CompileWarning(PyrexWarning):
+
+ def __init__(self, position = None, message = ""):
+ self.position = position
+ Exception.__init__(self, format_position(position) + message)
+
+class InternalError(Exception):
+ # If this is ever raised, there is a bug in the compiler.
+
+ def __init__(self, message):
+ self.message_only = message
+ Exception.__init__(self, u"Internal compiler error: %s"
+ % message)
+
+class AbortError(Exception):
+ # Throw this to stop the compilation immediately.
+
+ def __init__(self, message):
+ self.message_only = message
+ Exception.__init__(self, u"Abort error: %s" % message)
+
+class CompilerCrash(CompileError):
+ # raised when an unexpected exception occurs in a transform
+ def __init__(self, pos, context, message, cause, stacktrace=None):
+ if message:
+ message = u'\n' + message
+ else:
+ message = u'\n'
+ self.message_only = message
+ if context:
+ message = u"Compiler crash in %s%s" % (context, message)
+ if stacktrace:
+ import traceback
+ message += (
+ u'\n\nCompiler crash traceback from this point on:\n' +
+ u''.join(traceback.format_tb(stacktrace)))
+ if cause:
+ if not stacktrace:
+ message += u'\n'
+ message += u'%s: %s' % (cause.__class__.__name__, cause)
+ CompileError.__init__(self, pos, message)
+ # Python Exception subclass pickling is broken,
+ # see https://bugs.python.org/issue1692335
+ self.args = (pos, context, message, cause, stacktrace)
+
+class NoElementTreeInstalledException(PyrexError):
+ """raised when the user enabled options.gdb_debug but no ElementTree
+ implementation was found
+ """
+
+def open_listing_file(path, echo_to_stderr=True):
+ # Begin a new error listing. If path is None, no file
+ # is opened, the error counter is just reset.
+ if path is not None:
+ threadlocal.cython_errors_listing_file = open_new_file(path)
+ else:
+ threadlocal.cython_errors_listing_file = None
+ if echo_to_stderr:
+ threadlocal.cython_errors_echo_file = sys.stderr
+ else:
+ threadlocal.cython_errors_echo_file = None
+ threadlocal.cython_errors_count = 0
+
+def close_listing_file():
+ if threadlocal.cython_errors_listing_file:
+ threadlocal.cython_errors_listing_file.close()
+ threadlocal.cython_errors_listing_file = None
+
+def report_error(err, use_stack=True):
+ error_stack = threadlocal.cython_errors_stack
+ if error_stack and use_stack:
+ error_stack[-1].append(err)
+ else:
+ # See Main.py for why dual reporting occurs. Quick fix for now.
+ if err.reported: return
+ err.reported = True
+ try: line = u"%s\n" % err
+ except UnicodeEncodeError:
+ # Python <= 2.5 does this for non-ASCII Unicode exceptions
+ line = format_error(getattr(err, 'message_only', "[unprintable exception message]"),
+ getattr(err, 'position', None)) + u'\n'
+ listing_file = threadlocal.cython_errors_listing_file
+ if listing_file:
+ try: listing_file.write(line)
+ except UnicodeEncodeError:
+ listing_file.write(line.encode('ASCII', 'replace'))
+ echo_file = threadlocal.cython_errors_echo_file
+ if echo_file:
+ try: echo_file.write(line)
+ except UnicodeEncodeError:
+ echo_file.write(line.encode('ASCII', 'replace'))
+ threadlocal.cython_errors_count += 1
+ if Options.fast_fail:
+ raise AbortError("fatal errors")
+
+def error(position, message):
+ #print("Errors.error:", repr(position), repr(message)) ###
+ if position is None:
+ raise InternalError(message)
+ err = CompileError(position, message)
+ if DebugFlags.debug_exception_on_error: raise Exception(err) # debug
+ report_error(err)
+ return err
+
+
+LEVEL = 1  # report all warnings and notes of level 1 or higher
+
+def _write_file_encode(file, line):
+ try:
+ file.write(line)
+ except UnicodeEncodeError:
+ file.write(line.encode('ascii', 'replace'))
+
+
+def performance_hint(position, message, env):
+ if not env.directives['show_performance_hints']:
+ return
+ warn = CompileWarning(position, message)
+ line = "performance hint: %s\n" % warn
+ listing_file = threadlocal.cython_errors_listing_file
+ if listing_file:
+ _write_file_encode(listing_file, line)
+ echo_file = threadlocal.cython_errors_echo_file
+ if echo_file:
+ _write_file_encode(echo_file, line)
+ return warn
+
+
+def message(position, message, level=1):
+ if level < LEVEL:
+ return
+ warn = CompileWarning(position, message)
+ line = u"note: %s\n" % warn
+ listing_file = threadlocal.cython_errors_listing_file
+ if listing_file:
+ _write_file_encode(listing_file, line)
+ echo_file = threadlocal.cython_errors_echo_file
+ if echo_file:
+ _write_file_encode(echo_file, line)
+ return warn
+
+
+def warning(position, message, level=0):
+ if level < LEVEL:
+ return
+ if Options.warning_errors and position:
+ return error(position, message)
+ warn = CompileWarning(position, message)
+ line = u"warning: %s\n" % warn
+ listing_file = threadlocal.cython_errors_listing_file
+ if listing_file:
+ _write_file_encode(listing_file, line)
+ echo_file = threadlocal.cython_errors_echo_file
+ if echo_file:
+ _write_file_encode(echo_file, line)
+ return warn
+
+
+def warn_once(position, message, level=0):
+ if level < LEVEL:
+ return
+ warn_once_seen = threadlocal.cython_errors_warn_once_seen
+ if message in warn_once_seen:
+ return
+ warn = CompileWarning(position, message)
+ line = u"warning: %s\n" % warn
+ listing_file = threadlocal.cython_errors_listing_file
+ if listing_file:
+ _write_file_encode(listing_file, line)
+ echo_file = threadlocal.cython_errors_echo_file
+ if echo_file:
+ _write_file_encode(echo_file, line)
+ warn_once_seen.add(message)
+ return warn
+
+
+# These functions can be used to temporarily suppress errors.
+
+def hold_errors():
+ errors = []
+ threadlocal.cython_errors_stack.append(errors)
+ return errors
+
+
+def release_errors(ignore=False):
+ held_errors = threadlocal.cython_errors_stack.pop()
+ if not ignore:
+ for err in held_errors:
+ report_error(err)
+
+
+def held_errors():
+ return threadlocal.cython_errors_stack[-1]
+
+
+# The same, as a context manager:
+
+@contextmanager
+def local_errors(ignore=False):
+ errors = hold_errors()
+ try:
+ yield errors
+ finally:
+ release_errors(ignore=ignore)
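+
+# A typical usage sketch: collect errors from a speculative analysis and
+# decide afterwards whether to report them.
+#
+#     with local_errors(ignore=True) as errors:
+#         ...  # analysis that may call error()
+#     if errors:
+#         ...  # fall back to another strategy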
+
+
+# Keep all global state in thread local storage to support parallel cythonisation in distutils.
+
+def init_thread():
+ threadlocal.cython_errors_count = 0
+ threadlocal.cython_errors_listing_file = None
+ threadlocal.cython_errors_echo_file = None
+ threadlocal.cython_errors_warn_once_seen = set()
+ threadlocal.cython_errors_stack = []
+
+def reset():
+ threadlocal.cython_errors_warn_once_seen.clear()
+ del threadlocal.cython_errors_stack[:]
+
+def get_errors_count():
+ return threadlocal.cython_errors_count
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/ExprNodes.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/ExprNodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..775d2f582fd2db8cfade96cce0719d21b184ce7a
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/ExprNodes.py
@@ -0,0 +1,14743 @@
+#
+# Parse tree nodes for expressions
+#
+
+from __future__ import absolute_import
+
+import cython
+cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
+ CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
+ StringEncoding=object, operator=object, local_errors=object, report_error=object,
+ Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
+ list_type=object, tuple_type=object, set_type=object, dict_type=object,
+ unicode_type=object, str_type=object, bytes_type=object, type_type=object,
+ Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
+ debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
+ bytearray_type=object, slice_type=object, memoryview_type=object,
+ builtin_sequence_types=object, _py_int_types=object,
+ IS_PYTHON3=cython.bint)
+
+import re
+import sys
+import copy
+import os.path
+import operator
+
+from .Errors import (
+ error, warning, InternalError, CompileError, report_error, local_errors,
+ CannotSpecialize, performance_hint)
+from .Code import UtilityCode, TempitaUtilityCode
+from . import StringEncoding
+from . import Naming
+from . import Nodes
+from .Nodes import Node, utility_code_for_imports, SingleAssignmentNode
+from . import PyrexTypes
+from .PyrexTypes import py_object_type, typecast, error_type, \
+ unspecified_type
+from . import TypeSlots
+from .Builtin import (
+ list_type, tuple_type, set_type, dict_type, type_type,
+ unicode_type, str_type, bytes_type, bytearray_type, basestring_type,
+ slice_type, long_type, sequence_types as builtin_sequence_types, memoryview_type,
+)
+from . import Builtin
+from . import Symtab
+from .. import Utils
+from .Annotate import AnnotationItem
+from . import Future
+from ..Debugging import print_call_chain
+from .DebugFlags import debug_disposal_code, debug_coercion
+
+from .Pythran import (to_pythran, is_pythran_supported_type, is_pythran_supported_operation_type,
+ is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran,
+ pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type,
+ pythran_is_numpy_func_supported, pythran_get_func_include_file, pythran_functor)
+from .PyrexTypes import PythranExpr
+
+try:
+ from __builtin__ import basestring
+except ImportError:
+ # Python 3
+ basestring = str
+ any_string_type = (bytes, str)
+else:
+ # Python 2
+ any_string_type = (bytes, unicode)
+
+
+if sys.version_info[0] >= 3:
+ IS_PYTHON3 = True
+ _py_int_types = int
+else:
+ IS_PYTHON3 = False
+ _py_int_types = (int, long)
+
+
+class NotConstant(object):
+ _obj = None
+
+ def __new__(cls):
+ if NotConstant._obj is None:
+ NotConstant._obj = super(NotConstant, cls).__new__(cls)
+
+ return NotConstant._obj
+
+ def __repr__(self):
+ return ""
+
+not_a_constant = NotConstant()
+constant_value_not_set = object()
+
+# error messages when coercing from key[0] to key[1]
+coercion_error_dict = {
+ # string related errors
+ (unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly."
+ " This is not portable and requires explicit encoding."),
+ (unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
+ (unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
+ (unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
+ (unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
+ (unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
+ (bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required",
+ (bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
+ (bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly."
+ " This is not portable to Py3."),
+ (bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
+ (bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): (
+ "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."),
+ (basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
+ (str_type, unicode_type): ("str objects do not support coercion to unicode,"
+ " use a unicode string literal instead (u'')"),
+ (str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
+ (str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
+ (str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
+ (str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
+ (str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
+ (str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).",
+ (str_type, PyrexTypes.c_const_py_unicode_ptr_type): (
+ "'str' objects do not support coercion to C types (use 'unicode'?)."),
+ (PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
+ (PyrexTypes.c_const_char_ptr_type, unicode_type): (
+ "Cannot convert 'char*' to unicode implicitly, decoding required"),
+ (PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
+ (PyrexTypes.c_const_uchar_ptr_type, unicode_type): (
+ "Cannot convert 'char*' to unicode implicitly, decoding required"),
+}
+
+def find_coercion_error(type_tuple, default, env):
+ err = coercion_error_dict.get(type_tuple)
+ if err is None:
+ return default
+ elif (env.directives['c_string_encoding'] and
+ any(t in type_tuple for t in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type,
+ PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type))):
+ if type_tuple[1].is_pyobject:
+ return default
+ elif env.directives['c_string_encoding'] in ('ascii', 'default'):
+ return default
+ else:
+ return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
+ else:
+ return err
+
+
+def default_str_type(env):
+ return {
+ 'bytes': bytes_type,
+ 'bytearray': bytearray_type,
+ 'str': str_type,
+ 'unicode': unicode_type
+ }.get(env.directives['c_string_type'])
+
+
+def check_negative_indices(*nodes):
+ """
+ Raise a warning on nodes that are known to have negative numeric values.
+ Used to find (potential) bugs inside of "wraparound=False" sections.
+ """
+ for node in nodes:
+ if node is None or (
+ not isinstance(node.constant_result, _py_int_types) and
+ not isinstance(node.constant_result, float)):
+ continue
+ if node.constant_result < 0:
+ warning(node.pos,
+ "the result of using negative indices inside of "
+ "code sections marked as 'wraparound=False' is "
+ "undefined", level=1)
+
+
+def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
+ if not seq_node.is_sequence_constructor:
+ if seq_type is None:
+ seq_type = seq_node.infer_type(env)
+ if seq_type is tuple_type:
+ # tuples are immutable => we can safely follow assignments
+ if seq_node.cf_state and len(seq_node.cf_state) == 1:
+ try:
+ seq_node = seq_node.cf_state[0].rhs
+ except AttributeError:
+ pass
+ if seq_node is not None and seq_node.is_sequence_constructor:
+ if index_node is not None and index_node.has_constant_result():
+ try:
+ item = seq_node.args[index_node.constant_result]
+ except (ValueError, TypeError, IndexError):
+ pass
+ else:
+ return item.infer_type(env)
+ # if we're lucky, all items have the same type
+ item_types = {item.infer_type(env) for item in seq_node.args}
+ if len(item_types) == 1:
+ return item_types.pop()
+ return None
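+
+# For example (a sketch): given "t = (1, 2.0)", indexing "t[1]" has a constant
+# index, so the inferred type is that of the float literal; for "t[i]" with an
+# unknown "i", the item types differ and the function returns None.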
+
+
+def make_dedup_key(outer_type, item_nodes):
+ """
+ Recursively generate a deduplication key from a sequence of values.
+ Includes Cython node types to work around the fact that (1, 2.0) == (1.0, 2), for example.
+
+ @param outer_type: The type of the outer container.
+ @param item_nodes: A sequence of constant nodes that will be traversed recursively.
+ @return: A tuple that can be used as a dict key for deduplication.
+ """
+ item_keys = [
+ (py_object_type, None, type(None)) if node is None
+ # For sequences and their "mult_factor", see TupleNode.
+ else make_dedup_key(node.type, [node.mult_factor if node.is_literal else None] + node.args) if node.is_sequence_constructor
+ else make_dedup_key(node.type, (node.start, node.stop, node.step)) if node.is_slice
+ # For constants, look at the Python value type if we don't know the concrete Cython type.
+ else (node.type, node.constant_result,
+ type(node.constant_result) if node.type is py_object_type else None) if node.has_constant_result()
+ # IdentifierStringNode doesn't usually have a "constant_result" set because:
+ # 1. it doesn't usually have unicode_value
+ # 2. it's often created later in the compilation process after ConstantFolding
+ # but should be cacheable
+ else (node.type, node.value, node.unicode_value, "IdentifierStringNode") if isinstance(node, IdentifierStringNode)
+ else None # something we cannot handle => short-circuit below
+ for node in item_nodes
+ ]
+ if None in item_keys:
+ return None
+ return outer_type, tuple(item_keys)
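+
+# For example, (1, 2.0) and (1.0, 2) compare equal as Python tuples, but their
+# dedup keys differ because each item key includes the node's Cython type,
+# roughly (tuple_type, ((int_type, 1, None), (double_type, 2.0, None))) versus
+# (tuple_type, ((double_type, 1.0, None), (int_type, 2, None))).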
+
+
+# Returns a block of code to translate the exception,
+# plus a boolean indicating whether to check for Python exceptions.
+def get_exception_handler(exception_value):
+ if exception_value is None:
+ return "__Pyx_CppExn2PyErr();", False
+ elif (exception_value.type == PyrexTypes.c_char_type
+ and exception_value.value == '*'):
+ return "__Pyx_CppExn2PyErr();", True
+ elif exception_value.type.is_pyobject:
+ return (
+ 'try { throw; } catch(const std::exception& exn) {'
+ 'PyErr_SetString(%s, exn.what());'
+ '} catch(...) { PyErr_SetNone(%s); }' % (
+ exception_value.entry.cname,
+ exception_value.entry.cname),
+ False)
+ else:
+ return (
+ '%s(); if (!PyErr_Occurred())'
+ 'PyErr_SetString(PyExc_RuntimeError, '
+ '"Error converting c++ exception.");' % (
+ exception_value.entry.cname),
+ False)
+
+
+def maybe_check_py_error(code, check_py_exception, pos, nogil):
+ if check_py_exception:
+ if nogil:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ErrOccurredWithGIL", "Exceptions.c"))
+ code.putln(code.error_goto_if("__Pyx_ErrOccurredWithGIL()", pos))
+ else:
+ code.putln(code.error_goto_if("PyErr_Occurred()", pos))
+
+
+def translate_cpp_exception(code, pos, inside, py_result, exception_value, nogil):
+ raise_py_exception, check_py_exception = get_exception_handler(exception_value)
+ code.putln("try {")
+ code.putln("%s" % inside)
+ if py_result:
+ code.putln(code.error_goto_if_null(py_result, pos))
+ maybe_check_py_error(code, check_py_exception, pos, nogil)
+ code.putln("} catch(...) {")
+ if nogil:
+ code.put_ensure_gil(declare_gilstate=True)
+ code.putln(raise_py_exception)
+ if nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(pos))
+ code.putln("}")
+
+def needs_cpp_exception_conversion(node):
+ assert node.exception_check == "+"
+ if node.exception_value is None:
+ return True
+ # exception_value can be a NameNode
+ # (in which case it's used as a handler function and no conversion is needed)
+ if node.exception_value.is_name:
+ return False
+ # or a CharNode with a value of "*"
+ if isinstance(node.exception_value, CharNode) and node.exception_value.value == "*":
+ return True
+ # Most other const-nodes are disallowed after "+" by the parser
+ return False
+
+
+# Used to handle the case where an lvalue expression and an overloaded assignment
+# both have an exception declaration.
+def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code, lhs_exc_val, assign_exc_val, nogil):
+    handle_lhs_exc, lhs_check_py_exc = get_exception_handler(lhs_exc_val)
+ handle_assignment_exc, assignment_check_py_exc = get_exception_handler(assign_exc_val)
+ code.putln("try {")
+ code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code))
+    maybe_check_py_error(code, lhs_check_py_exc, pos, nogil)
+ code.putln("try {")
+ code.putln("__pyx_local_lvalue = %s;" % rhs_code)
+ maybe_check_py_error(code, assignment_check_py_exc, pos, nogil)
+ # Catch any exception from the overloaded assignment.
+ code.putln("} catch(...) {")
+ if nogil:
+ code.put_ensure_gil(declare_gilstate=True)
+ code.putln(handle_assignment_exc)
+ if nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(pos))
+ code.putln("}")
+ # Catch any exception from evaluating lhs.
+ code.putln("} catch(...) {")
+ if nogil:
+ code.put_ensure_gil(declare_gilstate=True)
+ code.putln(handle_lhs_exc)
+ if nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(pos))
+ code.putln('}')
+
+
+class ExprNode(Node):
+ # subexprs [string] Class var holding names of subexpr node attrs
+ # type PyrexType Type of the result
+ # result_code string Code fragment
+ # result_ctype string C type of result_code if different from type
+ # is_temp boolean Result is in a temporary variable
+ # is_sequence_constructor
+ # boolean Is a list or tuple constructor expression
+ # is_starred boolean Is a starred expression (e.g. '*a')
+ # use_managed_ref boolean use ref-counted temps/assignments/etc.
+ # result_is_used boolean indicates that the result will be dropped and the
+ # result_code/temp_result can safely be set to None
+ # is_numpy_attribute boolean Is a Numpy module attribute
+ # annotation ExprNode or None PEP526 annotation for names or expressions
+ # generator_arg_tag None or Node A tag to mark ExprNodes that potentially need to
+ # be changed to a generator argument
+
+ result_ctype = None
+ type = None
+ annotation = None
+ temp_code = None
+ old_temp = None # error checker for multiple frees etc.
+ use_managed_ref = True # can be set by optimisation transforms
+ result_is_used = True
+ is_numpy_attribute = False
+ generator_arg_tag = None
+
+ # The Analyse Expressions phase for expressions is split
+ # into two sub-phases:
+ #
+ # Analyse Types
+ # Determines the result type of the expression based
+ # on the types of its sub-expressions, and inserts
+ # coercion nodes into the expression tree where needed.
+ # Marks nodes which will need to have temporary variables
+ # allocated.
+ #
+ # Allocate Temps
+ # Allocates temporary variables where needed, and fills
+ # in the result_code field of each node.
+ #
+ # ExprNode provides some convenience routines which
+ # perform both of the above phases. These should only
+ # be called from statement nodes, and only when no
+ # coercion nodes need to be added around the expression
+ # being analysed. In that case, the above two phases
+ # should be invoked separately.
+ #
+ # Framework code in ExprNode provides much of the common
+ # processing for the various phases. It makes use of the
+ # 'subexprs' class attribute of ExprNodes, which should
+ # contain a list of the names of attributes which can
+ # hold sub-nodes or sequences of sub-nodes.
+ #
+ # The framework makes use of a number of abstract methods.
+ # Their responsibilities are as follows.
+ #
+ # Declaration Analysis phase
+ #
+ # analyse_target_declaration
+ # Called during the Analyse Declarations phase to analyse
+ # the LHS of an assignment or argument of a del statement.
+ # Nodes which cannot be the LHS of an assignment need not
+ # implement it.
+ #
+ # Expression Analysis phase
+ #
+ # analyse_types
+ # - Call analyse_types on all sub-expressions.
+ # - Check operand types, and wrap coercion nodes around
+ # sub-expressions where needed.
+ # - Set the type of this node.
+ # - If a temporary variable will be required for the
+ # result, set the is_temp flag of this node.
+ #
+ # analyse_target_types
+ # Called during the Analyse Types phase to analyse
+ # the LHS of an assignment or argument of a del
+ # statement. Similar responsibilities to analyse_types.
+ #
+ # target_code
+ # Called by the default implementation of allocate_target_temps.
+ # Should return a C lvalue for assigning to the node. The default
+ # implementation calls calculate_result_code.
+ #
+ # check_const
+ # - Check that this node and its subnodes form a
+ # legal constant expression. If so, do nothing,
+ # otherwise call not_const.
+ #
+ # The default implementation of check_const
+ # assumes that the expression is not constant.
+ #
+ # check_const_addr
+ # - Same as check_const, except check that the
+ # expression is a C lvalue whose address is
+ # constant. Otherwise, call addr_not_const.
+ #
+ # The default implementation of calc_const_addr
+ # assumes that the expression is not a constant
+ # lvalue.
+ #
+ # Code Generation phase
+ #
+ # generate_evaluation_code
+ # - Call generate_evaluation_code for sub-expressions.
+ # - Perform the functions of generate_result_code
+ # (see below).
+ # - If result is temporary, call generate_disposal_code
+ # on all sub-expressions.
+ #
+ # A default implementation of generate_evaluation_code
+ # is provided which uses the following abstract methods:
+ #
+ # generate_result_code
+ # - Generate any C statements necessary to calculate
+ # the result of this node from the results of its
+ # sub-expressions.
+ #
+ # calculate_result_code
+ # - Should return a C code fragment evaluating to the
+ # result. This is only called when the result is not
+ # a temporary.
+ #
+ # generate_assignment_code
+ # Called on the LHS of an assignment.
+ # - Call generate_evaluation_code for sub-expressions.
+ # - Generate code to perform the assignment.
+ # - If the assignment absorbed a reference, call
+ # generate_post_assignment_code on the RHS,
+ # otherwise call generate_disposal_code on it.
+ #
+ # generate_deletion_code
+ # Called on an argument of a del statement.
+ # - Call generate_evaluation_code for sub-expressions.
+ # - Generate code to perform the deletion.
+ # - Call generate_disposal_code on all sub-expressions.
+ #
+ #
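+ # As an illustrative sketch only (a hypothetical node class, not part
+ # of the compiler), a minimal binary addition node might look like:
+ #
+ #   class HypotheticalAddNode(ExprNode):
+ #       subexprs = ['operand1', 'operand2']
+ #
+ #       def analyse_types(self, env):
+ #           self.operand1 = self.operand1.analyse_types(env)
+ #           self.operand2 = self.operand2.analyse_types(env)
+ #           self.type = PyrexTypes.widest_numeric_type(
+ #               self.operand1.type, self.operand2.type)
+ #           self.is_temp = 1
+ #           return self
+ #
+ #       def generate_result_code(self, code):
+ #           code.putln("%s = %s + %s;" % (
+ #               self.result(), self.operand1.result(), self.operand2.result()))
+ #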
+
+ is_sequence_constructor = False
+ is_dict_literal = False
+ is_set_literal = False
+ is_string_literal = False
+ is_attribute = False
+ is_subscript = False
+ is_slice = False
+
+ is_buffer_access = False
+ is_memview_index = False
+ is_memview_slice = False
+ is_memview_broadcast = False
+ is_memview_copy_assignment = False
+
+ is_temp = False
+ has_temp_moved = False # if True then attempting to do anything but free the temp is invalid
+ is_target = False
+ is_starred = False
+
+ constant_result = constant_value_not_set
+
+ child_attrs = property(fget=operator.attrgetter('subexprs'))
+
+ def analyse_annotations(self, env):
+ pass
+
+ def not_implemented(self, method_name):
+ print_call_chain(method_name, "not implemented")
+ raise InternalError(
+ "%s.%s not implemented" % (self.__class__.__name__, method_name))
+
+ def is_lvalue(self):
+ return 0
+
+ def is_addressable(self):
+ return self.is_lvalue() and not self.type.is_memoryviewslice
+
+ def is_ephemeral(self):
+ # An ephemeral node is one whose result is in
+ # a Python temporary and we suspect there are no
+ # other references to it. Certain operations are
+ # disallowed on such values, since they are
+ # likely to result in a dangling pointer.
+ return self.type.is_pyobject and self.is_temp
+
+ def subexpr_nodes(self):
+ # Extract a list of subexpression nodes based
+ # on the contents of the subexprs class attribute.
+ nodes = []
+ for name in self.subexprs:
+ item = getattr(self, name)
+ if item is not None:
+ if type(item) is list:
+ nodes.extend(item)
+ else:
+ nodes.append(item)
+ return nodes
+
+ def result(self):
+ if self.is_temp:
+ #if not self.temp_code:
+ # pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
+ # raise RuntimeError("temp result name not set in %s at %r" % (
+ # self.__class__.__name__, pos))
+ return self.temp_code
+ else:
+ return self.calculate_result_code()
+
+ def _make_move_result_rhs(self, result, optional=False):
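+ # For example (a sketch): a temporary C++ class result "__pyx_t_1"
+ # becomes "std::move(__pyx_t_1)" when the move is required, or
+ # "__PYX_STD_MOVE_IF_SUPPORTED(__pyx_t_1)" when it is merely optional.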
+ if optional and not (self.is_temp and self.type.is_cpp_class and not self.type.is_reference):
+ return result
+ self.has_temp_moved = True
+ return "{}({})".format("__PYX_STD_MOVE_IF_SUPPORTED" if optional else "std::move", result)
+
+ def move_result_rhs(self):
+ return self._make_move_result_rhs(self.result(), optional=True)
+
+ def move_result_rhs_as(self, type):
+ result = self.result_as(type)
+ if not (type.is_reference or type.needs_refcounting):
+ requires_move = type.is_rvalue_reference and self.is_temp
+ result = self._make_move_result_rhs(result, optional=not requires_move)
+ return result
+
+ def pythran_result(self, type_=None):
+ if is_pythran_supported_node_or_none(self):
+ return to_pythran(self)
+
+ assert type_ is not None
+ return to_pythran(self, type_)
+
+ def is_c_result_required(self):
+ """
+ Subtypes may return False here if result temp allocation can be skipped.
+ """
+ return True
+
+ def result_as(self, type = None):
+ # Return the result code cast to the specified C type.
+ if (self.is_temp and self.type.is_pyobject and
+ type != py_object_type):
+ # Allocated temporaries are always PyObject *, which may not
+ # reflect the actual type (e.g. an extension type)
+ return typecast(type, py_object_type, self.result())
+ return typecast(type, self.ctype(), self.result())
+
+ def py_result(self):
+ # Return the result code cast to PyObject *.
+ return self.result_as(py_object_type)
+
+ def ctype(self):
+ # Return the native C type of the result (i.e. the
+ # C type of the result_code expression).
+ return self.result_ctype or self.type
+
+ def get_constant_c_result_code(self):
+ # Return the constant value of this node as a result code
+ # string, or None if the node is not constant. This method
+ # can be called when the constant result code is required
+ # before the code generation phase.
+ #
+ # The return value is a string that can represent a simple C
+ # value, a constant C name or a constant C expression. If the
+ # node type depends on Python code, this must return None.
+ return None
+
+ def calculate_constant_result(self):
+ # Calculate the constant compile time result value of this
+ # expression and store it in ``self.constant_result``. Does
+ # nothing by default, thus leaving ``self.constant_result``
+ # unknown. If valid, the result can be an arbitrary Python
+ # value.
+ #
+ # This must only be called when it is assured that all
+ # sub-expressions have a valid constant_result value. The
+ # ConstantFolding transform will do this.
+ pass
+
+ def has_constant_result(self):
+ return self.constant_result is not constant_value_not_set and \
+ self.constant_result is not not_a_constant
+
+ def compile_time_value(self, denv):
+ # Return value of compile-time expression, or report error.
+ error(self.pos, "Invalid compile-time expression")
+
+ def compile_time_value_error(self, e):
+ error(self.pos, "Error in compile-time expression: %s: %s" % (
+ e.__class__.__name__, e))
+
+ # ------------- Declaration Analysis ----------------
+
+ def analyse_target_declaration(self, env):
+ error(self.pos, "Cannot assign to or delete this")
+
+ def analyse_assignment_expression_target_declaration(self, env):
+ error(self.pos, "Cannot use anything except a name in an assignment expression")
+
+ # ------------- Expression Analysis ----------------
+
+ def analyse_const_expression(self, env):
+ # Called during the analyse_declarations phase of a
+ # constant expression. Analyses the expression's type,
+ # checks whether it is a legal const expression,
+ # and determines its value.
+ node = self.analyse_types(env)
+ node.check_const()
+ return node
+
+ def analyse_expressions(self, env):
+ # Convenience routine performing both the Type
+ # Analysis and Temp Allocation phases for a whole
+ # expression.
+ return self.analyse_types(env)
+
+ def analyse_target_expression(self, env, rhs):
+ # Convenience routine performing both the Type
+ # Analysis and Temp Allocation phases for the LHS of
+ # an assignment.
+ return self.analyse_target_types(env)
+
+ def analyse_boolean_expression(self, env):
+ # Analyse expression and coerce to a boolean.
+ node = self.analyse_types(env)
+ bool = node.coerce_to_boolean(env)
+ return bool
+
+ def analyse_temp_boolean_expression(self, env):
+ # Analyse boolean expression and coerce result into
+ # a temporary. This is used when a branch is to be
+ # performed on the result and we won't have an
+ # opportunity to ensure disposal code is executed
+ # afterwards. By forcing the result into a temporary,
+ # we ensure that all disposal has been done by the
+ # time we get the result.
+ node = self.analyse_types(env)
+ return node.coerce_to_boolean(env).coerce_to_simple(env)
+
+ # --------------- Type Inference -----------------
+
+ def type_dependencies(self, env):
+ # Returns the list of entries whose types must be determined
+ # before the type of self can be inferred.
+ if getattr(self, 'type', None) is not None:
+ return ()
+ return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
+
+ def infer_type(self, env):
+ # Attempt to deduce the type of self.
+ # Differs from analyse_types as it avoids unnecessary
+ # analysis of subexpressions, but can assume everything
+ # in self.type_dependencies() has been resolved.
+ type = getattr(self, 'type', None)
+ if type is not None:
+ return type
+ entry = getattr(self, 'entry', None)
+ if entry is not None:
+ return entry.type
+ self.not_implemented("infer_type")
+
+ def nonlocally_immutable(self):
+ # Returns whether this variable is a safe reference, i.e.
+ # can't be modified as part of globals or closures.
+ return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
+
+ def inferable_item_node(self, index=0):
+ """
+ Return a node that represents the (type) result of an indexing operation,
+ e.g. for tuple unpacking or iteration.
+ """
+ return IndexNode(self.pos, base=self, index=IntNode(
+ self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type))
+
+ # --------------- Type Analysis ------------------
+
+ def analyse_as_module(self, env):
+ # If this node can be interpreted as a reference to a
+ # cimported module, return its scope, else None.
+ return None
+
+ def analyse_as_type(self, env):
+ # If this node can be interpreted as a reference to a
+ # type, return that type, else None.
+ return None
+
+ def analyse_as_specialized_type(self, env):
+ type = self.analyse_as_type(env)
+ if type and type.is_fused and env.fused_to_specific:
+ # While it would be nice to test "if entry.type in env.fused_to_specific"
+ # rather than using try/except, that doesn't work reliably
+ # (mainly for nested fused types).
+ try:
+ return type.specialize(env.fused_to_specific)
+ except KeyError:
+ pass
+ if type and type.is_fused:
+ error(self.pos, "Type is not specific")
+ return type
+
+ def analyse_as_extension_type(self, env):
+ # If this node can be interpreted as a reference to an
+ # extension type or builtin type, return its type, else None.
+ return None
+
+ def analyse_types(self, env):
+ self.not_implemented("analyse_types")
+
+ def analyse_target_types(self, env):
+ return self.analyse_types(env)
+
+ def nogil_check(self, env):
+ # By default, any expression based on Python objects is
+ # prevented in nogil environments. Subtypes must override
+ # this if they can work without the GIL.
+ if self.type and self.type.is_pyobject:
+ self.gil_error()
+
+ def gil_assignment_check(self, env):
+ if env.nogil and self.type.is_pyobject:
+ error(self.pos, "Assignment of Python object not allowed without gil")
+
+ def check_const(self):
+ self.not_const()
+ return False
+
+ def not_const(self):
+ error(self.pos, "Not allowed in a constant expression")
+
+ def check_const_addr(self):
+ self.addr_not_const()
+ return False
+
+ def addr_not_const(self):
+ error(self.pos, "Address is not constant")
+
+ # ----------------- Result Allocation -----------------
+
+ def result_in_temp(self):
+ # Return true if result is in a temporary owned by
+ # this node or one of its subexpressions. Overridden
+ # by certain nodes which can share the result of
+ # a subnode.
+ return self.is_temp
+
+ def target_code(self):
+ # Return code fragment for use as LHS of a C assignment.
+ return self.calculate_result_code()
+
+ def calculate_result_code(self):
+ self.not_implemented("calculate_result_code")
+
+# def release_target_temp(self, env):
+# # Release temporaries used by LHS of an assignment.
+# self.release_subexpr_temps(env)
+
+ def allocate_temp_result(self, code):
+ if self.temp_code:
+ raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
+ type = self.type
+ if not type.is_void:
+ if type.is_pyobject:
+ type = PyrexTypes.py_object_type
+ elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()):
+ self.temp_code = None
+ return
+ self.temp_code = code.funcstate.allocate_temp(
+ type, manage_ref=self.use_managed_ref)
+ else:
+ self.temp_code = None
+
+ def release_temp_result(self, code):
+ if not self.temp_code:
+ if not self.result_is_used:
+ # not used anyway, so ignore if not set up
+ return
+ pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
+ if self.old_temp:
+ raise RuntimeError("temp %s released multiple times in %s at %r" % (
+ self.old_temp, self.__class__.__name__, pos))
+ else:
+ raise RuntimeError("no temp, but release requested in %s at %r" % (
+ self.__class__.__name__, pos))
+ code.funcstate.release_temp(self.temp_code)
+ self.old_temp = self.temp_code
+ self.temp_code = None
+
+ # ---------------- Code Generation -----------------
+
+ def make_owned_reference(self, code):
+ """
+ Make sure we own a reference to result.
+ If the result is in a temp, it is already a new reference.
+ """
+ if not self.result_in_temp():
+ code.put_incref(self.result(), self.ctype())
+
+ def make_owned_memoryviewslice(self, code):
+ """
+ Make sure we own the reference to this memoryview slice.
+ """
+ # TODO ideally this would be shared with "make_owned_reference"
+ if not self.result_in_temp():
+ code.put_incref_memoryviewslice(self.result(), self.type,
+ have_gil=not self.in_nogil_context)
+
+ def generate_evaluation_code(self, code):
+ # Generate code to evaluate this node and
+ # its sub-expressions, and dispose of any
+ # temporary results of its sub-expressions.
+ self.generate_subexpr_evaluation_code(code)
+
+ code.mark_pos(self.pos)
+ if self.is_temp:
+ self.allocate_temp_result(code)
+
+ self.generate_result_code(code)
+ if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr):
+ # If we are temp we do not need to wait until this node is disposed
+ # before disposing children.
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+
+ def generate_subexpr_evaluation_code(self, code):
+ for node in self.subexpr_nodes():
+ node.generate_evaluation_code(code)
+
+ def generate_result_code(self, code):
+ self.not_implemented("generate_result_code")
+
+ def generate_disposal_code(self, code):
+ if self.has_temp_moved:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp"))
+ if self.is_temp:
+ if self.type.is_string or self.type.is_pyunicode_ptr:
+ # postponed from self.generate_evaluation_code()
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ if self.result():
+ code.put_decref_clear(self.result(), self.ctype(),
+ have_gil=not self.in_nogil_context)
+ else:
+ # Already done if self.is_temp
+ self.generate_subexpr_disposal_code(code)
+
+ def generate_subexpr_disposal_code(self, code):
+ # Generate code to dispose of temporary results
+ # of all sub-expressions.
+ for node in self.subexpr_nodes():
+ node.generate_disposal_code(code)
+
+ def generate_post_assignment_code(self, code):
+ if self.is_temp:
+ if self.type.is_string or self.type.is_pyunicode_ptr:
+ # postponed from self.generate_evaluation_code()
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ elif self.type.is_pyobject:
+ code.putln("%s = 0;" % self.result())
+ elif self.type.is_memoryviewslice:
+ code.putln("%s.memview = NULL;" % self.result())
+ code.putln("%s.data = NULL;" % self.result())
+
+ if self.has_temp_moved:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp"))
+ else:
+ self.generate_subexpr_disposal_code(code)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ # Stub method for nodes which are not legal as
+ # the LHS of an assignment. An error will have
+ # been reported earlier.
+ pass
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ # Stub method for nodes that are not legal as
+ # the argument of a del statement. An error
+ # will have been reported earlier.
+ pass
+
+ def free_temps(self, code):
+ if self.is_temp:
+ if not self.type.is_void:
+ self.release_temp_result(code)
+ else:
+ self.free_subexpr_temps(code)
+
+ def free_subexpr_temps(self, code):
+ for sub in self.subexpr_nodes():
+ sub.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ pass
+
+ # ----Generation of small bits of reference counting --
+
+ def generate_decref_set(self, code, rhs):
+ code.put_decref_set(self.result(), self.ctype(), rhs)
+
+ def generate_xdecref_set(self, code, rhs):
+ code.put_xdecref_set(self.result(), self.ctype(), rhs)
+
+ def generate_gotref(self, code, handle_null=False,
+ maybe_null_extra_check=True):
+ if not (handle_null and self.cf_is_null):
+ if (handle_null and self.cf_maybe_null
+ and maybe_null_extra_check):
+ self.generate_xgotref(code)
+ else:
+ code.put_gotref(self.result(), self.ctype())
+
+ def generate_xgotref(self, code):
+ code.put_xgotref(self.result(), self.ctype())
+
+ def generate_giveref(self, code):
+ code.put_giveref(self.result(), self.ctype())
+
+ def generate_xgiveref(self, code):
+ code.put_xgiveref(self.result(), self.ctype())
+
+ # ---------------- Annotation ---------------------
+
+ def annotate(self, code):
+ for node in self.subexpr_nodes():
+ node.annotate(code)
+
+ # ----------------- Coercion ----------------------
+
+ def coerce_to(self, dst_type, env):
+ # Coerce the result so that it can be assigned to
+ # something of type dst_type. If processing is necessary,
+ # wraps this node in a coercion node and returns that.
+ # Otherwise, returns this node unchanged.
+ #
+ # This method is called during the analyse_expressions
+ # phase of the src_node's processing.
+ #
+ # Note that subclasses that override this (especially
+ # ConstNodes) must not (re-)set their own .type attribute
+ # here. Since expression nodes may turn up in different
+ # places in the tree (e.g. inside of CloneNodes in cascaded
+ # assignments), this method must return a new node instance
+ # if it changes the type.
+ #
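+ # For example (a sketch): coercing a C 'int' expression to a Python
+ # object wraps it in a CoerceToPyTypeNode, coercing a Python object
+ # to a C 'double' wraps it in a CoerceFromPyTypeNode, and a matching
+ # type is returned unchanged.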
+ src = self
+ src_type = self.type
+
+ if self.check_for_coercion_error(dst_type, env):
+ return self
+
+ used_as_reference = dst_type.is_reference
+ if used_as_reference and not src_type.is_reference:
+ dst_type = dst_type.ref_base_type
+
+ if src_type.is_cv_qualified:
+ src_type = src_type.cv_base_type
+
+ if src_type.is_fused or dst_type.is_fused:
+ # See if we are coercing a fused function to a pointer to a
+ # specialized function
+ if (src_type.is_cfunction and not dst_type.is_fused and
+ dst_type.is_ptr and dst_type.base_type.is_cfunction):
+
+ dst_type = dst_type.base_type
+
+ for signature in src_type.get_all_specialized_function_types():
+ if signature.same_as(dst_type):
+ src.type = signature
+ src.entry = src.type.entry
+ src.entry.used = True
+ return self
+
+ if src_type.is_fused:
+ error(self.pos, "Type is not specialized")
+ elif src_type.is_null_ptr and dst_type.is_ptr:
+ # NULL can be implicitly cast to any pointer type
+ return self
+ else:
+ error(self.pos, "Cannot coerce to a type that is not specialized")
+
+ self.type = error_type
+ return self
+
+ if self.coercion_type is not None:
+ # This is purely for error checking purposes!
+ node = NameNode(self.pos, name='', type=self.coercion_type)
+ node.coerce_to(dst_type, env)
+
+ if dst_type.is_memoryviewslice:
+ from . import MemoryView
+ if not src.type.is_memoryviewslice:
+ if src.type.is_pyobject:
+ src = CoerceToMemViewSliceNode(src, dst_type, env)
+ elif src.type.is_array:
+ src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env)
+ elif not src_type.is_error:
+ error(self.pos,
+ "Cannot convert '%s' to memoryviewslice" % (src_type,))
+ else:
+ if src.type.writable_needed:
+ dst_type.writable_needed = True
+ if not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
+ copying=self.is_memview_copy_assignment):
+ if src.type.dtype.same_as(dst_type.dtype):
+ msg = "Memoryview '%s' not conformable to memoryview '%s'."
+ tup = src.type, dst_type
+ else:
+ msg = "Different base types for memoryviews (%s, %s)"
+ tup = src.type.dtype, dst_type.dtype
+
+ error(self.pos, msg % tup)
+
+ elif dst_type.is_pyobject:
+ # We never need a type check when assigning None to a Python object type.
+ if src.is_none:
+ pass
+ elif src.constant_result is None:
+ src = NoneNode(src.pos).coerce_to(dst_type, env)
+ else:
+ if not src.type.is_pyobject:
+ if dst_type is bytes_type and src.type.is_int:
+ src = CoerceIntToBytesNode(src, env)
+ else:
+ src = CoerceToPyTypeNode(src, env, type=dst_type)
+ # FIXME: I would expect that CoerceToPyTypeNode(type=dst_type) returns a value of type dst_type
+ # but it doesn't for ctuples. Thus, we add a PyTypeTestNode which then triggers the
+ # Python conversion and becomes useless. That seems backwards and inefficient.
+ # We should not need a PyTypeTestNode after a previous conversion above.
+ if not src.type.subtype_of(dst_type):
+ src = PyTypeTestNode(src, dst_type, env)
+ elif is_pythran_expr(dst_type) and is_pythran_supported_type(src.type):
+ # We let the compiler decide whether this is valid
+ return src
+ elif is_pythran_expr(src.type):
+ if is_pythran_supported_type(dst_type):
+ # Match the case where a pythran expr is assigned to a value, or vice versa.
+ # We let the C++ compiler decide whether this is valid or not!
+ return src
+ # Else, we need to convert the Pythran expression to a Python object
+ src = CoerceToPyTypeNode(src, env, type=dst_type)
+ elif src.type.is_pyobject:
+ if used_as_reference and dst_type.is_cpp_class:
+ warning(
+ self.pos,
+ "Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type)
+ src = CoerceFromPyTypeNode(dst_type, src, env)
+ elif (dst_type.is_complex
+ and src_type != dst_type
+ and dst_type.assignable_from(src_type)):
+ src = CoerceToComplexNode(src, dst_type, env)
+ elif (src_type is PyrexTypes.soft_complex_type
+ and src_type != dst_type
+ and not dst_type.assignable_from(src_type)):
+ src = coerce_from_soft_complex(src, dst_type, env)
+ else:
+ # neither src nor dst are py types
+ # Added the string comparison, since for C types that
+ # is enough, but Cython gets confused when the types are
+ # in different pxi files.
+ # TODO: Remove this hack and require shared declarations.
+ if not (src.type == dst_type or str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
+ self.fail_assignment(dst_type)
+ return src
+
+ def fail_assignment(self, dst_type):
+ extra_diagnostics = dst_type.assignment_failure_extra_info(self.type)
+ if extra_diagnostics:
+ extra_diagnostics = ". " + extra_diagnostics
+ error(self.pos, "Cannot assign type '%s' to '%s'%s" % (
+ self.type, dst_type, extra_diagnostics))
+
+ def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
+ if fail and not default:
+ default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
+ message = find_coercion_error((self.type, dst_type), default, env)
+ if message is not None:
+ error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
+ return True
+ if fail:
+ self.fail_assignment(dst_type)
+ return True
+ return False
+
+ def coerce_to_pyobject(self, env):
+ return self.coerce_to(PyrexTypes.py_object_type, env)
+
+ def coerce_to_boolean(self, env):
+ # Coerce result to something acceptable as
+ # a boolean value.
+
+ # if it's constant, calculate the result now
+ if self.has_constant_result():
+ bool_value = bool(self.constant_result)
+ return BoolNode(self.pos, value=bool_value,
+ constant_result=bool_value)
+
+ type = self.type
+ if type.is_enum or type.is_error:
+ return self
+ elif type is PyrexTypes.c_bint_type:
+ return self
+ elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
+ return CoerceToBooleanNode(self, env)
+ elif type.is_cpp_class and type.scope and type.scope.lookup("operator bool"):
+ return SimpleCallNode(
+ self.pos,
+ function=AttributeNode(
+ self.pos, obj=self, attribute=StringEncoding.EncodedString('operator bool')),
+ args=[]).analyse_types(env)
+ elif type.is_ctuple:
+ bool_value = len(type.components) != 0  # non-empty ctuples are always true
+ return BoolNode(self.pos, value=bool_value,
+ constant_result=bool_value)
+ else:
+ error(self.pos, "Type '%s' not acceptable as a boolean" % type)
+ return self
+
+ def coerce_to_integer(self, env):
+ # If not already some C integer type, coerce to longint.
+ if self.type.is_int:
+ return self
+ else:
+ return self.coerce_to(PyrexTypes.c_long_type, env)
+
+ def coerce_to_temp(self, env):
+ # Ensure that the result is in a temporary.
+ if self.result_in_temp():
+ return self
+ else:
+ return CoerceToTempNode(self, env)
+
+ def coerce_to_simple(self, env):
+ # Ensure that the result is simple (see is_simple).
+ if self.is_simple():
+ return self
+ else:
+ return self.coerce_to_temp(env)
+
+ def is_simple(self):
+ # A node is simple if its result is something that can
+ # be referred to without performing any operations, e.g.
+ # a constant, local var, C global var, struct member
+ # reference, or temporary.
+ return self.result_in_temp()
+
+ def may_be_none(self):
+ if self.type and not (self.type.is_pyobject or
+ self.type.is_memoryviewslice):
+ return False
+ if self.has_constant_result():
+ return self.constant_result is not None
+ return True
+
+ def as_cython_attribute(self):
+ return None
+
+ def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
+ # Wraps the node in a NoneCheckNode if it is not known to be
+ # not-None (e.g. because it is a Python literal).
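+ # Typical usage elsewhere in the compiler (a sketch):
+ #   arg = arg.as_none_safe_node("'NoneType' object is not iterable")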
+ if self.may_be_none():
+ return NoneCheckNode(self, error, message, format_args)
+ else:
+ return self
+
+ @classmethod
+ def from_node(cls, node, **kwargs):
+ """Instantiate this node class from another node, properly
+ copying over all attributes that one would forget otherwise.
+ """
+ attributes = "cf_state cf_maybe_null cf_is_null constant_result".split()
+ for attr_name in attributes:
+ if attr_name in kwargs:
+ continue
+ try:
+ value = getattr(node, attr_name)
+ except AttributeError:
+ pass
+ else:
+ kwargs[attr_name] = value
+ return cls(node.pos, **kwargs)
+
+ def get_known_standard_library_import(self):
+ """
+ Gets the module.path that this node was imported from.
+
+ Many nodes do not have one, or it is ambiguous, in which case
+ this function returns a false value.
+ """
+ return None
+
+
+class AtomicExprNode(ExprNode):
+ # Abstract base class for expression nodes which have
+ # no sub-expressions.
+
+ subexprs = []
+
+ # Override to optimize -- we know we have no children
+ def generate_subexpr_evaluation_code(self, code):
+ pass
+ def generate_subexpr_disposal_code(self, code):
+ pass
+
+class PyConstNode(AtomicExprNode):
+ # Abstract base class for constant Python values.
+
+ is_literal = 1
+ type = py_object_type
+ nogil_check = None
+
+ def is_simple(self):
+ return 1
+
+ def may_be_none(self):
+ return False
+
+ def analyse_types(self, env):
+ return self
+
+ def calculate_result_code(self):
+ return self.value
+
+ def generate_result_code(self, code):
+ pass
+
+
+class NoneNode(PyConstNode):
+ # The constant value None
+
+ is_none = 1
+ value = "Py_None"
+
+ constant_result = None
+
+ def compile_time_value(self, denv):
+ return None
+
+ def may_be_none(self):
+ return True
+
+ def coerce_to(self, dst_type, env):
+ if not (dst_type.is_pyobject or dst_type.is_memoryviewslice or dst_type.is_error):
+ # Catch this error early and loudly.
+ error(self.pos, "Cannot assign None to %s" % dst_type)
+ return super(NoneNode, self).coerce_to(dst_type, env)
+
+
+class EllipsisNode(PyConstNode):
+ # '...' in a subscript list.
+
+ value = "Py_Ellipsis"
+
+ constant_result = Ellipsis
+
+ def compile_time_value(self, denv):
+ return Ellipsis
+
+
+class ConstNode(AtomicExprNode):
+ # Abstract base type for literal constant nodes.
+ #
+ # value string C code fragment
+
+ is_literal = 1
+ nogil_check = None
+
+ def is_simple(self):
+ return 1
+
+ def nonlocally_immutable(self):
+ return 1
+
+ def may_be_none(self):
+ return False
+
+ def analyse_types(self, env):
+ return self # Types are held in class variables
+
+ def check_const(self):
+ return True
+
+ def get_constant_c_result_code(self):
+ return self.calculate_result_code()
+
+ def calculate_result_code(self):
+ return str(self.value)
+
+ def generate_result_code(self, code):
+ pass
+
+
+class BoolNode(ConstNode):
+ type = PyrexTypes.c_bint_type
+ # The constant value True or False
+
+ def calculate_constant_result(self):
+ self.constant_result = self.value
+
+ def compile_time_value(self, denv):
+ return self.value
+
+ def calculate_result_code(self):
+ if self.type.is_pyobject:
+ return 'Py_True' if self.value else 'Py_False'
+ else:
+ return str(int(self.value))
+
+ def coerce_to(self, dst_type, env):
+ if dst_type == self.type:
+ return self
+ if dst_type is py_object_type and self.type is Builtin.bool_type:
+ return self
+ if dst_type.is_pyobject and self.type.is_int:
+ return BoolNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=Builtin.bool_type)
+ if dst_type.is_int and self.type.is_pyobject:
+ return BoolNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=PyrexTypes.c_bint_type)
+ return ConstNode.coerce_to(self, dst_type, env)
+
+
+class NullNode(ConstNode):
+ type = PyrexTypes.c_null_ptr_type
+ value = "NULL"
+ constant_result = 0
+
+ def get_constant_c_result_code(self):
+ return self.value
+
+
+class CharNode(ConstNode):
+ type = PyrexTypes.c_char_type
+
+ def calculate_constant_result(self):
+ self.constant_result = ord(self.value)
+
+ def compile_time_value(self, denv):
+ return ord(self.value)
+
+ def calculate_result_code(self):
+ return "'%s'" % StringEncoding.escape_char(self.value)
+
+
+class IntNode(ConstNode):
+
+ # unsigned "" or "U"
+ # longness "" or "L" or "LL"
+ # is_c_literal True/False/None creator considers this a C integer literal
+
+ unsigned = ""
+ longness = ""
+ is_c_literal = None # unknown
+
+ # hex_value and base_10_value exist only to simplify writing tests,
+ # giving a consistent representation of the value
+ @property
+ def hex_value(self):
+ return Utils.strip_py2_long_suffix(hex(Utils.str_to_number(self.value)))
+
+ @property
+ def base_10_value(self):
+ return str(Utils.str_to_number(self.value))
+
+ def __init__(self, pos, **kwds):
+ ExprNode.__init__(self, pos, **kwds)
+ if 'type' not in kwds:
+ self.type = self.find_suitable_type_for_value()
+
+ def find_suitable_type_for_value(self):
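+ # For example (a sketch): '42' -> c_long_type, '42LL' -> signed
+ # 'long long', '42U' -> 'unsigned int', and a literal outside the
+ # signed 32 bit range without C suffixes -> py_object_type.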
+ if self.constant_result is constant_value_not_set:
+ try:
+ self.calculate_constant_result()
+ except ValueError:
+ pass
+ # we ignore 'is_c_literal = True' and instead map signed 32bit
+ # integers as C long values
+ if self.is_c_literal or \
+ not self.has_constant_result() or \
+ self.unsigned or self.longness == 'LL':
+ # clearly a C literal
+ rank = (self.longness == 'LL') and 2 or 1
+ suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
+ if self.type:
+ suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
+ else:
+ # C literal or Python literal - split at 32bit boundary
+ if -2**31 <= self.constant_result < 2**31:
+ if self.type and self.type.is_int:
+ suitable_type = self.type
+ else:
+ suitable_type = PyrexTypes.c_long_type
+ else:
+ suitable_type = PyrexTypes.py_object_type
+ return suitable_type
+
+ def coerce_to(self, dst_type, env):
+ if self.type is dst_type:
+ return self
+ elif dst_type.is_float:
+ if self.has_constant_result():
+ return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
+ constant_result=float(self.constant_result))
+ else:
+ return FloatNode(self.pos, value=self.value, type=dst_type,
+ constant_result=not_a_constant)
+ if dst_type.is_numeric and not dst_type.is_complex:
+ node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
+ type=dst_type, is_c_literal=True,
+ unsigned=self.unsigned, longness=self.longness)
+ return node
+ elif dst_type.is_pyobject:
+ node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
+ type=PyrexTypes.py_object_type, is_c_literal=False,
+ unsigned=self.unsigned, longness=self.longness)
+ else:
+ # FIXME: not setting the type here to keep it working with
+ # complex numbers. Should they be special cased?
+ node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
+ unsigned=self.unsigned, longness=self.longness)
+ # We still need to perform normal coerce_to processing on the
+ # result, because we might be coercing to an extension type,
+ # in which case a type test node will be needed.
+ return ConstNode.coerce_to(node, dst_type, env)
+
+ def coerce_to_boolean(self, env):
+ return IntNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=PyrexTypes.c_bint_type,
+ unsigned=self.unsigned, longness=self.longness)
+
+ def generate_evaluation_code(self, code):
+ if self.type.is_pyobject:
+ # pre-allocate a Python version of the number
+ # (In hex if sufficiently large to cope with Python's string-to-int limitations.
+ # We use quite a small value of "sufficiently large" - 10**13 is picked as
+ # the approximate point where hex strings become shorter)
+ value = Utils.str_to_number(self.value)
+ formatter = hex if value > (10**13) else str
+ plain_integer_string = formatter(value)
+ plain_integer_string = Utils.strip_py2_long_suffix(plain_integer_string)
+ self.result_code = code.get_py_int(plain_integer_string, self.longness)
+ else:
+ self.result_code = self.get_constant_c_result_code()
+
+ def get_constant_c_result_code(self):
+ unsigned, longness = self.unsigned, self.longness
+ literal = self.value_as_c_integer_string()
+ if not (unsigned or longness) and self.type.is_int and literal[0] == '-' and literal[1] != '0':
+ # negative decimal literal => guess longness from type to prevent wrap-around
+ if self.type.rank >= PyrexTypes.c_longlong_type.rank:
+ longness = 'LL'
+ elif self.type.rank >= PyrexTypes.c_long_type.rank:
+ longness = 'L'
+ return literal + unsigned + longness
+
+ def value_as_c_integer_string(self):
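+ # Example rewrites performed below (a sketch):
+ #   '0o777' -> '0777'  (Py3 octal -> C octal)
+ #   '0b101' -> '5'     (binary has no portable C spelling)
+ #   '-0x10' -> '-16'   (keep negative literals signed in C)
+ #   '123'   -> '0x7B'  (positive decimals become hex, see below)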
+ value = self.value
+ if len(value) <= 2:
+ # too short to go wrong (and simplifies code below)
+ return value
+ neg_sign = ''
+ if value[0] == '-':
+ neg_sign = '-'
+ value = value[1:]
+ if value[0] == '0':
+ literal_type = value[1] # 0'o' - 0'b' - 0'x'
+ # 0x123 hex literals and 0123 octal literals work nicely in C
+ # but C-incompatible Py3 oct/bin notations need conversion
+ if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit():
+ # negative hex/octal literal => prevent C compiler from using
+ # unsigned integer types by converting to decimal (see C standard 6.4.4.1)
+ value = str(Utils.str_to_number(value))
+ elif literal_type in 'oO':
+ value = '0' + value[2:] # '0o123' => '0123'
+ elif literal_type in 'bB':
+ value = str(int(value[2:], 2))
+ elif value.isdigit() and not self.unsigned and not self.longness:
+ if not neg_sign:
+ # C compilers do not consider unsigned types for decimal literals,
+ # but they do for hex (see C standard 6.4.4.1)
+ value = '0x%X' % int(value)
+ return neg_sign + value
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def calculate_constant_result(self):
+ self.constant_result = Utils.str_to_number(self.value)
+
+ def compile_time_value(self, denv):
+ return Utils.str_to_number(self.value)
+
+class FloatNode(ConstNode):
+ type = PyrexTypes.c_double_type
+
+ def calculate_constant_result(self):
+ self.constant_result = float(self.value)
+
+ def compile_time_value(self, denv):
+ float_value = float(self.value)
+ str_float_value = ("%.330f" % float_value).strip('0')
+ str_value = Utils.normalise_float_repr(self.value)
+ if str_value not in (str_float_value, repr(float_value).lstrip('0')):
+ warning(self.pos, "Using this floating point value with DEF may lose precision, using %r" % float_value)
+ return float_value
+
+ def coerce_to(self, dst_type, env):
+ if dst_type.is_pyobject and self.type.is_float:
+ return FloatNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=Builtin.float_type)
+ if dst_type.is_float and self.type.is_pyobject:
+ return FloatNode(
+ self.pos, value=self.value,
+ constant_result=self.constant_result,
+ type=dst_type)
+ return ConstNode.coerce_to(self, dst_type, env)
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def get_constant_c_result_code(self):
+ strval = self.value
+ assert isinstance(strval, basestring)
+ cmpval = repr(float(strval))
+ if cmpval == 'nan':
+ return "(Py_HUGE_VAL * 0)"
+ elif cmpval == 'inf':
+ return "Py_HUGE_VAL"
+ elif cmpval == '-inf':
+ return "(-Py_HUGE_VAL)"
+ else:
+ return strval
+
+ def generate_evaluation_code(self, code):
+ c_value = self.get_constant_c_result_code()
+ if self.type.is_pyobject:
+ self.result_code = code.get_py_float(self.value, c_value)
+ else:
+ self.result_code = c_value
+
+
+def _analyse_name_as_type(name, pos, env):
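+ # For example (a sketch): in a C type context, a name like "long long"
+ # or "unsigned int" parses directly as a basic C type here, without
+ # any scope lookup.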
+ ctype = PyrexTypes.parse_basic_type(name)
+ if ctype is not None and env.in_c_type_context:
+ return ctype
+
+ global_scope = env.global_scope()
+ global_entry = global_scope.lookup(name)
+ if global_entry and global_entry.is_type:
+ type = global_entry.type
+ if (not env.in_c_type_context
+ and type is Builtin.int_type
+ and global_scope.context.language_level == 2):
+ # While we still support Python 2, this needs to be downgraded
+ # to a generic Python object to cover both int and long.
+ # With language_level 3, we keep the type but also accept 'long' in Py2.
+ type = py_object_type
+ if type and (type.is_pyobject or env.in_c_type_context):
+ return type
+ ctype = ctype or type
+
+ # This is fairly heavy, so it's worth trying some easier things above.
+ from .TreeFragment import TreeFragment
+ with local_errors(ignore=True):
+ pos = (pos[0], pos[1], pos[2]-7)
+ try:
+ declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
+ except CompileError:
+ pass
+ else:
+ sizeof_node = declaration.root.stats[0].expr
+ if isinstance(sizeof_node, SizeofTypeNode):
+ sizeof_node = sizeof_node.analyse_types(env)
+ if isinstance(sizeof_node, SizeofTypeNode):
+ type = sizeof_node.arg_type
+ if type and (type.is_pyobject or env.in_c_type_context):
+ return type
+ ctype = ctype or type
+ return ctype
+
+
+class BytesNode(ConstNode):
+ # A char* or bytes literal
+ #
+ # value BytesLiteral
+
+ is_string_literal = True
+ # start off as Python 'bytes' to support len() in O(1)
+ type = bytes_type
+
+ def calculate_constant_result(self):
+ self.constant_result = self.value
+
+ def as_sliced_node(self, start, stop, step=None):
+ value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding)
+ return BytesNode(self.pos, value=value, constant_result=value)
+
+ def compile_time_value(self, denv):
+ return self.value.byteencode()
+
+ def analyse_as_type(self, env):
+ return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)
+
+ def can_coerce_to_char_literal(self):
+ return len(self.value) == 1
+
+ def coerce_to_boolean(self, env):
+ # This is special because testing a C char* for truth directly
+ # would yield the wrong result.
+ bool_value = bool(self.value)
+ return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
+
+ def coerce_to(self, dst_type, env):
+ if self.type == dst_type:
+ return self
+ if dst_type.is_int:
+ if not self.can_coerce_to_char_literal():
+ error(self.pos, "Only single-character string literals can be coerced into ints.")
+ return self
+ if dst_type.is_unicode_char:
+ error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
+ return self
+ return CharNode(self.pos, value=self.value,
+ constant_result=ord(self.value))
+
+ node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result)
+ if dst_type.is_pyobject:
+ if dst_type in (py_object_type, Builtin.bytes_type):
+ node.type = Builtin.bytes_type
+ else:
+ self.check_for_coercion_error(dst_type, env, fail=True)
+ return node
+ elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
+ node.type = dst_type
+ return node
+ elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type):
+ node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type
+ else PyrexTypes.c_char_ptr_type)
+ return CastNode(node, dst_type)
+ elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
+ # Exclude the case of passing a C string literal into a non-const C++ string.
+ if not dst_type.is_cpp_class or dst_type.is_const:
+ node.type = dst_type
+ return node
+
+ # We still need to perform normal coerce_to processing on the
+ # result, because we might be coercing to an extension type,
+ # in which case a type test node will be needed.
+ return ConstNode.coerce_to(node, dst_type, env)
+
+ def generate_evaluation_code(self, code):
+ if self.type.is_pyobject:
+ result = code.get_py_string_const(self.value)
+ elif self.type.is_const:
+ result = code.get_string_const(self.value)
+ else:
+ # not const => use plain C string literal and cast to mutable type
+ literal = self.value.as_c_string_literal()
+ # C++ may require a cast
+ result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal)
+ self.result_code = result
+
+ def get_constant_c_result_code(self):
+ return None # FIXME
+
+ def calculate_result_code(self):
+ return self.result_code
+
+
+class UnicodeNode(ConstNode):
+ # A Py_UNICODE* or unicode literal
+ #
+ # value EncodedString
+ # bytes_value BytesLiteral the literal parsed as bytes string
+ # ('-3' unicode literals only)
+
+ is_string_literal = True
+ bytes_value = None
+ type = unicode_type
+
+ def calculate_constant_result(self):
+ self.constant_result = self.value
+
+ def analyse_as_type(self, env):
+ return _analyse_name_as_type(self.value, self.pos, env)
+
+ def as_sliced_node(self, start, stop, step=None):
+ if StringEncoding.string_contains_surrogates(self.value[:stop]):
+ # this is unsafe as it may give different results
+ # in different runtimes
+ return None
+ value = StringEncoding.EncodedString(self.value[start:stop:step])
+ value.encoding = self.value.encoding
+ if self.bytes_value is not None:
+ bytes_value = StringEncoding.bytes_literal(
+ self.bytes_value[start:stop:step], self.bytes_value.encoding)
+ else:
+ bytes_value = None
+ return UnicodeNode(
+ self.pos, value=value, bytes_value=bytes_value,
+ constant_result=value)
+
+ def coerce_to(self, dst_type, env):
+ if dst_type is self.type:
+ pass
+ elif dst_type.is_unicode_char:
+ if not self.can_coerce_to_char_literal():
+ error(self.pos,
+ "Only single-character Unicode string literals or "
+ "surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
+ return self
+ int_value = ord(self.value)
+ return IntNode(self.pos, type=dst_type, value=str(int_value),
+ constant_result=int_value)
+ elif not dst_type.is_pyobject:
+ if dst_type.is_string and self.bytes_value is not None:
+ # special case: '-3' enforced unicode literal used in a
+ # C char* context
+ return BytesNode(self.pos, value=self.bytes_value).coerce_to(dst_type, env)
+ if dst_type.is_pyunicode_ptr:
+ return UnicodeNode(self.pos, value=self.value, type=dst_type)
+ error(self.pos,
+ "Unicode literals do not support coercion to C types other "
+ "than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
+ "(for strings).")
+ elif dst_type not in (py_object_type, Builtin.basestring_type):
+ self.check_for_coercion_error(dst_type, env, fail=True)
+ return self
+
+ def can_coerce_to_char_literal(self):
+ return len(self.value) == 1
+ ## or (len(self.value) == 2
+ ## and (0xD800 <= self.value[0] <= 0xDBFF)
+ ## and (0xDC00 <= self.value[1] <= 0xDFFF))
+
+ def coerce_to_boolean(self, env):
+ bool_value = bool(self.value)
+ return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
+
+ def contains_surrogates(self):
+ return StringEncoding.string_contains_surrogates(self.value)
+
+ def generate_evaluation_code(self, code):
+ if self.type.is_pyobject:
+ # FIXME: this should go away entirely!
+ # Since string_contains_lone_surrogates() returns False for surrogate pairs in Py2/UCS2,
+ # Py2 can generate different code from Py3 here. Let's hope we get away with claiming that
+ # the processing of surrogate pairs in code was always ambiguous and led to different results
+ # on 16/32-bit Unicode platforms.
+ if StringEncoding.string_contains_lone_surrogates(self.value):
+ # lone (unpaired) surrogates are not really portable and cannot be
+ # decoded by the UTF-8 codec in Py3.3
+ self.result_code = code.get_py_const(py_object_type, 'ustring')
+ data_cname = code.get_string_const(
+ StringEncoding.BytesLiteral(self.value.encode('unicode_escape')))
+ const_code = code.get_cached_constants_writer(self.result_code)
+ if const_code is None:
+ return # already initialised
+ const_code.mark_pos(self.pos)
+ const_code.putln(
+ "%s = PyUnicode_DecodeUnicodeEscape(%s, sizeof(%s) - 1, NULL); %s" % (
+ self.result_code,
+ data_cname,
+ data_cname,
+ const_code.error_goto_if_null(self.result_code, self.pos)))
+ const_code.put_error_if_neg(
+ self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code)
+ else:
+ self.result_code = code.get_py_string_const(self.value)
+ else:
+ self.result_code = code.get_pyunicode_ptr_const(self.value)
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def compile_time_value(self, env):
+ return self.value
+
+
+class StringNode(PyConstNode):
+ # A Python str object, i.e. a byte string in Python 2.x and a
+ # unicode string in Python 3.x
+ #
+ # value BytesLiteral (or EncodedString with ASCII content)
+ # unicode_value EncodedString or None
+ # is_identifier boolean
+
+ type = str_type
+ is_string_literal = True
+ is_identifier = None
+ unicode_value = None
+
+ def calculate_constant_result(self):
+ if self.unicode_value is not None:
+ # only the Unicode value is portable across Py2/3
+ self.constant_result = self.unicode_value
+
+ def analyse_as_type(self, env):
+ return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)
+
+ def as_sliced_node(self, start, stop, step=None):
+ value = type(self.value)(self.value[start:stop:step])
+ value.encoding = self.value.encoding
+ if self.unicode_value is not None:
+ if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
+ # this is unsafe as it may give different results in different runtimes
+ return None
+ unicode_value = StringEncoding.EncodedString(
+ self.unicode_value[start:stop:step])
+ else:
+ unicode_value = None
+ return StringNode(
+ self.pos, value=value, unicode_value=unicode_value,
+ constant_result=value, is_identifier=self.is_identifier)
+
+ def coerce_to(self, dst_type, env):
+ if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
+# if dst_type is Builtin.bytes_type:
+# # special case: bytes = 'str literal'
+# return BytesNode(self.pos, value=self.value)
+ if not dst_type.is_pyobject:
+ return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
+ if dst_type is not Builtin.basestring_type:
+ self.check_for_coercion_error(dst_type, env, fail=True)
+ return self
+
+ def can_coerce_to_char_literal(self):
+ return not self.is_identifier and len(self.value) == 1
+
+ def generate_evaluation_code(self, code):
+ self.result_code = code.get_py_string_const(
+ self.value, identifier=self.is_identifier, is_str=True,
+ unicode_value=self.unicode_value)
+
+ def get_constant_c_result_code(self):
+ return None
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def compile_time_value(self, env):
+ if self.value.is_unicode:
+ return self.value
+ if not IS_PYTHON3:
+ # use plain str/bytes object in Py2
+ return self.value.byteencode()
+ # in Py3, always return a Unicode string
+ if self.unicode_value is not None:
+ return self.unicode_value
+ return self.value.decode('iso8859-1')
+
+
+class IdentifierStringNode(StringNode):
+ # A special str value that represents an identifier (bytes in Py2,
+ # unicode in Py3).
+ is_identifier = True
+
+
+class ImagNode(AtomicExprNode):
+ # Imaginary number literal
+ #
+ # value string imaginary part (float value)
+
+ type = PyrexTypes.c_double_complex_type
+
+ def calculate_constant_result(self):
+ self.constant_result = complex(0.0, float(self.value))
+
+ def compile_time_value(self, denv):
+ return complex(0.0, float(self.value))
+
+ def analyse_types(self, env):
+ self.type.create_declaration_utility_code(env)
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def coerce_to(self, dst_type, env):
+ if self.type is dst_type:
+ return self
+ node = ImagNode(self.pos, value=self.value)
+ if dst_type.is_pyobject:
+ node.is_temp = 1
+ node.type = Builtin.complex_type
+ # We still need to perform normal coerce_to processing on the
+ # result, because we might be coercing to an extension type,
+ # in which case a type test node will be needed.
+ return AtomicExprNode.coerce_to(node, dst_type, env)
+
+ gil_message = "Constructing complex number"
+
+ def calculate_result_code(self):
+ if self.type.is_pyobject:
+ return self.result()
+ else:
+ return "%s(0, %r)" % (self.type.from_parts, float(self.value))
+
+ def generate_result_code(self, code):
+ if self.type.is_pyobject:
+ code.putln(
+ "%s = PyComplex_FromDoubles(0.0, %r); %s" % (
+ self.result(),
+ float(self.value),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class NewExprNode(AtomicExprNode):
+
+ # C++ new statement
+ #
+ # cppclass node c++ class to create
+
+ type = None
+
+ def infer_type(self, env):
+ type = self.cppclass.analyse_as_type(env)
+ if type is None or not type.is_cpp_class:
+ error(self.pos, "new operator can only be applied to a C++ class")
+ self.type = error_type
+ return
+ self.cpp_check(env)
+ constructor = type.get_constructor(self.pos)
+ self.class_type = type
+ self.entry = constructor
+ self.type = constructor.type
+ return self.type
+
+ def analyse_types(self, env):
+ if self.type is None:
+ self.infer_type(env)
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def generate_result_code(self, code):
+ pass
+
+ def calculate_result_code(self):
+ return "new " + self.class_type.empty_declaration_code()
+
+
+class NameNode(AtomicExprNode):
+ # Reference to a local or global variable name.
+ #
+ # name string Python name of the variable
+ # entry Entry Symbol table entry
+ # type_entry Entry For extension type names, the original type entry
+ # cf_is_null boolean Is uninitialized before this node
+ # cf_maybe_null boolean Maybe uninitialized before this node
+ # allow_null boolean Don't raise UnboundLocalError
+ # nogil boolean Whether it is used in a nogil context
+
+ is_name = True
+ is_cython_module = False
+ cython_attribute = None
+ lhs_of_first_assignment = False # TODO: remove me
+ is_used_as_rvalue = 0
+ entry = None
+ type_entry = None
+ cf_maybe_null = True
+ cf_is_null = False
+ allow_null = False
+ nogil = False
+ inferred_type = None
+
+ def as_cython_attribute(self):
+ return self.cython_attribute
+
+ def type_dependencies(self, env):
+ if self.entry is None:
+ self.entry = env.lookup(self.name)
+ if self.entry is not None and self.entry.type.is_unspecified:
+ return (self,)
+ else:
+ return ()
+
+ def infer_type(self, env):
+ if self.entry is None:
+ self.entry = env.lookup(self.name)
+ if self.entry is None or self.entry.type is unspecified_type:
+ if self.inferred_type is not None:
+ return self.inferred_type
+ return py_object_type
+ elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
+ self.name == self.entry.type.name:
+ # Unfortunately the type attribute of type objects
+ # is used for the pointer to the type they represent.
+ return type_type
+ elif self.entry.type.is_cfunction:
+ if self.entry.scope.is_builtin_scope:
+ # special case: optimised builtin functions must be treated as Python objects
+ return py_object_type
+ else:
+ # special case: referring to a C function must return its pointer
+ return PyrexTypes.CPtrType(self.entry.type)
+ else:
+ # If entry is inferred as pyobject it's safe to use local
+ # NameNode's inferred_type.
+ if self.entry.type.is_pyobject and self.inferred_type:
+ # Overflow may happen if integer
+ if not (self.inferred_type.is_int and self.entry.might_overflow):
+ return self.inferred_type
+ return self.entry.type
+
+ def compile_time_value(self, denv):
+ try:
+ return denv.lookup(self.name)
+ except KeyError:
+ error(self.pos, "Compile-time name '%s' not defined" % self.name)
+
+ def get_constant_c_result_code(self):
+ if not self.entry or self.entry.type.is_pyobject:
+ return None
+ return self.entry.cname
+
+ def coerce_to(self, dst_type, env):
+ # If coercing to a generic pyobject and this is a builtin
+ # C function with a Python equivalent, manufacture a NameNode
+ # referring to the Python builtin.
+ #print "NameNode.coerce_to:", self.name, dst_type ###
+ if dst_type is py_object_type:
+ entry = self.entry
+ if entry and entry.is_cfunction:
+ var_entry = entry.as_variable
+ if var_entry:
+ if var_entry.is_builtin and var_entry.is_const:
+ var_entry = env.declare_builtin(var_entry.name, self.pos)
+ node = NameNode(self.pos, name = self.name)
+ node.entry = var_entry
+ node.analyse_rvalue_entry(env)
+ return node
+
+ return super(NameNode, self).coerce_to(dst_type, env)
+
+ def declare_from_annotation(self, env, as_target=False):
+ """Implements PEP 526 annotation typing in a fairly relaxed way.
+
+ Annotations are ignored for global variables.
+ All other annotations are stored on the entry in the symbol table.
+ String literals are allowed and not evaluated.
+ The ambiguous Python types 'int' and 'long' are not evaluated - the 'cython.int' form must be used instead.
+ """
+ name = self.name
+ annotation = self.annotation
+ entry = self.entry or env.lookup_here(name)
+ if not entry:
+ # annotations never create global cdef names
+ if env.is_module_scope:
+ return
+
+ modifiers = ()
+ if (
+ # name: "description" => not a type, but still a declared variable or attribute
+ annotation.expr.is_string_literal
+ # don't do type analysis from annotations if not asked to, but still collect the annotation
+ or not env.directives['annotation_typing']
+ ):
+ atype = None
+ elif env.is_py_class_scope:
+ # For Python class scopes every attribute is a Python object
+ atype = py_object_type
+ else:
+ modifiers, atype = annotation.analyse_type_annotation(env)
+
+ if atype is None:
+ atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
+ elif atype.is_fused and env.fused_to_specific:
+ try:
+ atype = atype.specialize(env.fused_to_specific)
+ except CannotSpecialize:
+ error(self.pos,
+ "'%s' cannot be specialized since its type is not a fused argument to this function" %
+ self.name)
+ atype = error_type
+
+ visibility = 'private'
+ if env.is_c_dataclass_scope:
+ # handle "frozen" directive - full inspection of the dataclass directives happens
+ # in Dataclass.py
+ is_frozen = env.is_c_dataclass_scope == "frozen"
+ if atype.is_pyobject or atype.can_coerce_to_pyobject(env):
+ visibility = 'readonly' if is_frozen else 'public'
+ # If the object can't be coerced that's fine - we just don't create a property
+
+ if as_target and env.is_c_class_scope and not (atype.is_pyobject or atype.is_error):
+ # TODO: this will need revising slightly if annotated cdef attributes are implemented
+ atype = py_object_type
+ warning(annotation.pos, "Annotation ignored since class-level attributes must be Python objects. "
+ "Were you trying to set up an instance attribute?", 2)
+
+ entry = self.entry = env.declare_var(
+ name, atype, self.pos, is_cdef=not as_target, visibility=visibility,
+ pytyping_modifiers=modifiers)
+
+ # Even if the entry already exists, make sure we're supplying an annotation if we can.
+ if annotation and not entry.annotation:
+ entry.annotation = annotation
+
+ def analyse_as_module(self, env):
+ # Try to interpret this as a reference to a cimported module.
+ # Returns the module scope, or None.
+ entry = self.entry
+ if not entry:
+ entry = env.lookup(self.name)
+ if entry and entry.as_module:
+ return entry.as_module
+ if entry and entry.known_standard_library_import:
+ scope = Builtin.get_known_standard_library_module_scope(entry.known_standard_library_import)
+ if scope and scope.is_module_scope:
+ return scope
+ return None
+
+ def analyse_as_type(self, env):
+ type = None
+ if self.cython_attribute:
+ type = PyrexTypes.parse_basic_type(self.cython_attribute)
+ elif env.in_c_type_context:
+ type = PyrexTypes.parse_basic_type(self.name)
+ if type:
+ return type
+
+ entry = self.entry
+ if not entry:
+ entry = env.lookup(self.name)
+ if entry and not entry.is_type and entry.known_standard_library_import:
+ entry = Builtin.get_known_standard_library_entry(entry.known_standard_library_import)
+ if entry and entry.is_type:
+ # Infer equivalent C types instead of Python types when possible.
+ type = entry.type
+ if not env.in_c_type_context and type is Builtin.long_type:
+ # Try to give a helpful warning when users write plain C type names.
+ warning(self.pos, "Found Python 2.x type 'long' in a Python annotation. Did you mean to use 'cython.long'?")
+ type = py_object_type
+ elif type.is_pyobject and type.equivalent_type:
+ type = type.equivalent_type
+ elif type is Builtin.int_type and env.global_scope().context.language_level == 2:
+ # While we still support Python 2, this must be a plain object
+ # so that it can be either int or long. With language_level=3 (or 3str),
+ # we pick up the type but still accept both int and long in Py2.
+ type = py_object_type
+ return type
+ if self.name == 'object':
+ # This is normally parsed as "simple C type", but not if we don't parse C types.
+ return py_object_type
+
+ # Try to give a helpful warning when users write plain C type names.
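+ # e.g. annotating "x: double" in Python code suggests "cython.double" instead.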
+ if not env.in_c_type_context and PyrexTypes.parse_basic_type(self.name):
+ warning(self.pos, "Found C type '%s' in a Python annotation. Did you mean to use 'cython.%s'?" % (self.name, self.name))
+
+ return None
+
+ def analyse_as_extension_type(self, env):
+ # Try to interpret this as a reference to an extension type.
+ # Returns the extension type, or None.
+ entry = self.entry
+ if not entry:
+ entry = env.lookup(self.name)
+ if entry and entry.is_type:
+ if entry.type.is_extension_type or entry.type.is_builtin_type:
+ return entry.type
+ return None
+
+ def analyse_target_declaration(self, env):
+ return self._analyse_target_declaration(env, is_assignment_expression=False)
+
+ def analyse_assignment_expression_target_declaration(self, env):
+ return self._analyse_target_declaration(env, is_assignment_expression=True)
+
+ def _analyse_target_declaration(self, env, is_assignment_expression):
+ self.is_target = True
+ if not self.entry:
+ if is_assignment_expression:
+ self.entry = env.lookup_assignment_expression_target(self.name)
+ else:
+ self.entry = env.lookup_here(self.name)
+ if self.entry:
+ self.entry.known_standard_library_import = "" # already exists somewhere and so is now ambiguous
+ if not self.entry and self.annotation is not None:
+ # name : type = ...
+ is_dataclass = env.is_c_dataclass_scope
+ # In a dataclass, an assignment should not prevent a name from becoming an instance attribute.
+ # Hence, "as_target = not is_dataclass".
+ self.declare_from_annotation(env, as_target=not is_dataclass)
+ elif (self.entry and self.entry.is_inherited and
+ self.annotation and env.is_c_dataclass_scope):
+ error(self.pos, "Cannot redeclare inherited fields in Cython dataclasses")
+ if not self.entry:
+ if env.directives['warn.undeclared']:
+ warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
+ if env.directives['infer_types'] != False:
+ type = unspecified_type
+ else:
+ type = py_object_type
+ if is_assignment_expression:
+ self.entry = env.declare_assignment_expression_target(self.name, type, self.pos)
+ else:
+ self.entry = env.declare_var(self.name, type, self.pos)
+ if self.entry.is_declared_generic:
+ self.result_ctype = py_object_type
+ if self.entry.as_module:
+ # cimported modules namespace can shadow actual variables
+ self.entry.is_variable = 1
+
+ def analyse_types(self, env):
+ self.initialized_check = env.directives['initializedcheck']
+ entry = self.entry
+ if entry is None:
+ entry = env.lookup(self.name)
+ if not entry:
+ entry = env.declare_builtin(self.name, self.pos)
+ if entry and entry.is_builtin and entry.is_const:
+ self.is_literal = True
+ if not entry:
+ self.type = PyrexTypes.error_type
+ return self
+ self.entry = entry
+ entry.used = 1
+ if entry.type.is_buffer:
+ from . import Buffer
+ Buffer.used_buffer_aux_vars(entry)
+ self.analyse_rvalue_entry(env)
+ return self
+
+ def analyse_target_types(self, env):
+ self.analyse_entry(env, is_target=True)
+
+ entry = self.entry
+ if entry.is_cfunction and entry.as_variable:
+ # FIXME: unify "is_overridable" flags below
+ if (entry.is_overridable or entry.type.is_overridable) or not self.is_lvalue() and entry.fused_cfunction:
+ # We need this for assigning to cpdef names and for the fused 'def' TreeFragment
+ entry = self.entry = entry.as_variable
+ self.type = entry.type
+
+ if self.type.is_const:
+ error(self.pos, "Assignment to const '%s'" % self.name)
+ if not self.is_lvalue():
+ error(self.pos, "Assignment to non-lvalue '%s'" % self.name)
+ self.type = PyrexTypes.error_type
+ entry.used = 1
+ if entry.type.is_buffer:
+ from . import Buffer
+ Buffer.used_buffer_aux_vars(entry)
+ return self
+
+ def analyse_rvalue_entry(self, env):
+ #print "NameNode.analyse_rvalue_entry:", self.name ###
+ #print "Entry:", self.entry.__dict__ ###
+ self.analyse_entry(env)
+ entry = self.entry
+
+ if entry.is_declared_generic:
+ self.result_ctype = py_object_type
+
+ if entry.is_pyglobal or entry.is_builtin:
+ if entry.is_builtin and entry.is_const:
+ self.is_temp = 0
+ else:
+ self.is_temp = 1
+
+ self.is_used_as_rvalue = 1
+ elif entry.type.is_memoryviewslice:
+ self.is_temp = False
+ self.is_used_as_rvalue = True
+ self.use_managed_ref = True
+ return self
+
+ def nogil_check(self, env):
+ self.nogil = True
+ if self.is_used_as_rvalue:
+ entry = self.entry
+ if entry.is_builtin:
+ if not entry.is_const: # cached builtins are ok
+ self.gil_error()
+ elif entry.is_pyglobal:
+ self.gil_error()
+
+ gil_message = "Accessing Python global or builtin"
+
+ def analyse_entry(self, env, is_target=False):
+ #print "NameNode.analyse_entry:", self.name ###
+ self.check_identifier_kind()
+ entry = self.entry
+ type = entry.type
+ if (not is_target and type.is_pyobject and self.inferred_type and
+ self.inferred_type.is_builtin_type):
+ # assume that type inference is smarter than the static entry
+ type = self.inferred_type
+ self.type = type
+
+ def check_identifier_kind(self):
+ # Check that this is an appropriate kind of name for use in an
+ # expression. Also finds the variable entry associated with
+ # an extension type.
+ entry = self.entry
+ if entry.is_type and entry.type.is_extension_type:
+ self.type_entry = entry
+ if entry.is_type and (entry.type.is_enum or entry.type.is_cpp_enum):
+ py_entry = Symtab.Entry(self.name, None, py_object_type)
+ py_entry.is_pyglobal = True
+ py_entry.scope = self.entry.scope
+ self.entry = py_entry
+ elif not (entry.is_const or entry.is_variable or
+ entry.is_builtin or entry.is_cfunction or
+ entry.is_cpp_class):
+ if self.entry.as_variable:
+ self.entry = self.entry.as_variable
+ elif not self.is_cython_module:
+ error(self.pos, "'%s' is not a constant, variable or function identifier" % self.name)
+
+ def is_cimported_module_without_shadow(self, env):
+ if self.is_cython_module or self.cython_attribute:
+ return False
+ entry = self.entry or env.lookup(self.name)
+ return entry.as_module and not entry.is_variable
+
+ def is_simple(self):
+ # If it's not a C variable, it'll be in a temp.
+ return 1
+
+ def may_be_none(self):
+ if self.cf_state and self.type and (self.type.is_pyobject or
+ self.type.is_memoryviewslice):
+ # guard against infinite recursion on self-dependencies
+ if getattr(self, '_none_checking', False):
+ # self-dependency - either this node receives a None
+ # value from *another* node, or it cannot reference
+ # None at this point => safe to assume "not None"
+ return False
+ self._none_checking = True
+ # evaluate control flow state to see if there were any
+ # potential None values assigned to the node so far
+ may_be_none = False
+ for assignment in self.cf_state:
+ if assignment.rhs.may_be_none():
+ may_be_none = True
+ break
+ del self._none_checking
+ return may_be_none
+ return super(NameNode, self).may_be_none()
+
+ def nonlocally_immutable(self):
+ if ExprNode.nonlocally_immutable(self):
+ return True
+ entry = self.entry
+ if not entry or entry.in_closure:
+ return False
+ return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
+
+ def calculate_target_results(self, env):
+ pass
+
+ def check_const(self):
+ entry = self.entry
+ if entry is not None and not (
+ entry.is_const or
+ entry.is_cfunction or
+ entry.is_builtin or
+ entry.type.is_const):
+ self.not_const()
+ return False
+ return True
+
+ def check_const_addr(self):
+ entry = self.entry
+ if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
+ self.addr_not_const()
+ return False
+ return True
+
+ def is_lvalue(self):
+ return (
+ self.entry.is_variable and
+ not self.entry.is_readonly
+ ) or (
+ self.entry.is_cfunction and
+ self.entry.is_overridable
+ )
+
+ def is_addressable(self):
+ return self.entry.is_variable and not self.type.is_memoryviewslice
+
+ def is_ephemeral(self):
+ # Name nodes are never ephemeral, even if the
+ # result is in a temporary.
+ return 0
+
+ def calculate_result_code(self):
+ entry = self.entry
+ if not entry:
+ return "" # There was an error earlier
+ if self.entry.is_cpp_optional and not self.is_target:
+ return "(*%s)" % entry.cname
+ return entry.cname
+
+ def generate_result_code(self, code):
+ entry = self.entry
+ if entry is None:
+ return # There was an error earlier
+ if entry.utility_code:
+ code.globalstate.use_utility_code(entry.utility_code)
+ if entry.is_builtin and entry.is_const:
+ return # Lookup already cached
+ elif entry.is_pyclass_attr:
+ assert entry.type.is_pyobject, "Python global or builtin not a Python object"
+ interned_cname = code.intern_identifier(self.entry.name)
+ if entry.is_builtin:
+ namespace = Naming.builtins_cname
+ else: # entry.is_pyglobal
+ namespace = entry.scope.namespace_cname
+ if not self.cf_is_null:
+ code.putln(
+ '%s = PyObject_GetItem(%s, %s);' % (
+ self.result(),
+ namespace,
+ interned_cname))
+ code.putln('if (unlikely(!%s)) {' % self.result())
+ code.putln('PyErr_Clear();')
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
+ code.putln(
+ '__Pyx_GetModuleGlobalName(%s, %s);' % (
+ self.result(),
+ interned_cname))
+ if not self.cf_is_null:
+ code.putln("}")
+ code.putln(code.error_goto_if_null(self.result(), self.pos))
+ self.generate_gotref(code)
+
+ elif entry.is_builtin and not entry.scope.is_module_scope:
+ # known builtin
+ assert entry.type.is_pyobject, "Python global or builtin not a Python object"
+ interned_cname = code.intern_identifier(self.entry.name)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
+ code.putln(
+ '%s = __Pyx_GetBuiltinName(%s); %s' % (
+ self.result(),
+ interned_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+ elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope):
+ # name in class body, global name or unknown builtin
+ assert entry.type.is_pyobject, "Python global or builtin not a Python object"
+ interned_cname = code.intern_identifier(self.entry.name)
+ if entry.scope.is_module_scope:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
+ code.putln(
+ '__Pyx_GetModuleGlobalName(%s, %s); %s' % (
+ self.result(),
+ interned_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ else:
+ # FIXME: is_pyglobal is also used for class namespace
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
+ code.putln(
+ '__Pyx_GetNameInClass(%s, %s, %s); %s' % (
+ self.result(),
+ entry.scope.namespace_cname,
+ interned_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+ elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
+ # Raise UnboundLocalError for objects and memoryviewslices
+ raise_unbound = (
+ (self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
+
+ memslice_check = entry.type.is_memoryviewslice and self.initialized_check
+ optional_cpp_check = entry.is_cpp_optional and self.initialized_check
+
+ if optional_cpp_check:
+ unbound_check_code = entry.type.cpp_optional_check_for_null_code(entry.cname)
+ else:
+ unbound_check_code = entry.type.check_for_null_code(entry.cname)
+
+ if unbound_check_code and raise_unbound and (entry.type.is_pyobject or memslice_check or optional_cpp_check):
+ code.put_error_if_unbound(self.pos, entry, self.in_nogil_context, unbound_check_code=unbound_check_code)
+
+ elif entry.is_cglobal and entry.is_cpp_optional and self.initialized_check:
+ unbound_check_code = entry.type.cpp_optional_check_for_null_code(entry.cname)
+ code.put_error_if_unbound(self.pos, entry, unbound_check_code=unbound_check_code)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ #print "NameNode.generate_assignment_code:", self.name ###
+ entry = self.entry
+ if entry is None:
+ return # There was an error earlier
+
+ if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
+ and not self.lhs_of_first_assignment and not rhs.in_module_scope):
+ error(self.pos, "Literal list must be assigned to pointer at time of declaration")
+
+ # is_pyglobal seems to be True for module-level globals only.
+ # We use this to access class->tp_dict if necessary.
+ if entry.is_pyglobal:
+ assert entry.type.is_pyobject, "Python global or builtin not a Python object"
+ interned_cname = code.intern_identifier(self.entry.name)
+ namespace = self.entry.scope.namespace_cname
+ if entry.is_member:
+ # if the entry is a member we have to cheat: SetAttr does not work
+ # on types, so we create a descriptor which is then added to tp_dict.
+ setter = '__Pyx_SetItemOnTypeDict'
+ elif entry.scope.is_module_scope:
+ setter = 'PyDict_SetItem'
+ namespace = Naming.moddict_cname
+ elif entry.is_pyclass_attr:
+ # Special-case setting __new__
+ n = "SetNewInClass" if self.name == "__new__" else "SetNameInClass"
+ code.globalstate.use_utility_code(UtilityCode.load_cached(n, "ObjectHandling.c"))
+ setter = '__Pyx_' + n
+ else:
+ assert False, repr(entry)
+ code.put_error_if_neg(
+ self.pos,
+ '%s(%s, %s, %s)' % (
+ setter,
+ namespace,
+ interned_cname,
+ rhs.py_result()))
+ if debug_disposal_code:
+ print("NameNode.generate_assignment_code:")
+ print("...generating disposal code for %s" % rhs)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+ if entry.is_member:
+ # in Py2.6+, we need to invalidate the method cache
+ code.putln("PyType_Modified(%s);" %
+ entry.scope.parent_type.typeptr_cname)
+ else:
+ if self.type.is_memoryviewslice:
+ self.generate_acquire_memoryviewslice(rhs, code)
+
+ elif self.type.is_buffer:
+ # Generate code for doing the buffer release/acquisition.
+ # This might raise an exception in which case the assignment (done
+ # below) will not happen.
+ #
+ # The reason this is not in a typetest-like node is because the
+ # variables that the acquired buffer info is stored to is allocated
+ # per entry and coupled with it.
+ self.generate_acquire_buffer(rhs, code)
+ assigned = False
+ if self.type.is_pyobject:
+ #print "NameNode.generate_assignment_code: to", self.name ###
+ #print "...from", rhs ###
+ #print "...LHS type", self.type, "ctype", self.ctype() ###
+ #print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
+ if self.use_managed_ref:
+ rhs.make_owned_reference(code)
+ is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
+ if is_external_ref:
+ self.generate_gotref(code, handle_null=True)
+ assigned = True
+ if entry.is_cglobal:
+ self.generate_decref_set(code, rhs.result_as(self.ctype()))
+ else:
+ if not self.cf_is_null:
+ if self.cf_maybe_null:
+ self.generate_xdecref_set(code, rhs.result_as(self.ctype()))
+ else:
+ self.generate_decref_set(code, rhs.result_as(self.ctype()))
+ else:
+ assigned = False
+ if is_external_ref:
+ rhs.generate_giveref(code)
+ if not self.type.is_memoryviewslice:
+ if not assigned:
+ if overloaded_assignment:
+ result = rhs.move_result_rhs()
+ if exception_check == '+':
+ translate_cpp_exception(
+ code, self.pos,
+ '%s = %s;' % (self.result(), result),
+ self.result() if self.type.is_pyobject else None,
+ exception_value, self.in_nogil_context)
+ else:
+ code.putln('%s = %s;' % (self.result(), result))
+ else:
+ result = rhs.move_result_rhs_as(self.ctype())
+
+ if is_pythran_expr(self.type):
+ code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result))
+ elif result != self.result():
+ code.putln('%s = %s;' % (self.result(), result))
+ if debug_disposal_code:
+ print("NameNode.generate_assignment_code:")
+ print("...generating post-assignment code for %s" % rhs)
+ rhs.generate_post_assignment_code(code)
+ elif rhs.result_in_temp():
+ rhs.generate_post_assignment_code(code)
+
+ rhs.free_temps(code)
+
+ def generate_acquire_memoryviewslice(self, rhs, code):
+ """
+ Slices, coercions from objects, return values, etc. are new references.
+ We only have a borrowed reference in the case of a direct "dst = src" assignment.
+ """
+ from . import MemoryView
+
+ MemoryView.put_acquire_memoryviewslice(
+ lhs_cname=self.result(),
+ lhs_type=self.type,
+ lhs_pos=self.pos,
+ rhs=rhs,
+ code=code,
+ have_gil=not self.in_nogil_context,
+ first_assignment=self.cf_is_null)
+
+ def generate_acquire_buffer(self, rhs, code):
+ # rhstmp is only used when the rhs is a complicated expression that yields
+ # the object, to avoid repeating the same C expression for every reference
+ # to the rhs. It does NOT hold a reference.
+ pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
+ if pretty_rhs:
+ rhstmp = rhs.result_as(self.ctype())
+ else:
+ rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
+ code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
+
+ from . import Buffer
+ Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
+ is_initialized=not self.lhs_of_first_assignment,
+ pos=self.pos, code=code)
+
+ if not pretty_rhs:
+ code.putln("%s = 0;" % rhstmp)
+ code.funcstate.release_temp(rhstmp)
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ if self.entry is None:
+ return # There was an error earlier
+ elif self.entry.is_pyclass_attr:
+ namespace = self.entry.scope.namespace_cname
+ interned_cname = code.intern_identifier(self.entry.name)
+ if ignore_nonexisting:
+ key_error_code = 'PyErr_Clear(); else'
+ else:
+ # minor hack: fake a NameError on KeyError
+ key_error_code = (
+ '{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
+ self.entry.name)
+ code.putln(
+ 'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
+ ' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
+ ' %s '
+ '}' % (namespace, interned_cname,
+ key_error_code,
+ code.error_goto(self.pos)))
+ elif self.entry.is_pyglobal:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
+ interned_cname = code.intern_identifier(self.entry.name)
+ del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
+ Naming.module_cname, interned_cname)
+ if ignore_nonexisting:
+ code.putln(
+ 'if (unlikely(%s < 0)) {'
+ ' if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s '
+ '}' % (del_code, code.error_goto(self.pos)))
+ else:
+ code.put_error_if_neg(self.pos, del_code)
+ elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
+ if not self.cf_is_null:
+ if self.cf_maybe_null and not ignore_nonexisting:
+ code.put_error_if_unbound(self.pos, self.entry)
+
+ if self.entry.in_closure:
+ # generator
+ self.generate_gotref(code, handle_null=True, maybe_null_extra_check=ignore_nonexisting)
+ if ignore_nonexisting and self.cf_maybe_null:
+ code.put_xdecref_clear(self.result(), self.ctype(),
+ have_gil=not self.nogil)
+ else:
+ code.put_decref_clear(self.result(), self.ctype(),
+ have_gil=not self.nogil)
+ else:
+ error(self.pos, "Deletion of C names not supported")
+
+ def annotate(self, code):
+ if getattr(self, 'is_called', False):
+ pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
+ if self.type.is_pyobject:
+ style, text = 'py_call', 'python function (%s)'
+ else:
+ style, text = 'c_call', 'c function (%s)'
+ code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
+
+ def get_known_standard_library_import(self):
+ if self.entry:
+ return self.entry.known_standard_library_import
+ return None
+
+class BackquoteNode(ExprNode):
+ # `expr`
+ #
+ # arg ExprNode
+
+ type = py_object_type
+
+ subexprs = ['arg']
+
+ def analyse_types(self, env):
+ self.arg = self.arg.analyse_types(env)
+ self.arg = self.arg.coerce_to_pyobject(env)
+ self.is_temp = 1
+ return self
+
+ gil_message = "Backquote expression"
+
+ def calculate_constant_result(self):
+ self.constant_result = repr(self.arg.constant_result)
+
+ def generate_result_code(self, code):
+ code.putln(
+ "%s = PyObject_Repr(%s); %s" % (
+ self.result(),
+ self.arg.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class ImportNode(ExprNode):
+ # Used as part of import statement implementation.
+ # Implements result =
+ # __import__(module_name, globals(), None, name_list, level)
+ #
+ # module_name StringNode dotted name of module. Empty module
+ # name means importing the parent package according
+ # to level
+ # name_list ListNode or None list of names to be imported
+ # level int relative import level:
+ # -1: attempt both relative import and absolute import;
+ # 0: absolute import;
+ # >0: the number of parent directories to search
+ # relative to the current module.
+ # None: decide the level according to language level and
+ # directives
+ # get_top_level_module int true: return top-level module, false: return imported module
+ # module_names TupleNode the separate names of the module and submodules, or None
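+ #
+ # Illustrative examples (assuming standard __import__() semantics):
+ #   "import a.b.c"    -> module_name "a.b.c", name_list None, level 0 under absolute import
+ #   "from . import x" -> module_name "", name_list ["x"], level 1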
+
+ type = py_object_type
+ module_names = None
+ get_top_level_module = False
+ is_temp = True
+
+ subexprs = ['module_name', 'name_list', 'module_names']
+
+ def analyse_types(self, env):
+ if self.level is None:
+ # For modules in packages, and without 'absolute_import' enabled, try relative (Py2) import first.
+ if env.global_scope().parent_module and (
+ env.directives['py2_import'] or
+ Future.absolute_import not in env.global_scope().context.future_directives):
+ self.level = -1
+ else:
+ self.level = 0
+ module_name = self.module_name.analyse_types(env)
+ self.module_name = module_name.coerce_to_pyobject(env)
+ assert self.module_name.is_string_literal
+ if self.name_list:
+ name_list = self.name_list.analyse_types(env)
+ self.name_list = name_list.coerce_to_pyobject(env)
+ elif '.' in self.module_name.value:
+ self.module_names = TupleNode(self.module_name.pos, args=[
+ IdentifierStringNode(self.module_name.pos, value=part, constant_result=part)
+ for part in map(StringEncoding.EncodedString, self.module_name.value.split('.'))
+ ]).analyse_types(env)
+ return self
+
+ gil_message = "Python import"
+
+ def generate_result_code(self, code):
+ assert self.module_name.is_string_literal
+ module_name = self.module_name.value
+
+ if self.level <= 0 and not self.name_list and not self.get_top_level_module:
+ if self.module_names:
+ assert self.module_names.is_literal # make sure we create the tuple only once
+ if self.level == 0:
+ utility_code = UtilityCode.load_cached("ImportDottedModule", "ImportExport.c")
+ helper_func = "__Pyx_ImportDottedModule"
+ else:
+ utility_code = UtilityCode.load_cached("ImportDottedModuleRelFirst", "ImportExport.c")
+ helper_func = "__Pyx_ImportDottedModuleRelFirst"
+ code.globalstate.use_utility_code(utility_code)
+ import_code = "%s(%s, %s)" % (
+ helper_func,
+ self.module_name.py_result(),
+ self.module_names.py_result() if self.module_names else 'NULL',
+ )
+ else:
+ code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
+ import_code = "__Pyx_Import(%s, %s, %d)" % (
+ self.module_name.py_result(),
+ self.name_list.py_result() if self.name_list else '0',
+ self.level)
+
+ if self.level <= 0 and module_name in utility_code_for_imports:
+ helper_func, code_name, code_file = utility_code_for_imports[module_name]
+ code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file))
+ import_code = '%s(%s)' % (helper_func, import_code)
+
+ code.putln("%s = %s; %s" % (
+ self.result(),
+ import_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+ def get_known_standard_library_import(self):
+ return self.module_name.value
+
+
+class ScopedExprNode(ExprNode):
+ # Abstract base class for ExprNodes that have their own local
+ # scope, such as generator expressions.
+ #
+ # expr_scope Scope the inner scope of the expression
+
+ subexprs = []
+ expr_scope = None
+
+ # Does this node really have a local scope, e.g. does it leak loop
+ # variables or not? The non-leaking Py3 behaviour is the default, except
+ # for list comprehensions, where the behaviour differs between Py2 and
+ # Py3 (set in Parsing.py based on the parser context).
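+ # e.g. "[x for x in seq]" leaks "x" in Py2 but not in Py3;
+ # generator expressions never leak their loop variables.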
+ has_local_scope = True
+
+ def init_scope(self, outer_scope, expr_scope=None):
+ if expr_scope is not None:
+ self.expr_scope = expr_scope
+ elif self.has_local_scope:
+ self.expr_scope = Symtab.ComprehensionScope(outer_scope)
+ elif not self.expr_scope: # don't unset if it's already been set
+ self.expr_scope = None
+
+ def analyse_declarations(self, env):
+ self.init_scope(env)
+
+ def analyse_scoped_declarations(self, env):
+ # this is called with the expr_scope as env
+ pass
+
+ def analyse_types(self, env):
+ # no recursion here, the children will be analysed separately below
+ return self
+
+ def analyse_scoped_expressions(self, env):
+ # this is called with the expr_scope as env
+ return self
+
+ def generate_evaluation_code(self, code):
+ # set up local variables and free their references on exit
+ generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
+ if not self.has_local_scope or not self.expr_scope.var_entries:
+ # no local variables => delegate, done
+ generate_inner_evaluation_code(code)
+ return
+
+ code.putln('{ /* enter inner scope */')
+ py_entries = []
+ for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]):
+ if not entry.in_closure:
+ if entry.type.is_pyobject and entry.used:
+ py_entries.append(entry)
+ if not py_entries:
+ # no local Python references => no cleanup required
+ generate_inner_evaluation_code(code)
+ code.putln('} /* exit inner scope */')
+ return
+
+ # must free all local Python references at each exit point
+ old_loop_labels = code.new_loop_labels()
+ old_error_label = code.new_error_label()
+
+ generate_inner_evaluation_code(code)
+
+ # normal (non-error) exit
+ self._generate_vars_cleanup(code, py_entries)
+
+ # error/loop body exit points
+ exit_scope = code.new_label('exit_scope')
+ code.put_goto(exit_scope)
+ for label, old_label in ([(code.error_label, old_error_label)] +
+ list(zip(code.get_loop_labels(), old_loop_labels))):
+ if code.label_used(label):
+ code.put_label(label)
+ self._generate_vars_cleanup(code, py_entries)
+ code.put_goto(old_label)
+ code.put_label(exit_scope)
+ code.putln('} /* exit inner scope */')
+
+ code.set_loop_labels(old_loop_labels)
+ code.error_label = old_error_label
+
+ def _generate_vars_cleanup(self, code, py_entries):
+ for entry in py_entries:
+ if entry.is_cglobal:
+ code.put_var_gotref(entry)
+ code.put_var_decref_set(entry, "Py_None")
+ else:
+ code.put_var_xdecref_clear(entry)
+
+
+class IteratorNode(ScopedExprNode):
+ # Used as part of for statement implementation.
+ #
+ # Implements result = iter(sequence)
+ #
+ # sequence ExprNode
+
+ type = py_object_type
+ iter_func_ptr = None
+ counter_cname = None
+ reversed = False # currently only used for list/tuple types (see Optimize.py)
+ is_async = False
+ has_local_scope = False
+
+ subexprs = ['sequence']
+
+ def analyse_types(self, env):
+ if self.expr_scope:
+ env = self.expr_scope # actually evaluate sequence in this scope instead
+ self.sequence = self.sequence.analyse_types(env)
+ if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
+ not self.sequence.type.is_string:
+ # C array iteration will be transformed later on
+ self.type = self.sequence.type
+ elif self.sequence.type.is_cpp_class:
+ return CppIteratorNode(self.pos, sequence=self.sequence).analyse_types(env)
+ elif self.is_reversed_cpp_iteration():
+ sequence = self.sequence.arg_tuple.args[0].arg
+ return CppIteratorNode(self.pos, sequence=sequence, reversed=True).analyse_types(env)
+ else:
+ self.sequence = self.sequence.coerce_to_pyobject(env)
+ if self.sequence.type in (list_type, tuple_type):
+ self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
+ self.is_temp = 1
+ return self
+
+ gil_message = "Iterating over Python object"
+
+ _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
+ ]))
+
+ def is_reversed_cpp_iteration(self):
+ """
+ Returns True if the 'reversed' function is applied to a C++ iterable.
+
+ This supports C++ classes with reverse_iterator implemented.
+ """
+ if not (isinstance(self.sequence, SimpleCallNode) and
+ self.sequence.arg_tuple and len(self.sequence.arg_tuple.args) == 1):
+ return False
+ func = self.sequence.function
+ if func.is_name and func.name == "reversed":
+ if not func.entry.is_builtin:
+ return False
+ arg = self.sequence.arg_tuple.args[0]
+ if isinstance(arg, CoercionNode) and arg.arg.is_name:
+ arg = arg.arg.entry
+ return arg.type.is_cpp_class
+ return False
+
+ def type_dependencies(self, env):
+ return self.sequence.type_dependencies(self.expr_scope or env)
+
+ def infer_type(self, env):
+ sequence_type = self.sequence.infer_type(env)
+ if sequence_type.is_array or sequence_type.is_ptr:
+ return sequence_type
+ elif sequence_type.is_cpp_class:
+ begin = sequence_type.scope.lookup("begin")
+ if begin is not None:
+ return begin.type.return_type
+ elif sequence_type.is_pyobject:
+ return sequence_type
+ return py_object_type
+
+ def generate_result_code(self, code):
+ sequence_type = self.sequence.type
+ if sequence_type.is_cpp_class:
+ assert False, "Should have been changed to CppIteratorNode"
+ if sequence_type.is_array or sequence_type.is_ptr:
+ raise InternalError("for in carray slice not transformed")
+
+ is_builtin_sequence = sequence_type in (list_type, tuple_type)
+ if not is_builtin_sequence:
+ # reversed() not currently optimised (see Optimize.py)
+ assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
+ self.may_be_a_sequence = not sequence_type.is_builtin_type
+ if self.may_be_a_sequence:
+ code.putln(
+ "if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % (
+ self.sequence.py_result(),
+ self.sequence.py_result()))
+
+ if is_builtin_sequence or self.may_be_a_sequence:
+ code.putln("%s = %s; __Pyx_INCREF(%s);" % (
+ self.result(),
+ self.sequence.py_result(),
+ self.result(),
+ ))
+ self.counter_cname = code.funcstate.allocate_temp(
+ PyrexTypes.c_py_ssize_t_type, manage_ref=False)
+ if self.reversed:
+ if sequence_type is list_type:
+ len_func = '__Pyx_PyList_GET_SIZE'
+ else:
+ len_func = '__Pyx_PyTuple_GET_SIZE'
+ code.putln("%s = %s(%s);" % (self.counter_cname, len_func, self.result()))
+ code.putln("#if !CYTHON_ASSUME_SAFE_MACROS")
+ code.putln(code.error_goto_if_neg(self.counter_cname, self.pos))
+ code.putln("#endif")
+ code.putln("--%s;" % self.counter_cname) # len -> last item
+ else:
+ code.putln("%s = 0;" % self.counter_cname)
+
+ if not is_builtin_sequence:
+ self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
+ if self.may_be_a_sequence:
+ code.putln("%s = NULL;" % self.iter_func_ptr)
+ code.putln("} else {")
+ code.put("%s = -1; " % self.counter_cname)
+
+ code.putln("%s = PyObject_GetIter(%s); %s" % (
+ self.result(),
+ self.sequence.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+ # PyObject_GetIter() fails if "tp_iternext" is not set, but the check below
+ # makes it visible to the C compiler that the pointer really isn't NULL, so that
+ # it can distinguish between the special cases and the generic case
+ code.putln("%s = __Pyx_PyObject_GetIterNextFunc(%s); %s" % (
+ self.iter_func_ptr, self.py_result(),
+ code.error_goto_if_null(self.iter_func_ptr, self.pos)))
+ if self.may_be_a_sequence:
+ code.putln("}")
+
+ def generate_next_sequence_item(self, test_name, result_name, code):
+ assert self.counter_cname, "internal error: counter_cname temp not prepared"
+ assert test_name in ('List', 'Tuple')
+
+ final_size = '__Pyx_Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
+ size_is_safe = False
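+ # For literal sequence constructors, e.g. "for x in [a, b, c]", the final
+ # size is known at compile time and needs no runtime size check.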
+ if self.sequence.is_sequence_constructor:
+ item_count = len(self.sequence.args)
+ if self.sequence.mult_factor is None:
+ final_size = item_count
+ size_is_safe = True
+ elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types):
+ final_size = item_count * self.sequence.mult_factor.constant_result
+ size_is_safe = True
+
+ if size_is_safe:
+ code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
+ else:
+ code.putln("{")
+ code.putln("Py_ssize_t %s = %s;" % (Naming.quick_temp_cname, final_size))
+ code.putln("#if !CYTHON_ASSUME_SAFE_MACROS")
+ code.putln(code.error_goto_if_neg(Naming.quick_temp_cname, self.pos))
+ code.putln("#endif")
+ code.putln("if (%s >= %s) break;" % (self.counter_cname, Naming.quick_temp_cname))
+ code.putln("}")
+
+ if self.reversed:
+ inc_dec = '--'
+ else:
+ inc_dec = '++'
+ code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
+ code.putln(
+ "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
+ result_name,
+ test_name,
+ self.py_result(),
+ self.counter_cname,
+ result_name,
+ self.counter_cname,
+ inc_dec,
+ # use the error label to avoid C compiler warnings if we only use it below
+ code.error_goto_if_neg('0', self.pos)
+ ))
+ code.putln("#else")
+ code.putln(
+ "%s = __Pyx_PySequence_ITEM(%s, %s); %s%s; %s" % (
+ result_name,
+ self.py_result(),
+ self.counter_cname,
+ self.counter_cname,
+ inc_dec,
+ code.error_goto_if_null(result_name, self.pos)))
+ code.put_gotref(result_name, py_object_type)
+ code.putln("#endif")
+
+ def generate_iter_next_result_code(self, result_name, code):
+ sequence_type = self.sequence.type
+ if self.reversed:
+ code.putln("if (%s < 0) break;" % self.counter_cname)
+ if sequence_type is list_type:
+ self.generate_next_sequence_item('List', result_name, code)
+ return
+ elif sequence_type is tuple_type:
+ self.generate_next_sequence_item('Tuple', result_name, code)
+ return
+
+ if self.may_be_a_sequence:
+ code.putln("if (likely(!%s)) {" % self.iter_func_ptr)
+ code.putln("if (likely(PyList_CheckExact(%s))) {" % self.py_result())
+ self.generate_next_sequence_item('List', result_name, code)
+ code.putln("} else {")
+ self.generate_next_sequence_item('Tuple', result_name, code)
+ code.putln("}")
+ code.put("} else ")
+
+ code.putln("{")
+ code.putln(
+ "%s = %s(%s);" % (
+ result_name,
+ self.iter_func_ptr,
+ self.py_result()))
+ code.putln("if (unlikely(!%s)) {" % result_name)
+ code.putln("PyObject* exc_type = PyErr_Occurred();")
+ code.putln("if (exc_type) {")
+ code.putln("if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
+ code.putln("else %s" % code.error_goto(self.pos))
+ code.putln("}")
+ code.putln("break;")
+ code.putln("}")
+ code.put_gotref(result_name, py_object_type)
+ code.putln("}")
+
+ def free_temps(self, code):
+ if self.counter_cname:
+ code.funcstate.release_temp(self.counter_cname)
+ if self.iter_func_ptr:
+ code.funcstate.release_temp(self.iter_func_ptr)
+ self.iter_func_ptr = None
+ ExprNode.free_temps(self, code)
+
+
+class CppIteratorNode(ExprNode):
+ # Iteration over a C++ container.
+ # Created at the analyse_types stage by IteratorNode
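+ # e.g. "for x in vec" with a C++ container maps to begin()/end()
+ # plus the operators !=, ++ and * on the iterator type.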
+ cpp_sequence_cname = None
+ cpp_attribute_op = "."
+ extra_dereference = ""
+ is_temp = True
+ reversed = False
+
+ subexprs = ['sequence']
+
+ def get_iterator_func_names(self):
+ return ("begin", "end") if not self.reversed else ("rbegin", "rend")
+
+ def analyse_types(self, env):
+ sequence_type = self.sequence.type
+ if sequence_type.is_ptr:
+ sequence_type = sequence_type.base_type
+ begin_name, end_name = self.get_iterator_func_names()
+ begin = sequence_type.scope.lookup(begin_name)
+ end = sequence_type.scope.lookup(end_name)
+ if (begin is None
+ or not begin.type.is_cfunction
+ or begin.type.args):
+ error(self.pos, "missing %s() on %s" % (begin_name, self.sequence.type))
+ self.type = error_type
+ return self
+ if (end is None
+ or not end.type.is_cfunction
+ or end.type.args):
+ error(self.pos, "missing %s() on %s" % (end_name, self.sequence.type))
+ self.type = error_type
+ return self
+ iter_type = begin.type.return_type
+ if iter_type.is_cpp_class:
+ if env.directives['cpp_locals']:
+ self.extra_dereference = "*"
+ if env.lookup_operator_for_types(
+ self.pos,
+ "!=",
+ [iter_type, end.type.return_type]) is None:
+ error(self.pos, "missing operator!= on result of %s() on %s" % (begin_name, self.sequence.type))
+ self.type = error_type
+ return self
+ if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
+ error(self.pos, "missing operator++ on result of %s() on %s" % (begin_name, self.sequence.type))
+ self.type = error_type
+ return self
+ if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
+ error(self.pos, "missing operator* on result of %s() on %s" % (begin_name, self.sequence.type))
+ self.type = error_type
+ return self
+ self.type = iter_type
+ elif iter_type.is_ptr:
+ if not (iter_type == end.type.return_type):
+ error(self.pos, "incompatible types for %s() and %s()" % (begin_name, end_name))
+ self.type = iter_type
+ else:
+ error(self.pos, "result type of %s() on %s must be a C++ class or pointer" % (begin_name, self.sequence.type))
+ self.type = error_type
+ return self
+
+ def generate_result_code(self, code):
+ sequence_type = self.sequence.type
+ begin_name, _ = self.get_iterator_func_names()
+ # essentially 3 options:
+ if self.sequence.is_simple():
+ # 1) Sequence can be accessed directly, like a name;
+ # assigning to it may break the container, but that's the responsibility
+ # of the user
+ code.putln("%s = %s%s%s();" % (
+ self.result(),
+ self.sequence.result(),
+ self.cpp_attribute_op,
+ begin_name))
+ else:
+ # (while it'd be nice to limit the scope of the loop temp, it's essentially
+ # impossible to do while supporting generators)
+ temp_type = sequence_type
+ if temp_type.is_reference:
+ # 2) Sequence is a reference (often obtained by dereferencing a pointer);
+ # make the temp a pointer so we are not sensitive to users reassigning
+ # the pointer that it came from
+ temp_type = PyrexTypes.CPtrType(sequence_type.ref_base_type)
+ if temp_type.is_ptr or code.globalstate.directives['cpp_locals']:
+ self.cpp_attribute_op = "->"
+ # 3) (otherwise) sequence comes from a function call or similar, so we must
+ # create a temp to store it in
+ self.cpp_sequence_cname = code.funcstate.allocate_temp(temp_type, manage_ref=False)
+ code.putln("%s = %s%s;" % (self.cpp_sequence_cname,
+ "&" if temp_type.is_ptr else "",
+ self.sequence.move_result_rhs()))
+ code.putln("%s = %s%s%s();" % (
+ self.result(),
+ self.cpp_sequence_cname,
+ self.cpp_attribute_op,
+ begin_name))
+
+ def generate_iter_next_result_code(self, result_name, code):
+ # end call isn't cached to support containers that allow adding while iterating
+ # (much as this is usually a bad idea)
+ _, end_name = self.get_iterator_func_names()
+ code.putln("if (!(%s%s != %s%s%s())) break;" % (
+ self.extra_dereference,
+ self.result(),
+ self.cpp_sequence_cname or self.sequence.result(),
+ self.cpp_attribute_op,
+ end_name))
+ code.putln("%s = *%s%s;" % (
+ result_name,
+ self.extra_dereference,
+ self.result()))
+ code.putln("++%s%s;" % (self.extra_dereference, self.result()))
+
+ def generate_subexpr_disposal_code(self, code):
+ if not self.cpp_sequence_cname:
+ # the sequence is accessed directly so any temporary result in its
+ # subexpressions must remain available until the iterator is no longer needed
+ return
+ ExprNode.generate_subexpr_disposal_code(self, code)
+
+ def free_subexpr_temps(self, code):
+ if not self.cpp_sequence_cname:
+ # the sequence is accessed directly so any temporary result in its
+ # subexpressions must remain available until the iterator is no longer needed
+ return
+ ExprNode.free_subexpr_temps(self, code)
+
+ def generate_disposal_code(self, code):
+ if not self.cpp_sequence_cname:
+ # postponed from CppIteratorNode.generate_subexpr_disposal_code
+ # and CppIteratorNode.free_subexpr_temps
+ ExprNode.generate_subexpr_disposal_code(self, code)
+ ExprNode.free_subexpr_temps(self, code)
+ ExprNode.generate_disposal_code(self, code)
+
+ def free_temps(self, code):
+ if self.cpp_sequence_cname:
+ code.funcstate.release_temp(self.cpp_sequence_cname)
+ # skip over IteratorNode since we don't use any of the temps it does
+ ExprNode.free_temps(self, code)
+
+
+class NextNode(AtomicExprNode):
+ # Used as part of for statement implementation.
+ # Implements result = next(iterator)
+ # Created during analyse_types phase.
+ # The iterator is not owned by this node.
+ #
+ # iterator IteratorNode
+
+ def __init__(self, iterator):
+ AtomicExprNode.__init__(self, iterator.pos)
+ self.iterator = iterator
+
+ def nogil_check(self, env):
+ # ignore - errors (if any) are already handled by IteratorNode
+ pass
+
+ def type_dependencies(self, env):
+ return self.iterator.type_dependencies(env)
+
+ def infer_type(self, env, iterator_type=None):
+ if iterator_type is None:
+ iterator_type = self.iterator.infer_type(env)
+ if iterator_type.is_ptr or iterator_type.is_array:
+ return iterator_type.base_type
+ elif iterator_type.is_cpp_class:
+ item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
+ item_type = PyrexTypes.remove_cv_ref(item_type, remove_fakeref=True)
+ return item_type
+ else:
+ # Avoid duplication of complicated logic.
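+ # Infer the item type by pretending to index the sequence,
+ # reusing IndexNode's type inference.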
+ fake_index_node = IndexNode(
+ self.pos,
+ base=self.iterator.sequence,
+ index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
+ type=PyrexTypes.c_py_ssize_t_type))
+ return fake_index_node.infer_type(env)
+
+ def analyse_types(self, env):
+ self.type = self.infer_type(env, self.iterator.type)
+ self.is_temp = 1
+ return self
+
+ def generate_result_code(self, code):
+ self.iterator.generate_iter_next_result_code(self.result(), code)
+
+
+class AsyncIteratorNode(ScopedExprNode):
+ # Used as part of 'async for' statement implementation.
+ #
+ # Implements result = sequence.__aiter__()
+ #
+ # sequence ExprNode
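+ #
+ # e.g. "async for x in aiterable:" evaluates aiterable.__aiter__() once up front.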
+
+ subexprs = ['sequence']
+
+ is_async = True
+ type = py_object_type
+ is_temp = 1
+ has_local_scope = False
+
+ def infer_type(self, env):
+ return py_object_type
+
+ def analyse_types(self, env):
+ if self.expr_scope:
+ env = self.expr_scope
+ self.sequence = self.sequence.analyse_types(env)
+ if not self.sequence.type.is_pyobject:
+ error(self.pos, "async for loops not allowed on C/C++ types")
+ self.sequence = self.sequence.coerce_to_pyobject(env)
+ return self
+
+ def generate_result_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
+ code.putln("%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % (
+ self.result(),
+ self.sequence.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class AsyncNextNode(AtomicExprNode):
+ # Used as part of 'async for' statement implementation.
+ # Implements result = iterator.__anext__()
+ # Created during analyse_types phase.
+ # The iterator is not owned by this node.
+ #
+ # iterator IteratorNode
+
+ type = py_object_type
+ is_temp = 1
+
+ def __init__(self, iterator):
+ AtomicExprNode.__init__(self, iterator.pos)
+ self.iterator = iterator
+
+ def infer_type(self, env):
+ return py_object_type
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
+ code.putln("%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % (
+ self.result(),
+ self.iterator.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class WithExitCallNode(ExprNode):
+ # The __exit__() call of a 'with' statement. Used in both the
+ # except and finally clauses.
+
+ # with_stat WithStatNode the surrounding 'with' statement
+ # args TupleNode or ResultStatNode the exception info tuple
+ # await_expr AwaitExprNode the await expression of an 'async with' statement
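+ #
+ # e.g. for "with cm: ..." this evaluates cm.__exit__(*exc_info);
+ # a true result swallows the exception.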
+
+ subexprs = ['args', 'await_expr']
+ test_if_run = True
+ await_expr = None
+
+ def analyse_types(self, env):
+ self.args = self.args.analyse_types(env)
+ if self.await_expr:
+ self.await_expr = self.await_expr.analyse_types(env)
+ self.type = PyrexTypes.c_bint_type
+ self.is_temp = True
+ return self
+
+ def generate_evaluation_code(self, code):
+ if self.test_if_run:
+ # call only if it was not already called (and decref-cleared)
+ code.putln("if (%s) {" % self.with_stat.exit_var)
+
+ self.args.generate_evaluation_code(code)
+ result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
+
+ code.mark_pos(self.pos)
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCall", "ObjectHandling.c"))
+ code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
+ result_var,
+ self.with_stat.exit_var,
+ self.args.result()))
+ code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
+ self.args.generate_disposal_code(code)
+ self.args.free_temps(code)
+
+ code.putln(code.error_goto_if_null(result_var, self.pos))
+ code.put_gotref(result_var, py_object_type)
+
+ if self.await_expr:
+ # FIXME: result_var temp currently leaks into the closure
+ self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
+ code.putln("%s = %s;" % (result_var, self.await_expr.py_result()))
+ self.await_expr.generate_post_assignment_code(code)
+ self.await_expr.free_temps(code)
+
+ if self.result_is_used:
+ self.allocate_temp_result(code)
+ code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
+ code.put_decref_clear(result_var, type=py_object_type)
+ if self.result_is_used:
+ code.put_error_if_neg(self.pos, self.result())
+ code.funcstate.release_temp(result_var)
+ if self.test_if_run:
+ code.putln("}")
+
+
+class ExcValueNode(AtomicExprNode):
+ # Node created during analyse_types phase
+ # of an ExceptClauseNode to fetch the current
+ # exception value.
+
+ type = py_object_type
+
+ def __init__(self, pos):
+ ExprNode.__init__(self, pos)
+
+ def set_var(self, var):
+ self.var = var
+
+ def calculate_result_code(self):
+ return self.var
+
+ def generate_result_code(self, code):
+ pass
+
+ def analyse_types(self, env):
+ return self
+
+
+class TempNode(ExprNode):
+ # Node created during analyse_types phase
+ # of some nodes to hold a temporary value.
+ #
+ # Note: One must call "allocate" and "release" on
+ # the node during code generation to get/release the temp.
+ # This is because the temp result is often used outside of
+ # the regular evaluation/disposal cycle.
+
+ subexprs = []
+
+ def __init__(self, pos, type, env=None):
+ ExprNode.__init__(self, pos)
+ self.type = type
+ if type.is_pyobject:
+ self.result_ctype = py_object_type
+ self.is_temp = 1
+
+ def analyse_types(self, env):
+ return self
+
+ def analyse_target_declaration(self, env):
+ self.is_target = True
+
+ def generate_result_code(self, code):
+ pass
+
+ def allocate(self, code):
+ self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
+
+ def release(self, code):
+ code.funcstate.release_temp(self.temp_cname)
+ self.temp_cname = None
+
+ def result(self):
+ try:
+ return self.temp_cname
+ except AttributeError:
+ assert False, "Remember to call allocate/release on TempNode"
+ raise
+
+ # Do not participate in normal temp alloc/dealloc:
+ def allocate_temp_result(self, code):
+ pass
+
+ def release_temp_result(self, code):
+ pass
+
+class PyTempNode(TempNode):
+ # TempNode holding a Python value.
+
+ def __init__(self, pos, env):
+ TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
+
+class RawCNameExprNode(ExprNode):
+ subexprs = []
+
+ def __init__(self, pos, type=None, cname=None):
+ ExprNode.__init__(self, pos, type=type)
+ if cname is not None:
+ self.cname = cname
+
+ def analyse_types(self, env):
+ return self
+
+ def set_cname(self, cname):
+ self.cname = cname
+
+ def result(self):
+ return self.cname
+
+ def generate_result_code(self, code):
+ pass
+
+
+#-------------------------------------------------------------------
+#
+# F-strings
+#
+#-------------------------------------------------------------------
+
+
+class JoinedStrNode(ExprNode):
+ # F-strings
+ #
+ # values [UnicodeNode|FormattedValueNode] Substrings of the f-string
+ #
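+ # e.g. f"a{x}b" -> values = [UnicodeNode "a", FormattedValueNode x, UnicodeNode "b"]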
+ type = unicode_type
+ is_temp = True
+ gil_message = "String concatenation"
+
+ subexprs = ['values']
+
+ def analyse_types(self, env):
+ self.values = [v.analyse_types(env).coerce_to_pyobject(env) for v in self.values]
+ return self
+
+ def may_be_none(self):
+ # PyUnicode_Join() always returns a Unicode string or raises an exception
+ return False
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ num_items = len(self.values)
+ list_var = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ ulength_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
+ max_char_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ucs4_type, manage_ref=False)
+
+ code.putln('%s = PyTuple_New(%s); %s' % (
+ list_var,
+ num_items,
+ code.error_goto_if_null(list_var, self.pos)))
+ code.put_gotref(list_var, py_object_type)
+ code.putln("%s = 0;" % ulength_var)
+ code.putln("%s = 127;" % max_char_var) # at least ASCII character range
+
+ for i, node in enumerate(self.values):
+ node.generate_evaluation_code(code)
+ node.make_owned_reference(code)
+
+ ulength = "__Pyx_PyUnicode_GET_LENGTH(%s)" % node.py_result()
+ max_char_value = "__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result()
+ is_ascii = False
+ if isinstance(node, UnicodeNode):
+ try:
+ # most strings will be ASCII or at least Latin-1
+ node.value.encode('iso8859-1')
+ max_char_value = '255'
+ node.value.encode('us-ascii')
+ is_ascii = True
+ except UnicodeEncodeError:
+ if max_char_value != '255':
+ # not ISO8859-1 => check BMP limit
+ max_char = max(map(ord, node.value))
+ if max_char < 0xD800:
+ # BMP-only, no surrogate pairs used
+ max_char_value = '65535'
+ ulength = str(len(node.value))
+ elif max_char >= 65536:
+ # clearly outside of BMP, and not on a 16-bit Unicode system
+ max_char_value = '1114111'
+ ulength = str(len(node.value))
+ else:
+ # not really worth implementing a check for surrogate pairs here
+ # drawback: C code can differ when generating on Py2 with 2-byte Unicode
+ pass
+ else:
+ ulength = str(len(node.value))
+ elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric:
+ is_ascii = True # formatted C numbers are always ASCII
+
+ if not is_ascii:
+ code.putln("%s = (%s > %s) ? %s : %s;" % (
+ max_char_var, max_char_value, max_char_var, max_char_value, max_char_var))
+ code.putln("%s += %s;" % (ulength_var, ulength))
+
+ node.generate_giveref(code)
+ code.putln('PyTuple_SET_ITEM(%s, %s, %s);' % (list_var, i, node.py_result()))
+ node.generate_post_assignment_code(code)
+ node.free_temps(code)
+
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+ code.globalstate.use_utility_code(UtilityCode.load_cached("JoinPyUnicode", "StringTools.c"))
+ code.putln('%s = __Pyx_PyUnicode_Join(%s, %d, %s, %s); %s' % (
+ self.result(),
+ list_var,
+ num_items,
+ ulength_var,
+ max_char_var,
+ code.error_goto_if_null(self.py_result(), self.pos)))
+ self.generate_gotref(code)
+
+ code.put_decref_clear(list_var, py_object_type)
+ code.funcstate.release_temp(list_var)
+ code.funcstate.release_temp(ulength_var)
+ code.funcstate.release_temp(max_char_var)
+
+
+class FormattedValueNode(ExprNode):
+ # {}-delimited portions of an f-string
+ #
+ # value ExprNode The expression itself
+ # conversion_char str or None Type conversion (!s, !r, !a, none, or 'd' for integer conversion)
+ # format_spec JoinedStrNode or None Format string passed to __format__
+ # c_format_spec str or None If not None, formatting can be done at the C level
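+ #
+ # e.g. f"{x!r:>10}" -> value x, conversion_char 'r', format_spec ">10"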
+
+ subexprs = ['value', 'format_spec']
+
+ type = unicode_type
+ is_temp = True
+ c_format_spec = None
+ gil_message = "String formatting"
+
+ find_conversion_func = {
+ 's': 'PyObject_Unicode',
+ 'r': 'PyObject_Repr',
+ 'a': 'PyObject_ASCII', # NOTE: mapped to PyObject_Repr() in Py2
+ 'd': '__Pyx_PyNumber_IntOrLong', # NOTE: internal mapping for '%d' formatting
+ }.get
+
+ def may_be_none(self):
+ # PyObject_Format() always returns a Unicode string or raises an exception
+ return False
+
+ def analyse_types(self, env):
+ self.value = self.value.analyse_types(env)
+ if not self.format_spec or self.format_spec.is_string_literal:
+ c_format_spec = self.format_spec.value if self.format_spec else self.value.type.default_format_spec
+ if self.value.type.can_coerce_to_pystring(env, format_spec=c_format_spec):
+ self.c_format_spec = c_format_spec
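+ # e.g. for f"{i:05d}" with a C integer 'i', c_format_spec becomes
+ # "05d" and the value can be formatted at the C level without
+ # creating an intermediate Python object.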
+
+ if self.format_spec:
+ self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env)
+ if self.c_format_spec is None:
+ self.value = self.value.coerce_to_pyobject(env)
+ if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'):
+ if self.value.type is unicode_type and not self.value.may_be_none():
+ # value is definitely a unicode string and we don't format it in any special way
+ return self.value
+ return self
+
+ def generate_result_code(self, code):
+ if self.c_format_spec is not None and not self.value.type.is_pyobject:
+ convert_func_call = self.value.type.convert_to_pystring(
+ self.value.result(), code, self.c_format_spec)
+ code.putln("%s = %s; %s" % (
+ self.result(),
+ convert_func_call,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ return
+
+ value_result = self.value.py_result()
+ value_is_unicode = self.value.type is unicode_type and not self.value.may_be_none()
+ if self.format_spec:
+ format_func = '__Pyx_PyObject_Format'
+ format_spec = self.format_spec.py_result()
+ else:
+ # common case: expect simple Unicode pass-through if no format spec
+ format_func = '__Pyx_PyObject_FormatSimple'
+ # passing a Unicode format string in Py2 forces PyObject_Format() to also return a Unicode string
+ format_spec = Naming.empty_unicode
+
+ conversion_char = self.conversion_char
+ if conversion_char == 's' and value_is_unicode:
+ # no need to pipe unicode strings through str()
+ conversion_char = None
+
+ if conversion_char:
+ fn = self.find_conversion_func(conversion_char)
+ assert fn is not None, "invalid conversion character found: '%s'" % conversion_char
+ value_result = '%s(%s)' % (fn, value_result)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectFormatAndDecref", "StringTools.c"))
+ format_func += 'AndDecref'
+ elif self.format_spec:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectFormat", "StringTools.c"))
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectFormatSimple", "StringTools.c"))
+
+ code.putln("%s = %s(%s, %s); %s" % (
+ self.result(),
+ format_func,
+ value_result,
+ format_spec,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+#-------------------------------------------------------------------
+#
+ # Parallel nodes (cython.parallel.threadsavailable() and cython.parallel.threadid())
+#
+#-------------------------------------------------------------------
+
+class ParallelThreadsAvailableNode(AtomicExprNode):
+ """
+ Note: this is disabled and not a valid directive at this moment
+
+ Implements cython.parallel.threadsavailable(). If we are called from the
+ sequential part of the application, we need to call omp_get_max_threads(),
+ and in the parallel part we can just call omp_get_num_threads()
+ """
+
+ type = PyrexTypes.c_int_type
+
+ def analyse_types(self, env):
+ self.is_temp = True
+ # env.add_include_file("omp.h")
+ return self
+
+ def generate_result_code(self, code):
+ code.putln("#ifdef _OPENMP")
+ code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" %
+ self.temp_code)
+ code.putln("else %s = omp_get_num_threads();" % self.temp_code)
+ code.putln("#else")
+ code.putln("%s = 1;" % self.temp_code)
+ code.putln("#endif")
+
+ def result(self):
+ return self.temp_code
+
+
+class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
+ """
+ Implements cython.parallel.threadid()
+ """
+
+ type = PyrexTypes.c_int_type
+
+ def analyse_types(self, env):
+ self.is_temp = True
+ # env.add_include_file("omp.h")
+ return self
+
+ def generate_result_code(self, code):
+ code.putln("#ifdef _OPENMP")
+ code.putln("%s = omp_get_thread_num();" % self.temp_code)
+ code.putln("#else")
+ code.putln("%s = 0;" % self.temp_code)
+ code.putln("#endif")
+
+ def result(self):
+ return self.temp_code
+
+
+#-------------------------------------------------------------------
+#
+# Trailer nodes
+#
+#-------------------------------------------------------------------
+
+
+class _IndexingBaseNode(ExprNode):
+ # Base class for indexing nodes.
+ #
+ # base ExprNode the value being indexed
+
+ def is_ephemeral(self):
+ # in most cases, indexing will return a safe reference to an object in a container,
+ # so we consider the result safe if the base object is
+ return self.base.is_ephemeral() or self.base.type in (
+ basestring_type, str_type, bytes_type, bytearray_type, unicode_type)
+
+ def check_const_addr(self):
+ return self.base.check_const_addr() and self.index.check_const()
+
+ def is_lvalue(self):
+ # NOTE: references currently have both is_reference and is_ptr
+ # set. Since pointers and references have different lvalue
+ # rules, we must be careful to separate the two.
+ if self.type.is_reference:
+ if self.type.ref_base_type.is_array:
+ # fixed-size arrays aren't lvalues
+ return False
+ elif self.type.is_ptr:
+ # non-const pointers can always be reassigned
+ return True
+ # Just about everything else returned by the index operator
+ # can be an lvalue.
+ return True
+
+
+class IndexNode(_IndexingBaseNode):
+ # Sequence indexing.
+ #
+ # base ExprNode
+ # index ExprNode
+ # type_indices [PyrexType]
+ #
+ # is_fused_index boolean Whether the index is used to specialize a
+ # c(p)def function
+
+ subexprs = ['base', 'index']
+ type_indices = None
+
+ is_subscript = True
+ is_fused_index = False
+
+ def calculate_constant_result(self):
+ self.constant_result = self.base.constant_result[self.index.constant_result]
+
+ def compile_time_value(self, denv):
+ base = self.base.compile_time_value(denv)
+ index = self.index.compile_time_value(denv)
+ try:
+ return base[index]
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def is_simple(self):
+ base = self.base
+ return (base.is_simple() and self.index.is_simple()
+ and base.type and (base.type.is_ptr or base.type.is_array))
+
+ def may_be_none(self):
+ base_type = self.base.type
+ if base_type:
+ if base_type.is_string:
+ return False
+ if isinstance(self.index, SliceNode):
+ # slicing!
+ if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
+ basestring_type, list_type, tuple_type):
+ return False
+ return ExprNode.may_be_none(self)
+
+ def analyse_target_declaration(self, env):
+ pass
+
+ def analyse_as_type(self, env):
+ base_type = self.base.analyse_as_type(env)
+ if base_type:
+ if base_type.is_cpp_class or base_type.python_type_constructor_name:
+ if self.index.is_sequence_constructor:
+ template_values = self.index.args
+ else:
+ template_values = [self.index]
+ type_node = Nodes.TemplatedTypeNode(
+ pos=self.pos,
+ positional_args=template_values,
+ keyword_args=None)
+ return type_node.analyse(env, base_type=base_type)
+ elif self.index.is_slice or self.index.is_sequence_constructor:
+ # memory view
+ from . import MemoryView
+ env.use_utility_code(MemoryView.view_utility_code)
+ axes = [self.index] if self.index.is_slice else list(self.index.args)
+ return PyrexTypes.MemoryViewSliceType(base_type, MemoryView.get_axes_specs(env, axes))
+ elif not base_type.is_pyobject:
+ # C array
+ index = self.index.compile_time_value(env)
+ if index is not None:
+ try:
+ index = int(index)
+ except (ValueError, TypeError):
+ pass
+ else:
+ return PyrexTypes.CArrayType(base_type, index)
+ error(self.pos, "Array size must be a compile time constant")
+ return None
+
+ def analyse_pytyping_modifiers(self, env):
+ # Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
+ # TODO: somehow bring this together with TemplatedTypeNode.analyse_pytyping_modifiers()
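+ # e.g. for a declaration like "typing.Optional[list]" this collects
+ # the modifier name registered for Optional; nested modifiers such as
+ # "dataclasses.InitVar[typing.Optional[int]]" yield both names
+ # (illustrative; the exact strings come from each type constructor's
+ # modifier_name).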
+ modifiers = []
+ modifier_node = self
+ while modifier_node.is_subscript:
+ modifier_type = modifier_node.base.analyse_as_type(env)
+ if (modifier_type and modifier_type.python_type_constructor_name
+ and modifier_type.modifier_name):
+ modifiers.append(modifier_type.modifier_name)
+ modifier_node = modifier_node.index
+ return modifiers
+
+ def type_dependencies(self, env):
+ return self.base.type_dependencies(env) + self.index.type_dependencies(env)
+
+ def infer_type(self, env):
+ base_type = self.base.infer_type(env)
+ if self.index.is_slice:
+ # slicing!
+ if base_type.is_string:
+ # sliced C strings must coerce to Python
+ return bytes_type
+ elif base_type.is_pyunicode_ptr:
+ # sliced Py_UNICODE* strings must coerce to Python
+ return unicode_type
+ elif base_type in (unicode_type, bytes_type, str_type,
+ bytearray_type, list_type, tuple_type):
+ # slicing these returns the same type
+ return base_type
+ elif base_type.is_memoryviewslice:
+ return base_type
+ else:
+ # TODO: Handle buffers (hopefully without too much redundancy).
+ return py_object_type
+
+ index_type = self.index.infer_type(env)
+ if index_type and index_type.is_int or isinstance(self.index, IntNode):
+ # indexing!
+ if base_type is unicode_type:
+ # Py_UCS4 will automatically coerce to a unicode string
+ # if required, so this is safe. We only infer Py_UCS4
+ # when the index is a C integer type. Otherwise, we may
+ # need to use normal Python item access, in which case
+ # it's faster to return the one-char unicode string than
+ # to receive it, throw it away, and potentially rebuild it
+ # on a subsequent PyObject coercion.
+ return PyrexTypes.c_py_ucs4_type
+ elif base_type is str_type:
+ # always returns str - Py2: bytes, Py3: unicode
+ return base_type
+ elif base_type is bytearray_type:
+ return PyrexTypes.c_uchar_type
+ elif isinstance(self.base, BytesNode):
+ #if env.global_scope().context.language_level >= 3:
+ # # inferring 'char' can be made to work in Python 3 mode
+ # return PyrexTypes.c_char_type
+ # Py2/3 return different types on indexing bytes objects
+ return py_object_type
+ elif base_type in (tuple_type, list_type):
+ # if base is a literal, take a look at its values
+ item_type = infer_sequence_item_type(
+ env, self.base, self.index, seq_type=base_type)
+ if item_type is not None:
+ return item_type
+ elif base_type.is_ptr or base_type.is_array:
+ return base_type.base_type
+ elif base_type.is_ctuple and isinstance(self.index, IntNode):
+ if self.index.has_constant_result():
+ index = self.index.constant_result
+ if index < 0:
+ index += base_type.size
+ if 0 <= index < base_type.size:
+ return base_type.components[index]
+ elif base_type.is_memoryviewslice:
+ if base_type.ndim == 0:
+ pass # probably an error, but definitely don't know what to do - return pyobject for now
+ if base_type.ndim == 1:
+ return base_type.dtype
+ else:
+ return PyrexTypes.MemoryViewSliceType(base_type.dtype, base_type.axes[1:])
+
+ if self.index.is_sequence_constructor and base_type.is_memoryviewslice:
+ inferred_type = base_type
+ for a in self.index.args:
+ if not inferred_type.is_memoryviewslice:
+ break # something's gone wrong
+ inferred_type = IndexNode(self.pos, base=ExprNode(self.base.pos, type=inferred_type),
+ index=a).infer_type(env)
+ else:
+ return inferred_type
+
+ if base_type.is_cpp_class:
+ class FakeOperand:
+ def __init__(self, **kwds):
+ self.__dict__.update(kwds)
+ operands = [
+ FakeOperand(pos=self.pos, type=base_type),
+ FakeOperand(pos=self.pos, type=index_type),
+ ]
+ index_func = env.lookup_operator('[]', operands)
+ if index_func is not None:
+ return index_func.type.return_type
+
+ if is_pythran_expr(base_type) and is_pythran_expr(index_type):
+ index_with_type = (self.index, index_type)
+ return PythranExpr(pythran_indexing_type(base_type, [index_with_type]))
+
+ # may be slicing or indexing, we don't know
+ if base_type in (unicode_type, str_type):
+ # these types always return their own type on Python indexing/slicing
+ return base_type
+ else:
+ # TODO: Handle buffers (hopefully without too much redundancy).
+ return py_object_type
+
+ def analyse_types(self, env):
+ return self.analyse_base_and_index_types(env, getting=True)
+
+ def analyse_target_types(self, env):
+ node = self.analyse_base_and_index_types(env, setting=True)
+ if node.type.is_const:
+ error(self.pos, "Assignment to const dereference")
+ if node is self and not node.is_lvalue():
+ error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
+ return node
+
+ def analyse_base_and_index_types(self, env, getting=False, setting=False,
+ analyse_base=True):
+ # Note: This might be cleaned up by having IndexNode
+ # parsed in a saner way and only construct the tuple if
+ # needed.
+ if analyse_base:
+ self.base = self.base.analyse_types(env)
+
+ if self.base.type.is_error:
+ # Do not visit child tree if base is undeclared to avoid confusing
+ # error messages
+ self.type = PyrexTypes.error_type
+ return self
+
+ is_slice = self.index.is_slice
+ if not env.directives['wraparound']:
+ if is_slice:
+ check_negative_indices(self.index.start, self.index.stop)
+ else:
+ check_negative_indices(self.index)
+
+ # Potentially overflowing index value.
+ if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
+ self.index = self.index.coerce_to_pyobject(env)
+
+ is_memslice = self.base.type.is_memoryviewslice
+ # Handle the case where base is a literal char* (and we expect a string, not an int)
+ if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
+ if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
+ self.base = self.base.coerce_to_pyobject(env)
+
+ replacement_node = self.analyse_as_buffer_operation(env, getting)
+ if replacement_node is not None:
+ return replacement_node
+
+ self.nogil = env.nogil
+ base_type = self.base.type
+
+ if not base_type.is_cfunction:
+ self.index = self.index.analyse_types(env)
+ self.original_index_type = self.index.type
+ if self.original_index_type.is_reference:
+ self.original_index_type = self.original_index_type.ref_base_type
+
+ if base_type.is_unicode_char:
+ # we infer Py_UNICODE/Py_UCS4 for unicode strings in some
+ # cases, but indexing must still work for them
+ if setting:
+ warning(self.pos, "cannot assign to Unicode string index", level=1)
+ elif self.index.constant_result in (0, -1):
+ # uchar[0] => uchar
+ return self.base
+ self.base = self.base.coerce_to_pyobject(env)
+ base_type = self.base.type
+
+ if base_type.is_pyobject:
+ return self.analyse_as_pyobject(env, is_slice, getting, setting)
+ elif base_type.is_ptr or base_type.is_array:
+ return self.analyse_as_c_array(env, is_slice)
+ elif base_type.is_cpp_class:
+ return self.analyse_as_cpp(env, setting)
+ elif base_type.is_cfunction:
+ return self.analyse_as_c_function(env)
+ elif base_type.is_ctuple:
+ return self.analyse_as_c_tuple(env, getting, setting)
+ else:
+ error(self.pos,
+ "Attempting to index non-array type '%s'" %
+ base_type)
+ self.type = PyrexTypes.error_type
+ return self
+
+ def analyse_as_pyobject(self, env, is_slice, getting, setting):
+ base_type = self.base.type
+ if self.index.type.is_unicode_char and base_type is not dict_type:
+ # TODO: eventually fold into case below and remove warning, once people have adapted their code
+ warning(self.pos,
+ "Item lookup of unicode character codes now always converts to a Unicode string. "
+ "Use an explicit C integer cast to get back the previous integer lookup behaviour.", level=1)
+ self.index = self.index.coerce_to_pyobject(env)
+ self.is_temp = 1
+ elif self.index.type.is_int and base_type is not dict_type:
+ if (getting
+ and not env.directives['boundscheck']
+ and (base_type in (list_type, tuple_type, bytearray_type))
+ and (not self.index.type.signed
+ or not env.directives['wraparound']
+ or (isinstance(self.index, IntNode) and
+ self.index.has_constant_result() and self.index.constant_result >= 0))
+ ):
+ self.is_temp = 0
+ else:
+ self.is_temp = 1
+ self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
+ self.original_index_type.create_to_py_utility_code(env)
+ else:
+ self.index = self.index.coerce_to_pyobject(env)
+ self.is_temp = 1
+
+ if self.index.type.is_int and base_type is unicode_type:
+ # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
+ # if required, so this is fast and safe
+ self.type = PyrexTypes.c_py_ucs4_type
+ elif self.index.type.is_int and base_type is bytearray_type:
+ if setting:
+ self.type = PyrexTypes.c_uchar_type
+ else:
+ # not using 'uchar' to enable fast and safe error reporting as '-1'
+ self.type = PyrexTypes.c_int_type
+ elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type):
+ self.type = base_type
+ else:
+ item_type = None
+ if base_type in (list_type, tuple_type) and self.index.type.is_int:
+ item_type = infer_sequence_item_type(
+ env, self.base, self.index, seq_type=base_type)
+ if base_type in (list_type, tuple_type, dict_type):
+ # do the None check explicitly (not in a helper) to allow optimising it away
+ self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
+ if item_type is None or not item_type.is_pyobject:
+ # Even if we inferred a C type as result, we will read a Python object, so trigger coercion if needed.
+ # We could potentially use "item_type.equivalent_type" here, but that may trigger assumptions
+ # about the actual runtime item types, rather than just their ability to coerce to the C "item_type".
+ self.type = py_object_type
+ else:
+ self.type = item_type
+
+ self.wrap_in_nonecheck_node(env, getting)
+ return self
+
+ def analyse_as_c_array(self, env, is_slice):
+ base_type = self.base.type
+ self.type = base_type.base_type
+ if self.type.is_cpp_class:
+ self.type = PyrexTypes.CReferenceType(self.type)
+ if is_slice:
+ self.type = base_type
+ elif self.index.type.is_pyobject:
+ self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
+ elif not self.index.type.is_int:
+ error(self.pos, "Invalid index type '%s'" % self.index.type)
+ return self
+
+ def analyse_as_cpp(self, env, setting):
+ base_type = self.base.type
+ function = env.lookup_operator("[]", [self.base, self.index])
+ if function is None:
+ error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
+ self.type = PyrexTypes.error_type
+ self.result_code = ""
+ return self
+ func_type = function.type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ self.exception_check = func_type.exception_check
+ self.exception_value = func_type.exception_value
+ if self.exception_check:
+ if not setting:
+ self.is_temp = True
+ if needs_cpp_exception_conversion(self):
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ self.index = self.index.coerce_to(func_type.args[0].type, env)
+ self.type = func_type.return_type
+ if setting and not func_type.return_type.is_reference:
+ error(self.pos, "Can't set non-reference result '%s'" % self.type)
+ return self
+
+ def analyse_as_c_function(self, env):
+ base_type = self.base.type
+ if base_type.is_fused:
+ self.parse_indexed_fused_cdef(env)
+ else:
+ self.type_indices = self.parse_index_as_types(env)
+ self.index = None # FIXME: use a dedicated Node class instead of generic IndexNode
+ if base_type.templates is None:
+ error(self.pos, "Can only parameterize template functions.")
+ self.type = error_type
+ elif self.type_indices is None:
+ # Error recorded earlier.
+ self.type = error_type
+ elif len(base_type.templates) != len(self.type_indices):
+ error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
+ (len(base_type.templates), len(self.type_indices))))
+ self.type = error_type
+ else:
+ self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
+ # FIXME: use a dedicated Node class instead of generic IndexNode
+ return self
+
+ def analyse_as_c_tuple(self, env, getting, setting):
+ base_type = self.base.type
+ if isinstance(self.index, IntNode) and self.index.has_constant_result():
+ index = self.index.constant_result
+ if -base_type.size <= index < base_type.size:
+ if index < 0:
+ index += base_type.size
+ self.type = base_type.components[index]
+ else:
+ error(self.pos,
+ "Index %s out of bounds for '%s'" %
+ (index, base_type))
+ self.type = PyrexTypes.error_type
+ return self
+ else:
+ self.base = self.base.coerce_to_pyobject(env)
+ return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False)
+
+ def analyse_as_buffer_operation(self, env, getting):
+ """
+ Analyse buffer indexing and memoryview indexing/slicing
+ """
+ if isinstance(self.index, TupleNode):
+ indices = self.index.args
+ else:
+ indices = [self.index]
+
+ base = self.base
+ base_type = base.type
+ replacement_node = None
+ if base_type.is_memoryviewslice:
+ # memoryviewslice indexing or slicing
+ from . import MemoryView
+ if base.is_memview_slice:
+ # For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed.
+ merged_indices = base.merged_indices(indices)
+ if merged_indices is not None:
+ base = base.base
+ base_type = base.type
+ indices = merged_indices
+ have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
+ if have_slices:
+ replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base)
+ else:
+ replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base)
+ elif base_type.is_buffer or base_type.is_pythran_expr:
+ if base_type.is_pythran_expr or len(indices) == base_type.ndim:
+ # Buffer indexing
+ is_buffer_access = True
+ indices = [index.analyse_types(env) for index in indices]
+ if base_type.is_pythran_expr:
+ do_replacement = all(
+ index.type.is_int or index.is_slice or index.type.is_pythran_expr
+ for index in indices)
+ if do_replacement:
+ for i, index in enumerate(indices):
+ if index.is_slice:
+ index = SliceIntNode(index.pos, start=index.start, stop=index.stop, step=index.step)
+ index = index.analyse_types(env)
+ indices[i] = index
+ else:
+ do_replacement = all(index.type.is_int for index in indices)
+ if do_replacement:
+ replacement_node = BufferIndexNode(self.pos, indices=indices, base=base)
+ # On cloning, 'indices' is cloned as well. Otherwise, 'index' has been unpacked into 'indices' above.
+ assert not isinstance(self.index, CloneNode)
+
+ if replacement_node is not None:
+ replacement_node = replacement_node.analyse_types(env, getting)
+ return replacement_node
+
+ def wrap_in_nonecheck_node(self, env, getting):
+ if not env.directives['nonecheck'] or not self.base.may_be_none():
+ return
+ self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
+
+ def parse_index_as_types(self, env, required=True):
+ if isinstance(self.index, TupleNode):
+ indices = self.index.args
+ else:
+ indices = [self.index]
+ type_indices = []
+ for index in indices:
+ type_indices.append(index.analyse_as_type(env))
+ if type_indices[-1] is None:
+ if required:
+ error(index.pos, "not parsable as a type")
+ return None
+ return type_indices
+
+ def parse_indexed_fused_cdef(self, env):
+ """
+ Interpret fused_cdef_func[specific_type1, ...]
+
+ Note that if this method is called, we are an indexed cdef function
+ with fused argument types, and this IndexNode will be replaced by a
+ NameNode with the specific entry just after the analysis of expressions by
+ AnalyseExpressionsTransform.
+ """
+ self.type = PyrexTypes.error_type
+
+ self.is_fused_index = True
+
+ base_type = self.base.type
+ positions = []
+
+ if self.index.is_name or self.index.is_attribute:
+ positions.append(self.index.pos)
+ elif isinstance(self.index, TupleNode):
+ for arg in self.index.args:
+ positions.append(arg.pos)
+ specific_types = self.parse_index_as_types(env, required=False)
+
+ if specific_types is None:
+ self.index = self.index.analyse_types(env)
+
+ if not self.base.entry.as_variable:
+ error(self.pos, "Can only index fused functions with types")
+ else:
+ # A cpdef function indexed with Python objects
+ self.base.entry = self.entry = self.base.entry.as_variable
+ self.base.type = self.type = self.entry.type
+
+ self.base.is_temp = True
+ self.is_temp = True
+
+ self.entry.used = True
+
+ self.is_fused_index = False
+ return
+
+ for i, type in enumerate(specific_types):
+ specific_types[i] = type.specialize_fused(env)
+
+ fused_types = base_type.get_fused_types()
+ if len(specific_types) > len(fused_types):
+ return error(self.pos, "Too many types specified")
+ elif len(specific_types) < len(fused_types):
+ t = fused_types[len(specific_types)]
+ return error(self.pos, "Not enough types specified to specialize "
+ "the function, %s is still fused" % t)
+
+ # See if our index types form valid specializations
+ for pos, specific_type, fused_type in zip(positions,
+ specific_types,
+ fused_types):
+ if not any(specific_type.same_as(t) for t in fused_type.types):
+ return error(pos, "Type not in fused type")
+
+ if specific_type is None or specific_type.is_error:
+ return
+
+ fused_to_specific = dict(zip(fused_types, specific_types))
+ type = base_type.specialize(fused_to_specific)
+
+ if type.is_fused:
+ # Only partially specific, this is invalid
+ error(self.pos,
+ "Index operation makes function only partially specific")
+ else:
+ # Fully specific, find the signature with the specialized entry
+ for signature in self.base.type.get_all_specialized_function_types():
+ if type.same_as(signature):
+ self.type = signature
+
+ if self.base.is_attribute:
+ # Pretend to be a normal attribute, for cdef extension
+ # methods
+ self.entry = signature.entry
+ self.is_attribute = True
+ self.obj = self.base.obj
+
+ self.type.entry.used = True
+ self.base.type = signature
+ self.base.entry = signature.entry
+
+ break
+ else:
+ # This is a bug
+ raise InternalError("Couldn't find the right signature")
+
+ gil_message = "Indexing Python object"
+
+ def calculate_result_code(self):
+ if self.base.type in (list_type, tuple_type, bytearray_type):
+ if self.base.type is list_type:
+ index_code = "PyList_GET_ITEM(%s, %s)"
+ elif self.base.type is tuple_type:
+ index_code = "PyTuple_GET_ITEM(%s, %s)"
+ elif self.base.type is bytearray_type:
+ index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
+ else:
+ assert False, "unexpected base type in indexing: %s" % self.base.type
+ elif self.base.type.is_cfunction:
+ return "%s<%s>" % (
+ self.base.result(),
+ ",".join([param.empty_declaration_code() for param in self.type_indices]))
+ elif self.base.type.is_ctuple:
+ index = self.index.constant_result
+ if index < 0:
+ index += self.base.type.size
+ return "%s.f%s" % (self.base.result(), index)
+ else:
+ if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
+ error(self.pos, "Invalid use of pointer slice")
+ return
+ index_code = "(%s[%s])"
+ return index_code % (self.base.result(), self.index.result())
+
+ def extra_index_params(self, code):
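+ # Builds the extra template arguments for the GetItemInt/SetItemInt/
+ # DelItemInt utility code: index type, signedness, to-Python
+ # conversion function, and the is_list/wraparound/boundscheck flags,
+ # e.g. ", Py_ssize_t, 1, PyInt_FromSsize_t, 1, 1, 1" (illustrative).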
+ if self.index.type.is_int:
+ is_list = self.base.type is list_type
+ wraparound = (
+ bool(code.globalstate.directives['wraparound']) and
+ self.original_index_type.signed and
+ not (isinstance(self.index.constant_result, _py_int_types)
+ and self.index.constant_result >= 0))
+ boundscheck = bool(code.globalstate.directives['boundscheck'])
+ return ", %s, %d, %s, %d, %d, %d" % (
+ self.original_index_type.empty_declaration_code(),
+ self.original_index_type.signed and 1 or 0,
+ self.original_index_type.to_py_function,
+ is_list, wraparound, boundscheck)
+ else:
+ return ""
+
+ def generate_result_code(self, code):
+ if not self.is_temp:
+ # all handled in self.calculate_result_code()
+ return
+
+ utility_code = None
+ error_value = None
+ if self.type.is_pyobject:
+ error_value = 'NULL'
+ if self.index.type.is_int:
+ if self.base.type is list_type:
+ function = "__Pyx_GetItemInt_List"
+ elif self.base.type is tuple_type:
+ function = "__Pyx_GetItemInt_Tuple"
+ else:
+ function = "__Pyx_GetItemInt"
+ utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")
+ else:
+ if self.base.type is dict_type:
+ function = "__Pyx_PyDict_GetItem"
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
+ elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type):
+ # obj[str] is probably doing a dict lookup
+ function = "__Pyx_PyObject_Dict_GetItem"
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
+ else:
+ function = "__Pyx_PyObject_GetItem"
+ code.globalstate.use_utility_code(
+ TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
+ utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c")
+ elif self.type.is_unicode_char and self.base.type is unicode_type:
+ assert self.index.type.is_int
+ function = "__Pyx_GetItemInt_Unicode"
+ error_value = '(Py_UCS4)-1'
+ utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c")
+ elif self.base.type is bytearray_type:
+ assert self.index.type.is_int
+ assert self.type.is_int
+ function = "__Pyx_GetItemInt_ByteArray"
+ error_value = '-1'
+ utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")
+ elif not (self.base.type.is_cpp_class and self.exception_check):
+ assert False, "unexpected type %s and base type %s for indexing (%s)" % (
+ self.type, self.base.type, self.pos)
+
+ if utility_code is not None:
+ code.globalstate.use_utility_code(utility_code)
+
+ if self.index.type.is_int:
+ index_code = self.index.result()
+ else:
+ index_code = self.index.py_result()
+
+ if self.base.type.is_cpp_class and self.exception_check:
+ translate_cpp_exception(code, self.pos,
+ "%s = %s[%s];" % (self.result(), self.base.result(),
+ self.index.result()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+ else:
+ error_check = '!%s' if error_value == 'NULL' else '%%s == %s' % error_value
+ code.putln(
+ "%s = %s(%s, %s%s); %s" % (
+ self.result(),
+ function,
+ self.base.py_result(),
+ index_code,
+ self.extra_index_params(code),
+ code.error_goto_if(error_check % self.result(), self.pos)))
+ if self.type.is_pyobject:
+ self.generate_gotref(code)
+
+ def generate_setitem_code(self, value_code, code):
+ if self.index.type.is_int:
+ if self.base.type is bytearray_type:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
+ function = "__Pyx_SetItemInt_ByteArray"
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
+ function = "__Pyx_SetItemInt"
+ index_code = self.index.result()
+ else:
+ index_code = self.index.py_result()
+ if self.base.type is dict_type:
+ function = "PyDict_SetItem"
+ # It would seem that we could specialize lists/tuples, but that
+ # shouldn't happen here.
+ # Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
+ # index instead of an object, and bad conversion here would give
+ # the wrong exception. Also, tuples are supposed to be immutable,
+ # and raise a TypeError when trying to set their entries
+ # (PyTuple_SetItem() is for creating new tuples from scratch).
+ else:
+ function = "PyObject_SetItem"
+ code.putln(code.error_goto_if_neg(
+ "%s(%s, %s, %s%s)" % (
+ function,
+ self.base.py_result(),
+ index_code,
+ value_code,
+ self.extra_index_params(code)),
+ self.pos))
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ self.generate_subexpr_evaluation_code(code)
+
+ if self.type.is_pyobject:
+ self.generate_setitem_code(rhs.py_result(), code)
+ elif self.base.type is bytearray_type:
+ value_code = self._check_byte_value(code, rhs)
+ self.generate_setitem_code(value_code, code)
+ elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+':
+ if overloaded_assignment and exception_check and self.exception_value != exception_value:
+ # Handle the case that both the index operator and the assignment
+ # operator have a c++ exception handler and they are not the same.
+ translate_double_cpp_exception(code, self.pos, self.type,
+ self.result(), rhs.result(), self.exception_value,
+ exception_value, self.in_nogil_context)
+ else:
+ # Handle the case that only the index operator has a
+ # c++ exception handler, or that
+ # both exception handlers are the same.
+ translate_cpp_exception(code, self.pos,
+ "%s = %s;" % (self.result(), rhs.result()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+ else:
+ code.putln(
+ "%s = %s;" % (self.result(), rhs.result()))
+
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+ def _check_byte_value(self, code, rhs):
+ # TODO: should we do this generally on downcasts, or just here?
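+ # e.g. "some_bytearray[0] = 300" must raise
+ # ValueError("byte must be in range(0, 256)") at runtime rather than
+ # silently truncating the value.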
+ assert rhs.type.is_int, repr(rhs.type)
+ value_code = rhs.result()
+ if rhs.has_constant_result():
+ if 0 <= rhs.constant_result < 256:
+ return value_code
+ needs_cast = True # make at least the C compiler happy
+ warning(rhs.pos,
+ "value outside of range(0, 256)"
+ " when assigning to byte: %s" % rhs.constant_result,
+ level=1)
+ else:
+ needs_cast = rhs.type != PyrexTypes.c_uchar_type
+
+ if not self.nogil:
+ conditions = []
+ if rhs.is_literal or rhs.type.signed:
+ conditions.append('%s < 0' % value_code)
+ if (rhs.is_literal or not
+ (rhs.is_temp and rhs.type in (
+ PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
+ PyrexTypes.c_schar_type))):
+ conditions.append('%s > 255' % value_code)
+ if conditions:
+ code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
+ code.putln(
+ 'PyErr_SetString(PyExc_ValueError,'
+ ' "byte must be in range(0, 256)"); %s' %
+ code.error_goto(self.pos))
+ code.putln("}")
+
+ if needs_cast:
+ value_code = '((unsigned char)%s)' % value_code
+ return value_code
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ self.generate_subexpr_evaluation_code(code)
+ #if self.type.is_pyobject:
+ if self.index.type.is_int:
+ function = "__Pyx_DelItemInt"
+ index_code = self.index.result()
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
+ else:
+ index_code = self.index.py_result()
+ if self.base.type is dict_type:
+ function = "PyDict_DelItem"
+ else:
+ function = "PyObject_DelItem"
+ code.putln(code.error_goto_if_neg(
+ "%s(%s, %s%s)" % (
+ function,
+ self.base.py_result(),
+ index_code,
+ self.extra_index_params(code)),
+ self.pos))
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+
+
+class BufferIndexNode(_IndexingBaseNode):
+ """
+ Indexing of buffers and memoryviews. This node is created during type
+ analysis from IndexNode and replaces it.
+
+ Attributes:
+ base - base node being indexed
+ indices - list of indexing expressions
+ """
+
+ subexprs = ['base', 'indices']
+
+ is_buffer_access = True
+
+ # Whether we're assigning to a buffer (in that case it needs to be writable)
+ writable_needed = False
+
+ # Any indexing temp variables that we need to clean up.
+ index_temps = ()
+
+ def analyse_target_types(self, env):
+ return self.analyse_types(env, getting=False)
+
+ def analyse_types(self, env, getting=True):
+ """
+ Analyse types for buffer indexing only. Overridden by memoryview
+ indexing and slicing subclasses
+ """
+ # self.indices are already analyzed
+ if not self.base.is_name and not is_pythran_expr(self.base.type):
+ error(self.pos, "Can only index buffer variables")
+ self.type = error_type
+ return self
+
+ if not getting:
+ if not self.base.entry.type.writable:
+ error(self.pos, "Writing to readonly buffer")
+ else:
+ self.writable_needed = True
+ if self.base.type.is_buffer:
+ self.base.entry.buffer_aux.writable_needed = True
+
+ self.none_error_message = "'NoneType' object is not subscriptable"
+ self.analyse_buffer_index(env, getting)
+ self.wrap_in_nonecheck_node(env)
+ return self
+
+ def analyse_buffer_index(self, env, getting):
+ if is_pythran_expr(self.base.type):
+ index_with_type_list = [(idx, idx.type) for idx in self.indices]
+ self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list))
+ else:
+ self.base = self.base.coerce_to_simple(env)
+ self.type = self.base.type.dtype
+ self.buffer_type = self.base.type
+
+ if getting and (self.type.is_pyobject or self.type.is_pythran_expr):
+ self.is_temp = True
+
+ def analyse_assignment(self, rhs):
+ """
+ Called by IndexNode when this node is assigned to,
+ with the rhs of the assignment
+ """
+
+ def wrap_in_nonecheck_node(self, env):
+ if not env.directives['nonecheck'] or not self.base.may_be_none():
+ return
+ self.base = self.base.as_none_safe_node(self.none_error_message)
+
+ def nogil_check(self, env):
+ if self.is_buffer_access or self.is_memview_index:
+ if self.type.is_pyobject:
+ error(self.pos, "Cannot access buffer with object dtype without gil")
+ self.type = error_type
+
+ def calculate_result_code(self):
+ return "(*%s)" % self.buffer_ptr_code
+
+ def buffer_entry(self):
+ base = self.base
+ if self.base.is_nonecheck:
+ base = base.arg
+ return base.type.get_entry(base)
+
+ def get_index_in_temp(self, code, ivar):
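+ # Copy the index into a temp that is at least (s)size_t wide so that
+ # later stride/offset arithmetic cannot overflow a narrower C type.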
+ ret = code.funcstate.allocate_temp(
+ PyrexTypes.widest_numeric_type(
+ ivar.type,
+ PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
+ manage_ref=False)
+ code.putln("%s = %s;" % (ret, ivar.result()))
+ return ret
+
+ def buffer_lookup_code(self, code):
+ """
+ ndarray[1, 2, 3] and memslice[1, 2, 3]
+ """
+ if self.in_nogil_context:
+ if self.is_buffer_access or self.is_memview_index:
+ if code.globalstate.directives['boundscheck']:
+ warning(self.pos, "Use boundscheck(False) for faster access", level=1)
+
+ # Assign indices to temps of at least (s)size_t to allow further index calculations.
+ self.index_temps = index_temps = [self.get_index_in_temp(code, ivar) for ivar in self.indices]
+
+ # Generate buffer access code using these temps
+ from . import Buffer
+ buffer_entry = self.buffer_entry()
+ if buffer_entry.type.is_buffer:
+ negative_indices = buffer_entry.type.negative_indices
+ else:
+ negative_indices = Buffer.buffer_defaults['negative_indices']
+
+ return buffer_entry, Buffer.put_buffer_lookup_code(
+ entry=buffer_entry,
+ index_signeds=[ivar.type.signed for ivar in self.indices],
+ index_cnames=index_temps,
+ directives=code.globalstate.directives,
+ pos=self.pos, code=code,
+ negative_indices=negative_indices,
+ in_nogil_context=self.in_nogil_context)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+ self.generate_subexpr_evaluation_code(code)
+ self.generate_buffer_setitem_code(rhs, code)
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+ def generate_buffer_setitem_code(self, rhs, code, op=""):
+ base_type = self.base.type
+ if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
+ obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
+ # We have to do this because pythran objects must be declared
+ # at the beginning of the function.
+ # Indeed, Cython uses "goto" statements for error management, and
+ # RAII doesn't work with that kind of construction.
+ # Moreover, the way Pythran expressions are made is that they don't
+ # support move-assignment easily.
+ # Thus, we explicitly destroy and then placement-new the objects in
+ # this case.
+ code.putln("__Pyx_call_destructor(%s);" % obj)
+ code.putln("new (&%s) decltype(%s){%s};" % (obj, obj, self.base.pythran_result()))
+ code.putln("%s%s %s= %s;" % (
+ obj,
+ pythran_indexing_code(self.indices),
+ op,
+ rhs.pythran_result()))
+ code.funcstate.release_temp(obj)
+ return
+
+ # Used from generate_assignment_code and InPlaceAssignmentNode
+ buffer_entry, ptrexpr = self.buffer_lookup_code(code)
+
+ if self.buffer_type.dtype.is_pyobject:
+ # Must manage refcounts. XDecref what is already there
+ # and incref what we put in (NumPy allows there to be NULL)
+ ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
+ manage_ref=False)
+ rhs_code = rhs.result()
+ code.putln("%s = %s;" % (ptr, ptrexpr))
+ code.put_xgotref("*%s" % ptr, self.buffer_type.dtype)
+ code.putln("__Pyx_INCREF(%s); __Pyx_XDECREF(*%s);" % (
+ rhs_code, ptr))
+ code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
+ code.put_xgiveref("*%s" % ptr, self.buffer_type.dtype)
+ code.funcstate.release_temp(ptr)
+ else:
+ # Simple case
+ code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
+
+ def generate_result_code(self, code):
+ if is_pythran_expr(self.base.type):
+ res = self.result()
+ code.putln("__Pyx_call_destructor(%s);" % res)
+ code.putln("new (&%s) decltype(%s){%s%s};" % (
+ res,
+ res,
+ self.base.pythran_result(),
+ pythran_indexing_code(self.indices)))
+ return
+ buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
+ if self.type.is_pyobject:
+ # is_temp is True, so must pull out value and incref it.
+ # NOTE: object temporary results for nodes are declared
+ # as PyObject *, so we need a cast
+ res = self.result()
+ code.putln("%s = (PyObject *) *%s;" % (res, self.buffer_ptr_code))
+ # NumPy does (occasionally) allow NULL to denote None.
+ code.putln("if (unlikely(%s == NULL)) %s = Py_None;" % (res, res))
+ code.putln("__Pyx_INCREF((PyObject*)%s);" % res)
+
+ def free_subexpr_temps(self, code):
+ for temp in self.index_temps:
+ code.funcstate.release_temp(temp)
+ self.index_temps = ()
+ super(BufferIndexNode, self).free_subexpr_temps(code)
+
+
+class MemoryViewIndexNode(BufferIndexNode):
+
+ is_memview_index = True
+ is_buffer_access = False
+
+ def analyse_types(self, env, getting=True):
+ # memoryviewslice indexing or slicing
+ from . import MemoryView
+
+ self.is_pythran_mode = has_np_pythran(env)
+ indices = self.indices
+ have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
+
+ if not getting:
+ self.writable_needed = True
+ if self.base.is_name or self.base.is_attribute:
+ self.base.entry.type.writable_needed = True
+
+ self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
+ axes = []
+
+ index_type = PyrexTypes.c_py_ssize_t_type
+ new_indices = []
+
+ if len(indices) - len(newaxes) > self.base.type.ndim:
+ self.type = error_type
+ error(indices[self.base.type.ndim].pos,
+ "Too many indices specified for type %s" % self.base.type)
+ return self
+
+ axis_idx = 0
+ for i, index in enumerate(indices[:]):
+ index = index.analyse_types(env)
+ if index.is_none:
+ self.is_memview_slice = True
+ new_indices.append(index)
+ axes.append(('direct', 'strided'))
+ continue
+
+ access, packing = self.base.type.axes[axis_idx]
+ axis_idx += 1
+
+ if index.is_slice:
+ self.is_memview_slice = True
+ if index.step.is_none:
+ axes.append((access, packing))
+ else:
+ axes.append((access, 'strided'))
+
+ # Coerce start, stop and step to temps of the right type
+ for attr in ('start', 'stop', 'step'):
+ value = getattr(index, attr)
+ if not value.is_none:
+ value = value.coerce_to(index_type, env)
+ #value = value.coerce_to_temp(env)
+ setattr(index, attr, value)
+ new_indices.append(value)
+
+ elif index.type.is_int or index.type.is_pyobject:
+ if index.type.is_pyobject:
+ performance_hint(index.pos, "Index should be typed for more efficient access", env)
+
+ self.is_memview_index = True
+ index = index.coerce_to(index_type, env)
+ indices[i] = index
+ new_indices.append(index)
+
+ else:
+ self.type = error_type
+ error(index.pos, "Invalid index for memoryview specified, type %s" % index.type)
+ return self
+
+ ### FIXME: replace by MemoryViewSliceNode if is_memview_slice ?
+ self.is_memview_index = self.is_memview_index and not self.is_memview_slice
+ self.indices = new_indices
+ # All indices with all start/stop/step for slices.
+ # We need to keep this around.
+ self.original_indices = indices
+ self.nogil = env.nogil
+
+ self.analyse_operation(env, getting, axes)
+ self.wrap_in_nonecheck_node(env)
+ return self
+
+ def analyse_operation(self, env, getting, axes):
+ self.none_error_message = "Cannot index None memoryview slice"
+ self.analyse_buffer_index(env, getting)
+
+ def analyse_broadcast_operation(self, rhs):
+ """
+ Support broadcasting for slice assignment.
+ E.g.
+ m_2d[...] = m_1d # or,
+ m_1d[...] = m_2d # if the leading dimension has extent 1
+ """
+ if self.type.is_memoryviewslice:
+ lhs = self
+ if lhs.is_memview_broadcast or rhs.is_memview_broadcast:
+ lhs.is_memview_broadcast = True
+ rhs.is_memview_broadcast = True
+
+ def analyse_as_memview_scalar_assignment(self, rhs):
+ lhs = self.analyse_assignment(rhs)
+ if lhs:
+ rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment
+ return lhs
+ return self
+
+
+class MemoryViewSliceNode(MemoryViewIndexNode):
+
+ is_memview_slice = True
+
+ # No-op slicing operation, this node will be replaced
+ is_ellipsis_noop = False
+ is_memview_scalar_assignment = False
+ is_memview_index = False
+ is_memview_broadcast = False
+
+ def analyse_ellipsis_noop(self, env, getting):
+ """Slicing operations needing no evaluation, i.e. m[...] or m[:, :]"""
+ ### FIXME: replace directly
+ self.is_ellipsis_noop = all(
+ index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none
+ for index in self.indices)
+
+ if self.is_ellipsis_noop:
+ self.type = self.base.type
+
+ def analyse_operation(self, env, getting, axes):
+ from . import MemoryView
+
+ if not getting:
+ self.is_memview_broadcast = True
+ self.none_error_message = "Cannot assign to None memoryview slice"
+ else:
+ self.none_error_message = "Cannot slice None memoryview slice"
+
+ self.analyse_ellipsis_noop(env, getting)
+ if self.is_ellipsis_noop:
+ return
+
+ self.index = None
+ self.is_temp = True
+ self.use_managed_ref = True
+
+ if not MemoryView.validate_axes(self.pos, axes):
+ self.type = error_type
+ return
+
+ self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes)
+
+ if not (self.base.is_simple() or self.base.result_in_temp()):
+ self.base = self.base.coerce_to_temp(env)
+
+ def analyse_assignment(self, rhs):
+ if not rhs.type.is_memoryviewslice and (
+ self.type.dtype.assignable_from(rhs.type) or
+ rhs.type.is_pyobject):
+ # scalar assignment
+ return MemoryCopyScalar(self.pos, self)
+ else:
+ return MemoryCopySlice(self.pos, self)
+
+ def merged_indices(self, indices):
+ """Return a new list of indices/slices with 'indices' merged into the current ones
+ according to slicing rules.
+ Is used to implement "view[i][j]" => "view[i, j]".
+ Return None if the indices cannot (easily) be merged at compile time.
+ """
+ if not indices:
+ return None
+ # NOTE: Need to evaluate "self.original_indices" here as they might differ from "self.indices".
+ new_indices = self.original_indices[:]
+ indices = indices[:]
+ for i, s in enumerate(self.original_indices):
+ if s.is_slice:
+ if s.start.is_none and s.stop.is_none and s.step.is_none:
+ # Full slice found, replace by index.
+ new_indices[i] = indices[0]
+ indices.pop(0)
+ if not indices:
+ return new_indices
+ else:
+ # Found something non-trivial, e.g. a partial slice.
+ return None
+ elif not s.type.is_int:
+ # Not a slice, not an integer index => could be anything...
+ return None
+ if indices:
+ if len(new_indices) + len(indices) > self.base.type.ndim:
+ return None
+ new_indices += indices
+ return new_indices
+
+ def is_simple(self):
+ if self.is_ellipsis_noop:
+ # TODO: fix SimpleCallNode.is_simple()
+ return self.base.is_simple() or self.base.result_in_temp()
+
+ return self.result_in_temp()
+
+ def calculate_result_code(self):
+ """This is called in case this is a no-op slicing node"""
+ return self.base.result()
+
+ def generate_result_code(self, code):
+ if self.is_ellipsis_noop:
+ return ### FIXME: remove
+ buffer_entry = self.buffer_entry()
+ have_gil = not self.in_nogil_context
+
+ # TODO Mark: this is insane, do it better
+ have_slices = False
+ it = iter(self.indices)
+ for index in self.original_indices:
+ if index.is_slice:
+ have_slices = True
+ if not index.start.is_none:
+ index.start = next(it)
+ if not index.stop.is_none:
+ index.stop = next(it)
+ if not index.step.is_none:
+ index.step = next(it)
+ else:
+ next(it)
+
+ assert not list(it)
+
+ buffer_entry.generate_buffer_slice_code(
+ code, self.original_indices, self.result(), self.type,
+ have_gil=have_gil, have_slices=have_slices,
+ directives=code.globalstate.directives)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+ if self.is_ellipsis_noop:
+ self.generate_subexpr_evaluation_code(code)
+ else:
+ self.generate_evaluation_code(code)
+
+ if self.is_memview_scalar_assignment:
+ self.generate_memoryviewslice_assign_scalar_code(rhs, code)
+ else:
+ self.generate_memoryviewslice_setslice_code(rhs, code)
+
+ if self.is_ellipsis_noop:
+ self.generate_subexpr_disposal_code(code)
+ else:
+ self.generate_disposal_code(code)
+
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+
+class MemoryCopyNode(ExprNode):
+ """
+ Wraps a memoryview slice for slice assignment.
+
+ dst: destination memoryview slice
+ """
+
+ subexprs = ['dst']
+
+ def __init__(self, pos, dst):
+ super(MemoryCopyNode, self).__init__(pos)
+ self.dst = dst
+ self.type = dst.type
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
+ self.dst.generate_evaluation_code(code)
+ self._generate_assignment_code(rhs, code)
+ self.dst.generate_disposal_code(code)
+ self.dst.free_temps(code)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+
+class MemoryCopySlice(MemoryCopyNode):
+ """
+ Copy the contents of slice src to slice dst. Does not support indirect
+ slices.
+
+ memslice1[...] = memslice2
+ memslice1[:] = memslice2
+ """
+
+ is_memview_copy_assignment = True
+ copy_slice_cname = "__pyx_memoryview_copy_contents"
+
+ def _generate_assignment_code(self, src, code):
+ dst = self.dst
+
+ src.type.assert_direct_dims(src.pos)
+ dst.type.assert_direct_dims(dst.pos)
+
+ code.putln(code.error_goto_if_neg(
+ "%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname,
+ src.result(), dst.result(),
+ src.type.ndim, dst.type.ndim,
+ dst.type.dtype.is_pyobject),
+ dst.pos))
+
+
+class MemoryCopyScalar(MemoryCopyNode):
+ """
+ Assign a scalar to a slice. dst must be simple; the scalar is coerced
+ to the exact dtype of the slice, not just to something assignable to it.
+
+ memslice1[...] = 0.0
+ memslice1[:] = 0.0
+ """
+
+ def __init__(self, pos, dst):
+ super(MemoryCopyScalar, self).__init__(pos, dst)
+ self.type = dst.type.dtype
+
+ def _generate_assignment_code(self, scalar, code):
+ from . import MemoryView
+
+ self.dst.type.assert_direct_dims(self.dst.pos)
+
+ dtype = self.dst.type.dtype
+ type_decl = dtype.declaration_code("")
+ slice_decl = self.dst.type.declaration_code("")
+
+ code.begin_block()
+ code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
+ if self.dst.result_in_temp() or self.dst.is_simple():
+ dst_temp = self.dst.result()
+ else:
+ code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result()))
+ dst_temp = "__pyx_temp_slice"
+
+ force_strided = False
+ indices = self.dst.original_indices
+ for idx in indices:
+ if isinstance(idx, SliceNode) and not (idx.start.is_none and
+ idx.stop.is_none and
+ idx.step.is_none):
+ force_strided = True
+
+ slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp,
+ self.dst.type.ndim, code,
+ force_strided=force_strided)
+ p = slice_iter_obj.start_loops()
+
+ if dtype.is_pyobject:
+ code.putln("Py_DECREF(*(PyObject **) %s);" % p)
+
+ code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))
+
+ if dtype.is_pyobject:
+ code.putln("Py_INCREF(__pyx_temp_scalar);")
+
+ slice_iter_obj.end_loops()
+ code.end_block()
+
+
+class SliceIndexNode(ExprNode):
+ # 2-element slice indexing
+ #
+ # base ExprNode
+ # start ExprNode or None
+ # stop ExprNode or None
+ # slice ExprNode or None constant slice object
+ # nogil bool used internally
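+ #
+ # Handles two-argument slices like obj[start:stop]; slices with an
+ # explicit step are handled by IndexNode with a SliceNode index.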
+
+ subexprs = ['base', 'start', 'stop', 'slice']
+ nogil = False
+
+ slice = None
+
+ def infer_type(self, env):
+ base_type = self.base.infer_type(env)
+ if base_type.is_string or base_type.is_cpp_class:
+ return bytes_type
+ elif base_type.is_pyunicode_ptr:
+ return unicode_type
+ elif base_type in (bytes_type, bytearray_type, str_type, unicode_type,
+ basestring_type, list_type, tuple_type):
+ return base_type
+ elif base_type.is_ptr or base_type.is_array:
+ return PyrexTypes.c_array_type(base_type.base_type, None)
+ return py_object_type
+
+ def inferable_item_node(self, index=0):
+ # slicing shouldn't change the result type of the base, but the index might
+ if index is not not_a_constant and self.start:
+ if self.start.has_constant_result():
+ index += self.start.constant_result
+ else:
+ index = not_a_constant
+ return self.base.inferable_item_node(index)
+
+ def may_be_none(self):
+ base_type = self.base.type
+ if base_type:
+ if base_type.is_string:
+ return False
+ if base_type in (bytes_type, str_type, unicode_type,
+ basestring_type, list_type, tuple_type):
+ return False
+ return ExprNode.may_be_none(self)
+
+ def calculate_constant_result(self):
+ if self.start is None:
+ start = None
+ else:
+ start = self.start.constant_result
+ if self.stop is None:
+ stop = None
+ else:
+ stop = self.stop.constant_result
+ self.constant_result = self.base.constant_result[start:stop]
+
+ def compile_time_value(self, denv):
+ base = self.base.compile_time_value(denv)
+ if self.start is None:
+ start = 0
+ else:
+ start = self.start.compile_time_value(denv)
+ if self.stop is None:
+ stop = None
+ else:
+ stop = self.stop.compile_time_value(denv)
+ try:
+ return base[start:stop]
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def analyse_target_declaration(self, env):
+ pass
+
+ def analyse_target_types(self, env):
+ node = self.analyse_types(env, getting=False)
+ # when assigning, we must accept any Python type
+ if node.type.is_pyobject:
+ node.type = py_object_type
+ return node
+
+ def analyse_types(self, env, getting=True):
+ self.base = self.base.analyse_types(env)
+
+ if self.base.type.is_buffer or self.base.type.is_pythran_expr or self.base.type.is_memoryviewslice:
+ none_node = NoneNode(self.pos)
+ index = SliceNode(self.pos,
+ start=self.start or none_node,
+ stop=self.stop or none_node,
+ step=none_node)
+ index_node = IndexNode(self.pos, index=index, base=self.base)
+ return index_node.analyse_base_and_index_types(
+ env, getting=getting, setting=not getting,
+ analyse_base=False)
+
+ if self.start:
+ self.start = self.start.analyse_types(env)
+ if self.stop:
+ self.stop = self.stop.analyse_types(env)
+
+ if not env.directives['wraparound']:
+ check_negative_indices(self.start, self.stop)
+
+ base_type = self.base.type
+ if base_type.is_array and not getting:
+ # cannot assign directly to C array => try to assign by making a copy
+ if not self.start and not self.stop:
+ self.type = base_type
+ else:
+ self.type = PyrexTypes.CPtrType(base_type.base_type)
+ elif base_type.is_string or base_type.is_cpp_string:
+ self.type = default_str_type(env)
+ elif base_type.is_pyunicode_ptr:
+ self.type = unicode_type
+ elif base_type.is_ptr:
+ self.type = base_type
+ elif base_type.is_array:
+ # we need a ptr type here instead of an array type, as
+ # array types can result in invalid type casts in the C
+ # code
+ self.type = PyrexTypes.CPtrType(base_type.base_type)
+ else:
+ self.base = self.base.coerce_to_pyobject(env)
+ self.type = py_object_type
+ if base_type.is_builtin_type:
+ # slicing builtin types returns something of the same type
+ self.type = base_type
+ self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
+
+ if self.type is py_object_type:
+ if (not self.start or self.start.is_literal) and \
+ (not self.stop or self.stop.is_literal):
+ # cache the constant slice object, in case we need it
+ none_node = NoneNode(self.pos)
+ self.slice = SliceNode(
+ self.pos,
+ start=copy.deepcopy(self.start or none_node),
+ stop=copy.deepcopy(self.stop or none_node),
+ step=none_node
+ ).analyse_types(env)
+ else:
+ c_int = PyrexTypes.c_py_ssize_t_type
+
+ def allow_none(node, default_value, env):
+ # Coerce to Py_ssize_t, but allow None as meaning the default slice bound.
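+ # Builds the equivalent of
+ # "default_value if node is None else node"
+ # as a CondExprNode, using a temp so 'node' is evaluated only once.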
+ from .UtilNodes import EvalWithTempExprNode, ResultRefNode
+
+ node_ref = ResultRefNode(node)
+ new_expr = CondExprNode(
+ node.pos,
+ true_val=IntNode(
+ node.pos,
+ type=c_int,
+ value=default_value,
+ constant_result=int(default_value) if default_value.isdigit() else not_a_constant,
+ ),
+ false_val=node_ref.coerce_to(c_int, env),
+ test=PrimaryCmpNode(
+ node.pos,
+ operand1=node_ref,
+ operator='is',
+ operand2=NoneNode(node.pos),
+ ).analyse_types(env)
+ ).analyse_result_type(env)
+ return EvalWithTempExprNode(node_ref, new_expr)
+
+ if self.start:
+ if self.start.type.is_pyobject:
+ self.start = allow_none(self.start, '0', env)
+ self.start = self.start.coerce_to(c_int, env)
+ if self.stop:
+ if self.stop.type.is_pyobject:
+ self.stop = allow_none(self.stop, 'PY_SSIZE_T_MAX', env)
+ self.stop = self.stop.coerce_to(c_int, env)
+ self.is_temp = 1
+ return self
+
+ def analyse_as_type(self, env):
+ base_type = self.base.analyse_as_type(env)
+ if base_type:
+ if not self.start and not self.stop:
+ # memory view
+ from . import MemoryView
+ env.use_utility_code(MemoryView.view_utility_code)
+ none_node = NoneNode(self.pos)
+ slice_node = SliceNode(
+ self.pos,
+ start=none_node,
+ stop=none_node,
+ step=none_node,
+ )
+ return PyrexTypes.MemoryViewSliceType(
+ base_type, MemoryView.get_axes_specs(env, [slice_node]))
+ return None
+
+ def nogil_check(self, env):
+ self.nogil = env.nogil
+ return super(SliceIndexNode, self).nogil_check(env)
+
+ gil_message = "Slicing Python object"
+
+ get_slice_utility_code = TempitaUtilityCode.load(
+ "SliceObject", "ObjectHandling.c", context={'access': 'Get'})
+
+ set_slice_utility_code = TempitaUtilityCode.load(
+ "SliceObject", "ObjectHandling.c", context={'access': 'Set'})
+
+ def coerce_to(self, dst_type, env):
+ if ((self.base.type.is_string or self.base.type.is_cpp_string)
+ and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
+ if (dst_type not in (bytes_type, bytearray_type)
+ and not env.directives['c_string_encoding']):
+ error(self.pos,
+ "default encoding required for conversion from '%s' to '%s'" %
+ (self.base.type, dst_type))
+ self.type = dst_type
+ if dst_type.is_array and self.base.type.is_array:
+ if not self.start and not self.stop:
+ # redundant slice building, copy C arrays directly
+ return self.base.coerce_to(dst_type, env)
+ # else: check array size if possible
+ return super(SliceIndexNode, self).coerce_to(dst_type, env)
+
+ def generate_result_code(self, code):
+ if not self.type.is_pyobject:
+ error(self.pos,
+ "Slicing is not currently supported for '%s'." % self.type)
+ return
+
+ base_result = self.base.result()
+ result = self.result()
+ start_code = self.start_code()
+ stop_code = self.stop_code()
+ if self.base.type.is_string:
+ base_result = self.base.result()
+ if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
+ base_result = '((const char*)%s)' % base_result
+ if self.type is bytearray_type:
+ type_name = 'ByteArray'
+ else:
+ type_name = self.type.name.title()
+ if self.stop is None:
+ code.putln(
+ "%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
+ result,
+ type_name,
+ base_result,
+ start_code,
+ code.error_goto_if_null(result, self.pos)))
+ else:
+ code.putln(
+ "%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
+ result,
+ type_name,
+ base_result,
+ start_code,
+ stop_code,
+ start_code,
+ code.error_goto_if_null(result, self.pos)))
+ elif self.base.type.is_pyunicode_ptr:
+ base_result = self.base.result()
+ if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
+ base_result = '((const Py_UNICODE*)%s)' % base_result
+ if self.stop is None:
+ code.putln(
+ "%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
+ result,
+ base_result,
+ start_code,
+ code.error_goto_if_null(result, self.pos)))
+ else:
+ code.putln(
+ "%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
+ result,
+ base_result,
+ start_code,
+ stop_code,
+ start_code,
+ code.error_goto_if_null(result, self.pos)))
+
+ elif self.base.type is unicode_type:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
+ code.putln(
+ "%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
+ result,
+ base_result,
+ start_code,
+ stop_code,
+ code.error_goto_if_null(result, self.pos)))
+ elif self.type is py_object_type:
+ code.globalstate.use_utility_code(self.get_slice_utility_code)
+ (has_c_start, has_c_stop, c_start, c_stop,
+ py_start, py_stop, py_slice) = self.get_slice_config()
+ code.putln(
+ "%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
+ result,
+ self.base.py_result(),
+ c_start, c_stop,
+ py_start, py_stop, py_slice,
+ has_c_start, has_c_stop,
+ bool(code.globalstate.directives['wraparound']),
+ code.error_goto_if_null(result, self.pos)))
+ else:
+ if self.base.type is list_type:
+ code.globalstate.use_utility_code(
+ TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
+ cfunc = '__Pyx_PyList_GetSlice'
+ elif self.base.type is tuple_type:
+ code.globalstate.use_utility_code(
+ TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
+ cfunc = '__Pyx_PyTuple_GetSlice'
+ else:
+ cfunc = 'PySequence_GetSlice'
+ code.putln(
+ "%s = %s(%s, %s, %s); %s" % (
+ result,
+ cfunc,
+ self.base.py_result(),
+ start_code,
+ stop_code,
+ code.error_goto_if_null(result, self.pos)))
+ self.generate_gotref(code)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ self.generate_subexpr_evaluation_code(code)
+ if self.type.is_pyobject:
+ code.globalstate.use_utility_code(self.set_slice_utility_code)
+ has_c_start, has_c_stop, c_start, c_stop, py_start, py_stop, py_slice = self.get_slice_config()
+ code.put_error_if_neg(self.pos,
+ "__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
+ self.base.py_result(),
+ rhs.py_result(),
+ c_start, c_stop,
+ py_start, py_stop, py_slice,
+ has_c_start, has_c_stop,
+ bool(code.globalstate.directives['wraparound'])))
+ else:
+ start_offset = self.start_code() if self.start else '0'
+ if rhs.type.is_array:
+ array_length = rhs.type.size
+ self.generate_slice_guard_code(code, array_length)
+ else:
+ array_length = '%s - %s' % (self.stop_code(), start_offset)
+
+ code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
+ code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % (
+ self.base.result(), start_offset,
+ rhs.result(),
+ self.base.result(), array_length
+ ))
+
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ if not self.base.type.is_pyobject:
+ error(self.pos,
+ "Deleting slices is only supported for Python types, not '%s'." % self.type)
+ return
+ self.generate_subexpr_evaluation_code(code)
+ code.globalstate.use_utility_code(self.set_slice_utility_code)
+ (has_c_start, has_c_stop, c_start, c_stop,
+ py_start, py_stop, py_slice) = self.get_slice_config()
+ code.put_error_if_neg(self.pos,
+ "__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
+ self.base.py_result(),
+ c_start, c_stop,
+ py_start, py_stop, py_slice,
+ has_c_start, has_c_stop,
+ bool(code.globalstate.directives['wraparound'])))
+ self.generate_subexpr_disposal_code(code)
+ self.free_subexpr_temps(code)
+
+ def get_slice_config(self):
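+        # Added note: builds the 7-tuple of arguments shared by the
+        # __Pyx_PyObject_{Get,Set,Del}Slice() helpers. For example
+        # (illustrative), "obj[2:n]" with C integer bounds yields roughly
+        # (True, True, '2', 'n', 'NULL', 'NULL', 'NULL'), while
+        # Python-object bounds are passed by address via py_start/py_stop,
+        # and py_slice points at the cached constant slice if one exists.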
+ has_c_start, c_start, py_start = False, '0', 'NULL'
+ if self.start:
+ has_c_start = not self.start.type.is_pyobject
+ if has_c_start:
+ c_start = self.start.result()
+ else:
+ py_start = '&%s' % self.start.py_result()
+ has_c_stop, c_stop, py_stop = False, '0', 'NULL'
+ if self.stop:
+ has_c_stop = not self.stop.type.is_pyobject
+ if has_c_stop:
+ c_stop = self.stop.result()
+ else:
+ py_stop = '&%s' % self.stop.py_result()
+        py_slice = '&%s' % self.slice.py_result() if self.slice else 'NULL'
+ return (has_c_start, has_c_stop, c_start, c_stop,
+ py_start, py_stop, py_slice)
+
+ def generate_slice_guard_code(self, code, target_size):
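+        # Added note: emits a length check for assignments into C array
+        # slices, resolved at compile time when both sizes are known.
+        # E.g. (illustrative Cython):
+        #     cdef int a[5], b[3]
+        #     a[1:4] = b    # accepted, both sides have length 3
+        #     a[1:3] = b    # compile error: slice length 2 != 3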
+ if not self.base.type.is_array:
+ return
+ slice_size = self.base.type.size
+ try:
+ total_length = slice_size = int(slice_size)
+ except ValueError:
+ total_length = None
+
+ start = stop = None
+ if self.stop:
+ stop = self.stop.result()
+ try:
+ stop = int(stop)
+ if stop < 0:
+ if total_length is None:
+ slice_size = '%s + %d' % (slice_size, stop)
+ else:
+ slice_size += stop
+ else:
+ slice_size = stop
+ stop = None
+ except ValueError:
+ pass
+
+ if self.start:
+ start = self.start.result()
+ try:
+ start = int(start)
+ if start < 0:
+ if total_length is None:
+ start = '%s + %d' % (self.base.type.size, start)
+ else:
+ start += total_length
+ if isinstance(slice_size, _py_int_types):
+ slice_size -= start
+ else:
+ slice_size = '%s - (%s)' % (slice_size, start)
+ start = None
+ except ValueError:
+ pass
+
+ runtime_check = None
+ compile_time_check = False
+ try:
+ int_target_size = int(target_size)
+ except ValueError:
+ int_target_size = None
+ else:
+ compile_time_check = isinstance(slice_size, _py_int_types)
+
+ if compile_time_check and slice_size < 0:
+ if int_target_size > 0:
+ error(self.pos, "Assignment to empty slice.")
+ elif compile_time_check and start is None and stop is None:
+ # we know the exact slice length
+ if int_target_size != slice_size:
+ error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % (
+ slice_size, target_size))
+ elif start is not None:
+ if stop is None:
+ stop = slice_size
+ runtime_check = "(%s)-(%s)" % (stop, start)
+ elif stop is not None:
+ runtime_check = stop
+ else:
+ runtime_check = slice_size
+
+ if runtime_check:
+ code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size))
+ if self.nogil:
+ code.put_ensure_gil()
+ code.putln(
+ 'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,'
+ ' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",'
+ ' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % (
+ target_size, runtime_check))
+ if self.nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+
+ def start_code(self):
+ if self.start:
+ return self.start.result()
+ else:
+ return "0"
+
+ def stop_code(self):
+ if self.stop:
+ return self.stop.result()
+ elif self.base.type.is_array:
+ return self.base.type.size
+ else:
+ return "PY_SSIZE_T_MAX"
+
+ def calculate_result_code(self):
+ # self.result() is not used, but this method must exist
+ return ""
+
+
+class SliceNode(ExprNode):
+ # start:stop:step in subscript list
+ #
+ # start ExprNode
+ # stop ExprNode
+ # step ExprNode
+
+ subexprs = ['start', 'stop', 'step']
+ is_slice = True
+ type = slice_type
+ is_temp = 1
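+    # Added illustration: "obj[1:10:2]" builds a SliceNode with literal
+    # bounds; analyse_types() then marks it is_literal, so the
+    # PySlice_New() result is deduplicated and cached as a module-level
+    # constant in generate_result_code().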
+
+ def calculate_constant_result(self):
+ self.constant_result = slice(
+ self.start.constant_result,
+ self.stop.constant_result,
+ self.step.constant_result)
+
+ def compile_time_value(self, denv):
+ start = self.start.compile_time_value(denv)
+ stop = self.stop.compile_time_value(denv)
+ step = self.step.compile_time_value(denv)
+ try:
+ return slice(start, stop, step)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def may_be_none(self):
+ return False
+
+ def analyse_types(self, env):
+ start = self.start.analyse_types(env)
+ stop = self.stop.analyse_types(env)
+ step = self.step.analyse_types(env)
+ self.start = start.coerce_to_pyobject(env)
+ self.stop = stop.coerce_to_pyobject(env)
+ self.step = step.coerce_to_pyobject(env)
+ if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
+ self.is_literal = True
+ self.is_temp = False
+ return self
+
+ gil_message = "Constructing Python slice object"
+
+ def calculate_result_code(self):
+ return self.result_code
+
+ def generate_result_code(self, code):
+ if self.is_literal:
+ dedup_key = make_dedup_key(self.type, (self,))
+ self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2, dedup_key=dedup_key)
+ code = code.get_cached_constants_writer(self.result_code)
+ if code is None:
+ return # already initialised
+ code.mark_pos(self.pos)
+
+ code.putln(
+ "%s = PySlice_New(%s, %s, %s); %s" % (
+ self.result(),
+ self.start.py_result(),
+ self.stop.py_result(),
+ self.step.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ if self.is_literal:
+ self.generate_giveref(code)
+
+
+class SliceIntNode(SliceNode):
+    # start:stop:step in subscript list
+    # This is just a node to hold start, stop and step nodes that can be
+    # converted to integers. This does not generate a Python slice object.
+ #
+ # start ExprNode
+ # stop ExprNode
+ # step ExprNode
+
+ is_temp = 0
+
+ def calculate_constant_result(self):
+ self.constant_result = slice(
+ self.start.constant_result,
+ self.stop.constant_result,
+ self.step.constant_result)
+
+ def compile_time_value(self, denv):
+ start = self.start.compile_time_value(denv)
+ stop = self.stop.compile_time_value(denv)
+ step = self.step.compile_time_value(denv)
+ try:
+ return slice(start, stop, step)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def may_be_none(self):
+ return False
+
+ def analyse_types(self, env):
+ self.start = self.start.analyse_types(env)
+ self.stop = self.stop.analyse_types(env)
+ self.step = self.step.analyse_types(env)
+
+ if not self.start.is_none:
+ self.start = self.start.coerce_to_integer(env)
+ if not self.stop.is_none:
+ self.stop = self.stop.coerce_to_integer(env)
+ if not self.step.is_none:
+ self.step = self.step.coerce_to_integer(env)
+
+ if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
+ self.is_literal = True
+ self.is_temp = False
+ return self
+
+ def calculate_result_code(self):
+ pass
+
+ def generate_result_code(self, code):
+        for a in (self.start, self.stop, self.step):
+ if isinstance(a, CloneNode):
+ a.arg.result()
+
+
+class CallNode(ExprNode):
+
+ # allow overriding the default 'may_be_none' behaviour
+ may_return_none = None
+
+ def infer_type(self, env):
+ # TODO(robertwb): Reduce redundancy with analyse_types.
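+        # Added examples (illustrative): "float(x)" infers c_double_type,
+        # calling an extension type "MyType(...)" infers MyType, and an
+        # unknown Python callable falls back to py_object_type.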
+ function = self.function
+ func_type = function.infer_type(env)
+ if isinstance(function, NewExprNode):
+ # note: needs call to infer_type() above
+ return PyrexTypes.CPtrType(function.class_type)
+ if func_type is py_object_type:
+ # function might have lied for safety => try to find better type
+ entry = getattr(function, 'entry', None)
+ if entry is not None:
+ func_type = entry.type or func_type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ if func_type.is_cfunction:
+ if getattr(self.function, 'entry', None) and hasattr(self, 'args'):
+ alternatives = self.function.entry.all_alternatives()
+ arg_types = [arg.infer_type(env) for arg in self.args]
+ func_entry = PyrexTypes.best_match(arg_types, alternatives)
+ if func_entry:
+ func_type = func_entry.type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ return func_type.return_type
+ return func_type.return_type
+ elif func_type is type_type:
+ if function.is_name and function.entry and function.entry.type:
+ result_type = function.entry.type
+ if result_type.is_extension_type:
+ return result_type
+ elif result_type.is_builtin_type:
+ if function.entry.name == 'float':
+ return PyrexTypes.c_double_type
+ elif function.entry.name in Builtin.types_that_construct_their_instance:
+ return result_type
+ func_type = self.function.analyse_as_type(env)
+ if func_type and (func_type.is_struct_or_union or func_type.is_cpp_class):
+ return func_type
+ return py_object_type
+
+ def type_dependencies(self, env):
+        # TODO: Update when Danilo's C++ code is merged in, to handle
+        # the case of function overloading.
+ return self.function.type_dependencies(env)
+
+ def is_simple(self):
+        # C function calls could be considered simple, but they may
+        # have side effects that become visible when multiple operations
+        # must take effect in a fixed order, e.g. when constructing the
+        # argument sequence for a function call or comparing values.
+ return False
+
+ def may_be_none(self):
+ if self.may_return_none is not None:
+ return self.may_return_none
+ func_type = self.function.type
+ if func_type is type_type and self.function.is_name:
+ entry = self.function.entry
+ if entry.type.is_extension_type:
+ return False
+ if (entry.type.is_builtin_type and
+ entry.name in Builtin.types_that_construct_their_instance):
+ return False
+ return ExprNode.may_be_none(self)
+
+ def set_py_result_type(self, function, func_type=None):
+ if func_type is None:
+ func_type = function.type
+ if func_type is Builtin.type_type and (
+ function.is_name and
+ function.entry and
+ function.entry.is_builtin and
+ function.entry.name in Builtin.types_that_construct_their_instance):
+ # calling a builtin type that returns a specific object type
+ if function.entry.name == 'float':
+                # this becomes true later on, when a transform optimises the call
+ self.type = PyrexTypes.c_double_type
+ self.result_ctype = PyrexTypes.c_double_type
+ else:
+ self.type = Builtin.builtin_types[function.entry.name]
+ self.result_ctype = py_object_type
+ self.may_return_none = False
+ elif function.is_name and function.type_entry:
+ # We are calling an extension type constructor. As long as we do not
+ # support __new__(), the result type is clear
+ self.type = function.type_entry.type
+ self.result_ctype = py_object_type
+ self.may_return_none = False
+ else:
+ self.type = py_object_type
+
+ def analyse_as_type_constructor(self, env):
+ type = self.function.analyse_as_type(env)
+ if type and type.is_struct_or_union:
+ args, kwds = self.explicit_args_kwds()
+ items = []
+ for arg, member in zip(args, type.scope.var_entries):
+ items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
+ if kwds:
+ items += kwds.key_value_pairs
+ self.key_value_pairs = items
+ self.__class__ = DictNode
+ self.analyse_types(env) # FIXME
+ self.coerce_to(type, env)
+ return True
+ elif type and type.is_cpp_class:
+ self.args = [ arg.analyse_types(env) for arg in self.args ]
+            constructor = type.scope.lookup("<init>")
+ if not constructor:
+ error(self.function.pos, "no constructor found for C++ type '%s'" % self.function.name)
+ self.type = error_type
+ return self
+ self.function = RawCNameExprNode(self.function.pos, constructor.type)
+ self.function.entry = constructor
+ self.function.set_cname(type.empty_declaration_code())
+ self.analyse_c_function_call(env)
+ self.type = type
+ return True
+
+ def is_lvalue(self):
+ return self.type.is_reference
+
+ def nogil_check(self, env):
+ func_type = self.function_type()
+ if func_type.is_pyobject:
+ self.gil_error()
+ elif not func_type.is_error and not getattr(func_type, 'nogil', False):
+ self.gil_error()
+
+ gil_message = "Calling gil-requiring function"
+
+
+class SimpleCallNode(CallNode):
+ # Function call without keyword, * or ** args.
+ #
+ # function ExprNode
+ # args [ExprNode]
+ # arg_tuple ExprNode or None used internally
+ # self ExprNode or None used internally
+ # coerced_self ExprNode or None used internally
+ # wrapper_call bool used internally
+ # has_optional_args bool used internally
+ # nogil bool used internally
+
+ subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
+
+ self = None
+ coerced_self = None
+ arg_tuple = None
+ wrapper_call = False
+ has_optional_args = False
+ nogil = False
+ analysed = False
+ overflowcheck = False
+
+ def compile_time_value(self, denv):
+ function = self.function.compile_time_value(denv)
+ args = [arg.compile_time_value(denv) for arg in self.args]
+ try:
+ return function(*args)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ @classmethod
+ def for_cproperty(cls, pos, obj, entry):
+ # Create a call node for C property access.
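+        # Added sketch: for an attribute "obj.prop" implemented as a C
+        # property, the result is a SimpleCallNode that calls the
+        # property's getter entry with "obj" as its single argument.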
+ property_scope = entry.scope
+ getter_entry = property_scope.lookup_here(entry.name)
+ assert getter_entry, "Getter not found in scope %s: %s" % (property_scope, property_scope.entries)
+ function = NameNode(pos, name=entry.name, entry=getter_entry, type=getter_entry.type)
+ node = cls(pos, function=function, args=[obj])
+ return node
+
+ def analyse_as_type(self, env):
+ attr = self.function.as_cython_attribute()
+ if attr == 'pointer':
+ if len(self.args) != 1:
+                error(self.pos, "only one type allowed.")
+ else:
+ type = self.args[0].analyse_as_type(env)
+ if not type:
+ error(self.args[0].pos, "Unknown type")
+ else:
+ return PyrexTypes.CPtrType(type)
+ elif attr == 'typeof':
+ if len(self.args) != 1:
+                error(self.pos, "only one type allowed.")
+ operand = self.args[0].analyse_types(env)
+ return operand.type
+
+ def explicit_args_kwds(self):
+ return self.args, None
+
+ def analyse_types(self, env):
+ if self.analysed:
+ return self
+ self.analysed = True
+ if self.analyse_as_type_constructor(env):
+ return self
+ self.function.is_called = 1
+ self.function = self.function.analyse_types(env)
+ function = self.function
+
+ if function.is_attribute and function.entry and function.entry.is_cmethod:
+ # Take ownership of the object from which the attribute
+ # was obtained, because we need to pass it as 'self'.
+ self.self = function.obj
+ function.obj = CloneNode(self.self)
+
+ func_type = self.function_type()
+ self.is_numpy_call_with_exprs = False
+ if (has_np_pythran(env) and function.is_numpy_attribute and
+ pythran_is_numpy_func_supported(function)):
+ has_pythran_args = True
+ self.arg_tuple = TupleNode(self.pos, args = self.args)
+ self.arg_tuple = self.arg_tuple.analyse_types(env)
+ for arg in self.arg_tuple.args:
+ has_pythran_args &= is_pythran_supported_node_or_none(arg)
+ self.is_numpy_call_with_exprs = bool(has_pythran_args)
+ if self.is_numpy_call_with_exprs:
+ env.add_include_file(pythran_get_func_include_file(function))
+ return NumPyMethodCallNode.from_node(
+ self,
+ function_cname=pythran_functor(function),
+ arg_tuple=self.arg_tuple,
+ type=PythranExpr(pythran_func_type(function, self.arg_tuple.args)),
+ )
+ elif func_type.is_pyobject:
+ self.arg_tuple = TupleNode(self.pos, args = self.args)
+ self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
+ self.args = None
+ self.set_py_result_type(function, func_type)
+ self.is_temp = 1
+ else:
+ self.args = [ arg.analyse_types(env) for arg in self.args ]
+ self.analyse_c_function_call(env)
+ if func_type.exception_check == '+':
+ self.is_temp = True
+
+ return self
+
+ def function_type(self):
+ # Return the type of the function being called, coercing a function
+ # pointer to a function if necessary. If the function has fused
+ # arguments, return the specific type.
+ func_type = self.function.type
+
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+
+ return func_type
+
+ def analyse_c_function_call(self, env):
+ func_type = self.function.type
+ if func_type is error_type:
+ self.type = error_type
+ return
+
+ if func_type.is_cfunction and func_type.is_static_method:
+ if self.self and self.self.type.is_extension_type:
+ # To support this we'd need to pass self to determine whether
+ # it was overloaded in Python space (possibly via a Cython
+ # superclass turning a cdef method into a cpdef one).
+ error(self.pos, "Cannot call a static method on an instance variable.")
+ args = self.args
+ elif self.self:
+ args = [self.self] + self.args
+ else:
+ args = self.args
+
+ if func_type.is_cpp_class:
+ overloaded_entry = self.function.type.scope.lookup("operator()")
+ if overloaded_entry is None:
+ self.type = PyrexTypes.error_type
+ self.result_code = ""
+ return
+ elif hasattr(self.function, 'entry'):
+ overloaded_entry = self.function.entry
+ elif self.function.is_subscript and self.function.is_fused_index:
+ overloaded_entry = self.function.type.entry
+ else:
+ overloaded_entry = None
+
+ if overloaded_entry:
+ if self.function.type.is_fused:
+ functypes = self.function.type.get_all_specialized_function_types()
+ alternatives = [f.entry for f in functypes]
+ else:
+ alternatives = overloaded_entry.all_alternatives()
+
+ entry = PyrexTypes.best_match([arg.type for arg in args],
+ alternatives, self.pos, env, args)
+
+ if not entry:
+ self.type = PyrexTypes.error_type
+ self.result_code = ""
+ return
+
+ entry.used = True
+ if not func_type.is_cpp_class:
+ self.function.entry = entry
+            self.function.type = entry.type
+            func_type = self.function_type()
+ else:
+ entry = None
+ func_type = self.function_type()
+ if not func_type.is_cfunction:
+ error(self.pos, "Calling non-function type '%s'" % func_type)
+ self.type = PyrexTypes.error_type
+ self.result_code = ""
+ return
+
+ # Check no. of args
+ max_nargs = len(func_type.args)
+ expected_nargs = max_nargs - func_type.optional_arg_count
+ actual_nargs = len(args)
+ if func_type.optional_arg_count and expected_nargs != actual_nargs:
+ self.has_optional_args = 1
+ self.is_temp = 1
+
+ # check 'self' argument
+ if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method:
+ formal_arg = func_type.args[0]
+ arg = args[0]
+ if formal_arg.not_none:
+ if self.self:
+ self.self = self.self.as_none_safe_node(
+ "'NoneType' object has no attribute '%{0}s'".format('.30' if len(entry.name) <= 30 else ''),
+ error='PyExc_AttributeError',
+ format_args=[entry.name])
+ else:
+ # unbound method
+ arg = arg.as_none_safe_node(
+ "descriptor '%s' requires a '%s' object but received a 'NoneType'",
+ format_args=[entry.name, formal_arg.type.name])
+ if self.self:
+ if formal_arg.accept_builtin_subtypes:
+ arg = CMethodSelfCloneNode(self.self)
+ else:
+ arg = CloneNode(self.self)
+ arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
+ elif formal_arg.type.is_builtin_type:
+ # special case: unbound methods of builtins accept subtypes
+ arg = arg.coerce_to(formal_arg.type, env)
+ if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
+ arg.exact_builtin_type = False
+ args[0] = arg
+
+ # Coerce arguments
+ some_args_in_temps = False
+ for i in range(min(max_nargs, actual_nargs)):
+ formal_arg = func_type.args[i]
+ formal_type = formal_arg.type
+ arg = args[i].coerce_to(formal_type, env)
+ if formal_arg.not_none:
+ # C methods must do the None checks at *call* time
+ arg = arg.as_none_safe_node(
+ "cannot pass None into a C function argument that is declared 'not None'")
+ if arg.is_temp:
+ if i > 0:
+ # first argument in temp doesn't impact subsequent arguments
+ some_args_in_temps = True
+ elif arg.type.is_pyobject and not env.nogil:
+ if i == 0 and self.self is not None:
+ # a method's cloned "self" argument is ok
+ pass
+ elif arg.nonlocally_immutable():
+ # plain local variables are ok
+ pass
+ else:
+ # we do not safely own the argument's reference,
+ # but we must make sure it cannot be collected
+ # before we return from the function, so we create
+ # an owned temp reference to it
+ if i > 0: # first argument doesn't matter
+ some_args_in_temps = True
+ arg = arg.coerce_to_temp(env)
+ args[i] = arg
+
+ # handle additional varargs parameters
+ for i in range(max_nargs, actual_nargs):
+ arg = args[i]
+ if arg.type.is_pyobject:
+ if arg.type is str_type:
+ arg_ctype = PyrexTypes.c_char_ptr_type
+ else:
+ arg_ctype = arg.type.default_coerced_ctype()
+ if arg_ctype is None:
+                    error(self.args[i].pos,
+ "Python object cannot be passed as a varargs parameter")
+ else:
+ args[i] = arg = arg.coerce_to(arg_ctype, env)
+ if arg.is_temp and i > 0:
+ some_args_in_temps = True
+
+ if some_args_in_temps:
+ # if some args are temps and others are not, they may get
+ # constructed in the wrong order (temps first) => make
+ # sure they are either all temps or all not temps (except
+ # for the last argument, which is evaluated last in any
+ # case)
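+        # Added example (illustrative): in "f(a, g())" where only g() needs
+        # a temp, the temp would be constructed before "a" is read, so side
+        # effects could be observed in an unexpected order.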
+ for i in range(actual_nargs-1):
+ if i == 0 and self.self is not None:
+ continue # self is ok
+ arg = args[i]
+ if arg.nonlocally_immutable():
+ # locals, C functions, unassignable types are safe.
+ pass
+ elif arg.type.is_cpp_class:
+ # Assignment has side effects, avoid.
+ pass
+ elif env.nogil and arg.type.is_pyobject:
+ # can't copy a Python reference into a temp in nogil
+ # env (this is safe: a construction would fail in
+ # nogil anyway)
+ pass
+ else:
+ #self.args[i] = arg.coerce_to_temp(env)
+ # instead: issue a warning
+ if i > 0 or i == 1 and self.self is not None: # skip first arg
+ warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
+ break
+
+ self.args[:] = args
+
+ # Calc result type and code fragment
+ if isinstance(self.function, NewExprNode):
+ self.type = PyrexTypes.CPtrType(self.function.class_type)
+ else:
+ self.type = func_type.return_type
+
+ if self.function.is_name or self.function.is_attribute:
+ func_entry = self.function.entry
+ if func_entry and (func_entry.utility_code or func_entry.utility_code_definition):
+ self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
+
+ if self.type.is_pyobject:
+ self.result_ctype = py_object_type
+ self.is_temp = 1
+ elif func_type.exception_value is not None or func_type.exception_check:
+ self.is_temp = 1
+ elif self.type.is_memoryviewslice:
+ self.is_temp = 1
+ # func_type.exception_check = True
+
+ if self.is_temp and self.type.is_reference:
+ self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
+
+ # C++ exception handler
+ if func_type.exception_check == '+':
+ if needs_cpp_exception_conversion(func_type):
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+
+ self.overflowcheck = env.directives['overflowcheck']
+
+ def calculate_result_code(self):
+ return self.c_call_code()
+
+ def c_call_code(self):
+ func_type = self.function_type()
+ if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
+ return ""
+ formal_args = func_type.args
+ arg_list_code = []
+ args = list(zip(formal_args, self.args))
+ max_nargs = len(func_type.args)
+ expected_nargs = max_nargs - func_type.optional_arg_count
+ actual_nargs = len(self.args)
+ for formal_arg, actual_arg in args[:expected_nargs]:
+ arg_code = actual_arg.move_result_rhs_as(formal_arg.type)
+ arg_list_code.append(arg_code)
+
+ if func_type.is_overridable:
+ arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
+
+ if func_type.optional_arg_count:
+ if expected_nargs == actual_nargs:
+ optional_args = 'NULL'
+ else:
+ optional_args = "&%s" % self.opt_arg_struct
+ arg_list_code.append(optional_args)
+
+ for actual_arg in self.args[len(formal_args):]:
+ arg_list_code.append(actual_arg.move_result_rhs())
+
+ result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
+ return result
+
+ def is_c_result_required(self):
+ func_type = self.function_type()
+ if not func_type.exception_value or func_type.exception_check == '+':
+ return False # skip allocation of unused result temp
+ return True
+
+ def generate_evaluation_code(self, code):
+ function = self.function
+ if function.is_name or function.is_attribute:
+ code.globalstate.use_entry_utility_code(function.entry)
+
+ abs_function_cnames = ('abs', 'labs', '__Pyx_abs_longlong')
+ is_signed_int = self.type.is_int and self.type.signed
+ if self.overflowcheck and is_signed_int and function.result() in abs_function_cnames:
+ code.globalstate.use_utility_code(UtilityCode.load_cached("Common", "Overflow.c"))
+ code.putln('if (unlikely(%s == __PYX_MIN(%s))) {\
+ PyErr_SetString(PyExc_OverflowError,\
+ "Trying to take the absolute value of the most negative integer is not defined."); %s; }' % (
+ self.args[0].result(),
+ self.args[0].type.empty_declaration_code(),
+ code.error_goto(self.pos)))
+
+ if not function.type.is_pyobject or len(self.arg_tuple.args) > 1 or (
+ self.arg_tuple.args and self.arg_tuple.is_literal):
+ super(SimpleCallNode, self).generate_evaluation_code(code)
+ return
+
+ # Special case 0-args and try to avoid explicit tuple creation for Python calls with 1 arg.
+ arg = self.arg_tuple.args[0] if self.arg_tuple.args else None
+ subexprs = (self.self, self.coerced_self, function, arg)
+ for subexpr in subexprs:
+ if subexpr is not None:
+ subexpr.generate_evaluation_code(code)
+
+ code.mark_pos(self.pos)
+ assert self.is_temp
+ self.allocate_temp_result(code)
+
+ if arg is None:
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCallNoArg", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
+ self.result(),
+ function.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ else:
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCallOneArg", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
+ self.result(),
+ function.py_result(),
+ arg.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ self.generate_gotref(code)
+
+ for subexpr in subexprs:
+ if subexpr is not None:
+ subexpr.generate_disposal_code(code)
+ subexpr.free_temps(code)
+
+ def generate_result_code(self, code):
+ func_type = self.function_type()
+ if func_type.is_pyobject:
+ arg_code = self.arg_tuple.py_result()
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCall", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
+ self.result(),
+ self.function.py_result(),
+ arg_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ elif func_type.is_cfunction:
+ nogil = not code.funcstate.gil_owned
+ if self.has_optional_args:
+ actual_nargs = len(self.args)
+ expected_nargs = len(func_type.args) - func_type.optional_arg_count
+ self.opt_arg_struct = code.funcstate.allocate_temp(
+ func_type.op_arg_struct.base_type, manage_ref=True)
+ code.putln("%s.%s = %s;" % (
+ self.opt_arg_struct,
+ Naming.pyrex_prefix + "n",
+ len(self.args) - expected_nargs))
+ args = list(zip(func_type.args, self.args))
+ for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
+ code.putln("%s.%s = %s;" % (
+ self.opt_arg_struct,
+ func_type.opt_arg_cname(formal_arg.name),
+ actual_arg.result_as(formal_arg.type)))
+ exc_checks = []
+ if self.type.is_pyobject and self.is_temp:
+ exc_checks.append("!%s" % self.result())
+ elif self.type.is_memoryviewslice:
+ assert self.is_temp
+ exc_checks.append(self.type.error_condition(self.result()))
+ elif func_type.exception_check != '+':
+ exc_val = func_type.exception_value
+ exc_check = func_type.exception_check
+ if exc_val is not None:
+ exc_checks.append("%s == %s" % (self.result(), func_type.return_type.cast_code(exc_val)))
+ if exc_check:
+ if nogil:
+ if not exc_checks:
+ perf_hint_entry = getattr(self.function, "entry", None)
+ PyrexTypes.write_noexcept_performance_hint(
+ self.pos, code.funcstate.scope,
+ function_name=perf_hint_entry.name if perf_hint_entry else None,
+ void_return=self.type.is_void, is_call=True)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ErrOccurredWithGIL", "Exceptions.c"))
+ exc_checks.append("__Pyx_ErrOccurredWithGIL()")
+ else:
+ exc_checks.append("PyErr_Occurred()")
+ if self.is_temp or exc_checks:
+ rhs = self.c_call_code()
+ if self.result():
+ lhs = "%s = " % self.result()
+ if self.is_temp and self.type.is_pyobject:
+ #return_type = self.type # func_type.return_type
+ #print "SimpleCallNode.generate_result_code: casting", rhs, \
+ # "from", return_type, "to pyobject" ###
+ rhs = typecast(py_object_type, self.type, rhs)
+ else:
+ lhs = ""
+ if func_type.exception_check == '+':
+ translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs),
+ self.result() if self.type.is_pyobject else None,
+ func_type.exception_value, nogil)
+ else:
+ if exc_checks:
+ goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
+ else:
+ goto_error = ""
+ code.putln("%s%s; %s" % (lhs, rhs, goto_error))
+ if self.type.is_pyobject and self.result():
+ self.generate_gotref(code)
+ if self.has_optional_args:
+ code.funcstate.release_temp(self.opt_arg_struct)
+
+
+class NumPyMethodCallNode(ExprNode):
+ # Pythran call to a NumPy function or method.
+ #
+ # function_cname string the function/method to call
+ # arg_tuple TupleNode the arguments as an args tuple
+
+ subexprs = ['arg_tuple']
+ is_temp = True
+ may_return_none = True
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ assert self.arg_tuple.mult_factor is None
+ args = self.arg_tuple.args
+ for arg in args:
+ arg.generate_evaluation_code(code)
+
+ code.putln("// function evaluation code for numpy function")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
+ code.putln("new (&%s) decltype(%s){%s{}(%s)};" % (
+ self.result(),
+ self.result(),
+ self.function_cname,
+ ", ".join(a.pythran_result() for a in args)))
+
+
+class PyMethodCallNode(SimpleCallNode):
+ # Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
+ # Allows the self argument to be injected directly instead of repacking a tuple for it.
+ #
+ # function ExprNode the function/method object to call
+ # arg_tuple TupleNode the arguments for the args tuple
+
+ subexprs = ['function', 'arg_tuple']
+ is_temp = True
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ self.function.generate_evaluation_code(code)
+ assert self.arg_tuple.mult_factor is None
+ args = self.arg_tuple.args
+ for arg in args:
+ arg.generate_evaluation_code(code)
+
+ # make sure function is in temp so that we can replace the reference below if it's a method
+ reuse_function_temp = self.function.is_temp
+ if reuse_function_temp:
+ function = self.function.result()
+ else:
+ function = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ self.function.make_owned_reference(code)
+ code.put("%s = %s; " % (function, self.function.py_result()))
+ self.function.generate_disposal_code(code)
+ self.function.free_temps(code)
+
+ self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln("%s = NULL;" % self_arg)
+ arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = 0;" % arg_offset_cname)
+
+ def attribute_is_likely_method(attr):
+ obj = attr.obj
+ if obj.is_name and obj.entry.is_pyglobal:
+ return False # more likely to be a function
+ return True
+
+ if self.function.is_attribute:
+ likely_method = 'likely' if attribute_is_likely_method(self.function) else 'unlikely'
+ elif self.function.is_name and self.function.cf_state:
+ # not an attribute itself, but might have been assigned from one (e.g. bound method)
+ for assignment in self.function.cf_state:
+ value = assignment.rhs
+ if value and value.is_attribute and value.obj.type and value.obj.type.is_pyobject:
+ if attribute_is_likely_method(value):
+ likely_method = 'likely'
+ break
+ else:
+ likely_method = 'unlikely'
+ else:
+ likely_method = 'unlikely'
+
+ code.putln("#if CYTHON_UNPACK_METHODS")
+ code.putln("if (%s(PyMethod_Check(%s))) {" % (likely_method, function))
+ code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
+ # the following is always true in Py3 (kept only for safety),
+ # but is false for unbound methods in Py2
+ code.putln("if (likely(%s)) {" % self_arg)
+ code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
+ code.put_incref(self_arg, py_object_type)
+ code.put_incref("function", py_object_type)
+        # free the method object as early as possible, to enable reuse from CPython's freelist
+ code.put_decref_set(function, py_object_type, "function")
+ code.putln("%s = 1;" % arg_offset_cname)
+ code.putln("}")
+ code.putln("}")
+ code.putln("#endif") # CYTHON_UNPACK_METHODS
+ # TODO may need to deal with unused variables in the #else case
+
+ # actually call the function
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectFastCall", "ObjectHandling.c"))
+
+ code.putln("{")
+ # To avoid passing an out-of-bounds argument pointer in the no-args case,
+ # we need at least two entries, so we pad with NULL and point to that.
+ # See https://github.com/cython/cython/issues/5668
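+        # Added sketch of the generated C for a no-arg call (illustrative
+        # names):
+        #     PyObject *__pyx_callargs[2] = {self_or_NULL, NULL};
+        #     result = __Pyx_PyObject_FastCall(func, __pyx_callargs+1-offset, 0+offset);
+        # With offset == 1 (an unpacked bound method) the callee sees
+        # [self]; with offset == 0 it sees an empty argument list.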
+ code.putln("PyObject *__pyx_callargs[%d] = {%s, %s};" % (
+ (len(args) + 1) if args else 2,
+ self_arg,
+ ', '.join(arg.py_result() for arg in args) if args else "NULL",
+ ))
+ code.putln("%s = __Pyx_PyObject_FastCall(%s, __pyx_callargs+1-%s, %d+%s);" % (
+ self.result(),
+ function,
+ arg_offset_cname,
+ len(args),
+ arg_offset_cname))
+
+ code.put_xdecref_clear(self_arg, py_object_type)
+ code.funcstate.release_temp(self_arg)
+ code.funcstate.release_temp(arg_offset_cname)
+ for arg in args:
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+ code.putln(code.error_goto_if_null(self.result(), self.pos))
+ self.generate_gotref(code)
+
+ if reuse_function_temp:
+ self.function.generate_disposal_code(code)
+ self.function.free_temps(code)
+ else:
+ code.put_decref_clear(function, py_object_type)
+ code.funcstate.release_temp(function)
+ code.putln("}")
+
+
+class InlinedDefNodeCallNode(CallNode):
+ # Inline call to defnode
+ #
+ # function PyCFunctionNode
+ # function_name NameNode
+ # args [ExprNode]
+
+ subexprs = ['args', 'function_name']
+ is_temp = 1
+ type = py_object_type
+ function = None
+ function_name = None
+
+ def can_be_inlined(self):
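+        # Added note: e.g. "def f(a, b)" called as "f(x, y)" can be
+        # inlined (exact positional match); functions taking */** args or
+        # keyword-only arguments, and calls with a different number of
+        # positional arguments, fall back to a regular call.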
+        func_type = self.function.def_node
+ if func_type.star_arg or func_type.starstar_arg:
+ return False
+ if len(func_type.args) != len(self.args):
+ return False
+ if func_type.num_kwonly_args:
+ return False # actually wrong number of arguments
+ return True
+
+ def analyse_types(self, env):
+ self.function_name = self.function_name.analyse_types(env)
+
+ self.args = [ arg.analyse_types(env) for arg in self.args ]
+ func_type = self.function.def_node
+ actual_nargs = len(self.args)
+
+ # Coerce arguments
+ some_args_in_temps = False
+ for i in range(actual_nargs):
+ formal_type = func_type.args[i].type
+ arg = self.args[i].coerce_to(formal_type, env)
+ if arg.is_temp:
+ if i > 0:
+ # first argument in temp doesn't impact subsequent arguments
+ some_args_in_temps = True
+ elif arg.type.is_pyobject and not env.nogil:
+ if arg.nonlocally_immutable():
+ # plain local variables are ok
+ pass
+ else:
+ # we do not safely own the argument's reference,
+ # but we must make sure it cannot be collected
+ # before we return from the function, so we create
+ # an owned temp reference to it
+ if i > 0: # first argument doesn't matter
+ some_args_in_temps = True
+ arg = arg.coerce_to_temp(env)
+ self.args[i] = arg
+
+ if some_args_in_temps:
+ # if some args are temps and others are not, they may get
+ # constructed in the wrong order (temps first) => make
+ # sure they are either all temps or all not temps (except
+ # for the last argument, which is evaluated last in any
+ # case)
+ for i in range(actual_nargs-1):
+ arg = self.args[i]
+ if arg.nonlocally_immutable():
+ # locals, C functions, unassignable types are safe.
+ pass
+ elif arg.type.is_cpp_class:
+ # Assignment has side effects, avoid.
+ pass
+ elif env.nogil and arg.type.is_pyobject:
+ # can't copy a Python reference into a temp in nogil
+ # env (this is safe: a construction would fail in
+ # nogil anyway)
+ pass
+ else:
+ #self.args[i] = arg.coerce_to_temp(env)
+ # instead: issue a warning
+ if i > 0:
+ warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
+ break
+ return self
+
+ def generate_result_code(self, code):
+ arg_code = [self.function_name.py_result()]
+ func_type = self.function.def_node
+ for arg, proto_arg in zip(self.args, func_type.args):
+ if arg.type.is_pyobject:
+ arg_code.append(arg.result_as(proto_arg.type))
+ else:
+ arg_code.append(arg.result())
+ arg_code = ', '.join(arg_code)
+ code.putln(
+ "%s = %s(%s); %s" % (
+ self.result(),
+ self.function.def_node.entry.pyfunc_cname,
+ arg_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class PythonCapiFunctionNode(ExprNode):
+ subexprs = []
+
+ def __init__(self, pos, py_name, cname, func_type, utility_code = None):
+ ExprNode.__init__(self, pos, name=py_name, cname=cname,
+ type=func_type, utility_code=utility_code)
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ if self.utility_code:
+ code.globalstate.use_utility_code(self.utility_code)
+
+ def calculate_result_code(self):
+ return self.cname
+
+
+class PythonCapiCallNode(SimpleCallNode):
+ # Python C-API Function call (only created in transforms)
+
+ # By default, we assume that the call never returns None, as this
+ # is true for most C-API functions in CPython. If this does not
+ # apply to a call, set the following to True (or None to inherit
+ # the default behaviour).
+ may_return_none = False
+
+ def __init__(self, pos, function_name, func_type,
+ utility_code = None, py_name=None, **kwargs):
+ self.type = func_type.return_type
+ self.result_ctype = self.type
+ self.function = PythonCapiFunctionNode(
+ pos, py_name, function_name, func_type,
+ utility_code = utility_code)
+ # call this last so that we can override the constructed
+ # attributes above with explicit keyword arguments if required
+ SimpleCallNode.__init__(self, pos, **kwargs)
+
+
+class CachedBuiltinMethodCallNode(CallNode):
+ # Python call to a method of a known Python builtin (only created in transforms)
+
+ subexprs = ['obj', 'args']
+ is_temp = True
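+    # Added illustration: for a call like "mylist.append(x)" where the
+    # object is known to be a list, the unbound "list.append" method is
+    # looked up once and cached, avoiding a per-call attribute lookup.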
+
+ def __init__(self, call_node, obj, method_name, args):
+ super(CachedBuiltinMethodCallNode, self).__init__(
+ call_node.pos,
+ obj=obj, method_name=method_name, args=args,
+ may_return_none=call_node.may_return_none,
+ type=call_node.type)
+
+ def may_be_none(self):
+ if self.may_return_none is not None:
+ return self.may_return_none
+ return ExprNode.may_be_none(self)
+
+ def generate_result_code(self, code):
+ type_cname = self.obj.type.cname
+ obj_cname = self.obj.py_result()
+ args = [arg.py_result() for arg in self.args]
+ call_code = code.globalstate.cached_unbound_method_call_code(
+ obj_cname, type_cname, self.method_name, args)
+ code.putln("%s = %s; %s" % (
+ self.result(), call_code,
+ code.error_goto_if_null(self.result(), self.pos)
+ ))
+ self.generate_gotref(code)
+
+
+class GeneralCallNode(CallNode):
+ # General Python function call, including keyword,
+ # * and ** arguments.
+ #
+ # function ExprNode
+ # positional_args ExprNode Tuple of positional arguments
+ # keyword_args ExprNode or None Dict of keyword arguments
+
+ type = py_object_type
+
+ subexprs = ['function', 'positional_args', 'keyword_args']
+
+ nogil_check = Node.gil_error
+
+ def compile_time_value(self, denv):
+ function = self.function.compile_time_value(denv)
+ positional_args = self.positional_args.compile_time_value(denv)
+ keyword_args = self.keyword_args.compile_time_value(denv)
+ try:
+ return function(*positional_args, **keyword_args)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def explicit_args_kwds(self):
+ if (self.keyword_args and not self.keyword_args.is_dict_literal or
+ not self.positional_args.is_sequence_constructor):
+ raise CompileError(self.pos,
+ 'Compile-time keyword arguments must be explicit.')
+ return self.positional_args.args, self.keyword_args
+
+ def analyse_types(self, env):
+ if self.analyse_as_type_constructor(env):
+ return self
+ self.function = self.function.analyse_types(env)
+ if not self.function.type.is_pyobject:
+ if self.function.type.is_error:
+ self.type = error_type
+ return self
+ if hasattr(self.function, 'entry'):
+ node = self.map_to_simple_call_node()
+ if node is not None and node is not self:
+ return node.analyse_types(env)
+ elif self.function.entry.as_variable:
+ self.function = self.function.coerce_to_pyobject(env)
+ elif node is self:
+ error(self.pos,
+ "Non-trivial keyword arguments and starred "
+ "arguments not allowed in cdef functions.")
+ else:
+ # error was already reported
+ pass
+ else:
+ self.function = self.function.coerce_to_pyobject(env)
+ if self.keyword_args:
+ self.keyword_args = self.keyword_args.analyse_types(env)
+ self.positional_args = self.positional_args.analyse_types(env)
+ self.positional_args = \
+ self.positional_args.coerce_to_pyobject(env)
+ self.set_py_result_type(self.function)
+ self.is_temp = 1
+ return self
+
+ def map_to_simple_call_node(self):
+ """
+ Tries to map keyword arguments to declared positional arguments.
+        Returns self to fall back to a Python call, None if an error
+        has been reported, or a SimpleCallNode if the mapping succeeds.
+ """
+ if not isinstance(self.positional_args, TupleNode):
+ # has starred argument
+ return self
+ if not self.keyword_args.is_dict_literal:
+ # keywords come from arbitrary expression => nothing to do here
+ return self
+ function = self.function
+ entry = getattr(function, 'entry', None)
+ if not entry:
+ return self
+ function_type = entry.type
+ if function_type.is_ptr:
+ function_type = function_type.base_type
+ if not function_type.is_cfunction:
+ return self
+
+ pos_args = self.positional_args.args
+ kwargs = self.keyword_args
+ declared_args = function_type.args
+ if entry.is_cmethod:
+ declared_args = declared_args[1:] # skip 'self'
+
+ if len(pos_args) > len(declared_args):
+ error(self.pos, "function call got too many positional arguments, "
+ "expected %d, got %s" % (len(declared_args),
+ len(pos_args)))
+ return None
+
+ matched_args = {
+ arg.name for arg in declared_args[:len(pos_args)]
+ if arg.name
+ }
+ unmatched_args = declared_args[len(pos_args):]
+ matched_kwargs_count = 0
+ args = list(pos_args)
+
+ # check for duplicate keywords
+ seen = set(matched_args)
+ has_errors = False
+ for arg in kwargs.key_value_pairs:
+ name = arg.key.value
+ if name in seen:
+ error(arg.pos, "argument '%s' passed twice" % name)
+ has_errors = True
+ # continue to report more errors if there are any
+ seen.add(name)
+
+ # match keywords that are passed in order
+ for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
+ name = arg.key.value
+ if decl_arg.name == name:
+ matched_args.add(name)
+ matched_kwargs_count += 1
+ args.append(arg.value)
+ else:
+ break
+
+ # match keyword arguments that are passed out-of-order, but keep
+ # the evaluation of non-simple arguments in order by moving them
+ # into temps
+ from .UtilNodes import EvalWithTempExprNode, LetRefNode
+ temps = []
+ if len(kwargs.key_value_pairs) > matched_kwargs_count:
+ unmatched_args = declared_args[len(args):]
+ keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
+ for i, arg in enumerate(kwargs.key_value_pairs) ])
+ first_missing_keyword = None
+ for decl_arg in unmatched_args:
+ name = decl_arg.name
+ if name not in keywords:
+ # missing keyword argument => either done or error
+ if not first_missing_keyword:
+ first_missing_keyword = name
+ continue
+ elif first_missing_keyword:
+ if entry.as_variable:
+ # we might be able to convert the function to a Python
+ # object, which then allows full calling semantics
+ # with default values in gaps - currently, we only
+ # support optional arguments at the end
+ return self
+ # wasn't the last keyword => gaps are not supported
+ error(self.pos, "C function call is missing "
+ "argument '%s'" % first_missing_keyword)
+ return None
+ pos, arg = keywords[name]
+ matched_args.add(name)
+ matched_kwargs_count += 1
+ if arg.value.is_simple():
+ args.append(arg.value)
+ else:
+ temp = LetRefNode(arg.value)
+ assert temp.is_simple()
+ args.append(temp)
+ temps.append((pos, temp))
+
+ if temps:
+ # may have to move preceding non-simple args into temps
+ final_args = []
+ new_temps = []
+ first_temp_arg = temps[0][-1]
+ for arg_value in args:
+ if arg_value is first_temp_arg:
+ break # done
+ if arg_value.is_simple():
+ final_args.append(arg_value)
+ else:
+ temp = LetRefNode(arg_value)
+ new_temps.append(temp)
+ final_args.append(temp)
+ if new_temps:
+ args = final_args
+ temps = new_temps + [ arg for i,arg in sorted(temps) ]
+
+ # check for unexpected keywords
+ for arg in kwargs.key_value_pairs:
+ name = arg.key.value
+ if name not in matched_args:
+ has_errors = True
+ error(arg.pos,
+ "C function got unexpected keyword argument '%s'" %
+ name)
+
+ if has_errors:
+ # error was reported already
+ return None
+
+ # all keywords mapped to positional arguments
+ # if we are missing arguments, SimpleCallNode will figure it out
+ node = SimpleCallNode(self.pos, function=function, args=args)
+ for temp in temps[::-1]:
+ node = EvalWithTempExprNode(temp, node)
+ return node
+
+ def generate_result_code(self, code):
+ if self.type.is_error: return
+ if self.keyword_args:
+ kwargs = self.keyword_args.py_result()
+ else:
+ kwargs = 'NULL'
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCall", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
+ self.result(),
+ self.function.py_result(),
+ self.positional_args.py_result(),
+ kwargs,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class AsTupleNode(ExprNode):
+ # Convert argument to tuple. Used for normalising
+ # the * argument of a function call.
+ #
+ # arg ExprNode
+
+ subexprs = ['arg']
+ is_temp = 1
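+    # Added illustration: for "f(*args)", the "args" expression is
+    # normalised with (__Pyx_)PySequence_Tuple(), or passed through with
+    # only a None check when it is already known to be a tuple.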
+
+ def calculate_constant_result(self):
+ self.constant_result = tuple(self.arg.constant_result)
+
+ def compile_time_value(self, denv):
+ arg = self.arg.compile_time_value(denv)
+ try:
+ return tuple(arg)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def analyse_types(self, env):
+ self.arg = self.arg.analyse_types(env).coerce_to_pyobject(env)
+ if self.arg.type is tuple_type:
+ return self.arg.as_none_safe_node("'NoneType' object is not iterable")
+ self.type = tuple_type
+ return self
+
+ def may_be_none(self):
+ return False
+
+ nogil_check = Node.gil_error
+ gil_message = "Constructing Python tuple"
+
+ def generate_result_code(self, code):
+ cfunc = "__Pyx_PySequence_Tuple" if self.arg.type in (py_object_type, tuple_type) else "PySequence_Tuple"
+ code.putln(
+ "%s = %s(%s); %s" % (
+ self.result(),
+ cfunc, self.arg.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class MergedDictNode(ExprNode):
+ # Helper class for keyword arguments and other merged dicts.
+ #
+ # keyword_args [DictNode or other ExprNode]
+
+ subexprs = ['keyword_args']
+ is_temp = 1
+ type = dict_type
+ reject_duplicates = True
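+    # Added illustration: "f(**a, **b)" merges "a" and "b" into a single
+    # dict for the call, raising TypeError on duplicate keys because
+    # reject_duplicates is set.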
+
+ def calculate_constant_result(self):
+ result = {}
+ reject_duplicates = self.reject_duplicates
+ for item in self.keyword_args:
+ if item.is_dict_literal:
+ # process items in order
+ items = ((key.constant_result, value.constant_result)
+ for key, value in item.key_value_pairs)
+ else:
+                items = item.constant_result.items()
+
+ for key, value in items:
+ if reject_duplicates and key in result:
+ raise ValueError("duplicate keyword argument found: %s" % key)
+ result[key] = value
+
+ self.constant_result = result
+
+ def compile_time_value(self, denv):
+ result = {}
+ reject_duplicates = self.reject_duplicates
+ for item in self.keyword_args:
+ if item.is_dict_literal:
+ # process items in order
+ items = [(key.compile_time_value(denv), value.compile_time_value(denv))
+ for key, value in item.key_value_pairs]
+ else:
+                items = item.compile_time_value(denv).items()
+
+ try:
+ for key, value in items:
+ if reject_duplicates and key in result:
+ raise ValueError("duplicate keyword argument found: %s" % key)
+ result[key] = value
+ except Exception as e:
+ self.compile_time_value_error(e)
+ return result
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ return dict_type
+
+ def analyse_types(self, env):
+ self.keyword_args = [
+ arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
+ # FIXME: CPython's error message starts with the runtime function name
+ 'argument after ** must be a mapping, not NoneType')
+ for arg in self.keyword_args
+ ]
+
+ return self
+
+ def may_be_none(self):
+ return False
+
+ gil_message = "Constructing Python dict"
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ args = iter(self.keyword_args)
+ item = next(args)
+ item.generate_evaluation_code(code)
+ if item.type is not dict_type:
+ # CPython supports calling functions with non-dicts, so do we
+ code.putln('if (likely(PyDict_CheckExact(%s))) {' %
+ item.py_result())
+
+ if item.is_dict_literal:
+ item.make_owned_reference(code)
+ code.putln("%s = %s;" % (self.result(), item.py_result()))
+ item.generate_post_assignment_code(code)
+ else:
+ code.putln("%s = PyDict_Copy(%s); %s" % (
+ self.result(),
+ item.py_result(),
+ code.error_goto_if_null(self.result(), item.pos)))
+ self.generate_gotref(code)
+ item.generate_disposal_code(code)
+
+ if item.type is not dict_type:
+ code.putln('} else {')
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCallOneArg", "ObjectHandling.c"))
+ code.putln("%s = __Pyx_PyObject_CallOneArg((PyObject*)&PyDict_Type, %s); %s" % (
+ self.result(),
+ item.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ item.generate_disposal_code(code)
+ code.putln('}')
+ item.free_temps(code)
+
+ helpers = set()
+ for item in args:
+ if item.is_dict_literal:
+ # inline update instead of creating an intermediate dict
+ for arg in item.key_value_pairs:
+ arg.generate_evaluation_code(code)
+ if self.reject_duplicates:
+ code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % (
+ self.result(),
+ arg.key.py_result()))
+ helpers.add("RaiseDoubleKeywords")
+ # FIXME: find out function name at runtime!
+ code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
+ arg.key.py_result(),
+ code.error_goto(self.pos)))
+ code.putln("}")
+ code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % (
+ self.result(),
+ arg.key.py_result(),
+ arg.value.py_result()))
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+ else:
+ item.generate_evaluation_code(code)
+ if self.reject_duplicates:
+                    # merge the mappings into the kwdict one by one, as we need to check for duplicates
+ helpers.add("MergeKeywords")
+ code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % (
+ self.result(), item.py_result()))
+ else:
+ # simple case, just add all entries
+ helpers.add("RaiseMappingExpected")
+ code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % (
+ self.result(), item.py_result()))
+ code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) "
+ "__Pyx_RaiseMappingExpectedError(%s);" % item.py_result())
+ code.putln(code.error_goto(item.pos))
+ code.putln("}")
+ item.generate_disposal_code(code)
+ item.free_temps(code)
+
+ for helper in sorted(helpers):
+ code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c"))
+
+ def annotate(self, code):
+ for item in self.keyword_args:
+ item.annotate(code)
+
+
+class AttributeNode(ExprNode):
+ # obj.attribute
+ #
+ # obj ExprNode
+ # attribute string
+ # needs_none_check boolean Used if obj is an extension type.
+ # If set to True, it is known that the type is not None.
+ #
+ # Used internally:
+ #
+ # is_py_attr boolean Is a Python getattr operation
+ # member string C name of struct member
+ # is_called boolean Function call is being done on result
+ # entry Entry Symbol table entry of attribute
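+    #
+    #  Example forms this node can represent (illustrative):
+    #      pyobj.name        - Python attribute lookup (is_py_attr)
+    #      struct_or_ptr.x   - C member access via '.' or '->'
+    #      ExtType.cmethod   - unbound C method of an extension type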
+
+ is_attribute = 1
+ subexprs = ['obj']
+
+ entry = None
+ is_called = 0
+ needs_none_check = True
+ is_memslice_transpose = False
+ is_special_lookup = False
+ is_py_attr = 0
+
+ def as_cython_attribute(self):
+        if (isinstance(self.obj, NameNode) and
+                self.obj.is_cython_module and
+                self.attribute != u"parallel"):
+ return self.attribute
+
+ cy = self.obj.as_cython_attribute()
+ if cy:
+ return "%s.%s" % (cy, self.attribute)
+ return None
+
+ def coerce_to(self, dst_type, env):
+ # If coercing to a generic pyobject and this is a cpdef function
+ # we can create the corresponding attribute
+ if dst_type is py_object_type:
+ entry = self.entry
+ if entry and entry.is_cfunction and entry.as_variable:
+ # must be a cpdef function
+ self.is_temp = 1
+ self.entry = entry.as_variable
+ self.analyse_as_python_attribute(env)
+ return self
+ elif entry and entry.is_cfunction and self.obj.type is not Builtin.type_type:
+ # "bound" cdef function.
+ # This implementation is likely a little inefficient and could be improved.
+ # Essentially it does:
+ # __import__("functools").partial(coerce_to_object(self), self.obj)
+ from .UtilNodes import EvalWithTempExprNode, ResultRefNode
+ # take self.obj out to a temp because it's used twice
+ obj_node = ResultRefNode(self.obj, type=self.obj.type)
+ obj_node.result_ctype = self.obj.result_ctype
+ self.obj = obj_node
+ unbound_node = ExprNode.coerce_to(self, dst_type, env)
+            utility_code = UtilityCode.load_cached(
+ "PyMethodNew2Arg", "ObjectHandling.c"
+ )
+ func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("func", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("self", PyrexTypes.py_object_type, None)
+ ],
+ )
+ binding_call = PythonCapiCallNode(
+ self.pos,
+ function_name="__Pyx_PyMethod_New2Arg",
+ func_type=func_type,
+ args=[unbound_node, obj_node],
+ utility_code=utility_code,
+ )
+ complete_call = EvalWithTempExprNode(obj_node, binding_call)
+ return complete_call.analyse_types(env)
+ return ExprNode.coerce_to(self, dst_type, env)
+
+ def calculate_constant_result(self):
+ attr = self.attribute
+ if attr.startswith("__") and attr.endswith("__"):
+ return
+ self.constant_result = getattr(self.obj.constant_result, attr)
+
+ def compile_time_value(self, denv):
+ attr = self.attribute
+ if attr.startswith("__") and attr.endswith("__"):
+ error(self.pos,
+ "Invalid attribute name '%s' in compile-time expression" % attr)
+ return None
+ obj = self.obj.compile_time_value(denv)
+ try:
+ return getattr(obj, attr)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def type_dependencies(self, env):
+ return self.obj.type_dependencies(env)
+
+ def infer_type(self, env):
+ # FIXME: this is way too redundant with analyse_types()
+ node = self.analyse_as_cimported_attribute_node(env, target=False)
+ if node is not None:
+ if node.entry.type and node.entry.type.is_cfunction:
+ # special-case - function converted to pointer
+ return PyrexTypes.CPtrType(node.entry.type)
+ else:
+ return node.entry.type
+ node = self.analyse_as_type_attribute(env)
+ if node is not None:
+ return node.entry.type
+ obj_type = self.obj.infer_type(env)
+ self.analyse_attribute(env, obj_type=obj_type)
+ if obj_type.is_builtin_type and self.type.is_cfunction:
+ # special case: C-API replacements for C methods of
+ # builtin types cannot be inferred as C functions as
+ # that would prevent their use as bound methods
+ return py_object_type
+ elif self.entry and self.entry.is_cmethod:
+ # special case: bound methods should not be inferred
+ # as their unbound method types
+ return py_object_type
+ return self.type
+
+ def analyse_target_declaration(self, env):
+ self.is_target = True
+
+ def analyse_target_types(self, env):
+        node = self.analyse_types(env, target=1)
+ if node.type.is_const:
+ error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
+ if not node.is_lvalue():
+ error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
+ return node
+
+    def analyse_types(self, env, target=0):
+ if not self.type:
+ self.type = PyrexTypes.error_type # default value if it isn't analysed successfully
+ self.initialized_check = env.directives['initializedcheck']
+ node = self.analyse_as_cimported_attribute_node(env, target)
+ if node is None and not target:
+ node = self.analyse_as_type_attribute(env)
+ if node is None:
+ node = self.analyse_as_ordinary_attribute_node(env, target)
+ assert node is not None
+ if (node.is_attribute or node.is_name) and node.entry:
+ node.entry.used = True
+ if node.is_attribute:
+ node.wrap_obj_in_nonecheck(env)
+ return node
+
+ def analyse_as_cimported_attribute_node(self, env, target):
+        # Try to interpret this as a reference to an imported
+        # C const, type, var or function. If successful, returns
+        # a NameNode for the entry (or this node itself after
+        # reporting an error), otherwise returns None.
+ module_scope = self.obj.analyse_as_module(env)
+ if module_scope:
+ entry = module_scope.lookup_here(self.attribute)
+ if entry and not entry.known_standard_library_import and (
+ entry.is_cglobal or entry.is_cfunction
+ or entry.is_type or entry.is_const):
+ return self.as_name_node(env, entry, target)
+ if self.is_cimported_module_without_shadow(env):
+ # TODO: search for submodule
+ error(self.pos, "cimported module has no attribute '%s'" % self.attribute)
+ return self
+ return None
+
+ def analyse_as_type_attribute(self, env):
+ # Try to interpret this as a reference to an unbound
+ # C method of an extension type or builtin type. If successful,
+ # creates a corresponding NameNode and returns it, otherwise
+ # returns None.
+ if self.obj.is_string_literal:
+ return
+ type = self.obj.analyse_as_type(env)
+ if type:
+ if type.is_extension_type or type.is_builtin_type or type.is_cpp_class:
+ entry = type.scope.lookup_here(self.attribute)
+ if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction):
+ if type.is_builtin_type:
+ if not self.is_called:
+ # must handle this as Python object
+ return None
+ ubcm_entry = entry
+ else:
+ ubcm_entry = self._create_unbound_cmethod_entry(type, entry, env)
+ ubcm_entry.overloaded_alternatives = [
+ self._create_unbound_cmethod_entry(type, overloaded_alternative, env)
+ for overloaded_alternative in entry.overloaded_alternatives
+ ]
+ return self.as_name_node(env, ubcm_entry, target=False)
+ elif type.is_enum or type.is_cpp_enum:
+ if self.attribute in type.values:
+ for entry in type.entry.enum_values:
+ if entry.name == self.attribute:
+ return self.as_name_node(env, entry, target=False)
+ else:
+ error(self.pos, "%s not a known value of %s" % (self.attribute, type))
+ else:
+ error(self.pos, "%s not a known value of %s" % (self.attribute, type))
+ return None
+
+ def _create_unbound_cmethod_entry(self, type, entry, env):
+ # Create a temporary entry describing the unbound C method in `entry`
+ # as an ordinary function.
+ if entry.func_cname and entry.type.op_arg_struct is None:
+ cname = entry.func_cname
+ if entry.type.is_static_method or (
+ env.parent_scope and env.parent_scope.is_cpp_class_scope):
+ ctype = entry.type
+ elif type.is_cpp_class:
+ error(self.pos, "%s not a static member of %s" % (entry.name, type))
+ ctype = PyrexTypes.error_type
+ else:
+ # Fix self type.
+ ctype = copy.copy(entry.type)
+ ctype.args = ctype.args[:]
+ ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None)
+ else:
+ cname = "%s->%s" % (type.vtabptr_cname, entry.cname)
+ ctype = entry.type
+ ubcm_entry = Symtab.Entry(entry.name, cname, ctype)
+ ubcm_entry.is_cfunction = 1
+ ubcm_entry.func_cname = entry.func_cname
+ ubcm_entry.is_unbound_cmethod = 1
+ ubcm_entry.scope = entry.scope
+ return ubcm_entry
+
+ def analyse_as_type(self, env):
+ module_scope = self.obj.analyse_as_module(env)
+ if module_scope:
+ return module_scope.lookup_type(self.attribute)
+ if not self.obj.is_string_literal:
+ base_type = self.obj.analyse_as_type(env)
+ if base_type and getattr(base_type, 'scope', None) is not None:
+ return base_type.scope.lookup_type(self.attribute)
+ return None
+
+ def analyse_as_extension_type(self, env):
+ # Try to interpret this as a reference to an extension type
+ # in a cimported module. Returns the extension type, or None.
+ module_scope = self.obj.analyse_as_module(env)
+ if module_scope:
+ entry = module_scope.lookup_here(self.attribute)
+ if entry and entry.is_type:
+ if entry.type.is_extension_type or entry.type.is_builtin_type:
+ return entry.type
+ return None
+
+ def analyse_as_module(self, env):
+ # Try to interpret this as a reference to a cimported module
+ # in another cimported module. Returns the module scope, or None.
+ module_scope = self.obj.analyse_as_module(env)
+ if module_scope:
+ entry = module_scope.lookup_here(self.attribute)
+ if entry and entry.as_module:
+ return entry.as_module
+ return None
+
+ def as_name_node(self, env, entry, target):
+ # Create a corresponding NameNode from this node and complete the
+ # analyse_types phase.
+ node = NameNode.from_node(self, name=self.attribute, entry=entry)
+ if target:
+ node = node.analyse_target_types(env)
+ else:
+ node = node.analyse_rvalue_entry(env)
+ node.entry.used = 1
+ return node
+
+ def analyse_as_ordinary_attribute_node(self, env, target):
+ self.obj = self.obj.analyse_types(env)
+ self.analyse_attribute(env)
+ if self.entry and self.entry.is_cmethod and not self.is_called:
+# error(self.pos, "C method can only be called")
+ pass
+ ## Reference to C array turns into pointer to first element.
+ #while self.type.is_array:
+ # self.type = self.type.element_ptr_type()
+ if self.is_py_attr:
+ if not target:
+ self.is_temp = 1
+ self.result_ctype = py_object_type
+ elif target and self.obj.type.is_builtin_type:
+ error(self.pos, "Assignment to an immutable object field")
+ elif self.entry and self.entry.is_cproperty:
+ if not target:
+ return SimpleCallNode.for_cproperty(self.pos, self.obj, self.entry).analyse_types(env)
+ # TODO: implement writable C-properties?
+ error(self.pos, "Assignment to a read-only property")
+ #elif self.type.is_memoryviewslice and not target:
+ # self.is_temp = True
+ return self
+
+    def analyse_attribute(self, env, obj_type=None):
+ # Look up attribute and set self.type and self.member.
+ immutable_obj = obj_type is not None # used during type inference
+ self.is_py_attr = 0
+ self.member = self.attribute
+ if obj_type is None:
+ if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
+ self.obj = self.obj.coerce_to_pyobject(env)
+ obj_type = self.obj.type
+ else:
+ if obj_type.is_string or obj_type.is_pyunicode_ptr:
+ obj_type = py_object_type
+ if obj_type.is_ptr or obj_type.is_array:
+ obj_type = obj_type.base_type
+ self.op = "->"
+ elif obj_type.is_extension_type or obj_type.is_builtin_type:
+ self.op = "->"
+ elif obj_type.is_reference and obj_type.is_fake_reference:
+ self.op = "->"
+ else:
+ self.op = "."
+ if obj_type.has_attributes:
+ if obj_type.attributes_known():
+ entry = obj_type.scope.lookup_here(self.attribute)
+ if obj_type.is_memoryviewslice and not entry:
+ if self.attribute == 'T':
+ self.is_memslice_transpose = True
+ self.is_temp = True
+ self.use_managed_ref = True
+ self.type = self.obj.type.transpose(self.pos)
+ return
+ else:
+ obj_type.declare_attribute(self.attribute, env, self.pos)
+ entry = obj_type.scope.lookup_here(self.attribute)
+ if entry and entry.is_member:
+ entry = None
+ else:
+ error(self.pos,
+ "Cannot select attribute of incomplete type '%s'"
+ % obj_type)
+ self.type = PyrexTypes.error_type
+ return
+ self.entry = entry
+ if entry:
+ if obj_type.is_extension_type and entry.name == "__weakref__":
+ error(self.pos, "Illegal use of special attribute __weakref__")
+
+            # 'def' methods need the normal attribute lookup
+            # because they do not have struct entries.
+            # Fused functions go through assignment synthesis
+            # (foo = pycfunction(foo_func_obj)) and need to go through
+            # regular Python lookup as well.
+ if entry.is_cproperty:
+ self.type = entry.type
+ return
+ elif (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
+ self.type = entry.type
+ self.member = entry.cname
+ return
+ else:
+ # If it's not a variable or C method, it must be a Python
+ # method of an extension type, so we treat it like a Python
+ # attribute.
+ pass
+ # If we get here, the base object is not a struct/union/extension
+ # type, or it is an extension type and the attribute is either not
+ # declared or is declared as a Python method. Treat it as a Python
+ # attribute reference.
+ self.analyse_as_python_attribute(env, obj_type, immutable_obj)
+
+ def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
+ if obj_type is None:
+ obj_type = self.obj.type
+ # mangle private '__*' Python attributes used inside of a class
+ self.attribute = env.mangle_class_private_name(self.attribute)
+ self.member = self.attribute
+ self.type = py_object_type
+ self.is_py_attr = 1
+
+ if not obj_type.is_pyobject and not obj_type.is_error:
+ # Expose python methods for immutable objects.
+ if (obj_type.is_string or obj_type.is_cpp_string
+ or obj_type.is_buffer or obj_type.is_memoryviewslice
+ or obj_type.is_numeric
+ or (obj_type.is_ctuple and obj_type.can_coerce_to_pyobject(env))
+ or (obj_type.is_struct and obj_type.can_coerce_to_pyobject(env))):
+ if not immutable_obj:
+ self.obj = self.obj.coerce_to_pyobject(env)
+ elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
+ and self.obj.entry.as_variable
+ and self.obj.entry.as_variable.type.is_pyobject):
+ # might be an optimised builtin function => unpack it
+ if not immutable_obj:
+ self.obj = self.obj.coerce_to_pyobject(env)
+ else:
+ error(self.pos,
+ "Object of type '%s' has no attribute '%s'" %
+ (obj_type, self.attribute))
+
+ def wrap_obj_in_nonecheck(self, env):
+ if not env.directives['nonecheck']:
+ return
+
+ msg = None
+ format_args = ()
+ if (self.obj.type.is_extension_type and self.needs_none_check and not
+ self.is_py_attr):
+ msg = "'NoneType' object has no attribute '%{0}s'".format('.30' if len(self.attribute) <= 30 else '')
+ format_args = (self.attribute,)
+ elif self.obj.type.is_memoryviewslice:
+ if self.is_memslice_transpose:
+ msg = "Cannot transpose None memoryview slice"
+ else:
+ entry = self.obj.type.scope.lookup_here(self.attribute)
+ if entry:
+ # copy/is_c_contig/shape/strides etc
+ msg = "Cannot access '%s' attribute of None memoryview slice"
+ format_args = (entry.name,)
+
+ if msg:
+ self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
+ format_args=format_args)
+
+ def nogil_check(self, env):
+ if self.is_py_attr:
+ self.gil_error()
+
+ gil_message = "Accessing Python attribute"
+
+ def is_cimported_module_without_shadow(self, env):
+ return self.obj.is_cimported_module_without_shadow(env)
+
+ def is_simple(self):
+ if self.obj:
+ return self.result_in_temp() or self.obj.is_simple()
+ else:
+ return NameNode.is_simple(self)
+
+ def is_lvalue(self):
+ if self.obj:
+ return True
+ else:
+ return NameNode.is_lvalue(self)
+
+ def is_ephemeral(self):
+ if self.obj:
+ return self.obj.is_ephemeral()
+ else:
+ return NameNode.is_ephemeral(self)
+
+ def calculate_result_code(self):
+ result = self.calculate_access_code()
+ if self.entry and self.entry.is_cpp_optional and not self.is_target:
+ result = "(*%s)" % result
+ return result
+
+ def calculate_access_code(self):
+        # Does the job of calculate_result_code but doesn't dereference cpp_optionals,
+        # thereby allowing access to the holder variable.
+ obj = self.obj
+ obj_code = obj.result_as(obj.type)
+ #print "...obj_code =", obj_code ###
+ if self.entry and self.entry.is_cmethod:
+ if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
+ if self.entry.final_func_cname:
+ return self.entry.final_func_cname
+
+ if self.type.from_fused:
+ # If the attribute was specialized through indexing, make
+ # sure to get the right fused name, as our entry was
+ # replaced by our parent index node
+ # (AnalyseExpressionsTransform)
+ self.member = self.entry.cname
+
+ return "((struct %s *)%s%s%s)->%s" % (
+ obj.type.vtabstruct_cname, obj_code, self.op,
+ obj.type.vtabslot_cname, self.member)
+ elif self.result_is_used:
+ return self.member
+ # Generating no code at all for unused access to optimised builtin
+ # methods fixes the problem that some optimisations only exist as
+ # macros, i.e. there is no function pointer to them, so we would
+ # generate invalid C code here.
+ return
+ elif obj.type.is_complex:
+ return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
+ else:
+ if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
+ # accessing a field of a builtin type, need to cast better than result_as() does
+ obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
+ return "%s%s%s" % (obj_code, self.op, self.member)
+
+ def generate_result_code(self, code):
+ if self.is_py_attr:
+ if self.is_special_lookup:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
+ lookup_func_name = '__Pyx_PyObject_LookupSpecial'
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
+ lookup_func_name = '__Pyx_PyObject_GetAttrStr'
+ code.putln(
+ '%s = %s(%s, %s); %s' % (
+ self.result(),
+ lookup_func_name,
+ self.obj.py_result(),
+ code.intern_identifier(self.attribute),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ elif self.type.is_memoryviewslice:
+ if self.is_memslice_transpose:
+ # transpose the slice
+ for access, packing in self.type.axes:
+ if access == 'ptr':
+ error(self.pos, "Transposing not supported for slices "
+ "with indirect dimensions")
+ return
+
+ code.putln("%s = %s;" % (self.result(), self.obj.result()))
+ code.put_incref_memoryviewslice(self.result(), self.type,
+ have_gil=True)
+
+ T = "__pyx_memslice_transpose(&%s)" % self.result()
+ code.putln(code.error_goto_if_neg(T, self.pos))
+ elif self.initialized_check:
+ code.putln(
+ 'if (unlikely(!%s.memview)) {'
+ 'PyErr_SetString(PyExc_AttributeError,'
+ '"Memoryview is not initialized");'
+ '%s'
+ '}' % (self.result(), code.error_goto(self.pos)))
+ elif self.entry.is_cpp_optional and self.initialized_check:
+ if self.is_target:
+ undereferenced_result = self.result()
+ else:
+ assert not self.is_temp # calculate_access_code() only makes sense for non-temps
+ undereferenced_result = self.calculate_access_code()
+ unbound_check_code = self.type.cpp_optional_check_for_null_code(undereferenced_result)
+ code.put_error_if_unbound(self.pos, self.entry, unbound_check_code=unbound_check_code)
+ else:
+ # result_code contains what is needed, but we may need to insert
+ # a check and raise an exception
+ if self.obj.type and self.obj.type.is_extension_type:
+ pass
+ elif self.entry and self.entry.is_cmethod:
+ # C method implemented as function call with utility code
+ code.globalstate.use_entry_utility_code(self.entry)
+
+ def generate_disposal_code(self, code):
+ if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
+ # mirror condition for putting the memview incref here:
+ code.put_xdecref_clear(self.result(), self.type, have_gil=True)
+ else:
+ ExprNode.generate_disposal_code(self, code)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ self.obj.generate_evaluation_code(code)
+ if self.is_py_attr:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
+ code.put_error_if_neg(self.pos,
+ '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
+ self.obj.py_result(),
+ code.intern_identifier(self.attribute),
+ rhs.py_result()))
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+ elif self.obj.type.is_complex:
+ code.putln("__Pyx_SET_C%s%s(%s, %s);" % (
+ self.member.upper(),
+ self.obj.type.implementation_suffix,
+ self.obj.result_as(self.obj.type),
+ rhs.result_as(self.ctype())))
+ rhs.generate_disposal_code(code)
+ rhs.free_temps(code)
+ else:
+ select_code = self.result()
+ if self.type.is_pyobject and self.use_managed_ref:
+ rhs.make_owned_reference(code)
+ rhs.generate_giveref(code)
+ code.put_gotref(select_code, self.type)
+ code.put_decref(select_code, self.ctype())
+ elif self.type.is_memoryviewslice:
+ from . import MemoryView
+ MemoryView.put_assign_to_memviewslice(
+ select_code, rhs, rhs.result(), self.type, code)
+
+ if not self.type.is_memoryviewslice:
+ code.putln(
+ "%s = %s;" % (
+ select_code,
+ rhs.move_result_rhs_as(self.ctype())))
+ #rhs.result()))
+ rhs.generate_post_assignment_code(code)
+ rhs.free_temps(code)
+ self.obj.generate_disposal_code(code)
+ self.obj.free_temps(code)
+
+ def generate_deletion_code(self, code, ignore_nonexisting=False):
+ self.obj.generate_evaluation_code(code)
+ if self.is_py_attr or (self.entry.scope.is_property_scope
+ and u'__del__' in self.entry.scope.entries):
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
+ code.put_error_if_neg(self.pos,
+ '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
+ self.obj.py_result(),
+ code.intern_identifier(self.attribute)))
+ else:
+ error(self.pos, "Cannot delete C attribute of extension type")
+ self.obj.generate_disposal_code(code)
+ self.obj.free_temps(code)
+
+ def annotate(self, code):
+ if self.is_py_attr:
+ style, text = 'py_attr', 'python attribute (%s)'
+ else:
+ style, text = 'c_attr', 'c attribute (%s)'
+ code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
+
+ def get_known_standard_library_import(self):
+ module_name = self.obj.get_known_standard_library_import()
+ if module_name:
+ return StringEncoding.EncodedString("%s.%s" % (module_name, self.attribute))
+ return None
+
+
+#-------------------------------------------------------------------
+#
+# Constructor nodes
+#
+#-------------------------------------------------------------------
+
+class StarredUnpackingNode(ExprNode):
+ # A starred expression like "*a"
+ #
+ # This is only allowed in sequence assignment or construction such as
+ #
+ # a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4]
+ #
+ # and will be special cased during type analysis (or generate an error
+ # if it's found at unexpected places).
+ #
+ # target ExprNode
+
+ subexprs = ['target']
+ is_starred = 1
+ type = py_object_type
+ is_temp = 1
+ starred_expr_allowed_here = False
+
+ def __init__(self, pos, target):
+ ExprNode.__init__(self, pos, target=target)
+
+ def analyse_declarations(self, env):
+ if not self.starred_expr_allowed_here:
+ error(self.pos, "starred expression is not allowed here")
+ self.target.analyse_declarations(env)
+
+ def infer_type(self, env):
+ return self.target.infer_type(env)
+
+ def analyse_types(self, env):
+ if not self.starred_expr_allowed_here:
+ error(self.pos, "starred expression is not allowed here")
+ self.target = self.target.analyse_types(env)
+ self.type = self.target.type
+ return self
+
+ def analyse_target_declaration(self, env):
+ self.target.analyse_target_declaration(env)
+
+ def analyse_target_types(self, env):
+ self.target = self.target.analyse_target_types(env)
+ self.type = self.target.type
+ return self
+
+ def calculate_result_code(self):
+ return ""
+
+ def generate_result_code(self, code):
+ pass
+
+
+class SequenceNode(ExprNode):
+ # Base class for list and tuple constructor nodes.
+ # Contains common code for performing sequence unpacking.
+ #
+ # args [ExprNode]
+ # unpacked_items [ExprNode] or None
+ # coerced_unpacked_items [ExprNode] or None
+ # mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
+
+ subexprs = ['args', 'mult_factor']
+
+ is_sequence_constructor = 1
+ unpacked_items = None
+ mult_factor = None
+ slow = False # trade speed for code size (e.g. use PyTuple_Pack())
+
+ def compile_time_value_list(self, denv):
+ return [arg.compile_time_value(denv) for arg in self.args]
+
+ def replace_starred_target_node(self):
+ # replace a starred node in the targets by the contained expression
+ self.starred_assignment = False
+ args = []
+ for arg in self.args:
+ if arg.is_starred:
+ if self.starred_assignment:
+ error(arg.pos, "more than 1 starred expression in assignment")
+ self.starred_assignment = True
+ arg = arg.target
+ arg.is_starred = True
+ args.append(arg)
+ self.args = args
+
+ def analyse_target_declaration(self, env):
+ self.replace_starred_target_node()
+ for arg in self.args:
+ arg.analyse_target_declaration(env)
+
+ def analyse_types(self, env, skip_children=False):
+ for i, arg in enumerate(self.args):
+ if not skip_children:
+ arg = arg.analyse_types(env)
+ self.args[i] = arg.coerce_to_pyobject(env)
+ if self.mult_factor:
+ mult_factor = self.mult_factor.analyse_types(env)
+ if not mult_factor.type.is_int:
+ mult_factor = mult_factor.coerce_to_pyobject(env)
+ self.mult_factor = mult_factor.coerce_to_simple(env)
+ self.is_temp = 1
+ # not setting self.type here, subtypes do this
+ return self
+
+ def coerce_to_ctuple(self, dst_type, env):
+ if self.type == dst_type:
+ return self
+ assert not self.mult_factor
+ if len(self.args) != dst_type.size:
+ error(self.pos, "trying to coerce sequence to ctuple of wrong length, expected %d, got %d" % (
+ dst_type.size, len(self.args)))
+ coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)]
+ return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True)
+
+ def _create_merge_node_if_necessary(self, env):
+ self._flatten_starred_args()
+ if not any(arg.is_starred for arg in self.args):
+ return self
+ # convert into MergedSequenceNode by building partial sequences
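+        #   e.g. "(1, 2, *a, 3)" becomes a merge of (1, 2), a and (3,)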
+ args = []
+ values = []
+ for arg in self.args:
+ if arg.is_starred:
+ if values:
+ args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
+ values = []
+ args.append(arg.target)
+ else:
+ values.append(arg)
+ if values:
+ args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
+ node = MergedSequenceNode(self.pos, args, self.type)
+ if self.mult_factor:
+ node = binop_node(
+ self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env),
+ inplace=True, type=self.type, is_temp=True)
+ return node
+
+ def _flatten_starred_args(self):
+ args = []
+ for arg in self.args:
+ if arg.is_starred and arg.target.is_sequence_constructor and not arg.target.mult_factor:
+ args.extend(arg.target.args)
+ else:
+ args.append(arg)
+ self.args[:] = args
+
+ def may_be_none(self):
+ return False
+
+ def analyse_target_types(self, env):
+ if self.mult_factor:
+ error(self.pos, "can't assign to multiplied sequence")
+ self.unpacked_items = []
+ self.coerced_unpacked_items = []
+ self.any_coerced_items = False
+ for i, arg in enumerate(self.args):
+ arg = self.args[i] = arg.analyse_target_types(env)
+ if arg.is_starred:
+ if not arg.type.assignable_from(list_type):
+ error(arg.pos,
+ "starred target must have Python object (list) type")
+ if arg.type is py_object_type:
+ arg.type = list_type
+ unpacked_item = PyTempNode(self.pos, env)
+ coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
+ if unpacked_item is not coerced_unpacked_item:
+ self.any_coerced_items = True
+ self.unpacked_items.append(unpacked_item)
+ self.coerced_unpacked_items.append(coerced_unpacked_item)
+ self.type = py_object_type
+ return self
+
+ def generate_result_code(self, code):
+ self.generate_operation_code(code)
+
+ def generate_sequence_packing_code(self, code, target=None, plain=False):
+ if target is None:
+ target = self.result()
+ size_factor = c_mult = ''
+ mult_factor = None
+
+ if self.mult_factor and not plain:
+ mult_factor = self.mult_factor
+ if mult_factor.type.is_int:
+ c_mult = mult_factor.result()
+ if (isinstance(mult_factor.constant_result, _py_int_types) and
+ mult_factor.constant_result > 0):
+ size_factor = ' * %s' % mult_factor.constant_result
+ elif mult_factor.type.signed:
+ size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
+ else:
+ size_factor = ' * (%s)' % (c_mult,)
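+        # e.g. for "[x, y] * n" with a signed C integer n, the allocation
+        # size becomes "2 * ((n<0) ? 0:n)" (illustrative)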
+
+ if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult:
+ # use PyTuple_Pack() to avoid generating huge amounts of one-time code
+ code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
+ target,
+ len(self.args),
+ ', '.join(arg.py_result() for arg in self.args),
+ code.error_goto_if_null(target, self.pos)))
+ code.put_gotref(target, py_object_type)
+ elif self.type.is_ctuple:
+ for i, arg in enumerate(self.args):
+ code.putln("%s.f%s = %s;" % (
+ target, i, arg.result()))
+ else:
+ # build the tuple/list step by step, potentially multiplying it as we go
+ if self.type is list_type:
+ create_func, set_item_func = 'PyList_New', '__Pyx_PyList_SET_ITEM'
+ elif self.type is tuple_type:
+ create_func, set_item_func = 'PyTuple_New', '__Pyx_PyTuple_SET_ITEM'
+ else:
+ raise InternalError("sequence packing for unexpected type %s" % self.type)
+ arg_count = len(self.args)
+ code.putln("%s = %s(%s%s); %s" % (
+ target, create_func, arg_count, size_factor,
+ code.error_goto_if_null(target, self.pos)))
+ code.put_gotref(target, py_object_type)
+
+ if c_mult:
+ # FIXME: can't use a temp variable here as the code may
+ # end up in the constant building function. Temps
+ # currently don't work there.
+
+ #counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
+ counter = Naming.quick_temp_cname
+ code.putln('{ Py_ssize_t %s;' % counter)
+ if arg_count == 1:
+ offset = counter
+ else:
+ offset = '%s * %s' % (counter, arg_count)
+ code.putln('for (%s=0; %s < %s; %s++) {' % (
+ counter, counter, c_mult, counter
+ ))
+ else:
+ offset = ''
+
+ for i in range(arg_count):
+ arg = self.args[i]
+ if c_mult or not arg.result_in_temp():
+ code.put_incref(arg.result(), arg.ctype())
+ arg.generate_giveref(code)
+ code.putln("if (%s(%s, %s, %s)) %s;" % (
+ set_item_func,
+ target,
+ (offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
+ arg.py_result(),
+ code.error_goto(self.pos)))
+
+ if c_mult:
+ code.putln('}')
+ #code.funcstate.release_temp(counter)
+ code.putln('}')
+
+ if mult_factor is not None and mult_factor.type.is_pyobject:
+ code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
+ Naming.quick_temp_cname, target, mult_factor.py_result(),
+ code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
+ ))
+ code.put_gotref(Naming.quick_temp_cname, py_object_type)
+ code.put_decref(target, py_object_type)
+ code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
+ code.putln('}')
+
+ def generate_subexpr_disposal_code(self, code):
+ if self.mult_factor and self.mult_factor.type.is_int:
+ super(SequenceNode, self).generate_subexpr_disposal_code(code)
+ elif self.type is tuple_type and (self.is_literal or self.slow):
+ super(SequenceNode, self).generate_subexpr_disposal_code(code)
+ else:
+ # We call generate_post_assignment_code here instead
+ # of generate_disposal_code, because values were stored
+ # in the tuple using a reference-stealing operation.
+ for arg in self.args:
+ arg.generate_post_assignment_code(code)
+ # Should NOT call free_temps -- this is invoked by the default
+ # generate_evaluation_code which will do that.
+ if self.mult_factor:
+ self.mult_factor.generate_disposal_code(code)
+
+ def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
+ exception_check=None, exception_value=None):
+ if self.starred_assignment:
+ self.generate_starred_assignment_code(rhs, code)
+ else:
+ self.generate_parallel_assignment_code(rhs, code)
+
+ for item in self.unpacked_items:
+ item.release(code)
+ rhs.free_temps(code)
+
+ _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
+ ]))
+
+ def generate_parallel_assignment_code(self, rhs, code):
+ # Need to work around the fact that generate_evaluation_code
+ # allocates the temps in a rather hacky way -- the assignment
+ # is evaluated twice, within each if-block.
+ for item in self.unpacked_items:
+ item.allocate(code)
+ special_unpack = (rhs.type is py_object_type
+ or rhs.type in (tuple_type, list_type)
+ or not rhs.type.is_builtin_type)
+ long_enough_for_a_loop = len(self.unpacked_items) > 3
+
+ if special_unpack:
+ self.generate_special_parallel_unpacking_code(
+ code, rhs, use_loop=long_enough_for_a_loop)
+ else:
+ code.putln("{")
+ self.generate_generic_parallel_unpacking_code(
+ code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
+ code.putln("}")
+
+ for value_node in self.coerced_unpacked_items:
+ value_node.generate_evaluation_code(code)
+ for i in range(len(self.args)):
+ self.args[i].generate_assignment_code(
+ self.coerced_unpacked_items[i], code)
+
+ def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
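+        # Fast path, e.g. for "a, b = some_tuple" (illustrative):
+        # check the size once, then read the items directly via the
+        # PyTuple/PyList GET_ITEM macros where that is safe, falling
+        # back to the generic iteration protocol otherwise.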
+ sequence_type_test = '1'
+ none_check = "likely(%s != Py_None)" % rhs.py_result()
+ if rhs.type is list_type:
+ sequence_types = ['List']
+ if rhs.may_be_none():
+ sequence_type_test = none_check
+ elif rhs.type is tuple_type:
+ sequence_types = ['Tuple']
+ if rhs.may_be_none():
+ sequence_type_test = none_check
+ else:
+ sequence_types = ['Tuple', 'List']
+ tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
+ list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
+ sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
+
+ code.putln("if (%s) {" % sequence_type_test)
+ code.putln("PyObject* sequence = %s;" % rhs.py_result())
+
+ # list/tuple => check size
+ code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);")
+ code.putln("if (unlikely(size != %d)) {" % len(self.args))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c"))
+ code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
+ len(self.args), len(self.args)))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c"))
+ code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
+ # < 0 => exception
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+
+ code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
+ # unpack items from list/tuple in unrolled loop (can't fail)
+ if len(sequence_types) == 2:
+ code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
+ for i, item in enumerate(self.unpacked_items):
+ code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
+ item.result(), sequence_types[0], i))
+ if len(sequence_types) == 2:
+ code.putln("} else {")
+ for i, item in enumerate(self.unpacked_items):
+ code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
+ item.result(), sequence_types[1], i))
+ code.putln("}")
+ for item in self.unpacked_items:
+ code.put_incref(item.result(), item.ctype())
+
+ code.putln("#else")
+ # in non-CPython, use the PySequence protocol (which can fail)
+ if not use_loop:
+ for i, item in enumerate(self.unpacked_items):
+ code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
+ item.result(), i,
+ code.error_goto_if_null(item.result(), self.pos)))
+ code.put_gotref(item.result(), item.type)
+ else:
+ code.putln("{")
+ code.putln("Py_ssize_t i;")
+ code.putln("PyObject** temps[%s] = {%s};" % (
+ len(self.unpacked_items),
+ ','.join(['&%s' % item.result() for item in self.unpacked_items])))
+ code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
+ code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
+ code.error_goto_if_null('item', self.pos)))
+ code.put_gotref('item', py_object_type)
+ code.putln("*(temps[i]) = item;")
+ code.putln("}")
+ code.putln("}")
+
+ code.putln("#endif")
+ rhs.generate_disposal_code(code)
+
+ if sequence_type_test == '1':
+ code.putln("}") # all done
+ elif sequence_type_test == none_check:
+ # either tuple/list or None => save some code by generating the error directly
+ code.putln("} else {")
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
+ code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
+ code.putln("}") # all done
+ else:
+ code.putln("} else {") # needs iteration fallback code
+ self.generate_generic_parallel_unpacking_code(
+ code, rhs, self.unpacked_items, use_loop=use_loop)
+ code.putln("}")
+
+ def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c"))
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
+ code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!
+
+ if use_loop:
+ code.putln("PyObject** temps[%s] = {%s};" % (
+ len(self.unpacked_items),
+ ','.join(['&%s' % item.result() for item in unpacked_items])))
+
+ iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln(
+ "%s = PyObject_GetIter(%s); %s" % (
+ iterator_temp,
+ rhs.py_result(),
+ code.error_goto_if_null(iterator_temp, self.pos)))
+ code.put_gotref(iterator_temp, py_object_type)
+ rhs.generate_disposal_code(code)
+
+ iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
+ code.putln("%s = __Pyx_PyObject_GetIterNextFunc(%s);" % (
+ iternext_func, iterator_temp))
+
+ unpacking_error_label = code.new_label('unpacking_failed')
+ unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
+ if use_loop:
+ code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
+ code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
+ code.put_goto(unpacking_error_label)
+ code.put_gotref("item", py_object_type)
+ code.putln("*(temps[index]) = item;")
+ code.putln("}")
+ else:
+ for i, item in enumerate(unpacked_items):
+ code.put(
+ "index = %d; %s = %s; if (unlikely(!%s)) " % (
+ i,
+ item.result(),
+ unpack_code,
+ item.result()))
+ code.put_goto(unpacking_error_label)
+ item.generate_gotref(code)
+
+ if terminate:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
+ code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
+ unpack_code,
+ len(unpacked_items)))
+ code.putln("%s = NULL;" % iternext_func)
+ code.put_decref_clear(iterator_temp, py_object_type)
+
+ unpacking_done_label = code.new_label('unpacking_done')
+ code.put_goto(unpacking_done_label)
+
+ code.put_label(unpacking_error_label)
+ code.put_decref_clear(iterator_temp, py_object_type)
+ code.putln("%s = NULL;" % iternext_func)
+ code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
+ code.putln(code.error_goto(self.pos))
+ code.put_label(unpacking_done_label)
+
+ code.funcstate.release_temp(iternext_func)
+ if terminate:
+ code.funcstate.release_temp(iterator_temp)
+ iterator_temp = None
+
+ return iterator_temp
+
+ def generate_starred_assignment_code(self, rhs, code):
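+        # e.g. for "a, *b, c = rhs" (illustrative): the fixed items left
+        # of the star are unpacked first, the remainder is collected into
+        # a list for the starred target, and the fixed items on the right
+        # are then taken back off the end of that list.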
+ for i, arg in enumerate(self.args):
+ if arg.is_starred:
+ starred_target = self.unpacked_items[i]
+ unpacked_fixed_items_left = self.unpacked_items[:i]
+ unpacked_fixed_items_right = self.unpacked_items[i+1:]
+ break
+ else:
+ assert False
+
+ iterator_temp = None
+ if unpacked_fixed_items_left:
+ for item in unpacked_fixed_items_left:
+ item.allocate(code)
+ code.putln('{')
+ iterator_temp = self.generate_generic_parallel_unpacking_code(
+ code, rhs, unpacked_fixed_items_left,
+ use_loop=True, terminate=False)
+ for i, item in enumerate(unpacked_fixed_items_left):
+ value_node = self.coerced_unpacked_items[i]
+ value_node.generate_evaluation_code(code)
+ code.putln('}')
+
+ starred_target.allocate(code)
+ target_list = starred_target.result()
+ code.putln("%s = %s(%s); %s" % (
+ target_list,
+ "__Pyx_PySequence_ListKeepNew" if (
+ not iterator_temp and rhs.is_temp and rhs.type in (py_object_type, list_type))
+ else "PySequence_List",
+ iterator_temp or rhs.py_result(),
+ code.error_goto_if_null(target_list, self.pos)))
+ starred_target.generate_gotref(code)
+
+ if iterator_temp:
+ code.put_decref_clear(iterator_temp, py_object_type)
+ code.funcstate.release_temp(iterator_temp)
+ else:
+ rhs.generate_disposal_code(code)
+
+ if unpacked_fixed_items_right:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c"))
+ length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
+ code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
+ code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
+ code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
+ len(unpacked_fixed_items_left), length_temp,
+ code.error_goto(self.pos)))
+ code.putln('}')
+
+ for item in unpacked_fixed_items_right[::-1]:
+ item.allocate(code)
+ for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
+ self.coerced_unpacked_items[::-1])):
+ code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
+ code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
+ item.py_result(), target_list, length_temp, i+1))
+ # resize the list the hard way
+ code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
+ code.putln('#else')
+ code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
+ item.py_result(), target_list, length_temp, i+1))
+ code.putln('#endif')
+ item.generate_gotref(code)
+ coerced_arg.generate_evaluation_code(code)
+
+ code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
+ sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
+ sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
+ code.error_goto_if_null(sublist_temp, self.pos)))
+ code.put_gotref(sublist_temp, py_object_type)
+ code.funcstate.release_temp(length_temp)
+ code.put_decref(target_list, py_object_type)
+ code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
+ code.putln('#else')
+ code.putln('CYTHON_UNUSED_VAR(%s);' % sublist_temp)
+ code.funcstate.release_temp(sublist_temp)
+ code.putln('#endif')
+
+ for i, arg in enumerate(self.args):
+ arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
+
+ def annotate(self, code):
+ for arg in self.args:
+ arg.annotate(code)
+ if self.unpacked_items:
+ for arg in self.unpacked_items:
+ arg.annotate(code)
+ for arg in self.coerced_unpacked_items:
+ arg.annotate(code)
+
+
+class TupleNode(SequenceNode):
+ # Tuple constructor.
+
+ type = tuple_type
+ is_partly_literal = False
+
+ gil_message = "Constructing Python tuple"
+
+ def infer_type(self, env):
+ if self.mult_factor or not self.args:
+ return tuple_type
+ arg_types = [arg.infer_type(env) for arg in self.args]
+ if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused
+ for type in arg_types):
+ return tuple_type
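+        # all argument types are plain C types => infer a ctuple,
+        # e.g. C int and double args infer the ctuple type (int, double)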
+ return env.declare_tuple_type(self.pos, arg_types).type
+
+ def analyse_types(self, env, skip_children=False):
+ # reset before re-analysing
+ if self.is_literal:
+ self.is_literal = False
+ if self.is_partly_literal:
+ self.is_partly_literal = False
+
+ if len(self.args) == 0:
+ self.is_temp = False
+ self.is_literal = True
+ return self
+
+ if not skip_children:
+ for i, arg in enumerate(self.args):
+ if arg.is_starred:
+ arg.starred_expr_allowed_here = True
+ self.args[i] = arg.analyse_types(env)
+ if (not self.mult_factor and
+ not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused)
+ for arg in self.args)):
+ self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
+ self.is_temp = 1
+ return self
+
+ node = SequenceNode.analyse_types(self, env, skip_children=True)
+ node = node._create_merge_node_if_necessary(env)
+ if not node.is_sequence_constructor:
+ return node
+
+ if not all(child.is_literal for child in node.args):
+ return node
+ if not node.mult_factor or (
+ node.mult_factor.is_literal and
+ isinstance(node.mult_factor.constant_result, _py_int_types)):
+ node.is_temp = False
+ node.is_literal = True
+ else:
+ if not node.mult_factor.type.is_pyobject and not node.mult_factor.type.is_int:
+ node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
+ node.is_temp = True
+ node.is_partly_literal = True
+ return node
+
+ def analyse_as_type(self, env):
+ # ctuple type
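+        #   e.g. "(int, double)" used in a type position declares a ctuple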
+ if not self.args:
+ return None
+ item_types = [arg.analyse_as_type(env) for arg in self.args]
+ if any(t is None for t in item_types):
+ return None
+ entry = env.declare_tuple_type(self.pos, item_types)
+ return entry.type
+
+ def coerce_to(self, dst_type, env):
+ if self.type.is_ctuple:
+ if dst_type.is_ctuple and self.type.size == dst_type.size:
+ return self.coerce_to_ctuple(dst_type, env)
+ elif dst_type is tuple_type or dst_type is py_object_type:
+ coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args]
+ return TupleNode(
+ self.pos,
+ args=coerced_args,
+ type=tuple_type,
+ mult_factor=self.mult_factor,
+ is_temp=1,
+ ).analyse_types(env, skip_children=True)
+ else:
+ return self.coerce_to_pyobject(env).coerce_to(dst_type, env)
+ elif dst_type.is_ctuple and not self.mult_factor:
+ return self.coerce_to_ctuple(dst_type, env)
+ else:
+ return SequenceNode.coerce_to(self, dst_type, env)
+
+ def as_list(self):
+ t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor)
+ if isinstance(self.constant_result, tuple):
+ t.constant_result = list(self.constant_result)
+ return t
+
+ def is_simple(self):
+ # either temp or constant => always simple
+ return True
+
+ def nonlocally_immutable(self):
+ # either temp or constant => always safe
+ return True
+
+ def calculate_result_code(self):
+ if len(self.args) > 0:
+ return self.result_code
+ else:
+ return Naming.empty_tuple
+
+ def calculate_constant_result(self):
+ self.constant_result = tuple([
+ arg.constant_result for arg in self.args])
+
+ def compile_time_value(self, denv):
+ values = self.compile_time_value_list(denv)
+ try:
+ return tuple(values)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def generate_operation_code(self, code):
+ if len(self.args) == 0:
+ # result_code is Naming.empty_tuple
+ return
+
+ if self.is_literal or self.is_partly_literal:
+ # The "mult_factor" is part of the deduplication if it is also constant, i.e. when
+ # we deduplicate the multiplied result. Otherwise, only deduplicate the constant part.
+ dedup_key = make_dedup_key(self.type, [self.mult_factor if self.is_literal else None] + self.args)
+ tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2, dedup_key=dedup_key)
+ const_code = code.get_cached_constants_writer(tuple_target)
+ if const_code is not None:
+ # constant is not yet initialised
+ const_code.mark_pos(self.pos)
+ self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal)
+ const_code.put_giveref(tuple_target, py_object_type)
+ if self.is_literal:
+ self.result_code = tuple_target
+ elif self.mult_factor.type.is_int:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PySequenceMultiply", "ObjectHandling.c"))
+ code.putln('%s = __Pyx_PySequence_Multiply(%s, %s); %s' % (
+ self.result(), tuple_target, self.mult_factor.result(),
+ code.error_goto_if_null(self.result(), self.pos)
+ ))
+ self.generate_gotref(code)
+ else:
+ code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
+ self.result(), tuple_target, self.mult_factor.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)
+ ))
+ self.generate_gotref(code)
+ else:
+ self.type.entry.used = True
+ self.generate_sequence_packing_code(code)
+
+
+class ListNode(SequenceNode):
+ # List constructor.
+
+ # obj_conversion_errors [PyrexError] used internally
+    # original_args          [ExprNode]   used internally
+
+ obj_conversion_errors = []
+ type = list_type
+ in_module_scope = False
+
+ gil_message = "Constructing Python list"
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ # TODO: Infer non-object list arrays.
+ return list_type
+
+ def analyse_expressions(self, env):
+ for arg in self.args:
+ if arg.is_starred:
+ arg.starred_expr_allowed_here = True
+ node = SequenceNode.analyse_expressions(self, env)
+ return node.coerce_to_pyobject(env)
+
+ def analyse_types(self, env):
+ with local_errors(ignore=True) as errors:
+ self.original_args = list(self.args)
+ node = SequenceNode.analyse_types(self, env)
+ node.obj_conversion_errors = errors
+ if env.is_module_scope:
+ self.in_module_scope = True
+ node = node._create_merge_node_if_necessary(env)
+ return node
+
+ def coerce_to(self, dst_type, env):
+ if dst_type.is_pyobject:
+ for err in self.obj_conversion_errors:
+ report_error(err)
+ self.obj_conversion_errors = []
+ if not self.type.subtype_of(dst_type):
+ error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
+ elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type:
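+            # e.g. assigning [1, 2, 3] to a "cdef int arr[3]" coerces
+            # each item to the array's base type (illustrative)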
+ array_length = len(self.args)
+ if self.mult_factor:
+ if isinstance(self.mult_factor.constant_result, _py_int_types):
+ if self.mult_factor.constant_result <= 0:
+ error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type)
+ else:
+ array_length *= self.mult_factor.constant_result
+ else:
+ error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type)
+ base_type = dst_type.base_type
+ self.type = PyrexTypes.CArrayType(base_type, array_length)
+ for i in range(len(self.original_args)):
+ arg = self.args[i]
+ if isinstance(arg, CoerceToPyTypeNode):
+ arg = arg.arg
+ self.args[i] = arg.coerce_to(base_type, env)
+ elif dst_type.is_cpp_class:
+ # TODO(robertwb): Avoid object conversion for vector/list/set.
+ return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env)
+ elif self.mult_factor:
+ error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
+ elif dst_type.is_struct:
+ if len(self.args) > len(dst_type.scope.var_entries):
+ error(self.pos, "Too many members for '%s'" % dst_type)
+ else:
+ if len(self.args) < len(dst_type.scope.var_entries):
+ warning(self.pos, "Too few members for '%s'" % dst_type, 1)
+ for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
+ if isinstance(arg, CoerceToPyTypeNode):
+ arg = arg.arg
+ self.args[i] = arg.coerce_to(member.type, env)
+ self.type = dst_type
+ elif dst_type.is_ctuple:
+ return self.coerce_to_ctuple(dst_type, env)
+ else:
+ self.type = error_type
+ error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
+ return self
+
+ def as_list(self): # dummy for compatibility with TupleNode
+ return self
+
+ def as_tuple(self):
+ t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
+ if isinstance(self.constant_result, list):
+ t.constant_result = tuple(self.constant_result)
+ return t
+
+ def allocate_temp_result(self, code):
+ if self.type.is_array:
+ if self.in_module_scope:
+ self.temp_code = code.funcstate.allocate_temp(
+ self.type, manage_ref=False, static=True, reusable=False)
+ else:
+ # To be valid C++, we must allocate the memory on the stack
+ # manually and be sure not to reuse it for something else.
+ # Yes, this means that we leak a temp array variable.
+ self.temp_code = code.funcstate.allocate_temp(
+ self.type, manage_ref=False, reusable=False)
+ else:
+ SequenceNode.allocate_temp_result(self, code)
+
+ def calculate_constant_result(self):
+ if self.mult_factor:
+ raise ValueError() # may exceed the compile time memory
+ self.constant_result = [
+ arg.constant_result for arg in self.args]
+
+ def compile_time_value(self, denv):
+ l = self.compile_time_value_list(denv)
+ if self.mult_factor:
+ l *= self.mult_factor.compile_time_value(denv)
+ return l
+
+ def generate_operation_code(self, code):
+ if self.type.is_pyobject:
+ for err in self.obj_conversion_errors:
+ report_error(err)
+ self.generate_sequence_packing_code(code)
+ elif self.type.is_array:
+ if self.mult_factor:
+ code.putln("{")
+ code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname)
+ code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format(
+ i=Naming.quick_temp_cname, count=self.mult_factor.result()))
+ offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname)
+ else:
+ offset = ''
+ for i, arg in enumerate(self.args):
+ if arg.type.is_array:
+ code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
+ code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % (
+ self.result(), i, offset,
+ arg.result(), self.result()
+ ))
+ else:
+ code.putln("%s[%s%s] = %s;" % (
+ self.result(),
+ i,
+ offset,
+ arg.result()))
+ if self.mult_factor:
+ code.putln("}")
+ code.putln("}")
+ elif self.type.is_struct:
+ for arg, member in zip(self.args, self.type.scope.var_entries):
+ code.putln("%s.%s = %s;" % (
+ self.result(),
+ member.cname,
+ arg.result()))
+ else:
+ raise InternalError("List type never specified")
+
+
+class ComprehensionNode(ScopedExprNode):
+ # A list/set/dict comprehension
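+    #
+    # e.g. "[x*x for x in seq]", "{x for x in seq}" or
+    # "{k: v for k, v in items}" (illustrative)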
+
+ child_attrs = ["loop"]
+
+ is_temp = True
+ constant_result = not_a_constant
+
+ def infer_type(self, env):
+ return self.type
+
+ def analyse_declarations(self, env):
+ self.append.target = self # this is used in the PyList_Append of the inner loop
+ self.init_scope(env)
+ # setup loop scope
+ if isinstance(self.loop, Nodes._ForInStatNode):
+ assert isinstance(self.loop.iterator, ScopedExprNode), self.loop.iterator
+ self.loop.iterator.init_scope(None, env)
+ else:
+ assert isinstance(self.loop, Nodes.ForFromStatNode), self.loop
+
+ def analyse_scoped_declarations(self, env):
+ self.loop.analyse_declarations(env)
+
+ def analyse_types(self, env):
+ if not self.has_local_scope:
+ self.loop = self.loop.analyse_expressions(env)
+ return self
+
+ def analyse_scoped_expressions(self, env):
+ if self.has_local_scope:
+ self.loop = self.loop.analyse_expressions(env)
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def generate_result_code(self, code):
+ self.generate_operation_code(code)
+
+ def generate_operation_code(self, code):
+ if self.type is Builtin.list_type:
+ create_code = 'PyList_New(0)'
+ elif self.type is Builtin.set_type:
+ create_code = 'PySet_New(NULL)'
+ elif self.type is Builtin.dict_type:
+ create_code = 'PyDict_New()'
+ else:
+ raise InternalError("illegal type for comprehension: %s" % self.type)
+ code.putln('%s = %s; %s' % (
+ self.result(), create_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ self.generate_gotref(code)
+ self.loop.generate_execution_code(code)
+
+ def annotate(self, code):
+ self.loop.annotate(code)
+
+
+class ComprehensionAppendNode(Node):
+ # Need to be careful to avoid infinite recursion:
+ # target must not be in child_attrs/subexprs
+
+ child_attrs = ['expr']
+ target = None
+
+ type = PyrexTypes.c_int_type
+
+ def analyse_expressions(self, env):
+ self.expr = self.expr.analyse_expressions(env)
+ if not self.expr.type.is_pyobject:
+ self.expr = self.expr.coerce_to_pyobject(env)
+ return self
+
+ def generate_execution_code(self, code):
+ if self.target.type is list_type:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
+ function = "__Pyx_ListComp_Append"
+ elif self.target.type is set_type:
+ function = "PySet_Add"
+ else:
+ raise InternalError(
+ "Invalid type for comprehension node: %s" % self.target.type)
+
+ self.expr.generate_evaluation_code(code)
+ code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
+ function,
+ self.target.result(),
+ self.expr.result()
+ ), self.pos))
+ self.expr.generate_disposal_code(code)
+ self.expr.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ self.expr.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.expr.annotate(code)
+
+class DictComprehensionAppendNode(ComprehensionAppendNode):
+ child_attrs = ['key_expr', 'value_expr']
+
+ def analyse_expressions(self, env):
+ self.key_expr = self.key_expr.analyse_expressions(env)
+ if not self.key_expr.type.is_pyobject:
+ self.key_expr = self.key_expr.coerce_to_pyobject(env)
+ self.value_expr = self.value_expr.analyse_expressions(env)
+ if not self.value_expr.type.is_pyobject:
+ self.value_expr = self.value_expr.coerce_to_pyobject(env)
+ return self
+
+ def generate_execution_code(self, code):
+ self.key_expr.generate_evaluation_code(code)
+ self.value_expr.generate_evaluation_code(code)
+ code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
+ self.target.result(),
+ self.key_expr.result(),
+ self.value_expr.result()
+ ), self.pos))
+ self.key_expr.generate_disposal_code(code)
+ self.key_expr.free_temps(code)
+ self.value_expr.generate_disposal_code(code)
+ self.value_expr.free_temps(code)
+
+ def generate_function_definitions(self, env, code):
+ self.key_expr.generate_function_definitions(env, code)
+ self.value_expr.generate_function_definitions(env, code)
+
+ def annotate(self, code):
+ self.key_expr.annotate(code)
+ self.value_expr.annotate(code)
+
+
+class InlinedGeneratorExpressionNode(ExprNode):
+ # An inlined generator expression for which the result is calculated
+ # inside of the loop and returned as the generator's single
+ # (first and only) return value.
+ # This will only be created by transforms when replacing safe builtin
+ # calls on generator expressions.
+ #
+ # gen GeneratorExpressionNode the generator, not containing any YieldExprNodes
+ # orig_func String the name of the builtin function this node replaces
+ # target ExprNode or None a 'target' for a ComprehensionAppend node
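+ #
+ # Editor's example (hypothetical, not from the source): a call such as
+ # "sum(x*x for x in seq)" can be replaced by this node; the summation then
+ # runs inside the generator body, and __Pyx_Generator_Next() in
+ # generate_result_code() below retrieves the finished total as the
+ # generator's only value.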
+
+ subexprs = ["gen"]
+ orig_func = None
+ target = None
+ is_temp = True
+ type = py_object_type
+
+ def __init__(self, pos, gen, comprehension_type=None, **kwargs):
+ gbody = gen.def_node.gbody
+ gbody.is_inlined = True
+ if comprehension_type is not None:
+ assert comprehension_type in (list_type, set_type, dict_type), comprehension_type
+ gbody.inlined_comprehension_type = comprehension_type
+ kwargs.update(
+ target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname),
+ type=comprehension_type,
+ )
+ super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs)
+
+ def may_be_none(self):
+ return self.orig_func not in ('any', 'all', 'sorted')
+
+ def infer_type(self, env):
+ return self.type
+
+ def analyse_types(self, env):
+ self.gen = self.gen.analyse_expressions(env)
+ return self
+
+ def generate_result_code(self, code):
+ code.putln("%s = __Pyx_Generator_Next(%s); %s" % (
+ self.result(), self.gen.result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class MergedSequenceNode(ExprNode):
+ """
+ Merge a sequence of iterables into a set/list/tuple.
+
+ The target collection is determined by self.type, which must be set externally.
+
+ args [ExprNode]
+ """
+ subexprs = ['args']
+ is_temp = True
+ gil_message = "Constructing Python collection"
+
+ def __init__(self, pos, args, type):
+ if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor:
+ # construct a list directly from the first argument that we can then extend
+ if args[0].type is not list_type:
+ args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True, mult_factor=args[0].mult_factor)
+ ExprNode.__init__(self, pos, args=args, type=type)
+
+ def calculate_constant_result(self):
+ result = []
+ for item in self.args:
+ if item.is_sequence_constructor and item.mult_factor:
+ if item.mult_factor.constant_result <= 0:
+ continue
+ # otherwise, adding each item once should be enough
+ if item.is_set_literal or item.is_sequence_constructor:
+ # process items in order
+ items = (arg.constant_result for arg in item.args)
+ else:
+ items = item.constant_result
+ result.extend(items)
+ if self.type is set_type:
+ result = set(result)
+ elif self.type is tuple_type:
+ result = tuple(result)
+ else:
+ assert self.type is list_type
+ self.constant_result = result
+
+ def compile_time_value(self, denv):
+ result = []
+ for item in self.args:
+ if item.is_sequence_constructor and item.mult_factor:
+ if item.mult_factor.compile_time_value(denv) <= 0:
+ continue
+ if item.is_set_literal or item.is_sequence_constructor:
+ # process items in order
+ items = (arg.compile_time_value(denv) for arg in item.args)
+ else:
+ items = item.compile_time_value(denv)
+ result.extend(items)
+ if self.type is set_type:
+ try:
+ result = set(result)
+ except Exception as e:
+ self.compile_time_value_error(e)
+ elif self.type is tuple_type:
+ result = tuple(result)
+ else:
+ assert self.type is list_type
+ return result
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ return self.type
+
+ def analyse_types(self, env):
+ args = [
+ arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
+ # FIXME: CPython's error message starts with the runtime function name
+ 'argument after * must be an iterable, not NoneType')
+ for arg in self.args
+ ]
+
+ if len(args) == 1 and args[0].type is self.type:
+ # strip this intermediate node and use the bare collection
+ return args[0]
+
+ assert self.type in (set_type, list_type, tuple_type)
+
+ self.args = args
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ is_set = self.type is set_type
+
+ args = iter(self.args)
+ item = next(args)
+ item.generate_evaluation_code(code)
+ if (is_set and item.is_set_literal or
+ not is_set and item.is_sequence_constructor and item.type is list_type):
+ code.putln("%s = %s;" % (self.result(), item.py_result()))
+ item.generate_post_assignment_code(code)
+ else:
+ code.putln("%s = %s(%s); %s" % (
+ self.result(),
+ 'PySet_New' if is_set
+ else "__Pyx_PySequence_ListKeepNew" if item.is_temp and item.type in (py_object_type, list_type)
+ else "PySequence_List",
+ item.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ item.generate_disposal_code(code)
+ item.free_temps(code)
+
+ helpers = set()
+ if is_set:
+ add_func = "PySet_Add"
+ extend_func = "__Pyx_PySet_Update"
+ else:
+ add_func = "__Pyx_ListComp_Append"
+ extend_func = "__Pyx_PyList_Extend"
+
+ for item in args:
+ if (is_set and (item.is_set_literal or item.is_sequence_constructor) or
+ (item.is_sequence_constructor and not item.mult_factor)):
+ if not is_set and item.args:
+ helpers.add(("ListCompAppend", "Optimize.c"))
+ for arg in item.args:
+ arg.generate_evaluation_code(code)
+ code.put_error_if_neg(arg.pos, "%s(%s, %s)" % (
+ add_func,
+ self.result(),
+ arg.py_result()))
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+ continue
+
+ if is_set:
+ helpers.add(("PySet_Update", "Builtins.c"))
+ else:
+ helpers.add(("ListExtend", "Optimize.c"))
+
+ item.generate_evaluation_code(code)
+ code.put_error_if_neg(item.pos, "%s(%s, %s)" % (
+ extend_func,
+ self.result(),
+ item.py_result()))
+ item.generate_disposal_code(code)
+ item.free_temps(code)
+
+ if self.type is tuple_type:
+ code.putln("{")
+ code.putln("PyObject *%s = PyList_AsTuple(%s);" % (
+ Naming.quick_temp_cname,
+ self.result()))
+ code.put_decref(self.result(), py_object_type)
+ code.putln("%s = %s; %s" % (
+ self.result(),
+ Naming.quick_temp_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ code.putln("}")
+
+ for helper in sorted(helpers):
+ code.globalstate.use_utility_code(UtilityCode.load_cached(*helper))
+
+ def annotate(self, code):
+ for item in self.args:
+ item.annotate(code)
+
+
+class SetNode(ExprNode):
+ """
+ Set constructor.
+ """
+ subexprs = ['args']
+ type = set_type
+ is_set_literal = True
+ gil_message = "Constructing Python set"
+
+ def analyse_types(self, env):
+ for i in range(len(self.args)):
+ arg = self.args[i]
+ arg = arg.analyse_types(env)
+ self.args[i] = arg.coerce_to_pyobject(env)
+ self.type = set_type
+ self.is_temp = 1
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def calculate_constant_result(self):
+ self.constant_result = {arg.constant_result for arg in self.args}
+
+ def compile_time_value(self, denv):
+ values = [arg.compile_time_value(denv) for arg in self.args]
+ try:
+ return set(values)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def generate_evaluation_code(self, code):
+ for arg in self.args:
+ arg.generate_evaluation_code(code)
+ self.allocate_temp_result(code)
+ code.putln(
+ "%s = PySet_New(0); %s" % (
+ self.result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ for arg in self.args:
+ code.put_error_if_neg(
+ self.pos,
+ "PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
+ arg.generate_disposal_code(code)
+ arg.free_temps(code)
+
+
+class DictNode(ExprNode):
+ # Dictionary constructor.
+ #
+ # key_value_pairs [DictItemNode]
+ # exclude_null_values [boolean] Do not add NULL values to dict
+ #
+ # obj_conversion_errors [PyrexError] used internally
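+ #
+ # Editor's example (not from the source): besides Python dict literals,
+ # this node also builds C struct values when coerced to a struct type,
+ # e.g. in Cython code
+ #
+ # cdef struct Point:
+ # int x
+ # int y
+ # cdef Point p = {'x': 1, 'y': 2}
+ #
+ # where coerce_to() below re-types the node to the struct and coerces
+ # each value to the matching struct member.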
+
+ subexprs = ['key_value_pairs']
+ is_temp = 1
+ exclude_null_values = False
+ type = dict_type
+ is_dict_literal = True
+ reject_duplicates = False
+
+ obj_conversion_errors = []
+
+ @classmethod
+ def from_pairs(cls, pos, pairs):
+ return cls(pos, key_value_pairs=[
+ DictItemNode(pos, key=k, value=v) for k, v in pairs])
+
+ def calculate_constant_result(self):
+ self.constant_result = dict([
+ item.constant_result for item in self.key_value_pairs])
+
+ def compile_time_value(self, denv):
+ pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
+ for item in self.key_value_pairs]
+ try:
+ return dict(pairs)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ # TODO: Infer struct constructors.
+ return dict_type
+
+ def analyse_types(self, env):
+ with local_errors(ignore=True) as errors:
+ self.key_value_pairs = [
+ item.analyse_types(env)
+ for item in self.key_value_pairs
+ ]
+ self.obj_conversion_errors = errors
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def coerce_to(self, dst_type, env):
+ if dst_type.is_pyobject:
+ self.release_errors()
+ if self.type.is_struct_or_union:
+ if not dict_type.subtype_of(dst_type):
+ error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type)
+ return DictNode(self.pos, key_value_pairs=[
+ DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env),
+ value=item.value.coerce_to_pyobject(env))
+ for item in self.key_value_pairs])
+ if not self.type.subtype_of(dst_type):
+ error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
+ elif dst_type.is_struct_or_union:
+ self.type = dst_type
+ if not dst_type.is_struct and len(self.key_value_pairs) != 1:
+ error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
+ elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
+ warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
+ for item in self.key_value_pairs:
+ if isinstance(item.key, CoerceToPyTypeNode):
+ item.key = item.key.arg
+ if not item.key.is_string_literal:
+ error(item.key.pos, "Invalid struct field identifier")
+ item.key = StringNode(item.key.pos, value="")
+ else:
+ key = str(item.key.value) # converts string literals to unicode in Py3
+ member = dst_type.scope.lookup_here(key)
+ if not member:
+ error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
+ else:
+ value = item.value
+ if isinstance(value, CoerceToPyTypeNode):
+ value = value.arg
+ item.value = value.coerce_to(member.type, env)
+ else:
+ return super(DictNode, self).coerce_to(dst_type, env)
+ return self
+
+ def release_errors(self):
+ for err in self.obj_conversion_errors:
+ report_error(err)
+ self.obj_conversion_errors = []
+
+ gil_message = "Constructing Python dict"
+
+ def generate_evaluation_code(self, code):
+ # Custom method used here because key-value
+ # pairs are evaluated and used one at a time.
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ is_dict = self.type.is_pyobject
+ if is_dict:
+ self.release_errors()
+ code.putln(
+ "%s = __Pyx_PyDict_NewPresized(%d); %s" % (
+ self.result(),
+ len(self.key_value_pairs),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+ keys_seen = set()
+ key_type = None
+ needs_error_helper = False
+
+ for item in self.key_value_pairs:
+ item.generate_evaluation_code(code)
+ if is_dict:
+ if self.exclude_null_values:
+ code.putln('if (%s) {' % item.value.py_result())
+ key = item.key
+ if self.reject_duplicates:
+ if keys_seen is not None:
+ # avoid runtime 'in' checks for literals that we can do at compile time
+ if not key.is_string_literal:
+ keys_seen = None
+ elif key.value in keys_seen:
+ # FIXME: this could be a compile time error, at least in Cython code
+ keys_seen = None
+ elif key_type is not type(key.value):
+ if key_type is None:
+ key_type = type(key.value)
+ keys_seen.add(key.value)
+ else:
+ # different types => may not be able to compare at compile time
+ keys_seen = None
+ else:
+ keys_seen.add(key.value)
+
+ if keys_seen is None:
+ code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % (
+ self.result(), key.py_result()))
+ # currently only used in function calls
+ needs_error_helper = True
+ code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
+ key.py_result(),
+ code.error_goto(item.pos)))
+ code.putln("} else {")
+
+ code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % (
+ self.result(),
+ item.key.py_result(),
+ item.value.py_result()))
+ if self.reject_duplicates and keys_seen is None:
+ code.putln('}')
+ if self.exclude_null_values:
+ code.putln('}')
+ else:
+ if item.value.type.is_array:
+ code.putln("memcpy(%s.%s, %s, sizeof(%s));" % (
+ self.result(),
+ item.key.value,
+ item.value.result(),
+ item.value.result()))
+ else:
+ code.putln("%s.%s = %s;" % (
+ self.result(),
+ item.key.value,
+ item.value.result()))
+ item.generate_disposal_code(code)
+ item.free_temps(code)
+
+ if needs_error_helper:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
+
+ def annotate(self, code):
+ for item in self.key_value_pairs:
+ item.annotate(code)
+
+ def as_python_dict(self):
+ # returns a dict with constant keys and Node values
+ # (only works on DictNodes where the keys are ConstNodes or PyConstNode)
+ return dict([(key.value, value) for key, value in self.key_value_pairs])
+
+
+class DictItemNode(ExprNode):
+ # Represents a single item in a DictNode
+ #
+ # key ExprNode
+ # value ExprNode
+ subexprs = ['key', 'value']
+
+ nogil_check = None # Parent DictNode takes care of it
+
+ def calculate_constant_result(self):
+ self.constant_result = (
+ self.key.constant_result, self.value.constant_result)
+
+ def analyse_types(self, env):
+ self.key = self.key.analyse_types(env)
+ self.value = self.value.analyse_types(env)
+ self.key = self.key.coerce_to_pyobject(env)
+ self.value = self.value.coerce_to_pyobject(env)
+ return self
+
+ def generate_evaluation_code(self, code):
+ self.key.generate_evaluation_code(code)
+ self.value.generate_evaluation_code(code)
+
+ def generate_disposal_code(self, code):
+ self.key.generate_disposal_code(code)
+ self.value.generate_disposal_code(code)
+
+ def free_temps(self, code):
+ self.key.free_temps(code)
+ self.value.free_temps(code)
+
+ def __iter__(self):
+ return iter([self.key, self.value])
+
+
+class SortedDictKeysNode(ExprNode):
+ # build sorted list of dict keys, e.g. for dir()
+ subexprs = ['arg']
+
+ is_temp = True
+
+ def __init__(self, arg):
+ ExprNode.__init__(self, arg.pos, arg=arg)
+ self.type = Builtin.list_type
+
+ def analyse_types(self, env):
+ arg = self.arg.analyse_types(env)
+ if arg.type is Builtin.dict_type:
+ arg = arg.as_none_safe_node(
+ "'NoneType' object is not iterable")
+ self.arg = arg
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def generate_result_code(self, code):
+ dict_result = self.arg.py_result()
+ if self.arg.type is Builtin.dict_type:
+ code.putln('%s = PyDict_Keys(%s); %s' % (
+ self.result(), dict_result,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ else:
+ # originally used PyMapping_Keys() here, but that may return a tuple
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ 'PyObjectCallMethod0', 'ObjectHandling.c'))
+ keys_cname = code.intern_identifier(StringEncoding.EncodedString("keys"))
+ code.putln('%s = __Pyx_PyObject_CallMethod0(%s, %s); %s' % (
+ self.result(), dict_result, keys_cname,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ code.putln("if (unlikely(!PyList_Check(%s))) {" % self.result())
+ self.generate_decref_set(code, "PySequence_List(%s)" % self.result())
+ code.putln(code.error_goto_if_null(self.result(), self.pos))
+ self.generate_gotref(code)
+ code.putln("}")
+ code.put_error_if_neg(
+ self.pos, 'PyList_Sort(%s)' % self.py_result())
+
+
+class ModuleNameMixin(object):
+ def get_py_mod_name(self, code):
+ return code.get_py_string_const(
+ self.module_name, identifier=True)
+
+ def get_py_qualified_name(self, code):
+ return code.get_py_string_const(
+ self.qualname, identifier=True)
+
+
+class ClassNode(ExprNode, ModuleNameMixin):
+ # Helper class used in the implementation of Python
+ # class definitions. Constructs a class object given
+ # a name, tuple of bases and class dictionary.
+ #
+ # name EncodedString Name of the class
+ # class_def_node PyClassDefNode PyClassDefNode defining this class
+ # doc ExprNode or None Doc string
+ # module_name EncodedString Name of defining module
+
+ subexprs = ['doc']
+ type = py_object_type
+ is_temp = True
+
+ def analyse_annotations(self, env):
+ pass
+
+ def infer_type(self, env):
+ # TODO: could return 'type' in some cases
+ return py_object_type
+
+ def analyse_types(self, env):
+ if self.doc:
+ self.doc = self.doc.analyse_types(env)
+ self.doc = self.doc.coerce_to_pyobject(env)
+ env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
+ return self
+
+ def may_be_none(self):
+ return True
+
+ gil_message = "Constructing Python class"
+
+ def generate_result_code(self, code):
+ class_def_node = self.class_def_node
+ cname = code.intern_identifier(self.name)
+
+ if self.doc:
+ code.put_error_if_neg(self.pos,
+ 'PyDict_SetItem(%s, %s, %s)' % (
+ class_def_node.dict.py_result(),
+ code.intern_identifier(
+ StringEncoding.EncodedString("__doc__")),
+ self.doc.py_result()))
+ py_mod_name = self.get_py_mod_name(code)
+ qualname = self.get_py_qualified_name(code)
+ code.putln(
+ '%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
+ self.result(),
+ class_def_node.bases.py_result(),
+ class_def_node.dict.py_result(),
+ cname,
+ qualname,
+ py_mod_name,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class Py3ClassNode(ExprNode):
+ # Helper class used in the implementation of Python3+
+ # class definitions. Constructs a class object given
+ # a name, tuple of bases and class dictionary.
+ #
+ # name EncodedString Name of the class
+ # module_name EncodedString Name of defining module
+ # class_def_node PyClassDefNode PyClassDefNode defining this class
+ # calculate_metaclass bool should call CalculateMetaclass()
+ # allow_py2_metaclass bool should look for Py2 metaclass
+ # force_type bool always create a "new style" class, even with no bases
+
+ subexprs = []
+ type = py_object_type
+ force_type = False
+ is_temp = True
+
+ def infer_type(self, env):
+ # TODO: could return 'type' in some cases
+ return py_object_type
+
+ def analyse_types(self, env):
+ return self
+
+ def may_be_none(self):
+ return True
+
+ gil_message = "Constructing Python class"
+
+ def analyse_annotations(self, env):
+ from .AutoDocTransforms import AnnotationWriter
+ position = self.class_def_node.pos
+ dict_items = [
+ DictItemNode(
+ entry.pos,
+ key=IdentifierStringNode(entry.pos, value=entry.name),
+ value=entry.annotation.string
+ )
+ for entry in env.entries.values() if entry.annotation
+ ]
+ # Annotations dict shouldn't exist for classes which don't declare any.
+ if dict_items:
+ annotations_dict = DictNode(position, key_value_pairs=dict_items)
+ lhs = NameNode(position, name=StringEncoding.EncodedString(u"__annotations__"))
+ lhs.entry = env.lookup_here(lhs.name) or env.declare_var(lhs.name, dict_type, position)
+ node = SingleAssignmentNode(position, lhs=lhs, rhs=annotations_dict)
+ node.analyse_declarations(env)
+ self.class_def_node.body.stats.insert(0, node)
+
+ def generate_result_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
+ cname = code.intern_identifier(self.name)
+ class_def_node = self.class_def_node
+ mkw = class_def_node.mkw.py_result() if class_def_node.mkw else 'NULL'
+ if class_def_node.metaclass:
+ metaclass = class_def_node.metaclass.py_result()
+ elif self.force_type:
+ metaclass = "((PyObject*)&PyType_Type)"
+ else:
+ metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
+ code.putln(
+ '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
+ self.result(),
+ metaclass,
+ cname,
+ class_def_node.bases.py_result(),
+ class_def_node.dict.py_result(),
+ mkw,
+ self.calculate_metaclass,
+ self.allow_py2_metaclass,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class PyClassMetaclassNode(ExprNode):
+ # Helper class that holds the Python3 metaclass object
+ #
+ # class_def_node PyClassDefNode PyClassDefNode defining this class
+
+ subexprs = []
+
+ def analyse_types(self, env):
+ self.type = py_object_type
+ self.is_temp = True
+ return self
+
+ def may_be_none(self):
+ return True
+
+ def generate_result_code(self, code):
+ bases = self.class_def_node.bases
+ mkw = self.class_def_node.mkw
+ if mkw:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
+ call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
+ bases.result(),
+ mkw.result())
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
+ call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
+ bases.result())
+ code.putln(
+ "%s = %s; %s" % (
+ self.result(), call,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
+ # Helper class that holds the Python3 namespace object
+ #
+ # These are not owned by this node unless marked otherwise:
+ # class_def_node PyClassDefNode PyClassDefNode defining this class
+ # doc ExprNode or None Doc string (owned)
+
+ subexprs = ['doc']
+
+ def analyse_types(self, env):
+ if self.doc:
+ self.doc = self.doc.analyse_types(env).coerce_to_pyobject(env)
+ self.type = py_object_type
+ self.is_temp = 1
+ return self
+
+ def may_be_none(self):
+ return True
+
+ def generate_result_code(self, code):
+ cname = code.intern_identifier(self.name)
+ py_mod_name = self.get_py_mod_name(code)
+ qualname = self.get_py_qualified_name(code)
+ class_def_node = self.class_def_node
+ null = "(PyObject *) NULL"
+ doc_code = self.doc.result() if self.doc else null
+ mkw = class_def_node.mkw.py_result() if class_def_node.mkw else null
+ metaclass = class_def_node.metaclass.py_result() if class_def_node.metaclass else null
+ code.putln(
+ "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
+ self.result(),
+ metaclass,
+ class_def_node.bases.result(),
+ cname,
+ qualname,
+ mkw,
+ py_mod_name,
+ doc_code,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class ClassCellInjectorNode(ExprNode):
+ # Initialize CyFunction.func_classobj
+ is_temp = True
+ type = py_object_type
+ subexprs = []
+ is_active = False
+
+ def analyse_expressions(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ assert self.is_active
+ code.putln(
+ '%s = PyList_New(0); %s' % (
+ self.result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+ def generate_injection_code(self, code, classobj_cname):
+ assert self.is_active
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
+ code.put_error_if_neg(self.pos, '__Pyx_CyFunction_InitClassCell(%s, %s)' % (
+ self.result(), classobj_cname))
+
+
+class ClassCellNode(ExprNode):
+ # Class Cell for noargs super()
+ subexprs = []
+ is_temp = True
+ is_generator = False
+ type = py_object_type
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ if not self.is_generator:
+ code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
+ self.result(),
+ Naming.self_cname))
+ else:
+ code.putln('%s = %s->classobj;' % (
+ self.result(), Naming.generator_cname))
+ code.putln(
+ 'if (!%s) { PyErr_SetString(PyExc_SystemError, '
+ '"super(): empty __class__ cell"); %s }' % (
+ self.result(),
+ code.error_goto(self.pos)))
+ code.put_incref(self.result(), py_object_type)
+
+
+class PyCFunctionNode(ExprNode, ModuleNameMixin):
+ # Helper class used in the implementation of Python
+ # functions. Constructs a PyCFunction object
+ # from a PyMethodDef struct.
+ #
+ # pymethdef_cname string PyMethodDef structure
+ # binding bool
+ # def_node DefNode the Python function node
+ # module_name EncodedString Name of defining module
+ # code_object CodeObjectNode the PyCodeObject creator node
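+ #
+ # Editor's note (not from the source): with binding=False this emits a
+ # plain PyCFunction_NewEx() call; with binding=True it creates a full
+ # CyFunction via __Pyx_CyFunction_New(), which additionally carries
+ # defaults, annotations and introspection data (see
+ # generate_cyfunction_code() below).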
+
+ subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
+ 'annotations_dict']
+
+ code_object = None
+ binding = False
+ def_node = None
+ defaults = None
+ defaults_struct = None
+ defaults_pyobjects = 0
+ defaults_tuple = None
+ defaults_kwdict = None
+ annotations_dict = None
+
+ type = py_object_type
+ is_temp = 1
+
+ specialized_cpdefs = None
+ is_specialization = False
+
+ @classmethod
+ def from_defnode(cls, node, binding):
+ return cls(node.pos,
+ def_node=node,
+ pymethdef_cname=node.entry.pymethdef_cname,
+ binding=binding or node.specialized_cpdefs,
+ specialized_cpdefs=node.specialized_cpdefs,
+ code_object=CodeObjectNode(node))
+
+ def analyse_types(self, env):
+ if self.binding:
+ self.analyse_default_args(env)
+ return self
+
+ def analyse_default_args(self, env):
+ """
+ Handle the function's non-literal default arguments.
+ """
+ nonliteral_objects = []
+ nonliteral_other = []
+ default_args = []
+ default_kwargs = []
+ annotations = []
+
+ # For global cpdef functions and def/cpdef methods in cdef classes, we must use global constants
+ # for default arguments to avoid the dependency on the CyFunction object as 'self' argument
+ # in the underlying C function. Basically, cpdef functions/methods are static C functions,
+ # so their optional arguments must be static, too.
+ # TODO: change CyFunction implementation to pass both function object and owning object for method calls
+ must_use_constants = env.is_c_class_scope or (self.def_node.is_wrapper and env.is_module_scope)
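+ # For example (editor's illustration): a module-level
+ # "cpdef f(x=some_object())" compiles to a static C function that can be
+ # called directly, without any CyFunction instance from which a dynamic
+ # default for 'x' could be loaded, so the default has to live in a global
+ # constant instead.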
+
+ for arg in self.def_node.args:
+ if arg.default:
+ if not must_use_constants:
+ if arg.default.is_literal:
+ arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
+ if arg.default.type:
+ arg.default = arg.default.coerce_to(arg.type, env)
+ else:
+ arg.is_dynamic = True
+ if arg.type.is_pyobject:
+ nonliteral_objects.append(arg)
+ else:
+ nonliteral_other.append(arg)
+ if arg.default.type and arg.default.type.can_coerce_to_pyobject(env):
+ if arg.kw_only:
+ default_kwargs.append(arg)
+ else:
+ default_args.append(arg)
+ if arg.annotation:
+ arg.annotation = arg.annotation.analyse_types(env)
+ annotations.append((arg.pos, arg.name, arg.annotation.string))
+
+ for arg in (self.def_node.star_arg, self.def_node.starstar_arg):
+ if arg and arg.annotation:
+ arg.annotation = arg.annotation.analyse_types(env)
+ annotations.append((arg.pos, arg.name, arg.annotation.string))
+
+ annotation = self.def_node.return_type_annotation
+ if annotation:
+ self.def_node.return_type_annotation = annotation.analyse_types(env)
+ annotations.append((annotation.pos, StringEncoding.EncodedString("return"),
+ annotation.string))
+
+ if nonliteral_objects or nonliteral_other:
+ module_scope = env.global_scope()
+ cname = module_scope.next_id(Naming.defaults_struct_prefix)
+ scope = Symtab.StructOrUnionScope(cname)
+ self.defaults = []
+ for arg in nonliteral_objects:
+ type_ = arg.type
+ if type_.is_buffer:
+ type_ = type_.base
+ entry = scope.declare_var(arg.name, type_, None,
+ Naming.arg_prefix + arg.name,
+ allow_pyobject=True)
+ self.defaults.append((arg, entry))
+ for arg in nonliteral_other:
+ entry = scope.declare_var(arg.name, arg.type, None,
+ Naming.arg_prefix + arg.name,
+ allow_pyobject=False, allow_memoryview=True)
+ self.defaults.append((arg, entry))
+ entry = module_scope.declare_struct_or_union(
+ None, 'struct', scope, 1, None, cname=cname)
+ self.defaults_struct = scope
+ self.defaults_pyobjects = len(nonliteral_objects)
+ for arg, entry in self.defaults:
+ arg.default_value = '%s->%s' % (
+ Naming.dynamic_args_cname, entry.cname)
+ self.def_node.defaults_struct = self.defaults_struct.name
+
+ if default_args or default_kwargs:
+ if self.defaults_struct is None:
+ if default_args:
+ defaults_tuple = TupleNode(self.pos, args=[
+ arg.default for arg in default_args])
+ self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env)
+ if default_kwargs:
+ defaults_kwdict = DictNode(self.pos, key_value_pairs=[
+ DictItemNode(
+ arg.pos,
+ key=IdentifierStringNode(arg.pos, value=arg.name),
+ value=arg.default)
+ for arg in default_kwargs])
+ self.defaults_kwdict = defaults_kwdict.analyse_types(env)
+ elif not self.specialized_cpdefs:
+ # Fused dispatch functions do not support (dynamic) default arguments, only the specialisations do.
+ if default_args:
+ defaults_tuple = DefaultsTupleNode(
+ self.pos, default_args, self.defaults_struct)
+ else:
+ defaults_tuple = NoneNode(self.pos)
+ if default_kwargs:
+ defaults_kwdict = DefaultsKwDictNode(
+ self.pos, default_kwargs, self.defaults_struct)
+ else:
+ defaults_kwdict = NoneNode(self.pos)
+
+ defaults_getter = Nodes.DefNode(
+ self.pos, args=[], star_arg=None, starstar_arg=None,
+ body=Nodes.ReturnStatNode(
+ self.pos, return_type=py_object_type,
+ value=TupleNode(
+ self.pos, args=[defaults_tuple, defaults_kwdict])),
+ decorators=None,
+ name=StringEncoding.EncodedString("__defaults__"))
+ # defaults getter must never live in class scopes, it's always a module function
+ module_scope = env.global_scope()
+ defaults_getter.analyse_declarations(module_scope)
+ defaults_getter = defaults_getter.analyse_expressions(module_scope)
+ defaults_getter.body = defaults_getter.body.analyse_expressions(
+ defaults_getter.local_scope)
+ defaults_getter.py_wrapper_required = False
+ defaults_getter.pymethdef_required = False
+ self.def_node.defaults_getter = defaults_getter
+ if annotations:
+ annotations_dict = DictNode(self.pos, key_value_pairs=[
+ DictItemNode(
+ pos, key=IdentifierStringNode(pos, value=name),
+ value=value)
+ for pos, name, value in annotations])
+ self.annotations_dict = annotations_dict.analyse_types(env)
+
+ def may_be_none(self):
+ return False
+
+ gil_message = "Constructing Python function"
+
+ def closure_result_code(self):
+ return "NULL"
+
+ def generate_result_code(self, code):
+ if self.binding:
+ self.generate_cyfunction_code(code)
+ else:
+ self.generate_pycfunction_code(code)
+
+ def generate_pycfunction_code(self, code):
+ py_mod_name = self.get_py_mod_name(code)
+ code.putln(
+ '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
+ self.result(),
+ self.pymethdef_cname,
+ self.closure_result_code(),
+ py_mod_name,
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ self.generate_gotref(code)
+
+ def generate_cyfunction_code(self, code):
+ if self.specialized_cpdefs:
+ def_node = self.specialized_cpdefs[0]
+ else:
+ def_node = self.def_node
+
+ if self.specialized_cpdefs or self.is_specialization:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
+ constructor = "__pyx_FusedFunction_New"
+ else:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
+ constructor = "__Pyx_CyFunction_New"
+
+ if self.code_object:
+ code_object_result = self.code_object.py_result()
+ else:
+ code_object_result = 'NULL'
+
+ flags = []
+ if def_node.is_staticmethod:
+ flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
+ elif def_node.is_classmethod:
+ flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
+
+ if def_node.local_scope.parent_scope.is_c_class_scope and not def_node.entry.is_anonymous:
+ flags.append('__Pyx_CYFUNCTION_CCLASS')
+
+ if def_node.is_coroutine:
+ flags.append('__Pyx_CYFUNCTION_COROUTINE')
+
+ if flags:
+ flags = ' | '.join(flags)
+ else:
+ flags = '0'
+
+ code.putln(
+ '%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
+ self.result(),
+ constructor,
+ self.pymethdef_cname,
+ flags,
+ self.get_py_qualified_name(code),
+ self.closure_result_code(),
+ self.get_py_mod_name(code),
+ Naming.moddict_cname,
+ code_object_result,
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ self.generate_gotref(code)
+
+ if def_node.requires_classobj:
+ assert code.pyclass_stack, "pyclass_stack is empty"
+ class_node = code.pyclass_stack[-1]
+ code.put_incref(self.py_result(), py_object_type)
+ code.putln(
+ 'PyList_Append(%s, %s);' % (
+ class_node.class_cell.result(),
+ self.result()))
+ self.generate_giveref(code)
+
+ if self.defaults:
+ code.putln(
+ 'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
+ self.result(), self.defaults_struct.name,
+ self.defaults_pyobjects, code.error_goto(self.pos)))
+ defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
+ self.defaults_struct.name, self.result())
+ for arg, entry in self.defaults:
+ arg.generate_assignment_code(code, target='%s->%s' % (
+ defaults, entry.cname))
+
+ if self.defaults_tuple:
+ code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
+ self.result(), self.defaults_tuple.py_result()))
+ if not self.specialized_cpdefs:
+ # disable introspection functions for fused dispatcher function since the user never sees it
+ # TODO: this is mostly disabled because the attributes end up pointing to ones belonging
+ # to the specializations - ideally this would be fixed instead
+ if self.defaults_kwdict:
+ code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
+ self.result(), self.defaults_kwdict.py_result()))
+ if def_node.defaults_getter:
+ code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
+ self.result(), def_node.defaults_getter.entry.pyfunc_cname))
+ if self.annotations_dict:
+ code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
+ self.result(), self.annotations_dict.py_result()))
+
+
+class InnerFunctionNode(PyCFunctionNode):
+ # Special PyCFunctionNode that depends on a closure class
+
+ binding = True
+ needs_closure_code = True
+
+ def closure_result_code(self):
+ if self.needs_closure_code:
+ return "((PyObject*)%s)" % Naming.cur_scope_cname
+ return "NULL"
+
+
+class CodeObjectNode(ExprNode):
+ # Create a PyCodeObject for a CyFunction instance.
+ #
+ # def_node DefNode the Python function node
+ # varnames TupleNode a tuple with all local variable names
+
+ subexprs = ['varnames']
+ is_temp = False
+ result_code = None
+
+ def __init__(self, def_node):
+ ExprNode.__init__(self, def_node.pos, def_node=def_node)
+ args = list(def_node.args)
+ # if we have args/kwargs, then the first two in var_entries are those
+ local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
+ self.varnames = TupleNode(
+ def_node.pos,
+ args=[IdentifierStringNode(arg.pos, value=arg.name)
+ for arg in args + local_vars],
+ is_temp=0,
+ is_literal=1)
+
+ def may_be_none(self):
+ return False
+
+ def calculate_result_code(self, code=None):
+ if self.result_code is None:
+ self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
+ return self.result_code
+
+ def generate_result_code(self, code):
+ if self.result_code is None:
+ self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
+
+ code = code.get_cached_constants_writer(self.result_code)
+ if code is None:
+ return # already initialised
+ code.mark_pos(self.pos)
+ func = self.def_node
+ func_name = code.get_py_string_const(
+ func.name, identifier=True, is_str=False, unicode_value=func.name)
+ # FIXME: better way to get the module file path at module init time? Encoding to use?
+ file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8')
+ file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
+
+ # This combination makes CPython create a new dict for "frame.f_locals" (see GH #1836).
+ flags = ['CO_OPTIMIZED', 'CO_NEWLOCALS']
+
+ if self.def_node.star_arg:
+ flags.append('CO_VARARGS')
+ if self.def_node.starstar_arg:
+ flags.append('CO_VARKEYWORDS')
+ if self.def_node.is_asyncgen:
+ flags.append('CO_ASYNC_GENERATOR')
+ elif self.def_node.is_coroutine:
+ flags.append('CO_COROUTINE')
+ elif self.def_node.is_generator:
+ flags.append('CO_GENERATOR')
+
+ code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
+ self.result_code,
+ len(func.args) - func.num_kwonly_args, # argcount
+ func.num_posonly_args, # posonlyargcount (Py3.8+ only)
+ func.num_kwonly_args, # kwonlyargcount (Py3 only)
+ len(self.varnames.args), # nlocals
+ '|'.join(flags) or '0', # flags
+ Naming.empty_bytes, # code
+ Naming.empty_tuple, # consts
+ Naming.empty_tuple, # names (FIXME)
+ self.varnames.result(), # varnames
+ Naming.empty_tuple, # freevars (FIXME)
+ Naming.empty_tuple, # cellvars (FIXME)
+ file_path_const, # filename
+ func_name, # name
+ self.pos[1], # firstlineno
+ Naming.empty_bytes, # lnotab
+ code.error_goto_if_null(self.result_code, self.pos),
+ ))
+
+
+class DefaultLiteralArgNode(ExprNode):
+ # CyFunction's literal argument default value
+ #
+ # Evaluate literal only once.
+
+ subexprs = []
+ is_literal = True
+ is_temp = False
+
+ def __init__(self, pos, arg):
+ super(DefaultLiteralArgNode, self).__init__(pos)
+ self.arg = arg
+ self.constant_result = arg.constant_result
+ self.type = self.arg.type
+ self.evaluated = False
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ pass
+
+ def generate_evaluation_code(self, code):
+ if not self.evaluated:
+ self.arg.generate_evaluation_code(code)
+ self.evaluated = True
+
+ def result(self):
+ return self.type.cast_code(self.arg.result())
+
+
+class DefaultNonLiteralArgNode(ExprNode):
+ # CyFunction's non-literal argument default value
+
+ subexprs = []
+
+ def __init__(self, pos, arg, defaults_struct):
+ super(DefaultNonLiteralArgNode, self).__init__(pos)
+ self.arg = arg
+ self.defaults_struct = defaults_struct
+
+ def analyse_types(self, env):
+ self.type = self.arg.type
+ self.is_temp = False
+ return self
+
+ def generate_result_code(self, code):
+ pass
+
+ def result(self):
+ return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
+ self.defaults_struct.name, Naming.self_cname,
+ self.defaults_struct.lookup(self.arg.name).cname)
+
+
+class DefaultsTupleNode(TupleNode):
+ # CyFunction's __defaults__ tuple
+
+ def __init__(self, pos, defaults, defaults_struct):
+ args = []
+ for arg in defaults:
+ if not arg.default.is_literal:
+ arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
+ else:
+ arg = arg.default
+ args.append(arg)
+ super(DefaultsTupleNode, self).__init__(pos, args=args)
+
+ def analyse_types(self, env, skip_children=False):
+ return super(DefaultsTupleNode, self).analyse_types(env, skip_children).coerce_to_pyobject(env)
+
+
+class DefaultsKwDictNode(DictNode):
+ # CyFunction's __kwdefaults__ dict
+
+ def __init__(self, pos, defaults, defaults_struct):
+ items = []
+ for arg in defaults:
+ name = IdentifierStringNode(arg.pos, value=arg.name)
+ if not arg.default.is_literal:
+ arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
+ else:
+ arg = arg.default
+ items.append(DictItemNode(arg.pos, key=name, value=arg))
+ super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
+
+
+class LambdaNode(InnerFunctionNode):
+ # Lambda expression node (only used as a function reference)
+ #
+ # args [CArgDeclNode] formal arguments
+ # star_arg PyArgDeclNode or None * argument
+ # starstar_arg PyArgDeclNode or None ** argument
+ # lambda_name string a module-globally unique lambda name
+ # result_expr ExprNode
+ # def_node DefNode the underlying function 'def' node
+
+ child_attrs = ['def_node']
+
+ name = StringEncoding.EncodedString('<lambda>')
+
+ def analyse_declarations(self, env):
+ if hasattr(self, "lambda_name"):
+ # this if-statement makes it safe to run twice
+ return
+ self.lambda_name = self.def_node.lambda_name = env.next_id('lambda')
+ self.def_node.no_assignment_synthesis = True
+ self.def_node.pymethdef_required = True
+ self.def_node.is_cyfunction = True
+ self.def_node.analyse_declarations(env)
+ self.pymethdef_cname = self.def_node.entry.pymethdef_cname
+ env.add_lambda_def(self.def_node)
+
+ def analyse_types(self, env):
+ self.def_node = self.def_node.analyse_expressions(env)
+ return super(LambdaNode, self).analyse_types(env)
+
+ def generate_result_code(self, code):
+ self.def_node.generate_execution_code(code)
+ super(LambdaNode, self).generate_result_code(code)
+
+
+class GeneratorExpressionNode(LambdaNode):
+ # A generator expression, e.g. (i for i in range(10))
+ #
+ # Result is a generator.
+ #
+ # loop ForStatNode the for-loop, containing a YieldExprNode
+ # def_node DefNode the underlying generator 'def' node
+ # call_parameters [ExprNode] (Internal) parameters passed to the DefNode call
+
+ name = StringEncoding.EncodedString('genexpr')
+ binding = False
+
+ child_attrs = LambdaNode.child_attrs + ["call_parameters"]
+ subexprs = LambdaNode.subexprs + ["call_parameters"]
+
+ def __init__(self, pos, *args, **kwds):
+ super(GeneratorExpressionNode, self).__init__(pos, *args, **kwds)
+ self.call_parameters = []
+
+ def analyse_declarations(self, env):
+ if hasattr(self, "genexpr_name"):
+ # this if-statement makes it safe to run twice
+ return
+ self.genexpr_name = env.next_id('genexpr')
+ super(GeneratorExpressionNode, self).analyse_declarations(env)
+ # No pymethdef required
+ self.def_node.pymethdef_required = False
+ self.def_node.py_wrapper_required = False
+ self.def_node.is_cyfunction = False
+ # Force genexpr signature
+ self.def_node.entry.signature = TypeSlots.pyfunction_noargs
+ # setup loop scope
+ if isinstance(self.loop, Nodes._ForInStatNode):
+ assert isinstance(self.loop.iterator, ScopedExprNode)
+ self.loop.iterator.init_scope(None, env)
+ else:
+ assert isinstance(self.loop, Nodes.ForFromStatNode)
+
+ def generate_result_code(self, code):
+ args_to_call = ([self.closure_result_code()] +
+ [ cp.result() for cp in self.call_parameters ])
+ args_to_call = ", ".join(args_to_call)
+ code.putln(
+ '%s = %s(%s); %s' % (
+ self.result(),
+ self.def_node.entry.pyfunc_cname,
+ args_to_call,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class YieldExprNode(ExprNode):
+ # Yield expression node
+ #
+ # arg ExprNode the value to return from the generator
+ # label_num integer yield label number
+ # is_yield_from boolean this is a YieldFromExprNode delegating to another generator
+
+ subexprs = ['arg']
+ type = py_object_type
+ label_num = 0
+ is_yield_from = False
+ is_await = False
+ in_async_gen = False
+ expr_keyword = 'yield'
+
+ def analyse_types(self, env):
+ if not self.label_num or (self.is_yield_from and self.in_async_gen):
+ error(self.pos, "'%s' not supported here" % self.expr_keyword)
+ self.is_temp = 1
+ if self.arg is not None:
+ self.arg = self.arg.analyse_types(env)
+ if not self.arg.type.is_pyobject:
+ self.coerce_yield_argument(env)
+ return self
+
+ def coerce_yield_argument(self, env):
+ self.arg = self.arg.coerce_to_pyobject(env)
+
+ def generate_evaluation_code(self, code):
+ if self.arg:
+ self.arg.generate_evaluation_code(code)
+ self.arg.make_owned_reference(code)
+ code.putln(
+ "%s = %s;" % (
+ Naming.retval_cname,
+ self.arg.result_as(py_object_type)))
+ self.arg.generate_post_assignment_code(code)
+ self.arg.free_temps(code)
+ else:
+ code.put_init_to_py_none(Naming.retval_cname, py_object_type)
+ self.generate_yield_code(code)
+
+ def generate_yield_code(self, code):
+ """
+ Generate the code to return the argument in 'Naming.retval_cname'
+ and to continue at the yield label.
+ """
+ label_num, label_name = code.new_yield_label(
+ self.expr_keyword.replace(' ', '_'))
+ code.use_label(label_name)
+
+ saved = []
+ code.funcstate.closure_temps.reset()
+ for cname, type, manage_ref in code.funcstate.temps_in_use():
+ save_cname = code.funcstate.closure_temps.allocate_temp(type)
+ saved.append((cname, save_cname, type))
+ if type.is_cpp_class:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp"))
+ cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % cname
+ else:
+ code.put_xgiveref(cname, type)
+ code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
+
+ code.put_xgiveref(Naming.retval_cname, py_object_type)
+ profile = code.globalstate.directives['profile']
+ linetrace = code.globalstate.directives['linetrace']
+ if profile or linetrace:
+ code.put_trace_return(Naming.retval_cname,
+ nogil=not code.funcstate.gil_owned)
+ code.put_finish_refcount_context()
+
+ if code.funcstate.current_except is not None:
+ # inside of an except block => save away currently handled exception
+ code.putln("__Pyx_Coroutine_SwapException(%s);" % Naming.generator_cname)
+ else:
+ # no exceptions being handled => restore exception state of caller
+ code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
+
+ code.putln("/* return from %sgenerator, %sing value */" % (
+ 'async ' if self.in_async_gen else '',
+ 'await' if self.is_await else 'yield'))
+ code.putln("%s->resume_label = %d;" % (
+ Naming.generator_cname, label_num))
+ if self.in_async_gen and not self.is_await:
+ # __Pyx__PyAsyncGenValueWrapperNew() steals a reference to the return value
+ code.putln("return __Pyx__PyAsyncGenValueWrapperNew(%s);" % Naming.retval_cname)
+ else:
+ code.putln("return %s;" % Naming.retval_cname)
+
+ code.put_label(label_name)
+ for cname, save_cname, type in saved:
+ save_cname = "%s->%s" % (Naming.cur_scope_cname, save_cname)
+ if type.is_cpp_class:
+ save_cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % save_cname
+ code.putln('%s = %s;' % (cname, save_cname))
+ if type.is_pyobject:
+ code.putln('%s = 0;' % save_cname)
+ code.put_xgotref(cname, type)
+ elif type.is_memoryviewslice:
+ code.putln('%s.memview = NULL; %s.data = NULL;' % (save_cname, save_cname))
+ self.generate_sent_value_handling_code(code, Naming.sent_value_cname)
+ if self.result_is_used:
+ self.allocate_temp_result(code)
+ code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
+ code.put_incref(self.result(), py_object_type)
+
+ def generate_sent_value_handling_code(self, code, value_cname):
+ code.putln(code.error_goto_if_null(value_cname, self.pos))
+
+
+class _YieldDelegationExprNode(YieldExprNode):
+ def yield_from_func(self, code):
+ raise NotImplementedError()
+
+ def generate_evaluation_code(self, code, source_cname=None, decref_source=False):
+ if source_cname is None:
+ self.arg.generate_evaluation_code(code)
+ code.putln("%s = %s(%s, %s);" % (
+ Naming.retval_cname,
+ self.yield_from_func(code),
+ Naming.generator_cname,
+ self.arg.py_result() if source_cname is None else source_cname))
+ if source_cname is None:
+ self.arg.generate_disposal_code(code)
+ self.arg.free_temps(code)
+ elif decref_source:
+ code.put_decref_clear(source_cname, py_object_type)
+ code.put_xgotref(Naming.retval_cname, py_object_type)
+
+ code.putln("if (likely(%s)) {" % Naming.retval_cname)
+ self.generate_yield_code(code)
+ code.putln("} else {")
+ # either error or sub-generator has normally terminated: return value => node result
+ if self.result_is_used:
+ self.fetch_iteration_result(code)
+ else:
+ self.handle_iteration_exception(code)
+ code.putln("}")
+
+ def fetch_iteration_result(self, code):
+ # YieldExprNode has allocated the result temp for us
+ code.putln("%s = NULL;" % self.result())
+ code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result())
+ self.generate_gotref(code)
+
+ def handle_iteration_exception(self, code):
+ code.putln("PyObject* exc_type = __Pyx_PyErr_CurrentExceptionType();")
+ code.putln("if (exc_type) {")
+ code.putln("if (likely(exc_type == PyExc_StopIteration || (exc_type != PyExc_GeneratorExit &&"
+ " __Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))) PyErr_Clear();")
+ code.putln("else %s" % code.error_goto(self.pos))
+ code.putln("}")
+
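+ # Editor's note (not from the source): generate_evaluation_code() above
+ # implements the 'yield from'/'await' handshake: if the delegated
+ # generator produced a value (retval is non-NULL), it is passed on to our
+ # caller via generate_yield_code(); otherwise the sub-generator has
+ # finished (or failed), and its StopIteration value becomes the result of
+ # the expression (fetch_iteration_result).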
+
+class YieldFromExprNode(_YieldDelegationExprNode):
+ # "yield from GEN" expression
+ is_yield_from = True
+ expr_keyword = 'yield from'
+
+ def coerce_yield_argument(self, env):
+ if not self.arg.type.is_string:
+ # FIXME: support C arrays and C++ iterators?
+ error(self.pos, "yielding from non-Python object not supported")
+ self.arg = self.arg.coerce_to_pyobject(env)
+
+ def yield_from_func(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c"))
+ return "__Pyx_Generator_Yield_From"
+
+
+class AwaitExprNode(_YieldDelegationExprNode):
+ # 'await' expression node
+ #
+ # arg ExprNode the Awaitable value to await
+ # label_num integer yield label number
+
+ is_await = True
+ expr_keyword = 'await'
+
+ def coerce_yield_argument(self, env):
+ if self.arg is not None:
+ # FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ?
+ self.arg = self.arg.coerce_to_pyobject(env)
+
+ def yield_from_func(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c"))
+ return "__Pyx_Coroutine_Yield_From"
+
+
+class AwaitIterNextExprNode(AwaitExprNode):
+ # 'await' expression node as part of 'async for' iteration
+ #
+ # Breaks out of loop on StopAsyncIteration exception.
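+ #
+ # Editor's note: this is the implicitly awaited __anext__() result in
+ # "async for x in agen: ...", where the end of iteration is signalled by
+ # StopAsyncIteration instead of StopIteration.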
+
+ def _generate_break(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
+ code.putln("PyObject* exc_type = __Pyx_PyErr_CurrentExceptionType();")
+ code.putln("if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || ("
+ " exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit &&"
+ " __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {")
+ code.putln("PyErr_Clear();")
+ code.putln("break;")
+ code.putln("}")
+
+ def fetch_iteration_result(self, code):
+ assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
+ self._generate_break(code)
+ super(AwaitIterNextExprNode, self).fetch_iteration_result(code)
+
+ def generate_sent_value_handling_code(self, code, value_cname):
+ assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
+ code.putln("if (unlikely(!%s)) {" % value_cname)
+ self._generate_break(code)
+ # all non-break exceptions are errors, as in parent class
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+
+
+class GlobalsExprNode(AtomicExprNode):
+ type = dict_type
+ is_temp = 1
+
+ def analyse_types(self, env):
+ env.use_utility_code(Builtin.globals_utility_code)
+ return self
+
+ gil_message = "Constructing globals dict"
+
+ def may_be_none(self):
+ return False
+
+ def generate_result_code(self, code):
+ code.putln('%s = __Pyx_Globals(); %s' % (
+ self.result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+
+class LocalsDictItemNode(DictItemNode):
+ def analyse_types(self, env):
+ self.key = self.key.analyse_types(env)
+ self.value = self.value.analyse_types(env)
+ self.key = self.key.coerce_to_pyobject(env)
+ if self.value.type.can_coerce_to_pyobject(env):
+ self.value = self.value.coerce_to_pyobject(env)
+ else:
+ self.value = None
+ return self
+
+
+class FuncLocalsExprNode(DictNode):
+ def __init__(self, pos, env):
+ local_vars = sorted([
+ entry.name for entry in env.entries.values() if entry.name])
+ items = [LocalsDictItemNode(
+ pos, key=IdentifierStringNode(pos, value=var),
+ value=NameNode(pos, name=var, allow_null=True))
+ for var in local_vars]
+ DictNode.__init__(self, pos, key_value_pairs=items,
+ exclude_null_values=True)
+
+ def analyse_types(self, env):
+ node = super(FuncLocalsExprNode, self).analyse_types(env)
+ node.key_value_pairs = [ i for i in node.key_value_pairs
+ if i.value is not None ]
+ return node
+
+
+class PyClassLocalsExprNode(AtomicExprNode):
+ def __init__(self, pos, pyclass_dict):
+ AtomicExprNode.__init__(self, pos)
+ self.pyclass_dict = pyclass_dict
+
+ def analyse_types(self, env):
+ self.type = self.pyclass_dict.type
+ self.is_temp = False
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def result(self):
+ return self.pyclass_dict.result()
+
+ def generate_result_code(self, code):
+ pass
+
+
+def LocalsExprNode(pos, scope_node, env):
+ if env.is_module_scope:
+ return GlobalsExprNode(pos)
+ if env.is_py_class_scope:
+ return PyClassLocalsExprNode(pos, scope_node.dict)
+ return FuncLocalsExprNode(pos, env)
+
+
+#-------------------------------------------------------------------
+#
+# Unary operator nodes
+#
+#-------------------------------------------------------------------
+
+compile_time_unary_operators = {
+ 'not': operator.not_,
+ '~': operator.inv,
+ '-': operator.neg,
+ '+': operator.pos,
+}
+
+class UnopNode(ExprNode):
+ # operator string
+ # operand ExprNode
+ #
+ # Processing during analyse_expressions phase:
+ #
+ # analyse_c_operation
+ # Called when the operand is not a pyobject.
+ # - Check operand type and coerce if needed.
+ # - Determine result type and result code fragment.
+ # - Allocate temporary for result if needed.
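+ #
+ # Editor's sketch (not from the source): for an expression like "-x",
+ # analyse_types() below dispatches roughly as
+ # x is a Python object -> PyNumber_Negative(x) (is_py_operation)
+ # x is a C++ class -> C++ operator lookup (is_cpp_operation)
+ # x is a C numeric type -> plain C "-x" (analyse_c_operation)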
+
+ subexprs = ['operand']
+ infix = True
+ is_inc_dec_op = False
+
+ def calculate_constant_result(self):
+ func = compile_time_unary_operators[self.operator]
+ self.constant_result = func(self.operand.constant_result)
+
+ def compile_time_value(self, denv):
+ func = compile_time_unary_operators.get(self.operator)
+ if not func:
+ error(self.pos,
+ "Unary '%s' not supported in compile-time expression"
+ % self.operator)
+ operand = self.operand.compile_time_value(denv)
+ try:
+ return func(operand)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def infer_type(self, env):
+ operand_type = self.operand.infer_type(env)
+ if operand_type.is_cpp_class or operand_type.is_ptr:
+ cpp_type = operand_type.find_cpp_operation_type(self.operator)
+ if cpp_type is not None:
+ return cpp_type
+ return self.infer_unop_type(env, operand_type)
+
+ def infer_unop_type(self, env, operand_type):
+ if operand_type.is_pyobject:
+ return py_object_type
+ else:
+ return operand_type
+
+ def may_be_none(self):
+ if self.operand.type and self.operand.type.is_builtin_type:
+ if self.operand.type is not type_type:
+ return False
+ return ExprNode.may_be_none(self)
+
+ def analyse_types(self, env):
+ self.operand = self.operand.analyse_types(env)
+ if self.is_pythran_operation(env):
+ self.type = PythranExpr(pythran_unaryop_type(self.operator, self.operand.type))
+ self.is_temp = 1
+ elif self.is_py_operation():
+ self.coerce_operand_to_pyobject(env)
+ self.type = py_object_type
+ self.is_temp = 1
+ elif self.is_cpp_operation():
+ self.analyse_cpp_operation(env)
+ else:
+ self.analyse_c_operation(env)
+ return self
+
+ def check_const(self):
+ return self.operand.check_const()
+
+ def is_py_operation(self):
+ return self.operand.type.is_pyobject or self.operand.type.is_ctuple
+
+ def is_pythran_operation(self, env):
+ np_pythran = has_np_pythran(env)
+ op_type = self.operand.type
+ return np_pythran and (op_type.is_buffer or op_type.is_pythran_expr)
+
+ def nogil_check(self, env):
+ if self.is_py_operation():
+ self.gil_error()
+
+ def is_cpp_operation(self):
+ type = self.operand.type
+ return type.is_cpp_class
+
+ def coerce_operand_to_pyobject(self, env):
+ self.operand = self.operand.coerce_to_pyobject(env)
+
+ def generate_result_code(self, code):
+ if self.type.is_pythran_expr:
+ code.putln("// Pythran unaryop")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
+ code.putln("new (&%s) decltype(%s){%s%s};" % (
+ self.result(),
+ self.result(),
+ self.operator,
+ self.operand.pythran_result()))
+ elif self.operand.type.is_pyobject:
+ self.generate_py_operation_code(code)
+ elif self.is_temp:
+ if self.is_cpp_operation() and self.exception_check == '+':
+ translate_cpp_exception(code, self.pos,
+ "%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+ else:
+ code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result()))
+
+ def generate_py_operation_code(self, code):
+ function = self.py_operation_function(code)
+ code.putln(
+ "%s = %s(%s); %s" % (
+ self.result(),
+ function,
+ self.operand.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+ def type_error(self):
+ if not self.operand.type.is_error:
+ error(self.pos, "Invalid operand type for '%s' (%s)" %
+ (self.operator, self.operand.type))
+ self.type = PyrexTypes.error_type
+
+ def analyse_cpp_operation(self, env, overload_check=True):
+ operand_types = [self.operand.type]
+ if self.is_inc_dec_op and not self.is_prefix:
+ operand_types.append(PyrexTypes.c_int_type)
+ entry = env.lookup_operator_for_types(self.pos, self.operator, operand_types)
+ if overload_check and not entry:
+ self.type_error()
+ return
+ if entry:
+ self.exception_check = entry.type.exception_check
+ self.exception_value = entry.type.exception_value
+ if self.exception_check == '+':
+ self.is_temp = True
+ if needs_cpp_exception_conversion(self):
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ else:
+ self.exception_check = ''
+ self.exception_value = ''
+ if self.is_inc_dec_op and not self.is_prefix:
+ cpp_type = self.operand.type.find_cpp_operation_type(
+ self.operator, operand_type=PyrexTypes.c_int_type
+ )
+ else:
+ cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
+ if overload_check and cpp_type is None:
+            error(self.pos, "'%s' operator not defined for %s" % (
+                self.operator, self.operand.type))
+ self.type_error()
+ return
+ self.type = cpp_type
+
+
+class NotNode(UnopNode):
+ # 'not' operator
+ #
+ # operand ExprNode
+ operator = '!'
+
+ type = PyrexTypes.c_bint_type
+
+ def calculate_constant_result(self):
+ self.constant_result = not self.operand.constant_result
+
+ def compile_time_value(self, denv):
+ operand = self.operand.compile_time_value(denv)
+ try:
+ return not operand
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def infer_unop_type(self, env, operand_type):
+ return PyrexTypes.c_bint_type
+
+ def analyse_types(self, env):
+ self.operand = self.operand.analyse_types(env)
+ operand_type = self.operand.type
+ if operand_type.is_cpp_class:
+ self.analyse_cpp_operation(env)
+ else:
+ self.operand = self.operand.coerce_to_boolean(env)
+ return self
+
+ def calculate_result_code(self):
+ return "(!%s)" % self.operand.result()
+
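+# Rough sketch of the C this produces (variable names assumed): for
+# "flag = not done" with C operands, calculate_result_code() above yields
+# something like
+#
+#     __pyx_v_flag = (!__pyx_v_done);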
+
+class UnaryPlusNode(UnopNode):
+ # unary '+' operator
+
+ operator = '+'
+
+ def analyse_c_operation(self, env):
+ self.type = PyrexTypes.widest_numeric_type(
+ self.operand.type, PyrexTypes.c_int_type)
+
+ def py_operation_function(self, code):
+ return "PyNumber_Positive"
+
+ def calculate_result_code(self):
+ if self.is_cpp_operation():
+ return "(+%s)" % self.operand.result()
+ else:
+ return self.operand.result()
+
+
+class UnaryMinusNode(UnopNode):
+ # unary '-' operator
+
+ operator = '-'
+
+ def analyse_c_operation(self, env):
+ if self.operand.type.is_numeric:
+ self.type = PyrexTypes.widest_numeric_type(
+ self.operand.type, PyrexTypes.c_int_type)
+ elif self.operand.type.is_enum:
+ self.type = PyrexTypes.c_int_type
+ else:
+ self.type_error()
+ if self.type.is_complex:
+ self.infix = False
+
+ def py_operation_function(self, code):
+ return "PyNumber_Negative"
+
+ def calculate_result_code(self):
+ if self.infix:
+ return "(-%s)" % self.operand.result()
+ else:
+ return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
+
+ def get_constant_c_result_code(self):
+ value = self.operand.get_constant_c_result_code()
+ if value:
+ return "(-%s)" % value
+
+
+class TildeNode(UnopNode):
+    # unary '~' operator
+
+    operator = '~'
+
+ def analyse_c_operation(self, env):
+ if self.operand.type.is_int:
+ self.type = PyrexTypes.widest_numeric_type(
+ self.operand.type, PyrexTypes.c_int_type)
+ elif self.operand.type.is_enum:
+ self.type = PyrexTypes.c_int_type
+ else:
+ self.type_error()
+
+ def py_operation_function(self, code):
+ return "PyNumber_Invert"
+
+ def calculate_result_code(self):
+ return "(~%s)" % self.operand.result()
+
+
+class CUnopNode(UnopNode):
+
+ def is_py_operation(self):
+ return False
+
+
+class DereferenceNode(CUnopNode):
+ # unary * operator
+
+ operator = '*'
+
+ def infer_unop_type(self, env, operand_type):
+ if operand_type.is_ptr:
+ return operand_type.base_type
+ else:
+ return PyrexTypes.error_type
+
+ def analyse_c_operation(self, env):
+ if self.operand.type.is_ptr:
+ if env.is_cpp:
+ self.type = PyrexTypes.CReferenceType(self.operand.type.base_type)
+ else:
+ self.type = self.operand.type.base_type
+ else:
+ self.type_error()
+
+ def calculate_result_code(self):
+ return "(*%s)" % self.operand.result()
+
+
+class DecrementIncrementNode(CUnopNode):
+ # unary ++/-- operator
+ is_inc_dec_op = True
+
+ def type_error(self):
+ if not self.operand.type.is_error:
+ if self.is_prefix:
+ error(self.pos, "No match for 'operator%s' (operand type is '%s')" %
+ (self.operator, self.operand.type))
+ else:
+ error(self.pos, "No 'operator%s(int)' declared for postfix '%s' (operand type is '%s')" %
+ (self.operator, self.operator, self.operand.type))
+ self.type = PyrexTypes.error_type
+
+ def analyse_c_operation(self, env):
+ if self.operand.type.is_numeric:
+ self.type = PyrexTypes.widest_numeric_type(
+ self.operand.type, PyrexTypes.c_int_type)
+ elif self.operand.type.is_ptr:
+ self.type = self.operand.type
+ else:
+ self.type_error()
+
+ def calculate_result_code(self):
+ if self.is_prefix:
+ return "(%s%s)" % (self.operator, self.operand.result())
+ else:
+ return "(%s%s)" % (self.operand.result(), self.operator)
+
+
+def inc_dec_constructor(is_prefix, operator):
+ return lambda pos, **kwds: DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds)
+
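+# Sketch of the intended use (assumed from the signature): the parser can
+# register per-operator node factories, e.g.
+#
+#     make_predecrement = inc_dec_constructor(True, '--')
+#     node = make_predecrement(pos, operand=expr_node)   # renders as (--expr)
+#
+# With is_prefix=False, calculate_result_code() above instead renders the
+# postfix form (expr--).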
+
+class AmpersandNode(CUnopNode):
+ # The C address-of operator.
+ #
+ # operand ExprNode
+ operator = '&'
+
+ def infer_unop_type(self, env, operand_type):
+ return PyrexTypes.c_ptr_type(operand_type)
+
+ def analyse_types(self, env):
+ self.operand = self.operand.analyse_types(env)
+ argtype = self.operand.type
+ if argtype.is_cpp_class:
+ self.analyse_cpp_operation(env, overload_check=False)
+ if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
+ if argtype.is_memoryviewslice:
+ self.error("Cannot take address of memoryview slice")
+ else:
+ self.error("Taking address of non-lvalue (type %s)" % argtype)
+ return self
+ if argtype.is_pyobject:
+ self.error("Cannot take address of Python %s" % (
+ "variable '%s'" % self.operand.name if self.operand.is_name else
+ "object attribute '%s'" % self.operand.attribute if self.operand.is_attribute else
+ "object"))
+ return self
+ if not argtype.is_cpp_class or not self.type:
+ self.type = PyrexTypes.c_ptr_type(argtype)
+ return self
+
+ def check_const(self):
+ return self.operand.check_const_addr()
+
+ def error(self, mess):
+ error(self.pos, mess)
+ self.type = PyrexTypes.error_type
+ self.result_code = ""
+
+ def calculate_result_code(self):
+ return "(&%s)" % self.operand.result()
+
+ def generate_result_code(self, code):
+ if (self.operand.type.is_cpp_class and self.exception_check == '+'):
+ translate_cpp_exception(code, self.pos,
+ "%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+
+
+unop_node_classes = {
+ "+": UnaryPlusNode,
+ "-": UnaryMinusNode,
+ "~": TildeNode,
+}
+
+def unop_node(pos, operator, operand):
+    # Construct a unop node of the appropriate class for the given operator.
+    if isinstance(operand, IntNode) and operator == '-':
+        return IntNode(pos=operand.pos, value=str(-Utils.str_to_number(operand.value)),
+                       longness=operand.longness, unsigned=operand.unsigned)
+    elif isinstance(operand, UnopNode) and operand.operator == operator and operator in '+-':
+        warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
+    return unop_node_classes[operator](pos,
+        operator=operator,
+        operand=operand)
+
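+# For illustration (hypothetical input): folding turns "-5" into a single
+# IntNode rather than UnaryMinusNode(IntNode), roughly
+#
+#     node = unop_node(pos, '-', IntNode(pos, value="5"))   # IntNode("-5")
+#
+# while "--x" merely warns (Python has no decrement operator) and nests two
+# UnaryMinusNodes that evaluate back to x.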
+
+class TypecastNode(ExprNode):
+ # C type cast
+ #
+ # operand ExprNode
+ # base_type CBaseTypeNode
+ # declarator CDeclaratorNode
+ # typecheck boolean
+ #
+    # If used from a transform, one can specify the attribute "type"
+    # directly and leave base_type and declarator set to None.
+
+ subexprs = ['operand']
+ base_type = declarator = type = None
+
+ def type_dependencies(self, env):
+ return ()
+
+ def infer_type(self, env):
+ if self.type is None:
+ base_type = self.base_type.analyse(env)
+ _, self.type = self.declarator.analyse(base_type, env)
+ return self.type
+
+ def analyse_types(self, env):
+ if self.type is None:
+ base_type = self.base_type.analyse(env)
+ _, self.type = self.declarator.analyse(base_type, env)
+ if self.operand.has_constant_result():
+ # Must be done after self.type is resolved.
+ self.calculate_constant_result()
+ if self.type.is_cfunction:
+ error(self.pos,
+ "Cannot cast to a function type")
+ self.type = PyrexTypes.error_type
+ self.operand = self.operand.analyse_types(env)
+ if self.type is PyrexTypes.c_bint_type:
+ # short circuit this to a coercion
+ return self.operand.coerce_to_boolean(env)
+ to_py = self.type.is_pyobject
+ from_py = self.operand.type.is_pyobject
+ if from_py and not to_py and self.operand.is_ephemeral():
+ if not self.type.is_numeric and not self.type.is_cpp_class:
+ error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
+ if to_py and not from_py:
+ if self.type is bytes_type and self.operand.type.is_int:
+ return CoerceIntToBytesNode(self.operand, env)
+ elif self.operand.type.can_coerce_to_pyobject(env):
+ self.result_ctype = py_object_type
+ self.operand = self.operand.coerce_to(self.type, env)
+ else:
+ if self.operand.type.is_ptr:
+ if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
+ error(self.pos, "Python objects cannot be cast from pointers of primitive types")
+ else:
+ # Should this be an error?
+ warning(self.pos, "No conversion from %s to %s, python object pointer used." % (
+ self.operand.type, self.type))
+ self.operand = self.operand.coerce_to_simple(env)
+ elif from_py and not to_py:
+ if self.type.create_from_py_utility_code(env):
+ self.operand = self.operand.coerce_to(self.type, env)
+ elif self.type.is_ptr:
+ if not (self.type.base_type.is_void or self.type.base_type.is_struct):
+ error(self.pos, "Python objects cannot be cast to pointers of primitive types")
+ else:
+ warning(self.pos, "No conversion from %s to %s, python object pointer used." % (
+ self.type, self.operand.type))
+ elif from_py and to_py:
+ if self.typecheck:
+ self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
+ elif isinstance(self.operand, SliceIndexNode):
+ # This cast can influence the created type of string slices.
+ self.operand = self.operand.coerce_to(self.type, env)
+ elif self.type.is_complex and self.operand.type.is_complex:
+ self.operand = self.operand.coerce_to_simple(env)
+ elif self.operand.type.is_fused:
+ self.operand = self.operand.coerce_to(self.type, env)
+ #self.type = self.operand.type
+ if self.type.is_ptr and self.type.base_type.is_cfunction and self.type.base_type.nogil:
+ op_type = self.operand.type
+ if op_type.is_ptr:
+ op_type = op_type.base_type
+ if op_type.is_cfunction and not op_type.nogil:
+ warning(self.pos,
+ "Casting a GIL-requiring function into a nogil function circumvents GIL validation", 1)
+ return self
+
+ def is_simple(self):
+ # either temp or a C cast => no side effects other than the operand's
+ return self.operand.is_simple()
+
+ def is_ephemeral(self):
+ # either temp or a C cast => no side effects other than the operand's
+ return self.operand.is_ephemeral()
+
+ def nonlocally_immutable(self):
+ return self.is_temp or self.operand.nonlocally_immutable()
+
+ def nogil_check(self, env):
+ if self.type and self.type.is_pyobject and self.is_temp:
+ self.gil_error()
+
+ def check_const(self):
+ return self.operand.check_const()
+
+ def calculate_constant_result(self):
+ self.constant_result = self.calculate_result_code(self.operand.constant_result)
+
+ def calculate_result_code(self, operand_result = None):
+ if operand_result is None:
+ operand_result = self.operand.result()
+ if self.type.is_complex:
+ operand_result = self.operand.result()
+ if self.operand.type.is_complex:
+ real_part = self.type.real_type.cast_code(
+ self.operand.type.real_code(operand_result))
+ imag_part = self.type.real_type.cast_code(
+ self.operand.type.imag_code(operand_result))
+ else:
+ real_part = self.type.real_type.cast_code(operand_result)
+ imag_part = "0"
+ return "%s(%s, %s)" % (
+ self.type.from_parts,
+ real_part,
+ imag_part)
+ else:
+ return self.type.cast_code(operand_result)
+
+ def get_constant_c_result_code(self):
+ operand_result = self.operand.get_constant_c_result_code()
+ if operand_result:
+ return self.type.cast_code(operand_result)
+
+ def result_as(self, type):
+ if self.type.is_pyobject and not self.is_temp:
+ # Optimise away some unnecessary casting
+ return self.operand.result_as(type)
+ else:
+ return ExprNode.result_as(self, type)
+
+ def generate_result_code(self, code):
+ if self.is_temp:
+ code.putln(
+ "%s = (PyObject *)%s;" % (
+ self.result(),
+ self.operand.result()))
+ code.put_incref(self.result(), self.ctype())
+
+
+ERR_START = "Start may not be given"
+ERR_NOT_STOP = "Stop must be provided to indicate shape"
+ERR_STEPS = ("Strides may only be given to indicate contiguity. "
+ "Consider slicing it after conversion")
+ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
+ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
+
+
+class CythonArrayNode(ExprNode):
+ """
+ Used when a pointer of base_type is cast to a memoryviewslice with that
+ base type. i.e.
+
+        <int[:M:1, :N]> p
+
+ creates a fortran-contiguous cython.array.
+
+    We leave the type set to object so coercions to object are more efficient
+    and require less work. Acquiring a memoryviewslice from this will be just as
+ efficient. ExprNode.coerce_to() will do the additional typecheck on
+ self.compile_time_type
+
+    This also handles <int[:, :]> my_c_array
+
+
+ operand ExprNode the thing we're casting
+ base_type_node MemoryViewSliceTypeNode the cast expression node
+ """
+
+ subexprs = ['operand', 'shapes']
+
+ shapes = None
+ is_temp = True
+ mode = "c"
+ array_dtype = None
+
+ shape_type = PyrexTypes.c_py_ssize_t_type
+
+ def analyse_types(self, env):
+ from . import MemoryView
+
+ self.operand = self.operand.analyse_types(env)
+ if self.array_dtype:
+ array_dtype = self.array_dtype
+ else:
+ array_dtype = self.base_type_node.base_type_node.analyse(env)
+ axes = self.base_type_node.axes
+
+ self.type = error_type
+ self.shapes = []
+ ndim = len(axes)
+
+ # Base type of the pointer or C array we are converting
+ base_type = self.operand.type
+
+ if not self.operand.type.is_ptr and not self.operand.type.is_array:
+ error(self.operand.pos, ERR_NOT_POINTER)
+ return self
+
+ # Dimension sizes of C array
+ array_dimension_sizes = []
+ if base_type.is_array:
+ while base_type.is_array:
+ array_dimension_sizes.append(base_type.size)
+ base_type = base_type.base_type
+ elif base_type.is_ptr:
+ base_type = base_type.base_type
+ else:
+ error(self.pos, "unexpected base type %s found" % base_type)
+ return self
+
+ if not (base_type.same_as(array_dtype) or base_type.is_void):
+ error(self.operand.pos, ERR_BASE_TYPE)
+ return self
+ elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
+ error(self.operand.pos,
+ "Expected %d dimensions, array has %d dimensions" %
+ (ndim, len(array_dimension_sizes)))
+ return self
+
+ # Verify the start, stop and step values
+ # In case of a C array, use the size of C array in each dimension to
+ # get an automatic cast
+ for axis_no, axis in enumerate(axes):
+ if not axis.start.is_none:
+ error(axis.start.pos, ERR_START)
+ return self
+
+ if axis.stop.is_none:
+ if array_dimension_sizes:
+ dimsize = array_dimension_sizes[axis_no]
+ axis.stop = IntNode(self.pos, value=str(dimsize),
+ constant_result=dimsize,
+ type=PyrexTypes.c_int_type)
+ else:
+ error(axis.pos, ERR_NOT_STOP)
+ return self
+
+ axis.stop = axis.stop.analyse_types(env)
+ shape = axis.stop.coerce_to(self.shape_type, env)
+ if not shape.is_literal:
+ shape.coerce_to_temp(env)
+
+ self.shapes.append(shape)
+
+ first_or_last = axis_no in (0, ndim - 1)
+ if not axis.step.is_none and first_or_last:
+ # '1' in the first or last dimension denotes F or C contiguity
+ axis.step = axis.step.analyse_types(env)
+ if (not axis.step.type.is_int and axis.step.is_literal and not
+ axis.step.type.is_error):
+ error(axis.step.pos, "Expected an integer literal")
+ return self
+
+ if axis.step.compile_time_value(env) != 1:
+ error(axis.step.pos, ERR_STEPS)
+ return self
+
+ if axis_no == 0:
+ self.mode = "fortran"
+
+ elif not axis.step.is_none and not first_or_last:
+ # step provided in some other dimension
+ error(axis.step.pos, ERR_STEPS)
+ return self
+
+ if not self.operand.is_name:
+ self.operand = self.operand.coerce_to_temp(env)
+
+ axes = [('direct', 'follow')] * len(axes)
+ if self.mode == "fortran":
+ axes[0] = ('direct', 'contig')
+ else:
+ axes[-1] = ('direct', 'contig')
+
+ self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
+ self.coercion_type.validate_memslice_dtype(self.pos)
+ self.type = self.get_cython_array_type(env)
+ MemoryView.use_cython_array_utility_code(env)
+ env.use_utility_code(MemoryView.typeinfo_to_format_code)
+ return self
+
+ def allocate_temp_result(self, code):
+ if self.temp_code:
+ raise RuntimeError("temp allocated multiple times")
+
+ self.temp_code = code.funcstate.allocate_temp(self.type, True)
+
+ def infer_type(self, env):
+ return self.get_cython_array_type(env)
+
+ def get_cython_array_type(self, env):
+ cython_scope = env.global_scope().context.cython_scope
+ cython_scope.load_cythonscope()
+ return cython_scope.viewscope.lookup("array").type
+
+ def generate_result_code(self, code):
+ from . import Buffer
+
+ shapes = [self.shape_type.cast_code(shape.result())
+ for shape in self.shapes]
+ dtype = self.coercion_type.dtype
+
+ shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
+ format_temp = code.funcstate.allocate_temp(py_object_type, True)
+
+ itemsize = "sizeof(%s)" % dtype.empty_declaration_code()
+ type_info = Buffer.get_type_information_cname(code, dtype)
+
+ if self.operand.type.is_ptr:
+ code.putln("if (!%s) {" % self.operand.result())
+            code.putln('PyErr_SetString(PyExc_ValueError,'
+                       ' "Cannot create cython.array from NULL pointer");')
+ code.putln(code.error_goto(self.operand.pos))
+ code.putln("}")
+
+ code.putln("%s = __pyx_format_from_typeinfo(&%s); %s" % (
+ format_temp,
+ type_info,
+ code.error_goto_if_null(format_temp, self.pos),
+ ))
+ code.put_gotref(format_temp, py_object_type)
+
+ buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
+ code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s); %s' % (
+ shapes_temp,
+ buildvalue_fmt,
+ ", ".join(shapes),
+ code.error_goto_if_null(shapes_temp, self.pos),
+ ))
+ code.put_gotref(shapes_temp, py_object_type)
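+        # For a 2-d slice, the Py_BuildValue call above emits C roughly like
+        # (temp names assumed):
+        #
+        #     __pyx_t_2 = Py_BuildValue((char*) "("
+        #         __PYX_BUILD_PY_SSIZE_T __PYX_BUILD_PY_SSIZE_T ")", shape0, shape1);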
+
+ code.putln('%s = __pyx_array_new(%s, %s, PyBytes_AS_STRING(%s), (char *) "%s", (char *) %s); %s' % (
+ self.result(),
+ shapes_temp, itemsize, format_temp, self.mode, self.operand.result(),
+ code.error_goto_if_null(self.result(), self.pos),
+ ))
+ self.generate_gotref(code)
+
+ def dispose(temp):
+ code.put_decref_clear(temp, py_object_type)
+ code.funcstate.release_temp(temp)
+
+ dispose(shapes_temp)
+ dispose(format_temp)
+
+ @classmethod
+ def from_carray(cls, src_node, env):
+ """
+ Given a C array type, return a CythonArrayNode
+ """
+ pos = src_node.pos
+ base_type = src_node.type
+
+ none_node = NoneNode(pos)
+ axes = []
+
+ while base_type.is_array:
+ axes.append(SliceNode(pos, start=none_node, stop=none_node,
+ step=none_node))
+ base_type = base_type.base_type
+ axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
+
+ memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
+ base_type_node=base_type)
+ result = CythonArrayNode(pos, base_type_node=memslicenode,
+ operand=src_node, array_dtype=base_type)
+ result = result.analyse_types(env)
+ return result
+
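+# An illustrative sketch of what from_carray() above builds (types assumed):
+# for "cdef int arr[3][4]" it synthesizes slice axes whose last step is the
+# literal 1, i.e. roughly the node tree for <int[:3, :4:1]> arr, which
+# analyse_types() then turns into a C-contiguous cython.array.
+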
+class SizeofNode(ExprNode):
+ # Abstract base class for sizeof(x) expression nodes.
+
+ type = PyrexTypes.c_size_t_type
+
+ def check_const(self):
+ return True
+
+ def generate_result_code(self, code):
+ pass
+
+
+class SizeofTypeNode(SizeofNode):
+ # C sizeof function applied to a type
+ #
+ # base_type CBaseTypeNode
+ # declarator CDeclaratorNode
+
+ subexprs = []
+ arg_type = None
+
+ def analyse_types(self, env):
+ # we may have incorrectly interpreted a dotted name as a type rather than an attribute
+ # this could be better handled by more uniformly treating types as runtime-available objects
+ if 0 and self.base_type.module_path:
+ path = self.base_type.module_path
+ obj = env.lookup(path[0])
+ if obj.as_module is None:
+ operand = NameNode(pos=self.pos, name=path[0])
+ for attr in path[1:]:
+ operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
+ operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
+ node = SizeofVarNode(self.pos, operand=operand).analyse_types(env)
+ return node
+ if self.arg_type is None:
+ base_type = self.base_type.analyse(env)
+ _, arg_type = self.declarator.analyse(base_type, env)
+ self.arg_type = arg_type
+ self.check_type()
+ return self
+
+ def check_type(self):
+ arg_type = self.arg_type
+ if not arg_type:
+ return
+ if arg_type.is_pyobject and not arg_type.is_extension_type:
+ error(self.pos, "Cannot take sizeof Python object")
+ elif arg_type.is_void:
+ error(self.pos, "Cannot take sizeof void")
+ elif not arg_type.is_complete():
+ error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
+
+ def calculate_result_code(self):
+ if self.arg_type.is_extension_type:
+ # the size of the pointer is boring
+ # we want the size of the actual struct
+ arg_code = self.arg_type.declaration_code("", deref=1)
+ else:
+ arg_code = self.arg_type.empty_declaration_code()
+ return "(sizeof(%s))" % arg_code
+
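+# Example of the C emitted above (names assumed): for an extension type the
+# pointer is dereferenced so that the object struct itself is measured,
+# yielding roughly (sizeof(struct __pyx_obj_MyExtType)), while a plain C type
+# renders as (sizeof(int)), (sizeof(double *)), etc.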
+
+class SizeofVarNode(SizeofNode):
+ # C sizeof function applied to a variable
+ #
+ # operand ExprNode
+
+ subexprs = ['operand']
+
+ def analyse_types(self, env):
+ # We may actually be looking at a type rather than a variable...
+ # If we are, traditional analysis would fail...
+ operand_as_type = self.operand.analyse_as_type(env)
+ if operand_as_type:
+ self.arg_type = operand_as_type
+ if self.arg_type.is_fused:
+ try:
+ self.arg_type = self.arg_type.specialize(env.fused_to_specific)
+ except CannotSpecialize:
+ error(self.operand.pos,
+ "Type cannot be specialized since it is not a fused argument to this function")
+ self.__class__ = SizeofTypeNode
+ self.check_type()
+ else:
+ self.operand = self.operand.analyse_types(env)
+ return self
+
+ def calculate_result_code(self):
+ return "(sizeof(%s))" % self.operand.result()
+
+ def generate_result_code(self, code):
+ pass
+
+
+class TypeidNode(ExprNode):
+ # C++ typeid operator applied to a type or variable
+ #
+ # operand ExprNode
+ # arg_type ExprNode
+ # is_variable boolean
+
+ subexprs = ['operand']
+
+ arg_type = None
+ is_variable = None
+ is_temp = 1
+
+ def get_type_info_type(self, env):
+ env_module = env
+ while not env_module.is_module_scope:
+ env_module = env_module.outer_scope
+ typeinfo_module = env_module.find_module('libcpp.typeinfo', self.pos)
+ typeinfo_entry = typeinfo_module.lookup('type_info')
+ return PyrexTypes.CFakeReferenceType(PyrexTypes.c_const_type(typeinfo_entry.type))
+
+ cpp_message = 'typeid operator'
+
+ def analyse_types(self, env):
+ if not self.type:
+ self.type = PyrexTypes.error_type # default value if it isn't analysed successfully
+ self.cpp_check(env)
+ type_info = self.get_type_info_type(env)
+ if not type_info:
+ self.error("The 'libcpp.typeinfo' module must be cimported to use the typeid() operator")
+ return self
+ if self.operand is None:
+ return self # already analysed, no need to repeat
+ self.type = type_info
+ as_type = self.operand.analyse_as_specialized_type(env)
+ if as_type:
+ self.arg_type = as_type
+ self.is_type = True
+            self.operand = None  # nothing further uses self.operand - will only cause problems if it's used in code generation
+ else:
+ self.arg_type = self.operand.analyse_types(env)
+ self.is_type = False
+            self.operand = None  # nothing further uses self.operand - will only cause problems if it's used in code generation
+ if self.arg_type.type.is_pyobject:
+ self.error("Cannot use typeid on a Python object")
+ return self
+ elif self.arg_type.type.is_void:
+ self.error("Cannot use typeid on void")
+ return self
+ elif not self.arg_type.type.is_complete():
+ self.error("Cannot use typeid on incomplete type '%s'" % self.arg_type.type)
+ return self
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ return self
+
+ def error(self, mess):
+ error(self.pos, mess)
+ self.type = PyrexTypes.error_type
+ self.result_code = ""
+
+ def check_const(self):
+ return True
+
+ def calculate_result_code(self):
+ return self.temp_code
+
+ def generate_result_code(self, code):
+ if self.is_type:
+ arg_code = self.arg_type.empty_declaration_code()
+ else:
+ arg_code = self.arg_type.result()
+ translate_cpp_exception(code, self.pos,
+ "%s = typeid(%s);" % (self.temp_code, arg_code),
+ None, None, self.in_nogil_context)
+
+
+class TypeofNode(ExprNode):
+ # Compile-time type of an expression, as a string.
+ #
+ # operand ExprNode
+ # literal StringNode # internal
+
+ literal = None
+ type = py_object_type
+
+ subexprs = ['literal'] # 'operand' will be ignored after type analysis!
+
+ def analyse_types(self, env):
+ self.operand = self.operand.analyse_types(env)
+        value = StringEncoding.EncodedString(str(self.operand.type))  # or: self.operand.type.typeof_name()
+ literal = StringNode(self.pos, value=value)
+ literal = literal.analyse_types(env)
+ self.literal = literal.coerce_to_pyobject(env)
+ return self
+
+ def analyse_as_type(self, env):
+ self.operand = self.operand.analyse_types(env)
+ return self.operand.type
+
+ def may_be_none(self):
+ return False
+
+ def generate_evaluation_code(self, code):
+ self.literal.generate_evaluation_code(code)
+
+ def calculate_result_code(self):
+ return self.literal.calculate_result_code()
+
+#-------------------------------------------------------------------
+#
+# Binary operator nodes
+#
+#-------------------------------------------------------------------
+
+try:
+ matmul_operator = operator.matmul
+except AttributeError:
+    def matmul_operator(a, b):
+        try:
+            func = a.__matmul__
+        except AttributeError:
+            return b.__rmatmul__(a)
+        return func(b)
+
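+# A hedged usage sketch of the fallback above (hypothetical class):
+#
+#     class M(object):
+#         def __matmul__(self, other):
+#             return ('matmul', other)
+#
+#     matmul_operator(M(), 42)   # -> ('matmul', 42)
+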
+compile_time_binary_operators = {
+ '<': operator.lt,
+ '<=': operator.le,
+ '==': operator.eq,
+ '!=': operator.ne,
+ '>=': operator.ge,
+ '>': operator.gt,
+ 'is': operator.is_,
+ 'is_not': operator.is_not,
+ '+': operator.add,
+ '&': operator.and_,
+ '/': operator.truediv,
+ '//': operator.floordiv,
+ '<<': operator.lshift,
+ '%': operator.mod,
+ '*': operator.mul,
+ '|': operator.or_,
+ '**': operator.pow,
+ '>>': operator.rshift,
+ '-': operator.sub,
+ '^': operator.xor,
+ '@': matmul_operator,
+ 'in': lambda x, seq: x in seq,
+ 'not_in': lambda x, seq: x not in seq,
+}
+
+def get_compile_time_binop(node):
+ func = compile_time_binary_operators.get(node.operator)
+ if not func:
+ error(node.pos,
+ "Binary '%s' not supported in compile-time expression"
+ % node.operator)
+ return func
+
+
+class BinopNode(ExprNode):
+ # operator string
+ # operand1 ExprNode
+ # operand2 ExprNode
+ #
+ # Processing during analyse_expressions phase:
+ #
+ # analyse_c_operation
+ # Called when neither operand is a pyobject.
+ # - Check operand types and coerce if needed.
+ # - Determine result type and result code fragment.
+ # - Allocate temporary for result if needed.
+
+ subexprs = ['operand1', 'operand2']
+ inplace = False
+
+ def calculate_constant_result(self):
+ func = compile_time_binary_operators[self.operator]
+ self.constant_result = func(
+ self.operand1.constant_result,
+ self.operand2.constant_result)
+
+ def compile_time_value(self, denv):
+ func = get_compile_time_binop(self)
+ operand1 = self.operand1.compile_time_value(denv)
+ operand2 = self.operand2.compile_time_value(denv)
+ try:
+ return func(operand1, operand2)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def infer_type(self, env):
+ return self.result_type(self.operand1.infer_type(env),
+ self.operand2.infer_type(env), env)
+
+ def analyse_types(self, env):
+ self.operand1 = self.operand1.analyse_types(env)
+ self.operand2 = self.operand2.analyse_types(env)
+ self.analyse_operation(env)
+ return self
+
+ def analyse_operation(self, env):
+ if self.is_pythran_operation(env):
+ self.type = self.result_type(self.operand1.type,
+ self.operand2.type, env)
+ assert self.type.is_pythran_expr
+ self.is_temp = 1
+ elif self.is_py_operation():
+ self.coerce_operands_to_pyobjects(env)
+ self.type = self.result_type(self.operand1.type,
+ self.operand2.type, env)
+ assert self.type.is_pyobject
+ self.is_temp = 1
+ elif self.is_cpp_operation():
+ self.analyse_cpp_operation(env)
+ else:
+ self.analyse_c_operation(env)
+
+ def is_py_operation(self):
+ return self.is_py_operation_types(self.operand1.type, self.operand2.type)
+
+ def is_py_operation_types(self, type1, type2):
+ return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple
+
+ def is_pythran_operation(self, env):
+ return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env)
+
+ def is_pythran_operation_types(self, type1, type2, env):
+ # Support only expr op supported_type, or supported_type op expr
+ return has_np_pythran(env) and \
+ (is_pythran_supported_operation_type(type1) and is_pythran_supported_operation_type(type2)) and \
+ (is_pythran_expr(type1) or is_pythran_expr(type2))
+
+ def is_cpp_operation(self):
+ return (self.operand1.type.is_cpp_class
+ or self.operand2.type.is_cpp_class)
+
+ def analyse_cpp_operation(self, env):
+ entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
+ if not entry:
+ self.type_error()
+ return
+ func_type = entry.type
+ self.exception_check = func_type.exception_check
+ self.exception_value = func_type.exception_value
+ if self.exception_check == '+':
+ # Used by NumBinopNodes to break up expressions involving multiple
+ # operators so that exceptions can be handled properly.
+ self.is_temp = 1
+ if needs_cpp_exception_conversion(self):
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ if len(func_type.args) == 1:
+ self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
+ else:
+ self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
+ self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
+ self.type = func_type.return_type
+
+ def result_type(self, type1, type2, env):
+ if self.is_pythran_operation_types(type1, type2, env):
+ return PythranExpr(pythran_binop_type(self.operator, type1, type2))
+ if self.is_py_operation_types(type1, type2):
+ if type2.is_string:
+ type2 = Builtin.bytes_type
+ elif type2.is_pyunicode_ptr:
+ type2 = Builtin.unicode_type
+ if type1.is_string:
+ type1 = Builtin.bytes_type
+ elif type1.is_pyunicode_ptr:
+ type1 = Builtin.unicode_type
+ if type1.is_builtin_type or type2.is_builtin_type:
+ if type1 is type2 and self.operator in '**%+|&^':
+ # FIXME: at least these operators should be safe - others?
+ return type1
+ result_type = self.infer_builtin_types_operation(type1, type2)
+ if result_type is not None:
+ return result_type
+ return py_object_type
+ elif type1.is_error or type2.is_error:
+ return PyrexTypes.error_type
+ else:
+ return self.compute_c_result_type(type1, type2)
+
+ def infer_builtin_types_operation(self, type1, type2):
+ return None
+
+ def nogil_check(self, env):
+ if self.is_py_operation():
+ self.gil_error()
+
+ def coerce_operands_to_pyobjects(self, env):
+ self.operand1 = self.operand1.coerce_to_pyobject(env)
+ self.operand2 = self.operand2.coerce_to_pyobject(env)
+
+ def check_const(self):
+ return self.operand1.check_const() and self.operand2.check_const()
+
+ def is_ephemeral(self):
+ return (super(BinopNode, self).is_ephemeral() or
+ self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
+
+ def generate_result_code(self, code):
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ if self.type.is_pythran_expr:
+ code.putln("// Pythran binop")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
+ if self.operator == '**':
+ code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::power{}(%s, %s)};" % (
+ self.result(),
+ self.result(),
+ self.operand1.pythran_result(),
+ self.operand2.pythran_result()))
+ else:
+ code.putln("new (&%s) decltype(%s){%s %s %s};" % (
+ self.result(),
+ self.result(),
+ self.operand1.pythran_result(),
+ self.operator,
+ self.operand2.pythran_result()))
+ elif type1.is_pyobject or type2.is_pyobject:
+ function = self.py_operation_function(code)
+ extra_args = ", Py_None" if self.operator == '**' else ""
+ op1_result = self.operand1.py_result() if type1.is_pyobject else self.operand1.result()
+ op2_result = self.operand2.py_result() if type2.is_pyobject else self.operand2.result()
+ code.putln(
+ "%s = %s(%s, %s%s); %s" % (
+ self.result(),
+ function,
+ op1_result,
+ op2_result,
+ extra_args,
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+ elif self.is_temp:
+ # C++ overloaded operators with exception values are currently all
+ # handled through temporaries.
+ if self.is_cpp_operation() and self.exception_check == '+':
+ translate_cpp_exception(code, self.pos,
+ "%s = %s;" % (self.result(), self.calculate_result_code()),
+ self.result() if self.type.is_pyobject else None,
+ self.exception_value, self.in_nogil_context)
+ else:
+ code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
+
+ def type_error(self):
+ if not (self.operand1.type.is_error
+ or self.operand2.type.is_error):
+ error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
+ (self.operator, self.operand1.type,
+ self.operand2.type))
+ self.type = PyrexTypes.error_type
+
+
+class CBinopNode(BinopNode):
+
+ def analyse_types(self, env):
+ node = BinopNode.analyse_types(self, env)
+ if node.is_py_operation():
+ node.type = PyrexTypes.error_type
+ return node
+
+ def py_operation_function(self, code):
+ return ""
+
+ def calculate_result_code(self):
+ return "(%s %s %s)" % (
+ self.operand1.result(),
+ self.operator,
+ self.operand2.result())
+
+ def compute_c_result_type(self, type1, type2):
+ cpp_type = None
+ if type1.is_cpp_class or type1.is_ptr:
+ cpp_type = type1.find_cpp_operation_type(self.operator, type2)
+ if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
+ cpp_type = type2.find_cpp_operation_type(self.operator, type1)
+ # FIXME: do we need to handle other cases here?
+ return cpp_type
+
+
+def c_binop_constructor(operator):
+ def make_binop_node(pos, **operands):
+ return CBinopNode(pos, operator=operator, **operands)
+ return make_binop_node
+
+
+class NumBinopNode(BinopNode):
+ # Binary operation taking numeric arguments.
+
+ infix = True
+ overflow_check = False
+ overflow_bit_node = None
+
+ def analyse_c_operation(self, env):
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ self.type = self.compute_c_result_type(type1, type2)
+ if not self.type:
+ self.type_error()
+ return
+ if self.type.is_complex:
+ self.infix = False
+ if (self.type.is_int
+ and env.directives['overflowcheck']
+ and self.operator in self.overflow_op_names):
+ if (self.operator in ('+', '*')
+ and self.operand1.has_constant_result()
+ and not self.operand2.has_constant_result()):
+ self.operand1, self.operand2 = self.operand2, self.operand1
+ self.overflow_check = True
+ self.overflow_fold = env.directives['overflowcheck.fold']
+ self.func = self.type.overflow_check_binop(
+ self.overflow_op_names[self.operator],
+ env,
+ const_rhs = self.operand2.has_constant_result())
+ self.is_temp = True
+ if not self.infix or (type1.is_numeric and type2.is_numeric):
+ self.operand1 = self.operand1.coerce_to(self.type, env)
+ self.operand2 = self.operand2.coerce_to(self.type, env)
+
+ def compute_c_result_type(self, type1, type2):
+ if self.c_types_okay(type1, type2):
+ widest_type = PyrexTypes.widest_numeric_type(type1, type2)
+ if widest_type is PyrexTypes.c_bint_type:
+ if self.operator not in '|^&':
+ # False + False == 0 # not False!
+ widest_type = PyrexTypes.c_int_type
+ else:
+ widest_type = PyrexTypes.widest_numeric_type(
+ widest_type, PyrexTypes.c_int_type)
+ return widest_type
+ else:
+ return None
+
+ def may_be_none(self):
+ if self.type and self.type.is_builtin_type:
+ # if we know the result type, we know the operation, so it can't be None
+ return False
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
+ # XXX: I can't think of any case where a binary operation
+ # on builtin types evaluates to None - add a special case
+ # here if there is one.
+ return False
+ return super(NumBinopNode, self).may_be_none()
+
+ def get_constant_c_result_code(self):
+ value1 = self.operand1.get_constant_c_result_code()
+ value2 = self.operand2.get_constant_c_result_code()
+ if value1 and value2:
+ return "(%s %s %s)" % (value1, self.operator, value2)
+ else:
+ return None
+
+ def c_types_okay(self, type1, type2):
+ #print "NumBinopNode.c_types_okay:", type1, type2 ###
+ return (type1.is_numeric or type1.is_enum) \
+ and (type2.is_numeric or type2.is_enum)
+
+ def generate_evaluation_code(self, code):
+ if self.overflow_check:
+ self.overflow_bit_node = self
+ self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = 0;" % self.overflow_bit)
+ super(NumBinopNode, self).generate_evaluation_code(code)
+ if self.overflow_check:
+ code.putln("if (unlikely(%s)) {" % self.overflow_bit)
+ code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ code.funcstate.release_temp(self.overflow_bit)
+
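+    # Rough shape of the emitted C for an overflow-checked "c = a + b"
+    # (helper and temp names assumed):
+    #
+    #     __pyx_t_1 = 0;
+    #     __pyx_v_c = __Pyx_add_int_checking_overflow(__pyx_v_a, __pyx_v_b, &__pyx_t_1);
+    #     if (unlikely(__pyx_t_1)) {
+    #         PyErr_SetString(PyExc_OverflowError, "value too large");
+    #         /* jump to the error label */
+    #     }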
+ def calculate_result_code(self):
+ if self.overflow_bit_node is not None:
+ return "%s(%s, %s, &%s)" % (
+ self.func,
+ self.operand1.result(),
+ self.operand2.result(),
+ self.overflow_bit_node.overflow_bit)
+ elif self.type.is_cpp_class or self.infix:
+ if is_pythran_expr(self.type):
+ result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result()
+ else:
+ result1, result2 = self.operand1.result(), self.operand2.result()
+ return "(%s %s %s)" % (result1, self.operator, result2)
+ else:
+ func = self.type.binary_op(self.operator)
+ if func is None:
+ error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
+ return "%s(%s, %s)" % (
+ func,
+ self.operand1.result(),
+ self.operand2.result())
+
+ def is_py_operation_types(self, type1, type2):
+ return (type1.is_unicode_char or
+ type2.is_unicode_char or
+ BinopNode.is_py_operation_types(self, type1, type2))
+
+ def py_operation_function(self, code):
+ function_name = self.py_functions[self.operator]
+ if self.inplace:
+ function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
+ return function_name
+
+ py_functions = {
+ "|": "PyNumber_Or",
+ "^": "PyNumber_Xor",
+ "&": "PyNumber_And",
+ "<<": "PyNumber_Lshift",
+ ">>": "PyNumber_Rshift",
+ "+": "PyNumber_Add",
+ "-": "PyNumber_Subtract",
+ "*": "PyNumber_Multiply",
+ "@": "__Pyx_PyNumber_MatrixMultiply",
+ "/": "__Pyx_PyNumber_Divide",
+ "//": "PyNumber_FloorDivide",
+ "%": "PyNumber_Remainder",
+ "**": "PyNumber_Power",
+ }
+
+ overflow_op_names = {
+ "+": "add",
+ "-": "sub",
+ "*": "mul",
+ "<<": "lshift",
+ }
+
+
+class IntBinopNode(NumBinopNode):
+ # Binary operation taking integer arguments.
+
+ def c_types_okay(self, type1, type2):
+ #print "IntBinopNode.c_types_okay:", type1, type2 ###
+ return (type1.is_int or type1.is_enum) \
+ and (type2.is_int or type2.is_enum)
+
+
+class AddNode(NumBinopNode):
+ # '+' operator.
+
+ def is_py_operation_types(self, type1, type2):
+ if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
+ return 1
+ else:
+ return NumBinopNode.is_py_operation_types(self, type1, type2)
+
+ def infer_builtin_types_operation(self, type1, type2):
+ # b'abc' + 'abc' raises an exception in Py3,
+ # so we can safely infer the Py2 type for bytes here
+ string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
+ if type1 in string_types and type2 in string_types:
+ return string_types[max(string_types.index(type1),
+ string_types.index(type2))]
+ return None
+
+ def compute_c_result_type(self, type1, type2):
+ #print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
+ if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
+ return type1
+ elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
+ return type2
+ else:
+ return NumBinopNode.compute_c_result_type(
+ self, type1, type2)
+
+ def py_operation_function(self, code):
+ type1, type2 = self.operand1.type, self.operand2.type
+ func = None
+ if type1 is unicode_type or type2 is unicode_type:
+ if type1 in (unicode_type, str_type) and type2 in (unicode_type, str_type):
+ is_unicode_concat = True
+ elif isinstance(self.operand1, FormattedValueNode) or isinstance(self.operand2, FormattedValueNode):
+ # Assume that even if we don't know the second type, it's going to be a string.
+ is_unicode_concat = True
+ else:
+ # Operation depends on the second type.
+ is_unicode_concat = False
+
+ if is_unicode_concat:
+ if self.inplace or self.operand1.is_temp:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("UnicodeConcatInPlace", "ObjectHandling.c"))
+ func = '__Pyx_PyUnicode_Concat'
+ elif type1 is str_type and type2 is str_type:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StrConcatInPlace", "ObjectHandling.c"))
+ func = '__Pyx_PyStr_Concat'
+
+ if func:
+            # any necessary utility code will be loaded by "NumberAdd" in generate_evaluation_code
+ if self.inplace or self.operand1.is_temp:
+ func += 'InPlace' # upper case to indicate unintuitive macro
+ if self.operand1.may_be_none() or self.operand2.may_be_none():
+ func += 'Safe'
+ return func
+
+ return super(AddNode, self).py_operation_function(code)
+
+
+class SubNode(NumBinopNode):
+ # '-' operator.
+
+ def compute_c_result_type(self, type1, type2):
+ if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
+ return type1
+ elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array):
+ return PyrexTypes.c_ptrdiff_t_type
+ else:
+ return NumBinopNode.compute_c_result_type(
+ self, type1, type2)
+
+
+class MulNode(NumBinopNode):
+ # '*' operator.
+ is_sequence_mul = False
+
+ def analyse_types(self, env):
+ self.operand1 = self.operand1.analyse_types(env)
+ self.operand2 = self.operand2.analyse_types(env)
+ self.is_sequence_mul = self.calculate_is_sequence_mul()
+
+ # TODO: we could also optimise the case of "[...] * 2 * n", i.e. with an existing 'mult_factor'
+ if self.is_sequence_mul:
+ operand1 = self.operand1
+ operand2 = self.operand2
+ if operand1.is_sequence_constructor and operand1.mult_factor is None:
+ return self.analyse_sequence_mul(env, operand1, operand2)
+ elif operand2.is_sequence_constructor and operand2.mult_factor is None:
+ return self.analyse_sequence_mul(env, operand2, operand1)
+
+ self.analyse_operation(env)
+ return self
+
+ @staticmethod
+ def is_builtin_seqmul_type(type):
+ return type.is_builtin_type and type in builtin_sequence_types and type is not memoryview_type
+
+ def calculate_is_sequence_mul(self):
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ if type1 is long_type or type1.is_int:
+ # normalise to (X * int)
+ type1, type2 = type2, type1
+ if type2 is long_type or type2.is_int:
+ if type1.is_string or type1.is_ctuple:
+ return True
+ if self.is_builtin_seqmul_type(type1):
+ return True
+ return False
+
+ def analyse_sequence_mul(self, env, seq, mult):
+ assert seq.mult_factor is None
+ seq = seq.coerce_to_pyobject(env)
+ seq.mult_factor = mult
+ return seq.analyse_types(env)
+
+ def coerce_operands_to_pyobjects(self, env):
+ if self.is_sequence_mul:
+ # Keep operands as they are, but ctuples must become Python tuples to multiply them.
+ if self.operand1.type.is_ctuple:
+ self.operand1 = self.operand1.coerce_to_pyobject(env)
+ elif self.operand2.type.is_ctuple:
+ self.operand2 = self.operand2.coerce_to_pyobject(env)
+ return
+ super(MulNode, self).coerce_operands_to_pyobjects(env)
+
+ def is_py_operation_types(self, type1, type2):
+ return self.is_sequence_mul or super(MulNode, self).is_py_operation_types(type1, type2)
+
+ def py_operation_function(self, code):
+ if self.is_sequence_mul:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PySequenceMultiply", "ObjectHandling.c"))
+ return "__Pyx_PySequence_Multiply" if self.operand1.type.is_pyobject else "__Pyx_PySequence_Multiply_Left"
+ return super(MulNode, self).py_operation_function(code)
+
+ def infer_builtin_types_operation(self, type1, type2):
+ # let's assume that whatever builtin type you multiply a builtin sequence type with
+ # will either return a sequence of the same type or fail with an exception
+ if type1.is_builtin_type and type2.is_builtin_type:
+ if self.is_builtin_seqmul_type(type1):
+ return type1
+ if self.is_builtin_seqmul_type(type2):
+ return type2
+ # multiplication of containers/numbers with an integer value
+ # always (?) returns the same type
+ if type1.is_int:
+ return type2
+ if type2.is_int:
+ return type1
+ return None
+
+
+class MatMultNode(NumBinopNode):
+ # '@' operator.
+
+ def is_py_operation_types(self, type1, type2):
+ return True
+
+ def generate_evaluation_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c"))
+ super(MatMultNode, self).generate_evaluation_code(code)
+
+
+class DivNode(NumBinopNode):
+ # '/' or '//' operator.
+
+ cdivision = None
+ truedivision = None # == "unknown" if operator == '/'
+ ctruedivision = False
+ cdivision_warnings = False
+ zerodivision_check = None
+
+ def find_compile_time_binary_operator(self, op1, op2):
+ func = compile_time_binary_operators[self.operator]
+ if self.operator == '/' and self.truedivision is None:
+ # => true div for floats, floor div for integers
+ if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types):
+ func = compile_time_binary_operators['//']
+ return func
+
+ def calculate_constant_result(self):
+ op1 = self.operand1.constant_result
+ op2 = self.operand2.constant_result
+ func = self.find_compile_time_binary_operator(op1, op2)
+ self.constant_result = func(
+ self.operand1.constant_result,
+ self.operand2.constant_result)
+
+ def compile_time_value(self, denv):
+ operand1 = self.operand1.compile_time_value(denv)
+ operand2 = self.operand2.compile_time_value(denv)
+ try:
+ func = self.find_compile_time_binary_operator(
+ operand1, operand2)
+ return func(operand1, operand2)
+ except Exception as e:
+ self.compile_time_value_error(e)
+
+ def _check_truedivision(self, env):
+ if self.cdivision or env.directives['cdivision']:
+ self.ctruedivision = False
+ else:
+ self.ctruedivision = self.truedivision
+
+ def infer_type(self, env):
+ self._check_truedivision(env)
+ return self.result_type(
+ self.operand1.infer_type(env),
+ self.operand2.infer_type(env), env)
+
+ def analyse_operation(self, env):
+ self._check_truedivision(env)
+ NumBinopNode.analyse_operation(self, env)
+ if self.is_cpp_operation():
+ self.cdivision = True
+ if not self.type.is_pyobject:
+ self.zerodivision_check = (
+ self.cdivision is None and not env.directives['cdivision']
+ and (not self.operand2.has_constant_result() or
+ self.operand2.constant_result == 0))
+ if self.zerodivision_check or env.directives['cdivision_warnings']:
+ # Need to check ahead of time to warn or raise zero division error
+ self.operand1 = self.operand1.coerce_to_simple(env)
+ self.operand2 = self.operand2.coerce_to_simple(env)
+
+ def compute_c_result_type(self, type1, type2):
+ if self.operator == '/' and self.ctruedivision and not type1.is_cpp_class and not type2.is_cpp_class:
+ if not type1.is_float and not type2.is_float:
+ widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
+ widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
+ return widest_type
+ return NumBinopNode.compute_c_result_type(self, type1, type2)
+
+ def zero_division_message(self):
+ if self.type.is_int:
+ return "integer division or modulo by zero"
+ else:
+ return "float division"
+
+ def generate_evaluation_code(self, code):
+ if not self.type.is_pyobject and not self.type.is_complex:
+ if self.cdivision is None:
+ self.cdivision = (
+ code.globalstate.directives['cdivision']
+ or self.type.is_float
+ or ((self.type.is_numeric or self.type.is_enum) and not self.type.signed)
+ )
+ if not self.cdivision:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type))
+ NumBinopNode.generate_evaluation_code(self, code)
+ self.generate_div_warning_code(code)
+
+ def generate_div_warning_code(self, code):
+ in_nogil = self.in_nogil_context
+ if not self.type.is_pyobject:
+ if self.zerodivision_check:
+ if not self.infix:
+ zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
+ else:
+ zero_test = "%s == 0" % self.operand2.result()
+ code.putln("if (unlikely(%s)) {" % zero_test)
+ if in_nogil:
+ code.put_ensure_gil()
+ code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
+ if in_nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ if self.type.is_int and self.type.signed and self.operator != '%':
+ code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c"))
+ if self.operand2.type.signed == 2:
+ # explicitly signed, no runtime check needed
+ minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
+ else:
+ type_of_op2 = self.operand2.type.empty_declaration_code()
+ minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
+ type_of_op2, self.operand2.result(), type_of_op2)
+ code.putln("else if (sizeof(%s) == sizeof(long) && %s "
+ " && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
+ self.type.empty_declaration_code(),
+ minus1_check,
+ self.operand1.result()))
+ if in_nogil:
+ code.put_ensure_gil()
+ code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
+ if in_nogil:
+ code.put_release_ensured_gil()
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("CDivisionWarning", "CMath.c"))
+ code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
+ self.operand1.result(),
+ self.operand2.result()))
+ warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % {
+ 'FILENAME': Naming.filename_cname,
+ 'LINENO': Naming.lineno_cname,
+ }
+
+ if in_nogil:
+ result_code = 'result'
+ code.putln("int %s;" % result_code)
+ code.put_ensure_gil()
+ code.putln(code.set_error_info(self.pos, used=True))
+ code.putln("%s = %s;" % (result_code, warning_code))
+ code.put_release_ensured_gil()
+ else:
+ result_code = warning_code
+ code.putln(code.set_error_info(self.pos, used=True))
+
+ code.put("if (unlikely(%s)) " % result_code)
+ code.put_goto(code.error_label)
+ code.putln("}")
+
+ def calculate_result_code(self):
+ if self.type.is_complex or self.is_cpp_operation():
+ return NumBinopNode.calculate_result_code(self)
+ elif self.type.is_float and self.operator == '//':
+ return "floor(%s / %s)" % (
+ self.operand1.result(),
+ self.operand2.result())
+ elif self.truedivision or self.cdivision:
+ op1 = self.operand1.result()
+ op2 = self.operand2.result()
+ if self.truedivision:
+ if self.type != self.operand1.type:
+ op1 = self.type.cast_code(op1)
+ if self.type != self.operand2.type:
+ op2 = self.type.cast_code(op2)
+ return "(%s / %s)" % (op1, op2)
+ else:
+ return "__Pyx_div_%s(%s, %s)" % (
+ self.type.specialization_name(),
+ self.operand1.result(),
+ self.operand2.result())
+
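+# Illustrative outputs of DivNode.calculate_result_code() above (operand
+# names assumed):
+#
+#     (a / b)               # cdivision, or true division of floats
+#     floor(a / b)          # '//' on C floats
+#     __Pyx_div_int(a, b)   # helper with Python division semantics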
+
+_find_formatting_types = re.compile(
+ br"%"
+ br"(?:%|" # %%
+ br"(?:\([^)]+\))?" # %(name)
+ br"[-+#,0-9 ]*([a-z])" # %.2f etc.
+ br")").findall
+
+# These format conversion types can never trigger a Unicode string conversion in Py2.
+_safe_bytes_formats = frozenset({
+ # Excludes 's' and 'r', which can generate non-bytes strings.
+ b'd', b'i', b'o', b'u', b'x', b'X', b'e', b'E', b'f', b'F', b'g', b'G', b'c', b'b', b'a',
+})
+
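+# For example (illustrative values): b"%d-%x" % (1, 255) uses only formats in
+# _safe_bytes_formats and so cannot become a unicode string in Py2, whereas
+# b"%s" % obj can; hence 's' and 'r' are excluded above.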
+
+class ModNode(DivNode):
+ # '%' operator.
+
+ def is_py_operation_types(self, type1, type2):
+ return (type1.is_string
+ or type2.is_string
+ or NumBinopNode.is_py_operation_types(self, type1, type2))
+
+ def infer_builtin_types_operation(self, type1, type2):
+        # b'%s' % xyz raises an exception in Py3 before 3.5, so it's safe to infer the type for Py2 and later Py3 versions.
+ if type1 is unicode_type:
+            # None % xyz may be implemented by the RHS
+ if type2.is_builtin_type or not self.operand1.may_be_none():
+ return type1
+ elif type1 in (bytes_type, str_type, basestring_type):
+ if type2 is unicode_type:
+ return type2
+ elif type2.is_numeric:
+ return type1
+ elif self.operand1.is_string_literal:
+ if type1 is str_type or type1 is bytes_type:
+ if set(_find_formatting_types(self.operand1.value)) <= _safe_bytes_formats:
+ return type1
+ return basestring_type
+ elif type1 is bytes_type and not type2.is_builtin_type:
+                return None  # RHS might implement the '%' operator differently in Py3
+ else:
+ return basestring_type # either str or unicode, can't tell
+ return None
+
+ def zero_division_message(self):
+ if self.type.is_int:
+ return "integer division or modulo by zero"
+ else:
+ return "float divmod()"
+
+ def analyse_operation(self, env):
+ DivNode.analyse_operation(self, env)
+ if not self.type.is_pyobject:
+ if self.cdivision is None:
+ self.cdivision = env.directives['cdivision'] or not self.type.signed
+ if not self.cdivision and not self.type.is_int and not self.type.is_float:
+ error(self.pos, "mod operator not supported for type '%s'" % self.type)
+
+ def generate_evaluation_code(self, code):
+ if not self.type.is_pyobject and not self.cdivision:
+ if self.type.is_int:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type))
+ else: # float
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ModFloat", "CMath.c").specialize(
+ self.type, math_h_modifier=self.type.math_h_modifier))
+ # NOTE: skipping over DivNode here
+ NumBinopNode.generate_evaluation_code(self, code)
+ self.generate_div_warning_code(code)
+
+ def calculate_result_code(self):
+ if self.cdivision:
+ if self.type.is_float:
+ return "fmod%s(%s, %s)" % (
+ self.type.math_h_modifier,
+ self.operand1.result(),
+ self.operand2.result())
+ else:
+ return "(%s %% %s)" % (
+ self.operand1.result(),
+ self.operand2.result())
+ else:
+ return "__Pyx_mod_%s(%s, %s)" % (
+ self.type.specialization_name(),
+ self.operand1.result(),
+ self.operand2.result())
+
+ def py_operation_function(self, code):
+ type1, type2 = self.operand1.type, self.operand2.type
+ # ("..." % x) must call "x.__rmod__()" for string subtypes.
+ if type1 is unicode_type:
+ if self.operand1.may_be_none() or (
+ type2.is_extension_type and type2.subtype_of(type1) or
+ type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
+ return '__Pyx_PyUnicode_FormatSafe'
+ else:
+ return 'PyUnicode_Format'
+ elif type1 is str_type:
+ if self.operand1.may_be_none() or (
+ type2.is_extension_type and type2.subtype_of(type1) or
+ type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
+ return '__Pyx_PyString_FormatSafe'
+ else:
+ return '__Pyx_PyString_Format'
+ return super(ModNode, self).py_operation_function(code)
+
+
+class PowNode(NumBinopNode):
+ # '**' operator.
+
+ is_cpow = None
+ type_was_inferred = False # was the result type affected by cpow==False?
+ # Intended to allow it to be changed if the node is coerced.
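+    #
+    # With cpow=True (the Cython 0.x behaviour), "**" follows plain C
+    # semantics, e.g. "2 ** -1 == 0" for C ints.  With cpow=False, the
+    # result type is widened instead: "2 ** -1" becomes a C double and
+    # "(-2.0) ** 0.5" yields a (soft) complex value rather than NaN.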
+
+ def _check_cpow(self, env):
+ if self.is_cpow is not None:
+ return # already set
+ self.is_cpow = env.directives['cpow']
+
+ def infer_type(self, env):
+ self._check_cpow(env)
+ return super(PowNode, self).infer_type(env)
+
+ def analyse_types(self, env):
+ self._check_cpow(env)
+ return super(PowNode, self).analyse_types(env)
+
+ def analyse_c_operation(self, env):
+ NumBinopNode.analyse_c_operation(self, env)
+ if self.type.is_complex:
+ if self.type.real_type.is_float:
+ self.operand1 = self.operand1.coerce_to(self.type, env)
+ self.operand2 = self.operand2.coerce_to(self.type, env)
+ self.pow_func = self.type.binary_op('**')
+ else:
+ error(self.pos, "complex int powers not supported")
+ self.pow_func = ""
+ elif self.type.is_float:
+ self.pow_func = "pow" + self.type.math_h_modifier
+ elif self.type.is_int:
+ self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_')
+ env.use_utility_code(
+ UtilityCode.load_cached("IntPow", "CMath.c").specialize(
+ func_name=self.pow_func,
+ type=self.type.empty_declaration_code(),
+                signed=1 if self.type.signed else 0))
+ elif not self.type.is_error:
+ error(self.pos, "got unexpected types for C power operator: %s, %s" %
+ (self.operand1.type, self.operand2.type))
+
+ def compute_c_result_type(self, type1, type2):
+ from numbers import Real
+ c_result_type = None
+ op1_is_definitely_positive = (
+ self.operand1.has_constant_result()
+ and self.operand1.constant_result >= 0
+ ) or (
+ type1.is_int and type1.signed == 0 # definitely unsigned
+ )
+ type2_is_int = type2.is_int or (
+ self.operand2.has_constant_result() and
+ isinstance(self.operand2.constant_result, Real) and
+ int(self.operand2.constant_result) == self.operand2.constant_result
+ )
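+        # A non-negative base or an integral exponent keeps a real base real,
+        # e.g. "x ** 2" or "2.0 ** n"; only cases like "(-2.0) ** 0.5" can
+        # turn real operands into a complex result.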
+ needs_widening = False
+ if self.is_cpow:
+ c_result_type = super(PowNode, self).compute_c_result_type(type1, type2)
+ if not self.operand2.has_constant_result():
+ needs_widening = (
+ isinstance(self.operand2.constant_result, _py_int_types) and self.operand2.constant_result < 0
+ )
+ elif op1_is_definitely_positive or type2_is_int: # cpow==False
+ # if type2 is an integer then we can't end up going from real to complex
+ c_result_type = super(PowNode, self).compute_c_result_type(type1, type2)
+ if not self.operand2.has_constant_result():
+ needs_widening = type2.is_int and type2.signed
+ if needs_widening:
+ self.type_was_inferred = True
+ else:
+ needs_widening = (
+ isinstance(self.operand2.constant_result, _py_int_types) and self.operand2.constant_result < 0
+ )
+ elif self.c_types_okay(type1, type2):
+ # Allowable result types are double or complex double.
+ # Return the special "soft complex" type to store it as a
+ # complex number but with specialized coercions to Python
+ c_result_type = PyrexTypes.soft_complex_type
+ self.type_was_inferred = True
+ if needs_widening:
+ c_result_type = PyrexTypes.widest_numeric_type(c_result_type, PyrexTypes.c_double_type)
+ return c_result_type
+
+ def calculate_result_code(self):
+ # Work around MSVC overloading ambiguity.
+ def typecast(operand):
+ if self.type == operand.type:
+ return operand.result()
+ else:
+ return self.type.cast_code(operand.result())
+ return "%s(%s, %s)" % (
+ self.pow_func,
+ typecast(self.operand1),
+ typecast(self.operand2))
+
+ def py_operation_function(self, code):
+ if (self.type.is_pyobject and
+ self.operand1.constant_result == 2 and
+ isinstance(self.operand1.constant_result, _py_int_types) and
+ self.operand2.type is py_object_type):
+ code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
+ if self.inplace:
+ return '__Pyx_PyNumber_InPlacePowerOf2'
+ else:
+ return '__Pyx_PyNumber_PowerOf2'
+ return super(PowNode, self).py_operation_function(code)
+
+ def coerce_to(self, dst_type, env):
+ if dst_type == self.type:
+ return self
+ if (self.is_cpow is None and self.type_was_inferred and
+ (dst_type.is_float or dst_type.is_int)):
+ # if we're trying to coerce this directly to a C float or int
+ # then fall back to the cpow == True behaviour since this is
+ # almost certainly the user intent.
+ # However, ensure that the operand types are suitable C types
+ if self.type is PyrexTypes.soft_complex_type:
+ def check_types(operand, recurse=True):
+ if operand.type.is_float or operand.type.is_int:
+ return True, operand
+                if recurse and isinstance(operand, CoerceToComplexNode):
+                    ok, _ = check_types(operand.arg, recurse=False)
+                    return ok, operand.arg
+ return False, None
+ msg_detail = "a non-complex C numeric type"
+ elif dst_type.is_int:
+ def check_types(operand):
+ if operand.type.is_int:
+ return True, operand
+ else:
+ # int, int doesn't seem to involve coercion nodes
+ return False, None
+ msg_detail = "an integer C numeric type"
+ else:
+ def check_types(operand):
+ return False, None
+ check_op1, op1 = check_types(self.operand1)
+ check_op2, op2 = check_types(self.operand2)
+ if check_op1 and check_op2:
+ warning(self.pos, "Treating '**' as if 'cython.cpow(True)' since it "
+ "is directly assigned to a %s. "
+ "This is likely to be fragile and we recommend setting "
+ "'cython.cpow' explicitly." % msg_detail)
+ self.is_cpow = True
+ self.operand1 = op1
+ self.operand2 = op2
+ result = self.analyse_types(env)
+ if result.type != dst_type:
+ result = result.coerce_to(dst_type, env)
+ return result
+ return super(PowNode, self).coerce_to(dst_type, env)
+
+
+class BoolBinopNode(ExprNode):
+ """
+ Short-circuiting boolean operation.
+
+ Note that this node provides the same code generation method as
+ BoolBinopResultNode to simplify expression nesting.
+
+ operator string "and"/"or"
+ operand1 BoolBinopNode/BoolBinopResultNode left operand
+ operand2 BoolBinopNode/BoolBinopResultNode right operand
+ """
+ subexprs = ['operand1', 'operand2']
+ is_temp = True
+ operator = None
+ operand1 = None
+ operand2 = None
+
+ def infer_type(self, env):
+ type1 = self.operand1.infer_type(env)
+ type2 = self.operand2.infer_type(env)
+ return PyrexTypes.independent_spanning_type(type1, type2)
+
+ def may_be_none(self):
+ if self.operator == 'or':
+ return self.operand2.may_be_none()
+ else:
+ return self.operand1.may_be_none() or self.operand2.may_be_none()
+
+ def calculate_constant_result(self):
+ operand1 = self.operand1.constant_result
+ operand2 = self.operand2.constant_result
+ if self.operator == 'and':
+ self.constant_result = operand1 and operand2
+ else:
+ self.constant_result = operand1 or operand2
+
+ def compile_time_value(self, denv):
+ operand1 = self.operand1.compile_time_value(denv)
+ operand2 = self.operand2.compile_time_value(denv)
+ if self.operator == 'and':
+ return operand1 and operand2
+ else:
+ return operand1 or operand2
+
+ def is_ephemeral(self):
+ return self.operand1.is_ephemeral() or self.operand2.is_ephemeral()
+
+ def analyse_types(self, env):
+ # Note: we do not do any coercion here as we most likely do not know the final type anyway.
+        # We even accept setting self.type to ErrorType if the two operands have no spanning type.
+ # The coercion to the final type and to a "simple" value is left to coerce_to().
+ operand1 = self.operand1.analyse_types(env)
+ operand2 = self.operand2.analyse_types(env)
+ self.type = PyrexTypes.independent_spanning_type(
+ operand1.type, operand2.type)
+ self.operand1 = self._wrap_operand(operand1, env)
+ self.operand2 = self._wrap_operand(operand2, env)
+ return self
+
+ def _wrap_operand(self, operand, env):
+ if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)):
+ operand = BoolBinopResultNode(operand, self.type, env)
+ return operand
+
+ def wrap_operands(self, env):
+ """
+ Must get called by transforms that want to create a correct BoolBinopNode
+ after the type analysis phase.
+ """
+ self.operand1 = self._wrap_operand(self.operand1, env)
+ self.operand2 = self._wrap_operand(self.operand2, env)
+
+ def coerce_to_boolean(self, env):
+ return self.coerce_to(PyrexTypes.c_bint_type, env)
+
+ def coerce_to(self, dst_type, env):
+ operand1 = self.operand1.coerce_to(dst_type, env)
+ operand2 = self.operand2.coerce_to(dst_type, env)
+ return BoolBinopNode.from_node(
+ self, type=dst_type,
+ operator=self.operator,
+ operand1=operand1, operand2=operand2)
+
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
+ code.mark_pos(self.pos)
+
+ outer_labels = (and_label, or_label)
+ if self.operator == 'and':
+ my_label = and_label = code.new_label('next_and')
+ else:
+ my_label = or_label = code.new_label('next_or')
+ self.operand1.generate_bool_evaluation_code(
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label)
+
+ and_label, or_label = outer_labels
+
+ code.put_label(my_label)
+ self.operand2.generate_bool_evaluation_code(
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through)
+
+ def generate_evaluation_code(self, code):
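+        # Emits a chain of nested if/else blocks connected by gotos, roughly
+        # (for "x and y" coerced to bint):
+        #     <test x>; if (!x_is_true) { result = 0; goto bool_binop_done; }
+        #     <test y>; result = y_is_true;
+        #   bool_binop_done: ...
+        # where every operand writes to the same shared result temp.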
+ self.allocate_temp_result(code)
+ result_type = PyrexTypes.py_object_type if self.type.is_pyobject else self.type
+ or_label = and_label = None
+ end_label = code.new_label('bool_binop_done')
+ self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label)
+ code.put_label(end_label)
+
+ gil_message = "Truth-testing Python object"
+
+ def check_const(self):
+ return self.operand1.check_const() and self.operand2.check_const()
+
+ def generate_subexpr_disposal_code(self, code):
+ pass # nothing to do here, all done in generate_evaluation_code()
+
+ def free_subexpr_temps(self, code):
+ pass # nothing to do here, all done in generate_evaluation_code()
+
+ def generate_operand1_test(self, code):
+ # Generate code to test the truth of the first operand.
+ if self.type.is_pyobject:
+ test_result = code.funcstate.allocate_temp(
+ PyrexTypes.c_bint_type, manage_ref=False)
+ code.putln(
+ "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
+ test_result,
+ self.operand1.py_result(),
+ code.error_goto_if_neg(test_result, self.pos)))
+ else:
+ test_result = self.operand1.result()
+ return (test_result, self.type.is_pyobject)
+
+
+class BoolBinopResultNode(ExprNode):
+ """
+ Intermediate result of a short-circuiting and/or expression.
+ Tests the result for 'truthiness' and takes care of coercing the final result
+ of the overall expression to the target type.
+
+ Note that this node provides the same code generation method as
+ BoolBinopNode to simplify expression nesting.
+
+ arg ExprNode the argument to test
+ value ExprNode the coerced result value node
+ """
+
+ subexprs = ['arg', 'value']
+ is_temp = True
+ arg = None
+ value = None
+
+ def __init__(self, arg, result_type, env):
+ # using 'arg' multiple times, so it must be a simple/temp value
+ arg = arg.coerce_to_simple(env)
+ # wrap in ProxyNode, in case a transform wants to replace self.arg later
+ arg = ProxyNode(arg)
+ super(BoolBinopResultNode, self).__init__(
+ arg.pos, arg=arg, type=result_type,
+ value=CloneNode(arg).coerce_to(result_type, env))
+
+ def coerce_to_boolean(self, env):
+ return self.coerce_to(PyrexTypes.c_bint_type, env)
+
+ def coerce_to(self, dst_type, env):
+ # unwrap, coerce, rewrap
+ arg = self.arg.arg
+ if dst_type is PyrexTypes.c_bint_type:
+ arg = arg.coerce_to_boolean(env)
+ # TODO: unwrap more coercion nodes?
+ return BoolBinopResultNode(arg, dst_type, env)
+
+ def nogil_check(self, env):
+ # let's leave all errors to BoolBinopNode
+ pass
+
+ def generate_operand_test(self, code):
+        # Generate code to test the truth of the argument.
+ if self.arg.type.is_pyobject:
+ test_result = code.funcstate.allocate_temp(
+ PyrexTypes.c_bint_type, manage_ref=False)
+ code.putln(
+ "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
+ test_result,
+ self.arg.py_result(),
+ code.error_goto_if_neg(test_result, self.pos)))
+ else:
+ test_result = self.arg.result()
+ return (test_result, self.arg.type.is_pyobject)
+
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
+ code.mark_pos(self.pos)
+
+ # x => x
+ # x and ... or ... => next 'and' / 'or'
+ # False ... or x => next 'or'
+ # True and x => next 'and'
+ # True or x => True (operand)
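+        # i.e. depending on which of and_label/or_label are set, the tested
+        # value either jumps on to the next operand or becomes the result.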
+
+ self.arg.generate_evaluation_code(code)
+ if and_label or or_label:
+ test_result, uses_temp = self.generate_operand_test(code)
+ if uses_temp and (and_label and or_label):
+ # cannot become final result => free early
+ # disposal: uses_temp and (and_label and or_label)
+ self.arg.generate_disposal_code(code)
+ sense = '!' if or_label else ''
+ code.putln("if (%s%s) {" % (sense, test_result))
+ if uses_temp:
+ code.funcstate.release_temp(test_result)
+ if not uses_temp or not (and_label and or_label):
+ # disposal: (not uses_temp) or {not (and_label and or_label) [if]}
+ self.arg.generate_disposal_code(code)
+
+ if or_label and or_label != fall_through:
+ # value is false => short-circuit to next 'or'
+ code.put_goto(or_label)
+ if and_label:
+ # value is true => go to next 'and'
+ if or_label:
+ code.putln("} else {")
+ if not uses_temp:
+ # disposal: (not uses_temp) and {(and_label and or_label) [else]}
+ self.arg.generate_disposal_code(code)
+ if and_label != fall_through:
+ code.put_goto(and_label)
+
+ if not and_label or not or_label:
+ # if no next 'and' or 'or', we provide the result
+ if and_label or or_label:
+ code.putln("} else {")
+ self.value.generate_evaluation_code(code)
+ self.value.make_owned_reference(code)
+ code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type)))
+ self.value.generate_post_assignment_code(code)
+ # disposal: {not (and_label and or_label) [else]}
+ self.arg.generate_disposal_code(code)
+ self.value.free_temps(code)
+ if end_label != fall_through:
+ code.put_goto(end_label)
+
+ if and_label or or_label:
+ code.putln("}")
+ self.arg.free_temps(code)
+
+ def analyse_types(self, env):
+ return self
+
+
+class CondExprNode(ExprNode):
+ # Short-circuiting conditional expression.
+ #
+ # test ExprNode
+ # true_val ExprNode
+ # false_val ExprNode
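+    #
+    # Represents "true_val if test else false_val"; only the branch
+    # selected by 'test' is evaluated at runtime.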
+
+ true_val = None
+ false_val = None
+ is_temp = True
+
+ subexprs = ['test', 'true_val', 'false_val']
+
+ def type_dependencies(self, env):
+ return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
+
+ def infer_type(self, env):
+ return PyrexTypes.independent_spanning_type(
+ self.true_val.infer_type(env),
+ self.false_val.infer_type(env))
+
+ def calculate_constant_result(self):
+ if self.test.constant_result:
+ self.constant_result = self.true_val.constant_result
+ else:
+ self.constant_result = self.false_val.constant_result
+
+ def is_ephemeral(self):
+ return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()
+
+ def analyse_types(self, env):
+ self.test = self.test.analyse_temp_boolean_expression(env)
+ self.true_val = self.true_val.analyse_types(env)
+ self.false_val = self.false_val.analyse_types(env)
+ return self.analyse_result_type(env)
+
+ def analyse_result_type(self, env):
+ true_val_type = self.true_val.type
+ false_val_type = self.false_val.type
+ self.type = PyrexTypes.independent_spanning_type(true_val_type, false_val_type)
+
+ if self.type.is_reference:
+ self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
+ if self.type.is_pyobject:
+ self.result_ctype = py_object_type
+ elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral():
+ error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
+
+ if true_val_type.is_pyobject or false_val_type.is_pyobject or self.type.is_pyobject:
+ if true_val_type != self.type:
+ self.true_val = self.true_val.coerce_to(self.type, env)
+ if false_val_type != self.type:
+ self.false_val = self.false_val.coerce_to(self.type, env)
+
+ if self.type.is_error:
+ self.type_error()
+ return self
+
+ def coerce_to_integer(self, env):
+ if not self.true_val.type.is_int:
+ self.true_val = self.true_val.coerce_to_integer(env)
+ if not self.false_val.type.is_int:
+ self.false_val = self.false_val.coerce_to_integer(env)
+ self.result_ctype = None
+ out = self.analyse_result_type(env)
+ if not out.type.is_int:
+            # fall back to ordinary coercion since we didn't end up with the correct type
+ if out is self:
+ out = super(CondExprNode, out).coerce_to_integer(env)
+ else:
+ # I believe `analyse_result_type` always returns a CondExprNode but
+ # handle the opposite case just in case
+ out = out.coerce_to_integer(env)
+ return out
+
+ def coerce_to(self, dst_type, env):
+ if self.true_val.type != dst_type:
+ self.true_val = self.true_val.coerce_to(dst_type, env)
+ if self.false_val.type != dst_type:
+ self.false_val = self.false_val.coerce_to(dst_type, env)
+ self.result_ctype = None
+ out = self.analyse_result_type(env)
+ if out.type != dst_type:
+            # fall back to ordinary coercion since we didn't end up with the correct type
+ if out is self:
+ out = super(CondExprNode, out).coerce_to(dst_type, env)
+ else:
+ # I believe `analyse_result_type` always returns a CondExprNode but
+ # handle the opposite case just in case
+ out = out.coerce_to(dst_type, env)
+ return out
+
+ def type_error(self):
+ if not (self.true_val.type.is_error or self.false_val.type.is_error):
+ error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
+ (self.true_val.type, self.false_val.type))
+ self.type = PyrexTypes.error_type
+
+ def check_const(self):
+ return (self.test.check_const()
+ and self.true_val.check_const()
+ and self.false_val.check_const())
+
+ def generate_evaluation_code(self, code):
+ # Because subexprs may not be evaluated we can use a more optimal
+ # subexpr allocation strategy than the default, so override evaluation_code.
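+        # The generated C is essentially:
+        #     <evaluate test>
+        #     if (test) { <evaluate true_val>; result = true_val; }
+        #     else      { <evaluate false_val>; result = false_val; }
+        #     <dispose of test>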
+
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+ self.test.generate_evaluation_code(code)
+ code.putln("if (%s) {" % self.test.result())
+ self.eval_and_get(code, self.true_val)
+ code.putln("} else {")
+ self.eval_and_get(code, self.false_val)
+ code.putln("}")
+ self.test.generate_disposal_code(code)
+ self.test.free_temps(code)
+
+ def eval_and_get(self, code, expr):
+ expr.generate_evaluation_code(code)
+ if self.type.is_memoryviewslice:
+ expr.make_owned_memoryviewslice(code)
+ else:
+ expr.make_owned_reference(code)
+ code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
+ expr.generate_post_assignment_code(code)
+ expr.free_temps(code)
+
+ def generate_subexpr_disposal_code(self, code):
+ pass # done explicitly above (cleanup must separately happen within the if/else blocks)
+
+ def free_subexpr_temps(self, code):
+ pass # done explicitly above (cleanup must separately happen within the if/else blocks)
+
+
+richcmp_constants = {
+ "<" : "Py_LT",
+ "<=": "Py_LE",
+ "==": "Py_EQ",
+ "!=": "Py_NE",
+ "<>": "Py_NE",
+ ">" : "Py_GT",
+ ">=": "Py_GE",
+ # the following are faked by special compare functions
+ "in" : "Py_EQ",
+ "not_in": "Py_NE",
+}
+
+class CmpNode(object):
+ # Mixin class containing code common to PrimaryCmpNodes
+ # and CascadedCmpNodes.
+
+ special_bool_cmp_function = None
+ special_bool_cmp_utility_code = None
+ special_bool_extra_args = []
+
+ def infer_type(self, env):
+ # TODO: Actually implement this (after merging with -unstable).
+ return py_object_type
+
+ def calculate_cascaded_constant_result(self, operand1_result):
+ func = compile_time_binary_operators[self.operator]
+ operand2_result = self.operand2.constant_result
+ if (isinstance(operand1_result, any_string_type) and
+ isinstance(operand2_result, any_string_type) and
+ type(operand1_result) != type(operand2_result)):
+ # string comparison of different types isn't portable
+ return
+
+ if self.operator in ('in', 'not_in'):
+ if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
+ if not self.operand2.args:
+ self.constant_result = self.operator == 'not_in'
+ return
+ elif isinstance(self.operand2, ListNode) and not self.cascade:
+ # tuples are more efficient to store than lists
+ self.operand2 = self.operand2.as_tuple()
+ elif isinstance(self.operand2, DictNode):
+ if not self.operand2.key_value_pairs:
+ self.constant_result = self.operator == 'not_in'
+ return
+
+ self.constant_result = func(operand1_result, operand2_result)
+
+ def cascaded_compile_time_value(self, operand1, denv):
+ func = get_compile_time_binop(self)
+ operand2 = self.operand2.compile_time_value(denv)
+ try:
+ result = func(operand1, operand2)
+ except Exception as e:
+ self.compile_time_value_error(e)
+ result = None
+ if result:
+ cascade = self.cascade
+ if cascade:
+ result = result and cascade.cascaded_compile_time_value(operand2, denv)
+ return result
+
+ def is_cpp_comparison(self):
+ return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
+
+ def find_common_int_type(self, env, op, operand1, operand2):
+ # type1 != type2 and at least one of the types is not a C int
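+        # e.g. comparing a C int against the literal b'x' compares plain
+        # integer values, while two single-character literals, one of them
+        # unicode, compare as Py_UCS4 so that any code point fits.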
+ type1 = operand1.type
+ type2 = operand2.type
+ type1_can_be_int = False
+ type2_can_be_int = False
+
+ if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
+ type1_can_be_int = True
+ if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
+ type2_can_be_int = True
+
+ if type1.is_int:
+ if type2_can_be_int:
+ return type1
+ elif type2.is_int:
+ if type1_can_be_int:
+ return type2
+ elif type1_can_be_int:
+ if type2_can_be_int:
+ if Builtin.unicode_type in (type1, type2):
+ return PyrexTypes.c_py_ucs4_type
+ else:
+ return PyrexTypes.c_uchar_type
+
+ return None
+
+ def find_common_type(self, env, op, operand1, common_type=None):
+ operand2 = self.operand2
+ type1 = operand1.type
+ type2 = operand2.type
+
+ new_common_type = None
+
+ # catch general errors
+ if (type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or
+ type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type))):
+ error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
+ new_common_type = error_type
+
+ # try to use numeric comparisons where possible
+ elif type1.is_complex or type2.is_complex:
+ if (op not in ('==', '!=')
+ and (type1.is_complex or type1.is_numeric)
+ and (type2.is_complex or type2.is_numeric)):
+ error(self.pos, "complex types are unordered")
+ new_common_type = error_type
+ elif type1.is_pyobject:
+ new_common_type = Builtin.complex_type if type1.subtype_of(Builtin.complex_type) else py_object_type
+ elif type2.is_pyobject:
+ new_common_type = Builtin.complex_type if type2.subtype_of(Builtin.complex_type) else py_object_type
+ else:
+ new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
+ elif type1.is_numeric and type2.is_numeric:
+ new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
+ elif common_type is None or not common_type.is_pyobject:
+ new_common_type = self.find_common_int_type(env, op, operand1, operand2)
+
+ if new_common_type is None:
+ # fall back to generic type compatibility tests
+ if type1.is_ctuple or type2.is_ctuple:
+ new_common_type = py_object_type
+ elif type1 == type2:
+ new_common_type = type1
+ elif type1.is_pyobject or type2.is_pyobject:
+ if type2.is_numeric or type2.is_string:
+ if operand2.check_for_coercion_error(type1, env):
+ new_common_type = error_type
+ else:
+ new_common_type = py_object_type
+ elif type1.is_numeric or type1.is_string:
+ if operand1.check_for_coercion_error(type2, env):
+ new_common_type = error_type
+ else:
+ new_common_type = py_object_type
+ elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
+ new_common_type = py_object_type
+ else:
+ # one Python type and one non-Python type, not assignable
+ self.invalid_types_error(operand1, op, operand2)
+ new_common_type = error_type
+ elif type1.assignable_from(type2):
+ new_common_type = type1
+ elif type2.assignable_from(type1):
+ new_common_type = type2
+ else:
+ # C types that we couldn't handle up to here are an error
+ self.invalid_types_error(operand1, op, operand2)
+ new_common_type = error_type
+
+ if new_common_type.is_string and (isinstance(operand1, BytesNode) or
+ isinstance(operand2, BytesNode)):
+ # special case when comparing char* to bytes literal: must
+ # compare string values!
+ new_common_type = bytes_type
+
+ # recursively merge types
+ if common_type is None or new_common_type.is_error:
+ common_type = new_common_type
+ else:
+ # we could do a lot better by splitting the comparison
+ # into a non-Python part and a Python part, but this is
+ # safer for now
+ common_type = PyrexTypes.spanning_type(common_type, new_common_type)
+
+ if self.cascade:
+ common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
+
+ return common_type
+
+ def invalid_types_error(self, operand1, op, operand2):
+ error(self.pos, "Invalid types for '%s' (%s, %s)" %
+ (op, operand1.type, operand2.type))
+
+ def is_python_comparison(self):
+ return (not self.is_ptr_contains()
+ and not self.is_c_string_contains()
+ and (self.has_python_operands()
+ or (self.cascade and self.cascade.is_python_comparison())
+ or self.operator in ('in', 'not_in')))
+
+ def coerce_operands_to(self, dst_type, env):
+ operand2 = self.operand2
+ if operand2.type != dst_type:
+ self.operand2 = operand2.coerce_to(dst_type, env)
+ if self.cascade:
+ self.cascade.coerce_operands_to(dst_type, env)
+
+ def is_python_result(self):
+ return ((self.has_python_operands() and
+ self.special_bool_cmp_function is None and
+ self.operator not in ('is', 'is_not', 'in', 'not_in') and
+ not self.is_c_string_contains() and
+ not self.is_ptr_contains())
+ or (self.cascade and self.cascade.is_python_result()))
+
+ def is_c_string_contains(self):
+ return self.operator in ('in', 'not_in') and \
+ ((self.operand1.type.is_int
+ and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
+ (self.operand1.type.is_unicode_char
+ and self.operand2.type is unicode_type))
+
+ def is_ptr_contains(self):
+ if self.operator in ('in', 'not_in'):
+ container_type = self.operand2.type
+ return (container_type.is_ptr or container_type.is_array) \
+ and not container_type.is_string
+
+ def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
+ # note: currently operand1 must get coerced to a Python object if we succeed here!
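+        # e.g. "a == b" with a known unicode operand is dispatched to
+        # __Pyx_PyUnicode_Equals() instead of generic PyObject_RichCompare().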
+ if self.operator in ('==', '!='):
+ type1, type2 = operand1.type, self.operand2.type
+ if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
+ if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
+ return True
+ elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
+ return True
+ elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
+ return True
+ elif type1 is Builtin.str_type or type2 is Builtin.str_type:
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyString_Equals"
+ return True
+ elif result_is_bool:
+ from .Optimize import optimise_numeric_binop
+ result = optimise_numeric_binop(
+ "Eq" if self.operator == "==" else "Ne",
+ self,
+ PyrexTypes.c_bint_type,
+ operand1,
+ self.operand2
+ )
+ if result:
+ (self.special_bool_cmp_function,
+ self.special_bool_cmp_utility_code,
+ self.special_bool_extra_args,
+ _) = result
+ return True
+ elif self.operator in ('in', 'not_in'):
+ if self.operand2.type is Builtin.dict_type:
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
+ self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
+ return True
+ elif self.operand2.type is Builtin.set_type:
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySetContains", "ObjectHandling.c")
+ self.special_bool_cmp_function = "__Pyx_PySet_ContainsTF"
+ return True
+ elif self.operand2.type is Builtin.unicode_type:
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
+ self.special_bool_cmp_function = "__Pyx_PyUnicode_ContainsTF"
+ return True
+ else:
+ if not self.operand2.type.is_pyobject:
+ self.operand2 = self.operand2.coerce_to_pyobject(env)
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
+ self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF"
+ return True
+ return False
+
+ def generate_operation_code(self, code, result_code,
+ operand1, op, operand2):
+ if self.type.is_pyobject:
+ error_clause = code.error_goto_if_null
+ got_ref = "__Pyx_XGOTREF(%s); " % result_code
+ if self.special_bool_cmp_function:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
+ coerce_result = "__Pyx_PyBoolOrNull_FromLong"
+ else:
+ coerce_result = "__Pyx_PyBool_FromLong"
+ else:
+ error_clause = code.error_goto_if_neg
+ got_ref = ""
+ coerce_result = ""
+
+ if self.special_bool_cmp_function:
+ if operand1.type.is_pyobject:
+ result1 = operand1.py_result()
+ else:
+ result1 = operand1.result()
+ if operand2.type.is_pyobject:
+ result2 = operand2.py_result()
+ else:
+ result2 = operand2.result()
+ special_bool_extra_args_result = ", ".join([
+ extra_arg.result() for extra_arg in self.special_bool_extra_args
+ ])
+ if self.special_bool_cmp_utility_code:
+ code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
+ code.putln(
+ "%s = %s(%s(%s, %s, %s)); %s%s" % (
+ result_code,
+ coerce_result,
+ self.special_bool_cmp_function,
+ result1, result2,
+ special_bool_extra_args_result if self.special_bool_extra_args else richcmp_constants[op],
+ got_ref,
+ error_clause(result_code, self.pos)))
+
+ elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
+ assert op not in ('in', 'not_in'), op
+ assert self.type.is_pyobject or self.type is PyrexTypes.c_bint_type
+ code.putln("%s = PyObject_RichCompare%s(%s, %s, %s); %s%s" % (
+ result_code,
+ "" if self.type.is_pyobject else "Bool",
+ operand1.py_result(),
+ operand2.py_result(),
+ richcmp_constants[op],
+ got_ref,
+ error_clause(result_code, self.pos)))
+
+ elif operand1.type.is_complex:
+ code.putln("%s = %s(%s%s(%s, %s));" % (
+ result_code,
+ coerce_result,
+ op == "!=" and "!" or "",
+ operand1.type.unary_op('eq'),
+ operand1.result(),
+ operand2.result()))
+
+ else:
+ type1 = operand1.type
+ type2 = operand2.type
+ if (type1.is_extension_type or type2.is_extension_type) \
+ and not type1.same_as(type2):
+ common_type = py_object_type
+ elif type1.is_numeric:
+ common_type = PyrexTypes.widest_numeric_type(type1, type2)
+ else:
+ common_type = type1
+ code1 = operand1.result_as(common_type)
+ code2 = operand2.result_as(common_type)
+ statement = "%s = %s(%s %s %s);" % (
+ result_code,
+ coerce_result,
+ code1,
+ self.c_operator(op),
+ code2)
+ if self.is_cpp_comparison() and self.exception_check == '+':
+ translate_cpp_exception(
+ code,
+ self.pos,
+ statement,
+ result_code if self.type.is_pyobject else None,
+ self.exception_value,
+ self.in_nogil_context)
+ else:
+ code.putln(statement)
+
+ def c_operator(self, op):
+ if op == 'is':
+ return "=="
+ elif op == 'is_not':
+ return "!="
+ else:
+ return op
+
+class PrimaryCmpNode(ExprNode, CmpNode):
+ # Non-cascaded comparison or first comparison of
+ # a cascaded sequence.
+ #
+ # operator string
+ # operand1 ExprNode
+ # operand2 ExprNode
+ # cascade CascadedCmpNode
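+    #
+    # For example, "a < b < c" becomes a PrimaryCmpNode for "a < b" whose
+    # 'cascade' is a CascadedCmpNode comparing the shared operand b (only
+    # evaluated once) against c.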
+
+ # We don't use the subexprs mechanism, because
+ # things here are too complicated for it to handle.
+ # Instead, we override all the framework methods
+ # which use it.
+
+ child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade',
+ 'special_bool_extra_args']
+
+ cascade = None
+ coerced_operand2 = None
+ is_memslice_nonecheck = False
+
+ def infer_type(self, env):
+ type1 = self.operand1.infer_type(env)
+ type2 = self.operand2.infer_type(env)
+
+ if is_pythran_expr(type1) or is_pythran_expr(type2):
+ if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
+ return PythranExpr(pythran_binop_type(self.operator, type1, type2))
+
+ # TODO: implement this for other types.
+ return py_object_type
+
+ def type_dependencies(self, env):
+ return ()
+
+ def calculate_constant_result(self):
+ assert not self.cascade
+ self.calculate_cascaded_constant_result(self.operand1.constant_result)
+
+ def compile_time_value(self, denv):
+ operand1 = self.operand1.compile_time_value(denv)
+ return self.cascaded_compile_time_value(operand1, denv)
+
+ def unify_cascade_type(self):
+ cdr = self.cascade
+ while cdr:
+ cdr.type = self.type
+ cdr = cdr.cascade
+
+ def analyse_types(self, env):
+ self.operand1 = self.operand1.analyse_types(env)
+ self.operand2 = self.operand2.analyse_types(env)
+ if self.is_cpp_comparison():
+ self.analyse_cpp_comparison(env)
+ if self.cascade:
+ error(self.pos, "Cascading comparison not yet supported for cpp types.")
+ return self
+
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ if is_pythran_expr(type1) or is_pythran_expr(type2):
+ if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
+ self.type = PythranExpr(pythran_binop_type(self.operator, type1, type2))
+ self.is_pycmp = False
+ return self
+
+ if self.analyse_memoryviewslice_comparison(env):
+ return self
+
+ if self.cascade:
+ self.cascade = self.cascade.analyse_types(env)
+
+ if self.operator in ('in', 'not_in'):
+ if self.is_c_string_contains():
+ self.is_pycmp = False
+ common_type = None
+ if self.cascade:
+ error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
+ return self
+ if self.operand2.type is unicode_type:
+ env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
+ else:
+ if self.operand1.type is PyrexTypes.c_uchar_type:
+ self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
+ if self.operand2.type is not bytes_type:
+ self.operand2 = self.operand2.coerce_to(bytes_type, env)
+ env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
+ self.operand2 = self.operand2.as_none_safe_node(
+ "argument of type 'NoneType' is not iterable")
+ elif self.is_ptr_contains():
+ if self.cascade:
+ error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
+ self.type = PyrexTypes.c_bint_type
+ # Will be transformed by IterationTransform
+ return self
+ elif self.find_special_bool_compare_function(env, self.operand1):
+ if not self.operand1.type.is_pyobject:
+ self.operand1 = self.operand1.coerce_to_pyobject(env)
+ common_type = None # if coercion needed, the method call above has already done it
+ self.is_pycmp = False # result is bint
+ else:
+ common_type = py_object_type
+ self.is_pycmp = True
+ elif self.find_special_bool_compare_function(env, self.operand1):
+ if not self.operand1.type.is_pyobject:
+ self.operand1 = self.operand1.coerce_to_pyobject(env)
+ common_type = None # if coercion needed, the method call above has already done it
+ self.is_pycmp = False # result is bint
+ else:
+ common_type = self.find_common_type(env, self.operator, self.operand1)
+ self.is_pycmp = common_type.is_pyobject
+
+ if common_type is not None and not common_type.is_error:
+ if self.operand1.type != common_type:
+ self.operand1 = self.operand1.coerce_to(common_type, env)
+ self.coerce_operands_to(common_type, env)
+
+ if self.cascade:
+ self.operand2 = self.operand2.coerce_to_simple(env)
+ self.cascade.coerce_cascaded_operands_to_temp(env)
+ operand2 = self.cascade.optimise_comparison(self.operand2, env)
+ if operand2 is not self.operand2:
+ self.coerced_operand2 = operand2
+ if self.is_python_result():
+ self.type = PyrexTypes.py_object_type
+ else:
+ self.type = PyrexTypes.c_bint_type
+ self.unify_cascade_type()
+ if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
+ # 1) owned reference, 2) reused value, 3) potential function error return value
+ self.is_temp = 1
+ return self
+
+ def analyse_cpp_comparison(self, env):
+ type1 = self.operand1.type
+ type2 = self.operand2.type
+ self.is_pycmp = False
+ entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
+ if entry is None:
+ error(self.pos, "Invalid types for '%s' (%s, %s)" %
+ (self.operator, type1, type2))
+ self.type = PyrexTypes.error_type
+ self.result_code = ""
+ return
+ func_type = entry.type
+ if func_type.is_ptr:
+ func_type = func_type.base_type
+ self.exception_check = func_type.exception_check
+ self.exception_value = func_type.exception_value
+ if self.exception_check == '+':
+ self.is_temp = True
+ if needs_cpp_exception_conversion(self):
+ env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
+ if len(func_type.args) == 1:
+ self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
+ else:
+ self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
+ self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
+ self.type = func_type.return_type
+
+ def analyse_memoryviewslice_comparison(self, env):
+ have_none = self.operand1.is_none or self.operand2.is_none
+ have_slice = (self.operand1.type.is_memoryviewslice or
+ self.operand2.type.is_memoryviewslice)
+ ops = ('==', '!=', 'is', 'is_not')
+ if have_slice and have_none and self.operator in ops:
+ self.is_pycmp = False
+ self.type = PyrexTypes.c_bint_type
+ self.is_memslice_nonecheck = True
+ return True
+
+ return False
+
+ def coerce_to_boolean(self, env):
+ if self.is_pycmp:
+ # coercing to bool => may allow for more efficient comparison code
+ if self.find_special_bool_compare_function(
+ env, self.operand1, result_is_bool=True):
+ self.is_pycmp = False
+ self.type = PyrexTypes.c_bint_type
+ self.is_temp = 1
+ if self.cascade:
+ operand2 = self.cascade.optimise_comparison(
+ self.operand2, env, result_is_bool=True)
+ if operand2 is not self.operand2:
+ self.coerced_operand2 = operand2
+ self.unify_cascade_type()
+ return self
+ # TODO: check if we can optimise parts of the cascade here
+ return ExprNode.coerce_to_boolean(self, env)
+
+ def has_python_operands(self):
+ return (self.operand1.type.is_pyobject
+ or self.operand2.type.is_pyobject)
+
+ def check_const(self):
+ if self.cascade:
+ self.not_const()
+ return False
+ else:
+ return self.operand1.check_const() and self.operand2.check_const()
+
+ def calculate_result_code(self):
+ operand1, operand2 = self.operand1, self.operand2
+ if operand1.type.is_complex:
+ if self.operator == "!=":
+ negation = "!"
+ else:
+ negation = ""
+ return "(%s%s(%s, %s))" % (
+ negation,
+ operand1.type.binary_op('=='),
+ operand1.result(),
+ operand2.result())
+ elif self.is_c_string_contains():
+ if operand2.type is unicode_type:
+ method = "__Pyx_UnicodeContainsUCS4"
+ else:
+ method = "__Pyx_BytesContains"
+ if self.operator == "not_in":
+ negation = "!"
+ else:
+ negation = ""
+ return "(%s%s(%s, %s))" % (
+ negation,
+ method,
+ operand2.result(),
+ operand1.result())
+ else:
+ if is_pythran_expr(self.type):
+ result1, result2 = operand1.pythran_result(), operand2.pythran_result()
+ else:
+ result1, result2 = operand1.result(), operand2.result()
+ if self.is_memslice_nonecheck:
+ if operand1.type.is_memoryviewslice:
+ result1 = "((PyObject *) %s.memview)" % result1
+ else:
+ result2 = "((PyObject *) %s.memview)" % result2
+
+ return "(%s %s %s)" % (
+ result1,
+ self.c_operator(self.operator),
+ result2)
+
+ def generate_evaluation_code(self, code):
+ self.operand1.generate_evaluation_code(code)
+ self.operand2.generate_evaluation_code(code)
+ for extra_arg in self.special_bool_extra_args:
+ extra_arg.generate_evaluation_code(code)
+ if self.is_temp:
+ self.allocate_temp_result(code)
+ self.generate_operation_code(code, self.result(),
+ self.operand1, self.operator, self.operand2)
+ if self.cascade:
+ self.cascade.generate_evaluation_code(
+ code, self.result(), self.coerced_operand2 or self.operand2,
+ needs_evaluation=self.coerced_operand2 is not None)
+ self.operand1.generate_disposal_code(code)
+ self.operand1.free_temps(code)
+ self.operand2.generate_disposal_code(code)
+ self.operand2.free_temps(code)
+
+ def generate_subexpr_disposal_code(self, code):
+ # If this is called, it is a non-cascaded cmp,
+ # so only need to dispose of the two main operands.
+ self.operand1.generate_disposal_code(code)
+ self.operand2.generate_disposal_code(code)
+
+ def free_subexpr_temps(self, code):
+ # If this is called, it is a non-cascaded cmp,
+ # so only need to dispose of the two main operands.
+ self.operand1.free_temps(code)
+ self.operand2.free_temps(code)
+
+ def annotate(self, code):
+ self.operand1.annotate(code)
+ self.operand2.annotate(code)
+ if self.cascade:
+ self.cascade.annotate(code)
+
+
+class CascadedCmpNode(Node, CmpNode):
+ # A CascadedCmpNode is not a complete expression node. It
+ # hangs off the side of another comparison node, shares
+ # its left operand with that node, and shares its result
+ # with the PrimaryCmpNode at the head of the chain.
+ #
+ # operator string
+ # operand2 ExprNode
+ # cascade CascadedCmpNode
+
+ child_attrs = ['operand2', 'coerced_operand2', 'cascade',
+ 'special_bool_extra_args']
+
+ cascade = None
+ coerced_operand2 = None
+ constant_result = constant_value_not_set # FIXME: where to calculate this?
+
+ def infer_type(self, env):
+ # TODO: Actually implement this (after merging with -unstable).
+ return py_object_type
+
+ def type_dependencies(self, env):
+ return ()
+
+ def has_constant_result(self):
+ return self.constant_result is not constant_value_not_set and \
+ self.constant_result is not not_a_constant
+
+ def analyse_types(self, env):
+ self.operand2 = self.operand2.analyse_types(env)
+ if self.cascade:
+ self.cascade = self.cascade.analyse_types(env)
+ return self
+
+ def has_python_operands(self):
+ return self.operand2.type.is_pyobject
+
+ def is_cpp_comparison(self):
+        # cascaded comparisons aren't currently implemented for C++ classes.
+ return False
+
+ def optimise_comparison(self, operand1, env, result_is_bool=False):
+ if self.find_special_bool_compare_function(env, operand1, result_is_bool):
+ self.is_pycmp = False
+ self.type = PyrexTypes.c_bint_type
+ if not operand1.type.is_pyobject:
+ operand1 = operand1.coerce_to_pyobject(env)
+ if self.cascade:
+ operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
+ if operand2 is not self.operand2:
+ self.coerced_operand2 = operand2
+ return operand1
+
+ def coerce_operands_to_pyobjects(self, env):
+ self.operand2 = self.operand2.coerce_to_pyobject(env)
+ if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ if self.cascade:
+ self.cascade.coerce_operands_to_pyobjects(env)
+
+ def coerce_cascaded_operands_to_temp(self, env):
+ if self.cascade:
+ #self.operand2 = self.operand2.coerce_to_temp(env) #CTT
+ self.operand2 = self.operand2.coerce_to_simple(env)
+ self.cascade.coerce_cascaded_operands_to_temp(env)
+
+ def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
+ if self.type.is_pyobject:
+ code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
+ code.put_decref(result, self.type)
+ else:
+ code.putln("if (%s) {" % result)
+ if needs_evaluation:
+ operand1.generate_evaluation_code(code)
+ self.operand2.generate_evaluation_code(code)
+ for extra_arg in self.special_bool_extra_args:
+ extra_arg.generate_evaluation_code(code)
+ self.generate_operation_code(code, result,
+ operand1, self.operator, self.operand2)
+ if self.cascade:
+ self.cascade.generate_evaluation_code(
+ code, result, self.coerced_operand2 or self.operand2,
+ needs_evaluation=self.coerced_operand2 is not None)
+ if needs_evaluation:
+ operand1.generate_disposal_code(code)
+ operand1.free_temps(code)
+ # Cascaded cmp result is always temp
+ self.operand2.generate_disposal_code(code)
+ self.operand2.free_temps(code)
+ code.putln("}")
+
+ def annotate(self, code):
+ self.operand2.annotate(code)
+ if self.cascade:
+ self.cascade.annotate(code)
+
+
+binop_node_classes = {
+ "or": BoolBinopNode,
+ "and": BoolBinopNode,
+ "|": IntBinopNode,
+ "^": IntBinopNode,
+ "&": IntBinopNode,
+ "<<": IntBinopNode,
+ ">>": IntBinopNode,
+ "+": AddNode,
+ "-": SubNode,
+ "*": MulNode,
+ "@": MatMultNode,
+ "/": DivNode,
+ "//": DivNode,
+ "%": ModNode,
+ "**": PowNode,
+}
+
+
+def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
+ # Construct binop node of appropriate class for
+ # given operator.
+ return binop_node_classes[operator](
+ pos,
+ operator=operator,
+ operand1=operand1,
+ operand2=operand2,
+ inplace=inplace,
+ **kwargs)
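+# For example, binop_node(pos, '+', left, right) constructs an AddNode,
+# while binop_node(pos, 'and', left, right) yields a short-circuiting
+# BoolBinopNode.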
+
+
+#-------------------------------------------------------------------
+#
+# Coercion nodes
+#
+# Coercion nodes are special in that they are created during
+# the analyse_types phase of parse tree processing.
+# Their __init__ methods consequently incorporate some aspects
+# of that phase.
+#
+#-------------------------------------------------------------------
+
+class CoercionNode(ExprNode):
+ # Abstract base class for coercion nodes.
+ #
+ # arg ExprNode node being coerced
+
+ subexprs = ['arg']
+ constant_result = not_a_constant
+
+ def __init__(self, arg):
+ super(CoercionNode, self).__init__(arg.pos)
+ self.arg = arg
+ if debug_coercion:
+ print("%s Coercing %s" % (self, self.arg))
+
+ def calculate_constant_result(self):
+ # constant folding can break type coercion, so this is disabled
+ pass
+
+ def annotate(self, code):
+ self.arg.annotate(code)
+ if self.arg.type != self.type:
+ file, line, col = self.pos
+ code.annotate((file, line, col-1), AnnotationItem(
+ style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
+
+ def analyse_types(self, env):
+ return self
+
+
+class CoerceToMemViewSliceNode(CoercionNode):
+ """
+ Coerce an object to a memoryview slice. This holds a new reference in
+ a managed temp.
+ """
+
+ def __init__(self, arg, dst_type, env):
+ assert dst_type.is_memoryviewslice
+ assert not arg.type.is_memoryviewslice
+ CoercionNode.__init__(self, arg)
+ self.type = dst_type
+ self.is_temp = 1
+ self.use_managed_ref = True
+ self.arg = arg
+ self.type.create_from_py_utility_code(env)
+
+ def generate_result_code(self, code):
+ code.putln(self.type.from_py_call_code(
+ self.arg.py_result(),
+ self.result(),
+ self.pos,
+ code
+ ))
+
+
+class CastNode(CoercionNode):
+ # Wrap a node in a C type cast.
+
+ def __init__(self, arg, new_type):
+ CoercionNode.__init__(self, arg)
+ self.type = new_type
+
+ def may_be_none(self):
+ return self.arg.may_be_none()
+
+ def calculate_result_code(self):
+ return self.arg.result_as(self.type)
+
+ def generate_result_code(self, code):
+ self.arg.generate_result_code(code)
+
+
+class PyTypeTestNode(CoercionNode):
+ # This node is used to check that a generic Python
+ # object is an instance of a particular extension type.
+ # This node borrows the result of its argument node.
+
+ exact_builtin_type = True
+
+ def __init__(self, arg, dst_type, env, notnone=False):
+ # The arg is known to be a Python object, and
+ # the dst_type is known to be an extension type.
+ assert dst_type.is_extension_type or dst_type.is_builtin_type, \
+ "PyTypeTest for %s against non extension type %s" % (arg.type, dst_type)
+ CoercionNode.__init__(self, arg)
+ self.type = dst_type
+ self.result_ctype = arg.ctype()
+ self.notnone = notnone
+
+ nogil_check = Node.gil_error
+ gil_message = "Python type test"
+
+ def analyse_types(self, env):
+ return self
+
+ def may_be_none(self):
+ if self.notnone:
+ return False
+ return self.arg.may_be_none()
+
+ def is_simple(self):
+ return self.arg.is_simple()
+
+ def result_in_temp(self):
+ return self.arg.result_in_temp()
+
+ def is_ephemeral(self):
+ return self.arg.is_ephemeral()
+
+ def nonlocally_immutable(self):
+ return self.arg.nonlocally_immutable()
+
+ def reanalyse(self):
+ if self.type != self.arg.type or not self.arg.is_temp:
+ return self
+ if not self.type.typeobj_is_available():
+ return self
+ if self.arg.may_be_none() and self.notnone:
+ return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name)
+ return self.arg
+
+ def calculate_constant_result(self):
+ # FIXME
+ pass
+
+ def calculate_result_code(self):
+ return self.arg.result()
+
+ def generate_result_code(self, code):
+ if self.type.typeobj_is_available():
+ if self.type.is_builtin_type:
+ type_test = self.type.type_test_code(
+ self.arg.py_result(),
+ self.notnone, exact=self.exact_builtin_type)
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "RaiseUnexpectedTypeError", "ObjectHandling.c"))
+ else:
+ type_test = self.type.type_test_code(
+ self.arg.py_result(), self.notnone)
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
+ code.putln("if (!(%s)) %s" % (
+ type_test, code.error_goto(self.pos)))
+ else:
+ error(self.pos, "Cannot test type of extern C class "
+ "without type object name specification")
+
+ def generate_post_assignment_code(self, code):
+ self.arg.generate_post_assignment_code(code)
+
+ def allocate_temp_result(self, code):
+ pass
+
+ def release_temp_result(self, code):
+ pass
+
+ def free_temps(self, code):
+ self.arg.free_temps(code)
+
+ def free_subexpr_temps(self, code):
+ self.arg.free_subexpr_temps(code)
+
+
+class NoneCheckNode(CoercionNode):
+ # This node is used to check that a Python object is not None and
+ # raises an appropriate exception (as specified by the creating
+ # transform).
+
+ is_nonecheck = True
+
+ def __init__(self, arg, exception_type_cname, exception_message,
+ exception_format_args=()):
+ CoercionNode.__init__(self, arg)
+ self.type = arg.type
+ self.result_ctype = arg.ctype()
+ self.exception_type_cname = exception_type_cname
+ self.exception_message = exception_message
+ self.exception_format_args = tuple(exception_format_args or ())
+
+ nogil_check = None # this node only guards an operation that would fail already
+
+ def analyse_types(self, env):
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def is_simple(self):
+ return self.arg.is_simple()
+
+ def result_in_temp(self):
+ return self.arg.result_in_temp()
+
+ def nonlocally_immutable(self):
+ return self.arg.nonlocally_immutable()
+
+ def calculate_result_code(self):
+ return self.arg.result()
+
+ def condition(self):
+ if self.type.is_pyobject:
+ return self.arg.py_result()
+ elif self.type.is_memoryviewslice:
+ return "((PyObject *) %s.memview)" % self.arg.result()
+ else:
+ raise Exception("unsupported type")
+
+ @classmethod
+ def generate(cls, arg, code, exception_message,
+ exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
+ node = cls(arg, exception_type_cname, exception_message, exception_format_args)
+ node.in_nogil_context = in_nogil_context
+ node.put_nonecheck(code)
+
+ @classmethod
+ def generate_if_needed(cls, arg, code, exception_message,
+ exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
+ if arg.may_be_none():
+ cls.generate(arg, code, exception_message, exception_type_cname, exception_format_args, in_nogil_context)
+
+ def put_nonecheck(self, code):
+ code.putln(
+ "if (unlikely(%s == Py_None)) {" % self.condition())
+
+ if self.in_nogil_context:
+ code.put_ensure_gil()
+
+ escape = StringEncoding.escape_byte_string
+ if self.exception_format_args:
+            code.putln('PyErr_Format(%s, "%s", %s);' % (
+                self.exception_type_cname,
+                escape(self.exception_message.encode('UTF-8')),
+                ', '.join(['"%s"' % escape(str(arg).encode('UTF-8'))
+                           for arg in self.exception_format_args])))
+ else:
+ code.putln('PyErr_SetString(%s, "%s");' % (
+ self.exception_type_cname,
+ escape(self.exception_message.encode('UTF-8'))))
+
+ if self.in_nogil_context:
+ code.put_release_ensured_gil()
+
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+
+ def generate_result_code(self, code):
+ self.put_nonecheck(code)
+
+ def generate_post_assignment_code(self, code):
+ self.arg.generate_post_assignment_code(code)
+
+ def free_temps(self, code):
+ self.arg.free_temps(code)
+
+
+class CoerceToPyTypeNode(CoercionNode):
+ # This node is used to convert a C data type
+ # to a Python object.
+
+ type = py_object_type
+ target_type = py_object_type
+ is_temp = 1
+
+ def __init__(self, arg, env, type=py_object_type):
+ if not arg.type.create_to_py_utility_code(env):
+ error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
+ elif arg.type.is_complex:
+ # special case: complex coercion is so complex that it
+ # uses a macro ("__pyx_PyComplex_FromComplex()"), for
+ # which the argument must be simple
+ arg = arg.coerce_to_simple(env)
+ CoercionNode.__init__(self, arg)
+ if type is py_object_type:
+ # be specific about some known types
+ if arg.type.is_string or arg.type.is_cpp_string:
+ self.type = default_str_type(env)
+ elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
+ self.type = unicode_type
+ elif arg.type.is_complex:
+ self.type = Builtin.complex_type
+ self.target_type = self.type
+ elif arg.type.is_string or arg.type.is_cpp_string:
+ if (type not in (bytes_type, bytearray_type)
+ and not env.directives['c_string_encoding']):
+ error(arg.pos,
+ "default encoding required for conversion from '%s' to '%s'" %
+ (arg.type, type))
+ self.type = self.target_type = type
+ else:
+ # FIXME: check that the target type and the resulting type are compatible
+ self.target_type = type
+
+ gil_message = "Converting to Python object"
+
+ def may_be_none(self):
+ # FIXME: is this always safe?
+ return False
+
+ def coerce_to_boolean(self, env):
+ arg_type = self.arg.type
+ if (arg_type == PyrexTypes.c_bint_type or
+ (arg_type.is_pyobject and arg_type.name == 'bool')):
+ return self.arg.coerce_to_temp(env)
+ else:
+ return CoerceToBooleanNode(self, env)
+
+ def coerce_to_integer(self, env):
+        # If not already some C integer type, coerce to C long.
+ if self.arg.type.is_int:
+ return self.arg
+ else:
+ return self.arg.coerce_to(PyrexTypes.c_long_type, env)
+
+ def analyse_types(self, env):
+ # The arg is always already analysed
+ return self
+
+ def generate_result_code(self, code):
+ code.putln('%s; %s' % (
+ self.arg.type.to_py_call_code(
+ self.arg.result(),
+ self.result(),
+ self.target_type),
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ self.generate_gotref(code)
+
+
+class CoerceIntToBytesNode(CoerceToPyTypeNode):
+ # This node is used to convert a C int type to a Python bytes
+ # object.
+
+ is_temp = 1
+
+ def __init__(self, arg, env):
+ arg = arg.coerce_to_simple(env)
+ CoercionNode.__init__(self, arg)
+ self.type = Builtin.bytes_type
+
+ def generate_result_code(self, code):
+ arg = self.arg
+ arg_result = arg.result()
+ if arg.type not in (PyrexTypes.c_char_type,
+ PyrexTypes.c_uchar_type,
+ PyrexTypes.c_schar_type):
+ if arg.type.signed:
+ code.putln("if ((%s < 0) || (%s > 255)) {" % (
+ arg_result, arg_result))
+ else:
+ code.putln("if (%s > 255) {" % arg_result)
+ code.putln('PyErr_SetString(PyExc_OverflowError, '
+ '"value too large to pack into a byte"); %s' % (
+ code.error_goto(self.pos)))
+ code.putln('}')
+ temp = None
+ if arg.type is not PyrexTypes.c_char_type:
+ temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
+ code.putln("%s = (char)%s;" % (temp, arg_result))
+ arg_result = temp
+ code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
+ self.result(),
+ arg_result,
+ code.error_goto_if_null(self.result(), self.pos)))
+ if temp is not None:
+ code.funcstate.release_temp(temp)
+ self.generate_gotref(code)
+
+
+class CoerceFromPyTypeNode(CoercionNode):
+ # This node is used to convert a Python object
+ # to a C data type.
+
+    # Allow 'None' to map to a different C value independent of the coercion, e.g. to 'NULL' or '0'.
+ special_none_cvalue = None
+
+ def __init__(self, result_type, arg, env):
+ CoercionNode.__init__(self, arg)
+ self.type = result_type
+ self.is_temp = 1
+ if not result_type.create_from_py_utility_code(env):
+ error(arg.pos,
+ "Cannot convert Python object to '%s'" % result_type)
+ if self.type.is_string or self.type.is_pyunicode_ptr:
+ if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
+ warning(arg.pos,
+ "Obtaining '%s' from externally modifiable global Python value" % result_type,
+ level=1)
+ if self.type.is_pyunicode_ptr:
+ warning(arg.pos,
+ "Py_UNICODE* has been removed in Python 3.12. This conversion to a "
+ "Py_UNICODE* will no longer compile in the latest Python versions. "
+ "Use Python C API functions like PyUnicode_AsWideCharString if you "
+ "need to obtain a wchar_t* on Windows (and free the string manually after use).",
+ level=1)
+
+ def analyse_types(self, env):
+ # The arg is always already analysed
+ return self
+
+ def is_ephemeral(self):
+ return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()
+
+ def generate_result_code(self, code):
+ from_py_function = None
+ # for certain source types, we can do better than the generic coercion
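+ # (e.g. a generic '__Pyx_PyObject_AsString' call can be narrowed to
+ # '__Pyx_PyBytes_AsString' when the argument is statically known to be bytes)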
+ if self.type.is_string and self.arg.type is bytes_type:
+ if self.type.from_py_function.startswith('__Pyx_PyObject_As'):
+ from_py_function = '__Pyx_PyBytes' + self.type.from_py_function[len('__Pyx_PyObject'):]
+ NoneCheckNode.generate_if_needed(self.arg, code, "expected bytes, NoneType found")
+
+ code.putln(self.type.from_py_call_code(
+ self.arg.py_result(), self.result(), self.pos, code,
+ from_py_function=from_py_function,
+ special_none_cvalue=self.special_none_cvalue,
+ ))
+ if self.type.is_pyobject:
+ self.generate_gotref(code)
+
+ def nogil_check(self, env):
+ error(self.pos, "Coercion from Python not allowed without the GIL")
+
+
+class CoerceToBooleanNode(CoercionNode):
+ # This node is used when a result needs to be used
+ # in a boolean context.
+
+ type = PyrexTypes.c_bint_type
+
+ _special_builtins = {
+ Builtin.list_type: 'PyList_GET_SIZE',
+ Builtin.tuple_type: 'PyTuple_GET_SIZE',
+ Builtin.set_type: 'PySet_GET_SIZE',
+ Builtin.frozenset_type: 'PySet_GET_SIZE',
+ Builtin.bytes_type: 'PyBytes_GET_SIZE',
+ Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
+ Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE',
+ }
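+ # For these builtins, generate_result_code() below emits a direct size
+ # test, roughly '(x != Py_None) && (PyBytes_GET_SIZE(x) != 0)' for bytes,
+ # instead of a generic __Pyx_PyObject_IsTrue() call.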
+
+ def __init__(self, arg, env):
+ CoercionNode.__init__(self, arg)
+ if arg.type.is_pyobject:
+ self.is_temp = 1
+
+ def nogil_check(self, env):
+ if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
+ self.gil_error()
+
+ gil_message = "Truth-testing Python object"
+
+ def check_const(self):
+ if self.is_temp:
+ self.not_const()
+ return False
+ return self.arg.check_const()
+
+ def calculate_result_code(self):
+ return "(%s != 0)" % self.arg.result()
+
+ def generate_result_code(self, code):
+ if not self.is_temp:
+ return
+ test_func = self._special_builtins.get(self.arg.type)
+ if test_func is not None:
+ checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
+ checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
+ code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
+ else:
+ code.putln(
+ "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
+ self.result(),
+ self.arg.py_result(),
+ code.error_goto_if_neg(self.result(), self.pos)))
+
+ def analyse_types(self, env):
+ return self
+
+
+class CoerceToComplexNode(CoercionNode):
+
+ def __init__(self, arg, dst_type, env):
+ if arg.type.is_complex:
+ arg = arg.coerce_to_simple(env)
+ self.type = dst_type
+ CoercionNode.__init__(self, arg)
+ dst_type.create_declaration_utility_code(env)
+
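+ # calculate_result_code() below fills in a zero imaginary part for real
+ # arguments, producing roughly '__pyx_t_double_complex_from_parts(x, 0)'
+ # for a C double 'x'.
+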
+ def calculate_result_code(self):
+ if self.arg.type.is_complex:
+ real_part = self.arg.type.real_code(self.arg.result())
+ imag_part = self.arg.type.imag_code(self.arg.result())
+ else:
+ real_part = self.arg.result()
+ imag_part = "0"
+ return "%s(%s, %s)" % (
+ self.type.from_parts,
+ real_part,
+ imag_part)
+
+ def generate_result_code(self, code):
+ pass
+
+ def analyse_types(self, env):
+ return self
+
+
+def coerce_from_soft_complex(arg, dst_type, env):
+ from .UtilNodes import HasGilNode
+ cfunc_type = PyrexTypes.CFuncType(
+ PyrexTypes.c_double_type,
+ [ PyrexTypes.CFuncTypeArg("value", PyrexTypes.soft_complex_type, None),
+ PyrexTypes.CFuncTypeArg("have_gil", PyrexTypes.c_bint_type, None) ],
+ exception_value="-1",
+ exception_check=True,
+ nogil=True # We can acquire the GIL internally on failure
+ )
+ call = PythonCapiCallNode(
+ arg.pos,
+ "__Pyx_SoftComplexToDouble",
+ cfunc_type,
+ utility_code = UtilityCode.load_cached("SoftComplexToDouble", "Complex.c"),
+ args = [arg, HasGilNode(arg.pos)],
+ )
+ call = call.analyse_types(env)
+ if call.type != dst_type:
+ call = call.coerce_to(dst_type, env)
+ return call
+
+
+class CoerceToTempNode(CoercionNode):
+ # This node is used to force the result of another node
+ # to be stored in a temporary. It is only used if the
+ # argument node's result is not already in a temporary.
+
+ def __init__(self, arg, env):
+ CoercionNode.__init__(self, arg)
+ self.type = self.arg.type.as_argument_type()
+ self.constant_result = self.arg.constant_result
+ self.is_temp = 1
+ if self.type.is_pyobject:
+ self.result_ctype = py_object_type
+
+ gil_message = "Creating temporary Python reference"
+
+ def analyse_types(self, env):
+ # The arg is always already analysed
+ return self
+
+ def may_be_none(self):
+ return self.arg.may_be_none()
+
+ def coerce_to_boolean(self, env):
+ self.arg = self.arg.coerce_to_boolean(env)
+ if self.arg.is_simple():
+ return self.arg
+ self.type = self.arg.type
+ self.result_ctype = self.type
+ return self
+
+ def generate_result_code(self, code):
+ #self.arg.generate_evaluation_code(code) # Already done
+ # by generic generate_subexpr_evaluation_code!
+ code.putln("%s = %s;" % (
+ self.result(), self.arg.result_as(self.ctype())))
+ if self.use_managed_ref:
+ if not self.type.is_memoryviewslice:
+ code.put_incref(self.result(), self.ctype())
+ else:
+ code.put_incref_memoryviewslice(self.result(), self.type,
+ have_gil=not self.in_nogil_context)
+
+
+class ProxyNode(CoercionNode):
+ """
+ A node that should not be replaced by transforms or other means,
+ and hence is useful for wrapping the argument of a CloneNode
+
+ MyNode -> ProxyNode -> ArgNode
+ CloneNode -^
+ """
+
+ nogil_check = None
+
+ def __init__(self, arg):
+ super(ProxyNode, self).__init__(arg)
+ self.constant_result = arg.constant_result
+ self.update_type_and_entry()
+
+ def analyse_types(self, env):
+ self.arg = self.arg.analyse_expressions(env)
+ self.update_type_and_entry()
+ return self
+
+ def infer_type(self, env):
+ return self.arg.infer_type(env)
+
+ def update_type_and_entry(self):
+ type = getattr(self.arg, 'type', None)
+ if type:
+ self.type = type
+ self.result_ctype = self.arg.result_ctype
+ arg_entry = getattr(self.arg, 'entry', None)
+ if arg_entry:
+ self.entry = arg_entry
+
+ def generate_result_code(self, code):
+ self.arg.generate_result_code(code)
+
+ def result(self):
+ return self.arg.result()
+
+ def is_simple(self):
+ return self.arg.is_simple()
+
+ def may_be_none(self):
+ return self.arg.may_be_none()
+
+ def generate_evaluation_code(self, code):
+ self.arg.generate_evaluation_code(code)
+
+ def generate_disposal_code(self, code):
+ self.arg.generate_disposal_code(code)
+
+ def free_temps(self, code):
+ self.arg.free_temps(code)
+
+class CloneNode(CoercionNode):
+ # This node is employed when the result of another node needs
+ # to be used multiple times. The argument node's result must
+ # be in a temporary. This node "borrows" the result from the
+ # argument node, and does not generate any evaluation or
+ # disposal code for it. The original owner of the argument
+ # node is responsible for doing those things.
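+ #
+ # AssignmentExpressionNode below is a typical user: it wraps its rhs in
+ # a ProxyNode and hands a CloneNode of it to the inner
+ # SingleAssignmentNode, so the value is evaluated once but used both as
+ # the assignment source and as the expression result.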
+
+ subexprs = [] # Arg is not considered a subexpr
+ nogil_check = None
+
+ def __init__(self, arg):
+ CoercionNode.__init__(self, arg)
+ self.constant_result = arg.constant_result
+ type = getattr(arg, 'type', None)
+ if type:
+ self.type = type
+ self.result_ctype = arg.result_ctype
+ arg_entry = getattr(arg, 'entry', None)
+ if arg_entry:
+ self.entry = arg_entry
+
+ def result(self):
+ return self.arg.result()
+
+ def may_be_none(self):
+ return self.arg.may_be_none()
+
+ def type_dependencies(self, env):
+ return self.arg.type_dependencies(env)
+
+ def infer_type(self, env):
+ return self.arg.infer_type(env)
+
+ def analyse_types(self, env):
+ self.type = self.arg.type
+ self.result_ctype = self.arg.result_ctype
+ self.is_temp = 1
+ arg_entry = getattr(self.arg, 'entry', None)
+ if arg_entry:
+ self.entry = arg_entry
+ return self
+
+ def coerce_to(self, dest_type, env):
+ if self.arg.is_literal:
+ return self.arg.coerce_to(dest_type, env)
+ return super(CloneNode, self).coerce_to(dest_type, env)
+
+ def is_simple(self):
+ return True # result is always in a temp (or a name)
+
+ def generate_evaluation_code(self, code):
+ pass
+
+ def generate_result_code(self, code):
+ pass
+
+ def generate_disposal_code(self, code):
+ pass
+
+ def generate_post_assignment_code(self, code):
+ # if we're assigning from a CloneNode then it's "giveref"ed away, so it does
+ # need a matching incref (ideally this should happen before the assignment though)
+ if self.is_temp: # should usually be true
+ code.put_incref(self.result(), self.ctype())
+
+ def free_temps(self, code):
+ pass
+
+
+class CppOptionalTempCoercion(CoercionNode):
+ """
+ Used only in CoerceCppTemps - handles cases where the temp is actually an OptionalCppClassType (and thus needs dereferencing when used on the rhs)
+ """
+ is_temp = False
+
+ @property
+ def type(self):
+ return self.arg.type
+
+ def calculate_result_code(self):
+ return "(*%s)" % self.arg.result()
+
+ def generate_result_code(self, code):
+ pass
+
+ def _make_move_result_rhs(self, result, optional=False):
+ # this wouldn't normally get moved (because it isn't a temp), but force it to be because it
+ # is a thin wrapper around a temp
+ return super(CppOptionalTempCoercion, self)._make_move_result_rhs(result, optional=False)
+
+
+class CMethodSelfCloneNode(CloneNode):
+ # Special CloneNode for the self argument of builtin C methods
+ # that accepts subtypes of the builtin type. This is safe only
+ # for 'final' subtypes, as subtypes of the declared type may
+ # override the C method.
+
+ def coerce_to(self, dst_type, env):
+ if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
+ return self
+ return CloneNode.coerce_to(self, dst_type, env)
+
+
+class ModuleRefNode(ExprNode):
+ # Simply returns the module object
+
+ type = py_object_type
+ is_temp = False
+ subexprs = []
+
+ def analyse_types(self, env):
+ return self
+
+ def may_be_none(self):
+ return False
+
+ def calculate_result_code(self):
+ return Naming.module_cname
+
+ def generate_result_code(self, code):
+ pass
+
+class DocstringRefNode(ExprNode):
+ # Extracts the docstring of the body element
+
+ subexprs = ['body']
+ type = py_object_type
+ is_temp = True
+
+ def __init__(self, pos, body):
+ ExprNode.__init__(self, pos)
+ assert body.type.is_pyobject
+ self.body = body
+
+ def analyse_types(self, env):
+ return self
+
+ def generate_result_code(self, code):
+ code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
+ self.result(), self.body.result(),
+ code.intern_identifier(StringEncoding.EncodedString("__doc__")),
+ code.error_goto_if_null(self.result(), self.pos)))
+ self.generate_gotref(code)
+
+class AnnotationNode(ExprNode):
+ # Deals with the two possible uses of an annotation.
+ # 1. The post PEP-563 use where an annotation is stored
+ # as a string
+ # 2. The Cython use where the annotation can indicate an
+ # object type
+ #
+ # Doesn't handle the pre PEP-563 version where the
+ # annotation is evaluated into a Python object.
+
+ subexprs = []
+
+ # 'untyped' is set for fused specializations:
+ # Once a fused function has been created we don't want
+ # annotations to override an already set type.
+ untyped = False
+
+ def __init__(self, pos, expr, string=None):
+ """string is expected to already be a StringNode or None"""
+ ExprNode.__init__(self, pos)
+ if string is None:
+ # deferred import: a module-level import here would likely be circular
+ from .AutoDocTransforms import AnnotationWriter
+ string = StringEncoding.EncodedString(
+ AnnotationWriter(description="annotation").write(expr))
+ string = StringNode(pos, unicode_value=string, value=string.as_utf8_string())
+ self.string = string
+ self.expr = expr
+
+ def analyse_types(self, env):
+ return self # nothing needs doing
+
+ def analyse_as_type(self, env):
+ # for compatibility when used as a return_type_node, have this interface too
+ return self.analyse_type_annotation(env)[1]
+
+ def _warn_on_unknown_annotation(self, env, annotation):
+ """Method checks for cases when user should be warned that annotation contains unknown types."""
+ if isinstance(annotation, SliceIndexNode):
+ annotation = annotation.base
+ if annotation.is_name:
+ # Validate annotation in form `var: type`
+ if not env.lookup(annotation.name):
+ warning(annotation.pos,
+ "Unknown type declaration '%s' in annotation, ignoring" % self.string.value, level=1)
+ elif annotation.is_attribute and annotation.obj.is_name:
+ # Validate annotation in form `var: module.type`
+ if not env.lookup(annotation.obj.name):
+ # `module` is undeclared
+ warning(annotation.pos,
+ "Unknown type declaration '%s' in annotation, ignoring" % self.string.value, level=1)
+ elif annotation.obj.is_cython_module:
+ # `module` is cython
+ module_scope = annotation.obj.analyse_as_module(env)
+ if module_scope and not module_scope.lookup_type(annotation.attribute):
+ error(annotation.pos,
+ "Unknown type declaration '%s' in annotation" % self.string.value)
+ else:
+ module_scope = annotation.obj.analyse_as_module(env)
+ if module_scope and module_scope.pxd_file_loaded:
+ warning(annotation.pos,
+ "Unknown type declaration '%s' in annotation, ignoring" % self.string.value, level=1)
+ else:
+ warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
+
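+ # For illustration, the deprecated annotation forms handled below look
+ # roughly like 'x: {"type": SomeType}' (explicit Python type) and
+ # 'x: {"ctype": SomeCType}' (explicit C type); both now warn in favour
+ # of direct annotations such as 'x: cython.int'.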
+ def analyse_type_annotation(self, env, assigned_value=None):
+ if self.untyped:
+ # Already applied as a fused type, not re-evaluating it here.
+ return [], None
+ annotation = self.expr
+ explicit_pytype = explicit_ctype = False
+ if annotation.is_dict_literal:
+ warning(annotation.pos,
+ "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.", level=1)
+ for name, value in annotation.key_value_pairs:
+ if not name.is_string_literal:
+ continue
+ if name.value in ('type', b'type'):
+ explicit_pytype = True
+ if not explicit_ctype:
+ annotation = value
+ elif name.value in ('ctype', b'ctype'):
+ explicit_ctype = True
+ annotation = value
+ if explicit_pytype and explicit_ctype:
+ warning(annotation.pos, "Duplicate type declarations found in signature annotation", level=1)
+ elif isinstance(annotation, TupleNode):
+ warning(annotation.pos,
+ "Tuples cannot be declared as simple tuples of types. Use 'tuple[type1, type2, ...]'.", level=1)
+ return [], None
+
+ with env.new_c_type_context(in_c_type_context=explicit_ctype):
+ arg_type = annotation.analyse_as_type(env)
+
+ if arg_type is None:
+ self._warn_on_unknown_annotation(env, annotation)
+ return [], arg_type
+
+ if annotation.is_string_literal:
+ warning(annotation.pos,
+ "Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.",
+ level=1)
+ if explicit_pytype and not explicit_ctype and not (arg_type.is_pyobject or arg_type.equivalent_type):
+ warning(annotation.pos,
+ "Python type declaration in signature annotation does not refer to a Python type")
+ if arg_type.is_complex:
+ # creating utility code needs to be special-cased for complex types
+ arg_type.create_declaration_utility_code(env)
+
+ # Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
+ modifiers = annotation.analyse_pytyping_modifiers(env) if annotation.is_subscript else []
+
+ return modifiers, arg_type
+
+
+class AssignmentExpressionNode(ExprNode):
+ """
+ Also known as a named expression or the walrus operator
+
+ Arguments
+ lhs - NameNode - not stored directly as an attribute of the node
+ rhs - ExprNode
+
+ Attributes
+ rhs - ExprNode
+ assignment - SingleAssignmentNode
+ """
+ # subexprs and child_attrs are intentionally different here, because the assignment is not an expression
+ subexprs = ["rhs"]
+ child_attrs = ["rhs", "assignment"] # This order is important for control-flow (i.e. xdecref) to be right
+
+ is_temp = False
+ assignment = None
+ clone_node = None
+
+ def __init__(self, pos, lhs, rhs, **kwds):
+ super(AssignmentExpressionNode, self).__init__(pos, **kwds)
+ self.rhs = ProxyNode(rhs)
+ assign_expr_rhs = CloneNode(self.rhs)
+ self.assignment = SingleAssignmentNode(
+ pos, lhs=lhs, rhs=assign_expr_rhs, is_assignment_expression=True)
+
+ @property
+ def type(self):
+ return self.rhs.type
+
+ @property
+ def target_name(self):
+ return self.assignment.lhs.name
+
+ def infer_type(self, env):
+ return self.rhs.infer_type(env)
+
+ def analyse_declarations(self, env):
+ self.assignment.analyse_declarations(env)
+
+ def analyse_types(self, env):
+ # we're trying to generate code that looks roughly like:
+ # __pyx_t_1 = rhs
+ # lhs = __pyx_t_1
+ # __pyx_t_1
+ # (plus any reference counting that's needed)
+
+ self.rhs = self.rhs.analyse_types(env)
+ if not self.rhs.arg.is_temp:
+ if not self.rhs.arg.is_literal:
+ # for anything but the simplest cases (where it can be used directly)
+ # we convert rhs to a temp, because CloneNode requires arg to be a temp
+ self.rhs.arg = self.rhs.arg.coerce_to_temp(env)
+ else:
+ # For literals we can optimize by just using the literal twice
+ #
+ # We aren't including `self.rhs.is_name` in this optimization
+ # because that goes wrong for assignment expressions evaluated in
+ # parallel, e.g. `(a := b) + (b := a + c)`.
+ # This is a special case of https://github.com/cython/cython/issues/4146
+ # TODO - once that's fixed in general, revisit this code and possibly
+ # use coerce_to_simple
+ self.assignment.rhs = copy.copy(self.rhs)
+
+ # TODO - there's a missed optimization in the code generation stage
+ # for self.rhs.arg.is_temp: an incref/decref pair can be removed
+ # (but needs a general mechanism to do that)
+ self.assignment = self.assignment.analyse_types(env)
+ return self
+
+ def coerce_to(self, dst_type, env):
+ if dst_type == self.assignment.rhs.type:
+ # in this quite common case (for example, when both the lhs and self are being coerced to Python)
+ # we can optimize the coercion out by sharing it between
+ # this and the assignment
+ old_rhs_arg = self.rhs.arg
+ if isinstance(old_rhs_arg, CoerceToTempNode):
+ old_rhs_arg = old_rhs_arg.arg
+ rhs_arg = old_rhs_arg.coerce_to(dst_type, env)
+ if rhs_arg is not old_rhs_arg:
+ self.rhs.arg = rhs_arg
+ self.rhs.update_type_and_entry()
+ # clean up the old coercion node that the assignment has likely generated
+ if (isinstance(self.assignment.rhs, CoercionNode)
+ and not isinstance(self.assignment.rhs, CloneNode)):
+ self.assignment.rhs = self.assignment.rhs.arg
+ self.assignment.rhs.type = self.assignment.rhs.arg.type
+ return self
+ return super(AssignmentExpressionNode, self).coerce_to(dst_type, env)
+
+ def calculate_result_code(self):
+ return self.rhs.result()
+
+ def generate_result_code(self, code):
+ # we have to do this manually because it isn't a subexpression
+ self.assignment.generate_execution_code(code)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..ecd02d788a4ef207aea16b6d8304cb63c161597d
Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.cp39-win_amd64.pyd differ
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.pxd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..5338d4fe490aacf8bc3b781ffa82451f1548404e
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.pxd
@@ -0,0 +1,111 @@
+# cython: language_level=3
+
+cimport cython
+
+from .Visitor cimport CythonTransform, TreeVisitor
+
+cdef class ControlBlock:
+ cdef public set children
+ cdef public set parents
+ cdef public set positions
+ cdef public list stats
+ cdef public dict gen
+ cdef public set bounded
+
+ # Big integer bitsets
+ cdef public object i_input
+ cdef public object i_output
+ cdef public object i_gen
+ cdef public object i_kill
+ cdef public object i_state
+
+ cpdef bint empty(self)
+ cpdef detach(self)
+ cpdef add_child(self, block)
+
+cdef class ExitBlock(ControlBlock):
+ cpdef bint empty(self)
+
+cdef class NameAssignment:
+ cdef public bint is_arg
+ cdef public bint is_deletion
+ cdef public object lhs
+ cdef public object rhs
+ cdef public object entry
+ cdef public object pos
+ cdef public set refs
+ cdef public object bit
+ cdef public object inferred_type
+ cdef public object rhs_scope
+
+cdef class AssignmentList:
+ cdef public object bit
+ cdef public object mask
+ cdef public list stats
+
+cdef class AssignmentCollector(TreeVisitor):
+ cdef list assignments
+
+@cython.final
+cdef class ControlFlow:
+ cdef public set blocks
+ cdef public set entries
+ cdef public list loops
+ cdef public list exceptions
+
+ cdef public ControlBlock entry_point
+ cdef public ExitBlock exit_point
+ cdef public ControlBlock block
+
+ cdef public dict assmts
+
+ cdef public Py_ssize_t in_try_block
+
+ cpdef newblock(self, ControlBlock parent=*)
+ cpdef nextblock(self, ControlBlock parent=*)
+ cpdef bint is_tracked(self, entry)
+ cpdef bint is_statically_assigned(self, entry)
+ cpdef mark_position(self, node)
+ cpdef mark_assignment(self, lhs, rhs, entry, rhs_scope=*)
+ cpdef mark_argument(self, lhs, rhs, entry)
+ cpdef mark_deletion(self, node, entry)
+ cpdef mark_reference(self, node, entry)
+
+ @cython.locals(block=ControlBlock, parent=ControlBlock, unreachable=set)
+ cpdef normalize(self)
+
+ @cython.locals(bit=object, assmts=AssignmentList, block=ControlBlock)
+ cpdef initialize(self)
+
+ @cython.locals(assmts=AssignmentList, assmt=NameAssignment)
+ cpdef set map_one(self, istate, entry)
+
+ @cython.locals(block=ControlBlock, parent=ControlBlock)
+ cdef reaching_definitions(self)
+
+cdef class Uninitialized:
+ pass
+
+cdef class Unknown:
+ pass
+
+cdef class MessageCollection:
+ cdef set messages
+
+@cython.locals(dirty=bint, block=ControlBlock, parent=ControlBlock,
+ assmt=NameAssignment)
+cdef check_definitions(ControlFlow flow, dict compiler_directives)
+
+@cython.final
+cdef class ControlFlowAnalysis(CythonTransform):
+ cdef object gv_ctx
+ cdef object constant_folder
+ cdef set reductions
+ cdef list stack # a stack of (env, flow) tuples
+ cdef object env
+ cdef ControlFlow flow
+ cdef object object_expr
+ cdef bint in_inplace_assignment
+
+ cpdef mark_assignment(self, lhs, rhs=*, rhs_scope=*)
+ cpdef mark_position(self, node)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8575435738eada568f58c894ced50e4afccb449
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FlowControl.py
@@ -0,0 +1,1383 @@
+# cython: language_level=3str
+# cython: auto_pickle=True
+
+from __future__ import absolute_import
+
+import cython
+cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object, Builtin=object,
+ Options=object, TreeVisitor=object, CythonTransform=object,
+ InternalError=object, error=object, warning=object,
+ fake_rhs_expr=object, TypedExprNode=object)
+
+from . import Builtin
+from . import ExprNodes
+from . import Nodes
+from . import Options
+from . import PyrexTypes
+
+from .Visitor import TreeVisitor, CythonTransform
+from .Errors import error, warning, InternalError
+
+
+class TypedExprNode(ExprNodes.ExprNode):
+ # Used for declaring assignments of a specified type without a known entry.
+ def __init__(self, type, may_be_none=None, pos=None):
+ super(TypedExprNode, self).__init__(pos)
+ self.type = type
+ self._may_be_none = may_be_none
+
+ def may_be_none(self):
+ return self._may_be_none != False
+
+# Fake rhs to silence "unused variable" warning
+fake_rhs_expr = TypedExprNode(PyrexTypes.unspecified_type)
+
+
+class ControlBlock(object):
+ """Control flow graph node. Sequence of assignments and name references.
+
+ children set of children nodes
+ parents set of parent nodes
+ positions set of position markers
+
+ stats list of block statements
+ gen dict of assignments generated by this block
+ bounded set of entries that are definitely bounded in this block
+
+ Example:
+
+ a = 1
+ b = a + c # 'c' is already bounded or exception here
+
+ stats = [Assignment(a), NameReference(a), NameReference(c),
+ Assignment(b)]
+ gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)}
+ bounded = {Entry(a), Entry(c)}
+
+ """
+
+ def __init__(self):
+ self.children = set()
+ self.parents = set()
+ self.positions = set()
+
+ self.stats = []
+ self.gen = {}
+ self.bounded = set()
+
+ self.i_input = 0
+ self.i_output = 0
+ self.i_gen = 0
+ self.i_kill = 0
+ self.i_state = 0
+
+ def empty(self):
+ return (not self.stats and not self.positions)
+
+ def detach(self):
+ """Detach block from parents and children."""
+ for child in self.children:
+ child.parents.remove(self)
+ for parent in self.parents:
+ parent.children.remove(self)
+ self.parents.clear()
+ self.children.clear()
+
+ def add_child(self, block):
+ self.children.add(block)
+ block.parents.add(self)
+
+
+class ExitBlock(ControlBlock):
+ """Non-empty exit point block."""
+
+ def empty(self):
+ return False
+
+
+class AssignmentList(object):
+ def __init__(self):
+ self.stats = []
+
+
+class ControlFlow(object):
+ """Control-flow graph.
+
+ entry_point ControlBlock entry point for this graph
+ exit_point ControlBlock normal exit point
+ block ControlBlock current block
+ blocks set all blocks of this graph
+ entries set tracked entries
+ loops list stack for loop descriptors
+ exceptions list stack for exception descriptors
+ in_try_block int track if we're in a try...except or try...finally block
+ """
+
+ def __init__(self):
+ self.blocks = set()
+ self.entries = set()
+ self.loops = []
+ self.exceptions = []
+
+ self.entry_point = ControlBlock()
+ self.exit_point = ExitBlock()
+ self.blocks.add(self.exit_point)
+ self.block = self.entry_point
+ self.in_try_block = 0
+
+ def newblock(self, parent=None):
+ """Create floating block linked to `parent` if given.
+
+ NOTE: Block is NOT added to self.blocks
+ """
+ block = ControlBlock()
+ self.blocks.add(block)
+ if parent:
+ parent.add_child(block)
+ return block
+
+ def nextblock(self, parent=None):
+ """Create block children block linked to current or `parent` if given.
+
+ NOTE: Block is added to self.blocks
+ """
+ block = ControlBlock()
+ self.blocks.add(block)
+ if parent:
+ parent.add_child(block)
+ elif self.block:
+ self.block.add_child(block)
+ self.block = block
+ return self.block
+
+ def is_tracked(self, entry):
+ if entry.is_anonymous:
+ return False
+ return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or
+ entry.from_closure or entry.in_closure or
+ entry.error_on_uninitialized)
+
+ def is_statically_assigned(self, entry):
+ if (entry.is_local and entry.is_variable and
+ (entry.type.is_struct_or_union or
+ entry.type.is_complex or
+ entry.type.is_array or
+ (entry.type.is_cpp_class and not entry.is_cpp_optional))):
+ # stack allocated structured variable => never uninitialised
+ return True
+ return False
+
+ def mark_position(self, node):
+ """Mark position, will be used to draw graph nodes."""
+ if self.block:
+ self.block.positions.add(node.pos[:2])
+
+ def mark_assignment(self, lhs, rhs, entry, rhs_scope=None):
+ if self.block and self.is_tracked(entry):
+ assignment = NameAssignment(lhs, rhs, entry, rhs_scope=rhs_scope)
+ self.block.stats.append(assignment)
+ self.block.gen[entry] = assignment
+ self.entries.add(entry)
+
+ def mark_argument(self, lhs, rhs, entry):
+ if self.block and self.is_tracked(entry):
+ assignment = Argument(lhs, rhs, entry)
+ self.block.stats.append(assignment)
+ self.block.gen[entry] = assignment
+ self.entries.add(entry)
+
+ def mark_deletion(self, node, entry):
+ if self.block and self.is_tracked(entry):
+ assignment = NameDeletion(node, entry)
+ self.block.stats.append(assignment)
+ self.block.gen[entry] = Uninitialized
+ self.entries.add(entry)
+
+ def mark_reference(self, node, entry):
+ if self.block and self.is_tracked(entry):
+ self.block.stats.append(NameReference(node, entry))
+ ## XXX: We don't track expression evaluation order so we can't use
+ ## XXX: successful reference as initialization sign.
+ ## # Local variable is definitely bound after this reference
+ ## if not node.allow_null:
+ ## self.block.bounded.add(entry)
+ self.entries.add(entry)
+
+ def normalize(self):
+ """Delete unreachable and orphan blocks."""
+ queue = {self.entry_point}
+ visited = set()
+ while queue:
+ root = queue.pop()
+ visited.add(root)
+ for child in root.children:
+ if child not in visited:
+ queue.add(child)
+ unreachable = self.blocks - visited
+ for block in unreachable:
+ block.detach()
+ visited.remove(self.entry_point)
+ for block in visited:
+ if block.empty():
+ for parent in block.parents: # Re-parent
+ for child in block.children:
+ parent.add_child(child)
+ block.detach()
+ unreachable.add(block)
+ self.blocks -= unreachable
+
+ def initialize(self):
+ """Set initial state, map assignments to bits."""
+ self.assmts = {}
+
+ bit = 1
+ for entry in self.entries:
+ assmts = AssignmentList()
+ assmts.mask = assmts.bit = bit
+ self.assmts[entry] = assmts
+ bit <<= 1
+
+ for block in self.blocks:
+ for stat in block.stats:
+ if isinstance(stat, NameAssignment):
+ stat.bit = bit
+ assmts = self.assmts[stat.entry]
+ assmts.stats.append(stat)
+ assmts.mask |= bit
+ bit <<= 1
+
+ for block in self.blocks:
+ for entry, stat in block.gen.items():
+ assmts = self.assmts[entry]
+ if stat is Uninitialized:
+ block.i_gen |= assmts.bit
+ else:
+ block.i_gen |= stat.bit
+ block.i_kill |= assmts.mask
+ block.i_output = block.i_gen
+ for entry in block.bounded:
+ block.i_kill |= self.assmts[entry].bit
+
+ for assmts in self.assmts.values():
+ self.entry_point.i_gen |= assmts.bit
+ self.entry_point.i_output = self.entry_point.i_gen
+
+ def map_one(self, istate, entry):
+ ret = set()
+ assmts = self.assmts[entry]
+ if istate & assmts.bit:
+ if self.is_statically_assigned(entry):
+ ret.add(StaticAssignment(entry))
+ elif entry.from_closure:
+ ret.add(Unknown)
+ else:
+ ret.add(Uninitialized)
+ for assmt in assmts.stats:
+ if istate & assmt.bit:
+ ret.add(assmt)
+ return ret
+
+ def reaching_definitions(self):
+ """Per-block reaching definitions analysis."""
+ dirty = True
+ while dirty:
+ dirty = False
+ for block in self.blocks:
+ i_input = 0
+ for parent in block.parents:
+ i_input |= parent.i_output
+ i_output = (i_input & ~block.i_kill) | block.i_gen
+ if i_output != block.i_output:
+ dirty = True
+ block.i_input = i_input
+ block.i_output = i_output
+
+
+class LoopDescr(object):
+ def __init__(self, next_block, loop_block):
+ self.next_block = next_block
+ self.loop_block = loop_block
+ self.exceptions = []
+
+
+class ExceptionDescr(object):
+ """Exception handling helper.
+
+ entry_point ControlBlock Exception handling entry point
+ finally_enter ControlBlock Normal finally clause entry point
+ finally_exit ControlBlock Normal finally clause exit point
+ """
+
+ def __init__(self, entry_point, finally_enter=None, finally_exit=None):
+ self.entry_point = entry_point
+ self.finally_enter = finally_enter
+ self.finally_exit = finally_exit
+
+
+class NameAssignment(object):
+ def __init__(self, lhs, rhs, entry, rhs_scope=None):
+ if lhs.cf_state is None:
+ lhs.cf_state = set()
+ self.lhs = lhs
+ self.rhs = rhs
+ self.entry = entry
+ self.pos = lhs.pos
+ self.refs = set()
+ self.is_arg = False
+ self.is_deletion = False
+ self.inferred_type = None
+ # For generator expression targets, the rhs can have a different scope than the lhs.
+ self.rhs_scope = rhs_scope
+
+ def __repr__(self):
+ return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
+
+ def infer_type(self):
+ self.inferred_type = self.rhs.infer_type(self.rhs_scope or self.entry.scope)
+ return self.inferred_type
+
+ def type_dependencies(self):
+ return self.rhs.type_dependencies(self.rhs_scope or self.entry.scope)
+
+ @property
+ def type(self):
+ if not self.entry.type.is_unspecified:
+ return self.entry.type
+ return self.inferred_type
+
+
+class StaticAssignment(NameAssignment):
+ """Initialised at declaration time, e.g. stack allocation."""
+ def __init__(self, entry):
+ if not entry.type.is_pyobject:
+ may_be_none = False
+ else:
+ may_be_none = None # unknown
+ lhs = TypedExprNode(
+ entry.type, may_be_none=may_be_none, pos=entry.pos)
+ super(StaticAssignment, self).__init__(lhs, lhs, entry)
+
+ def infer_type(self):
+ return self.entry.type
+
+ def type_dependencies(self):
+ return ()
+
+
+class Argument(NameAssignment):
+ def __init__(self, lhs, rhs, entry):
+ NameAssignment.__init__(self, lhs, rhs, entry)
+ self.is_arg = True
+
+
+class NameDeletion(NameAssignment):
+ def __init__(self, lhs, entry):
+ NameAssignment.__init__(self, lhs, lhs, entry)
+ self.is_deletion = True
+
+ def infer_type(self):
+ inferred_type = self.rhs.infer_type(self.entry.scope)
+ if (not inferred_type.is_pyobject
+ and inferred_type.can_coerce_to_pyobject(self.entry.scope)):
+ return PyrexTypes.py_object_type
+ self.inferred_type = inferred_type
+ return inferred_type
+
+
+class Uninitialized(object):
+ """Definitely not initialised yet."""
+
+
+class Unknown(object):
+ """Coming from outer closure, might be initialised or not."""
+
+
+class NameReference(object):
+ def __init__(self, node, entry):
+ if node.cf_state is None:
+ node.cf_state = set()
+ self.node = node
+ self.entry = entry
+ self.pos = node.pos
+
+ def __repr__(self):
+ return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
+
+
+class ControlFlowState(list):
+ # Keeps track of Node's entry assignments
+ #
+ # cf_is_null [boolean] It is uninitialized
+ # cf_maybe_null [boolean] May be uninitialized
+ # is_single [boolean] Has only one assignment at this point
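+ #
+ # e.g. state {Uninitialized} -> cf_is_null and cf_maybe_null set;
+ # {Uninitialized, assmt} -> only cf_maybe_null set;
+ # {assmt} -> is_single set.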
+
+ cf_maybe_null = False
+ cf_is_null = False
+ is_single = False
+
+ def __init__(self, state):
+ if Uninitialized in state:
+ state.discard(Uninitialized)
+ self.cf_maybe_null = True
+ if not state:
+ self.cf_is_null = True
+ elif Unknown in state:
+ state.discard(Unknown)
+ self.cf_maybe_null = True
+ else:
+ if len(state) == 1:
+ self.is_single = True
+ # XXX: Remove fake_rhs_expr
+ super(ControlFlowState, self).__init__(
+ [i for i in state if i.rhs is not fake_rhs_expr])
+
+ def one(self):
+ return self[0]
+
+
+class GVContext(object):
+ """Graphviz subgraph object."""
+
+ def __init__(self):
+ self.blockids = {}
+ self.nextid = 0
+ self.children = []
+ self.sources = {}
+
+ def add(self, child):
+ self.children.append(child)
+
+ def nodeid(self, block):
+ if block not in self.blockids:
+ self.blockids[block] = 'block%d' % self.nextid
+ self.nextid += 1
+ return self.blockids[block]
+
+ def extract_sources(self, block):
+ if not block.positions:
+ return ''
+ start = min(block.positions)
+ stop = max(block.positions)
+ srcdescr = start[0]
+ if srcdescr not in self.sources:
+ self.sources[srcdescr] = list(srcdescr.get_lines())
+ lines = self.sources[srcdescr]
+ return '\\n'.join([l.strip() for l in lines[start[1] - 1:stop[1]]])
+
+ def render(self, fp, name, annotate_defs=False):
+ """Render graphviz dot graph"""
+ fp.write('digraph %s {\n' % name)
+ fp.write(' node [shape=box];\n')
+ for child in self.children:
+ child.render(fp, self, annotate_defs)
+ fp.write('}\n')
+
+ def escape(self, text):
+ return text.replace('"', '\\"').replace('\n', '\\n')
+
+
+class GV(object):
+ """Graphviz DOT renderer."""
+
+ def __init__(self, name, flow):
+ self.name = name
+ self.flow = flow
+
+ def render(self, fp, ctx, annotate_defs=False):
+ fp.write(' subgraph %s {\n' % self.name)
+ for block in self.flow.blocks:
+ label = ctx.extract_sources(block)
+ if annotate_defs:
+ for stat in block.stats:
+ if isinstance(stat, NameAssignment):
+ label += '\n %s [%s %s]' % (
+ stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1])
+ elif isinstance(stat, NameReference):
+ if stat.entry:
+ label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1])
+ if not label:
+ label = 'empty'
+ pid = ctx.nodeid(block)
+ fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label)))
+ for block in self.flow.blocks:
+ pid = ctx.nodeid(block)
+ for child in block.children:
+ fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child)))
+ fp.write(' }\n')
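+ # Taken together, GVContext.render() and GV.render() emit roughly:
+ # digraph module {
+ # node [shape=box];
+ # subgraph f {
+ # block0 [label="a = 1\nreturn a"];
+ # block0 -> block1;
+ # }
+ # }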
+
+
+class MessageCollection(object):
+ """Collect error/warnings messages first then sort"""
+ def __init__(self):
+ self.messages = set()
+
+ def error(self, pos, message):
+ self.messages.add((pos, True, message))
+
+ def warning(self, pos, message):
+ self.messages.add((pos, False, message))
+
+ def report(self):
+ for pos, is_error, message in sorted(self.messages):
+ if is_error:
+ error(pos, message)
+ else:
+ warning(pos, message, 2)
+
+
+def check_definitions(flow, compiler_directives):
+ flow.initialize()
+ flow.reaching_definitions()
+
+ # Propagate each block's input state through its statements
+ assignments = set()
+ # Node to entry map
+ references = {}
+ assmt_nodes = set()
+
+ for block in flow.blocks:
+ i_state = block.i_input
+ for stat in block.stats:
+ i_assmts = flow.assmts[stat.entry]
+ state = flow.map_one(i_state, stat.entry)
+ if isinstance(stat, NameAssignment):
+ stat.lhs.cf_state.update(state)
+ assmt_nodes.add(stat.lhs)
+ i_state = i_state & ~i_assmts.mask
+ if stat.is_deletion:
+ i_state |= i_assmts.bit
+ else:
+ i_state |= stat.bit
+ assignments.add(stat)
+ if stat.rhs is not fake_rhs_expr:
+ stat.entry.cf_assignments.append(stat)
+ elif isinstance(stat, NameReference):
+ references[stat.node] = stat.entry
+ stat.entry.cf_references.append(stat)
+ stat.node.cf_state.update(state)
+ ## if not stat.node.allow_null:
+ ## i_state &= ~i_assmts.bit
+ ## # after successful read, the state is known to be initialised
+ state.discard(Uninitialized)
+ state.discard(Unknown)
+ for assmt in state:
+ assmt.refs.add(stat)
+
+ # Check variable usage
+ warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized']
+ warn_unused_result = compiler_directives['warn.unused_result']
+ warn_unused = compiler_directives['warn.unused']
+ warn_unused_arg = compiler_directives['warn.unused_arg']
+
+ messages = MessageCollection()
+
+ # assignment hints
+ for node in assmt_nodes:
+ if Uninitialized in node.cf_state:
+ node.cf_maybe_null = True
+ if len(node.cf_state) == 1:
+ node.cf_is_null = True
+ else:
+ node.cf_is_null = False
+ elif Unknown in node.cf_state:
+ node.cf_maybe_null = True
+ else:
+ node.cf_is_null = False
+ node.cf_maybe_null = False
+
+ # Find uninitialized references and cf-hints
+ for node, entry in references.items():
+ if Uninitialized in node.cf_state:
+ node.cf_maybe_null = True
+ if (not entry.from_closure and len(node.cf_state) == 1
+ and entry.name not in entry.scope.scope_predefined_names):
+ node.cf_is_null = True
+ if (node.allow_null or entry.from_closure
+ or entry.is_pyclass_attr or entry.type.is_error):
+ pass # Can be uninitialized here
+ elif node.cf_is_null and not entry.in_closure:
+ if entry.error_on_uninitialized or (
+ Options.error_on_uninitialized and (
+ entry.type.is_pyobject or entry.type.is_unspecified)):
+ messages.error(
+ node.pos,
+ "local variable '%s' referenced before assignment"
+ % entry.name)
+ else:
+ messages.warning(
+ node.pos,
+ "local variable '%s' referenced before assignment"
+ % entry.name)
+ elif warn_maybe_uninitialized:
+ msg = "local variable '%s' might be referenced before assignment" % entry.name
+ if entry.in_closure:
+ msg += " (maybe initialized inside a closure)"
+ messages.warning(
+ node.pos,
+ msg)
+ elif Unknown in node.cf_state:
+ # TODO: better cross-closure analysis to know when inner functions
+ # are being called before a variable is being set, and when
+ # a variable is known to be set before even defining the
+ # inner function, etc.
+ node.cf_maybe_null = True
+ else:
+ node.cf_is_null = False
+ node.cf_maybe_null = False
+
+ # Unused result
+ for assmt in assignments:
+ if (not assmt.refs and not assmt.entry.is_pyclass_attr
+ and not assmt.entry.in_closure):
+ if assmt.entry.cf_references and warn_unused_result:
+ if assmt.is_arg:
+ messages.warning(assmt.pos, "Unused argument value '%s'" %
+ assmt.entry.name)
+ else:
+ messages.warning(assmt.pos, "Unused result in '%s'" %
+ assmt.entry.name)
+ assmt.lhs.cf_used = False
+
+ # Unused entries
+ for entry in flow.entries:
+ if (not entry.cf_references
+ and not entry.is_pyclass_attr):
+ if entry.name != '_' and not entry.name.startswith('unused'):
+ # '_' is often used for unused variables, e.g. in loops
+ if entry.is_arg:
+ if warn_unused_arg:
+ messages.warning(entry.pos, "Unused argument '%s'" %
+ entry.name)
+ else:
+ if warn_unused:
+ messages.warning(entry.pos, "Unused entry '%s'" %
+ entry.name)
+ entry.cf_used = False
+
+ messages.report()
+
+ for node in assmt_nodes:
+ node.cf_state = ControlFlowState(node.cf_state)
+ for node in references:
+ node.cf_state = ControlFlowState(node.cf_state)
+
+
+class AssignmentCollector(TreeVisitor):
+ def __init__(self):
+ super(AssignmentCollector, self).__init__()
+ self.assignments = []
+
+ def visit_Node(self, node):
+ self._visitchildren(node, None, None)
+
+ def visit_SingleAssignmentNode(self, node):
+ self.assignments.append((node.lhs, node.rhs))
+
+ def visit_CascadedAssignmentNode(self, node):
+ for lhs in node.lhs_list:
+ self.assignments.append((lhs, node.rhs))
+
+
+class ControlFlowAnalysis(CythonTransform):
+
+ def find_in_stack(self, env):
+ if env == self.env:
+ return self.flow
+ for e, flow in reversed(self.stack):
+ if e is env:
+ return flow
+ assert False
+
+ def visit_ModuleNode(self, node):
+ dot_output = self.current_directives['control_flow.dot_output']
+ self.gv_ctx = GVContext() if dot_output else None
+
+ from .Optimize import ConstantFolding
+ self.constant_folder = ConstantFolding()
+
+ # Set of NameNode reductions
+ self.reductions = set()
+
+ self.in_inplace_assignment = False
+ self.env = node.scope
+ self.flow = ControlFlow()
+ self.stack = [] # a stack of (env, flow) tuples
+ self.object_expr = TypedExprNode(PyrexTypes.py_object_type, may_be_none=True)
+ self.visitchildren(node)
+
+ check_definitions(self.flow, self.current_directives)
+
+ if dot_output:
+ annotate_defs = self.current_directives['control_flow.dot_annotate_defs']
+ with open(dot_output, 'wt') as fp:
+ self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs)
+ return node
+
+ def visit_FuncDefNode(self, node):
+ for arg in node.args:
+ if arg.default:
+ self.visitchildren(arg)
+ self.visitchildren(node, ('decorators',))
+ self.stack.append((self.env, self.flow))
+ self.env = node.local_scope
+ self.flow = ControlFlow()
+
+ # Collect all entries
+ for entry in node.local_scope.entries.values():
+ if self.flow.is_tracked(entry):
+ self.flow.entries.add(entry)
+
+ self.mark_position(node)
+ # Function body block
+ self.flow.nextblock()
+
+ for arg in node.args:
+ self._visit(arg)
+ if node.star_arg:
+ self.flow.mark_argument(node.star_arg,
+ TypedExprNode(Builtin.tuple_type,
+ may_be_none=False),
+ node.star_arg.entry)
+ if node.starstar_arg:
+ self.flow.mark_argument(node.starstar_arg,
+ TypedExprNode(Builtin.dict_type,
+ may_be_none=False),
+ node.starstar_arg.entry)
+ self._visit(node.body)
+ # Workaround for generators
+ if node.is_generator:
+ self._visit(node.gbody.body)
+
+ # Exit point
+ if self.flow.block:
+ self.flow.block.add_child(self.flow.exit_point)
+
+ # Cleanup graph
+ self.flow.normalize()
+ check_definitions(self.flow, self.current_directives)
+ self.flow.blocks.add(self.flow.entry_point)
+
+ if self.gv_ctx is not None:
+ self.gv_ctx.add(GV(node.local_scope.name, self.flow))
+
+ self.env, self.flow = self.stack.pop()
+ return node
+
+ def visit_DefNode(self, node):
+ node.used = True
+ return self.visit_FuncDefNode(node)
+
+ def visit_GeneratorBodyDefNode(self, node):
+ return node
+
+ def visit_CTypeDefNode(self, node):
+ return node
+
+ def mark_assignment(self, lhs, rhs=None, rhs_scope=None):
+ if not self.flow.block:
+ return
+ if self.flow.exceptions:
+ exc_descr = self.flow.exceptions[-1]
+ self.flow.block.add_child(exc_descr.entry_point)
+ self.flow.nextblock()
+
+ if not rhs:
+ rhs = self.object_expr
+ if lhs.is_name:
+ if lhs.entry is not None:
+ entry = lhs.entry
+ else:
+ entry = self.env.lookup(lhs.name)
+ if entry is None: # TODO: This shouldn't happen...
+ return
+ self.flow.mark_assignment(lhs, rhs, entry, rhs_scope=rhs_scope)
+ elif lhs.is_sequence_constructor:
+ for i, arg in enumerate(lhs.args):
+ if arg.is_starred:
+ # "a, *b = x" assigns a list to "b"
+ item_node = TypedExprNode(Builtin.list_type, may_be_none=False, pos=arg.pos)
+ elif rhs is self.object_expr:
+ item_node = rhs
+ else:
+ item_node = rhs.inferable_item_node(i)
+ self.mark_assignment(arg, item_node)
+ else:
+ self._visit(lhs)
+
+ if self.flow.exceptions:
+ exc_descr = self.flow.exceptions[-1]
+ self.flow.block.add_child(exc_descr.entry_point)
+ self.flow.nextblock()
+
+ def mark_position(self, node):
+ """Mark position if DOT output is enabled."""
+ if self.current_directives['control_flow.dot_output']:
+ self.flow.mark_position(node)
+
+ def visit_FromImportStatNode(self, node):
+ for name, target in node.items:
+ if name != "*":
+ self.mark_assignment(target)
+ self.visitchildren(node)
+ return node
+
+ def visit_AssignmentNode(self, node):
+ raise InternalError("Unhandled assignment node %s" % type(node))
+
+ def visit_SingleAssignmentNode(self, node):
+ self._visit(node.rhs)
+ self.mark_assignment(node.lhs, node.rhs)
+ return node
+
+ def visit_CascadedAssignmentNode(self, node):
+ self._visit(node.rhs)
+ for lhs in node.lhs_list:
+ self.mark_assignment(lhs, node.rhs)
+ return node
+
+ def visit_ParallelAssignmentNode(self, node):
+ collector = AssignmentCollector()
+ collector.visitchildren(node)
+ for lhs, rhs in collector.assignments:
+ self._visit(rhs)
+ for lhs, rhs in collector.assignments:
+ self.mark_assignment(lhs, rhs)
+ return node
+
+ def visit_InPlaceAssignmentNode(self, node):
+ self.in_inplace_assignment = True
+ self.visitchildren(node)
+ self.in_inplace_assignment = False
+ self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node()))
+ return node
+
+ def visit_DelStatNode(self, node):
+ for arg in node.args:
+ if arg.is_name:
+ entry = arg.entry or self.env.lookup(arg.name)
+ if entry.in_closure or entry.from_closure:
+ error(arg.pos,
+ "can not delete variable '%s' "
+ "referenced in nested scope" % entry.name)
+ if not node.ignore_nonexisting:
+ self._visit(arg) # mark reference
+ self.flow.mark_deletion(arg, entry)
+ else:
+ self._visit(arg)
+ return node
+
+ def visit_CArgDeclNode(self, node):
+ entry = self.env.lookup(node.name)
+ if entry:
+ may_be_none = not node.not_none
+ self.flow.mark_argument(
+ node, TypedExprNode(entry.type, may_be_none), entry)
+ return node
+
+ def visit_NameNode(self, node):
+ if self.flow.block:
+ entry = node.entry or self.env.lookup(node.name)
+ if entry:
+ self.flow.mark_reference(node, entry)
+
+ if entry in self.reductions and not self.in_inplace_assignment:
+ error(node.pos,
+ "Cannot read reduction variable in loop body")
+
+ return node
+
+ def visit_StatListNode(self, node):
+ if self.flow.block:
+ for stat in node.stats:
+ self._visit(stat)
+ if not self.flow.block:
+ stat.is_terminator = True
+ break
+ return node
+
+ def visit_Node(self, node):
+ self.visitchildren(node)
+ self.mark_position(node)
+ return node
+
+ def visit_SizeofVarNode(self, node):
+ return node
+
+ def visit_TypeidNode(self, node):
+ return node
+
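+ # For 'if c1: b1 elif c2: b2 else: b3', visit_IfStatNode() builds roughly:
+ # parent -> cond1; cond1 -> body1 -> next; cond1 -> cond2;
+ # cond2 -> body2 -> next; cond2 -> else -> next
+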
+ def visit_IfStatNode(self, node):
+ next_block = self.flow.newblock()
+ parent = self.flow.block
+ # If clauses
+ for clause in node.if_clauses:
+ parent = self.flow.nextblock(parent)
+ self._visit(clause.condition)
+ self.flow.nextblock()
+ self._visit(clause.body)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ # Else clause
+ if node.else_clause:
+ self.flow.nextblock(parent=parent)
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ else:
+ parent.add_child(next_block)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def visit_AssertStatNode(self, node):
+ """Essentially an if-condition that wraps a RaiseStatNode.
+ """
+ self.mark_position(node)
+ next_block = self.flow.newblock()
+ parent = self.flow.block
+ # failure case
+ parent = self.flow.nextblock(parent)
+ self._visit(node.condition)
+ self.flow.nextblock()
+ self._visit(node.exception)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ parent.add_child(next_block)
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def visit_WhileStatNode(self, node):
+ condition_block = self.flow.nextblock()
+ next_block = self.flow.newblock()
+ # Condition block
+ self.flow.loops.append(LoopDescr(next_block, condition_block))
+ if node.condition:
+ self._visit(node.condition)
+ # Body block
+ self.flow.nextblock()
+ self._visit(node.body)
+ self.flow.loops.pop()
+ # Loop it
+ if self.flow.block:
+ self.flow.block.add_child(condition_block)
+ self.flow.block.add_child(next_block)
+ # Else clause
+ if node.else_clause:
+ self.flow.nextblock(parent=condition_block)
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ else:
+ condition_block.add_child(next_block)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def mark_forloop_target(self, node):
+ # TODO: Remove redundancy with range optimization...
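+ # e.g. for 'for i in range(a, b, s)', 'i' is marked as being assigned
+ # 'a', 'b' and the folded 'a + s', which is enough for type inference
+ # without unrolling the loop; 'for i, x in enumerate(seq)' additionally
+ # types 'i' as Py_ssize_t when 'seq' is a builtin sequence.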
+ is_special = False
+ sequence = node.iterator.sequence
+ target = node.target
+ env = node.iterator.expr_scope or self.env
+ if isinstance(sequence, ExprNodes.SimpleCallNode):
+ function = sequence.function
+ if sequence.self is None and function.is_name:
+ entry = env.lookup(function.name)
+ if not entry or entry.is_builtin:
+ if function.name == 'reversed' and len(sequence.args) == 1:
+ sequence = sequence.args[0]
+ elif function.name == 'enumerate' and len(sequence.args) == 1:
+ if target.is_sequence_constructor and len(target.args) == 2:
+ iterator = sequence.args[0]
+ if iterator.is_name:
+ iterator_type = iterator.infer_type(env)
+ if iterator_type.is_builtin_type:
+ # assume that builtin types have a length within Py_ssize_t
+ self.mark_assignment(
+ target.args[0],
+ ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
+ type=PyrexTypes.c_py_ssize_t_type),
+ rhs_scope=node.iterator.expr_scope)
+ target = target.args[1]
+ sequence = sequence.args[0]
+ if isinstance(sequence, ExprNodes.SimpleCallNode):
+ function = sequence.function
+ if sequence.self is None and function.is_name:
+ entry = env.lookup(function.name)
+ if not entry or entry.is_builtin:
+ if function.name in ('range', 'xrange'):
+ is_special = True
+ for arg in sequence.args[:2]:
+ self.mark_assignment(target, arg, rhs_scope=node.iterator.expr_scope)
+ if len(sequence.args) > 2:
+ self.mark_assignment(target, self.constant_folder(
+ ExprNodes.binop_node(node.pos,
+ '+',
+ sequence.args[0],
+ sequence.args[2])),
+ rhs_scope=node.iterator.expr_scope)
+
+ if not is_special:
+ # A for-loop basically translates to subsequent calls to
+ # __getitem__(), so using an IndexNode here allows us to
+ # naturally infer the base type of pointers, C arrays,
+ # Python strings, etc., while correctly falling back to an
+ # object type when the base type cannot be handled.
+
+ self.mark_assignment(target, node.item, rhs_scope=node.iterator.expr_scope)
+
+ def visit_AsyncForStatNode(self, node):
+ return self.visit_ForInStatNode(node)
+
+ def visit_ForInStatNode(self, node):
+ condition_block = self.flow.nextblock()
+ next_block = self.flow.newblock()
+ # Condition with iterator
+ self.flow.loops.append(LoopDescr(next_block, condition_block))
+ self._visit(node.iterator)
+ # Target assignment
+ self.flow.nextblock()
+
+ if isinstance(node, Nodes.ForInStatNode):
+ self.mark_forloop_target(node)
+ elif isinstance(node, Nodes.AsyncForStatNode):
+ # not entirely correct, but good enough for now
+ self.mark_assignment(node.target, node.item)
+ else: # Parallel
+ self.mark_assignment(node.target)
+
+ # Body block
+ if isinstance(node, Nodes.ParallelRangeNode):
+ # In case of an invalid or empty loop, the privates (other than the
+ # target) are never assigned: mark them as deleted on entry so that
+ # reads before assignment are flagged.
+ self._delete_privates(node, exclude=node.target.entry)
+
+ self.flow.nextblock()
+ self._visit(node.body)
+ self.flow.loops.pop()
+
+ # Loop it
+ if self.flow.block:
+ self.flow.block.add_child(condition_block)
+ # Else clause
+ if node.else_clause:
+ self.flow.nextblock(parent=condition_block)
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ else:
+ condition_block.add_child(next_block)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def _delete_privates(self, node, exclude=None):
+ for private_node in node.assigned_nodes:
+ if not exclude or private_node.entry is not exclude:
+ self.flow.mark_deletion(private_node, private_node.entry)
+
+ def visit_ParallelRangeNode(self, node):
+ reductions = self.reductions
+
+ # if node.target is None or not a NameNode, an error will have
+ # been previously issued
+ if hasattr(node.target, 'entry'):
+ self.reductions = set(reductions)
+
+ for private_node in node.assigned_nodes:
+ private_node.entry.error_on_uninitialized = True
+ pos, reduction = node.assignments[private_node.entry]
+ if reduction:
+ self.reductions.add(private_node.entry)
+
+ node = self.visit_ForInStatNode(node)
+
+ self.reductions = reductions
+ return node
+
+ def visit_ParallelWithBlockNode(self, node):
+ for private_node in node.assigned_nodes:
+ private_node.entry.error_on_uninitialized = True
+
+ self._delete_privates(node)
+ self.visitchildren(node)
+ self._delete_privates(node)
+
+ return node
+
+ def visit_ForFromStatNode(self, node):
+ condition_block = self.flow.nextblock()
+ next_block = self.flow.newblock()
+ # Condition with iterator
+ self.flow.loops.append(LoopDescr(next_block, condition_block))
+ self._visit(node.bound1)
+ self._visit(node.bound2)
+ if node.step is not None:
+ self._visit(node.step)
+ # Target assignment
+ self.flow.nextblock()
+ self.mark_assignment(node.target, node.bound1)
+ if node.step is not None:
+ self.mark_assignment(node.target, self.constant_folder(
+ ExprNodes.binop_node(node.pos, '+', node.bound1, node.step)))
+ # Body block
+ self.flow.nextblock()
+ self._visit(node.body)
+ self.flow.loops.pop()
+ # Loop it
+ if self.flow.block:
+ self.flow.block.add_child(condition_block)
+ # Else clause
+ if node.else_clause:
+ self.flow.nextblock(parent=condition_block)
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+ else:
+ condition_block.add_child(next_block)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def visit_LoopNode(self, node):
+ raise InternalError("Generic loops are not supported")
+
+ def visit_WithTargetAssignmentStatNode(self, node):
+ self.mark_assignment(node.lhs, node.with_node.enter_call)
+ return node
+
+ def visit_WithStatNode(self, node):
+ self._visit(node.manager)
+ self._visit(node.enter_call)
+ self._visit(node.body)
+ return node
+
+ def visit_TryExceptStatNode(self, node):
+ # After exception handling
+ next_block = self.flow.newblock()
+ # Body block
+ self.flow.newblock()
+ # Exception entry point
+ entry_point = self.flow.newblock()
+ self.flow.exceptions.append(ExceptionDescr(entry_point))
+ self.flow.nextblock()
+ ## XXX: links to exception handling point should be added by
+ ## XXX: child nodes
+ self.flow.block.add_child(entry_point)
+ self.flow.nextblock()
+ self.flow.in_try_block += 1
+ self._visit(node.body)
+ self.flow.in_try_block -= 1
+ self.flow.exceptions.pop()
+
+ # After exception
+ if self.flow.block:
+ if node.else_clause:
+ self.flow.nextblock()
+ self._visit(node.else_clause)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+
+ for clause in node.except_clauses:
+ self.flow.block = entry_point
+ if clause.pattern:
+ for pattern in clause.pattern:
+ self._visit(pattern)
+ else:
+ # TODO: handle * pattern
+ pass
+ entry_point = self.flow.newblock(parent=self.flow.block)
+ self.flow.nextblock()
+ if clause.target:
+ self.mark_assignment(clause.target)
+ self._visit(clause.body)
+ if self.flow.block:
+ self.flow.block.add_child(next_block)
+
+ if self.flow.exceptions:
+ entry_point.add_child(self.flow.exceptions[-1].entry_point)
+
+ if next_block.parents:
+ self.flow.block = next_block
+ else:
+ self.flow.block = None
+ return node
+
+ def visit_TryFinallyStatNode(self, node):
+ body_block = self.flow.nextblock()
+
+ # Exception entry point
+ entry_point = self.flow.newblock()
+ self.flow.block = entry_point
+ self._visit(node.finally_except_clause)
+
+ if self.flow.block and self.flow.exceptions:
+ self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
+
+ # Normal execution
+ finally_enter = self.flow.newblock()
+ self.flow.block = finally_enter
+ self._visit(node.finally_clause)
+ finally_exit = self.flow.block
+
+ descr = ExceptionDescr(entry_point, finally_enter, finally_exit)
+ self.flow.exceptions.append(descr)
+ if self.flow.loops:
+ self.flow.loops[-1].exceptions.append(descr)
+ self.flow.block = body_block
+ body_block.add_child(entry_point)
+ self.flow.nextblock()
+ self.flow.in_try_block += 1
+ self._visit(node.body)
+ self.flow.in_try_block -= 1
+ self.flow.exceptions.pop()
+ if self.flow.loops:
+ self.flow.loops[-1].exceptions.pop()
+
+ if self.flow.block:
+ self.flow.block.add_child(finally_enter)
+ if finally_exit:
+ self.flow.block = self.flow.nextblock(parent=finally_exit)
+ else:
+ self.flow.block = None
+ return node
+
+ def visit_RaiseStatNode(self, node):
+ self.mark_position(node)
+ self.visitchildren(node)
+ if self.flow.exceptions:
+ self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
+ self.flow.block = None
+ if self.flow.in_try_block:
+ node.in_try_block = True
+ return node
+
+ def visit_ReraiseStatNode(self, node):
+ self.mark_position(node)
+ if self.flow.exceptions:
+ self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
+ self.flow.block = None
+ return node
+
+ def visit_ReturnStatNode(self, node):
+ self.mark_position(node)
+ self.visitchildren(node)
+
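+ # Illustration: a 'return' inside nested try/finally blocks links the
+ # current block to the innermost finally clause; that clause's exit is
+ # then linked to the next enclosing finally clause if one exists, and
+ # otherwise to the function's exit point.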
+ outer_exception_handlers = iter(self.flow.exceptions[::-1])
+ for handler in outer_exception_handlers:
+ if handler.finally_enter:
+ self.flow.block.add_child(handler.finally_enter)
+ if handler.finally_exit:
+ # 'return' goes to function exit, or to the next outer 'finally' clause
+ exit_point = self.flow.exit_point
+ for next_handler in outer_exception_handlers:
+ if next_handler.finally_enter:
+ exit_point = next_handler.finally_enter
+ break
+ handler.finally_exit.add_child(exit_point)
+ break
+ else:
+ if self.flow.block:
+ self.flow.block.add_child(self.flow.exit_point)
+ self.flow.block = None
+ return node
+
+ def visit_BreakStatNode(self, node):
+ if not self.flow.loops:
+ #error(node.pos, "break statement not inside loop")
+ return node
+ loop = self.flow.loops[-1]
+ self.mark_position(node)
+ for exception in loop.exceptions[::-1]:
+ if exception.finally_enter:
+ self.flow.block.add_child(exception.finally_enter)
+ if exception.finally_exit:
+ exception.finally_exit.add_child(loop.next_block)
+ break
+ else:
+ self.flow.block.add_child(loop.next_block)
+ self.flow.block = None
+ return node
+
+ def visit_ContinueStatNode(self, node):
+ if not self.flow.loops:
+ #error(node.pos, "continue statement not inside loop")
+ return node
+ loop = self.flow.loops[-1]
+ self.mark_position(node)
+ for exception in loop.exceptions[::-1]:
+ if exception.finally_enter:
+ self.flow.block.add_child(exception.finally_enter)
+ if exception.finally_exit:
+ exception.finally_exit.add_child(loop.loop_block)
+ break
+ else:
+ self.flow.block.add_child(loop.loop_block)
+ self.flow.block = None
+ return node
+
+ def visit_ComprehensionNode(self, node):
+ if node.expr_scope:
+ self.stack.append((self.env, self.flow))
+ self.env = node.expr_scope
+ # Skip append node here
+ self._visit(node.loop)
+ if node.expr_scope:
+ self.env, _ = self.stack.pop()
+ return node
+
+ def visit_ScopedExprNode(self, node):
+ # currently this is written to deal with these two types
+ # (with comprehensions covered in their own function)
+ assert isinstance(node, (ExprNodes.IteratorNode, ExprNodes.AsyncIteratorNode)), node
+ if node.expr_scope:
+ self.stack.append((self.env, self.flow))
+ self.flow = self.find_in_stack(node.expr_scope)
+ self.env = node.expr_scope
+ self.visitchildren(node)
+ if node.expr_scope:
+ self.env, self.flow = self.stack.pop()
+ return node
+
+ def visit_PyClassDefNode(self, node):
+ self.visitchildren(node, ('dict', 'metaclass',
+ 'mkw', 'bases', 'class_result'))
+ self.flow.mark_assignment(node.target, node.classobj,
+ self.env.lookup(node.target.name))
+ self.stack.append((self.env, self.flow))
+ self.env = node.scope
+ self.flow.nextblock()
+ if node.doc_node:
+ self.flow.mark_assignment(node.doc_node, fake_rhs_expr, node.doc_node.entry)
+ self.visitchildren(node, ('body',))
+ self.flow.nextblock()
+ self.env, _ = self.stack.pop()
+ return node
+
+ def visit_CClassDefNode(self, node):
+ # just make sure the node's scope is findable in case there is a list comprehension in it
+ self.stack.append((node.scope, self.flow))
+ self.visitchildren(node)
+ self.stack.pop()
+ return node
+
+ def visit_AmpersandNode(self, node):
+ if node.operand.is_name:
+ # Fake assignment to silence warning
+ self.mark_assignment(node.operand, fake_rhs_expr)
+ self.visitchildren(node)
+ return node
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..644cd2206e7a2b1a61f7b861b43ac5850ded231a
Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.cp39-win_amd64.pyd differ
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ce4142f1cb4ebed9a7e5fc7bc52e200efc9006a
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/FusedNode.py
@@ -0,0 +1,1015 @@
+from __future__ import absolute_import
+
+import copy
+
+from . import (ExprNodes, PyrexTypes, MemoryView,
+ ParseTreeTransforms, StringEncoding, Errors,
+ Naming)
+from .ExprNodes import CloneNode, ProxyNode, TupleNode
+from .Nodes import FuncDefNode, CFuncDefNode, StatListNode, DefNode
+from ..Utils import OrderedSet
+from .Errors import error, CannotSpecialize
+
+
+class FusedCFuncDefNode(StatListNode):
+ """
+ This node replaces a function with fused arguments. It deep-copies the
+ function for every permutation of fused types, and allocates a new local
+ scope for it. It keeps track of the original function in self.node, and
+ the entry of the original function in the symbol table is given the
+ 'fused_cfunction' attribute which points back to us.
+ Then when a function lookup occurs (e.g. to call it), the call can be
+ dispatched to the right function.
+
+ node FuncDefNode the original function
+ nodes [FuncDefNode] list of copies of node with different specific types
+ py_func DefNode the fused python function subscriptable from
+ Python space
+ __signatures__ A DictNode mapping signature specialization strings
+ to PyCFunction nodes
+ resulting_fused_function PyCFunction for the fused DefNode that delegates
+ to specializations
+ fused_func_assignment Assignment of the fused function to the function name
+ defaults_tuple TupleNode of defaults (letting PyCFunctionNode build
+ defaults would result in many different tuples)
+ specialized_pycfuncs List of synthesized pycfunction nodes for the
+ specializations
+ code_object CodeObjectNode shared by all specializations and the
+ fused function
+
+ fused_compound_types All fused (compound) types (e.g. floating[:])
+ """
+
+ __signatures__ = None
+ resulting_fused_function = None
+ fused_func_assignment = None
+ defaults_tuple = None
+ decorators = None
+
+ child_attrs = StatListNode.child_attrs + [
+ '__signatures__', 'resulting_fused_function', 'fused_func_assignment']
+
+ def __init__(self, node, env):
+ super(FusedCFuncDefNode, self).__init__(node.pos)
+
+ self.nodes = []
+ self.node = node
+
+ is_def = isinstance(self.node, DefNode)
+ if is_def:
+ # self.node.decorators = []
+ self.copy_def(env)
+ else:
+ self.copy_cdef(env)
+
+ # Perform some sanity checks. If anything fails, it's a bug
+ for n in self.nodes:
+ assert not n.entry.type.is_fused
+ assert not n.local_scope.return_type.is_fused
+ if node.return_type.is_fused:
+ assert not n.return_type.is_fused
+
+ if not is_def and n.cfunc_declarator.optional_arg_count:
+ assert n.type.op_arg_struct
+
+ node.entry.fused_cfunction = self
+ # Copy the nodes as AnalyseDeclarationsTransform will prepend
+ # self.py_func to self.stats, as we only want specialized
+ # CFuncDefNodes in self.nodes
+ self.stats = self.nodes[:]
+
+ def copy_def(self, env):
+ """
+ Create a copy of the original def or lambda function for specialized
+ versions.
+ """
+ fused_compound_types = PyrexTypes.unique(
+ [arg.type for arg in self.node.args if arg.type.is_fused])
+ fused_types = self._get_fused_base_types(fused_compound_types)
+ permutations = PyrexTypes.get_all_specialized_permutations(fused_types)
+
+ self.fused_compound_types = fused_compound_types
+
+ if self.node.entry in env.pyfunc_entries:
+ env.pyfunc_entries.remove(self.node.entry)
+
+ for cname, fused_to_specific in permutations:
+ copied_node = copy.deepcopy(self.node)
+ # keep signature object identity for special casing in DefNode.analyse_declarations()
+ copied_node.entry.signature = self.node.entry.signature
+
+ self._specialize_function_args(copied_node.args, fused_to_specific)
+ copied_node.return_type = self.node.return_type.specialize(
+ fused_to_specific)
+
+ copied_node.analyse_declarations(env)
+ # copied_node.is_staticmethod = self.node.is_staticmethod
+ # copied_node.is_classmethod = self.node.is_classmethod
+ self.create_new_local_scope(copied_node, env, fused_to_specific)
+ self.specialize_copied_def(copied_node, cname, self.node.entry,
+ fused_to_specific, fused_compound_types)
+
+ PyrexTypes.specialize_entry(copied_node.entry, cname)
+ copied_node.entry.used = True
+ env.entries[copied_node.entry.name] = copied_node.entry
+
+ if not self.replace_fused_typechecks(copied_node):
+ break
+
+ self.orig_py_func = self.node
+ self.py_func = self.make_fused_cpdef(self.node, env, is_def=True)
+
+ def copy_cdef(self, env):
+ """
+ Create a copy of the original c(p)def function for all specialized
+ versions.
+ """
+ permutations = self.node.type.get_all_specialized_permutations()
+ # print 'Node %s has %d specializations:' % (self.node.entry.name,
+ # len(permutations))
+ # import pprint; pprint.pprint([d for cname, d in permutations])
+
+ # Prevent copying of the python function
+ self.orig_py_func = orig_py_func = self.node.py_func
+ self.node.py_func = None
+ if orig_py_func:
+ env.pyfunc_entries.remove(orig_py_func.entry)
+
+ fused_types = self.node.type.get_fused_types()
+ self.fused_compound_types = fused_types
+
+ new_cfunc_entries = []
+ for cname, fused_to_specific in permutations:
+ copied_node = copy.deepcopy(self.node)
+
+ # Make the types in our CFuncType specific.
+ try:
+ type = copied_node.type.specialize(fused_to_specific)
+ except CannotSpecialize:
+ # unlike for the argument types, specializing the return type can fail
+ error(copied_node.pos, "Return type is a fused type that cannot "
+ "be determined from the function arguments")
+ self.py_func = None # this is just to let the compiler exit gracefully
+ return
+ entry = copied_node.entry
+ type.specialize_entry(entry, cname)
+
+ # Reuse existing Entries (e.g. from .pxd files).
+ for i, orig_entry in enumerate(env.cfunc_entries):
+ if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type):
+ copied_node.entry = env.cfunc_entries[i]
+ if not copied_node.entry.func_cname:
+ copied_node.entry.func_cname = entry.func_cname
+ entry = copied_node.entry
+ type = entry.type
+ break
+ else:
+ new_cfunc_entries.append(entry)
+
+ copied_node.type = type
+ entry.type, type.entry = type, entry
+
+ entry.used = (entry.used or
+ self.node.entry.defined_in_pxd or
+ env.is_c_class_scope or
+ entry.is_cmethod)
+
+ if self.node.cfunc_declarator.optional_arg_count:
+ self.node.cfunc_declarator.declare_optional_arg_struct(
+ type, env, fused_cname=cname)
+
+ copied_node.return_type = type.return_type
+ self.create_new_local_scope(copied_node, env, fused_to_specific)
+
+ # Make the argument types in the CFuncDeclarator specific
+ self._specialize_function_args(copied_node.cfunc_declarator.args,
+ fused_to_specific)
+
+ # If a cpdef, declare all specialized cpdefs (this
+ # also calls analyse_declarations)
+ copied_node.declare_cpdef_wrapper(env)
+ if copied_node.py_func:
+ env.pyfunc_entries.remove(copied_node.py_func.entry)
+
+ self.specialize_copied_def(
+ copied_node.py_func, cname, self.node.entry.as_variable,
+ fused_to_specific, fused_types)
+
+ if not self.replace_fused_typechecks(copied_node):
+ break
+
+ # replace old entry with new entries
+ if self.node.entry in env.cfunc_entries:
+ cindex = env.cfunc_entries.index(self.node.entry)
+ env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries
+ else:
+ env.cfunc_entries.extend(new_cfunc_entries)
+
+ if orig_py_func:
+ self.py_func = self.make_fused_cpdef(orig_py_func, env,
+ is_def=False)
+ else:
+ self.py_func = orig_py_func
+
+ def _get_fused_base_types(self, fused_compound_types):
+ """
+ Get a list of unique basic fused types, from a list of
+ (possibly) compound fused types.
+ """
+ base_types = []
+ seen = set()
+ for fused_type in fused_compound_types:
+ fused_type.get_fused_types(result=base_types, seen=seen)
+ return base_types
+
+ def _specialize_function_args(self, args, fused_to_specific):
+ for arg in args:
+ if arg.type.is_fused:
+ arg.type = arg.type.specialize(fused_to_specific)
+ if arg.type.is_memoryviewslice:
+ arg.type.validate_memslice_dtype(arg.pos)
+ if arg.annotation:
+ # TODO might be nice if annotations were specialized instead?
+ # (Or might be hard to do reliably)
+ arg.annotation.untyped = True
+
+ def create_new_local_scope(self, node, env, f2s):
+ """
+ Create a new local scope for the copied node and append it to
+ self.nodes. A new local scope is needed because the arguments with the
+ fused types are already in the local scope, and we need the specialized
+ entries created after analyse_declarations on each specialized version
+ of the (CFunc)DefNode.
+ f2s is a dict mapping each fused type to its specialized version
+ """
+ node.create_local_scope(env)
+ node.local_scope.fused_to_specific = f2s
+
+ # This is copied from the original function, set it to false to
+ # stop recursion
+ node.has_fused_arguments = False
+ self.nodes.append(node)
+
+ def specialize_copied_def(self, node, cname, py_entry, f2s, fused_compound_types):
+ """Specialize the copy of a DefNode given the copied node,
+ the specialization cname and the original DefNode entry"""
+ fused_types = self._get_fused_base_types(fused_compound_types)
+ type_strings = [
+ PyrexTypes.specialization_signature_string(fused_type, f2s)
+ for fused_type in fused_types
+ ]
+
+ node.specialized_signature_string = '|'.join(type_strings)
+
+ node.entry.pymethdef_cname = PyrexTypes.get_fused_cname(
+ cname, node.entry.pymethdef_cname)
+ node.entry.doc = py_entry.doc
+ node.entry.doc_cname = py_entry.doc_cname
+
+ def replace_fused_typechecks(self, copied_node):
+ """
+ Branch-prune fused type checks like
+
+ if fused_t is int:
+ ...
+
+ Returns False if an error was issued, so that processing can stop
+ early instead of producing a flood of errors.
+ """
+ num_errors = Errors.get_errors_count()
+ transform = ParseTreeTransforms.ReplaceFusedTypeChecks(
+ copied_node.local_scope)
+ transform(copied_node)
+
+ if Errors.get_errors_count() > num_errors:
+ return False
+
+ return True
+
+ def _fused_instance_checks(self, normal_types, pyx_code, env):
+ """
+ Generate Cython code for instance checks, matching an object to
+ specialized types.
+ """
+ for specialized_type in normal_types:
+ # all_numeric = all_numeric and specialized_type.is_numeric
+ py_type_name = specialized_type.py_type_name()
+ if py_type_name == 'int':
+ # Support Python 2 long
+ py_type_name = '(int, long)'
+ pyx_code.context.update(
+ py_type_name=py_type_name,
+ specialized_type_name=specialized_type.specialization_string,
+ )
+ pyx_code.put_chunk(
+ u"""
+ if isinstance(arg, {{py_type_name}}):
+ dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'; break
+ """)
+
+ def _dtype_name(self, dtype):
+ name = str(dtype).replace('_', '__').replace(' ', '_')
+ if dtype.is_typedef:
+ name = Naming.fused_dtype_prefix + name
+ return name
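+ # Illustration: _dtype_name('unsigned long') would yield 'unsigned_long',
+ # while a typedef 'my_int' becomes Naming.fused_dtype_prefix + 'my__int'
+ # ('_' is doubled first, so names containing spaces cannot collide with
+ # names containing underscores).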
+
+ def _dtype_type(self, dtype):
+ if dtype.is_typedef:
+ return self._dtype_name(dtype)
+ return str(dtype)
+
+ def _sizeof_dtype(self, dtype):
+ if dtype.is_pyobject:
+ return 'sizeof(void *)'
+ else:
+ return "sizeof(%s)" % self._dtype_type(dtype)
+
+ def _buffer_check_numpy_dtype_setup_cases(self, pyx_code):
+ "Setup some common cases to match dtypes against specializations"
+ with pyx_code.indenter("if kind in u'iu':"):
+ pyx_code.putln("pass")
+ pyx_code.named_insertion_point("dtype_int")
+
+ with pyx_code.indenter("elif kind == u'f':"):
+ pyx_code.putln("pass")
+ pyx_code.named_insertion_point("dtype_float")
+
+ with pyx_code.indenter("elif kind == u'c':"):
+ pyx_code.putln("pass")
+ pyx_code.named_insertion_point("dtype_complex")
+
+ with pyx_code.indenter("elif kind == u'O':"):
+ pyx_code.putln("pass")
+ pyx_code.named_insertion_point("dtype_object")
+
+ match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'"
+ no_match = "dest_sig[{{dest_sig_idx}}] = None"
+
+ def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
+ """
+ Match a numpy dtype object to the individual specializations.
+ """
+ self._buffer_check_numpy_dtype_setup_cases(pyx_code)
+
+ for specialized_type in pythran_types+specialized_buffer_types:
+ final_type = specialized_type
+ if specialized_type.is_pythran_expr:
+ specialized_type = specialized_type.org_buffer
+ dtype = specialized_type.dtype
+ pyx_code.context.update(
+ itemsize_match=self._sizeof_dtype(dtype) + " == itemsize",
+ signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype),
+ dtype=dtype,
+ specialized_type_name=final_type.specialization_string)
+
+ dtypes = [
+ (dtype.is_int, pyx_code.dtype_int),
+ (dtype.is_float, pyx_code.dtype_float),
+ (dtype.is_complex, pyx_code.dtype_complex)
+ ]
+
+ for dtype_category, codewriter in dtypes:
+ if not dtype_category:
+ continue
+ cond = '{{itemsize_match}} and (arg.ndim) == %d' % (
+ specialized_type.ndim,)
+ if dtype.is_int:
+ cond += ' and {{signed_match}}'
+
+ if final_type.is_pythran_expr:
+ cond += ' and arg_is_pythran_compatible'
+
+ with codewriter.indenter("if %s:" % cond):
+ #codewriter.putln("print 'buffer match found based on numpy dtype'")
+ codewriter.putln(self.match)
+ codewriter.putln("break")
+
+ def _buffer_parse_format_string_check(self, pyx_code, decl_code,
+ specialized_type, env):
+ """
+ For each specialized type, try to coerce the object to a memoryview
+ slice of that type. This means obtaining a buffer and parsing the
+ format string.
+ TODO: separate buffer acquisition from format parsing
+ """
+ dtype = specialized_type.dtype
+ if specialized_type.is_buffer:
+ axes = [('direct', 'strided')] * specialized_type.ndim
+ else:
+ axes = specialized_type.axes
+
+ memslice_type = PyrexTypes.MemoryViewSliceType(dtype, axes)
+ memslice_type.create_from_py_utility_code(env)
+ pyx_code.context.update(
+ coerce_from_py_func=memslice_type.from_py_function,
+ dtype=dtype)
+ decl_code.putln(
+ "{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)")
+
+ pyx_code.context.update(
+ specialized_type_name=specialized_type.specialization_string,
+ sizeof_dtype=self._sizeof_dtype(dtype),
+ ndim_dtype=specialized_type.ndim,
+ dtype_is_struct_obj=int(dtype.is_struct or dtype.is_pyobject))
+
+ # use the memoryview object to check itemsize and ndim.
+ # In principle it could check more, but these are the easiest to do quickly
+ pyx_code.put_chunk(
+ u"""
+ # try {{dtype}}
+ if (((itemsize == -1 and arg_as_memoryview.itemsize == {{sizeof_dtype}})
+ or itemsize == {{sizeof_dtype}})
+ and arg_as_memoryview.ndim == {{ndim_dtype}}):
+ {{if dtype_is_struct_obj}}
+ if __PYX_IS_PYPY2:
+ # I wasn't able to diagnose why, but PyPy2 fails to convert a
+ # memoryview to a Cython memoryview in this case
+ memslice = {{coerce_from_py_func}}(arg, 0)
+ else:
+ {{else}}
+ if True:
+ {{endif}}
+ memslice = {{coerce_from_py_func}}(arg_as_memoryview, 0)
+ if memslice.memview:
+ __PYX_XCLEAR_MEMVIEW(&memslice, 1)
+ # print 'found a match for the buffer through format parsing'
+ %s
+ break
+ else:
+ __pyx_PyErr_Clear()
+ """ % self.match)
+
+ def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, accept_none, env):
+ """
+ Generate Cython code to match objects to buffer specializations.
+ First try to get a numpy dtype object and match it against the individual
+ specializations. If that fails, try naively to coerce the object
+ to each specialization, which obtains the buffer each time and tries
+ to match the format string.
+ """
+ # The first check that finds a match breaks out of the loop
+ pyx_code.put_chunk(
+ u"""
+ """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u"""
+ if ndarray is not None:
+ if isinstance(arg, ndarray):
+ dtype = arg.dtype
+ """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u"""
+ elif __pyx_memoryview_check(arg):
+ arg_base = arg.base
+ if isinstance(arg_base, ndarray):
+ dtype = arg_base.dtype
+ else:
+ dtype = None
+ else:
+ dtype = None
+
+ itemsize = -1
+ if dtype is not None:
+ itemsize = dtype.itemsize
+ kind = ord(dtype.kind)
+ dtype_signed = kind == u'i'
+ """)
+ pyx_code.indent(2)
+ if pythran_types:
+ pyx_code.put_chunk(
+ u"""
+ # Pythran only supports the endianness of the current compiler
+ byteorder = dtype.byteorder
+ if byteorder == "<" and not __Pyx_Is_Little_Endian():
+ arg_is_pythran_compatible = False
+ elif byteorder == ">" and __Pyx_Is_Little_Endian():
+ arg_is_pythran_compatible = False
+ if arg_is_pythran_compatible:
+ cur_stride = itemsize
+ shape = arg.shape
+ strides = arg.strides
+ for i in range(arg.ndim-1, -1, -1):
+ if (strides[i]) != cur_stride:
+ arg_is_pythran_compatible = False
+ break
+ cur_stride *= shape[i]
+ else:
+ arg_is_pythran_compatible = not (arg.flags.f_contiguous and (arg.ndim) > 1)
+ """)
+ pyx_code.named_insertion_point("numpy_dtype_checks")
+ self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
+ pyx_code.dedent(2)
+
+ if accept_none:
+ # If None is acceptable, then Cython <3.0 matched None with the
+ # first type. This behaviour isn't ideal, but keep it for backwards
+ # compatibility. Better behaviour would be to see if subsequent
+ # arguments give a stronger match.
+ pyx_code.context.update(
+ specialized_type_name=buffer_types[0].specialization_string
+ )
+ pyx_code.put_chunk(
+ """
+ if arg is None:
+ %s
+ break
+ """ % self.match)
+
+ # creating a Cython memoryview from a Python memoryview avoids the
+ # need to get the buffer multiple times, and we can
+ # also use it to check itemsizes etc
+ pyx_code.put_chunk(
+ """
+ try:
+ arg_as_memoryview = memoryview(arg)
+ except (ValueError, TypeError):
+ pass
+ """)
+ with pyx_code.indenter("else:"):
+ for specialized_type in buffer_types:
+ self._buffer_parse_format_string_check(
+ pyx_code, decl_code, specialized_type, env)
+
+ def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
+ """
+ If we have any buffer specializations, write out some variable
+ declarations and imports.
+ """
+ decl_code.put_chunk(
+ u"""
+ ctypedef struct {{memviewslice_cname}}:
+ void *memview
+
+ void __PYX_XCLEAR_MEMVIEW({{memviewslice_cname}} *, int have_gil)
+ bint __pyx_memoryview_check(object)
+ bint __PYX_IS_PYPY2 "(CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION == 2)"
+ """)
+
+ pyx_code.local_variable_declarations.put_chunk(
+ u"""
+ cdef {{memviewslice_cname}} memslice
+ cdef Py_ssize_t itemsize
+ cdef bint dtype_signed
+ cdef Py_UCS4 kind
+
+ itemsize = -1
+ """)
+
+ if pythran_types:
+ pyx_code.local_variable_declarations.put_chunk(u"""
+ cdef bint arg_is_pythran_compatible
+ cdef Py_ssize_t cur_stride
+ """)
+
+ pyx_code.imports.put_chunk(
+ u"""
+ cdef type ndarray
+ ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable()
+ """)
+
+ pyx_code.imports.put_chunk(
+ u"""
+ cdef memoryview arg_as_memoryview
+ """
+ )
+
+ seen_typedefs = set()
+ seen_int_dtypes = set()
+ for buffer_type in all_buffer_types:
+ dtype = buffer_type.dtype
+ dtype_name = self._dtype_name(dtype)
+ if dtype.is_typedef:
+ if dtype_name not in seen_typedefs:
+ seen_typedefs.add(dtype_name)
+ decl_code.putln(
+ 'ctypedef %s %s "%s"' % (dtype.resolve(), dtype_name,
+ dtype.empty_declaration_code()))
+
+ if buffer_type.dtype.is_int:
+ if str(dtype) not in seen_int_dtypes:
+ seen_int_dtypes.add(str(dtype))
+ pyx_code.context.update(dtype_name=dtype_name,
+ dtype_type=self._dtype_type(dtype))
+ pyx_code.local_variable_declarations.put_chunk(
+ u"""
+ cdef bint {{dtype_name}}_is_signed
+ {{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0)
+ """)
+
+ def _split_fused_types(self, arg):
+ """
+ Specialize fused types and split into normal types and buffer types.
+ """
+ specialized_types = PyrexTypes.get_specialized_types(arg.type)
+
+ # Prefer long over int, etc by sorting (see type classes in PyrexTypes.py)
+ specialized_types.sort()
+
+ seen_py_type_names = set()
+ normal_types, buffer_types, pythran_types = [], [], []
+ has_object_fallback = False
+ for specialized_type in specialized_types:
+ py_type_name = specialized_type.py_type_name()
+ if py_type_name:
+ if py_type_name in seen_py_type_names:
+ continue
+ seen_py_type_names.add(py_type_name)
+ if py_type_name == 'object':
+ has_object_fallback = True
+ else:
+ normal_types.append(specialized_type)
+ elif specialized_type.is_pythran_expr:
+ pythran_types.append(specialized_type)
+ elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
+ buffer_types.append(specialized_type)
+
+ return normal_types, buffer_types, pythran_types, has_object_fallback
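+ # Illustration (hypothetical fused type): for a fused type grouping
+ # int, float, object and double[:], this would return
+ # normal_types=[int, float], buffer_types=[double[:]],
+ # pythran_types=[] and has_object_fallback=True.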
+
+ def _unpack_argument(self, pyx_code):
+ pyx_code.put_chunk(
+ u"""
+ # PROCESSING ARGUMENT {{arg_tuple_idx}}
+ if {{arg_tuple_idx}} < len(<tuple>args):
+ arg = (<tuple>args)[{{arg_tuple_idx}}]
+ elif kwargs is not None and '{{arg.name}}' in <dict>kwargs:
+ arg = (<dict>kwargs)['{{arg.name}}']
+ else:
+ {{if arg.default}}
+ arg = (<tuple>defaults)[{{default_idx}}]
+ {{else}}
+ {{if arg_tuple_idx < min_positional_args}}
+ raise TypeError("Expected at least %d argument%s, got %d" % (
+ {{min_positional_args}}, {{'"s"' if min_positional_args != 1 else '""'}}, len(<tuple>args)))
+ {{else}}
+ raise TypeError("Missing keyword-only argument: '%s'" % "{{arg.name}}")
+ {{endif}}
+ {{endif}}
+ """)
+
+ def _fused_signature_index(self, pyx_code):
+ """
+ Generate Cython code for constructing a persistent nested dictionary index of
+ fused type specialization signatures.
+ """
+ pyx_code.put_chunk(
+ u"""
+ if not _fused_sigindex:
+ for sig in signatures:
+ sigindex_node = _fused_sigindex
+ *sig_series, last_type = sig.strip('()').split('|')
+ for sig_type in sig_series:
+ if sig_type not in sigindex_node:
+ sigindex_node[sig_type] = sigindex_node = {}
+ else:
+ sigindex_node = sigindex_node[sig_type]
+ sigindex_node[last_type] = sig
+ """
+ )
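+ # For illustration: signatures like '(int|double)' and '(float|double)'
+ # would produce the nested index
+ # {'int': {'double': '(int|double)'}, 'float': {'double': '(float|double)'}}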
+
+ def make_fused_cpdef(self, orig_py_func, env, is_def):
+ """
+ This creates the function that is indexable from Python and does
+ runtime dispatch based on the argument types. The function gets the
+ arg tuple and kwargs dict (or None) and the defaults tuple
+ as arguments from the Binding Fused Function's tp_call.
+ """
+ from . import TreeFragment, Code, UtilityCode
+
+ fused_types = self._get_fused_base_types([
+ arg.type for arg in self.node.args if arg.type.is_fused])
+
+ context = {
+ 'memviewslice_cname': MemoryView.memviewslice_cname,
+ 'func_args': self.node.args,
+ 'n_fused': len(fused_types),
+ 'min_positional_args':
+ self.node.num_required_args - self.node.num_required_kw_args
+ if is_def else
+ sum(1 for arg in self.node.args if arg.default is None),
+ 'name': orig_py_func.entry.name,
+ }
+
+ pyx_code = Code.PyxCodeWriter(context=context)
+ decl_code = Code.PyxCodeWriter(context=context)
+ decl_code.put_chunk(
+ u"""
+ cdef extern from *:
+ void __pyx_PyErr_Clear "PyErr_Clear" ()
+ type __Pyx_ImportNumPyArrayTypeIfAvailable()
+ int __Pyx_Is_Little_Endian()
+ """)
+ decl_code.indent()
+
+ pyx_code.put_chunk(
+ u"""
+ def __pyx_fused_cpdef(signatures, args, kwargs, defaults, _fused_sigindex={}):
+ # FIXME: use a typed signature - currently fails badly because
+ # default arguments inherit the types we specify here!
+
+ cdef list search_list
+ cdef dict sigindex_node
+
+ dest_sig = [None] * {{n_fused}}
+
+ if kwargs is not None and not kwargs:
+ kwargs = None
+
+ cdef Py_ssize_t i
+
+ # instance check body
+ """)
+
+ pyx_code.indent() # indent following code to function body
+ pyx_code.named_insertion_point("imports")
+ pyx_code.named_insertion_point("func_defs")
+ pyx_code.named_insertion_point("local_variable_declarations")
+
+ fused_index = 0
+ default_idx = 0
+ all_buffer_types = OrderedSet()
+ seen_fused_types = set()
+ for i, arg in enumerate(self.node.args):
+ if arg.type.is_fused:
+ arg_fused_types = arg.type.get_fused_types()
+ if len(arg_fused_types) > 1:
+ raise NotImplementedError("Determination of more than one fused base "
+ "type per argument is not implemented.")
+ fused_type = arg_fused_types[0]
+
+ if arg.type.is_fused and fused_type not in seen_fused_types:
+ seen_fused_types.add(fused_type)
+
+ context.update(
+ arg_tuple_idx=i,
+ arg=arg,
+ dest_sig_idx=fused_index,
+ default_idx=default_idx,
+ )
+
+ normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
+ self._unpack_argument(pyx_code)
+
+ # 'unrolled' loop, first match breaks out of it
+ with pyx_code.indenter("while 1:"):
+ if normal_types:
+ self._fused_instance_checks(normal_types, pyx_code, env)
+ if buffer_types or pythran_types:
+ env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c"))
+ self._buffer_checks(
+ buffer_types, pythran_types, pyx_code, decl_code,
+ arg.accept_none, env)
+ if has_object_fallback:
+ pyx_code.context.update(specialized_type_name='object')
+ pyx_code.putln(self.match)
+ else:
+ pyx_code.putln(self.no_match)
+ pyx_code.putln("break")
+
+ fused_index += 1
+ all_buffer_types.update(buffer_types)
+ all_buffer_types.update(ty.org_buffer for ty in pythran_types)
+
+ if arg.default:
+ default_idx += 1
+
+ if all_buffer_types:
+ self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types)
+ env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c"))
+ env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))
+
+ self._fused_signature_index(pyx_code)
+
+ pyx_code.put_chunk(
+ u"""
+ sigindex_matches = []
+ sigindex_candidates = [_fused_sigindex]
+
+ for dst_type in dest_sig:
+ found_matches = []
+ found_candidates = []
+ # Make two separate lists: One for signature sub-trees
+ # with at least one definite match, and another for
+ # signature sub-trees with only ambiguous matches
+ # (where `dest_sig[i] is None`).
+ if dst_type is None:
+ for sn in sigindex_matches:
+ found_matches.extend((<dict>sn).values())
+ for sn in sigindex_candidates:
+ found_candidates.extend((<dict>sn).values())
+ else:
+ for search_list in (sigindex_matches, sigindex_candidates):
+ for sn in search_list:
+ type_match = (<dict>sn).get(dst_type)
+ if type_match is not None:
+ found_matches.append(type_match)
+ sigindex_matches = found_matches
+ sigindex_candidates = found_candidates
+ if not (found_matches or found_candidates):
+ break
+
+ candidates = sigindex_matches
+
+ if not candidates:
+ raise TypeError("No matching signature found")
+ elif len(candidates) > 1:
+ raise TypeError("Function call with ambiguous argument types")
+ else:
+ return (<dict>signatures)[candidates[0]]
+ """)
+
+ fragment_code = pyx_code.getvalue()
+ # print decl_code.getvalue()
+ # print fragment_code
+ from .Optimize import ConstantFolding
+ fragment = TreeFragment.TreeFragment(
+ fragment_code, level='module', pipeline=[ConstantFolding()])
+ ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root)
+ UtilityCode.declare_declarations_in_scope(
+ decl_code.getvalue(), env.global_scope())
+ ast.scope = env
+ # FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self'
+ ast.analyse_declarations(env)
+ py_func = ast.stats[-1] # the DefNode
+ self.fragment_scope = ast.scope
+
+ if isinstance(self.node, DefNode):
+ py_func.specialized_cpdefs = self.nodes[:]
+ else:
+ py_func.specialized_cpdefs = [n.py_func for n in self.nodes]
+
+ return py_func
+
+ def update_fused_defnode_entry(self, env):
+ copy_attributes = (
+ 'name', 'pos', 'cname', 'func_cname', 'pyfunc_cname',
+ 'pymethdef_cname', 'doc', 'doc_cname', 'is_member',
+ 'scope'
+ )
+
+ entry = self.py_func.entry
+
+ for attr in copy_attributes:
+ setattr(entry, attr,
+ getattr(self.orig_py_func.entry, attr))
+
+ self.py_func.name = self.orig_py_func.name
+ self.py_func.doc = self.orig_py_func.doc
+
+ env.entries.pop('__pyx_fused_cpdef', None)
+ if isinstance(self.node, DefNode):
+ env.entries[entry.name] = entry
+ else:
+ env.entries[entry.name].as_variable = entry
+
+ env.pyfunc_entries.append(entry)
+
+ self.py_func.entry.fused_cfunction = self
+ for node in self.nodes:
+ if isinstance(self.node, DefNode):
+ node.fused_py_func = self.py_func
+ else:
+ node.py_func.fused_py_func = self.py_func
+ node.entry.as_variable = entry
+
+ self.synthesize_defnodes()
+ self.stats.append(self.__signatures__)
+
+ def analyse_expressions(self, env):
+ """
+ Analyse the expressions. Take care to only evaluate default arguments
+ once and clone the result for all specializations
+ """
+ for fused_compound_type in self.fused_compound_types:
+ for fused_type in fused_compound_type.get_fused_types():
+ for specialization_type in fused_type.types:
+ if specialization_type.is_complex:
+ specialization_type.create_declaration_utility_code(env)
+
+ if self.py_func:
+ self.__signatures__ = self.__signatures__.analyse_expressions(env)
+ self.py_func = self.py_func.analyse_expressions(env)
+ self.resulting_fused_function = self.resulting_fused_function.analyse_expressions(env)
+ self.fused_func_assignment = self.fused_func_assignment.analyse_expressions(env)
+
+ self.defaults = defaults = []
+
+ for arg in self.node.args:
+ if arg.default:
+ arg.default = arg.default.analyse_expressions(env)
+ if arg.default.is_literal:
+ defaults.append(copy.copy(arg.default))
+ else:
+ # coerce the argument to temp since CloneNode really requires a temp
+ defaults.append(ProxyNode(arg.default.coerce_to_temp(env)))
+ else:
+ defaults.append(None)
+
+ for i, stat in enumerate(self.stats):
+ stat = self.stats[i] = stat.analyse_expressions(env)
+ if isinstance(stat, FuncDefNode) and stat is not self.py_func:
+ # the dispatcher specifically doesn't want its defaults overridden
+ for arg, default in zip(stat.args, defaults):
+ if default is not None:
+ if default.is_literal:
+ arg.default = default.coerce_to(arg.type, env)
+ else:
+ arg.default = CloneNode(default).analyse_expressions(env).coerce_to(arg.type, env)
+
+ if self.py_func:
+ args = [CloneNode(default) for default in defaults if default]
+ self.defaults_tuple = TupleNode(self.pos, args=args)
+ self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
+ self.defaults_tuple = ProxyNode(self.defaults_tuple)
+ self.code_object = ProxyNode(self.specialized_pycfuncs[0].code_object)
+
+ fused_func = self.resulting_fused_function.arg
+ fused_func.defaults_tuple = CloneNode(self.defaults_tuple)
+ fused_func.code_object = CloneNode(self.code_object)
+
+ for i, pycfunc in enumerate(self.specialized_pycfuncs):
+ pycfunc.code_object = CloneNode(self.code_object)
+ pycfunc = self.specialized_pycfuncs[i] = pycfunc.analyse_types(env)
+ pycfunc.defaults_tuple = CloneNode(self.defaults_tuple)
+ return self
+
+ def synthesize_defnodes(self):
+ """
+ Create the __signatures__ dict of PyCFunctionNode specializations.
+ """
+ if isinstance(self.nodes[0], CFuncDefNode):
+ nodes = [node.py_func for node in self.nodes]
+ else:
+ nodes = self.nodes
+
+ # For the moment, fused functions do not support METH_FASTCALL
+ for node in nodes:
+ node.entry.signature.use_fastcall = False
+
+ signatures = [StringEncoding.EncodedString(node.specialized_signature_string)
+ for node in nodes]
+ keys = [ExprNodes.StringNode(node.pos, value=sig)
+ for node, sig in zip(nodes, signatures)]
+ values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True)
+ for node in nodes]
+
+ self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values))
+
+ self.specialized_pycfuncs = values
+ for pycfuncnode in values:
+ pycfuncnode.is_specialization = True
+
+ def generate_function_definitions(self, env, code):
+ if self.py_func:
+ self.py_func.pymethdef_required = True
+ self.fused_func_assignment.generate_function_definitions(env, code)
+
+ from . import Options
+ for stat in self.stats:
+ if isinstance(stat, FuncDefNode) and (
+ stat.entry.used or
+ (Options.cimport_from_pyx and not stat.entry.visibility == 'extern')):
+ code.mark_pos(stat.pos)
+ stat.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ # Note: all def function specializations are wrapped in PyCFunction
+ # nodes in the self.__signatures__ dictnode.
+ for default in self.defaults:
+ if default is not None:
+ default.generate_evaluation_code(code)
+
+ if self.py_func:
+ self.defaults_tuple.generate_evaluation_code(code)
+ self.code_object.generate_evaluation_code(code)
+
+ for stat in self.stats:
+ code.mark_pos(stat.pos)
+ if isinstance(stat, ExprNodes.ExprNode):
+ stat.generate_evaluation_code(code)
+ else:
+ stat.generate_execution_code(code)
+
+ if self.__signatures__:
+ self.resulting_fused_function.generate_evaluation_code(code)
+
+ code.putln(
+ "((__pyx_FusedFunctionObject *) %s)->__signatures__ = %s;" %
+ (self.resulting_fused_function.result(),
+ self.__signatures__.result()))
+ self.__signatures__.generate_giveref(code)
+ self.__signatures__.generate_post_assignment_code(code)
+ self.__signatures__.free_temps(code)
+
+ self.fused_func_assignment.generate_execution_code(code)
+
+ # Dispose of results
+ self.resulting_fused_function.generate_disposal_code(code)
+ self.resulting_fused_function.free_temps(code)
+ self.defaults_tuple.generate_disposal_code(code)
+ self.defaults_tuple.free_temps(code)
+ self.code_object.generate_disposal_code(code)
+ self.code_object.free_temps(code)
+
+ for default in self.defaults:
+ if default is not None:
+ default.generate_disposal_code(code)
+ default.free_temps(code)
+
+ def annotate(self, code):
+ for stat in self.stats:
+ stat.annotate(code)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Future.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Future.py
new file mode 100644
index 0000000000000000000000000000000000000000..8de10c0cb583f206808269ce6dbcf7fcb59c39b4
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Future.py
@@ -0,0 +1,16 @@
+def _get_feature(name):
+ import __future__
+ # fall back to a unique fake object for earlier Python versions or Python 3
+ return getattr(__future__, name, object())
+
+unicode_literals = _get_feature("unicode_literals")
+with_statement = _get_feature("with_statement") # dummy
+division = _get_feature("division")
+print_function = _get_feature("print_function")
+absolute_import = _get_feature("absolute_import")
+nested_scopes = _get_feature("nested_scopes") # dummy
+generators = _get_feature("generators") # dummy
+generator_stop = _get_feature("generator_stop")
+annotations = _get_feature("annotations")
+
+del _get_feature
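+
+# These feature objects are compared by identity. As an illustrative usage
+# sketch (the 'future_directives' attribute name is an assumption about the
+# surrounding compiler code), a check like
+# 'division in scope.context.future_directives' would tell the compiler
+# whether a module opted in to 'from __future__ import division'.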
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Interpreter.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Interpreter.py
new file mode 100644
index 0000000000000000000000000000000000000000..244397264f76784e90cced119bb18eb57149f9e4
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Interpreter.py
@@ -0,0 +1,64 @@
+"""
+This module deals with interpreting the parse tree as Python
+would have done, in the compiler.
+
+For now this only covers parse tree to value conversion of
+compile-time values.
+"""
+
+from __future__ import absolute_import
+
+import sys
+
+from .Nodes import *
+from .ExprNodes import *
+from .Errors import CompileError
+
+
+class EmptyScope(object):
+ def lookup(self, name):
+ return None
+
+empty_scope = EmptyScope()
+
+def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()):
+ """
+ Tries to interpret a list of compile-time option nodes.
+ The result is a tuple (optlist, optdict) in which all expression
+ nodes have been interpreted; each entry has the form (value, pos).
+
+ optlist is a list of nodes, while optdict is a DictNode (the
+ resulting optdict is a plain dict).
+
+ If type_env is set, all type nodes will be analysed and the resulting
+ types set on them. Otherwise, only interpretable ExprNodes
+ are allowed; other nodes raise errors.
+
+ A CompileError will be raised if there are problems.
+ """
+
+ def interpret(node, ix):
+ if ix in type_args:
+ if type_env:
+ type = node.analyse_as_type(type_env)
+ if not type:
+ raise CompileError(node.pos, "Invalid type.")
+ return (type, node.pos)
+ else:
+ raise CompileError(node.pos, "Type not allowed here.")
+ else:
+ if (sys.version_info[0] >= 3 and
+ isinstance(node, StringNode) and
+ node.unicode_value is not None):
+ return (node.unicode_value, node.pos)
+ return (node.compile_time_value(empty_scope), node.pos)
+
+ if optlist:
+ optlist = [interpret(x, ix) for ix, x in enumerate(optlist)]
+ if optdict:
+ assert isinstance(optdict, DictNode)
+ new_optdict = {}
+ for item in optdict.key_value_pairs:
+ new_key, dummy = interpret(item.key, None)
+ new_optdict[new_key] = interpret(item.value, item.key.value)
+ optdict = new_optdict
+ return (optlist, optdict)
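+
+# Illustrative usage (hypothetical nodes): for an options list holding a
+# single IntNode with value 42 and no keyword options, the call
+# interpret_compiletime_options([int_node], None) would return
+# ([(42, int_node.pos)], None).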
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Lexicon.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Lexicon.py
new file mode 100644
index 0000000000000000000000000000000000000000..0820c2397e1fb566756460d99be54ce377c0c9e4
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Lexicon.py
@@ -0,0 +1,342 @@
+# -*- coding: utf-8 -*-
+# cython: language_level=3, py2_import=True
+#
+# Cython Scanner - Lexical Definitions
+#
+
+from __future__ import absolute_import, unicode_literals
+
+raw_prefixes = "rR"
+bytes_prefixes = "bB"
+string_prefixes = "fFuU" + bytes_prefixes
+char_prefixes = "cC"
+any_string_prefix = raw_prefixes + string_prefixes + char_prefixes
+IDENT = 'IDENT'
+
+
+def make_lexicon():
+ from ..Plex import \
+ Str, Any, AnyBut, AnyChar, Rep, Rep1, Opt, Bol, Eol, Eof, \
+ TEXT, IGNORE, Method, State, Lexicon, Range
+
+ nonzero_digit = Any("123456789")
+ digit = Any("0123456789")
+ bindigit = Any("01")
+ octdigit = Any("01234567")
+ hexdigit = Any("0123456789ABCDEFabcdef")
+ indentation = Bol + Rep(Any(" \t"))
+
+ # The list of valid unicode identifier characters is pretty slow to generate
+ # at runtime and requires Python 3, so it is just included directly here
+ # (via the generated code block at the bottom of the file)
+ unicode_start_character = (Any(unicode_start_ch_any) | Range(unicode_start_ch_range))
+ unicode_continuation_character = (
+ unicode_start_character |
+ Any(unicode_continuation_ch_any) | Range(unicode_continuation_ch_range))
+
+ def underscore_digits(d):
+ return Rep1(d) + Rep(Str("_") + Rep1(d))
+
+ def prefixed_digits(prefix, digits):
+ return prefix + Opt(Str("_")) + underscore_digits(digits)
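+ # e.g. underscore_digits(digit) matches '1_000_000' but not '_100' or
+ # '100_', and prefixed_digits allows an optional separator right after
+ # the prefix, so both '0xFF' and '0x_FF' are accepted.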
+
+ decimal = underscore_digits(digit)
+ dot = Str(".")
+ exponent = Any("Ee") + Opt(Any("+-")) + decimal
+ decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal)
+
+ #name = letter + Rep(letter | digit)
+ name = unicode_start_character + Rep(unicode_continuation_character)
+ intconst = (prefixed_digits(nonzero_digit, digit) | # decimal literals with underscores must not start with '0'
+ (Str("0") + (prefixed_digits(Any("Xx"), hexdigit) |
+ prefixed_digits(Any("Oo"), octdigit) |
+ prefixed_digits(Any("Bb"), bindigit) )) |
+ underscore_digits(Str('0')) # 0_0_0_0... is allowed as a decimal literal
+ | Rep1(digit) # FIXME: remove these Py2 style decimal/octal literals (PY_VERSION_HEX < 3)
+ )
+ intsuffix = (Opt(Any("Uu")) + Opt(Any("Ll")) + Opt(Any("Ll"))) | (Opt(Any("Ll")) + Opt(Any("Ll")) + Opt(Any("Uu")))
+ intliteral = intconst + intsuffix
+ fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent)
+ imagconst = (intconst | fltconst) + Any("jJ")
+
+ # invalid combinations of prefixes are caught in p_string_literal
+ beginstring = Opt(Rep(Any(string_prefixes + raw_prefixes)) |
+ Any(char_prefixes)
+ ) + (Str("'") | Str('"') | Str("'''") | Str('"""'))
+ two_oct = octdigit + octdigit
+ three_oct = octdigit + octdigit + octdigit
+ two_hex = hexdigit + hexdigit
+ four_hex = two_hex + two_hex
+ escapeseq = Str("\\") + (two_oct | three_oct |
+ Str('N{') + Rep(AnyBut('}')) + Str('}') |
+ Str('u') + four_hex | Str('x') + two_hex |
+ Str('U') + four_hex + four_hex | AnyChar)
+
+ bra = Any("([{")
+ ket = Any(")]}")
+ ellipsis = Str("...")
+ punct = Any(":,;+-*/|&<>=.%`~^?!@")
+ diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//",
+ "+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=",
+ "<<=", ">>=", "**=", "//=", "->", "@=", "&&", "||", ':=')
+ spaces = Rep1(Any(" \t\f"))
+ escaped_newline = Str("\\\n")
+ lineterm = Eol + Opt(Str("\n"))
+
+ comment = Str("#") + Rep(AnyBut("\n"))
+
+ return Lexicon([
+ (name, Method('normalize_ident')),
+ (intliteral, Method('strip_underscores', symbol='INT')),
+ (fltconst, Method('strip_underscores', symbol='FLOAT')),
+ (imagconst, Method('strip_underscores', symbol='IMAG')),
+ (ellipsis | punct | diphthong, TEXT),
+
+ (bra, Method('open_bracket_action')),
+ (ket, Method('close_bracket_action')),
+ (lineterm, Method('newline_action')),
+
+ (beginstring, Method('begin_string_action')),
+
+ (comment, IGNORE),
+ (spaces, IGNORE),
+ (escaped_newline, IGNORE),
+
+ State('INDENT', [
+ (comment + lineterm, Method('commentline')),
+ (Opt(spaces) + Opt(comment) + lineterm, IGNORE),
+ (indentation, Method('indentation_action')),
+ (Eof, Method('eof_action'))
+ ]),
+
+ State('SQ_STRING', [
+ (escapeseq, 'ESCAPE'),
+ (Rep1(AnyBut("'\"\n\\")), 'CHARS'),
+ (Str('"'), 'CHARS'),
+ (Str("\n"), Method('unclosed_string_action')),
+ (Str("'"), Method('end_string_action')),
+ (Eof, 'EOF')
+ ]),
+
+ State('DQ_STRING', [
+ (escapeseq, 'ESCAPE'),
+ (Rep1(AnyBut('"\n\\')), 'CHARS'),
+ (Str("'"), 'CHARS'),
+ (Str("\n"), Method('unclosed_string_action')),
+ (Str('"'), Method('end_string_action')),
+ (Eof, 'EOF')
+ ]),
+
+ State('TSQ_STRING', [
+ (escapeseq, 'ESCAPE'),
+ (Rep1(AnyBut("'\"\n\\")), 'CHARS'),
+ (Any("'\""), 'CHARS'),
+ (Str("\n"), 'NEWLINE'),
+ (Str("'''"), Method('end_string_action')),
+ (Eof, 'EOF')
+ ]),
+
+ State('TDQ_STRING', [
+ (escapeseq, 'ESCAPE'),
+ (Rep1(AnyBut('"\'\n\\')), 'CHARS'),
+ (Any("'\""), 'CHARS'),
+ (Str("\n"), 'NEWLINE'),
+ (Str('"""'), Method('end_string_action')),
+ (Eof, 'EOF')
+ ]),
+
+ (Eof, Method('eof_action'))
+ ],
+
+ # FIXME: Plex 1.9 needs different args here from Plex 1.1.4
+ #debug_flags = scanner_debug_flags,
+ #debug_file = scanner_dump_file
+ )
+
+
+# BEGIN GENERATED CODE
+# Generated with 'cython-generate-lexicon.py' from:
+# cpython 3.12.0a7+ (heads/master:4cd1cc843a, Apr 11 2023, 10:32:26) [GCC 11.3.0]
+
+unicode_start_ch_any = (
+ u"\u005f\u00aa\u00b5\u00ba\u02ec\u02ee\u037f\u0386\u038c\u0559\u06d5"
+ u"\u06ff\u0710\u07b1\u07fa\u081a\u0824\u0828\u093d\u0950\u09b2\u09bd"
+ u"\u09ce\u09fc\u0a5e\u0abd\u0ad0\u0af9\u0b3d\u0b71\u0b83\u0b9c\u0bd0"
+ u"\u0c3d\u0c5d\u0c80\u0cbd\u0d3d\u0d4e\u0dbd\u0e32\u0e84\u0ea5\u0eb2"
+ u"\u0ebd\u0ec6\u0f00\u103f\u1061\u108e\u10c7\u10cd\u1258\u12c0\u17d7"
+ u"\u17dc\u18aa\u1aa7\u1cfa\u1f59\u1f5b\u1f5d\u1fbe\u2071\u207f\u2102"
+ u"\u2107\u2115\u2124\u2126\u2128\u214e\u2d27\u2d2d\u2d6f\ua7d3\ua8fb"
+ u"\ua9cf\uaa7a\uaab1\uaac0\uaac2\ufb1d\ufb3e\ufe71\ufe73\ufe77\ufe79"
+ u"\ufe7b\ufe7d\U00010808\U0001083c\U00010a00\U00010f27\U00011075\U00011144\U00011147\U00011176\U000111da"
+ u"\U000111dc\U00011288\U0001133d\U00011350\U000114c7\U00011644\U000116b8\U00011909\U0001193f\U00011941\U000119e1"
+ u"\U000119e3\U00011a00\U00011a3a\U00011a50\U00011a9d\U00011c40\U00011d46\U00011d98\U00011f02\U00011fb0\U00016f50"
+ u"\U00016fe3\U0001b132\U0001b155\U0001d4a2\U0001d4bb\U0001d546\U0001e14e\U0001e94b\U0001ee24\U0001ee27\U0001ee39"
+ u"\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f"
+ u"\U0001ee64\U0001ee7e"
+)
+unicode_start_ch_range = (
+ u"\u0041\u005a\u0061\u007a\u00c0\u00d6\u00d8\u00f6\u00f8\u02c1\u02c6"
+ u"\u02d1\u02e0\u02e4\u0370\u0374\u0376\u0377\u037b\u037d\u0388\u038a"
+ u"\u038e\u03a1\u03a3\u03f5\u03f7\u0481\u048a\u052f\u0531\u0556\u0560"
+ u"\u0588\u05d0\u05ea\u05ef\u05f2\u0620\u064a\u066e\u066f\u0671\u06d3"
+ u"\u06e5\u06e6\u06ee\u06ef\u06fa\u06fc\u0712\u072f\u074d\u07a5\u07ca"
+ u"\u07ea\u07f4\u07f5\u0800\u0815\u0840\u0858\u0860\u086a\u0870\u0887"
+ u"\u0889\u088e\u08a0\u08c9\u0904\u0939\u0958\u0961\u0971\u0980\u0985"
+ u"\u098c\u098f\u0990\u0993\u09a8\u09aa\u09b0\u09b6\u09b9\u09dc\u09dd"
+ u"\u09df\u09e1\u09f0\u09f1\u0a05\u0a0a\u0a0f\u0a10\u0a13\u0a28\u0a2a"
+ u"\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59\u0a5c\u0a72\u0a74"
+ u"\u0a85\u0a8d\u0a8f\u0a91\u0a93\u0aa8\u0aaa\u0ab0\u0ab2\u0ab3\u0ab5"
+ u"\u0ab9\u0ae0\u0ae1\u0b05\u0b0c\u0b0f\u0b10\u0b13\u0b28\u0b2a\u0b30"
+ u"\u0b32\u0b33\u0b35\u0b39\u0b5c\u0b5d\u0b5f\u0b61\u0b85\u0b8a\u0b8e"
+ u"\u0b90\u0b92\u0b95\u0b99\u0b9a\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8\u0baa"
+ u"\u0bae\u0bb9\u0c05\u0c0c\u0c0e\u0c10\u0c12\u0c28\u0c2a\u0c39\u0c58"
+ u"\u0c5a\u0c60\u0c61\u0c85\u0c8c\u0c8e\u0c90\u0c92\u0ca8\u0caa\u0cb3"
+ u"\u0cb5\u0cb9\u0cdd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d04\u0d0c\u0d0e"
+ u"\u0d10\u0d12\u0d3a\u0d54\u0d56\u0d5f\u0d61\u0d7a\u0d7f\u0d85\u0d96"
+ u"\u0d9a\u0db1\u0db3\u0dbb\u0dc0\u0dc6\u0e01\u0e30\u0e40\u0e46\u0e81"
+ u"\u0e82\u0e86\u0e8a\u0e8c\u0ea3\u0ea7\u0eb0\u0ec0\u0ec4\u0edc\u0edf"
+ u"\u0f40\u0f47\u0f49\u0f6c\u0f88\u0f8c\u1000\u102a\u1050\u1055\u105a"
+ u"\u105d\u1065\u1066\u106e\u1070\u1075\u1081\u10a0\u10c5\u10d0\u10fa"
+ u"\u10fc\u1248\u124a\u124d\u1250\u1256\u125a\u125d\u1260\u1288\u128a"
+ u"\u128d\u1290\u12b0\u12b2\u12b5\u12b8\u12be\u12c2\u12c5\u12c8\u12d6"
+ u"\u12d8\u1310\u1312\u1315\u1318\u135a\u1380\u138f\u13a0\u13f5\u13f8"
+ u"\u13fd\u1401\u166c\u166f\u167f\u1681\u169a\u16a0\u16ea\u16ee\u16f8"
+ u"\u1700\u1711\u171f\u1731\u1740\u1751\u1760\u176c\u176e\u1770\u1780"
+ u"\u17b3\u1820\u1878\u1880\u18a8\u18b0\u18f5\u1900\u191e\u1950\u196d"
+ u"\u1970\u1974\u1980\u19ab\u19b0\u19c9\u1a00\u1a16\u1a20\u1a54\u1b05"
+ u"\u1b33\u1b45\u1b4c\u1b83\u1ba0\u1bae\u1baf\u1bba\u1be5\u1c00\u1c23"
+ u"\u1c4d\u1c4f\u1c5a\u1c7d\u1c80\u1c88\u1c90\u1cba\u1cbd\u1cbf\u1ce9"
+ u"\u1cec\u1cee\u1cf3\u1cf5\u1cf6\u1d00\u1dbf\u1e00\u1f15\u1f18\u1f1d"
+ u"\u1f20\u1f45\u1f48\u1f4d\u1f50\u1f57\u1f5f\u1f7d\u1f80\u1fb4\u1fb6"
+ u"\u1fbc\u1fc2\u1fc4\u1fc6\u1fcc\u1fd0\u1fd3\u1fd6\u1fdb\u1fe0\u1fec"
+ u"\u1ff2\u1ff4\u1ff6\u1ffc\u2090\u209c\u210a\u2113\u2118\u211d\u212a"
+ u"\u2139\u213c\u213f\u2145\u2149\u2160\u2188\u2c00\u2ce4\u2ceb\u2cee"
+ u"\u2cf2\u2cf3\u2d00\u2d25\u2d30\u2d67\u2d80\u2d96\u2da0\u2da6\u2da8"
+ u"\u2dae\u2db0\u2db6\u2db8\u2dbe\u2dc0\u2dc6\u2dc8\u2dce\u2dd0\u2dd6"
+ u"\u2dd8\u2dde\u3005\u3007\u3021\u3029\u3031\u3035\u3038\u303c\u3041"
+ u"\u3096\u309d\u309f\u30a1\u30fa\u30fc\u30ff\u3105\u312f\u3131\u318e"
+ u"\u31a0\u31bf\u31f0\u31ff\u3400\u4dbf\u4e00\ua48c\ua4d0\ua4fd\ua500"
+ u"\ua60c\ua610\ua61f\ua62a\ua62b\ua640\ua66e\ua67f\ua69d\ua6a0\ua6ef"
+ u"\ua717\ua71f\ua722\ua788\ua78b\ua7ca\ua7d0\ua7d1\ua7d5\ua7d9\ua7f2"
+ u"\ua801\ua803\ua805\ua807\ua80a\ua80c\ua822\ua840\ua873\ua882\ua8b3"
+ u"\ua8f2\ua8f7\ua8fd\ua8fe\ua90a\ua925\ua930\ua946\ua960\ua97c\ua984"
+ u"\ua9b2\ua9e0\ua9e4\ua9e6\ua9ef\ua9fa\ua9fe\uaa00\uaa28\uaa40\uaa42"
+ u"\uaa44\uaa4b\uaa60\uaa76\uaa7e\uaaaf\uaab5\uaab6\uaab9\uaabd\uaadb"
+ u"\uaadd\uaae0\uaaea\uaaf2\uaaf4\uab01\uab06\uab09\uab0e\uab11\uab16"
+ u"\uab20\uab26\uab28\uab2e\uab30\uab5a\uab5c\uab69\uab70\uabe2\uac00"
+ u"\ud7a3\ud7b0\ud7c6\ud7cb\ud7fb\uf900\ufa6d\ufa70\ufad9\ufb00\ufb06"
+ u"\ufb13\ufb17\ufb1f\ufb28\ufb2a\ufb36\ufb38\ufb3c\ufb40\ufb41\ufb43"
+ u"\ufb44\ufb46\ufbb1\ufbd3\ufc5d\ufc64\ufd3d\ufd50\ufd8f\ufd92\ufdc7"
+ u"\ufdf0\ufdf9\ufe7f\ufefc\uff21\uff3a\uff41\uff5a\uff66\uff9d\uffa0"
+ u"\uffbe\uffc2\uffc7\uffca\uffcf\uffd2\uffd7\uffda\uffdc\U00010000\U0001000b"
+ u"\U0001000d\U00010026\U00010028\U0001003a\U0001003c\U0001003d\U0001003f\U0001004d\U00010050\U0001005d\U00010080"
+ u"\U000100fa\U00010140\U00010174\U00010280\U0001029c\U000102a0\U000102d0\U00010300\U0001031f\U0001032d\U0001034a"
+ u"\U00010350\U00010375\U00010380\U0001039d\U000103a0\U000103c3\U000103c8\U000103cf\U000103d1\U000103d5\U00010400"
+ u"\U0001049d\U000104b0\U000104d3\U000104d8\U000104fb\U00010500\U00010527\U00010530\U00010563\U00010570\U0001057a"
+ u"\U0001057c\U0001058a\U0001058c\U00010592\U00010594\U00010595\U00010597\U000105a1\U000105a3\U000105b1\U000105b3"
+ u"\U000105b9\U000105bb\U000105bc\U00010600\U00010736\U00010740\U00010755\U00010760\U00010767\U00010780\U00010785"
+ u"\U00010787\U000107b0\U000107b2\U000107ba\U00010800\U00010805\U0001080a\U00010835\U00010837\U00010838\U0001083f"
+ u"\U00010855\U00010860\U00010876\U00010880\U0001089e\U000108e0\U000108f2\U000108f4\U000108f5\U00010900\U00010915"
+ u"\U00010920\U00010939\U00010980\U000109b7\U000109be\U000109bf\U00010a10\U00010a13\U00010a15\U00010a17\U00010a19"
+ u"\U00010a35\U00010a60\U00010a7c\U00010a80\U00010a9c\U00010ac0\U00010ac7\U00010ac9\U00010ae4\U00010b00\U00010b35"
+ u"\U00010b40\U00010b55\U00010b60\U00010b72\U00010b80\U00010b91\U00010c00\U00010c48\U00010c80\U00010cb2\U00010cc0"
+ u"\U00010cf2\U00010d00\U00010d23\U00010e80\U00010ea9\U00010eb0\U00010eb1\U00010f00\U00010f1c\U00010f30\U00010f45"
+ u"\U00010f70\U00010f81\U00010fb0\U00010fc4\U00010fe0\U00010ff6\U00011003\U00011037\U00011071\U00011072\U00011083"
+ u"\U000110af\U000110d0\U000110e8\U00011103\U00011126\U00011150\U00011172\U00011183\U000111b2\U000111c1\U000111c4"
+ u"\U00011200\U00011211\U00011213\U0001122b\U0001123f\U00011240\U00011280\U00011286\U0001128a\U0001128d\U0001128f"
+ u"\U0001129d\U0001129f\U000112a8\U000112b0\U000112de\U00011305\U0001130c\U0001130f\U00011310\U00011313\U00011328"
+ u"\U0001132a\U00011330\U00011332\U00011333\U00011335\U00011339\U0001135d\U00011361\U00011400\U00011434\U00011447"
+ u"\U0001144a\U0001145f\U00011461\U00011480\U000114af\U000114c4\U000114c5\U00011580\U000115ae\U000115d8\U000115db"
+ u"\U00011600\U0001162f\U00011680\U000116aa\U00011700\U0001171a\U00011740\U00011746\U00011800\U0001182b\U000118a0"
+ u"\U000118df\U000118ff\U00011906\U0001190c\U00011913\U00011915\U00011916\U00011918\U0001192f\U000119a0\U000119a7"
+ u"\U000119aa\U000119d0\U00011a0b\U00011a32\U00011a5c\U00011a89\U00011ab0\U00011af8\U00011c00\U00011c08\U00011c0a"
+ u"\U00011c2e\U00011c72\U00011c8f\U00011d00\U00011d06\U00011d08\U00011d09\U00011d0b\U00011d30\U00011d60\U00011d65"
+ u"\U00011d67\U00011d68\U00011d6a\U00011d89\U00011ee0\U00011ef2\U00011f04\U00011f10\U00011f12\U00011f33\U00012000"
+ u"\U00012399\U00012400\U0001246e\U00012480\U00012543\U00012f90\U00012ff0\U00013000\U0001342f\U00013441\U00013446"
+ u"\U00014400\U00014646\U00016800\U00016a38\U00016a40\U00016a5e\U00016a70\U00016abe\U00016ad0\U00016aed\U00016b00"
+ u"\U00016b2f\U00016b40\U00016b43\U00016b63\U00016b77\U00016b7d\U00016b8f\U00016e40\U00016e7f\U00016f00\U00016f4a"
+ u"\U00016f93\U00016f9f\U00016fe0\U00016fe1\U00017000\U000187f7\U00018800\U00018cd5\U00018d00\U00018d08\U0001aff0"
+ u"\U0001aff3\U0001aff5\U0001affb\U0001affd\U0001affe\U0001b000\U0001b122\U0001b150\U0001b152\U0001b164\U0001b167"
+ u"\U0001b170\U0001b2fb\U0001bc00\U0001bc6a\U0001bc70\U0001bc7c\U0001bc80\U0001bc88\U0001bc90\U0001bc99\U0001d400"
+ u"\U0001d454\U0001d456\U0001d49c\U0001d49e\U0001d49f\U0001d4a5\U0001d4a6\U0001d4a9\U0001d4ac\U0001d4ae\U0001d4b9"
+ u"\U0001d4bd\U0001d4c3\U0001d4c5\U0001d505\U0001d507\U0001d50a\U0001d50d\U0001d514\U0001d516\U0001d51c\U0001d51e"
+ u"\U0001d539\U0001d53b\U0001d53e\U0001d540\U0001d544\U0001d54a\U0001d550\U0001d552\U0001d6a5\U0001d6a8\U0001d6c0"
+ u"\U0001d6c2\U0001d6da\U0001d6dc\U0001d6fa\U0001d6fc\U0001d714\U0001d716\U0001d734\U0001d736\U0001d74e\U0001d750"
+ u"\U0001d76e\U0001d770\U0001d788\U0001d78a\U0001d7a8\U0001d7aa\U0001d7c2\U0001d7c4\U0001d7cb\U0001df00\U0001df1e"
+ u"\U0001df25\U0001df2a\U0001e030\U0001e06d\U0001e100\U0001e12c\U0001e137\U0001e13d\U0001e290\U0001e2ad\U0001e2c0"
+ u"\U0001e2eb\U0001e4d0\U0001e4eb\U0001e7e0\U0001e7e6\U0001e7e8\U0001e7eb\U0001e7ed\U0001e7ee\U0001e7f0\U0001e7fe"
+ u"\U0001e800\U0001e8c4\U0001e900\U0001e943\U0001ee00\U0001ee03\U0001ee05\U0001ee1f\U0001ee21\U0001ee22\U0001ee29"
+ u"\U0001ee32\U0001ee34\U0001ee37\U0001ee4d\U0001ee4f\U0001ee51\U0001ee52\U0001ee61\U0001ee62\U0001ee67\U0001ee6a"
+ u"\U0001ee6c\U0001ee72\U0001ee74\U0001ee77\U0001ee79\U0001ee7c\U0001ee80\U0001ee89\U0001ee8b\U0001ee9b\U0001eea1"
+ u"\U0001eea3\U0001eea5\U0001eea9\U0001eeab\U0001eebb\U00020000\U0002a6df\U0002a700\U0002b739\U0002b740\U0002b81d"
+ u"\U0002b820\U0002cea1\U0002ceb0\U0002ebe0\U0002f800\U0002fa1d\U00030000\U0003134a"
+)
+unicode_continuation_ch_any = (
+ u"\u00b7\u0387\u05bf\u05c7\u0670\u0711\u07fd\u09bc\u09d7\u09fe\u0a3c"
+ u"\u0a51\u0a75\u0abc\u0b3c\u0b82\u0bd7\u0c3c\u0cbc\u0cf3\u0d57\u0dca"
+ u"\u0dd6\u0e31\u0eb1\u0f35\u0f37\u0f39\u0fc6\u17dd\u18a9\u1ced\u1cf4"
+ u"\u2054\u20e1\u2d7f\ua66f\ua802\ua806\ua80b\ua82c\ua9e5\uaa43\uaab0"
+ u"\uaac1\ufb1e\uff3f\U000101fd\U000102e0\U00010a3f\U000110c2\U00011173\U0001123e\U00011241\U00011357"
+ u"\U0001145e\U00011940\U000119e4\U00011a47\U00011d3a\U00011d47\U00011f03\U00013440\U00016f4f\U00016fe4\U0001da75"
+ u"\U0001da84\U0001e08f\U0001e2ae"
+)
+unicode_continuation_ch_range = (
+ u"\u0030\u0039\u0300\u036f\u0483\u0487\u0591\u05bd\u05c1\u05c2\u05c4"
+ u"\u05c5\u0610\u061a\u064b\u0669\u06d6\u06dc\u06df\u06e4\u06e7\u06e8"
+ u"\u06ea\u06ed\u06f0\u06f9\u0730\u074a\u07a6\u07b0\u07c0\u07c9\u07eb"
+ u"\u07f3\u0816\u0819\u081b\u0823\u0825\u0827\u0829\u082d\u0859\u085b"
+ u"\u0898\u089f\u08ca\u08e1\u08e3\u0903\u093a\u093c\u093e\u094f\u0951"
+ u"\u0957\u0962\u0963\u0966\u096f\u0981\u0983\u09be\u09c4\u09c7\u09c8"
+ u"\u09cb\u09cd\u09e2\u09e3\u09e6\u09ef\u0a01\u0a03\u0a3e\u0a42\u0a47"
+ u"\u0a48\u0a4b\u0a4d\u0a66\u0a71\u0a81\u0a83\u0abe\u0ac5\u0ac7\u0ac9"
+ u"\u0acb\u0acd\u0ae2\u0ae3\u0ae6\u0aef\u0afa\u0aff\u0b01\u0b03\u0b3e"
+ u"\u0b44\u0b47\u0b48\u0b4b\u0b4d\u0b55\u0b57\u0b62\u0b63\u0b66\u0b6f"
+ u"\u0bbe\u0bc2\u0bc6\u0bc8\u0bca\u0bcd\u0be6\u0bef\u0c00\u0c04\u0c3e"
+ u"\u0c44\u0c46\u0c48\u0c4a\u0c4d\u0c55\u0c56\u0c62\u0c63\u0c66\u0c6f"
+ u"\u0c81\u0c83\u0cbe\u0cc4\u0cc6\u0cc8\u0cca\u0ccd\u0cd5\u0cd6\u0ce2"
+ u"\u0ce3\u0ce6\u0cef\u0d00\u0d03\u0d3b\u0d3c\u0d3e\u0d44\u0d46\u0d48"
+ u"\u0d4a\u0d4d\u0d62\u0d63\u0d66\u0d6f\u0d81\u0d83\u0dcf\u0dd4\u0dd8"
+ u"\u0ddf\u0de6\u0def\u0df2\u0df3\u0e33\u0e3a\u0e47\u0e4e\u0e50\u0e59"
+ u"\u0eb3\u0ebc\u0ec8\u0ece\u0ed0\u0ed9\u0f18\u0f19\u0f20\u0f29\u0f3e"
+ u"\u0f3f\u0f71\u0f84\u0f86\u0f87\u0f8d\u0f97\u0f99\u0fbc\u102b\u103e"
+ u"\u1040\u1049\u1056\u1059\u105e\u1060\u1062\u1064\u1067\u106d\u1071"
+ u"\u1074\u1082\u108d\u108f\u109d\u135d\u135f\u1369\u1371\u1712\u1715"
+ u"\u1732\u1734\u1752\u1753\u1772\u1773\u17b4\u17d3\u17e0\u17e9\u180b"
+ u"\u180d\u180f\u1819\u1920\u192b\u1930\u193b\u1946\u194f\u19d0\u19da"
+ u"\u1a17\u1a1b\u1a55\u1a5e\u1a60\u1a7c\u1a7f\u1a89\u1a90\u1a99\u1ab0"
+ u"\u1abd\u1abf\u1ace\u1b00\u1b04\u1b34\u1b44\u1b50\u1b59\u1b6b\u1b73"
+ u"\u1b80\u1b82\u1ba1\u1bad\u1bb0\u1bb9\u1be6\u1bf3\u1c24\u1c37\u1c40"
+ u"\u1c49\u1c50\u1c59\u1cd0\u1cd2\u1cd4\u1ce8\u1cf7\u1cf9\u1dc0\u1dff"
+ u"\u203f\u2040\u20d0\u20dc\u20e5\u20f0\u2cef\u2cf1\u2de0\u2dff\u302a"
+ u"\u302f\u3099\u309a\ua620\ua629\ua674\ua67d\ua69e\ua69f\ua6f0\ua6f1"
+ u"\ua823\ua827\ua880\ua881\ua8b4\ua8c5\ua8d0\ua8d9\ua8e0\ua8f1\ua8ff"
+ u"\ua909\ua926\ua92d\ua947\ua953\ua980\ua983\ua9b3\ua9c0\ua9d0\ua9d9"
+ u"\ua9f0\ua9f9\uaa29\uaa36\uaa4c\uaa4d\uaa50\uaa59\uaa7b\uaa7d\uaab2"
+ u"\uaab4\uaab7\uaab8\uaabe\uaabf\uaaeb\uaaef\uaaf5\uaaf6\uabe3\uabea"
+ u"\uabec\uabed\uabf0\uabf9\ufe00\ufe0f\ufe20\ufe2f\ufe33\ufe34\ufe4d"
+ u"\ufe4f\uff10\uff19\uff9e\uff9f\U00010376\U0001037a\U000104a0\U000104a9\U00010a01\U00010a03"
+ u"\U00010a05\U00010a06\U00010a0c\U00010a0f\U00010a38\U00010a3a\U00010ae5\U00010ae6\U00010d24\U00010d27\U00010d30"
+ u"\U00010d39\U00010eab\U00010eac\U00010efd\U00010eff\U00010f46\U00010f50\U00010f82\U00010f85\U00011000\U00011002"
+ u"\U00011038\U00011046\U00011066\U00011070\U00011073\U00011074\U0001107f\U00011082\U000110b0\U000110ba\U000110f0"
+ u"\U000110f9\U00011100\U00011102\U00011127\U00011134\U00011136\U0001113f\U00011145\U00011146\U00011180\U00011182"
+ u"\U000111b3\U000111c0\U000111c9\U000111cc\U000111ce\U000111d9\U0001122c\U00011237\U000112df\U000112ea\U000112f0"
+ u"\U000112f9\U00011300\U00011303\U0001133b\U0001133c\U0001133e\U00011344\U00011347\U00011348\U0001134b\U0001134d"
+ u"\U00011362\U00011363\U00011366\U0001136c\U00011370\U00011374\U00011435\U00011446\U00011450\U00011459\U000114b0"
+ u"\U000114c3\U000114d0\U000114d9\U000115af\U000115b5\U000115b8\U000115c0\U000115dc\U000115dd\U00011630\U00011640"
+ u"\U00011650\U00011659\U000116ab\U000116b7\U000116c0\U000116c9\U0001171d\U0001172b\U00011730\U00011739\U0001182c"
+ u"\U0001183a\U000118e0\U000118e9\U00011930\U00011935\U00011937\U00011938\U0001193b\U0001193e\U00011942\U00011943"
+ u"\U00011950\U00011959\U000119d1\U000119d7\U000119da\U000119e0\U00011a01\U00011a0a\U00011a33\U00011a39\U00011a3b"
+ u"\U00011a3e\U00011a51\U00011a5b\U00011a8a\U00011a99\U00011c2f\U00011c36\U00011c38\U00011c3f\U00011c50\U00011c59"
+ u"\U00011c92\U00011ca7\U00011ca9\U00011cb6\U00011d31\U00011d36\U00011d3c\U00011d3d\U00011d3f\U00011d45\U00011d50"
+ u"\U00011d59\U00011d8a\U00011d8e\U00011d90\U00011d91\U00011d93\U00011d97\U00011da0\U00011da9\U00011ef3\U00011ef6"
+ u"\U00011f00\U00011f01\U00011f34\U00011f3a\U00011f3e\U00011f42\U00011f50\U00011f59\U00013447\U00013455\U00016a60"
+ u"\U00016a69\U00016ac0\U00016ac9\U00016af0\U00016af4\U00016b30\U00016b36\U00016b50\U00016b59\U00016f51\U00016f87"
+ u"\U00016f8f\U00016f92\U00016ff0\U00016ff1\U0001bc9d\U0001bc9e\U0001cf00\U0001cf2d\U0001cf30\U0001cf46\U0001d165"
+ u"\U0001d169\U0001d16d\U0001d172\U0001d17b\U0001d182\U0001d185\U0001d18b\U0001d1aa\U0001d1ad\U0001d242\U0001d244"
+ u"\U0001d7ce\U0001d7ff\U0001da00\U0001da36\U0001da3b\U0001da6c\U0001da9b\U0001da9f\U0001daa1\U0001daaf\U0001e000"
+ u"\U0001e006\U0001e008\U0001e018\U0001e01b\U0001e021\U0001e023\U0001e024\U0001e026\U0001e02a\U0001e130\U0001e136"
+ u"\U0001e140\U0001e149\U0001e2ec\U0001e2f9\U0001e4ec\U0001e4f9\U0001e8d0\U0001e8d6\U0001e944\U0001e94a\U0001e950"
+ u"\U0001e959\U0001fbf0\U0001fbf9"
+)
+
+# END GENERATED CODE
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Main.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Main.py
new file mode 100644
index 0000000000000000000000000000000000000000..80946c0776719637d247baaa87a7af9c988f0f2f
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Compiler/Main.py
@@ -0,0 +1,789 @@
+#
+# Cython Top Level
+#
+
+from __future__ import absolute_import, print_function
+
+import os
+import re
+import sys
+import io
+
+if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 3):
+ sys.stderr.write("Sorry, Cython requires Python 2.7 or 3.3+, found %d.%d\n" % tuple(sys.version_info[:2]))
+ sys.exit(1)
+
+try:
+ from __builtin__ import basestring
+except ImportError:
+ basestring = str
+
+# Do not import Parsing here, import it when needed, because Parsing imports
+# Nodes, which globally needs debug command line options initialized to set a
+# conditional metaclass. These options are processed by CmdLine called from
+# main() in this file.
+# import Parsing
+from . import Errors
+from .StringEncoding import EncodedString
+from .Scanning import PyrexScanner, FileSourceDescriptor
+from .Errors import PyrexError, CompileError, error, warning
+from .Symtab import ModuleScope
+from .. import Utils
+from . import Options
+from .Options import CompilationOptions, default_options
+from .CmdLine import parse_command_line
+from .Lexicon import (unicode_start_ch_any, unicode_continuation_ch_any,
+ unicode_start_ch_range, unicode_continuation_ch_range)
+
+
+def _make_range_re(chrs):
+ out = []
+ for i in range(0, len(chrs), 2):
+ out.append(u"{0}-{1}".format(chrs[i], chrs[i+1]))
+ return u"".join(out)
+
+# py2 version looked like r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$"
+module_name_pattern = u"[{0}{1}][{0}{2}{1}{3}]*".format(
+ unicode_start_ch_any, _make_range_re(unicode_start_ch_range),
+ unicode_continuation_ch_any,
+ _make_range_re(unicode_continuation_ch_range))
+module_name_pattern = re.compile(u"{0}(\\.{0})*$".format(module_name_pattern))
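+# The compiled pattern accepts dotted identifier paths such as u"pkg.sub.mod"
+# (including non-ASCII identifiers from the tables above) and rejects
+# strings like u"1mod" or u"pkg..mod".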
+
+
+standard_include_path = os.path.abspath(
+ os.path.join(os.path.dirname(os.path.dirname(__file__)), 'Includes'))
+
+
+class Context(object):
+ # This class encapsulates the context needed for compiling
+ # one or more Cython implementation files along with their
+ # associated and imported declaration files. It includes
+ # the root of the module import namespace and the list
+ # of directories to search for include files.
+ #
+ # modules {string : ModuleScope}
+ # include_directories [string]
+ # future_directives [object]
+ # language_level int currently 2 or 3 for Python 2/3
+
+ cython_scope = None
+ language_level = None # warn when not set but default to Py2
+
+ def __init__(self, include_directories, compiler_directives, cpp=False,
+ language_level=None, options=None):
+ # cython_scope is a hack, set to False by subclasses, in order to break
+ # an infinite loop.
+ # Better code organization would fix it.
+
+ from . import Builtin, CythonScope
+ self.modules = {"__builtin__" : Builtin.builtin_scope}
+ self.cython_scope = CythonScope.create_cython_scope(self)
+ self.modules["cython"] = self.cython_scope
+ self.include_directories = include_directories
+ self.future_directives = set()
+ self.compiler_directives = compiler_directives
+ self.cpp = cpp
+ self.options = options
+
+ self.pxds = {} # full name -> node tree
+ self._interned = {} # (type(value), value, *key_args) -> interned_value
+
+ if language_level is not None:
+ self.set_language_level(language_level)
+
+ self.legacy_implicit_noexcept = self.compiler_directives.get('legacy_implicit_noexcept', False)
+
+ self.gdb_debug_outputwriter = None
+
+ @classmethod
+ def from_options(cls, options):
+ return cls(options.include_path, options.compiler_directives,
+ options.cplus, options.language_level, options=options)
+
+ def set_language_level(self, level):
+ from .Future import print_function, unicode_literals, absolute_import, division, generator_stop
+ future_directives = set()
+ if level == '3str':
+ level = 3
+ else:
+ level = int(level)
+ if level >= 3:
+ future_directives.add(unicode_literals)
+ if level >= 3:
+ future_directives.update([print_function, absolute_import, division, generator_stop])
+ self.language_level = level
+ self.future_directives = future_directives
+ if level >= 3:
+ self.modules['builtins'] = self.modules['__builtin__']
+
+ def intern_ustring(self, value, encoding=None):
+ key = (EncodedString, value, encoding)
+ try:
+ return self._interned[key]
+ except KeyError:
+ pass
+ value = EncodedString(value)
+ if encoding:
+ value.encoding = encoding
+ self._interned[key] = value
+ return value
+
+ # pipeline creation functions can now be found in Pipeline.py
+
+ def process_pxd(self, source_desc, scope, module_name):
+ from . import Pipeline
+ if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':
+ source = CompilationSource(source_desc, module_name, os.getcwd())
+ result_sink = create_default_resultobj(source, self.options)
+ pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)
+ result = Pipeline.run_pipeline(pipeline, source)
+ else:
+ pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)
+ result = Pipeline.run_pipeline(pipeline, source_desc)
+ return result
+
+ def nonfatal_error(self, exc):
+ return Errors.report_error(exc)
+
+ def _split_qualified_name(self, qualified_name, relative_import=False):
+ # Splits qualified_name into parts in the form of 2-tuples: (PART_NAME, IS_PACKAGE).
+ qualified_name_parts = qualified_name.split('.')
+ last_part = qualified_name_parts.pop()
+ qualified_name_parts = [(p, True) for p in qualified_name_parts]
+ if last_part != '__init__':
+ # If the last part is '__init__', it is omitted. Otherwise, we need to check whether we can
+ # find an __init__.pyx/__init__.py file to determine whether the last part is a package.
+ is_package = False
+ for suffix in ('.py', '.pyx'):
+ path = self.search_include_directories(
+ qualified_name, suffix=suffix, source_pos=None, source_file_path=None, sys_path=not relative_import)
+ if path:
+ is_package = self._is_init_file(path)
+ break
+
+ qualified_name_parts.append((last_part, is_package))
+ return qualified_name_parts
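+ # Illustrative result (assuming no __init__.pyx/__init__.py is found
+ # for the last part): 'pkg.mod' -> [('pkg', True), ('mod', False)]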
+
+ @staticmethod
+ def _is_init_file(path):
+ return os.path.basename(path) in ('__init__.pyx', '__init__.py', '__init__.pxd') if path else False
+
+ @staticmethod
+ def _check_pxd_filename(pos, pxd_pathname, qualified_name):
+ if not pxd_pathname:
+ return
+ pxd_filename = os.path.basename(pxd_pathname)
+ if '.' in qualified_name and qualified_name == os.path.splitext(pxd_filename)[0]:
+ warning(pos, "Dotted filenames ('%s') are deprecated."
+ " Please use the normal Python package directory layout." % pxd_filename, level=1)
+
+ def find_module(self, module_name, from_module=None, pos=None, need_pxd=1,
+ absolute_fallback=True, relative_import=False):
+ # Finds and returns the module scope corresponding to
+ # the given relative or absolute module name. If this
+ # is the first time the module has been requested, finds
+ # the corresponding .pxd file and processes it.
+ # If from_module is not None, it must be a module scope,
+ # and the module will first be searched for relative to
+ # that module, provided its name is not a dotted name.
+ debug_find_module = 0
+ if debug_find_module:
+ print("Context.find_module: module_name = %s, from_module = %s, pos = %s, need_pxd = %s" % (
+ module_name, from_module, pos, need_pxd))
+
+ scope = None
+ pxd_pathname = None
+ if from_module:
+ if module_name:
+ # from .module import ...
+ qualified_name = from_module.qualify_name(module_name)
+ else:
+ # from . import ...
+ qualified_name = from_module.qualified_name
+ scope = from_module
+ from_module = None
+ else:
+ qualified_name = module_name
+
+ if not module_name_pattern.match(qualified_name):
+ raise CompileError(pos or (module_name, 0, 0),
+ u"'%s' is not a valid module name" % module_name)
+
+ if from_module:
+ if debug_find_module:
+ print("...trying relative import")
+ scope = from_module.lookup_submodule(module_name)
+ if not scope:
+ pxd_pathname = self.find_pxd_file(qualified_name, pos, sys_path=not relative_import)
+ self._check_pxd_filename(pos, pxd_pathname, qualified_name)
+ if pxd_pathname:
+ is_package = self._is_init_file(pxd_pathname)
+ scope = from_module.find_submodule(module_name, as_package=is_package)
+ if not scope:
+ if debug_find_module:
+ print("...trying absolute import")
+ if absolute_fallback:
+ qualified_name = module_name
+ scope = self
+ for name, is_package in self._split_qualified_name(qualified_name, relative_import=relative_import):
+ scope = scope.find_submodule(name, as_package=is_package)
+ if debug_find_module:
+ print("...scope = %s" % scope)
+ if not scope.pxd_file_loaded:
+ if debug_find_module:
+ print("...pxd not loaded")
+ if not pxd_pathname:
+ if debug_find_module:
+ print("...looking for pxd file")
+ # Only look in sys.path if we are explicitly looking
+ # for a .pxd file.
+ pxd_pathname = self.find_pxd_file(qualified_name, pos, sys_path=need_pxd and not relative_import)
+ self._check_pxd_filename(pos, pxd_pathname, qualified_name)
+ if debug_find_module:
+ print("......found %s" % pxd_pathname)
+ if not pxd_pathname and need_pxd:
+ # Set pxd_file_loaded such that we don't need to
+ # look for the non-existing pxd file next time.
+ scope.pxd_file_loaded = True
+ package_pathname = self.search_include_directories(
+ qualified_name, suffix=".py", source_pos=pos, sys_path=not relative_import)
+ if not (package_pathname and package_pathname.endswith(Utils.PACKAGE_FILES)):
+ error(pos, "'%s.pxd' not found" % qualified_name.replace('.', os.sep))
+ if pxd_pathname:
+ scope.pxd_file_loaded = True
+ try:
+ if debug_find_module:
+ print("Context.find_module: Parsing %s" % pxd_pathname)
+ rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]
+ if not pxd_pathname.endswith(rel_path):
+ rel_path = pxd_pathname # safety measure to prevent printing incorrect paths
+ source_desc = FileSourceDescriptor(pxd_pathname, rel_path)
+ err, result = self.process_pxd(source_desc, scope, qualified_name)
+ if err:
+ raise err
+ (pxd_codenodes, pxd_scope) = result
+ self.pxds[module_name] = (pxd_codenodes, pxd_scope)
+ except CompileError:
+ pass
+ return scope
+
+ def find_pxd_file(self, qualified_name, pos=None, sys_path=True, source_file_path=None):
+ # Search include path (and sys.path if sys_path is True) for
+ # the .pxd file corresponding to the given fully-qualified
+ # module name.
+ # Will find either a dotted filename or a file in a
+ # package directory. If a source file position is given,
+ # the directory containing the source file is searched first
+ # for a dotted filename, and its containing package root
+ # directory is searched first for a non-dotted filename.
+ pxd = self.search_include_directories(
+ qualified_name, suffix=".pxd", source_pos=pos, sys_path=sys_path, source_file_path=source_file_path)
+ if pxd is None and Options.cimport_from_pyx:
+ return self.find_pyx_file(qualified_name, pos, sys_path=sys_path)
+ return pxd
+
+ def find_pyx_file(self, qualified_name, pos=None, sys_path=True, source_file_path=None):
+ # Search include path for the .pyx file corresponding to the
+ # given fully-qualified module name, as for find_pxd_file().
+ return self.search_include_directories(
+ qualified_name, suffix=".pyx", source_pos=pos, sys_path=sys_path, source_file_path=source_file_path)
+
+ def find_include_file(self, filename, pos=None, source_file_path=None):
+ # Search list of include directories for filename.
+ # Reports an error and returns None if not found.
+ path = self.search_include_directories(
+ filename, source_pos=pos, include=True, source_file_path=source_file_path)
+ if not path:
+ error(pos, "'%s' not found" % filename)
+ return path
+
+ def search_include_directories(self, qualified_name,
+ suffix=None, source_pos=None, include=False, sys_path=False, source_file_path=None):
+ include_dirs = self.include_directories
+ if sys_path:
+ include_dirs = include_dirs + sys.path
+ # include_dirs must be hashable for caching in @cached_function
+ include_dirs = tuple(include_dirs + [standard_include_path])
+ return search_include_directories(
+ include_dirs, qualified_name, suffix or "", source_pos, include, source_file_path)
+
+ def find_root_package_dir(self, file_path):
+ return Utils.find_root_package_dir(file_path)
+
+ def check_package_dir(self, dir, package_names):
+ return Utils.check_package_dir(dir, tuple(package_names))
+
+ def c_file_out_of_date(self, source_path, output_path):
+ if not os.path.exists(output_path):
+ return 1
+ c_time = Utils.modification_time(output_path)
+ if Utils.file_newer_than(source_path, c_time):
+ return 1
+ pxd_path = Utils.replace_suffix(source_path, ".pxd")
+ if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time):
+ return 1
+ for kind, name in self.read_dependency_file(source_path):
+ if kind == "cimport":
+ dep_path = self.find_pxd_file(name, source_file_path=source_path)
+ elif kind == "include":
+ dep_path = self.search_include_directories(name, source_file_path=source_path)
+ else:
+ continue
+ if dep_path and Utils.file_newer_than(dep_path, c_time):
+ return 1
+ return 0
+
+ def find_cimported_module_names(self, source_path):
+ return [ name for kind, name in self.read_dependency_file(source_path)
+ if kind == "cimport" ]
+
+ def is_package_dir(self, dir_path):
+ return Utils.is_package_dir(dir_path)
+
+ def read_dependency_file(self, source_path):
+ dep_path = Utils.replace_suffix(source_path, ".dep")
+ if os.path.exists(dep_path):
+ with open(dep_path, "rU") as f:
+ chunks = [ line.split(" ", 1)
+ for line in (l.strip() for l in f)
+ if " " in line ]
+ return chunks
+ else:
+ return ()
+
+ def lookup_submodule(self, name):
+ # Look up a top-level module. Returns None if not found.
+ return self.modules.get(name, None)
+
+ def find_submodule(self, name, as_package=False):
+ # Find a top-level module, creating a new one if needed.
+ scope = self.lookup_submodule(name)
+ if not scope:
+ scope = ModuleScope(name,
+ parent_module = None, context = self, is_package=as_package)
+ self.modules[name] = scope
+ return scope
+
+ def parse(self, source_desc, scope, pxd, full_module_name):
+ if not isinstance(source_desc, FileSourceDescriptor):
+ raise RuntimeError("Only file sources for code supported")
+ source_filename = source_desc.filename
+ scope.cpp = self.cpp
+ # Parse the given source file and return a parse tree.
+ num_errors = Errors.get_errors_count()
+ try:
+ with Utils.open_source_file(source_filename) as f:
+ from . import Parsing
+ s = PyrexScanner(f, source_desc, source_encoding = f.encoding,
+ scope = scope, context = self)
+ tree = Parsing.p_module(s, pxd, full_module_name)
+ if self.options.formal_grammar:
+ try:
+ from ..Parser import ConcreteSyntaxTree
+ except ImportError:
+ raise RuntimeError(
+ "Formal grammar can only be used with compiled Cython with an available pgen.")
+ ConcreteSyntaxTree.p_module(source_filename)
+ except UnicodeDecodeError as e:
+ #import traceback
+ #traceback.print_exc()
+ raise self._report_decode_error(source_desc, e)
+
+ if Errors.get_errors_count() > num_errors:
+ raise CompileError()
+ return tree
+
+ def _report_decode_error(self, source_desc, exc):
+ msg = exc.args[-1]
+ position = exc.args[2]
+ encoding = exc.args[0]
+
+ line = 1
+ column = idx = 0
+ with io.open(source_desc.filename, "r", encoding='iso8859-1', newline='') as f:
+ for line, data in enumerate(f, 1):
+ idx += len(data)
+ if idx >= position:
+ column = position - (idx - len(data)) + 1
+ break
+
+ return error((source_desc, line, column),
+ "Decoding error, missing or incorrect coding= "
+ "at top of source (cannot decode with encoding %r: %s)" % (encoding, msg))
+
+ def extract_module_name(self, path, options):
+ # Find fully_qualified module name from the full pathname
+ # of a source file.
+ dir, filename = os.path.split(path)
+ module_name, _ = os.path.splitext(filename)
+ if "." in module_name:
+ return module_name
+ names = [module_name]
+ while self.is_package_dir(dir):
+ parent, package_name = os.path.split(dir)
+ if parent == dir:
+ break
+ names.append(package_name)
+ dir = parent
+ names.reverse()
+ return ".".join(names)
+
+ def setup_errors(self, options, result):
+ Errors.init_thread()
+ if options.use_listing_file:
+ path = result.listing_file = Utils.replace_suffix(result.main_source_file, ".lis")
+ else:
+ path = None
+ Errors.open_listing_file(path=path, echo_to_stderr=options.errors_to_stderr)
+
+ def teardown_errors(self, err, options, result):
+ source_desc = result.compilation_source.source_desc
+ if not isinstance(source_desc, FileSourceDescriptor):
+ raise RuntimeError("Only file sources for code supported")
+ Errors.close_listing_file()
+ result.num_errors = Errors.get_errors_count()
+ if result.num_errors > 0:
+ err = True
+ if err and result.c_file:
+ try:
+ Utils.castrate_file(result.c_file, os.stat(source_desc.filename))
+ except EnvironmentError:
+ pass
+ result.c_file = None
+
+
+def get_output_filename(source_filename, cwd, options):
+ if options.cplus:
+ c_suffix = ".cpp"
+ else:
+ c_suffix = ".c"
+ suggested_file_name = Utils.replace_suffix(source_filename, c_suffix)
+ if options.output_file:
+ out_path = os.path.join(cwd, options.output_file)
+ if os.path.isdir(out_path):
+ return os.path.join(out_path, os.path.basename(suggested_file_name))
+ else:
+ return out_path
+ else:
+ return suggested_file_name
+
+
+def create_default_resultobj(compilation_source, options):
+ result = CompilationResult()
+ result.main_source_file = compilation_source.source_desc.filename
+ result.compilation_source = compilation_source
+ source_desc = compilation_source.source_desc
+ result.c_file = get_output_filename(source_desc.filename,
+ compilation_source.cwd, options)
+ result.embedded_metadata = options.embedded_metadata
+ return result
+
+
+def run_pipeline(source, options, full_module_name=None, context=None):
+ from . import Pipeline
+
+ # ensure that the inputs are unicode (for Python 2)
+ if sys.version_info[0] == 2:
+ source = Utils.decode_filename(source)
+ if full_module_name:
+ full_module_name = Utils.decode_filename(full_module_name)
+
+ source_ext = os.path.splitext(source)[1]
+ options.configure_language_defaults(source_ext[1:]) # py/pyx
+ if context is None:
+ context = Context.from_options(options)
+
+ # Set up source object
+ cwd = os.getcwd()
+ abs_path = os.path.abspath(source)
+ full_module_name = full_module_name or context.extract_module_name(source, options)
+ full_module_name = EncodedString(full_module_name)
+
+ Utils.raise_error_if_module_name_forbidden(full_module_name)
+
+ if options.relative_path_in_code_position_comments:
+ rel_path = full_module_name.replace('.', os.sep) + source_ext
+ if not abs_path.endswith(rel_path):
+ rel_path = source # safety measure to prevent printing incorrect paths
+ else:
+ rel_path = abs_path
+ source_desc = FileSourceDescriptor(abs_path, rel_path)
+ source = CompilationSource(source_desc, full_module_name, cwd)
+
+ # Set up result object
+ result = create_default_resultobj(source, options)
+
+ if options.annotate is None:
+ # By default, decide based on whether an html file already exists.
+ html_filename = os.path.splitext(result.c_file)[0] + ".html"
+ if os.path.exists(html_filename):
+ with io.open(html_filename, "r", encoding="UTF-8") as html_file:
+ if u'<!-- Generated by Cython' in html_file.read(100):
+ options.annotate = True
+ file.write(" %s --> State %d\n" % (key, state['number']))
+ for key in ('bol', 'eol', 'eof', 'else'):
+ state = special_to_state.get(key, None)
+ if state:
+ file.write(" %s --> State %d\n" % (key, state['number']))
+
+ @cython.locals(char_list=list, i=cython.Py_ssize_t, n=cython.Py_ssize_t, c1=cython.long, c2=cython.long)
+ def chars_to_ranges(self, char_list):
+ char_list.sort()
+ i = 0
+ n = len(char_list)
+ result = []
+ while i < n:
+ c1 = ord(char_list[i])
+ c2 = c1
+ i += 1
+ while i < n and ord(char_list[i]) == c2 + 1:
+ i += 1
+ c2 += 1
+ result.append((chr(c1), chr(c2)))
+ return tuple(result)
+
+ def ranges_to_string(self, range_list):
+ return ','.join(map(self.range_to_string, range_list))
+
+ def range_to_string(self, range_tuple):
+ (c1, c2) = range_tuple
+ if c1 == c2:
+ return repr(c1)
+ else:
+ return "%s..%s" % (repr(c1), repr(c2))
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Regexps.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Regexps.py
new file mode 100644
index 0000000000000000000000000000000000000000..99d8c994a55cbf8b0882e89f6d75cadc8dd82358
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Regexps.py
@@ -0,0 +1,540 @@
+"""
+Python Lexical Analyser
+
+Regular Expressions
+"""
+from __future__ import absolute_import
+
+
+from . import Errors
+
+maxint = 2**31-1 # sentinel value
+
+#
+# Constants
+#
+
+BOL = 'bol'
+EOL = 'eol'
+EOF = 'eof'
+
+nl_code = ord('\n')
+
+
+#
+# Helper functions
+#
+
+def chars_to_ranges(s):
+ """
+ Return a list of character codes consisting of pairs
+ [code1a, code1b, code2a, code2b,...] which cover all
+ the characters in |s|.
+ """
+ char_list = list(s)
+ char_list.sort()
+ i = 0
+ n = len(char_list)
+ result = []
+ while i < n:
+ code1 = ord(char_list[i])
+ code2 = code1 + 1
+ i += 1
+ while i < n and code2 >= ord(char_list[i]):
+ code2 += 1
+ i += 1
+ result.append(code1)
+ result.append(code2)
+ return result
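+# For example: chars_to_ranges("abcx") == [97, 100, 120, 121], i.e. the
+# half-open code ranges [97, 100) ('a'..'c') and [120, 121) ('x').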
+
+
+def uppercase_range(code1, code2):
+ """
+ If the range of characters from code1 to code2-1 includes any
+ lower case letters, return the corresponding upper case range.
+ """
+ code3 = max(code1, ord('a'))
+ code4 = min(code2, ord('z') + 1)
+ if code3 < code4:
+ d = ord('A') - ord('a')
+ return (code3 + d, code4 + d)
+ else:
+ return None
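+# For example: uppercase_range(ord('a'), ord('d')) == (65, 68), the codes
+# of 'A'..'C', while uppercase_range(ord('0'), ord('9')) is None.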
+
+
+def lowercase_range(code1, code2):
+ """
+ If the range of characters from code1 to code2-1 includes any
+ upper case letters, return the corresponding lower case range.
+ """
+ code3 = max(code1, ord('A'))
+ code4 = min(code2, ord('Z') + 1)
+ if code3 < code4:
+ d = ord('a') - ord('A')
+ return (code3 + d, code4 + d)
+ else:
+ return None
+
+
+def CodeRanges(code_list):
+ """
+ Given a list of codes as returned by chars_to_ranges, return
+ an RE which will match a character in any of the ranges.
+ """
+ re_list = [CodeRange(code_list[i], code_list[i + 1]) for i in range(0, len(code_list), 2)]
+ return Alt(*re_list)
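+# For example: CodeRanges([97, 100, 120, 121]) builds
+# Alt(CodeRange(97, 100), CodeRange(120, 121)), matching 'a'..'c' or 'x'.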
+
+
+def CodeRange(code1, code2):
+ """
+ CodeRange(code1, code2) is an RE which matches any character
+ with a code |c| in the range |code1| <= |c| < |code2|.
+ """
+ if code1 <= nl_code < code2:
+ return Alt(RawCodeRange(code1, nl_code),
+ RawNewline,
+ RawCodeRange(nl_code + 1, code2))
+ else:
+ return RawCodeRange(code1, code2)
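+# For example: CodeRange(9, 48) straddles the newline code (10), so it
+# becomes Alt(RawCodeRange(9, 10), RawNewline, RawCodeRange(11, 48)).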
+
+
+#
+# Abstract classes
+#
+
+class RE(object):
+ """RE is the base class for regular expression constructors.
+ The following operators are defined on REs:
+
+ re1 + re2 is an RE which matches |re1| followed by |re2|
+ re1 | re2 is an RE which matches either |re1| or |re2|
+ """
+
+ nullable = 1 # True if this RE can match 0 input symbols
+ match_nl = 1 # True if this RE can match a string ending with '\n'
+ str = None # Set to a string to override the class's __str__ result
+
+ def build_machine(self, machine, initial_state, final_state,
+ match_bol, nocase):
+ """
+ This method should add states to |machine| to implement this
+ RE, starting at |initial_state| and ending at |final_state|.
+ If |match_bol| is true, the RE must be able to match at the
+ beginning of a line. If nocase is true, upper and lower case
+ letters should be treated as equivalent.
+ """
+ raise NotImplementedError("%s.build_machine not implemented" %
+ self.__class__.__name__)
+
+ def build_opt(self, m, initial_state, c):
+ """
+ Given a state |initial_state| of machine |m|, return a new state
+ reachable from it on character |c| or epsilon.
+ """
+ s = m.new_state()
+ initial_state.link_to(s)
+ initial_state.add_transition(c, s)
+ return s
+
+ def __add__(self, other):
+ return Seq(self, other)
+
+ def __or__(self, other):
+ return Alt(self, other)
+
+ def __str__(self):
+ if self.str:
+ return self.str
+ else:
+ return self.calc_str()
+
+ def check_re(self, num, value):
+ if not isinstance(value, RE):
+ self.wrong_type(num, value, "Plex.RE instance")
+
+ def check_string(self, num, value):
+ if type(value) != type(''):
+ self.wrong_type(num, value, "string")
+
+ def check_char(self, num, value):
+ self.check_string(num, value)
+ if len(value) != 1:
+ raise Errors.PlexValueError("Invalid value for argument %d of Plex.%s."
+ "Expected a string of length 1, got: %s" % (
+ num, self.__class__.__name__, repr(value)))
+
+ def wrong_type(self, num, value, expected):
+ # Python 3 has no old-style classes, so the former types.InstanceType
+ # check is gone; the type name alone identifies the offending value.
+ got = type(value).__name__
+ raise Errors.PlexTypeError("Invalid type for argument %d of Plex.%s "
+ "(expected %s, got %s)" % (
+ num, self.__class__.__name__, expected, got))
+
+#
+# Primitive RE constructors
+# -------------------------
+#
+# These are the basic REs from which all others are built.
+#
+
+
+def Char(c):
+ """
+ Char(c) is an RE which matches the character |c|.
+ """
+ if len(c) == 1:
+ result = CodeRange(ord(c), ord(c) + 1)
+ else:
+ result = SpecialSymbol(c)
+ result.str = "Char(%s)" % repr(c)
+ return result
+
+
+class RawCodeRange(RE):
+ """
+ RawCodeRange(code1, code2) is a low-level RE which matches any character
+ with a code |c| in the range |code1| <= |c| < |code2|, where the range
+ does not include newline. For internal use only.
+ """
+ nullable = 0
+ match_nl = 0
+ range = None # (code, code)
+ uppercase_range = None # (code, code) or None
+ lowercase_range = None # (code, code) or None
+
+ def __init__(self, code1, code2):
+ self.range = (code1, code2)
+ self.uppercase_range = uppercase_range(code1, code2)
+ self.lowercase_range = lowercase_range(code1, code2)
+
+ def build_machine(self, m, initial_state, final_state, match_bol, nocase):
+ if match_bol:
+ initial_state = self.build_opt(m, initial_state, BOL)
+ initial_state.add_transition(self.range, final_state)
+ if nocase:
+ if self.uppercase_range:
+ initial_state.add_transition(self.uppercase_range, final_state)
+ if self.lowercase_range:
+ initial_state.add_transition(self.lowercase_range, final_state)
+
+ def calc_str(self):
+ return "CodeRange(%d,%d)" % (self.code1, self.code2)
+
+
+class _RawNewline(RE):
+ """
+ RawNewline is a low-level RE which matches a newline character.
+ For internal use only.
+ """
+ nullable = 0
+ match_nl = 1
+
+ def build_machine(self, m, initial_state, final_state, match_bol, nocase):
+ if match_bol:
+ initial_state = self.build_opt(m, initial_state, BOL)
+ s = self.build_opt(m, initial_state, EOL)
+ s.add_transition((nl_code, nl_code + 1), final_state)
+
+
+RawNewline = _RawNewline()
+
+
+class SpecialSymbol(RE):
+ """
+ SpecialSymbol(sym) is an RE which matches the special input
+ symbol |sym|, which is one of BOL, EOL or EOF.
+ """
+ nullable = 0
+ match_nl = 0
+ sym = None
+
+ def __init__(self, sym):
+ self.sym = sym
+
+ def build_machine(self, m, initial_state, final_state, match_bol, nocase):
+ # The sequences 'bol bol' and 'bol eof' are impossible, so we only
+ # need to allow for BOL if sym is EOL.
+ if match_bol and self.sym == EOL:
+ initial_state = self.build_opt(m, initial_state, BOL)
+ initial_state.add_transition(self.sym, final_state)
+
+
+class Seq(RE):
+ """Seq(re1, re2, re3...) is an RE which matches |re1| followed by
+ |re2| followed by |re3|..."""
+
+ def __init__(self, *re_list):
+ nullable = 1
+ for i, re in enumerate(re_list):
+ self.check_re(i, re)
+ nullable = nullable and re.nullable
+ self.re_list = re_list
+ self.nullable = nullable
+ i = len(re_list)
+ match_nl = 0
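+ # Scan backwards: the sequence can match a string ending in '\n' if
+ # some trailing RE can, and every RE after it is nullable.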
+ while i:
+ i -= 1
+ re = re_list[i]
+ if re.match_nl:
+ match_nl = 1
+ break
+ if not re.nullable:
+ break
+ self.match_nl = match_nl
+
+ def build_machine(self, m, initial_state, final_state, match_bol, nocase):
+ re_list = self.re_list
+ if len(re_list) == 0:
+ initial_state.link_to(final_state)
+ else:
+ s1 = initial_state
+ n = len(re_list)
+ for i, re in enumerate(re_list):
+ if i < n - 1:
+ s2 = m.new_state()
+ else:
+ s2 = final_state
+ re.build_machine(m, s1, s2, match_bol, nocase)
+ s1 = s2
+ match_bol = re.match_nl or (match_bol and re.nullable)
+
+ def calc_str(self):
+ return "Seq(%s)" % ','.join(map(str, self.re_list))
+
+
+class Alt(RE):
+ """Alt(re1, re2, re3...) is an RE which matches either |re1| or
+ |re2| or |re3|..."""
+
+ def __init__(self, *re_list):
+ self.re_list = re_list
+ nullable = 0
+ match_nl = 0
+ nullable_res = []
+ non_nullable_res = []
+ i = 1
+ for re in re_list:
+ self.check_re(i, re)
+ if re.nullable:
+ nullable_res.append(re)
+ nullable = 1
+ else:
+ non_nullable_res.append(re)
+ if re.match_nl:
+ match_nl = 1
+ i += 1
+ self.nullable_res = nullable_res
+ self.non_nullable_res = non_nullable_res
+ self.nullable = nullable
+ self.match_nl = match_nl
+
+ def build_machine(self, m, initial_state, final_state, match_bol, nocase):
+ for re in self.nullable_res:
+ re.build_machine(m, initial_state, final_state, match_bol, nocase)
+ if self.non_nullable_res:
+ if match_bol:
+ initial_state = self.build_opt(m, initial_state, BOL)
+ for re in self.non_nullable_res:
+ re.build_machine(m, initial_state, final_state, 0, nocase)
+
+ def calc_str(self):
+ return "Alt(%s)" % ','.join(map(str, self.re_list))
+
+
+class Rep1(RE):
+ """Rep1(re) is an RE which matches one or more repetitions of |re|."""
+
+ def __init__(self, re):
+ self.check_re(1, re)
+ self.re = re
+ self.nullable = re.nullable
+ self.match_nl = re.match_nl
+
+ def build_machine(self, m, initial_state, final_state, match_bol, nocase):
+ s1 = m.new_state()
+ s2 = m.new_state()
+ initial_state.link_to(s1)
+ self.re.build_machine(m, s1, s2, match_bol or self.re.match_nl, nocase)
+ s2.link_to(s1)
+ s2.link_to(final_state)
+
+ def calc_str(self):
+ return "Rep1(%s)" % self.re
+
+
+class SwitchCase(RE):
+ """
+ SwitchCase(re, nocase) is an RE which matches the same strings as |re|,
+ but treating upper and lower case letters according to |nocase|. If
+ |nocase| is true, case is ignored, otherwise it is not.
+ """
+ re = None
+ nocase = None
+
+ def __init__(self, re, nocase):
+ self.re = re
+ self.nocase = nocase
+ self.nullable = re.nullable
+ self.match_nl = re.match_nl
+
+ def build_machine(self, m, initial_state, final_state, match_bol, nocase):
+ self.re.build_machine(m, initial_state, final_state, match_bol,
+ self.nocase)
+
+ def calc_str(self):
+ if self.nocase:
+ name = "NoCase"
+ else:
+ name = "Case"
+ return "%s(%s)" % (name, self.re)
+
+
+#
+# Composite RE constructors
+# -------------------------
+#
+# These REs are defined in terms of the primitive REs.
+#
+
+Empty = Seq()
+Empty.__doc__ = \
+ """
+ Empty is an RE which matches the empty string.
+ """
+Empty.str = "Empty"
+
+
+def Str1(s):
+ """
+ Str1(s) is an RE which matches the literal string |s|.
+ """
+ result = Seq(*tuple(map(Char, s)))
+ result.str = "Str(%s)" % repr(s)
+ return result
+
+
+def Str(*strs):
+ """
+ Str(s) is an RE which matches the literal string |s|.
+ Str(s1, s2, s3, ...) is an RE which matches any of |s1| or |s2| or |s3|...
+ """
+ if len(strs) == 1:
+ return Str1(strs[0])
+ else:
+ result = Alt(*tuple(map(Str1, strs)))
+ result.str = "Str(%s)" % ','.join(map(repr, strs))
+ return result
+
+
+def Any(s):
+ """
+ Any(s) is an RE which matches any character in the string |s|.
+ """
+ result = CodeRanges(chars_to_ranges(s))
+ result.str = "Any(%s)" % repr(s)
+ return result
+
+
+def AnyBut(s):
+ """
+ AnyBut(s) is an RE which matches any character (including
+ newline) which is not in the string |s|.
+ """
+ ranges = chars_to_ranges(s)
+ ranges.insert(0, -maxint)
+ ranges.append(maxint)
+ result = CodeRanges(ranges)
+ result.str = "AnyBut(%s)" % repr(s)
+ return result
+
+
+AnyChar = AnyBut("")
+AnyChar.__doc__ = \
+ """
+ AnyChar is an RE which matches any single character (including a newline).
+ """
+AnyChar.str = "AnyChar"
+
+
+def Range(s1, s2=None):
+ """
+ Range(c1, c2) is an RE which matches any single character in the range
+ |c1| to |c2| inclusive.
+ Range(s) where |s| is a string of even length is an RE which matches
+ any single character in the ranges |s[0]| to |s[1]|, |s[2]| to |s[3]|,...
+ """
+ if s2:
+ result = CodeRange(ord(s1), ord(s2) + 1)
+ result.str = "Range(%s,%s)" % (s1, s2)
+ else:
+ ranges = []
+ for i in range(0, len(s1), 2):
+ ranges.append(CodeRange(ord(s1[i]), ord(s1[i + 1]) + 1))
+ result = Alt(*ranges)
+ result.str = "Range(%s)" % repr(s1)
+ return result
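+# For example: Range("az") matches any lower-case ASCII letter, and
+# Range("AZaz") builds Alt(CodeRange(65, 91), CodeRange(97, 123)).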
+
+
+def Opt(re):
+ """
+ Opt(re) is an RE which matches either |re| or the empty string.
+ """
+ result = Alt(re, Empty)
+ result.str = "Opt(%s)" % re
+ return result
+
+
+def Rep(re):
+ """
+ Rep(re) is an RE which matches zero or more repetitions of |re|.
+ """
+ result = Opt(Rep1(re))
+ result.str = "Rep(%s)" % re
+ return result
+
+
+def NoCase(re):
+ """
+ NoCase(re) is an RE which matches the same strings as |re|, but treating
+ upper and lower case letters as equivalent.
+ """
+ return SwitchCase(re, nocase=1)
+
+
+def Case(re):
+ """
+ Case(re) is an RE which matches the same strings as |re|, but treating
+ upper and lower case letters as distinct, i.e. it cancels the effect
+ of any enclosing NoCase().
+ """
+ return SwitchCase(re, nocase=0)
+
+
+#
+# RE Constants
+#
+
+Bol = Char(BOL)
+Bol.__doc__ = \
+ """
+ Bol is an RE which matches the beginning of a line.
+ """
+Bol.str = "Bol"
+
+Eol = Char(EOL)
+Eol.__doc__ = \
+ """
+ Eol is an RE which matches the end of a line.
+ """
+Eol.str = "Eol"
+
+Eof = Char(EOF)
+Eof.__doc__ = \
+ """
+ Eof is an RE which matches the end of the file.
+ """
+Eof.str = "Eof"
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..40b3b4e84e0b69d2dd1f015d495d5f152554dc6b
Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.cp39-win_amd64.pyd differ
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.pxd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..664b1a6f0cefed86772cccb26f5ad7e0ef85b2d0
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.pxd
@@ -0,0 +1,48 @@
+from __future__ import absolute_import
+
+import cython
+
+from Cython.Plex.Actions cimport Action
+
+cdef class Scanner:
+
+ cdef public lexicon
+ cdef public stream
+ cdef public name
+ cdef public unicode buffer
+ cdef public Py_ssize_t buf_start_pos
+ cdef public Py_ssize_t next_pos
+ cdef public Py_ssize_t cur_pos
+ cdef public Py_ssize_t cur_line
+ cdef public Py_ssize_t cur_line_start
+ cdef public Py_ssize_t start_pos
+ cdef tuple current_scanner_position_tuple
+ cdef public tuple last_token_position_tuple
+ cdef public text
+ cdef public initial_state # int?
+ cdef public state_name
+ cdef public list queue
+ cdef public bint trace
+ cdef public cur_char
+ cdef public long input_state
+
+ cdef public level
+
+ @cython.locals(input_state=long)
+ cdef inline next_char(self)
+ @cython.locals(action=Action)
+ cpdef tuple read(self)
+ cdef inline unread(self, token, value, position)
+ cdef inline get_current_scan_pos(self)
+ cdef inline tuple scan_a_token(self)
+ ##cdef tuple position(self) # used frequently by Parsing.py
+
+ @cython.final
+ @cython.locals(cur_pos=Py_ssize_t, cur_line=Py_ssize_t, cur_line_start=Py_ssize_t,
+ input_state=long, next_pos=Py_ssize_t, state=dict,
+ buf_start_pos=Py_ssize_t, buf_len=Py_ssize_t, buf_index=Py_ssize_t,
+ trace=bint, discard=Py_ssize_t, data=unicode, buffer=unicode)
+ cdef run_machine_inlined(self)
+
+ cdef inline begin(self, state)
+ cdef inline produce(self, value, text = *)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad85f4465ee6b5e6f0d23fc077c6d7a0eef712f4
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Scanners.py
@@ -0,0 +1,359 @@
+# cython: language_level=3str
+# cython: auto_pickle=False
+"""
+Python Lexical Analyser
+
+Scanning an input stream
+"""
+from __future__ import absolute_import
+
+import cython
+
+cython.declare(BOL=object, EOL=object, EOF=object, NOT_FOUND=object) # noqa:E402
+
+from . import Errors
+from .Regexps import BOL, EOL, EOF
+
+NOT_FOUND = object()
+
+
+class Scanner(object):
+ """
+ A Scanner is used to read tokens from a stream of characters
+ using the token set specified by a Plex.Lexicon.
+
+ Constructor:
+
+ Scanner(lexicon, stream, name = '')
+
+ See the docstring of the __init__ method for details.
+
+ Methods:
+
+ See the docstrings of the individual methods for more
+ information.
+
+ read() --> (value, text)
+ Reads the next lexical token from the stream.
+
+ position() --> (name, line, col)
+ Returns the position of the last token read using the
+ read() method.
+
+ begin(state_name)
+ Causes scanner to change state.
+
+ produce(value [, text])
+ Causes return of a token value to the caller of the
+ Scanner.
+
+ """
+
+ # lexicon = None # Lexicon
+ # stream = None # file-like object
+ # name = ''
+ # buffer = ''
+ #
+ # These positions are used by the scanner to track its internal state:
+ # buf_start_pos = 0 # position in input of start of buffer
+ # next_pos = 0 # position in input of next char to read
+ # cur_pos = 0 # position in input of current char
+ # cur_line = 1 # line number of current char
+ # cur_line_start = 0 # position in input of start of current line
+ # start_pos = 0 # position in input of start of token
+ # current_scanner_position_tuple = ("", 0, 0)
+ # tuple of (filename, line number, position in line), mainly for error reporting
+ #
+ # These positions are used to track what was read from the queue
+ # (which may differ from the internal state when tokens are replaced onto the queue)
+ # last_token_position_tuple = ("", 0, 0) # tuple of filename, line number and position in line
+
+ # text = None # text of last token read
+ # initial_state = None # Node
+ # state_name = '' # Name of initial state
+ # queue = None # list of tokens and positions to be returned
+ # trace = 0
+
+ def __init__(self, lexicon, stream, name='', initial_pos=None):
+ """
+ Scanner(lexicon, stream, name = '')
+
+ |lexicon| is a Plex.Lexicon instance specifying the lexical tokens
+ to be recognised.
+
+ |stream| can be a file object or anything which implements a
+ compatible read() method.
+
+ |name| is optional, and may be the name of the file being
+ scanned or any other identifying string.
+ """
+ self.trace = 0
+
+ self.buffer = u''
+ self.buf_start_pos = 0
+ self.next_pos = 0
+ self.cur_pos = 0
+ self.cur_line = 1
+ self.start_pos = 0
+ self.current_scanner_position_tuple = ("", 0, 0)
+ self.last_token_position_tuple = ("", 0, 0)
+ self.text = None
+ self.state_name = None
+
+ self.lexicon = lexicon
+ self.stream = stream
+ self.name = name
+ self.queue = []
+ self.initial_state = None
+ self.begin('')
+ self.next_pos = 0
+ self.cur_pos = 0
+ self.cur_line_start = 0
+ self.cur_char = BOL
+ self.input_state = 1
+ if initial_pos is not None:
+ self.cur_line, self.cur_line_start = initial_pos[1], -initial_pos[2]
+
+ def read(self):
+ """
+ Read the next lexical token from the stream and return a
+ tuple (value, text), where |value| is the value associated with
+ the token as specified by the Lexicon, and |text| is the actual
+ string read from the stream. Returns (None, '') on end of file.
+ """
+ queue = self.queue
+ while not queue:
+ self.text, action = self.scan_a_token()
+ if action is None:
+ self.produce(None)
+ self.eof()
+ else:
+ value = action.perform(self, self.text)
+ if value is not None:
+ self.produce(value)
+ result, self.last_token_position_tuple = queue[0]
+ del queue[0]
+ return result
+
+ def unread(self, token, value, position):
+ self.queue.insert(0, ((token, value), position))
+
+ def get_current_scan_pos(self):
+ # distinct from the position of the last token due to the queue
+ return self.current_scanner_position_tuple
+
+ def scan_a_token(self):
+ """
+ Read the next input sequence recognised by the machine
+ and return (text, action). Returns ('', None) on end of
+ file.
+ """
+ self.start_pos = self.cur_pos
+ self.current_scanner_position_tuple = (
+ self.name, self.cur_line, self.cur_pos - self.cur_line_start
+ )
+ action = self.run_machine_inlined()
+ if action is not None:
+ if self.trace:
+ print("Scanner: read: Performing %s %d:%d" % (
+ action, self.start_pos, self.cur_pos))
+ text = self.buffer[
+ self.start_pos - self.buf_start_pos:
+ self.cur_pos - self.buf_start_pos]
+ return (text, action)
+ else:
+ if self.cur_pos == self.start_pos:
+ if self.cur_char is EOL:
+ self.next_char()
+ if self.cur_char is None or self.cur_char is EOF:
+ return (u'', None)
+ raise Errors.UnrecognizedInput(self, self.state_name)
+
+ def run_machine_inlined(self):
+ """
+ Inlined version of run_machine for speed.
+ """
+ state = self.initial_state
+ cur_pos = self.cur_pos
+ cur_line = self.cur_line
+ cur_line_start = self.cur_line_start
+ cur_char = self.cur_char
+ input_state = self.input_state
+ next_pos = self.next_pos
+ buffer = self.buffer
+ buf_start_pos = self.buf_start_pos
+ buf_len = len(buffer)
+ b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \
+ None, 0, 0, 0, u'', 0, 0
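+ # The b_* locals remember the scanner state at the most recent point
+ # where a match action was available, so that hitting a dead end can
+ # back up to the longest match found so far.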
+
+ trace = self.trace
+ while 1:
+ if trace:
+ print("State %d, %d/%d:%s -->" % (
+ state['number'], input_state, cur_pos, repr(cur_char)))
+
+ # Begin inlined self.save_for_backup()
+ action = state['action']
+ if action is not None:
+ b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \
+ action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos
+ # End inlined self.save_for_backup()
+
+ c = cur_char
+ new_state = state.get(c, NOT_FOUND)
+ if new_state is NOT_FOUND:
+ new_state = c and state.get('else')
+
+ if new_state:
+ if trace:
+ print("State %d" % new_state['number'])
+ state = new_state
+ # Begin inlined: self.next_char()
+ if input_state == 1:
+ cur_pos = next_pos
+ # Begin inlined: c = self.read_char()
+ buf_index = next_pos - buf_start_pos
+ if buf_index < buf_len:
+ c = buffer[buf_index]
+ next_pos += 1
+ else:
+ discard = self.start_pos - buf_start_pos
+ data = self.stream.read(0x1000)
+ buffer = self.buffer[discard:] + data
+ self.buffer = buffer
+ buf_start_pos += discard
+ self.buf_start_pos = buf_start_pos
+ buf_len = len(buffer)
+ buf_index -= discard
+ if data:
+ c = buffer[buf_index]
+ next_pos += 1
+ else:
+ c = u''
+ # End inlined: c = self.read_char()
+ if c == u'\n':
+ cur_char = EOL
+ input_state = 2
+ elif not c:
+ cur_char = EOL
+ input_state = 4
+ else:
+ cur_char = c
+ elif input_state == 2: # after EoL (1) -> BoL (3)
+ cur_char = u'\n'
+ input_state = 3
+ elif input_state == 3: # start new code line
+ cur_line += 1
+ cur_line_start = cur_pos = next_pos
+ cur_char = BOL
+ input_state = 1
+ elif input_state == 4: # after final line (1) -> EoF (5)
+ cur_char = EOF
+ input_state = 5
+ else: # input_state == 5 (EoF)
+ cur_char = u''
+ # End inlined self.next_char()
+ else: # not new_state
+ if trace:
+ print("blocked")
+ # Begin inlined: action = self.back_up()
+ if b_action is not None:
+ (action, cur_pos, cur_line, cur_line_start,
+ cur_char, input_state, next_pos) = \
+ (b_action, b_cur_pos, b_cur_line, b_cur_line_start,
+ b_cur_char, b_input_state, b_next_pos)
+ else:
+ action = None
+ break # while 1
+ # End inlined: action = self.back_up()
+
+ self.cur_pos = cur_pos
+ self.cur_line = cur_line
+ self.cur_line_start = cur_line_start
+ self.cur_char = cur_char
+ self.input_state = input_state
+ self.next_pos = next_pos
+ if trace:
+ if action is not None:
+ print("Doing %s" % action)
+ return action
+
+ def next_char(self):
+ input_state = self.input_state
+ if self.trace:
+ print("Scanner: next: %s [%d] %d" % (" " * 20, input_state, self.cur_pos))
+ if input_state == 1:
+ self.cur_pos = self.next_pos
+ c = self.read_char()
+ if c == u'\n':
+ self.cur_char = EOL
+ self.input_state = 2
+ elif not c:
+ self.cur_char = EOL
+ self.input_state = 4
+ else:
+ self.cur_char = c
+ elif input_state == 2:
+ self.cur_char = u'\n'
+ self.input_state = 3
+ elif input_state == 3:
+ self.cur_line += 1
+ self.cur_line_start = self.cur_pos = self.next_pos
+ self.cur_char = BOL
+ self.input_state = 1
+ elif input_state == 4:
+ self.cur_char = EOF
+ self.input_state = 5
+ else: # input_state = 5
+ self.cur_char = u''
+ if self.trace:
+ print("--> [%d] %d %r" % (input_state, self.cur_pos, self.cur_char))
+
+ def position(self):
+ """
+ Return a tuple (name, line, col) representing the location of
+ the last token read using the read() method. |name| is the
+ name that was provided to the Scanner constructor; |line|
+ is the line number in the stream (1-based); |col| is the
+ position within the line of the first character of the token
+ (0-based).
+ """
+ return self.last_token_position_tuple
+
+ def get_position(self):
+ """
+ Python accessible wrapper around position(), only for error reporting.
+ """
+ return self.position()
+
+ def begin(self, state_name):
+ """Set the current state of the scanner to the named state."""
+ self.initial_state = (
+ self.lexicon.get_initial_state(state_name))
+ self.state_name = state_name
+
+ def produce(self, value, text=None):
+ """
+ Called from an action procedure, causes |value| to be returned
+ as the token value from read(). If |text| is supplied, it is
+ returned in place of the scanned text.
+
+ produce() can be called more than once during a single call to an action
+ procedure, in which case the tokens are queued up and returned one
+ at a time by subsequent calls to read(), until the queue is empty,
+ whereupon scanning resumes.
+ """
+ if text is None:
+ text = self.text
+ self.queue.append(((value, text), self.current_scanner_position_tuple))
+
+ def eof(self):
+ """
+ Override this method if you want something to be done at
+ end of file.
+ """
+ pass
+
+ @property
+ def start_line(self):
+ return self.last_token_position_tuple[1]
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..90baab58f487161f688de72da07760370aef3217
Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.cp39-win_amd64.pyd differ
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.pxd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..53dd4d58ea9e89b19c02dae4f7ece76a8083156b
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.pxd
@@ -0,0 +1,22 @@
+cimport cython
+
+cdef long maxint
+
+@cython.final
+cdef class TransitionMap:
+ cdef list map
+ cdef dict special
+
+ @cython.locals(i=cython.Py_ssize_t, j=cython.Py_ssize_t)
+ cpdef add(self, event, new_state)
+
+ @cython.locals(i=cython.Py_ssize_t, j=cython.Py_ssize_t)
+ cpdef add_set(self, event, new_set)
+
+ @cython.locals(i=cython.Py_ssize_t, n=cython.Py_ssize_t, else_set=cython.bint)
+ cpdef iteritems(self)
+
+ @cython.locals(map=list, lo=cython.Py_ssize_t, mid=cython.Py_ssize_t, hi=cython.Py_ssize_t)
+ cdef split(self, long code)
+
+ cdef get_special(self, event)
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.py
new file mode 100644
index 0000000000000000000000000000000000000000..f58dd538e26cea710a86c7ab1983650d0b4d15e5
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/Transitions.py
@@ -0,0 +1,234 @@
+# cython: auto_pickle=False
+"""
+Plex - Transition Maps
+
+This version represents state sets directly as dicts for speed.
+"""
+
+maxint = 2**31-1 # sentinel value
+
+
+class TransitionMap(object):
+ """
+ A TransitionMap maps an input event to a set of states.
+ An input event is one of: a range of character codes,
+ the empty string (representing an epsilon move), or one
+ of the special symbols BOL, EOL, EOF.
+
+ For characters, this implementation compactly represents
+ the map by means of a list:
+
+ [code_0, states_0, code_1, states_1, code_2, states_2,
+ ..., code_n-1, states_n-1, code_n]
+
+ where |code_i| is a character code, and |states_i| is a
+ set of states corresponding to characters with codes |c|
+ in the range |code_i| <= |c| < |code_i+1|.
+
+ The following invariants hold:
+ n >= 1
+ code_0 == -maxint
+ code_n == maxint
+ code_i < code_i+1 for i in 0..n-1
+ states_0 == states_n-1
+
+ Mappings for the special events '', BOL, EOL, EOF are
+ kept separately in a dictionary.
+ """
+
+ def __init__(self, map=None, special=None):
+ if not map:
+ map = [-maxint, {}, maxint]
+ if not special:
+ special = {}
+ self.map = map # The list of codes and states
+ self.special = special # Mapping for special events
+
+ def add(self, event, new_state):
+ """
+ Add transition to |new_state| on |event|.
+ """
+ if type(event) is tuple:
+ code0, code1 = event
+ i = self.split(code0)
+ j = self.split(code1)
+ map = self.map
+ while i < j:
+ map[i + 1][new_state] = 1
+ i += 2
+ else:
+ self.get_special(event)[new_state] = 1
+
+ def add_set(self, event, new_set):
+ """
+ Add transitions to the states in |new_set| on |event|.
+ """
+ if type(event) is tuple:
+ code0, code1 = event
+ i = self.split(code0)
+ j = self.split(code1)
+ map = self.map
+ while i < j:
+ map[i + 1].update(new_set)
+ i += 2
+ else:
+ self.get_special(event).update(new_set)
+
+ def get_epsilon(self):
+ """
+ Return the mapping for epsilon, or None.
+ """
+ return self.special.get('')
+
+ def iteritems(self):
+ """
+ Return the mapping as an iterable of ((code1, code2), state_set) and
+ (special_event, state_set) pairs.
+ """
+ result = []
+ map = self.map
+ else_set = map[1]
+ i = 0
+ n = len(map) - 1
+ code0 = map[0]
+ while i < n:
+ set = map[i + 1]
+ code1 = map[i + 2]
+ if set or else_set:
+ result.append(((code0, code1), set))
+ code0 = code1
+ i += 2
+ for event, set in self.special.items():
+ if set:
+ result.append((event, set))
+ return iter(result)
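+ # For example: for the map [-maxint, {}, 97, {s1: 1}, 99, {}, maxint]
+ # this yields only ((97, 99), {s1: 1}); the empty outer ranges are
+ # skipped because both their set and else_set are empty.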
+
+ items = iteritems
+
+ # ------------------- Private methods --------------------
+
+ def split(self, code):
+ """
+ Search the list for the position of the split point for |code|,
+ inserting a new split point if necessary. Returns index |i| such
+ that |code| == |map[i]|.
+ """
+ # We use a funky variation on binary search.
+ map = self.map
+ hi = len(map) - 1
+ # Special case: code == map[-1]
+ if code == maxint:
+ return hi
+
+ # General case
+ lo = 0
+ # loop invariant: map[lo] <= code < map[hi] and hi - lo >= 2
+ while hi - lo >= 4:
+ # Find midpoint truncated to even index
+ mid = ((lo + hi) // 2) & ~1
+ if code < map[mid]:
+ hi = mid
+ else:
+ lo = mid
+ # map[lo] <= code < map[hi] and hi - lo == 2
+ if map[lo] == code:
+ return lo
+ else:
+ map[hi:hi] = [code, map[hi - 1].copy()]
+ return hi
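+
+    # Illustrative example (not in the original source): splitting the
+    # initial map [-maxint, {}, maxint] at code 97 inserts a new split
+    # point, leaving [-maxint, {}, 97, {}, maxint], and returns index 2,
+    # so that map[2] == 97.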
+
+ def get_special(self, event):
+ """
+ Get state set for special event, adding a new entry if necessary.
+ """
+ special = self.special
+ set = special.get(event, None)
+ if not set:
+ set = {}
+ special[event] = set
+ return set
+
+ # --------------------- Conversion methods -----------------------
+
+ def __str__(self):
+ map_strs = []
+ map = self.map
+ n = len(map)
+ i = 0
+ while i < n:
+ code = map[i]
+ if code == -maxint:
+ code_str = "-inf"
+ elif code == maxint:
+ code_str = "inf"
+ else:
+ code_str = str(code)
+ map_strs.append(code_str)
+ i += 1
+ if i < n:
+ map_strs.append(state_set_str(map[i]))
+ i += 1
+ special_strs = {}
+ for event, set in self.special.items():
+ special_strs[event] = state_set_str(set)
+ return "[%s]+%s" % (
+ ','.join(map_strs),
+ special_strs
+ )
+
+ # --------------------- Debugging methods -----------------------
+
+ def check(self):
+ """Check data structure integrity."""
+ if not self.map[-3] < self.map[-1]:
+ print(self)
+ assert 0
+
+ def dump(self, file):
+ map = self.map
+ i = 0
+ n = len(map) - 1
+ while i < n:
+ self.dump_range(map[i], map[i + 2], map[i + 1], file)
+ i += 2
+ for event, set in self.special.items():
+ if set:
+ if not event:
+ event = 'empty'
+ self.dump_trans(event, set, file)
+
+ def dump_range(self, code0, code1, set, file):
+ if set:
+ if code0 == -maxint:
+ if code1 == maxint:
+ k = "any"
+ else:
+ k = "< %s" % self.dump_char(code1)
+ elif code1 == maxint:
+ k = "> %s" % self.dump_char(code0 - 1)
+ elif code0 == code1 - 1:
+ k = self.dump_char(code0)
+ else:
+ k = "%s..%s" % (self.dump_char(code0),
+ self.dump_char(code1 - 1))
+ self.dump_trans(k, set, file)
+
+ def dump_char(self, code):
+ if 0 <= code <= 255:
+ return repr(chr(code))
+ else:
+ return "chr(%d)" % code
+
+ def dump_trans(self, key, set, file):
+ file.write(" %s --> %s\n" % (key, self.dump_set(set)))
+
+ def dump_set(self, set):
+ return state_set_str(set)
+
+
+#
+# State set manipulation functions
+#
+
+def state_set_str(set):
+ return "[%s]" % ','.join(["S%d" % state.number for state in set])
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..83bb9239abec1bc1da2f9aa85a266d73e264f6a1
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Plex/__init__.py
@@ -0,0 +1,35 @@
+"""
+Python Lexical Analyser
+
+The Plex module provides lexical analysers with similar capabilities
+to GNU Flex. The following classes and functions are exported;
+see the attached docstrings for more information.
+
+ Scanner For scanning a character stream under the
+ direction of a Lexicon.
+
+ Lexicon For constructing a lexical definition
+ to be used by a Scanner.
+
+ Str, Any, AnyBut, AnyChar, Seq, Alt, Opt, Rep, Rep1,
+ Bol, Eol, Eof, Empty
+
+ Regular expression constructors, for building pattern
+ definitions for a Lexicon.
+
+ State For defining scanner states when creating a
+ Lexicon.
+
+ TEXT, IGNORE, Begin
+
+ Actions for associating with patterns when
+ creating a Lexicon.
+"""
+# flake8: noqa:F401
+from __future__ import absolute_import
+
+from .Actions import TEXT, IGNORE, Begin, Method
+from .Lexicons import Lexicon, State
+from .Regexps import RE, Seq, Alt, Rep1, Empty, Str, Any, AnyBut, AnyChar, Range
+from .Regexps import Opt, Rep, Bol, Eol, Eof, Case, NoCase
+from .Scanners import Scanner
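+
+# Example usage (illustrative sketch, not part of the original module):
+#
+#     from io import StringIO
+#     from Cython.Plex import Lexicon, Scanner, Str, Any, Rep1, TEXT, IGNORE
+#
+#     lexicon = Lexicon([
+#         (Rep1(Any("0123456789")), TEXT),    # integer literals
+#         (Str(" "), IGNORE),                 # skip spaces
+#     ])
+#     scanner = Scanner(lexicon, StringIO(u"12 34"), "demo")
+#     value, text = scanner.read()            # e.g. ('12', '12')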
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/__init__.py b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa81adaff68e06d8e915a6afa375f62f7e5a8fad
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/__init__.py
@@ -0,0 +1 @@
+# empty file
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.cp39-win_amd64.pyd b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.cp39-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..252498e90d52f3103e6db62ea70058c850d0ae80
Binary files /dev/null and b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.cp39-win_amd64.pyd differ
diff --git a/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.pyx b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.pyx
new file mode 100644
index 0000000000000000000000000000000000000000..bc72f62c6af52a55913af48edd7e13a37ded3f15
--- /dev/null
+++ b/.eggs/Cython-3.0.8-py3.9-win-amd64.egg/Cython/Runtime/refnanny.pyx
@@ -0,0 +1,192 @@
+# cython: language_level=3, auto_pickle=False
+
+from cpython.ref cimport PyObject, Py_INCREF, Py_CLEAR, Py_XDECREF, Py_XINCREF
+from cpython.exc cimport PyErr_Fetch, PyErr_Restore
+from cpython.pystate cimport PyThreadState_Get
+
+cimport cython
+
+loglevel = 0
+reflog = []
+
+cdef log(level, action, obj, lineno):
+ if reflog is None:
+ # can happen during finalisation
+ return
+ if loglevel >= level:
+ reflog.append((lineno, action, id(obj)))
+
+LOG_NONE, LOG_ALL = range(2)
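+# Setting loglevel = LOG_ALL makes log() record every regref/delref
+# event in reflog as (lineno, action, id(obj)) tuples.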
+
+@cython.final
+cdef class Context(object):
+ cdef readonly object name, filename
+ cdef readonly dict refs
+ cdef readonly list errors
+ cdef readonly Py_ssize_t start
+
+ def __cinit__(self, name, line=0, filename=None):
+ self.name = name
+ self.start = line
+ self.filename = filename
+ self.refs = {} # id -> (count, [lineno])
+ self.errors = []
+
+ cdef regref(self, obj, Py_ssize_t lineno, bint is_null):
+        log(LOG_ALL, u'regref', u"<NULL>" if is_null else obj, lineno)
+ if is_null:
+ self.errors.append(f"NULL argument on line {lineno}")
+ return
+ id_ = id(obj)
+ count, linenumbers = self.refs.get(id_, (0, []))
+ self.refs[id_] = (count + 1, linenumbers)
+ linenumbers.append(lineno)
+
+ cdef bint delref(self, obj, Py_ssize_t lineno, bint is_null) except -1:
+ # returns whether it is ok to do the decref operation
+        log(LOG_ALL, u'delref', u"<NULL>" if is_null else obj, lineno)
+ if is_null:
+ self.errors.append(f"NULL argument on line {lineno}")
+ return False
+ id_ = id(obj)
+ count, linenumbers = self.refs.get(id_, (0, []))
+ if count == 0:
+ self.errors.append(f"Too many decrefs on line {lineno}, reference acquired on lines {linenumbers!r}")
+ return False
+ if count == 1:
+ del self.refs[id_]
+ else:
+ self.refs[id_] = (count - 1, linenumbers)
+ return True
+
+ cdef end(self):
+ if self.refs:
+ msg = u"References leaked:"
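+            # Note: Cython compiles .itervalues() on a typed dict to
+            # C-level iteration, so this also works at language_level=3.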
+ for count, linenos in self.refs.itervalues():
+ msg += f"\n ({count}) acquired on lines: {u', '.join([f'{x}' for x in linenos])}"
+ self.errors.append(msg)
+ return u"\n".join([f'REFNANNY: {error}' for error in self.errors]) if self.errors else None
+
+
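+# Illustrative usage of Context from Cython code (not part of the
+# original source; regref/delref/end are cdef methods, callable only
+# from other Cython code):
+#     ctx = Context(u"my_func", 1, u"my_module.pyx")
+#     ctx.regref(obj, 2, False)     # acquire a reference to obj
+#     ctx.delref(obj, 3, False)     # release it again
+#     assert ctx.end() is None      # balanced refs -> no leak report
+
+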
+cdef void report_unraisable(filename, Py_ssize_t lineno, object e=None):
+ try:
+ if e is None:
+ import sys
+ e = sys.exc_info()[1]
+ print(f"refnanny raised an exception from {filename}:{lineno}: {e}")
+ finally:
+ return # We absolutely cannot exit with an exception
+
+
+# All Python operations must happen after any existing
+# exception has been fetched, in case we are called from
+# exception-handling code.
+
+cdef PyObject* SetupContext(char* funcname, Py_ssize_t lineno, char* filename) except NULL:
+ if Context is None:
+ # Context may be None during finalize phase.
+ # In that case, we don't want to be doing anything fancy
+ # like caching and resetting exceptions.
+ return NULL
+ cdef (PyObject*) type = NULL, value = NULL, tb = NULL, result = NULL
+ PyThreadState_Get() # Check that we hold the GIL
+ PyErr_Fetch(&type, &value, &tb)
+ try:
+ ctx = Context(funcname, lineno, filename)
+ Py_INCREF(ctx)
+        result = <PyObject*>ctx
+    except Exception as e:
+ report_unraisable(filename, lineno, e)
+ PyErr_Restore(type, value, tb)
+ return result
+
+cdef void GOTREF(PyObject* ctx, PyObject* p_obj, Py_ssize_t lineno):
+ if ctx == NULL: return
+ cdef (PyObject*) type = NULL, value = NULL, tb = NULL
+ PyErr_Fetch(&type, &value, &tb)
+ try:
+        (<Context>ctx).regref(
+