Dataset preview. Columns (per row, in order):

  file_path          string   lengths 21 to 202
  content            string   lengths 19 to 1.02M
  size               int64    19 to 1.02M
  lang               string   8 classes
  avg_line_length    float64  5.88 to 100
  max_line_length    int64    12 to 993
  alphanum_fraction  float64  0.27 to 0.93
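Each row below lists these columns in order: the file path, the file's content, then its size, language, and line-length statistics. As a minimal, hedged sketch (the dataset identifier and the use of the Hugging Face `datasets` library are assumptions, not stated anywhere in this preview), rows with this schema could be loaded and filtered like so:

# Minimal sketch, assuming this preview corresponds to a Hugging Face-style dataset.
# "some-org/omniverse-kit-code" is a hypothetical identifier, not taken from this page.
from datasets import load_dataset

ds = load_dataset("some-org/omniverse-kit-code", split="train")

# Select Python rows with modest line lengths, using the columns summarized above.
python_rows = ds.filter(
    lambda row: row["lang"] == "Python" and row["max_line_length"] <= 120
)

for row in python_rows.select(range(3)):
    print(row["file_path"], row["size"], row["avg_line_length"])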
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_common.py
import os
import sys
import pytest
import numpy as np
from . import util


class TestCommonBlock(util.F2PyTest):
    sources = [util.getpath("tests", "src", "common", "block.f")]

    @pytest.mark.skipif(sys.platform == "win32",
                        reason="Fails with MinGW64 Gfortran (Issue #9673)")
    def test_common_block(self):
        self.module.initcb()
        assert self.module.block.long_bn == np.array(1.0, dtype=np.float64)
        assert self.module.block.string_bn == np.array("2", dtype="|S1")
        assert self.module.block.ok == np.array(3, dtype=np.int32)
584
Python
29.789472
75
0.640411
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_string.py
import os
import pytest
import textwrap
import numpy as np
from . import util


class TestString(util.F2PyTest):
    sources = [util.getpath("tests", "src", "string", "char.f90")]

    @pytest.mark.slow
    def test_char(self):
        strings = np.array(["ab", "cd", "ef"], dtype="c").T
        inp, out = self.module.char_test.change_strings(
            strings, strings.shape[1])
        assert inp == pytest.approx(strings)
        expected = strings.copy()
        expected[1, :] = "AAA"
        assert out == pytest.approx(expected)


class TestDocStringArguments(util.F2PyTest):
    sources = [util.getpath("tests", "src", "string", "string.f")]

    def test_example(self):
        a = np.array(b"123\0\0")
        b = np.array(b"123\0\0")
        c = np.array(b"123")
        d = np.array(b"123")
        self.module.foo(a, b, c, d)
        assert a.tobytes() == b"123\0\0"
        assert b.tobytes() == b"B23\0\0"
        assert c.tobytes() == b"123"
        assert d.tobytes() == b"D23"


class TestFixedString(util.F2PyTest):
    sources = [util.getpath("tests", "src", "string", "fixed_string.f90")]

    @staticmethod
    def _sint(s, start=0, end=None):
        """Return the content of a string buffer as integer value.

        For example:
          _sint('1234') -> 4321
          _sint('123A') -> 17321
        """
        if isinstance(s, np.ndarray):
            s = s.tobytes()
        elif isinstance(s, str):
            s = s.encode()
        assert isinstance(s, bytes)
        if end is None:
            end = len(s)
        i = 0
        for j in range(start, min(end, len(s))):
            i += s[j] * 10**j
        return i

    def _get_input(self, intent="in"):
        if intent in ["in"]:
            yield ""
            yield "1"
            yield "1234"
            yield "12345"
        yield b""
        yield b"\0"
        yield b"1"
        yield b"\01"
        yield b"1\0"
        yield b"1234"
        yield b"12345"
        yield np.ndarray((), np.bytes_, buffer=b"")  # array(b'', dtype='|S0')
        yield np.array(b"")  # array(b'', dtype='|S1')
        yield np.array(b"\0")
        yield np.array(b"1")
        yield np.array(b"1\0")
        yield np.array(b"\01")
        yield np.array(b"1234")
        yield np.array(b"123\0")
        yield np.array(b"12345")

    def test_intent_in(self):
        for s in self._get_input():
            r = self.module.test_in_bytes4(s)
            # also checks that s is not changed inplace
            expected = self._sint(s, end=4)
            assert r == expected, s

    def test_intent_inout(self):
        for s in self._get_input(intent="inout"):
            rest = self._sint(s, start=4)
            r = self.module.test_inout_bytes4(s)
            expected = self._sint(s, end=4)
            assert r == expected
            # check that the rest of input string is preserved
            assert rest == self._sint(s, start=4)
2,962
Python
28.336633
78
0.516205
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/util.py
""" Utility functions for - building and importing modules on test time, using a temporary location - detecting if compilers are present - determining paths to tests """ import os import sys import subprocess import tempfile import shutil import atexit import textwrap import re import pytest import contextlib import numpy from pathlib import Path from numpy.compat import asbytes, asstr from numpy.testing import temppath from importlib import import_module # # Maintaining a temporary module directory # _module_dir = None _module_num = 5403 def _cleanup(): global _module_dir if _module_dir is not None: try: sys.path.remove(_module_dir) except ValueError: pass try: shutil.rmtree(_module_dir) except OSError: pass _module_dir = None def get_module_dir(): global _module_dir if _module_dir is None: _module_dir = tempfile.mkdtemp() atexit.register(_cleanup) if _module_dir not in sys.path: sys.path.insert(0, _module_dir) return _module_dir def get_temp_module_name(): # Assume single-threaded, and the module dir usable only by this thread global _module_num d = get_module_dir() name = "_test_ext_module_%d" % _module_num _module_num += 1 if name in sys.modules: # this should not be possible, but check anyway raise RuntimeError("Temporary module name already in use.") return name def _memoize(func): memo = {} def wrapper(*a, **kw): key = repr((a, kw)) if key not in memo: try: memo[key] = func(*a, **kw) except Exception as e: memo[key] = e raise ret = memo[key] if isinstance(ret, Exception): raise ret return ret wrapper.__name__ = func.__name__ return wrapper # # Building modules # @_memoize def build_module(source_files, options=[], skip=[], only=[], module_name=None): """ Compile and import a f2py module, built from the given files. """ code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" d = get_module_dir() # Copy files dst_sources = [] f2py_sources = [] for fn in source_files: if not os.path.isfile(fn): raise RuntimeError("%s is not a file" % fn) dst = os.path.join(d, os.path.basename(fn)) shutil.copyfile(fn, dst) dst_sources.append(dst) base, ext = os.path.splitext(dst) if ext in (".f90", ".f", ".c", ".pyf"): f2py_sources.append(dst) # Prepare options if module_name is None: module_name = get_temp_module_name() f2py_opts = ["-c", "-m", module_name] + options + f2py_sources if skip: f2py_opts += ["skip:"] + skip if only: f2py_opts += ["only:"] + only # Build cwd = os.getcwd() try: os.chdir(d) cmd = [sys.executable, "-c", code] + f2py_opts p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, err = p.communicate() if p.returncode != 0: raise RuntimeError("Running f2py failed: %s\n%s" % (cmd[4:], asstr(out))) finally: os.chdir(cwd) # Partial cleanup for fn in dst_sources: os.unlink(fn) # Import return import_module(module_name) @_memoize def build_code(source_code, options=[], skip=[], only=[], suffix=None, module_name=None): """ Compile and import Fortran code using f2py. """ if suffix is None: suffix = ".f" with temppath(suffix=suffix) as path: with open(path, "w") as f: f.write(source_code) return build_module([path], options=options, skip=skip, only=only, module_name=module_name) # # Check if compilers are available at all... # _compiler_status = None def _get_compiler_status(): global _compiler_status if _compiler_status is not None: return _compiler_status _compiler_status = (False, False, False) # XXX: this is really ugly. But I don't know how to invoke Distutils # in a safer way... 
code = textwrap.dedent(f"""\ import os import sys sys.path = {repr(sys.path)} def configuration(parent_name='',top_path=None): global config from numpy.distutils.misc_util import Configuration config = Configuration('', parent_name, top_path) return config from numpy.distutils.core import setup setup(configuration=configuration) config_cmd = config.get_config_cmd() have_c = config_cmd.try_compile('void foo() {{}}') print('COMPILERS:%%d,%%d,%%d' %% (have_c, config.have_f77c(), config.have_f90c())) sys.exit(99) """) code = code % dict(syspath=repr(sys.path)) tmpdir = tempfile.mkdtemp() try: script = os.path.join(tmpdir, "setup.py") with open(script, "w") as f: f.write(code) cmd = [sys.executable, "setup.py", "config"] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmpdir) out, err = p.communicate() finally: shutil.rmtree(tmpdir) m = re.search(br"COMPILERS:(\d+),(\d+),(\d+)", out) if m: _compiler_status = ( bool(int(m.group(1))), bool(int(m.group(2))), bool(int(m.group(3))), ) # Finished return _compiler_status def has_c_compiler(): return _get_compiler_status()[0] def has_f77_compiler(): return _get_compiler_status()[1] def has_f90_compiler(): return _get_compiler_status()[2] # # Building with distutils # @_memoize def build_module_distutils(source_files, config_code, module_name, **kw): """ Build a module via distutils and import it. """ d = get_module_dir() # Copy files dst_sources = [] for fn in source_files: if not os.path.isfile(fn): raise RuntimeError("%s is not a file" % fn) dst = os.path.join(d, os.path.basename(fn)) shutil.copyfile(fn, dst) dst_sources.append(dst) # Build script config_code = textwrap.dedent(config_code).replace("\n", "\n ") code = fr""" import os import sys sys.path = {repr(sys.path)} def configuration(parent_name='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('', parent_name, top_path) {config_code} return config if __name__ == "__main__": from numpy.distutils.core import setup setup(configuration=configuration) """ script = os.path.join(d, get_temp_module_name() + ".py") dst_sources.append(script) with open(script, "wb") as f: f.write(asbytes(code)) # Build cwd = os.getcwd() try: os.chdir(d) cmd = [sys.executable, script, "build_ext", "-i"] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, err = p.communicate() if p.returncode != 0: raise RuntimeError("Running distutils build failed: %s\n%s" % (cmd[4:], asstr(out))) finally: os.chdir(cwd) # Partial cleanup for fn in dst_sources: os.unlink(fn) # Import __import__(module_name) return sys.modules[module_name] # # Unittest convenience # class F2PyTest: code = None sources = None options = [] skip = [] only = [] suffix = ".f" module = None module_name = None def setup_method(self): if sys.platform == "win32": pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)") if self.module is not None: return # Check compiler availability first if not has_c_compiler(): pytest.skip("No C compiler available") codes = [] if self.sources: codes.extend(self.sources) if self.code is not None: codes.append(self.suffix) needs_f77 = False needs_f90 = False needs_pyf = False for fn in codes: if str(fn).endswith(".f"): needs_f77 = True elif str(fn).endswith(".f90"): needs_f90 = True elif str(fn).endswith(".pyf"): needs_pyf = True if needs_f77 and not has_f77_compiler(): pytest.skip("No Fortran 77 compiler available") if needs_f90 and not has_f90_compiler(): pytest.skip("No Fortran 90 compiler available") if needs_pyf and not 
(has_f90_compiler() or has_f77_compiler()): pytest.skip("No Fortran compiler available") # Build the module if self.code is not None: self.module = build_code( self.code, options=self.options, skip=self.skip, only=self.only, suffix=self.suffix, module_name=self.module_name, ) if self.sources is not None: self.module = build_module( self.sources, options=self.options, skip=self.skip, only=self.only, module_name=self.module_name, ) # # Helper functions # def getpath(*a): # Package root d = Path(numpy.f2py.__file__).parent.resolve() return d.joinpath(*a) @contextlib.contextmanager def switchdir(path): curpath = Path.cwd() os.chdir(path) try: yield finally: os.chdir(curpath)
10,196
Python
23.810219
87
0.543743
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_f2cmap.py
from . import util
import numpy as np


class TestF2Cmap(util.F2PyTest):
    sources = [
        util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"),
        util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap")
    ]

    # gh-15095
    def test_long_long_map(self):
        inp = np.ones(3)
        out = self.module.func1(inp)
        exp_out = 3
        assert out == exp_out
391
Python
23.499999
71
0.57289
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_semicolon_split.py
import platform
import pytest
import numpy as np

from . import util


@pytest.mark.skipif(
    platform.system() == "Darwin",
    reason="Prone to error when run with numpy/f2py/tests on mac os, "
    "but not when run in isolation",
)
@pytest.mark.skipif(
    np.dtype(np.intp).itemsize < 8, reason="32-bit builds are buggy"
)
class TestMultiline(util.F2PyTest):
    suffix = ".pyf"
    module_name = "multiline"
    code = f"""
python module {module_name}
    usercode '''
void foo(int* x) {{
    char dummy = ';';
    *x = 42;
}}
'''
    interface
        subroutine foo(x)
            intent(c) foo
            integer intent(out) :: x
        end subroutine foo
    end interface
end python module {module_name}
    """

    def test_multiline(self):
        assert self.module.foo() == 42


@pytest.mark.skipif(
    platform.system() == "Darwin",
    reason="Prone to error when run with numpy/f2py/tests on mac os, "
    "but not when run in isolation",
)
@pytest.mark.skipif(
    np.dtype(np.intp).itemsize < 8, reason="32-bit builds are buggy"
)
class TestCallstatement(util.F2PyTest):
    suffix = ".pyf"
    module_name = "callstatement"
    code = f"""
python module {module_name}
    usercode '''
void foo(int* x) {{
}}
'''
    interface
        subroutine foo(x)
            intent(c) foo
            integer intent(out) :: x
            callprotoargument int*
            callstatement {{ &
                ; &
                x = 42; &
            }}
        end subroutine foo
    end interface
end python module {module_name}
    """

    def test_callstatement(self):
        assert self.module.foo() == 42
1,635
Python
20.813333
70
0.585321
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_abstract_interface.py
from pathlib import Path
import textwrap

from . import util
from numpy.f2py import crackfortran


class TestAbstractInterface(util.F2PyTest):
    sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")]

    skip = ["add1", "add2"]

    def test_abstract_interface(self):
        assert self.module.ops_module.foo(3, 5) == (8, 13)

    def test_parse_abstract_interface(self):
        # Test gh18403
        fpath = util.getpath("tests", "src", "abstract_interface",
                             "gh18403_mod.f90")
        mod = crackfortran.crackfortran([str(fpath)])
        assert len(mod) == 1
        assert len(mod[0]["body"]) == 1
        assert mod[0]["body"][0]["block"] == "abstract interface"
721
Python
30.391303
77
0.61165
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_complex.py
import pytest
from numpy import array
from . import util


class TestReturnComplex(util.F2PyTest):
    def check_function(self, t, tname):
        if tname in ["t0", "t8", "s0", "s8"]:
            err = 1e-5
        else:
            err = 0.0
        assert abs(t(234j) - 234.0j) <= err
        assert abs(t(234.6) - 234.6) <= err
        assert abs(t(234) - 234.0) <= err
        assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err
        # assert abs(t('234')-234.)<=err
        # assert abs(t('234.6')-234.6)<=err
        assert abs(t(-234) + 234.0) <= err
        assert abs(t([234]) - 234.0) <= err
        assert abs(t((234, )) - 234.0) <= err
        assert abs(t(array(234)) - 234.0) <= err
        assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err
        assert abs(t(array([234])) - 234.0) <= err
        assert abs(t(array([[234]])) - 234.0) <= err
        assert abs(t(array([234], "b")) + 22.0) <= err
        assert abs(t(array([234], "h")) - 234.0) <= err
        assert abs(t(array([234], "i")) - 234.0) <= err
        assert abs(t(array([234], "l")) - 234.0) <= err
        assert abs(t(array([234], "q")) - 234.0) <= err
        assert abs(t(array([234], "f")) - 234.0) <= err
        assert abs(t(array([234], "d")) - 234.0) <= err
        assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err
        assert abs(t(array([234], "D")) - 234.0) <= err

        # pytest.raises(TypeError, t, array([234], 'a1'))
        pytest.raises(TypeError, t, "abc")

        pytest.raises(IndexError, t, [])
        pytest.raises(IndexError, t, ())

        pytest.raises(TypeError, t, t)
        pytest.raises(TypeError, t, {})

        try:
            r = t(10**400)
            assert repr(r) in ["(inf+0j)", "(Infinity+0j)"]
        except OverflowError:
            pass


class TestFReturnComplex(TestReturnComplex):
    sources = [
        util.getpath("tests", "src", "return_complex", "foo77.f"),
        util.getpath("tests", "src", "return_complex", "foo90.f90"),
    ]

    @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
    def test_all_f77(self, name):
        self.check_function(getattr(self.module, name), name)

    @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
    def test_all_f90(self, name):
        self.check_function(getattr(self.module.f90_return_complex, name),
                            name)
2,390
Python
35.227272
76
0.517155
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_integer.py
import pytest
from numpy import array
from . import util


class TestReturnInteger(util.F2PyTest):
    def check_function(self, t, tname):
        assert t(123) == 123
        assert t(123.6) == 123
        assert t("123") == 123
        assert t(-123) == -123
        assert t([123]) == 123
        assert t((123, )) == 123
        assert t(array(123)) == 123
        assert t(array([123])) == 123
        assert t(array([[123]])) == 123
        assert t(array([123], "b")) == 123
        assert t(array([123], "h")) == 123
        assert t(array([123], "i")) == 123
        assert t(array([123], "l")) == 123
        assert t(array([123], "B")) == 123
        assert t(array([123], "f")) == 123
        assert t(array([123], "d")) == 123

        # pytest.raises(ValueError, t, array([123],'S3'))
        pytest.raises(ValueError, t, "abc")

        pytest.raises(IndexError, t, [])
        pytest.raises(IndexError, t, ())

        pytest.raises(Exception, t, t)
        pytest.raises(Exception, t, {})

        if tname in ["t8", "s8"]:
            pytest.raises(OverflowError, t, 100000000000000000000000)
            pytest.raises(OverflowError, t, 10000000011111111111111.23)


class TestFReturnInteger(TestReturnInteger):
    sources = [
        util.getpath("tests", "src", "return_integer", "foo77.f"),
        util.getpath("tests", "src", "return_integer", "foo90.f90"),
    ]

    @pytest.mark.parametrize("name",
                             "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
    def test_all_f77(self, name):
        self.check_function(getattr(self.module, name), name)

    @pytest.mark.parametrize("name",
                             "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
    def test_all_f90(self, name):
        self.check_function(getattr(self.module.f90_return_integer, name),
                            name)
1,850
Python
32.053571
74
0.542162
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_regression.py
import os
import pytest
import numpy as np

from . import util


class TestIntentInOut(util.F2PyTest):
    # Check that intent(in out) translates as intent(inout)
    sources = [util.getpath("tests", "src", "regression", "inout.f90")]

    @pytest.mark.slow
    def test_inout(self):
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.float32)[::2]
        pytest.raises(ValueError, self.module.foo, x)

        # check values with contiguous array
        x = np.arange(3, dtype=np.float32)
        self.module.foo(x)
        assert np.allclose(x, [3, 1, 2])


class TestNegativeBounds(util.F2PyTest):
    # Check that negative bounds work correctly
    sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")]

    @pytest.mark.slow
    def test_negbound(self):
        xvec = np.arange(12)
        xlow = -6
        xhigh = 4

        # Calculate the upper bound,
        # keeping the 1-based index in mind
        def ubound(xl, xh):
            return xh - xl + 1

        rval = self.module.foo(is_=xlow, ie_=xhigh,
                               arr=xvec[:ubound(xlow, xhigh)])
        expval = np.arange(11, dtype=np.float32)
        assert np.allclose(rval, expval)


class TestNumpyVersionAttribute(util.F2PyTest):
    # Check that the attribute __f2py_numpy_version__ is present
    # in the compiled module and that it has the value np.__version__.
    sources = [util.getpath("tests", "src", "regression", "inout.f90")]

    @pytest.mark.slow
    def test_numpy_version_attribute(self):
        # Check that self.module has an attribute named "__f2py_numpy_version__"
        assert hasattr(self.module, "__f2py_numpy_version__")

        # Check that the attribute __f2py_numpy_version__ is a string
        assert isinstance(self.module.__f2py_numpy_version__, str)

        # Check that __f2py_numpy_version__ has the value numpy.__version__
        assert np.__version__ == self.module.__f2py_numpy_version__


def test_include_path():
    incdir = np.f2py.get_include()
    fnames_in_dir = os.listdir(incdir)
    for fname in ("fortranobject.c", "fortranobject.h"):
        assert fname in fnames_in_dir
2,157
Python
31.208955
82
0.629578
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_parameter.py
import os import pytest import numpy as np from . import util class TestParameters(util.F2PyTest): # Check that intent(in out) translates as intent(inout) sources = [ util.getpath("tests", "src", "parameter", "constant_real.f90"), util.getpath("tests", "src", "parameter", "constant_integer.f90"), util.getpath("tests", "src", "parameter", "constant_both.f90"), util.getpath("tests", "src", "parameter", "constant_compound.f90"), util.getpath("tests", "src", "parameter", "constant_non_compound.f90"), ] @pytest.mark.slow def test_constant_real_single(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float32)[::2] pytest.raises(ValueError, self.module.foo_single, x) # check values with contiguous array x = np.arange(3, dtype=np.float32) self.module.foo_single(x) assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) @pytest.mark.slow def test_constant_real_double(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] pytest.raises(ValueError, self.module.foo_double, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_double(x) assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) @pytest.mark.slow def test_constant_compound_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] pytest.raises(ValueError, self.module.foo_compound_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_compound_int(x) assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2]) @pytest.mark.slow def test_constant_non_compound_int(self): # check values x = np.arange(4, dtype=np.int32) self.module.foo_non_compound_int(x) assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3]) @pytest.mark.slow def test_constant_integer_int(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int32)[::2] pytest.raises(ValueError, self.module.foo_int, x) # check values with contiguous array x = np.arange(3, dtype=np.int32) self.module.foo_int(x) assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) @pytest.mark.slow def test_constant_integer_long(self): # non-contiguous should raise error x = np.arange(6, dtype=np.int64)[::2] pytest.raises(ValueError, self.module.foo_long, x) # check values with contiguous array x = np.arange(3, dtype=np.int64) self.module.foo_long(x) assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) @pytest.mark.slow def test_constant_both(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] pytest.raises(ValueError, self.module.foo, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo(x) assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) @pytest.mark.slow def test_constant_no(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] pytest.raises(ValueError, self.module.foo_no, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_no(x) assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) @pytest.mark.slow def test_constant_sum(self): # non-contiguous should raise error x = np.arange(6, dtype=np.float64)[::2] pytest.raises(ValueError, self.module.foo_sum, x) # check values with contiguous array x = np.arange(3, dtype=np.float64) self.module.foo_sum(x) assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
3,941
Python
33.884955
79
0.59325
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_kind.py
import os
import pytest

from numpy.f2py.crackfortran import (
    _selected_int_kind_func as selected_int_kind,
    _selected_real_kind_func as selected_real_kind,
)
from . import util


class TestKind(util.F2PyTest):
    sources = [util.getpath("tests", "src", "kind", "foo.f90")]

    def test_all(self):
        selectedrealkind = self.module.selectedrealkind
        selectedintkind = self.module.selectedintkind

        for i in range(40):
            assert selectedintkind(i) == selected_int_kind(
                i
            ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}"

        for i in range(20):
            assert selectedrealkind(i) == selected_real_kind(
                i
            ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}"
847
Python
30.407406
107
0.62928
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_crackfortran.py
import pytest import numpy as np from numpy.f2py.crackfortran import markinnerspaces from . import util from numpy.f2py import crackfortran import textwrap class TestNoSpace(util.F2PyTest): # issue gh-15035: add handling for endsubroutine, endfunction with no space # between "end" and the block name sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")] def test_module(self): k = np.array([1, 2, 3], dtype=np.float64) w = np.array([1, 2, 3], dtype=np.float64) self.module.subb(k) assert np.allclose(k, w + 1) self.module.subc([w, k]) assert np.allclose(k, w + 1) assert self.module.t0(23) == b"2" class TestPublicPrivate: def test_defaultPrivate(self): fpath = util.getpath("tests", "src", "crackfortran", "privatemod.f90") mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 mod = mod[0] assert "private" in mod["vars"]["a"]["attrspec"] assert "public" not in mod["vars"]["a"]["attrspec"] assert "private" in mod["vars"]["b"]["attrspec"] assert "public" not in mod["vars"]["b"]["attrspec"] assert "private" not in mod["vars"]["seta"]["attrspec"] assert "public" in mod["vars"]["seta"]["attrspec"] def test_defaultPublic(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "publicmod.f90") mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 mod = mod[0] assert "private" in mod["vars"]["a"]["attrspec"] assert "public" not in mod["vars"]["a"]["attrspec"] assert "private" not in mod["vars"]["seta"]["attrspec"] assert "public" in mod["vars"]["seta"]["attrspec"] def test_access_type(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "accesstype.f90") mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 tt = mod[0]['vars'] assert set(tt['a']['attrspec']) == {'private', 'bind(c)'} assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'} assert set(tt['c']['attrspec']) == {'public'} class TestModuleProcedure(): def test_moduleOperators(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "operators.f90") mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 mod = mod[0] assert "body" in mod and len(mod["body"]) == 9 assert mod["body"][1]["name"] == "operator(.item.)" assert "implementedby" in mod["body"][1] assert mod["body"][1]["implementedby"] == \ ["item_int", "item_real"] assert mod["body"][2]["name"] == "operator(==)" assert "implementedby" in mod["body"][2] assert mod["body"][2]["implementedby"] == ["items_are_equal"] assert mod["body"][3]["name"] == "assignment(=)" assert "implementedby" in mod["body"][3] assert mod["body"][3]["implementedby"] == \ ["get_int", "get_real"] class TestExternal(util.F2PyTest): # issue gh-17859: add external attribute support sources = [util.getpath("tests", "src", "crackfortran", "gh17859.f")] def test_external_as_statement(self): def incr(x): return x + 123 r = self.module.external_as_statement(incr) assert r == 123 def test_external_as_attribute(self): def incr(x): return x + 123 r = self.module.external_as_attribute(incr) assert r == 123 class TestCrackFortran(util.F2PyTest): # gh-2848: commented lines between parameters in subroutine parameter lists sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90")] def test_gh2848(self): r = self.module.gh2848(1, 2) assert r == (1, 2) class TestMarkinnerspaces: # gh-14118: markinnerspaces does not handle multiple quotations def test_do_not_touch_normal_spaces(self): test_list = ["a ", " a", "a b c", "'abcdefghij'"] for i in test_list: assert markinnerspaces(i) == i def 
test_one_relevant_space(self): assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'" assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"' def test_ignore_inner_quotes(self): assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e" assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e" def test_multiple_relevant_spaces(self): assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'" assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"' class TestDimSpec(util.F2PyTest): """This test suite tests various expressions that are used as dimension specifications. There exists two usage cases where analyzing dimensions specifications are important. In the first case, the size of output arrays must be defined based on the inputs to a Fortran function. Because Fortran supports arbitrary bases for indexing, for instance, `arr(lower:upper)`, f2py has to evaluate an expression `upper - lower + 1` where `lower` and `upper` are arbitrary expressions of input parameters. The evaluation is performed in C, so f2py has to translate Fortran expressions to valid C expressions (an alternative approach is that a developer specifies the corresponding C expressions in a .pyf file). In the second case, when user provides an input array with a given size but some hidden parameters used in dimensions specifications need to be determined based on the input array size. This is a harder problem because f2py has to solve the inverse problem: find a parameter `p` such that `upper(p) - lower(p) + 1` equals to the size of input array. In the case when this equation cannot be solved (e.g. because the input array size is wrong), raise an error before calling the Fortran function (that otherwise would likely crash Python process when the size of input arrays is wrong). f2py currently supports this case only when the equation is linear with respect to unknown parameter. """ suffix = ".f90" code_template = textwrap.dedent(""" function get_arr_size_{count}(a, n) result (length) integer, intent(in) :: n integer, dimension({dimspec}), intent(out) :: a integer length length = size(a) end function subroutine get_inv_arr_size_{count}(a, n) integer :: n ! 
the value of n is computed in f2py wrapper !f2py intent(out) n integer, dimension({dimspec}), intent(in) :: a if (a({first}).gt.0) then print*, "a=", a endif end subroutine """) linear_dimspecs = [ "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)", "2*n, n" ] nonlinear_dimspecs = ["2*n:3*n*n+2*n"] all_dimspecs = linear_dimspecs + nonlinear_dimspecs code = "" for count, dimspec in enumerate(all_dimspecs): lst = [(d.split(":")[0] if ":" in d else "1") for d in dimspec.split(',')] code += code_template.format( count=count, dimspec=dimspec, first=", ".join(lst), ) @pytest.mark.parametrize("dimspec", all_dimspecs) def test_array_size(self, dimspec): count = self.all_dimspecs.index(dimspec) get_arr_size = getattr(self.module, f"get_arr_size_{count}") for n in [1, 2, 3, 4, 5]: sz, a = get_arr_size(n) assert a.size == sz @pytest.mark.parametrize("dimspec", all_dimspecs) def test_inv_array_size(self, dimspec): count = self.all_dimspecs.index(dimspec) get_arr_size = getattr(self.module, f"get_arr_size_{count}") get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}") for n in [1, 2, 3, 4, 5]: sz, a = get_arr_size(n) if dimspec in self.nonlinear_dimspecs: # one must specify n as input, the call we'll ensure # that a and n are compatible: n1 = get_inv_arr_size(a, n) else: # in case of linear dependence, n can be determined # from the shape of a: n1 = get_inv_arr_size(a) # n1 may be different from n (for instance, when `a` size # is a function of some `n` fraction) but it must produce # the same sized array sz1, _ = get_arr_size(n1) assert sz == sz1, (n, n1, sz, sz1) class TestModuleDeclaration: def test_dependencies(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90") mod = crackfortran.crackfortran([str(fpath)]) assert len(mod) == 1 assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
8,934
Python
37.183761
82
0.591336
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_symbolic.py
import pytest from numpy.f2py.symbolic import ( Expr, Op, ArithOp, Language, as_symbol, as_number, as_string, as_array, as_complex, as_terms, as_factors, eliminate_quotes, insert_quotes, fromstring, as_expr, as_apply, as_numer_denom, as_ternary, as_ref, as_deref, normalize, as_eq, as_ne, as_lt, as_gt, as_le, as_ge, ) from . import util class TestSymbolic(util.F2PyTest): def test_eliminate_quotes(self): def worker(s): r, d = eliminate_quotes(s) s1 = insert_quotes(r, d) assert s1 == s for kind in ["", "mykind_"]: worker(kind + '"1234" // "ABCD"') worker(kind + '"1234" // ' + kind + '"ABCD"') worker(kind + "\"1234\" // 'ABCD'") worker(kind + '"1234" // ' + kind + "'ABCD'") worker(kind + '"1\\"2\'AB\'34"') worker("a = " + kind + "'1\\'2\"AB\"34'") def test_sanity(self): x = as_symbol("x") y = as_symbol("y") z = as_symbol("z") assert x.op == Op.SYMBOL assert repr(x) == "Expr(Op.SYMBOL, 'x')" assert x == x assert x != y assert hash(x) is not None n = as_number(123) m = as_number(456) assert n.op == Op.INTEGER assert repr(n) == "Expr(Op.INTEGER, (123, 4))" assert n == n assert n != m assert hash(n) is not None fn = as_number(12.3) fm = as_number(45.6) assert fn.op == Op.REAL assert repr(fn) == "Expr(Op.REAL, (12.3, 4))" assert fn == fn assert fn != fm assert hash(fn) is not None c = as_complex(1, 2) c2 = as_complex(3, 4) assert c.op == Op.COMPLEX assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4))," " Expr(Op.INTEGER, (2, 4))))") assert c == c assert c != c2 assert hash(c) is not None s = as_string("'123'") s2 = as_string('"ABC"') assert s.op == Op.STRING assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s) assert s == s assert s != s2 a = as_array((n, m)) b = as_array((n, )) assert a.op == Op.ARRAY assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4))," " Expr(Op.INTEGER, (456, 4))))") assert a == a assert a != b t = as_terms(x) u = as_terms(y) assert t.op == Op.TERMS assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})" assert t == t assert t != u assert hash(t) is not None v = as_factors(x) w = as_factors(y) assert v.op == Op.FACTORS assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})" assert v == v assert w != v assert hash(v) is not None t = as_ternary(x, y, z) u = as_ternary(x, z, y) assert t.op == Op.TERNARY assert t == t assert t != u assert hash(t) is not None e = as_eq(x, y) f = as_lt(x, y) assert e.op == Op.RELATIONAL assert e == e assert e != f assert hash(e) is not None def test_tostring_fortran(self): x = as_symbol("x") y = as_symbol("y") z = as_symbol("z") n = as_number(123) m = as_number(456) a = as_array((n, m)) c = as_complex(n, m) assert str(x) == "x" assert str(n) == "123" assert str(a) == "[123, 456]" assert str(c) == "(123, 456)" assert str(Expr(Op.TERMS, {x: 1})) == "x" assert str(Expr(Op.TERMS, {x: 2})) == "2 * x" assert str(Expr(Op.TERMS, {x: -1})) == "-x" assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x" assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y" assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y" assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y" assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y" assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y" assert str(Expr(Op.FACTORS, {x: 1})) == "x" assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2" assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1" assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2" assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y" assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3" v = Expr(Op.FACTORS, {x: 2, 
Expr(Op.TERMS, {x: 1, y: 1}): 3}) assert str(v) == "x ** 2 * (x + y) ** 3", str(v) v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3}) assert str(v) == "x ** 2 * (x * y) ** 3", str(v) assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()" assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)" assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)" assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]" assert str(as_ternary(x, y, z)) == "merge(y, z, x)" assert str(as_eq(x, y)) == "x .eq. y" assert str(as_ne(x, y)) == "x .ne. y" assert str(as_lt(x, y)) == "x .lt. y" assert str(as_le(x, y)) == "x .le. y" assert str(as_gt(x, y)) == "x .gt. y" assert str(as_ge(x, y)) == "x .ge. y" def test_tostring_c(self): language = Language.C x = as_symbol("x") y = as_symbol("y") z = as_symbol("z") n = as_number(123) assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x" assert (Expr(Op.FACTORS, { x + y: 2 }).tostring(language=language) == "(x + y) * (x + y)") assert Expr(Op.FACTORS, { x: 12 }).tostring(language=language) == "pow(x, 12)" assert as_apply(ArithOp.DIV, x, y).tostring(language=language) == "x / y" assert (as_apply(ArithOp.DIV, x, x + y).tostring(language=language) == "x / (x + y)") assert (as_apply(ArithOp.DIV, x - y, x + y).tostring(language=language) == "(x - y) / (x + y)") assert (x + (x - y) / (x + y) + n).tostring(language=language) == "123 + x + (x - y) / (x + y)" assert as_ternary(x, y, z).tostring(language=language) == "(x?y:z)" assert as_eq(x, y).tostring(language=language) == "x == y" assert as_ne(x, y).tostring(language=language) == "x != y" assert as_lt(x, y).tostring(language=language) == "x < y" assert as_le(x, y).tostring(language=language) == "x <= y" assert as_gt(x, y).tostring(language=language) == "x > y" assert as_ge(x, y).tostring(language=language) == "x >= y" def test_operations(self): x = as_symbol("x") y = as_symbol("y") z = as_symbol("z") assert x + x == Expr(Op.TERMS, {x: 2}) assert x - x == Expr(Op.INTEGER, (0, 4)) assert x + y == Expr(Op.TERMS, {x: 1, y: 1}) assert x - y == Expr(Op.TERMS, {x: 1, y: -1}) assert x * x == Expr(Op.FACTORS, {x: 2}) assert x * y == Expr(Op.FACTORS, {x: 1, y: 1}) assert +x == x assert -x == Expr(Op.TERMS, {x: -1}), repr(-x) assert 2 * x == Expr(Op.TERMS, {x: 2}) assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2}) assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3}) assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2}) assert x**2 == Expr(Op.FACTORS, {x: 2}) assert (x + y)**2 == Expr( Op.TERMS, { Expr(Op.FACTORS, {x: 2}): 1, Expr(Op.FACTORS, {y: 2}): 1, Expr(Op.FACTORS, { x: 1, y: 1 }): 2, }, ) assert (x + y) * x == x**2 + x * y assert (x + y)**2 == x**2 + 2 * x * y + y**2 assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2 assert (x + y) * z == x * z + y * z assert z * (x + y) == x * z + y * z assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2)) assert (2 * x / 2) == x assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2)) assert (4 * x / 2) == 2 * x assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) assert (6 * x / 2) == 3 * x assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply( ArithOp.DIV, 5 * y, 4 * x) assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x, as_number(2)), (15 * x / 6) / 5 assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5)) assert (x / 2.0) == Expr(Op.TERMS, {x: 0.5}) s = as_string('"ABC"') t = as_string('"123"') assert s // t == Expr(Op.STRING, ('"ABC123"', 1)) assert s // x == 
Expr(Op.CONCAT, (s, x)) assert x // s == Expr(Op.CONCAT, (x, s)) c = as_complex(1.0, 2.0) assert -c == as_complex(-1.0, -2.0) assert c + c == as_expr((1 + 2j) * 2) assert c * c == as_expr((1 + 2j)**2) def test_substitute(self): x = as_symbol("x") y = as_symbol("y") z = as_symbol("z") a = as_array((x, y)) assert x.substitute({x: y}) == y assert (x + y).substitute({x: z}) == y + z assert (x * y).substitute({x: z}) == y * z assert (x**4).substitute({x: z}) == z**4 assert (x / y).substitute({x: z}) == z / y assert x.substitute({x: y + z}) == y + z assert a.substitute({x: y + z}) == as_array((y + z, y)) assert as_ternary(x, y, z).substitute({x: y + z}) == as_ternary(y + z, y, z) assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y) def test_fromstring(self): x = as_symbol("x") y = as_symbol("y") z = as_symbol("z") f = as_symbol("f") s = as_string('"ABC"') t = as_string('"123"') a = as_array((x, y)) assert fromstring("x") == x assert fromstring("+ x") == x assert fromstring("- x") == -x assert fromstring("x + y") == x + y assert fromstring("x + 1") == x + 1 assert fromstring("x * y") == x * y assert fromstring("x * 2") == x * 2 assert fromstring("x / y") == x / y assert fromstring("x ** 2", language=Language.Python) == x**2 assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3 assert fromstring("(x + y) * z") == (x + y) * z assert fromstring("f(x)") == f(x) assert fromstring("f(x,y)") == f(x, y) assert fromstring("f[x]") == f[x] assert fromstring("f[x][y]") == f[x][y] assert fromstring('"ABC"') == s assert (normalize( fromstring('"ABC" // "123" ', language=Language.Fortran)) == s // t) assert fromstring('f("ABC")') == f(s) assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND") assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)") assert fromstring("f((/x, y/))") == f(a) assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, )) assert fromstring("123") == as_number(123) assert fromstring("123_2") == as_number(123, 2) assert fromstring("123_myintkind") == as_number(123, "myintkind") assert fromstring("123.0") == as_number(123.0, 4) assert fromstring("123.0_4") == as_number(123.0, 4) assert fromstring("123.0_8") == as_number(123.0, 8) assert fromstring("123.0e0") == as_number(123.0, 4) assert fromstring("123.0d0") == as_number(123.0, 8) assert fromstring("123d0") == as_number(123.0, 8) assert fromstring("123e-0") == as_number(123.0, 4) assert fromstring("123d+0") == as_number(123.0, 8) assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind") assert fromstring("3E4") == as_number(30000.0, 4) assert fromstring("(1, 2)") == as_complex(1, 2) assert fromstring("(1e2, PI)") == as_complex(as_number(100.0), as_symbol("PI")) assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2))) assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"), x, y=as_number(1)) assert fromstring( 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply( as_symbol("PERSON"), name=as_string('"John"'), age=as_number(50), shape=as_array((as_number(34), as_number(23))), ) assert fromstring("x?y:z") == as_ternary(x, y, z) assert fromstring("*x") == as_deref(x) assert fromstring("**x") == as_deref(as_deref(x)) assert fromstring("&x") == as_ref(x) assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y) assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y) assert fromstring("*x * *y") == as_deref(x) * as_deref(y) assert fromstring("*x**y") == as_deref(x) * as_deref(y) assert fromstring("x == y") == as_eq(x, y) assert fromstring("x != y") 
== as_ne(x, y) assert fromstring("x < y") == as_lt(x, y) assert fromstring("x > y") == as_gt(x, y) assert fromstring("x <= y") == as_le(x, y) assert fromstring("x >= y") == as_ge(x, y) assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y) assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y) assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y) assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y) assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y) assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y) def test_traverse(self): x = as_symbol("x") y = as_symbol("y") z = as_symbol("z") f = as_symbol("f") # Use traverse to substitute a symbol def replace_visit(s, r=z): if s == x: return r assert x.traverse(replace_visit) == z assert y.traverse(replace_visit) == y assert z.traverse(replace_visit) == z assert (f(y)).traverse(replace_visit) == f(y) assert (f(x)).traverse(replace_visit) == f(z) assert (f[y]).traverse(replace_visit) == f[y] assert (f[z]).traverse(replace_visit) == f[z] assert (x + y + z).traverse(replace_visit) == (2 * z + y) assert (x + f(y, x - z)).traverse(replace_visit) == (z + f(y, as_number(0))) assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y) # Use traverse to collect symbols, method 1 function_symbols = set() symbols = set() def collect_symbols(s): if s.op is Op.APPLY: oper = s.data[0] function_symbols.add(oper) if oper in symbols: symbols.remove(oper) elif s.op is Op.SYMBOL and s not in function_symbols: symbols.add(s) (x + f(y, x - z)).traverse(collect_symbols) assert function_symbols == {f} assert symbols == {x, y, z} # Use traverse to collect symbols, method 2 def collect_symbols2(expr, symbols): if expr.op is Op.SYMBOL: symbols.add(expr) symbols = set() (x + f(y, x - z)).traverse(collect_symbols2, symbols) assert symbols == {x, y, z, f} # Use traverse to partially collect symbols def collect_symbols3(expr, symbols): if expr.op is Op.APPLY: # skip traversing function calls return expr if expr.op is Op.SYMBOL: symbols.add(expr) symbols = set() (x + f(y, x - z)).traverse(collect_symbols3, symbols) assert symbols == {x} def test_linear_solve(self): x = as_symbol("x") y = as_symbol("y") z = as_symbol("z") assert x.linear_solve(x) == (as_number(1), as_number(0)) assert (x + 1).linear_solve(x) == (as_number(1), as_number(1)) assert (2 * x).linear_solve(x) == (as_number(2), as_number(0)) assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3)) assert as_number(3).linear_solve(x) == (as_number(0), as_number(3)) assert y.linear_solve(x) == (as_number(0), y) assert (y * z).linear_solve(x) == (as_number(0), y * z) assert (x + y).linear_solve(x) == (as_number(1), y) assert (z * x + y).linear_solve(x) == (z, y) assert ((z + y) * x + y).linear_solve(x) == (z + y, y) assert (z * y * x + y).linear_solve(x) == (z * y, y) pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x)) def test_as_numer_denom(self): x = as_symbol("x") y = as_symbol("y") n = as_number(123) assert as_numer_denom(x) == (x, as_number(1)) assert as_numer_denom(x / n) == (x, n) assert as_numer_denom(n / x) == (n, x) assert as_numer_denom(x / y) == (x, y) assert as_numer_denom(x * y) == (x * y, as_number(1)) assert as_numer_denom(n + x / y) == (x + n * y, y) assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x) def test_polynomial_atoms(self): x = as_symbol("x") y = as_symbol("y") n = as_number(123) assert x.polynomial_atoms() == {x} assert n.polynomial_atoms() == set() 
assert (y[x]).polynomial_atoms() == {y[x]} assert (y(x)).polynomial_atoms() == {y(x)} assert (y(x) + x).polynomial_atoms() == {y(x), x} assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} assert (y(x)**x).polynomial_atoms() == {y(x)}
18,341
Python
36.054545
79
0.464969
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_quoted_character.py
"""See https://github.com/numpy/numpy/pull/10676. """ import sys import pytest from . import util class TestQuotedCharacter(util.F2PyTest): sources = [util.getpath("tests", "src", "quoted_character", "foo.f")] @pytest.mark.skipif(sys.platform == "win32", reason="Fails with MinGW64 Gfortran (Issue #9673)") def test_quoted_character(self): assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")")
454
Python
25.764704
75
0.603524
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_character.py
import pytest
from numpy import array
from . import util
import platform

IS_S390X = platform.machine() == "s390x"


class TestReturnCharacter(util.F2PyTest):
    def check_function(self, t, tname):
        if tname in ["t0", "t1", "s0", "s1"]:
            assert t(23) == b"2"
            r = t("ab")
            assert r == b"a"
            r = t(array("ab"))
            assert r == b"a"
            r = t(array(77, "u1"))
            assert r == b"M"
        elif tname in ["ts", "ss"]:
            assert t(23) == b"23"
            assert t("123456789abcdef") == b"123456789a"
        elif tname in ["t5", "s5"]:
            assert t(23) == b"23"
            assert t("ab") == b"ab"
            assert t("123456789abcdef") == b"12345"
        else:
            raise NotImplementedError


class TestFReturnCharacter(TestReturnCharacter):
    sources = [
        util.getpath("tests", "src", "return_character", "foo77.f"),
        util.getpath("tests", "src", "return_character", "foo90.f90"),
    ]

    @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
    @pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(","))
    def test_all_f77(self, name):
        self.check_function(getattr(self.module, name), name)

    @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
    @pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(","))
    def test_all_f90(self, name):
        self.check_function(getattr(self.module.f90_return_char, name), name)
1,491
Python
31.434782
77
0.557344
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_f2py2e.py
import textwrap, re, sys, subprocess, shlex from pathlib import Path from collections import namedtuple import pytest from . import util from numpy.f2py.f2py2e import main as f2pycli ######################### # CLI utils and classes # ######################### PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf") def get_io_paths(fname_inp, mname="untitled"): """Takes in a temporary file for testing and returns the expected output and input paths Here expected output is essentially one of any of the possible generated files. ..note:: Since this does not actually run f2py, none of these are guaranteed to exist, and module names are typically incorrect Parameters ---------- fname_inp : str The input filename mname : str, optional The name of the module, untitled by default Returns ------- genp : NamedTuple PPaths The possible paths which are generated, not all of which exist """ bpath = Path(fname_inp) return PPaths( finp=bpath.with_suffix(".f"), f90inp=bpath.with_suffix(".f90"), pyf=bpath.with_suffix(".pyf"), wrap77=bpath.with_name(f"{mname}-f2pywrappers.f"), wrap90=bpath.with_name(f"{mname}-f2pywrappers2.f90"), cmodf=bpath.with_name(f"{mname}module.c"), ) ############## # CLI Fixtures and Tests # ############# @pytest.fixture(scope="session") def hello_world_f90(tmpdir_factory): """Generates a single f90 file for testing""" fdat = util.getpath("tests", "src", "cli", "hiworld.f90").read_text() fn = tmpdir_factory.getbasetemp() / "hello.f90" fn.write_text(fdat, encoding="ascii") return fn @pytest.fixture(scope="session") def hello_world_f77(tmpdir_factory): """Generates a single f77 file for testing""" fdat = util.getpath("tests", "src", "cli", "hi77.f").read_text() fn = tmpdir_factory.getbasetemp() / "hello.f" fn.write_text(fdat, encoding="ascii") return fn @pytest.fixture(scope="session") def retreal_f77(tmpdir_factory): """Generates a single f77 file for testing""" fdat = util.getpath("tests", "src", "return_real", "foo77.f").read_text() fn = tmpdir_factory.getbasetemp() / "foo.f" fn.write_text(fdat, encoding="ascii") return fn def test_gen_pyf(capfd, hello_world_f90, monkeypatch): """Ensures that a signature file is generated via the CLI CLI :: -h """ ipath = Path(hello_world_f90) opath = Path(hello_world_f90).stem + ".pyf" monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split()) with util.switchdir(ipath.parent): f2pycli() # Generate wrappers out, _ = capfd.readouterr() assert "Saving signatures to file" in out assert Path(f'{opath}').exists() def test_gen_pyf_stdout(capfd, hello_world_f90, monkeypatch): """Ensures that a signature file can be dumped to stdout CLI :: -h """ ipath = Path(hello_world_f90) monkeypatch.setattr(sys, "argv", f'f2py -h stdout {ipath}'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "Saving signatures to file" in out def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch): """Ensures that the CLI refuses to overwrite signature files CLI :: -h without --overwrite-signature """ ipath = Path(hello_world_f90) monkeypatch.setattr(sys, "argv", f'f2py -h faker.pyf {ipath}'.split()) with util.switchdir(ipath.parent): Path("faker.pyf").write_text("Fake news", encoding="ascii") with pytest.raises(SystemExit): f2pycli() # Refuse to overwrite _, err = capfd.readouterr() assert "Use --overwrite-signature to overwrite" in err @pytest.mark.xfail def test_f2py_skip(capfd, retreal_f77, monkeypatch): """Tests that functions can be skipped CLI :: skip: """ foutl = get_io_paths(retreal_f77, 
mname="test") ipath = foutl.finp toskip = "t0 t4 t8 sd s8 s4" remaining = "td s0" monkeypatch.setattr( sys, "argv", f'f2py {ipath} -m test skip: {toskip}'.split()) with util.switchdir(ipath.parent): f2pycli() out, err = capfd.readouterr() for skey in toskip.split(): assert ( f'buildmodule: Could not found the body of interfaced routine "{skey}". Skipping.' in err) for rkey in remaining.split(): assert f'Constructing wrapper function "{rkey}"' in out def test_f2py_only(capfd, retreal_f77, monkeypatch): """Test that functions can be kept by only: CLI :: only: """ foutl = get_io_paths(retreal_f77, mname="test") ipath = foutl.finp toskip = "t0 t4 t8 sd s8 s4" tokeep = "td s0" monkeypatch.setattr( sys, "argv", f'f2py {ipath} -m test only: {tokeep}'.split()) with util.switchdir(ipath.parent): f2pycli() out, err = capfd.readouterr() for skey in toskip.split(): assert ( f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' in err) for rkey in tokeep.split(): assert f'Constructing wrapper function "{rkey}"' in out def test_file_processing_switch(capfd, hello_world_f90, retreal_f77, monkeypatch): """Tests that it is possible to return to file processing mode CLI :: : BUG: numpy-gh #20520 """ foutl = get_io_paths(retreal_f77, mname="test") ipath = foutl.finp toskip = "t0 t4 t8 sd s8 s4" ipath2 = Path(hello_world_f90) tokeep = "td s0 hi" # hi is in ipath2 mname = "blah" monkeypatch.setattr( sys, "argv", f'f2py {ipath} -m {mname} only: {tokeep} : {ipath2}'.split( ), ) with util.switchdir(ipath.parent): f2pycli() out, err = capfd.readouterr() for skey in toskip.split(): assert ( f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' in err) for rkey in tokeep.split(): assert f'Constructing wrapper function "{rkey}"' in out def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch): """Checks the generation of files based on a module name CLI :: -m """ MNAME = "hi" foutl = get_io_paths(hello_world_f90, mname=MNAME) ipath = foutl.f90inp monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split()) with util.switchdir(ipath.parent): f2pycli() # Always generate C module assert Path.exists(foutl.cmodf) # File contains a function, check for F77 wrappers assert Path.exists(foutl.wrap77) def test_lower_cmod(capfd, hello_world_f77, monkeypatch): """Lowers cases by flag or when -h is present CLI :: --[no-]lower """ foutl = get_io_paths(hello_world_f77, mname="test") ipath = foutl.finp capshi = re.compile(r"HI\(\)") capslo = re.compile(r"hi\(\)") # Case I: --lower is passed monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m test --lower'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert capslo.search(out) is not None assert capshi.search(out) is None # Case II: --no-lower is passed monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m test --no-lower'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert capslo.search(out) is None assert capshi.search(out) is not None def test_lower_sig(capfd, hello_world_f77, monkeypatch): """Lowers cases in signature files by flag or when -h is present CLI :: --[no-]lower -h """ foutl = get_io_paths(hello_world_f77, mname="test") ipath = foutl.finp # Signature files capshi = re.compile(r"Block: HI") capslo = re.compile(r"Block: hi") # Case I: --lower is implied by -h # TODO: Clean up to prevent passing --overwrite-signature monkeypatch.setattr( sys, "argv", f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature'.split(), ) with 
util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert capslo.search(out) is not None assert capshi.search(out) is None # Case II: --no-lower overrides -h monkeypatch.setattr( sys, "argv", f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature --no-lower' .split(), ) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert capslo.search(out) is None assert capshi.search(out) is not None def test_build_dir(capfd, hello_world_f90, monkeypatch): """Ensures that the build directory can be specified CLI :: --build-dir """ ipath = Path(hello_world_f90) mname = "blah" odir = "tttmp" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --build-dir {odir}'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert f"Wrote C/API module \"{mname}\"" in out def test_overwrite(capfd, hello_world_f90, monkeypatch): """Ensures that the build directory can be specified CLI :: --overwrite-signature """ ipath = Path(hello_world_f90) monkeypatch.setattr( sys, "argv", f'f2py -h faker.pyf {ipath} --overwrite-signature'.split()) with util.switchdir(ipath.parent): Path("faker.pyf").write_text("Fake news", encoding="ascii") f2pycli() out, _ = capfd.readouterr() assert "Saving signatures to file" in out def test_latexdoc(capfd, hello_world_f90, monkeypatch): """Ensures that TeX documentation is written out CLI :: --latex-doc """ ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --latex-doc'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "Documentation is saved to file" in out with Path(f"{mname}module.tex").open() as otex: assert "\\documentclass" in otex.read() def test_nolatexdoc(capfd, hello_world_f90, monkeypatch): """Ensures that TeX documentation is written out CLI :: --no-latex-doc """ ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --no-latex-doc'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "Documentation is saved to file" not in out def test_shortlatex(capfd, hello_world_f90, monkeypatch): """Ensures that truncated documentation is written out TODO: Test to ensure this has no effect without --latex-doc CLI :: --latex-doc --short-latex """ ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr( sys, "argv", f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(), ) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "Documentation is saved to file" in out with Path(f"./{mname}module.tex").open() as otex: assert "\\documentclass" not in otex.read() def test_restdoc(capfd, hello_world_f90, monkeypatch): """Ensures that RsT documentation is written out CLI :: --rest-doc """ ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --rest-doc'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "ReST Documentation is saved to file" in out with Path(f"./{mname}module.rest").open() as orst: assert r".. 
-*- rest -*-" in orst.read() def test_norestexdoc(capfd, hello_world_f90, monkeypatch): """Ensures that TeX documentation is written out CLI :: --no-rest-doc """ ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --no-rest-doc'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "ReST Documentation is saved to file" not in out def test_debugcapi(capfd, hello_world_f90, monkeypatch): """Ensures that debugging wrappers are written CLI :: --debug-capi """ ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --debug-capi'.split()) with util.switchdir(ipath.parent): f2pycli() with Path(f"./{mname}module.c").open() as ocmod: assert r"#define DEBUGCFUNCS" in ocmod.read() @pytest.mark.xfail(reason="Consistently fails on CI.") def test_debugcapi_bld(hello_world_f90, monkeypatch): """Ensures that debugging wrappers work CLI :: --debug-capi -c """ ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} -c --debug-capi'.split()) with util.switchdir(ipath.parent): f2pycli() cmd_run = shlex.split("python3 -c \"import blah; blah.hi()\"") rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' eerr = textwrap.dedent("""\ debug-capi:Python C/API function blah.hi() debug-capi:float hi=:output,hidden,scalar debug-capi:hi=0 debug-capi:Fortran subroutine `f2pywraphi(&hi)' debug-capi:hi=0 debug-capi:Building return value. debug-capi:Python C/API function blah.hi: successful. debug-capi:Freeing memory. """) assert rout.stdout == eout assert rout.stderr == eerr def test_wrapfunc_def(capfd, hello_world_f90, monkeypatch): """Ensures that fortran subroutine wrappers for F77 are included by default CLI :: --[no]-wrap-functions """ # Implied ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath}'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert r"Fortran 77 wrappers are saved to" in out # Explicit monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --wrap-functions'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert r"Fortran 77 wrappers are saved to" in out def test_nowrapfunc(capfd, hello_world_f90, monkeypatch): """Ensures that fortran subroutine wrappers for F77 can be disabled CLI :: --no-wrap-functions """ ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --no-wrap-functions'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert r"Fortran 77 wrappers are saved to" not in out def test_inclheader(capfd, hello_world_f90, monkeypatch): """Add to the include directories CLI :: -include TODO: Document this in the help string """ ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr( sys, "argv", f'f2py -m {mname} {ipath} -include<stdbool.h> -include<stdio.h> '. 
split(), ) with util.switchdir(ipath.parent): f2pycli() with Path(f"./{mname}module.c").open() as ocmod: ocmr = ocmod.read() assert "#include <stdbool.h>" in ocmr assert "#include <stdio.h>" in ocmr def test_inclpath(): """Add to the include directories CLI :: --include-paths """ # TODO: populate pass def test_hlink(): """Add to the include directories CLI :: --help-link """ # TODO: populate pass def test_f2cmap(): """Check that Fortran-to-Python KIND specs can be passed CLI :: --f2cmap """ # TODO: populate pass def test_quiet(capfd, hello_world_f90, monkeypatch): """Reduce verbosity CLI :: --quiet """ ipath = Path(hello_world_f90) monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --quiet'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert len(out) == 0 def test_verbose(capfd, hello_world_f90, monkeypatch): """Increase verbosity CLI :: --verbose """ ipath = Path(hello_world_f90) monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --verbose'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "analyzeline" in out def test_version(capfd, monkeypatch): """Ensure version CLI :: -v """ monkeypatch.setattr(sys, "argv", 'f2py -v'.split()) # TODO: f2py2e should not call sys.exit() after printing the version with pytest.raises(SystemExit): f2pycli() out, _ = capfd.readouterr() import numpy as np assert np.__version__ == out.strip() @pytest.mark.xfail(reason="Consistently fails on CI.") def test_npdistop(hello_world_f90, monkeypatch): """ CLI :: -c """ ipath = Path(hello_world_f90) monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c'.split()) with util.switchdir(ipath.parent): f2pycli() cmd_run = shlex.split("python -c \"import blah; blah.hi()\"") rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' assert rout.stdout == eout # Numpy distutils flags # TODO: These should be tested separately def test_npd_fcompiler(): """ CLI :: -c --fcompiler """ # TODO: populate pass def test_npd_compiler(): """ CLI :: -c --compiler """ # TODO: populate pass def test_npd_help_fcompiler(): """ CLI :: -c --help-fcompiler """ # TODO: populate pass def test_npd_f77exec(): """ CLI :: -c --f77exec """ # TODO: populate pass def test_npd_f90exec(): """ CLI :: -c --f90exec """ # TODO: populate pass def test_npd_f77flags(): """ CLI :: -c --f77flags """ # TODO: populate pass def test_npd_f90flags(): """ CLI :: -c --f90flags """ # TODO: populate pass def test_npd_opt(): """ CLI :: -c --opt """ # TODO: populate pass def test_npd_arch(): """ CLI :: -c --arch """ # TODO: populate pass def test_npd_noopt(): """ CLI :: -c --noopt """ # TODO: populate pass def test_npd_noarch(): """ CLI :: -c --noarch """ # TODO: populate pass def test_npd_debug(): """ CLI :: -c --debug """ # TODO: populate pass def test_npd_link_auto(): """ CLI :: -c --link-<resource> """ # TODO: populate pass def test_npd_lib(): """ CLI :: -c -L/path/to/lib/ -l<libname> """ # TODO: populate pass def test_npd_define(): """ CLI :: -D<define> """ # TODO: populate pass def test_npd_undefine(): """ CLI :: -U<name> """ # TODO: populate pass def test_npd_incl(): """ CLI :: -I/path/to/include/ """ # TODO: populate pass def test_npd_linker(): """ CLI :: <filename>.o <filename>.so <filename>.a """ # TODO: populate pass
19,766
Python
25.391188
98
0.591622
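The tests above exercise the f2py command line by patching sys.argv and calling the CLI entry point in-process. As a rough standalone sketch of that same pattern, under the assumption that the entry point lives at numpy.f2py.f2py2e.main (the file name hello.f90 and the module name demo are hypothetical placeholders, not taken from the file above):

import sys
from numpy.f2py import f2py2e  # assumed location of the CLI entry point used by the tests

# equivalent to running:  f2py hello.f90 -m demo --lower
sys.argv = "f2py hello.f90 -m demo --lower".split()
f2py2e.main()  # without -c this only generates demomodule.c in the current directory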
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_logical.py
import pytest

from numpy import array
from . import util


class TestReturnLogical(util.F2PyTest):
    def check_function(self, t):
        assert t(True) == 1
        assert t(False) == 0
        assert t(0) == 0
        assert t(None) == 0
        assert t(0.0) == 0
        assert t(0j) == 0
        assert t(1j) == 1
        assert t(234) == 1
        assert t(234.6) == 1
        assert t(234.6 + 3j) == 1
        assert t("234") == 1
        assert t("aaa") == 1
        assert t("") == 0
        assert t([]) == 0
        assert t(()) == 0
        assert t({}) == 0
        assert t(t) == 1
        assert t(-234) == 1
        assert t(10**100) == 1
        assert t([234]) == 1
        assert t((234, )) == 1
        assert t(array(234)) == 1
        assert t(array([234])) == 1
        assert t(array([[234]])) == 1
        assert t(array([234], "b")) == 1
        assert t(array([234], "h")) == 1
        assert t(array([234], "i")) == 1
        assert t(array([234], "l")) == 1
        assert t(array([234], "f")) == 1
        assert t(array([234], "d")) == 1
        assert t(array([234 + 3j], "F")) == 1
        assert t(array([234], "D")) == 1
        assert t(array(0)) == 0
        assert t(array([0])) == 0
        assert t(array([[0]])) == 0
        assert t(array([0j])) == 0
        assert t(array([1])) == 1
        pytest.raises(ValueError, t, array([0, 0]))


class TestFReturnLogical(TestReturnLogical):
    sources = [
        util.getpath("tests", "src", "return_logical", "foo77.f"),
        util.getpath("tests", "src", "return_logical", "foo90.f90"),
    ]

    @pytest.mark.slow
    @pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(","))
    def test_all_f77(self, name):
        self.check_function(getattr(self.module, name))

    @pytest.mark.slow
    @pytest.mark.parametrize("name",
                             "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
    def test_all_f90(self, name):
        self.check_function(getattr(self.module.f90_return_logical, name))
2,017
Python
30.046153
74
0.492315
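The coercions checked by check_function above follow ordinary Python truthiness before the value is handed to the Fortran logical. A minimal pure-Python analogue of that mapping (an illustration of the expected behaviour, not the generated wrapper code):

import numpy as np

def as_fortran_logical(obj):
    # "" -> 0, "aaa" -> 1, array([[234]]) -> 1; a multi-element array raises
    # ValueError, matching the final pytest.raises assertion above.
    return int(bool(obj))

assert as_fortran_logical("") == 0
assert as_fortran_logical(np.array([[234]])) == 1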
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_array_from_pyobj.py
import os import sys import copy import platform import pytest import numpy as np from numpy.core.multiarray import typeinfo from . import util wrap = None def setup_module(): """ Build the required testing extension module """ global wrap # Check compiler availability first if not util.has_c_compiler(): pytest.skip("No C compiler available") if wrap is None: config_code = """ config.add_extension('test_array_from_pyobj_ext', sources=['wrapmodule.c', 'fortranobject.c'], define_macros=[]) """ d = os.path.dirname(__file__) src = [ util.getpath("tests", "src", "array_from_pyobj", "wrapmodule.c"), util.getpath("src", "fortranobject.c"), util.getpath("src", "fortranobject.h"), ] wrap = util.build_module_distutils(src, config_code, "test_array_from_pyobj_ext") def flags_info(arr): flags = wrap.array_attrs(arr)[6] return flags2names(flags) def flags2names(flags): info = [] for flagname in [ "CONTIGUOUS", "FORTRAN", "OWNDATA", "ENSURECOPY", "ENSUREARRAY", "ALIGNED", "NOTSWAPPED", "WRITEABLE", "WRITEBACKIFCOPY", "BEHAVED", "BEHAVED_RO", "CARRAY", "FARRAY", ]: if abs(flags) & getattr(wrap, flagname, 0): info.append(flagname) return info class Intent: def __init__(self, intent_list=[]): self.intent_list = intent_list[:] flags = 0 for i in intent_list: if i == "optional": flags |= wrap.F2PY_OPTIONAL else: flags |= getattr(wrap, "F2PY_INTENT_" + i.upper()) self.flags = flags def __getattr__(self, name): name = name.lower() if name == "in_": name = "in" return self.__class__(self.intent_list + [name]) def __str__(self): return "intent(%s)" % (",".join(self.intent_list)) def __repr__(self): return "Intent(%r)" % (self.intent_list) def is_intent(self, *names): for name in names: if name not in self.intent_list: return False return True def is_intent_exact(self, *names): return len(self.intent_list) == len(names) and self.is_intent(*names) intent = Intent() _type_names = [ "BOOL", "BYTE", "UBYTE", "SHORT", "USHORT", "INT", "UINT", "LONG", "ULONG", "LONGLONG", "ULONGLONG", "FLOAT", "DOUBLE", "CFLOAT", ] _cast_dict = {"BOOL": ["BOOL"]} _cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"] _cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"] _cast_dict["BYTE"] = ["BYTE"] _cast_dict["UBYTE"] = ["UBYTE"] _cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"] _cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"] _cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"] _cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"] _cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"] _cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"] _cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"] _cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"] _cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"] _cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"] _cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"] # 32 bit system malloc typically does not provide the alignment required by # 16 byte long double types this means the inout intent cannot be satisfied # and several tests fail as the alignment flag can be randomly true or fals # when numpy gains an aligned allocator the tests could be enabled again # # Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. 
if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) and sys.platform != "win32" and (platform.system(), platform.processor()) != ("Darwin", "arm")): _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ "ULONG", "FLOAT", "DOUBLE", "LONGDOUBLE", ] _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [ "CFLOAT", "CDOUBLE", "CLONGDOUBLE", ] _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"] class Type: _type_cache = {} def __new__(cls, name): if isinstance(name, np.dtype): dtype0 = name name = None for n, i in typeinfo.items(): if not isinstance(i, type) and dtype0.type is i.type: name = n break obj = cls._type_cache.get(name.upper(), None) if obj is not None: return obj obj = object.__new__(cls) obj._init(name) cls._type_cache[name.upper()] = obj return obj def _init(self, name): self.NAME = name.upper() info = typeinfo[self.NAME] self.type_num = getattr(wrap, "NPY_" + self.NAME) assert self.type_num == info.num self.dtype = np.dtype(info.type) self.type = info.type self.elsize = info.bits / 8 self.dtypechar = info.char def cast_types(self): return [self.__class__(_m) for _m in _cast_dict[self.NAME]] def all_types(self): return [self.__class__(_m) for _m in _type_names] def smaller_types(self): bits = typeinfo[self.NAME].alignment types = [] for name in _type_names: if typeinfo[name].alignment < bits: types.append(Type(name)) return types def equal_types(self): bits = typeinfo[self.NAME].alignment types = [] for name in _type_names: if name == self.NAME: continue if typeinfo[name].alignment == bits: types.append(Type(name)) return types def larger_types(self): bits = typeinfo[self.NAME].alignment types = [] for name in _type_names: if typeinfo[name].alignment > bits: types.append(Type(name)) return types class Array: def __init__(self, typ, dims, intent, obj): self.type = typ self.dims = dims self.intent = intent self.obj_copy = copy.deepcopy(obj) self.obj = obj # arr.dtypechar may be different from typ.dtypechar self.arr = wrap.call(typ.type_num, dims, intent.flags, obj) assert isinstance(self.arr, np.ndarray) self.arr_attr = wrap.array_attrs(self.arr) if len(dims) > 1: if self.intent.is_intent("c"): assert (intent.flags & wrap.F2PY_INTENT_C) assert not self.arr.flags["FORTRAN"] assert self.arr.flags["CONTIGUOUS"] assert (not self.arr_attr[6] & wrap.FORTRAN) else: assert (not intent.flags & wrap.F2PY_INTENT_C) assert self.arr.flags["FORTRAN"] assert not self.arr.flags["CONTIGUOUS"] assert (self.arr_attr[6] & wrap.FORTRAN) if obj is None: self.pyarr = None self.pyarr_attr = None return if intent.is_intent("cache"): assert isinstance(obj, np.ndarray), repr(type(obj)) self.pyarr = np.array(obj).reshape(*dims).copy() else: self.pyarr = np.array( np.array(obj, dtype=typ.dtypechar).reshape(*dims), order=self.intent.is_intent("c") and "C" or "F", ) assert self.pyarr.dtype == typ self.pyarr.setflags(write=self.arr.flags["WRITEABLE"]) assert self.pyarr.flags["OWNDATA"], (obj, intent) self.pyarr_attr = wrap.array_attrs(self.pyarr) if len(dims) > 1: if self.intent.is_intent("c"): assert not self.pyarr.flags["FORTRAN"] assert self.pyarr.flags["CONTIGUOUS"] assert (not self.pyarr_attr[6] & wrap.FORTRAN) else: assert self.pyarr.flags["FORTRAN"] assert not self.pyarr.flags["CONTIGUOUS"] assert (self.pyarr_attr[6] & wrap.FORTRAN) assert self.arr_attr[1] == self.pyarr_attr[1] # nd assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions if self.arr_attr[1] <= 1: assert self.arr_attr[3] == 
self.pyarr_attr[3], repr(( self.arr_attr[3], self.pyarr_attr[3], self.arr.tobytes(), self.pyarr.tobytes(), )) # strides assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:] # descr assert self.arr_attr[6] == self.pyarr_attr[6], repr(( self.arr_attr[6], self.pyarr_attr[6], flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), flags2names(self.arr_attr[6]), intent, )) # flags if intent.is_intent("cache"): assert self.arr_attr[5][3] >= self.type.elsize else: assert self.arr_attr[5][3] == self.type.elsize assert (self.arr_equal(self.pyarr, self.arr)) if isinstance(self.obj, np.ndarray): if typ.elsize == Type(obj.dtype).elsize: if not intent.is_intent("copy") and self.arr_attr[1] <= 1: assert self.has_shared_memory() def arr_equal(self, arr1, arr2): if arr1.shape != arr2.shape: return False return (arr1 == arr2).all() def __str__(self): return str(self.arr) def has_shared_memory(self): """Check that created array shares data with input array.""" if self.obj is self.arr: return True if not isinstance(self.obj, np.ndarray): return False obj_attr = wrap.array_attrs(self.obj) return obj_attr[0] == self.arr_attr[0] class TestIntent: def test_in_out(self): assert str(intent.in_.out) == "intent(in,out)" assert intent.in_.c.is_intent("c") assert not intent.in_.c.is_intent_exact("c") assert intent.in_.c.is_intent_exact("c", "in") assert intent.in_.c.is_intent_exact("in", "c") assert not intent.in_.is_intent("c") class TestSharedMemory: num2seq = [1, 2] num23seq = [[1, 2, 3], [4, 5, 6]] @pytest.fixture(autouse=True, scope="class", params=_type_names) def setup_type(self, request): request.cls.type = Type(request.param) request.cls.array = lambda self, dims, intent, obj: Array( Type(request.param), dims, intent, obj) def test_in_from_2seq(self): a = self.array([2], intent.in_, self.num2seq) assert not a.has_shared_memory() def test_in_from_2casttype(self): for t in self.type.cast_types(): obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_, obj) if t.elsize == self.type.elsize: assert a.has_shared_memory(), repr((self.type.dtype, t.dtype)) else: assert not a.has_shared_memory() @pytest.mark.parametrize("write", ["w", "ro"]) @pytest.mark.parametrize("order", ["C", "F"]) @pytest.mark.parametrize("inp", ["2seq", "23seq"]) def test_in_nocopy(self, write, order, inp): """Test if intent(in) array can be passed without copies""" seq = getattr(self, "num" + inp) obj = np.array(seq, dtype=self.type.dtype, order=order) obj.setflags(write=(write == "w")) a = self.array(obj.shape, ((order == "C" and intent.in_.c) or intent.in_), obj) assert a.has_shared_memory() def test_inout_2seq(self): obj = np.array(self.num2seq, dtype=self.type.dtype) a = self.array([len(self.num2seq)], intent.inout, obj) assert a.has_shared_memory() try: a = self.array([2], intent.in_.inout, self.num2seq) except TypeError as msg: if not str(msg).startswith( "failed to initialize intent(inout|inplace|cache) array"): raise else: raise SystemError("intent(inout) should have failed on sequence") def test_f_inout_23seq(self): obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.inout, obj) assert a.has_shared_memory() obj = np.array(self.num23seq, dtype=self.type.dtype, order="C") shape = (len(self.num23seq), len(self.num23seq[0])) try: a = self.array(shape, intent.in_.inout, obj) except ValueError as msg: if not str(msg).startswith( "failed to initialize intent(inout) array"): raise else: raise SystemError( 
"intent(inout) should have failed on improper array") def test_c_inout_23seq(self): obj = np.array(self.num23seq, dtype=self.type.dtype) shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.c.inout, obj) assert a.has_shared_memory() def test_in_copy_from_2casttype(self): for t in self.type.cast_types(): obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_.copy, obj) assert not a.has_shared_memory() def test_c_in_from_23seq(self): a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_, self.num23seq) assert not a.has_shared_memory() def test_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype) a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) assert not a.has_shared_memory() def test_f_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype, order="F") a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) if t.elsize == self.type.elsize: assert a.has_shared_memory() else: assert not a.has_shared_memory() def test_c_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype) a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) if t.elsize == self.type.elsize: assert a.has_shared_memory() else: assert not a.has_shared_memory() def test_f_copy_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype, order="F") a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, obj) assert not a.has_shared_memory() def test_c_copy_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype) a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, obj) assert not a.has_shared_memory() def test_in_cache_from_2casttype(self): for t in self.type.all_types(): if t.elsize != self.type.elsize: continue obj = np.array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq), ) a = self.array(shape, intent.in_.c.cache, obj) assert a.has_shared_memory() a = self.array(shape, intent.in_.cache, obj) assert a.has_shared_memory() obj = np.array(self.num2seq, dtype=t.dtype, order="F") a = self.array(shape, intent.in_.c.cache, obj) assert a.has_shared_memory() a = self.array(shape, intent.in_.cache, obj) assert a.has_shared_memory(), repr(t.dtype) try: a = self.array(shape, intent.in_.cache, obj[::-1]) except ValueError as msg: if not str(msg).startswith( "failed to initialize intent(cache) array"): raise else: raise SystemError( "intent(cache) should have failed on multisegmented array") def test_in_cache_from_2casttype_failure(self): for t in self.type.all_types(): if t.elsize >= self.type.elsize: continue obj = np.array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq), ) try: self.array(shape, intent.in_.cache, obj) # Should succeed except ValueError as msg: if not str(msg).startswith( "failed to initialize intent(cache) array"): raise else: raise SystemError( "intent(cache) should have failed on smaller array") def test_cache_hidden(self): shape = (2, ) a = self.array(shape, intent.cache.hide, None) assert a.arr.shape == shape shape = (2, 3) a = self.array(shape, intent.cache.hide, None) assert a.arr.shape == shape shape = (-1, 3) try: a = self.array(shape, intent.cache.hide, None) except ValueError as msg: if not str(msg).startswith( "failed to create 
intent(cache|hide)|optional array"): raise else: raise SystemError( "intent(cache) should have failed on undefined dimensions") def test_hidden(self): shape = (2, ) a = self.array(shape, intent.hide, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) shape = (2, 3) a = self.array(shape, intent.hide, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] shape = (2, 3) a = self.array(shape, intent.c.hide, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] shape = (-1, 3) try: a = self.array(shape, intent.hide, None) except ValueError as msg: if not str(msg).startswith( "failed to create intent(cache|hide)|optional array"): raise else: raise SystemError( "intent(hide) should have failed on undefined dimensions") def test_optional_none(self): shape = (2, ) a = self.array(shape, intent.optional, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) shape = (2, 3) a = self.array(shape, intent.optional, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] shape = (2, 3) a = self.array(shape, intent.c.optional, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] def test_optional_from_2seq(self): obj = self.num2seq shape = (len(obj), ) a = self.array(shape, intent.optional, obj) assert a.arr.shape == shape assert not a.has_shared_memory() def test_optional_from_23seq(self): obj = self.num23seq shape = (len(obj), len(obj[0])) a = self.array(shape, intent.optional, obj) assert a.arr.shape == shape assert not a.has_shared_memory() a = self.array(shape, intent.optional.c, obj) assert a.arr.shape == shape assert not a.has_shared_memory() def test_inplace(self): obj = np.array(self.num23seq, dtype=self.type.dtype) assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape a = self.array(shape, intent.inplace, obj) assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) a.arr[1][2] = 54 assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) assert a.arr is obj assert obj.flags["FORTRAN"] # obj attributes are changed inplace! assert not obj.flags["CONTIGUOUS"] def test_inplace_from_casttype(self): for t in self.type.cast_types(): if t is self.type: continue obj = np.array(self.num23seq, dtype=t.dtype) assert obj.dtype.type == t.type assert obj.dtype.type is not self.type.type assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape a = self.array(shape, intent.inplace, obj) assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) a.arr[1][2] = 54 assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) assert a.arr is obj assert obj.flags["FORTRAN"] # obj attributes changed inplace! assert not obj.flags["CONTIGUOUS"] assert obj.dtype.type is self.type.type # obj changed inplace!
22,071
Python
34.146497
79
0.54438
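The has_shared_memory() helper above decides whether array_from_pyobj returned a view of the caller's buffer or made a copy, by comparing the data pointers reported by wrap.array_attrs(). The same question can be asked with public NumPy API; a small illustrative sketch (not part of the test module):

import numpy as np

obj = np.array([1, 2], dtype=np.float32)
view = np.asarray(obj, dtype=np.float32)  # same dtype: no copy is made
cast = np.array(obj, dtype=np.float64)    # dtype change forces a copy

assert np.shares_memory(obj, view)
assert not np.shares_memory(obj, cast)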
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_callback.py
import math import textwrap import sys import pytest import threading import traceback import time import numpy as np from numpy.testing import IS_PYPY from . import util class TestF77Callback(util.F2PyTest): sources = [util.getpath("tests", "src", "callback", "foo.f")] @pytest.mark.parametrize("name", "t,t2".split(",")) def test_all(self, name): self.check_function(name) @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") def test_docstring(self): expected = textwrap.dedent("""\ a = t(fun,[fun_extra_args]) Wrapper for ``t``. Parameters ---------- fun : call-back function Other Parameters ---------------- fun_extra_args : input tuple, optional Default: () Returns ------- a : int Notes ----- Call-back functions:: def fun(): return a Return objects: a : int """) assert self.module.t.__doc__ == expected def check_function(self, name): t = getattr(self.module, name) r = t(lambda: 4) assert r == 4 r = t(lambda a: 5, fun_extra_args=(6, )) assert r == 5 r = t(lambda a: a, fun_extra_args=(6, )) assert r == 6 r = t(lambda a: 5 + a, fun_extra_args=(7, )) assert r == 12 r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, )) assert r == 180 r = t(math.degrees, fun_extra_args=(math.pi, )) assert r == 180 r = t(self.module.func, fun_extra_args=(6, )) assert r == 17 r = t(self.module.func0) assert r == 11 r = t(self.module.func0._cpointer) assert r == 11 class A: def __call__(self): return 7 def mth(self): return 9 a = A() r = t(a) assert r == 7 r = t(a.mth) assert r == 9 @pytest.mark.skipif(sys.platform == "win32", reason="Fails with MinGW64 Gfortran (Issue #9673)") def test_string_callback(self): def callback(code): if code == "r": return 0 else: return 1 f = getattr(self.module, "string_callback") r = f(callback) assert r == 0 @pytest.mark.skipif(sys.platform == "win32", reason="Fails with MinGW64 Gfortran (Issue #9673)") def test_string_callback_array(self): # See gh-10027 cu = np.zeros((1, 8), "S1") def callback(cu, lencu): if cu.shape != (lencu, 8): return 1 if cu.dtype != "S1": return 2 if not np.all(cu == b""): return 3 return 0 f = getattr(self.module, "string_callback_array") res = f(callback, cu, len(cu)) assert res == 0 def test_threadsafety(self): # Segfaults if the callback handling is not threadsafe errors = [] def cb(): # Sleep here to make it more likely for another thread # to call their callback at the same time. 
time.sleep(1e-3) # Check reentrancy r = self.module.t(lambda: 123) assert r == 123 return 42 def runner(name): try: for j in range(50): r = self.module.t(cb) assert r == 42 self.check_function(name) except Exception: errors.append(traceback.format_exc()) threads = [ threading.Thread(target=runner, args=(arg, )) for arg in ("t", "t2") for n in range(20) ] for t in threads: t.start() for t in threads: t.join() errors = "\n\n".join(errors) if errors: raise AssertionError(errors) def test_hidden_callback(self): try: self.module.hidden_callback(2) except Exception as msg: assert str(msg).startswith("Callback global_f not defined") try: self.module.hidden_callback2(2) except Exception as msg: assert str(msg).startswith("cb: Callback global_f not defined") self.module.global_f = lambda x: x + 1 r = self.module.hidden_callback(2) assert r == 3 self.module.global_f = lambda x: x + 2 r = self.module.hidden_callback(2) assert r == 4 del self.module.global_f try: self.module.hidden_callback(2) except Exception as msg: assert str(msg).startswith("Callback global_f not defined") self.module.global_f = lambda x=0: x + 3 r = self.module.hidden_callback(2) assert r == 5 # reproducer of gh18341 r = self.module.hidden_callback2(2) assert r == 3 class TestF77CallbackPythonTLS(TestF77Callback): """ Callback tests using Python thread-local storage instead of compiler-provided """ options = ["-DF2PY_USE_PYTHON_TLS"] class TestF90Callback(util.F2PyTest): sources = [util.getpath("tests", "src", "callback", "gh17797.f90")] def test_gh17797(self): def incr(x): return x + 123 y = np.array([1, 2, 3], dtype=np.int64) r = self.module.gh17797(incr, y) assert r == 123 + 1 + 2 + 3 class TestGH18335(util.F2PyTest): """The reproduction of the reported issue requires specific input that extensions may break the issue conditions, so the reproducer is implemented as a separate test class. Do not extend this test with other tests! """ sources = [util.getpath("tests", "src", "callback", "gh18335.f90")] def test_gh18335(self): def foo(x): x[0] += 1 y = np.array([1, 2, 3], dtype=np.int8) r = self.module.gh18335(foo) assert r == 123 + 1
6,087
Python
25.585153
77
0.518975
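The fun_extra_args convention exercised above means the generated wrapper calls the Python callback with the tuple's items passed as positional arguments. A rough pure-Python analogue of that dispatch (the real call happens in generated C code):

def call_like_f2py(fun, fun_extra_args=()):
    # the wrapper forwards the extra-args tuple positionally to the callback
    return fun(*fun_extra_args)

assert call_like_f2py(lambda: 4) == 4
assert call_like_f2py(lambda a: 5 + a, fun_extra_args=(7,)) == 12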
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_return_real.py
import platform import pytest import numpy as np from numpy import array from . import util class TestReturnReal(util.F2PyTest): def check_function(self, t, tname): if tname in ["t0", "t4", "s0", "s4"]: err = 1e-5 else: err = 0.0 assert abs(t(234) - 234.0) <= err assert abs(t(234.6) - 234.6) <= err assert abs(t("234") - 234) <= err assert abs(t("234.6") - 234.6) <= err assert abs(t(-234) + 234) <= err assert abs(t([234]) - 234) <= err assert abs(t((234, )) - 234.0) <= err assert abs(t(array(234)) - 234.0) <= err assert abs(t(array([234])) - 234.0) <= err assert abs(t(array([[234]])) - 234.0) <= err assert abs(t(array([234], "b")) + 22) <= err assert abs(t(array([234], "h")) - 234.0) <= err assert abs(t(array([234], "i")) - 234.0) <= err assert abs(t(array([234], "l")) - 234.0) <= err assert abs(t(array([234], "B")) - 234.0) <= err assert abs(t(array([234], "f")) - 234.0) <= err assert abs(t(array([234], "d")) - 234.0) <= err if tname in ["t0", "t4", "s0", "s4"]: assert t(1e200) == t(1e300) # inf # pytest.raises(ValueError, t, array([234], 'S1')) pytest.raises(ValueError, t, "abc") pytest.raises(IndexError, t, []) pytest.raises(IndexError, t, ()) pytest.raises(Exception, t, t) pytest.raises(Exception, t, {}) try: r = t(10**400) assert repr(r) in ["inf", "Infinity"] except OverflowError: pass @pytest.mark.skipif( platform.system() == "Darwin", reason="Prone to error when run with numpy/f2py/tests on mac os, " "but not when run in isolation", ) @pytest.mark.skipif( np.dtype(np.intp).itemsize < 8, reason="32-bit builds are buggy" ) class TestCReturnReal(TestReturnReal): suffix = ".pyf" module_name = "c_ext_return_real" code = """ python module c_ext_return_real usercode \'\'\' float t4(float value) { return value; } void s4(float *t4, float value) { *t4 = value; } double t8(double value) { return value; } void s8(double *t8, double value) { *t8 = value; } \'\'\' interface function t4(value) real*4 intent(c) :: t4,value end function t8(value) real*8 intent(c) :: t8,value end subroutine s4(t4,value) intent(c) s4 real*4 intent(out) :: t4 real*4 intent(c) :: value end subroutine s8(t8,value) intent(c) s8 real*8 intent(out) :: t8 real*8 intent(c) :: value end end interface end python module c_ext_return_real """ @pytest.mark.parametrize("name", "t4,t8,s4,s8".split(",")) def test_all(self, name): self.check_function(getattr(self.module, name), name) class TestFReturnReal(TestReturnReal): sources = [ util.getpath("tests", "src", "return_real", "foo77.f"), util.getpath("tests", "src", "return_real", "foo90.f90"), ] @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) def test_all_f77(self, name): self.check_function(getattr(self.module, name), name) @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(",")) def test_all_f90(self, name): self.check_function(getattr(self.module.f90_return_real, name), name)
3,346
Python
29.427272
77
0.566348
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_mixed.py
import os
import textwrap
import pytest

from numpy.testing import IS_PYPY
from . import util


class TestMixed(util.F2PyTest):
    sources = [
        util.getpath("tests", "src", "mixed", "foo.f"),
        util.getpath("tests", "src", "mixed", "foo_fixed.f90"),
        util.getpath("tests", "src", "mixed", "foo_free.f90"),
    ]

    def test_all(self):
        assert self.module.bar11() == 11
        assert self.module.foo_fixed.bar12() == 12
        assert self.module.foo_free.bar13() == 13

    @pytest.mark.xfail(IS_PYPY,
                       reason="PyPy cannot modify tp_doc after PyType_Ready")
    def test_docstring(self):
        expected = textwrap.dedent("""\
        a = bar11()

        Wrapper for ``bar11``.

        Returns
        -------
        a : int
        """)
        assert self.module.bar11.__doc__ == expected
848
Python
23.970588
77
0.558962
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_block_docstring.py
import sys
import pytest
from . import util
from numpy.testing import IS_PYPY


class TestBlockDocString(util.F2PyTest):
    sources = [util.getpath("tests", "src", "block_docstring", "foo.f")]

    @pytest.mark.skipif(sys.platform == "win32",
                        reason="Fails with MinGW64 Gfortran (Issue #9673)")
    @pytest.mark.xfail(IS_PYPY,
                       reason="PyPy cannot modify tp_doc after PyType_Ready")
    def test_block_docstring(self):
        expected = "bar : 'i'-array(2,3)\n"
        assert self.module.block.__doc__ == expected
564
Python
30.388887
77
0.62766
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_assumed_shape.py
import os
import pytest
import tempfile

from . import util


class TestAssumedShapeSumExample(util.F2PyTest):
    sources = [
        util.getpath("tests", "src", "assumed_shape", "foo_free.f90"),
        util.getpath("tests", "src", "assumed_shape", "foo_use.f90"),
        util.getpath("tests", "src", "assumed_shape", "precision.f90"),
        util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"),
        util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"),
    ]

    @pytest.mark.slow
    def test_all(self):
        r = self.module.fsum([1, 2])
        assert r == 3
        r = self.module.sum([1, 2])
        assert r == 3
        r = self.module.sum_with_use([1, 2])
        assert r == 3

        r = self.module.mod.sum([1, 2])
        assert r == 3
        r = self.module.mod.fsum([1, 2])
        assert r == 3


class TestF2cmapOption(TestAssumedShapeSumExample):
    def setup_method(self):
        # Use a custom file name for .f2py_f2cmap
        self.sources = list(self.sources)
        f2cmap_src = self.sources.pop(-1)

        self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False)
        with open(f2cmap_src, "rb") as f:
            self.f2cmap_file.write(f.read())
        self.f2cmap_file.close()

        self.sources.append(self.f2cmap_file.name)
        self.options = ["--f2cmap", self.f2cmap_file.name]
        super().setup_method()

    def teardown_method(self):
        os.unlink(self.f2cmap_file.name)
1,466
Python
28.339999
71
0.587312
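TestF2cmapOption above only changes how the KIND-map file is located: through the --f2cmap option instead of the default .f2py_f2cmap name. Such a file holds a Python dict literal mapping Fortran types and kind names to C types; a hypothetical example of its contents (the kind name rk is an assumption, not read from the file used by the test):

# possible contents of a .f2py_f2cmap / --f2cmap file
dict(real=dict(rk="double"))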
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_compile_function.py
"""See https://github.com/numpy/numpy/pull/11937. """ import sys import os import uuid from importlib import import_module import pytest import numpy.f2py from . import util def setup_module(): if not util.has_c_compiler(): pytest.skip("Needs C compiler") if not util.has_f77_compiler(): pytest.skip("Needs FORTRAN 77 compiler") # extra_args can be a list (since gh-11937) or string. # also test absence of extra_args @pytest.mark.parametrize("extra_args", [["--noopt", "--debug"], "--noopt --debug", ""]) @pytest.mark.leaks_references(reason="Imported module seems never deleted.") def test_f2py_init_compile(extra_args): # flush through the f2py __init__ compile() function code path as a # crude test for input handling following migration from # exec_command() to subprocess.check_output() in gh-11937 # the Fortran 77 syntax requires 6 spaces before any commands, but # more space may be added/ fsource = """ integer function foo() foo = 10 + 5 return end """ # use various helper functions in util.py to enable robust build / # compile and reimport cycle in test suite moddir = util.get_module_dir() modname = util.get_temp_module_name() cwd = os.getcwd() target = os.path.join(moddir, str(uuid.uuid4()) + ".f") # try running compile() with and without a source_fn provided so # that the code path where a temporary file for writing Fortran # source is created is also explored for source_fn in [target, None]: # mimic the path changing behavior used by build_module() in # util.py, but don't actually use build_module() because it has # its own invocation of subprocess that circumvents the # f2py.compile code block under test with util.switchdir(moddir): ret_val = numpy.f2py.compile(fsource, modulename=modname, extra_args=extra_args, source_fn=source_fn) # check for compile success return value assert ret_val == 0 # we are not currently able to import the Python-Fortran # interface module on Windows / Appveyor, even though we do get # successful compilation on that platform with Python 3.x if sys.platform != "win32": # check for sensible result of Fortran function; that means # we can import the module name in Python and retrieve the # result of the sum operation return_check = import_module(modname) calc_result = return_check.foo() assert calc_result == 15 # Removal from sys.modules, is not as such necessary. Even with # removal, the module (dict) stays alive. del sys.modules[modname] def test_f2py_init_compile_failure(): # verify an appropriate integer status value returned by # f2py.compile() when invalid Fortran is provided ret_val = numpy.f2py.compile(b"invalid") assert ret_val == 1 def test_f2py_init_compile_bad_cmd(): # verify that usage of invalid command in f2py.compile() returns # status value of 127 for historic consistency with exec_command() # error handling # patch the sys Python exe path temporarily to induce an OSError # downstream NOTE: how bad of an idea is this patching? try: temp = sys.executable sys.executable = "does not exist" # the OSError should take precedence over invalid Fortran ret_val = numpy.f2py.compile(b"invalid") assert ret_val == 127 finally: sys.executable = temp @pytest.mark.parametrize( "fsource", [ "program test_f2py\nend program test_f2py", b"program test_f2py\nend program test_f2py", ], ) def test_compile_from_strings(tmpdir, fsource): # Make sure we can compile str and bytes gh-12796 with util.switchdir(tmpdir): ret_val = numpy.f2py.compile(fsource, modulename="test_compile_from_strings", extension=".f90") assert ret_val == 0
4,186
Python
34.483051
76
0.632824
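For reference, the numpy.f2py.compile() code path exercised above can be driven directly. A minimal sketch, assuming a Fortran compiler is available (the module name addmod and the trivial source are hypothetical; the return codes 0, 1 and 127 are the ones asserted in the tests above):

import numpy.f2py

fsource = """
       integer function add(a, b)
       integer a, b
       add = a + b
       end
"""
ret = numpy.f2py.compile(fsource, modulename="addmod", extension=".f", verbose=False)
assert ret == 0  # 1 would mean a compile failure, 127 a command that could not run
# afterwards:  import addmod; addmod.add(2, 3) == 5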
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_size.py
import os
import pytest

import numpy as np

from . import util


class TestSizeSumExample(util.F2PyTest):
    sources = [util.getpath("tests", "src", "size", "foo.f90")]

    @pytest.mark.slow
    def test_all(self):
        r = self.module.foo([[]])
        assert r == [0]

        r = self.module.foo([[1, 2]])
        assert r == [3]

        r = self.module.foo([[1, 2], [3, 4]])
        assert np.allclose(r, [3, 7])

        r = self.module.foo([[1, 2], [3, 4], [5, 6]])
        assert np.allclose(r, [3, 7, 11])

    @pytest.mark.slow
    def test_transpose(self):
        r = self.module.trans([[]])
        assert np.allclose(r.T, np.array([[]]))

        r = self.module.trans([[1, 2]])
        assert np.allclose(r, [[1.], [2.]])

        r = self.module.trans([[1, 2, 3], [4, 5, 6]])
        assert np.allclose(r, [[1, 4], [2, 5], [3, 6]])

    @pytest.mark.slow
    def test_flatten(self):
        r = self.module.flatten([[]])
        assert np.allclose(r, [])

        r = self.module.flatten([[1, 2]])
        assert np.allclose(r, [1, 2])

        r = self.module.flatten([[1, 2, 3], [4, 5, 6]])
        assert np.allclose(r, [1, 2, 3, 4, 5, 6])
1,164
Python
24.326086
63
0.5
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/test_module_doc.py
import os
import sys
import pytest
import textwrap

from . import util
from numpy.testing import IS_PYPY


class TestModuleDocString(util.F2PyTest):
    sources = [
        util.getpath("tests", "src", "module_data", "module_data_docstring.f90")
    ]

    @pytest.mark.skipif(sys.platform == "win32",
                        reason="Fails with MinGW64 Gfortran (Issue #9673)")
    @pytest.mark.xfail(IS_PYPY,
                       reason="PyPy cannot modify tp_doc after PyType_Ready")
    def test_module_docstring(self):
        assert self.module.mod.__doc__ == textwrap.dedent("""\
                     i : 'i'-scalar
                     x : 'i'-array(4)
                     a : 'f'-array(2,3)
                     b : 'f'-array(-1,-1), not allocated\x00
                     foo()\n
                     Wrapper for ``foo``.\n\n""")
863
Python
29.857142
77
0.514484
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
/* * This file was auto-generated with f2py (version:2_1330) and hand edited by * Pearu for testing purposes. Do not edit this file unless you know what you * are doing!!! */ #ifdef __cplusplus extern "C" { #endif /*********************** See f2py2e/cfuncs.py: includes ***********************/ #define PY_SSIZE_T_CLEAN #include <Python.h> #include "fortranobject.h" #include <math.h> static PyObject *wrap_error; static PyObject *wrap_module; /************************************ call ************************************/ static char doc_f2py_rout_wrap_call[] = "\ Function signature:\n\ arr = call(type_num,dims,intent,obj)\n\ Required arguments:\n" " type_num : input int\n" " dims : input int-sequence\n" " intent : input int\n" " obj : input python object\n" "Return objects:\n" " arr : array"; static PyObject *f2py_rout_wrap_call(PyObject *capi_self, PyObject *capi_args) { PyObject * volatile capi_buildvalue = NULL; int type_num = 0; npy_intp *dims = NULL; PyObject *dims_capi = Py_None; int rank = 0; int intent = 0; PyArrayObject *capi_arr_tmp = NULL; PyObject *arr_capi = Py_None; int i; if (!PyArg_ParseTuple(capi_args,"iOiO|:wrap.call",\ &type_num,&dims_capi,&intent,&arr_capi)) return NULL; rank = PySequence_Length(dims_capi); dims = malloc(rank*sizeof(npy_intp)); for (i=0;i<rank;++i) { PyObject *tmp; tmp = PySequence_GetItem(dims_capi, i); if (tmp == NULL) { goto fail; } dims[i] = (npy_intp)PyLong_AsLong(tmp); Py_DECREF(tmp); if (dims[i] == -1 && PyErr_Occurred()) { goto fail; } } capi_arr_tmp = array_from_pyobj(type_num,dims,rank,intent|F2PY_INTENT_OUT,arr_capi); if (capi_arr_tmp == NULL) { free(dims); return NULL; } capi_buildvalue = Py_BuildValue("N",capi_arr_tmp); free(dims); return capi_buildvalue; fail: free(dims); return NULL; } static char doc_f2py_rout_wrap_attrs[] = "\ Function signature:\n\ arr = array_attrs(arr)\n\ Required arguments:\n" " arr : input array object\n" "Return objects:\n" " data : data address in hex\n" " nd : int\n" " dimensions : tuple\n" " strides : tuple\n" " base : python object\n" " (kind,type,type_num,elsize,alignment) : 4-tuple\n" " flags : int\n" " itemsize : int\n" ; static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyObject *capi_args) { PyObject *arr_capi = Py_None; PyArrayObject *arr = NULL; PyObject *dimensions = NULL; PyObject *strides = NULL; char s[100]; int i; memset(s,0,100); if (!PyArg_ParseTuple(capi_args,"O!|:wrap.attrs", &PyArray_Type,&arr_capi)) return NULL; arr = (PyArrayObject *)arr_capi; sprintf(s,"%p",PyArray_DATA(arr)); dimensions = PyTuple_New(PyArray_NDIM(arr)); strides = PyTuple_New(PyArray_NDIM(arr)); for (i=0;i<PyArray_NDIM(arr);++i) { PyTuple_SetItem(dimensions,i,PyLong_FromLong(PyArray_DIM(arr,i))); PyTuple_SetItem(strides,i,PyLong_FromLong(PyArray_STRIDE(arr,i))); } return Py_BuildValue("siNNO(cciii)ii",s,PyArray_NDIM(arr), dimensions,strides, (PyArray_BASE(arr)==NULL?Py_None:PyArray_BASE(arr)), PyArray_DESCR(arr)->kind, PyArray_DESCR(arr)->type, PyArray_TYPE(arr), PyArray_ITEMSIZE(arr), PyArray_DESCR(arr)->alignment, PyArray_FLAGS(arr), PyArray_ITEMSIZE(arr)); } static PyMethodDef f2py_module_methods[] = { {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, {NULL,NULL} }; static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "test_array_from_pyobj_ext", NULL, -1, f2py_module_methods, NULL, NULL, NULL, NULL }; PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { PyObject *m,*d, *s; m = wrap_module = 
PyModule_Create(&moduledef); Py_SET_TYPE(&PyFortran_Type, &PyType_Type); import_array(); if (PyErr_Occurred()) Py_FatalError("can't initialize module wrap (failed to import numpy)"); d = PyModule_GetDict(m); s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" " arr = call(type_num,dims,intent,obj)\n" "."); PyDict_SetItemString(d, "__doc__", s); wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); Py_DECREF(s); #define ADDCONST(NAME, CONST) \ s = PyLong_FromLong(CONST); \ PyDict_SetItemString(d, NAME, s); \ Py_DECREF(s) ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN); ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT); ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT); ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE); ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE); ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY); ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C); ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL); ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE); ADDCONST("NPY_BOOL", NPY_BOOL); ADDCONST("NPY_BYTE", NPY_BYTE); ADDCONST("NPY_UBYTE", NPY_UBYTE); ADDCONST("NPY_SHORT", NPY_SHORT); ADDCONST("NPY_USHORT", NPY_USHORT); ADDCONST("NPY_INT", NPY_INT); ADDCONST("NPY_UINT", NPY_UINT); ADDCONST("NPY_INTP", NPY_INTP); ADDCONST("NPY_UINTP", NPY_UINTP); ADDCONST("NPY_LONG", NPY_LONG); ADDCONST("NPY_ULONG", NPY_ULONG); ADDCONST("NPY_LONGLONG", NPY_LONGLONG); ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG); ADDCONST("NPY_FLOAT", NPY_FLOAT); ADDCONST("NPY_DOUBLE", NPY_DOUBLE); ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE); ADDCONST("NPY_CFLOAT", NPY_CFLOAT); ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); ADDCONST("NPY_OBJECT", NPY_OBJECT); ADDCONST("NPY_STRING", NPY_STRING); ADDCONST("NPY_UNICODE", NPY_UNICODE); ADDCONST("NPY_VOID", NPY_VOID); ADDCONST("NPY_NTYPES", NPY_NTYPES); ADDCONST("NPY_NOTYPE", NPY_NOTYPE); ADDCONST("NPY_USERDEF", NPY_USERDEF); ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); ADDCONST("CARRAY", NPY_ARRAY_CARRAY); ADDCONST("FARRAY", NPY_ARRAY_FARRAY); ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); #undef ADDCONST( if (PyErr_Occurred()) Py_FatalError("can't initialize module wrap"); #ifdef F2PY_REPORT_ATEXIT on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); #endif return m; } #ifdef __cplusplus } #endif
7,244
C
30.5
107
0.628244
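The two C entry points defined above are what test_array_from_pyobj.py drives from Python. A hedged sketch of calling them once the helper extension has been built (the build itself happens in that test's setup_module via util.build_module_distutils; the constant names follow the ADDCONST definitions in the C source):

import test_array_from_pyobj_ext as wrap  # extension built from wrapmodule.c

# arr = call(type_num, dims, intent, obj): run array_from_pyobj and return the result
arr = wrap.call(wrap.NPY_DOUBLE, [2, 3], wrap.F2PY_INTENT_IN, [[1, 2, 3], [4, 5, 6]])

# array_attrs(arr): data address, nd, dimensions, strides, base, descr info, flags, itemsize
attrs = wrap.array_attrs(arr)
assert attrs[1] == 2 and tuple(attrs[2]) == (2, 3)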
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/mingw32ccompiler.py
""" Support code for building Python extensions on Windows. # NT stuff # 1. Make sure libpython<version>.a exists for gcc. If not, build it. # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) # 3. Force windows to use g77 """ import os import platform import sys import subprocess import re import textwrap # Overwrite certain distutils.ccompiler functions: import numpy.distutils.ccompiler # noqa: F401 from numpy.distutils import log # NT stuff # 1. Make sure libpython<version>.a exists for gcc. If not, build it. # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) # --> this is done in numpy/distutils/ccompiler.py # 3. Force windows to use g77 import distutils.cygwinccompiler from distutils.unixccompiler import UnixCCompiler from distutils.msvccompiler import get_build_version as get_build_msvc_version from distutils.errors import UnknownFileError from numpy.distutils.misc_util import (msvc_runtime_library, msvc_runtime_version, msvc_runtime_major, get_build_architecture) def get_msvcr_replacement(): """Replacement for outdated version of get_msvcr from cygwinccompiler""" msvcr = msvc_runtime_library() return [] if msvcr is None else [msvcr] # Useful to generate table of symbols from a dll _START = re.compile(r'\[Ordinal/Name Pointer\] Table') _TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') # the same as cygwin plus some additional parameters class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): """ A modified MingW32 compiler compatible with an MSVC built Python. """ compiler_type = 'mingw32' def __init__ (self, verbose=0, dry_run=0, force=0): distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, dry_run, force) # **changes: eric jones 4/11/01 # 1. Check for import library on Windows. Build if it doesn't exist. build_import_library() # Check for custom msvc runtime library on Windows. Build if it doesn't exist. msvcr_success = build_msvcr_library() msvcr_dbg_success = build_msvcr_library(debug=True) if msvcr_success or msvcr_dbg_success: # add preprocessor statement for using customized msvcr lib self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') # Define the MSVC version as hint for MinGW msvcr_version = msvc_runtime_version() if msvcr_version: self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) # MS_WIN64 should be defined when building for amd64 on windows, # but python headers define it only for MS compilers, which has all # kind of bad consequences, like using Py_ModuleInit4 instead of # Py_ModuleInit4_64, etc... 
So we add it here if get_build_architecture() == 'AMD64': self.set_executables( compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall ' '-Wstrict-prototypes', linker_exe='gcc -g', linker_so='gcc -g -shared') else: self.set_executables( compiler='gcc -O2 -Wall', compiler_so='gcc -O2 -Wall -Wstrict-prototypes', linker_exe='g++ ', linker_so='g++ -shared') # added for python2.3 support # we can't pass it through set_executables because pre 2.2 would fail self.compiler_cxx = ['g++'] # Maybe we should also append -mthreads, but then the finished dlls # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support # thread-safe exception handling on `Mingw32') # no additional libraries needed #self.dll_libraries=[] return # __init__ () def link(self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols = None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): # Include the appropriate MSVC runtime library if Python was built # with MSVC >= 7.0 (MinGW standard is msvcrt) runtime_library = msvc_runtime_library() if runtime_library: if not libraries: libraries = [] libraries.append(runtime_library) args = (self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, None, #export_symbols, we do this in our def-file debug, extra_preargs, extra_postargs, build_temp, target_lang) func = UnixCCompiler.link func(*args[:func.__code__.co_argcount]) return def object_filenames (self, source_filenames, strip_dir=0, output_dir=''): if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: # use normcase to make sure '.rc' is really '.rc' and not '.RC' (base, ext) = os.path.splitext (os.path.normcase(src_name)) # added these lines to strip off windows drive letters # without it, .o files are placed next to .c files # instead of the build directory drv, base = os.path.splitdrive(base) if drv: base = base[1:] if ext not in (self.src_extensions + ['.rc', '.res']): raise UnknownFileError( "unknown file type '%s' (from '%s')" % \ (ext, src_name)) if strip_dir: base = os.path.basename (base) if ext == '.res' or ext == '.rc': # these need to be compiled to object files obj_names.append (os.path.join (output_dir, base + ext + self.obj_extension)) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names # object_filenames () def find_python_dll(): # We can't do much here: # - find it in the virtualenv (sys.prefix) # - find it in python main dir (sys.base_prefix, if in a virtualenv) # - sys.real_prefix is main dir for virtualenvs in Python 2.7 # - in system32, # - ortherwise (Sxs), I don't know how to get it. 
stems = [sys.prefix] if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: stems.append(sys.base_prefix) elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: stems.append(sys.real_prefix) sub_dirs = ['', 'lib', 'bin'] # generate possible combinations of directory trees and sub-directories lib_dirs = [] for stem in stems: for folder in sub_dirs: lib_dirs.append(os.path.join(stem, folder)) # add system directory as well if 'SYSTEMROOT' in os.environ: lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) # search in the file system for possible candidates major_version, minor_version = tuple(sys.version_info[:2]) implementation = platform.python_implementation() if implementation == 'CPython': dllname = f'python{major_version}{minor_version}.dll' elif implementation == 'PyPy': dllname = f'libpypy{major_version}-c.dll' else: dllname = f'Unknown platform {implementation}' print("Looking for %s" % dllname) for folder in lib_dirs: dll = os.path.join(folder, dllname) if os.path.exists(dll): return dll raise ValueError("%s not found in %s" % (dllname, lib_dirs)) def dump_table(dll): st = subprocess.check_output(["objdump.exe", "-p", dll]) return st.split(b'\n') def generate_def(dll, dfile): """Given a dll file location, get all its exported symbols and dump them into the given def file. The .def file will be overwritten""" dump = dump_table(dll) for i in range(len(dump)): if _START.match(dump[i].decode()): break else: raise ValueError("Symbol table not found") syms = [] for j in range(i+1, len(dump)): m = _TABLE.match(dump[j].decode()) if m: syms.append((int(m.group(1).strip()), m.group(2))) else: break if len(syms) == 0: log.warn('No symbols found in %s' % dll) with open(dfile, 'w') as d: d.write('LIBRARY %s\n' % os.path.basename(dll)) d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') d.write(';DATA PRELOAD SINGLE\n') d.write('\nEXPORTS\n') for s in syms: #d.write('@%d %s\n' % (s[0], s[1])) d.write('%s\n' % s[1]) def find_dll(dll_name): arch = {'AMD64' : 'amd64', 'Intel' : 'x86'}[get_build_architecture()] def _find_dll_in_winsxs(dll_name): # Walk through the WinSxS directory to find the dll. winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), 'winsxs') if not os.path.exists(winsxs_path): return None for root, dirs, files in os.walk(winsxs_path): if dll_name in files and arch in root: return os.path.join(root, dll_name) return None def _find_dll_in_path(dll_name): # First, look in the Python directory, then scan PATH for # the given dll name. for path in [sys.prefix] + os.environ['PATH'].split(';'): filepath = os.path.join(path, dll_name) if os.path.exists(filepath): return os.path.abspath(filepath) return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) def build_msvcr_library(debug=False): if os.name != 'nt': return False # If the version number is None, then we couldn't find the MSVC runtime at # all, because we are running on a Python distribution which is customed # compiled; trust that the compiler is the same as the one available to us # now, and that it is capable of linking with the correct runtime without # any extra options. 
msvcr_ver = msvc_runtime_major() if msvcr_ver is None: log.debug('Skip building import library: ' 'Runtime is not compiled with MSVC') return False # Skip using a custom library for versions < MSVC 8.0 if msvcr_ver < 80: log.debug('Skip building msvcr library:' ' custom functionality not present') return False msvcr_name = msvc_runtime_library() if debug: msvcr_name += 'd' # Skip if custom library already exists out_name = "lib%s.a" % msvcr_name out_file = os.path.join(sys.prefix, 'libs', out_name) if os.path.isfile(out_file): log.debug('Skip building msvcr library: "%s" exists' % (out_file,)) return True # Find the msvcr dll msvcr_dll_name = msvcr_name + '.dll' dll_file = find_dll(msvcr_dll_name) if not dll_file: log.warn('Cannot build msvcr library: "%s" not found' % msvcr_dll_name) return False def_name = "lib%s.def" % msvcr_name def_file = os.path.join(sys.prefix, 'libs', def_name) log.info('Building msvcr library: "%s" (from %s)' \ % (out_file, dll_file)) # Generate a symbol definition file from the msvcr dll generate_def(dll_file, def_file) # Create a custom mingw library for the given symbol definitions cmd = ['dlltool', '-d', def_file, '-l', out_file] retcode = subprocess.call(cmd) # Clean up symbol definitions os.remove(def_file) return (not retcode) def build_import_library(): if os.name != 'nt': return arch = get_build_architecture() if arch == 'AMD64': return _build_import_library_amd64() elif arch == 'Intel': return _build_import_library_x86() else: raise ValueError("Unhandled arch %s" % arch) def _check_for_import_lib(): """Check if an import library for the Python runtime already exists.""" major_version, minor_version = tuple(sys.version_info[:2]) # patterns for the file name of the library itself patterns = ['libpython%d%d.a', 'libpython%d%d.dll.a', 'libpython%d.%d.dll.a'] # directory trees that may contain the library stems = [sys.prefix] if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: stems.append(sys.base_prefix) elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: stems.append(sys.real_prefix) # possible subdirectories within those trees where it is placed sub_dirs = ['libs', 'lib'] # generate a list of candidate locations candidates = [] for pat in patterns: filename = pat % (major_version, minor_version) for stem_dir in stems: for folder in sub_dirs: candidates.append(os.path.join(stem_dir, folder, filename)) # test the filesystem to see if we can find any of these for fullname in candidates: if os.path.isfile(fullname): # already exists, in location given return (True, fullname) # needs to be built, preferred location given first return (False, candidates[0]) def _build_import_library_amd64(): out_exists, out_file = _check_for_import_lib() if out_exists: log.debug('Skip building import library: "%s" exists', out_file) return # get the runtime dll for which we are building import library dll_file = find_python_dll() log.info('Building import library (arch=AMD64): "%s" (from %s)' % (out_file, dll_file)) # generate symbol list from this library def_name = "python%d%d.def" % tuple(sys.version_info[:2]) def_file = os.path.join(sys.prefix, 'libs', def_name) generate_def(dll_file, def_file) # generate import library from this symbol list cmd = ['dlltool', '-d', def_file, '-l', out_file] subprocess.check_call(cmd) def _build_import_library_x86(): """ Build the import libraries for Mingw32-gcc on Windows """ out_exists, out_file = _check_for_import_lib() if out_exists: log.debug('Skip building import library: "%s" exists', out_file) return 
lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) lib_file = os.path.join(sys.prefix, 'libs', lib_name) if not os.path.isfile(lib_file): # didn't find library file in virtualenv, try base distribution, too, # and use that instead if found there. for Python 2.7 venvs, the base # directory is in attribute real_prefix instead of base_prefix. if hasattr(sys, 'base_prefix'): base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) elif hasattr(sys, 'real_prefix'): base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) else: base_lib = '' # os.path.isfile('') == False if os.path.isfile(base_lib): lib_file = base_lib else: log.warn('Cannot build import library: "%s" not found', lib_file) return log.info('Building import library (ARCH=x86): "%s"', out_file) from numpy.distutils import lib2def def_name = "python%d%d.def" % tuple(sys.version_info[:2]) def_file = os.path.join(sys.prefix, 'libs', def_name) nm_output = lib2def.getnm( lib2def.DEFAULT_NM + [lib_file], shell=False) dlist, flist = lib2def.parse_nm(nm_output) with open(def_file, 'w') as fid: lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) dll_name = find_python_dll () cmd = ["dlltool", "--dllname", dll_name, "--def", def_file, "--output-lib", out_file] status = subprocess.check_output(cmd) if status: log.warn('Failed to build import library for gcc. Linking will fail.') return #===================================== # Dealing with Visual Studio MANIFESTS #===================================== # Functions to deal with visual studio manifests. Manifest are a mechanism to # enforce strong DLL versioning on windows, and has nothing to do with # distutils MANIFEST. manifests are XML files with version info, and used by # the OS loader; they are necessary when linking against a DLL not in the # system path; in particular, official python 2.6 binary is built against the # MS runtime 9 (the one from VS 2008), which is not available on most windows # systems; python 2.6 installer does install it in the Win SxS (Side by side) # directory, but this requires the manifest for this to work. This is a big # mess, thanks MS for a wonderful system. # XXX: ideally, we should use exactly the same version as used by python. I # submitted a patch to get this version, but it was only included for python # 2.6.1 and above. So for versions below, we use a "best guess". _MSVCRVER_TO_FULLVER = {} if sys.platform == 'win32': try: import msvcrt # I took one version in my SxS directory: no idea if it is the good # one, and we can't retrieve it from python _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 # on Windows XP: _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None) if crt_ver is not None: # Available at least back to Python 3.3 maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups() _MSVCRVER_TO_FULLVER[maj + min] = crt_ver del maj, min del crt_ver except ImportError: # If we are here, means python was not built with MSVC. 
Not sure what # to do in that case: manifest building will fail, but it should not be # used in that case anyway log.warn('Cannot import msvcrt: using manifest will not be possible') def msvc_manifest_xml(maj, min): """Given a major and minor version of the MSVCR, returns the corresponding XML file.""" try: fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] except KeyError: raise ValueError("Version %d,%d of MSVCRT not supported yet" % (maj, min)) from None # Don't be fooled, it looks like an XML, but it is not. In particular, it # should not have any space before starting, and its size should be # divisible by 4, most likely for alignment constraints when the xml is # embedded in the binary... # This template was copied directly from the python 2.6 binary (using # strings.exe from mingw on python.exe). template = textwrap.dedent("""\ <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> <security> <requestedPrivileges> <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel> </requestedPrivileges> </security> </trustInfo> <dependency> <dependentAssembly> <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity> </dependentAssembly> </dependency> </assembly>""") return template % {'fullver': fullver, 'maj': maj, 'min': min} def manifest_rc(name, type='dll'): """Return the rc file used to generate the res file which will be embedded as manifest for given manifest file name, of given type ('dll' or 'exe'). Parameters ---------- name : str name of the manifest file to embed type : str {'dll', 'exe'} type of the binary which will embed the manifest """ if type == 'dll': rctype = 2 elif type == 'exe': rctype = 1 else: raise ValueError("Type %s not supported" % type) return """\ #include "winuser.h" %d RT_MANIFEST %s""" % (rctype, name) def check_embedded_msvcr_match_linked(msver): """msver is the ms runtime version used for the MANIFEST.""" # check msvcr major version are the same for linking and # embedding maj = msvc_runtime_major() if maj: if not maj == int(msver): raise ValueError( "Discrepancy between linked msvcr " \ "(%d) and the one about to be embedded " \ "(%d)" % (int(msver), maj)) def configtest_name(config): base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) return os.path.splitext(base)[0] def manifest_name(config): # Get configest name (including suffix) root = configtest_name(config) exext = config.compiler.exe_extension return root + exext + ".manifest" def rc_name(config): # Get configtest name (including suffix) root = configtest_name(config) return root + ".rc" def generate_manifest(config): msver = get_build_msvc_version() if msver is not None: if msver >= 8: check_embedded_msvcr_match_linked(msver) ma_str, mi_str = str(msver).split('.') # Write the manifest file manxml = msvc_manifest_xml(int(ma_str), int(mi_str)) with open(manifest_name(config), "w") as man: config.temp_files.append(manifest_name(config)) man.write(manxml)
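# --- Illustrative usage sketch (not part of the original module) ---------
# manifest_rc() is pure string formatting, so it can be exercised on any
# platform; the file name "python.exe.manifest" below is only a made-up
# example. This assumes numpy.distutils.mingw32ccompiler imports cleanly on
# the current platform.
from numpy.distutils.mingw32ccompiler import manifest_rc, msvc_manifest_xml

print(manifest_rc("python.exe.manifest", type="exe"))
# Expected output (rctype 1 for an exe):
# #include "winuser.h"
# 1 RT_MANIFEST python.exe.manifest

# msvc_manifest_xml() looks versions up in _MSVCRVER_TO_FULLVER, which is
# only populated when the module is imported on win32; for unknown versions
# it raises ValueError, so guard the call here.
import sys
if sys.platform == "win32":
    print(msvc_manifest_xml(9, 0))  # manifest requesting the VC90 CRT assembly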
22,284
Python
36.39094
184
0.593969
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/unixccompiler.py
""" unixccompiler - can handle very long argument lists for ar. """ import os import sys import subprocess import shlex from distutils.errors import CompileError, DistutilsExecError, LibError from distutils.unixccompiler import UnixCCompiler from numpy.distutils.ccompiler import replace_method from numpy.distutils.misc_util import _commandline_dep_string from numpy.distutils import log # Note that UnixCCompiler._compile appeared in Python 2.3 def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): """Compile a single source files with a Unix-style compiler.""" # HP ad-hoc fix, see ticket 1383 ccomp = self.compiler_so if ccomp[0] == 'aCC': # remove flags that will trigger ANSI-C mode for aCC if '-Ae' in ccomp: ccomp.remove('-Ae') if '-Aa' in ccomp: ccomp.remove('-Aa') # add flags for (almost) sane C++ handling ccomp += ['-AA'] self.compiler_so = ccomp # ensure OPT environment variable is read if 'OPT' in os.environ: # XXX who uses this? from sysconfig import get_config_vars opt = shlex.join(shlex.split(os.environ['OPT'])) gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0])) ccomp_s = shlex.join(self.compiler_so) if opt not in ccomp_s: ccomp_s = ccomp_s.replace(gcv_opt, opt) self.compiler_so = shlex.split(ccomp_s) llink_s = shlex.join(self.linker_so) if opt not in llink_s: self.linker_so = self.linker_so + shlex.split(opt) display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) # gcc style automatic dependencies, outputs a makefile (-MF) that lists # all headers needed by a c file as a side effect of compilation (-MMD) if getattr(self, '_auto_depends', False): deps = ['-MMD', '-MF', obj + '.d'] else: deps = [] try: self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + extra_postargs, display = display) except DistutilsExecError as e: msg = str(e) raise CompileError(msg) from None # add commandline flags to dependency file if deps: # After running the compiler, the file created will be in EBCDIC # but will not be tagged as such. This tags it so the file does not # have multiple different encodings being written to it if sys.platform == 'zos': subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) with open(obj + '.d', 'a') as f: f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) def UnixCCompiler_create_static_lib(self, objects, output_libname, output_dir=None, debug=0, target_lang=None): """ Build a static library in a separate sub-process. Parameters ---------- objects : list or tuple of str List of paths to object files used to build the static library. output_libname : str The library name as an absolute or relative (if `output_dir` is used) path. output_dir : str, optional The path to the output directory. Default is None, in which case the ``output_dir`` attribute of the UnixCCompiler instance. debug : bool, optional This parameter is not used. target_lang : str, optional This parameter is not used. Returns ------- None """ objects, output_dir = self._fix_object_args(objects, output_dir) output_filename = \ self.library_filename(output_libname, output_dir=output_dir) if self._need_link(objects, output_filename): try: # previous .a may be screwed up; best to remove it first # and recreate. 
# Also, ar on OS X doesn't handle updating universal archives os.unlink(output_filename) except OSError: pass self.mkpath(os.path.dirname(output_filename)) tmp_objects = objects + self.objects while tmp_objects: objects = tmp_objects[:50] tmp_objects = tmp_objects[50:] display = '%s: adding %d object files to %s' % ( os.path.basename(self.archiver[0]), len(objects), output_filename) self.spawn(self.archiver + [output_filename] + objects, display = display) # Not many Unices required ranlib anymore -- SunOS 4.x is, I # think the only major Unix that does. Maybe we need some # platform intelligence here to skip ranlib if it's not # needed -- or maybe Python's configure script took care of # it for us, hence the check for leading colon. if self.ranlib: display = '%s:@ %s' % (os.path.basename(self.ranlib[0]), output_filename) try: self.spawn(self.ranlib + [output_filename], display = display) except DistutilsExecError as e: msg = str(e) raise LibError(msg) from None else: log.debug("skipping %s (up-to-date)", output_filename) return replace_method(UnixCCompiler, 'create_static_lib', UnixCCompiler_create_static_lib)
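# --- Illustrative sketch (not part of the original module) ---------------
# The create_static_lib override above feeds the archiver at most 50 object
# files per invocation so that very long argument lists cannot overflow the
# command line. The chunking it relies on boils down to this standalone loop.
def _chunks_of_50(objects):
    """Yield successive batches of at most 50 items, mirroring the loop above."""
    remaining = list(objects)
    while remaining:
        batch, remaining = remaining[:50], remaining[50:]
        yield batch

# Example: 120 dummy object names end up in batches of 50, 50 and 20.
batches = list(_chunks_of_50(f"obj{i}.o" for i in range(120)))
assert [len(b) for b in batches] == [50, 50, 20]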
5,426
Python
37.21831
82
0.60247
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/exec_command.py
""" exec_command Implements exec_command function that is (almost) equivalent to commands.getstatusoutput function but on NT, DOS systems the returned status is actually correct (though, the returned status values may be different by a factor). In addition, exec_command takes keyword arguments for (re-)defining environment variables. Provides functions: exec_command --- execute command in a specified directory and in the modified environment. find_executable --- locate a command using info from environment variable PATH. Equivalent to posix `which` command. Author: Pearu Peterson <[email protected]> Created: 11 January 2003 Requires: Python 2.x Successfully tested on: ======== ============ ================================================= os.name sys.platform comments ======== ============ ================================================= posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 PyCrust 0.9.3, Idle 1.0.2 posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 posix darwin Darwin 7.2.0, Python 2.3 nt win32 Windows Me Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 Python 2.1.1 Idle 0.8 nt win32 Windows 98, Python 2.1.1. Idle 0.8 nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests fail i.e. redefining environment variables may not work. FIXED: don't use cygwin echo! Comment: also `cmd /c echo` will not work but redefining environment variables do work. posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) nt win32 Windows XP, Python 2.3.3 ======== ============ ================================================= Known bugs: * Tests, that send messages to stderr, fail when executed from MSYS prompt because the messages are lost at some point. """ __all__ = ['exec_command', 'find_executable'] import os import sys import subprocess import locale import warnings from numpy.distutils.misc_util import is_sequence, make_temp_file from numpy.distutils import log def filepath_from_subprocess_output(output): """ Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. Inherited from `exec_command`, and possibly incorrect. """ mylocale = locale.getpreferredencoding(False) if mylocale is None: mylocale = 'ascii' output = output.decode(mylocale, errors='replace') output = output.replace('\r\n', '\n') # Another historical oddity if output[-1:] == '\n': output = output[:-1] return output def forward_bytes_to_stdout(val): """ Forward bytes from a subprocess call to the console, without attempting to decode them. The assumption is that the subprocess call already returned bytes in a suitable encoding. 
""" if hasattr(sys.stdout, 'buffer'): # use the underlying binary output if there is one sys.stdout.buffer.write(val) elif hasattr(sys.stdout, 'encoding'): # round-trip the encoding if necessary sys.stdout.write(val.decode(sys.stdout.encoding)) else: # make a best-guess at the encoding sys.stdout.write(val.decode('utf8', errors='replace')) def temp_file_name(): # 2019-01-30, 1.17 warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) fo, name = make_temp_file() fo.close() return name def get_pythonexe(): pythonexe = sys.executable if os.name in ['nt', 'dos']: fdir, fn = os.path.split(pythonexe) fn = fn.upper().replace('PYTHONW', 'PYTHON') pythonexe = os.path.join(fdir, fn) assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) return pythonexe def find_executable(exe, path=None, _cache={}): """Return full path of a executable or None. Symbolic links are not followed. """ key = exe, path try: return _cache[key] except KeyError: pass log.debug('find_executable(%r)' % exe) orig_exe = exe if path is None: path = os.environ.get('PATH', os.defpath) if os.name=='posix': realpath = os.path.realpath else: realpath = lambda a:a if exe.startswith('"'): exe = exe[1:-1] suffixes = [''] if os.name in ['nt', 'dos', 'os2']: fn, ext = os.path.splitext(exe) extra_suffixes = ['.exe', '.com', '.bat'] if ext.lower() not in extra_suffixes: suffixes = extra_suffixes if os.path.isabs(exe): paths = [''] else: paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] for path in paths: fn = os.path.join(path, exe) for s in suffixes: f_ext = fn+s if not os.path.islink(f_ext): f_ext = realpath(f_ext) if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): log.info('Found executable %s' % f_ext) _cache[key] = f_ext return f_ext log.warn('Could not locate executable %s' % orig_exe) return None ############################################################ def _preserve_environment( names ): log.debug('_preserve_environment(%r)' % (names)) env = {name: os.environ.get(name) for name in names} return env def _update_environment( **env ): log.debug('_update_environment(...)') for name, value in env.items(): os.environ[name] = value or '' def exec_command(command, execute_in='', use_shell=None, use_tee=None, _with_python = 1, **env ): """ Return (status,output) of executed command. .. deprecated:: 1.17 Use subprocess.Popen instead Parameters ---------- command : str A concatenated string of executable and arguments. execute_in : str Before running command ``cd execute_in`` and after ``cd -``. use_shell : {bool, None}, optional If True, execute ``sh -c command``. Default None (True) use_tee : {bool, None}, optional If True use tee. Default None (True) Returns ------- res : str Both stdout and stderr messages. Notes ----- On NT, DOS systems the returned status is correct for external commands. Wild cards will not work for non-posix systems or when use_shell=0. 
""" # 2019-01-30, 1.17 warnings.warn('exec_command is deprecated since NumPy v1.17, use ' 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) log.debug('exec_command(%r,%s)' % (command, ','.join(['%s=%r'%kv for kv in env.items()]))) if use_tee is None: use_tee = os.name=='posix' if use_shell is None: use_shell = os.name=='posix' execute_in = os.path.abspath(execute_in) oldcwd = os.path.abspath(os.getcwd()) if __name__[-12:] == 'exec_command': exec_dir = os.path.dirname(os.path.abspath(__file__)) elif os.path.isfile('exec_command.py'): exec_dir = os.path.abspath('.') else: exec_dir = os.path.abspath(sys.argv[0]) if os.path.isfile(exec_dir): exec_dir = os.path.dirname(exec_dir) if oldcwd!=execute_in: os.chdir(execute_in) log.debug('New cwd: %s' % execute_in) else: log.debug('Retaining cwd: %s' % oldcwd) oldenv = _preserve_environment( list(env.keys()) ) _update_environment( **env ) try: st = _exec_command(command, use_shell=use_shell, use_tee=use_tee, **env) finally: if oldcwd!=execute_in: os.chdir(oldcwd) log.debug('Restored cwd to %s' % oldcwd) _update_environment(**oldenv) return st def _exec_command(command, use_shell=None, use_tee = None, **env): """ Internal workhorse for exec_command(). """ if use_shell is None: use_shell = os.name=='posix' if use_tee is None: use_tee = os.name=='posix' if os.name == 'posix' and use_shell: # On POSIX, subprocess always uses /bin/sh, override sh = os.environ.get('SHELL', '/bin/sh') if is_sequence(command): command = [sh, '-c', ' '.join(command)] else: command = [sh, '-c', command] use_shell = False elif os.name == 'nt' and is_sequence(command): # On Windows, join the string for CreateProcess() ourselves as # subprocess does it a bit differently command = ' '.join(_quote_arg(arg) for arg in command) # Inherit environment by default env = env or None try: # universal_newlines is set to False so that communicate() # will return bytes. We need to decode the output ourselves # so that Python will not raise a UnicodeDecodeError when # it encounters an invalid character; rather, we simply replace it proc = subprocess.Popen(command, shell=use_shell, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=False) except OSError: # Return 127, as os.spawn*() and /bin/sh do return 127, '' text, err = proc.communicate() mylocale = locale.getpreferredencoding(False) if mylocale is None: mylocale = 'ascii' text = text.decode(mylocale, errors='replace') text = text.replace('\r\n', '\n') # Another historical oddity if text[-1:] == '\n': text = text[:-1] if use_tee and text: print(text) return proc.returncode, text def _quote_arg(arg): """ Quote the argument for safe use in a shell command line. """ # If there is a quote in the string, assume relevants parts of the # string are already quoted (e.g. '-I"C:\\Program Files\\..."') if '"' not in arg and ' ' in arg: return '"%s"' % arg return arg ############################################################
10,343
Python
31.630915
93
0.571788
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/pathccompiler.py
from distutils.unixccompiler import UnixCCompiler


class PathScaleCCompiler(UnixCCompiler):

    """
    PathScale compiler compatible with a gcc-built Python.
    """

    compiler_type = 'pathcc'
    cc_exe = 'pathcc'
    cxx_exe = 'pathCC'

    def __init__(self, verbose=0, dry_run=0, force=0):
        UnixCCompiler.__init__(self, verbose, dry_run, force)
        cc_compiler = self.cc_exe
        cxx_compiler = self.cxx_exe
        self.set_executables(compiler=cc_compiler,
                             compiler_so=cc_compiler,
                             compiler_cxx=cxx_compiler,
                             linker_exe=cc_compiler,
                             linker_so=cc_compiler + ' -shared')
713
Python
31.454544
64
0.562412
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/log.py
# Colored log import sys from distutils.log import * # noqa: F403 from distutils.log import Log as old_Log from distutils.log import _global_log from numpy.distutils.misc_util import (red_text, default_text, cyan_text, green_text, is_sequence, is_string) def _fix_args(args,flag=1): if is_string(args): return args.replace('%', '%%') if flag and is_sequence(args): return tuple([_fix_args(a, flag=0) for a in args]) return args class Log(old_Log): def _log(self, level, msg, args): if level >= self.threshold: if args: msg = msg % _fix_args(args) if 0: if msg.startswith('copying ') and msg.find(' -> ') != -1: return if msg.startswith('byte-compiling '): return print(_global_color_map[level](msg)) sys.stdout.flush() def good(self, msg, *args): """ If we log WARN messages, log this message as a 'nice' anti-warn message. """ if WARN >= self.threshold: if args: print(green_text(msg % _fix_args(args))) else: print(green_text(msg)) sys.stdout.flush() _global_log.__class__ = Log good = _global_log.good def set_threshold(level, force=False): prev_level = _global_log.threshold if prev_level > DEBUG or force: # If we're running at DEBUG, don't change the threshold, as there's # likely a good reason why we're running at this level. _global_log.threshold = level if level <= DEBUG: info('set_threshold: setting threshold to DEBUG level,' ' it can be changed only with force argument') else: info('set_threshold: not changing threshold from DEBUG level' ' %s to %s' % (prev_level, level)) return prev_level def get_threshold(): return _global_log.threshold def set_verbosity(v, force=False): prev_level = _global_log.threshold if v < 0: set_threshold(ERROR, force) elif v == 0: set_threshold(WARN, force) elif v == 1: set_threshold(INFO, force) elif v >= 2: set_threshold(DEBUG, force) return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) _global_color_map = { DEBUG:cyan_text, INFO:default_text, WARN:red_text, ERROR:red_text, FATAL:red_text } # don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. set_verbosity(0, force=True) _error = error _warn = warn _info = info _debug = debug def error(msg, *a, **kw): _error(f"ERROR: {msg}", *a, **kw) def warn(msg, *a, **kw): _warn(f"WARN: {msg}", *a, **kw) def info(msg, *a, **kw): _info(f"INFO: {msg}", *a, **kw) def debug(msg, *a, **kw): _debug(f"DEBUG: {msg}", *a, **kw)
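# --- Illustrative usage sketch (not part of the original module) ---------
# The module-level error/warn/info/debug wrappers prefix messages with their
# level name, and set_verbosity() maps -1/0/1/2 onto the ERROR/WARN/INFO/DEBUG
# thresholds, returning the previous verbosity.
from numpy.distutils import log

prev = log.set_verbosity(2)         # DEBUG threshold; returns previous verbosity
log.info("building %s", "demo")     # prints "INFO: building demo"
log.warn("something odd")           # prints "WARN: something odd" in red
log.set_verbosity(prev, force=True)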
2,879
Python
24.714285
78
0.5686
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/npy_pkg_config.py
import sys import re import os from configparser import RawConfigParser __all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', 'read_config', 'parse_flags'] _VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') class FormatError(OSError): """ Exception thrown when there is a problem parsing a configuration file. """ def __init__(self, msg): self.msg = msg def __str__(self): return self.msg class PkgNotFound(OSError): """Exception raised when a package can not be located.""" def __init__(self, msg): self.msg = msg def __str__(self): return self.msg def parse_flags(line): """ Parse a line from a config file containing compile flags. Parameters ---------- line : str A single line containing one or more compile flags. Returns ------- d : dict Dictionary of parsed flags, split into relevant categories. These categories are the keys of `d`: * 'include_dirs' * 'library_dirs' * 'libraries' * 'macros' * 'ignored' """ d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], 'macros': [], 'ignored': []} flags = (' ' + line).split(' -') for flag in flags: flag = '-' + flag if len(flag) > 0: if flag.startswith('-I'): d['include_dirs'].append(flag[2:].strip()) elif flag.startswith('-L'): d['library_dirs'].append(flag[2:].strip()) elif flag.startswith('-l'): d['libraries'].append(flag[2:].strip()) elif flag.startswith('-D'): d['macros'].append(flag[2:].strip()) else: d['ignored'].append(flag) return d def _escape_backslash(val): return val.replace('\\', '\\\\') class LibraryInfo: """ Object containing build information about a library. Parameters ---------- name : str The library name. description : str Description of the library. version : str Version string. sections : dict The sections of the configuration file for the library. The keys are the section headers, the values the text under each header. vars : class instance A `VariableSet` instance, which contains ``(name, value)`` pairs for variables defined in the configuration file for the library. requires : sequence, optional The required libraries for the library to be installed. Notes ----- All input parameters (except "sections" which is a method) are available as attributes of the same name. """ def __init__(self, name, description, version, sections, vars, requires=None): self.name = name self.description = description if requires: self.requires = requires else: self.requires = [] self.version = version self._sections = sections self.vars = vars def sections(self): """ Return the section headers of the config file. Parameters ---------- None Returns ------- keys : list of str The list of section headers. """ return list(self._sections.keys()) def cflags(self, section="default"): val = self.vars.interpolate(self._sections[section]['cflags']) return _escape_backslash(val) def libs(self, section="default"): val = self.vars.interpolate(self._sections[section]['libs']) return _escape_backslash(val) def __str__(self): m = ['Name: %s' % self.name, 'Description: %s' % self.description] if self.requires: m.append('Requires:') else: m.append('Requires: %s' % ",".join(self.requires)) m.append('Version: %s' % self.version) return "\n".join(m) class VariableSet: """ Container object for the variables defined in a config file. `VariableSet` can be used as a plain dictionary, with the variable names as keys. Parameters ---------- d : dict Dict of items in the "variables" section of the configuration file. 
""" def __init__(self, d): self._raw_data = dict([(k, v) for k, v in d.items()]) self._re = {} self._re_sub = {} self._init_parse() def _init_parse(self): for k, v in self._raw_data.items(): self._init_parse_var(k, v) def _init_parse_var(self, name, value): self._re[name] = re.compile(r'\$\{%s\}' % name) self._re_sub[name] = value def interpolate(self, value): # Brute force: we keep interpolating until there is no '${var}' anymore # or until interpolated string is equal to input string def _interpolate(value): for k in self._re.keys(): value = self._re[k].sub(self._re_sub[k], value) return value while _VAR.search(value): nvalue = _interpolate(value) if nvalue == value: break value = nvalue return value def variables(self): """ Return the list of variable names. Parameters ---------- None Returns ------- names : list of str The names of all variables in the `VariableSet` instance. """ return list(self._raw_data.keys()) # Emulate a dict to set/get variables values def __getitem__(self, name): return self._raw_data[name] def __setitem__(self, name, value): self._raw_data[name] = value self._init_parse_var(name, value) def parse_meta(config): if not config.has_section('meta'): raise FormatError("No meta section found !") d = dict(config.items('meta')) for k in ['name', 'description', 'version']: if not k in d: raise FormatError("Option %s (section [meta]) is mandatory, " "but not found" % k) if not 'requires' in d: d['requires'] = [] return d def parse_variables(config): if not config.has_section('variables'): raise FormatError("No variables section found !") d = {} for name, value in config.items("variables"): d[name] = value return VariableSet(d) def parse_sections(config): return meta_d, r def pkg_to_filename(pkg_name): return "%s.ini" % pkg_name def parse_config(filename, dirs=None): if dirs: filenames = [os.path.join(d, filename) for d in dirs] else: filenames = [filename] config = RawConfigParser() n = config.read(filenames) if not len(n) >= 1: raise PkgNotFound("Could not find file(s) %s" % str(filenames)) # Parse meta and variables sections meta = parse_meta(config) vars = {} if config.has_section('variables'): for name, value in config.items("variables"): vars[name] = _escape_backslash(value) # Parse "normal" sections secs = [s for s in config.sections() if not s in ['meta', 'variables']] sections = {} requires = {} for s in secs: d = {} if config.has_option(s, "requires"): requires[s] = config.get(s, 'requires') for name, value in config.items(s): d[name] = value sections[s] = d return meta, vars, sections, requires def _read_config_imp(filenames, dirs=None): def _read_config(f): meta, vars, sections, reqs = parse_config(f, dirs) # recursively add sections and variables of required libraries for rname, rvalue in reqs.items(): nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) # Update var dict for variables not in 'top' config file for k, v in nvars.items(): if not k in vars: vars[k] = v # Update sec dict for oname, ovalue in nsections[rname].items(): if ovalue: sections[rname][oname] += ' %s' % ovalue return meta, vars, sections, reqs meta, vars, sections, reqs = _read_config(filenames) # FIXME: document this. If pkgname is defined in the variables section, and # there is no pkgdir variable defined, pkgdir is automatically defined to # the path of pkgname. 
This requires the package to be imported to work if not 'pkgdir' in vars and "pkgname" in vars: pkgname = vars["pkgname"] if not pkgname in sys.modules: raise ValueError("You should import %s to get information on %s" % (pkgname, meta["name"])) mod = sys.modules[pkgname] vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) return LibraryInfo(name=meta["name"], description=meta["description"], version=meta["version"], sections=sections, vars=VariableSet(vars)) # Trivial cache to cache LibraryInfo instances creation. To be really # efficient, the cache should be handled in read_config, since a same file can # be parsed many time outside LibraryInfo creation, but I doubt this will be a # problem in practice _CACHE = {} def read_config(pkgname, dirs=None): """ Return library info for a package from its configuration file. Parameters ---------- pkgname : str Name of the package (should match the name of the .ini file, without the extension, e.g. foo for the file foo.ini). dirs : sequence, optional If given, should be a sequence of directories - usually including the NumPy base directory - where to look for npy-pkg-config files. Returns ------- pkginfo : class instance The `LibraryInfo` instance containing the build information. Raises ------ PkgNotFound If the package is not found. See Also -------- misc_util.get_info, misc_util.get_pkg_info Examples -------- >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') >>> type(npymath_info) <class 'numpy.distutils.npy_pkg_config.LibraryInfo'> >>> print(npymath_info) Name: npymath Description: Portable, core math library implementing C99 standard Requires: Version: 0.1 #random """ try: return _CACHE[pkgname] except KeyError: v = _read_config_imp(pkg_to_filename(pkgname), dirs) _CACHE[pkgname] = v return v # TODO: # - implements version comparison (modversion + atleast) # pkg-config simple emulator - useful for debugging, and maybe later to query # the system if __name__ == '__main__': from optparse import OptionParser import glob parser = OptionParser() parser.add_option("--cflags", dest="cflags", action="store_true", help="output all preprocessor and compiler flags") parser.add_option("--libs", dest="libs", action="store_true", help="output all linker flags") parser.add_option("--use-section", dest="section", help="use this section instead of default for options") parser.add_option("--version", dest="version", action="store_true", help="output version") parser.add_option("--atleast-version", dest="min_version", help="Minimal version") parser.add_option("--list-all", dest="list_all", action="store_true", help="Minimal version") parser.add_option("--define-variable", dest="define_variable", help="Replace variable with the given value") (options, args) = parser.parse_args(sys.argv) if len(args) < 2: raise ValueError("Expect package name on the command line:") if options.list_all: files = glob.glob("*.ini") for f in files: info = read_config(f) print("%s\t%s - %s" % (info.name, info.name, info.description)) pkg_name = args[1] d = os.environ.get('NPY_PKG_CONFIG_PATH') if d: info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d]) else: info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.']) if options.section: section = options.section else: section = "default" if options.define_variable: m = re.search(r'([\S]+)=([\S]+)', options.define_variable) if not m: raise ValueError("--define-variable option should be of " "the form --define-variable=foo=bar") else: name = m.group(1) value = m.group(2) 
info.vars[name] = value if options.cflags: print(info.cflags(section)) if options.libs: print(info.libs(section)) if options.version: print(info.version) if options.min_version: print(info.version >= options.min_version)
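# --- Illustrative usage sketch (not part of the original module) ---------
# parse_flags() splits a compiler/linker command line into categories, and
# VariableSet interpolates ${var} references recursively until no reference
# remains. The flag values and variable names below are made-up examples.
from numpy.distutils.npy_pkg_config import parse_flags, VariableSet

d = parse_flags("-I/usr/include -DFOO -L/usr/lib -lm")
print(d["include_dirs"], d["macros"], d["library_dirs"], d["libraries"])
# ['/usr/include'] ['FOO'] ['/usr/lib'] ['m']

vs = VariableSet({"prefix": "/usr", "libdir": "${prefix}/lib"})
print(vs.interpolate("-L${libdir} -lnpymath"))
# -L/usr/lib -lnpymath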
12,972
Python
28.618721
82
0.577629
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/numpy_distribution.py
# XXX: Handle setuptools ? from distutils.core import Distribution # This class is used because we add new files (sconscripts, and so on) with the # scons command class NumpyDistribution(Distribution): def __init__(self, attrs = None): # A list of (sconscripts, pre_hook, post_hook, src, parent_names) self.scons_data = [] # A list of installable libraries self.installed_libraries = [] # A dict of pkg_config files to generate/install self.installed_pkg_config = {} Distribution.__init__(self, attrs) def has_scons_scripts(self): return bool(self.scons_data)
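# --- Illustrative usage sketch (not part of the original module) ---------
# The extra attributes start out empty, and has_scons_scripts() simply
# reports whether any sconscript entries have been registered. The tuple
# appended below is a placeholder following the documented field order.
from numpy.distutils.numpy_distribution import NumpyDistribution

dist = NumpyDistribution()
print(dist.has_scons_scripts())                              # False
dist.scons_data.append(("SConscript", None, None, None, None))
print(dist.has_scons_scripts())                              # True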
634
Python
34.277776
79
0.660883
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/system_info.py
#!/usr/bin/env python3 """ This file defines a set of system_info classes for getting information about various resources (libraries, library directories, include directories, etc.) in the system. Usage: info_dict = get_info(<name>) where <name> is a string 'atlas','x11','fftw','lapack','blas', 'lapack_src', 'blas_src', etc. For a complete list of allowed names, see the definition of get_info() function below. Returned info_dict is a dictionary which is compatible with distutils.setup keyword arguments. If info_dict == {}, then the asked resource is not available (system_info could not find it). Several *_info classes specify an environment variable to specify the locations of software. When setting the corresponding environment variable to 'None' then the software will be ignored, even when it is available in system. Global parameters: system_info.search_static_first - search static libraries (.a) in precedence to shared ones (.so, .sl) if enabled. system_info.verbosity - output the results to stdout if enabled. The file 'site.cfg' is looked for in 1) Directory of main setup.py file being run. 2) Home directory of user running the setup.py file as ~/.numpy-site.cfg 3) System wide directory (location of this file...) The first one found is used to get system configuration options The format is that used by ConfigParser (i.e., Windows .INI style). The section ALL is not intended for general use. Appropriate defaults are used if nothing is specified. The order of finding the locations of resources is the following: 1. environment variable 2. section in site.cfg 3. DEFAULT section in site.cfg 4. System default search paths (see ``default_*`` variables below). Only the first complete match is returned. Currently, the following classes are available, along with their section names: Numeric_info:Numeric _numpy_info:Numeric _pkg_config_info:None accelerate_info:accelerate agg2_info:agg2 amd_info:amd atlas_3_10_blas_info:atlas atlas_3_10_blas_threads_info:atlas atlas_3_10_info:atlas atlas_3_10_threads_info:atlas atlas_blas_info:atlas atlas_blas_threads_info:atlas atlas_info:atlas atlas_threads_info:atlas blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix) blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS) blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix) blas_info:blas blas_mkl_info:mkl blas_opt_info:ALL # usage recommended blas_src_info:blas_src blis_info:blis boost_python_info:boost_python dfftw_info:fftw dfftw_threads_info:fftw djbfft_info:djbfft f2py_info:ALL fft_opt_info:ALL fftw2_info:fftw fftw3_info:fftw3 fftw_info:fftw fftw_threads_info:fftw flame_info:flame freetype2_info:freetype2 gdk_2_info:gdk_2 gdk_info:gdk gdk_pixbuf_2_info:gdk_pixbuf_2 gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2 gdk_x11_2_info:gdk_x11_2 gtkp_2_info:gtkp_2 gtkp_x11_2_info:gtkp_x11_2 lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix) lapack_atlas_3_10_info:atlas lapack_atlas_3_10_threads_info:atlas lapack_atlas_info:atlas lapack_atlas_threads_info:atlas lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK) lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix) lapack_info:lapack lapack_mkl_info:mkl lapack_opt_info:ALL # usage recommended lapack_src_info:lapack_src mkl_info:mkl numarray_info:numarray numerix_info:numerix numpy_info:numpy openblas64__info:openblas64_ openblas64__lapack_info:openblas64_ openblas_clapack_info:openblas 
openblas_ilp64_info:openblas_ilp64 openblas_ilp64_lapack_info:openblas_ilp64 openblas_info:openblas openblas_lapack_info:openblas sfftw_info:fftw sfftw_threads_info:fftw system_info:ALL umfpack_info:umfpack wx_info:wx x11_info:x11 xft_info:xft Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER and NPY_LAPACK_ORDER environment variables to determine the order in which specific BLAS and LAPACK libraries are searched for. This search (or autodetection) can be bypassed by defining the environment variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the exact linker flags to use (language will be set to F77). Building against Netlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK implementations at runtime. If using this to build NumPy itself, it is recommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a CBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized otherwise). Example: ---------- [DEFAULT] # default section library_dirs = /usr/lib:/usr/local/lib:/opt/lib include_dirs = /usr/include:/usr/local/include:/opt/include src_dirs = /usr/local/src:/opt/src # search static libraries (.a) in preference to shared ones (.so) search_static_first = 0 [fftw] libraries = rfftw, fftw [atlas] library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas # for overriding the names of the atlas libraries libraries = lapack, f77blas, cblas, atlas [x11] library_dirs = /usr/X11R6/lib include_dirs = /usr/X11R6/include ---------- Note that the ``libraries`` key is the default setting for libraries. Authors: Pearu Peterson <[email protected]>, February 2002 David M. Cooke <[email protected]>, April 2002 Copyright 2002 Pearu Peterson all rights reserved, Pearu Peterson <[email protected]> Permission to use, modify, and distribute this software is given under the terms of the NumPy (BSD style) license. See LICENSE.txt that came with this distribution for specifics. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ import sys import os import re import copy import warnings import subprocess import textwrap from glob import glob from functools import reduce from configparser import NoOptionError from configparser import RawConfigParser as ConfigParser # It seems that some people are importing ConfigParser from here so is # good to keep its class name. Use of RawConfigParser is needed in # order to be able to load path names with percent in them, like # `feature%2Fcool` which is common on git flow branch names. 
from distutils.errors import DistutilsError from distutils.dist import Distribution import sysconfig from numpy.distutils import log from distutils.util import get_platform from numpy.distutils.exec_command import ( find_executable, filepath_from_subprocess_output, ) from numpy.distutils.misc_util import (is_sequence, is_string, get_shared_lib_extension) from numpy.distutils.command.config import config as cmd_config from numpy.distutils import customized_ccompiler as _customized_ccompiler from numpy.distutils import _shell_utils import distutils.ccompiler import tempfile import shutil __all__ = ['system_info'] # Determine number of bits import platform _bits = {'32bit': 32, '64bit': 64} platform_bits = _bits[platform.architecture()[0]] global_compiler = None def customized_ccompiler(): global global_compiler if not global_compiler: global_compiler = _customized_ccompiler() return global_compiler def _c_string_literal(s): """ Convert a python string into a literal suitable for inclusion into C code """ # only these three characters are forbidden in C strings s = s.replace('\\', r'\\') s = s.replace('"', r'\"') s = s.replace('\n', r'\n') return '"{}"'.format(s) def libpaths(paths, bits): """Return a list of library paths valid on 32 or 64 bit systems. Inputs: paths : sequence A sequence of strings (typically paths) bits : int An integer, the only valid values are 32 or 64. A ValueError exception is raised otherwise. Examples: Consider a list of directories >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] For a 32-bit platform, this is already valid: >>> np.distutils.system_info.libpaths(paths,32) ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] On 64 bits, we prepend the '64' postfix >>> np.distutils.system_info.libpaths(paths,64) ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', '/usr/lib64', '/usr/lib'] """ if bits not in (32, 64): raise ValueError("Invalid bit size in libpaths: 32 or 64 only") # Handle 32bit case if bits == 32: return paths # Handle 64bit case out = [] for p in paths: out.extend([p + '64', p]) return out if sys.platform == 'win32': default_lib_dirs = ['C:\\', os.path.join(sysconfig.get_config_var('exec_prefix'), 'libs')] default_runtime_dirs = [] default_include_dirs = [] default_src_dirs = ['.'] default_x11_lib_dirs = [] default_x11_include_dirs = [] _include_dirs = [ 'include', 'include/suitesparse', ] _lib_dirs = [ 'lib', ] _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] def add_system_root(library_root): """Add a package manager root to the include directories""" global default_lib_dirs global default_include_dirs library_root = os.path.normpath(library_root) default_lib_dirs.extend( os.path.join(library_root, d) for d in _lib_dirs) default_include_dirs.extend( os.path.join(library_root, d) for d in _include_dirs) # VCpkg is the de-facto package manager on windows for C/C++ # libraries. If it is on the PATH, then we append its paths here. 
vcpkg = shutil.which('vcpkg') if vcpkg: vcpkg_dir = os.path.dirname(vcpkg) if platform.architecture()[0] == '32bit': specifier = 'x86' else: specifier = 'x64' vcpkg_installed = os.path.join(vcpkg_dir, 'installed') for vcpkg_root in [ os.path.join(vcpkg_installed, specifier + '-windows'), os.path.join(vcpkg_installed, specifier + '-windows-static'), ]: add_system_root(vcpkg_root) # Conda is another popular package manager that provides libraries conda = shutil.which('conda') if conda: conda_dir = os.path.dirname(conda) add_system_root(os.path.join(conda_dir, '..', 'Library')) add_system_root(os.path.join(conda_dir, 'Library')) else: default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', '/opt/local/lib', '/sw/lib'], platform_bits) default_runtime_dirs = [] default_include_dirs = ['/usr/local/include', '/opt/include', # path of umfpack under macports '/opt/local/include/ufsparse', '/opt/local/include', '/sw/include', '/usr/include/suitesparse'] default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'], platform_bits) default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] if os.path.exists('/usr/lib/X11'): globbed_x11_dir = glob('/usr/lib/*/libX11.so') if globbed_x11_dir: x11_so_dir = os.path.split(globbed_x11_dir[0])[0] default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) default_x11_include_dirs.extend(['/usr/lib/X11/include', '/usr/include/X11']) with open(os.devnull, 'w') as tmp: try: p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, stderr=tmp) except (OSError, DistutilsError): # OSError if gcc is not installed, or SandboxViolation (DistutilsError # subclass) if an old setuptools bug is triggered (see gh-3160). 
pass else: triplet = str(p.communicate()[0].decode().strip()) if p.returncode == 0: # gcc supports the "-print-multiarch" option default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] default_lib_dirs += [os.path.join("/usr/lib/", triplet)] if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) default_include_dirs.append(os.path.join(sys.prefix, 'include')) default_src_dirs.append(os.path.join(sys.prefix, 'src')) default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] so_ext = get_shared_lib_extension() def get_standard_file(fname): """Returns a list of files named 'fname' from 1) System-wide directory (directory-location of this module) 2) Users HOME directory (os.environ['HOME']) 3) Local directory """ # System-wide file filenames = [] try: f = __file__ except NameError: f = sys.argv[0] sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], fname) if os.path.isfile(sysfile): filenames.append(sysfile) # Home directory # And look for the user config file try: f = os.path.expanduser('~') except KeyError: pass else: user_file = os.path.join(f, fname) if os.path.isfile(user_file): filenames.append(user_file) # Local file if os.path.isfile(fname): filenames.append(os.path.abspath(fname)) return filenames def _parse_env_order(base_order, env): """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order` This method will sequence the environment variable and check for their individual elements in `base_order`. The items in the environment variable may be negated via '^item' or '!itema,itemb'. It must start with ^/! to negate all options. 
Raises ------ ValueError: for mixed negated and non-negated orders or multiple negated orders Parameters ---------- base_order : list of str the base list of orders env : str the environment variable to be parsed, if none is found, `base_order` is returned Returns ------- allow_order : list of str allowed orders in lower-case unknown_order : list of str for values not overlapping with `base_order` """ order_str = os.environ.get(env, None) # ensure all base-orders are lower-case (for easier comparison) base_order = [order.lower() for order in base_order] if order_str is None: return base_order, [] neg = order_str.startswith('^') or order_str.startswith('!') # Check format order_str_l = list(order_str) sum_neg = order_str_l.count('^') + order_str_l.count('!') if neg: if sum_neg > 1: raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}") # remove prefix order_str = order_str[1:] elif sum_neg > 0: raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}") # Split and lower case orders = order_str.lower().split(',') # to inform callee about non-overlapping elements unknown_order = [] # if negated, we have to remove from the order if neg: allow_order = base_order.copy() for order in orders: if not order: continue if order not in base_order: unknown_order.append(order) continue if order in allow_order: allow_order.remove(order) else: allow_order = [] for order in orders: if not order: continue if order not in base_order: unknown_order.append(order) continue if order not in allow_order: allow_order.append(order) return allow_order, unknown_order def get_info(name, notfound_action=0): """ notfound_action: 0 - do nothing 1 - display warning message 2 - raise error """ cl = {'armpl': armpl_info, 'blas_armpl': blas_armpl_info, 'lapack_armpl': lapack_armpl_info, 'fftw3_armpl': fftw3_armpl_info, 'atlas': atlas_info, # use lapack_opt or blas_opt instead 'atlas_threads': atlas_threads_info, # ditto 'atlas_blas': atlas_blas_info, 'atlas_blas_threads': atlas_blas_threads_info, 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto 'atlas_3_10_blas': atlas_3_10_blas_info, 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto 'flame': flame_info, # use lapack_opt instead 'mkl': mkl_info, # openblas which may or may not have embedded lapack 'openblas': openblas_info, # use blas_opt instead # openblas with embedded lapack 'openblas_lapack': openblas_lapack_info, # use blas_opt instead 'openblas_clapack': openblas_clapack_info, # use blas_opt instead 'blis': blis_info, # use blas_opt instead 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead 'blas_mkl': blas_mkl_info, # use blas_opt instead 'accelerate': accelerate_info, # use blas_opt instead 'openblas64_': openblas64__info, 'openblas64__lapack': openblas64__lapack_info, 'openblas_ilp64': openblas_ilp64_info, 'openblas_ilp64_lapack': openblas_ilp64_lapack_info, 'x11': x11_info, 'fft_opt': fft_opt_info, 'fftw': fftw_info, 'fftw2': fftw2_info, 'fftw3': fftw3_info, 'dfftw': dfftw_info, 'sfftw': sfftw_info, 'fftw_threads': fftw_threads_info, 'dfftw_threads': dfftw_threads_info, 'sfftw_threads': sfftw_threads_info, 'djbfft': djbfft_info, 
'blas': blas_info, # use blas_opt instead 'lapack': lapack_info, # use lapack_opt instead 'lapack_src': lapack_src_info, 'blas_src': blas_src_info, 'numpy': numpy_info, 'f2py': f2py_info, 'Numeric': Numeric_info, 'numeric': Numeric_info, 'numarray': numarray_info, 'numerix': numerix_info, 'lapack_opt': lapack_opt_info, 'lapack_ilp64_opt': lapack_ilp64_opt_info, 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info, 'lapack64__opt': lapack64__opt_info, 'blas_opt': blas_opt_info, 'blas_ilp64_opt': blas_ilp64_opt_info, 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info, 'blas64__opt': blas64__opt_info, 'boost_python': boost_python_info, 'agg2': agg2_info, 'wx': wx_info, 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, 'gdk_pixbuf_2': gdk_pixbuf_2_info, 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, 'gdk': gdk_info, 'gdk_2': gdk_2_info, 'gdk-2.0': gdk_2_info, 'gdk_x11_2': gdk_x11_2_info, 'gdk-x11-2.0': gdk_x11_2_info, 'gtkp_x11_2': gtkp_x11_2_info, 'gtk+-x11-2.0': gtkp_x11_2_info, 'gtkp_2': gtkp_2_info, 'gtk+-2.0': gtkp_2_info, 'xft': xft_info, 'freetype2': freetype2_info, 'umfpack': umfpack_info, 'amd': amd_info, }.get(name.lower(), system_info) return cl().get_info(notfound_action) class NotFoundError(DistutilsError): """Some third-party program or library is not found.""" class AliasedOptionError(DistutilsError): """ Aliases entries in config files should not be existing. In section '{section}' we found multiple appearances of options {options}.""" class AtlasNotFoundError(NotFoundError): """ Atlas (http://github.com/math-atlas/math-atlas) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [atlas]) or by setting the ATLAS environment variable.""" class FlameNotFoundError(NotFoundError): """ FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [flame]).""" class LapackNotFoundError(NotFoundError): """ Lapack (http://www.netlib.org/lapack/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [lapack]) or by setting the LAPACK environment variable.""" class LapackSrcNotFoundError(LapackNotFoundError): """ Lapack (http://www.netlib.org/lapack/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [lapack_src]) or by setting the LAPACK_SRC environment variable.""" class LapackILP64NotFoundError(NotFoundError): """ 64-bit Lapack libraries not found. Known libraries in numpy/distutils/site.cfg file are: openblas64_, openblas_ilp64 """ class BlasOptNotFoundError(NotFoundError): """ Optimized (vendor) Blas libraries are not found. Falls back to netlib Blas library which has worse performance. A better performance should be easily gained by switching Blas library.""" class BlasNotFoundError(NotFoundError): """ Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable.""" class BlasILP64NotFoundError(NotFoundError): """ 64-bit Blas libraries not found. Known libraries in numpy/distutils/site.cfg file are: openblas64_, openblas_ilp64 """ class BlasSrcNotFoundError(BlasNotFoundError): """ Blas (http://www.netlib.org/blas/) sources not found. 
Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable.""" class FFTWNotFoundError(NotFoundError): """ FFTW (http://www.fftw.org/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [fftw]) or by setting the FFTW environment variable.""" class DJBFFTNotFoundError(NotFoundError): """ DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [djbfft]) or by setting the DJBFFT environment variable.""" class NumericNotFoundError(NotFoundError): """ Numeric (https://www.numpy.org/) module not found. Get it from above location, install it, and retry setup.py.""" class X11NotFoundError(NotFoundError): """X11 libraries not found.""" class UmfpackNotFoundError(NotFoundError): """ UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [umfpack]) or by setting the UMFPACK environment variable.""" class system_info: """ get_info() is the only public method. Don't use others. """ dir_env_var = None # XXX: search_static_first is disabled by default, may disappear in # future unless it is proved to be useful. search_static_first = 0 # The base-class section name is a random word "ALL" and is not really # intended for general use. It cannot be None nor can it be DEFAULT as # these break the ConfigParser. See gh-15338 section = 'ALL' saved_results = {} notfounderror = NotFoundError def __init__(self, default_lib_dirs=default_lib_dirs, default_include_dirs=default_include_dirs, ): self.__class__.info = {} self.local_prefixes = [] defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), 'include_dirs': os.pathsep.join(default_include_dirs), 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), 'rpath': '', 'src_dirs': os.pathsep.join(default_src_dirs), 'search_static_first': str(self.search_static_first), 'extra_compile_args': '', 'extra_link_args': ''} self.cp = ConfigParser(defaults) self.files = [] self.files.extend(get_standard_file('.numpy-site.cfg')) self.files.extend(get_standard_file('site.cfg')) self.parse_config_files() if self.section is not None: self.search_static_first = self.cp.getboolean( self.section, 'search_static_first') assert isinstance(self.search_static_first, int) def parse_config_files(self): self.cp.read(self.files) if not self.cp.has_section(self.section): if self.section is not None: self.cp.add_section(self.section) def calc_libraries_info(self): libs = self.get_libraries() dirs = self.get_lib_dirs() # The extensions use runtime_library_dirs r_dirs = self.get_runtime_lib_dirs() # Intrinsic distutils use rpath, we simply append both entries # as though they were one entry r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) info = {} for lib in libs: i = self.check_libs(dirs, [lib]) if i is not None: dict_append(info, **i) else: log.info('Library %s was not found. 
Ignoring' % (lib)) if r_dirs: i = self.check_libs(r_dirs, [lib]) if i is not None: # Swap library keywords found to runtime_library_dirs # the libraries are insisting on the user having defined # them using the library_dirs, and not necessarily by # runtime_library_dirs del i['libraries'] i['runtime_library_dirs'] = i.pop('library_dirs') dict_append(info, **i) else: log.info('Runtime library %s was not found. Ignoring' % (lib)) return info def set_info(self, **info): if info: lib_info = self.calc_libraries_info() dict_append(info, **lib_info) # Update extra information extra_info = self.calc_extra_info() dict_append(info, **extra_info) self.saved_results[self.__class__.__name__] = info def get_option_single(self, *options): """ Ensure that only one of `options` are found in the section Parameters ---------- *options : list of str a list of options to be found in the section (``self.section``) Returns ------- str : the option that is uniquely found in the section Raises ------ AliasedOptionError : in case more than one of the options are found """ found = [self.cp.has_option(self.section, opt) for opt in options] if sum(found) == 1: return options[found.index(True)] elif sum(found) == 0: # nothing is found anyways return options[0] # Else we have more than 1 key found if AliasedOptionError.__doc__ is None: raise AliasedOptionError() raise AliasedOptionError(AliasedOptionError.__doc__.format( section=self.section, options='[{}]'.format(', '.join(options)))) def has_info(self): return self.__class__.__name__ in self.saved_results def calc_extra_info(self): """ Updates the information in the current information with respect to these flags: extra_compile_args extra_link_args """ info = {} for key in ['extra_compile_args', 'extra_link_args']: # Get values opt = self.cp.get(self.section, key) opt = _shell_utils.NativeParser.split(opt) if opt: tmp = {key: opt} dict_append(info, **tmp) return info def get_info(self, notfound_action=0): """ Return a dictionary with items that are compatible with numpy.distutils.setup keyword arguments. """ flag = 0 if not self.has_info(): flag = 1 log.info(self.__class__.__name__ + ':') if hasattr(self, 'calc_info'): self.calc_info() if notfound_action: if not self.has_info(): if notfound_action == 1: warnings.warn(self.notfounderror.__doc__, stacklevel=2) elif notfound_action == 2: raise self.notfounderror(self.notfounderror.__doc__) else: raise ValueError(repr(notfound_action)) if not self.has_info(): log.info(' NOT AVAILABLE') self.set_info() else: log.info(' FOUND:') res = self.saved_results.get(self.__class__.__name__) if log.get_threshold() <= log.INFO and flag: for k, v in res.items(): v = str(v) if k in ['sources', 'libraries'] and len(v) > 270: v = v[:120] + '...\n...\n...' 
+ v[-120:] log.info(' %s = %s', k, v) log.info('') return copy.deepcopy(res) def get_paths(self, section, key): dirs = self.cp.get(section, key).split(os.pathsep) env_var = self.dir_env_var if env_var: if is_sequence(env_var): e0 = env_var[-1] for e in env_var: if e in os.environ: e0 = e break if not env_var[0] == e0: log.info('Setting %s=%s' % (env_var[0], e0)) env_var = e0 if env_var and env_var in os.environ: d = os.environ[env_var] if d == 'None': log.info('Disabled %s: %s', self.__class__.__name__, '(%s is None)' % (env_var,)) return [] if os.path.isfile(d): dirs = [os.path.dirname(d)] + dirs l = getattr(self, '_lib_names', []) if len(l) == 1: b = os.path.basename(d) b = os.path.splitext(b)[0] if b[:3] == 'lib': log.info('Replacing _lib_names[0]==%r with %r' \ % (self._lib_names[0], b[3:])) self._lib_names[0] = b[3:] else: ds = d.split(os.pathsep) ds2 = [] for d in ds: if os.path.isdir(d): ds2.append(d) for dd in ['include', 'lib']: d1 = os.path.join(d, dd) if os.path.isdir(d1): ds2.append(d1) dirs = ds2 + dirs default_dirs = self.cp.get(self.section, key).split(os.pathsep) dirs.extend(default_dirs) ret = [] for d in dirs: if len(d) > 0 and not os.path.isdir(d): warnings.warn('Specified path %s is invalid.' % d, stacklevel=2) continue if d not in ret: ret.append(d) log.debug('( %s = %s )', key, ':'.join(ret)) return ret def get_lib_dirs(self, key='library_dirs'): return self.get_paths(self.section, key) def get_runtime_lib_dirs(self, key='runtime_library_dirs'): path = self.get_paths(self.section, key) if path == ['']: path = [] return path def get_include_dirs(self, key='include_dirs'): return self.get_paths(self.section, key) def get_src_dirs(self, key='src_dirs'): return self.get_paths(self.section, key) def get_libs(self, key, default): try: libs = self.cp.get(self.section, key) except NoOptionError: if not default: return [] if is_string(default): return [default] return default return [b for b in [a.strip() for a in libs.split(',')] if b] def get_libraries(self, key='libraries'): if hasattr(self, '_lib_names'): return self.get_libs(key, default=self._lib_names) else: return self.get_libs(key, '') def library_extensions(self): c = customized_ccompiler() static_exts = [] if c.compiler_type != 'msvc': # MSVC doesn't understand binutils static_exts.append('.a') if sys.platform == 'win32': static_exts.append('.lib') # .lib is used by MSVC and others if self.search_static_first: exts = static_exts + [so_ext] else: exts = [so_ext] + static_exts if sys.platform == 'cygwin': exts.append('.dll.a') if sys.platform == 'darwin': exts.append('.dylib') return exts def check_libs(self, lib_dirs, libs, opt_libs=[]): """If static or shared libraries are available then return their info dictionary. Checks for all libraries as shared libraries first, then static (or vice versa if self.search_static_first is True). """ exts = self.library_extensions() info = None for ext in exts: info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) if info is not None: break if not info: log.info(' libraries %s not found in %s', ','.join(libs), lib_dirs) return info def check_libs2(self, lib_dirs, libs, opt_libs=[]): """If static or shared libraries are available then return their info dictionary. Checks each library for shared or static. 
""" exts = self.library_extensions() info = self._check_libs(lib_dirs, libs, opt_libs, exts) if not info: log.info(' libraries %s not found in %s', ','.join(libs), lib_dirs) return info def _find_lib(self, lib_dir, lib, exts): assert is_string(lib_dir) # under windows first try without 'lib' prefix if sys.platform == 'win32': lib_prefixes = ['', 'lib'] else: lib_prefixes = ['lib'] # for each library name, see if we can find a file for it. for ext in exts: for prefix in lib_prefixes: p = self.combine_paths(lib_dir, prefix + lib + ext) if p: break if p: assert len(p) == 1 # ??? splitext on p[0] would do this for cygwin # doesn't seem correct if ext == '.dll.a': lib += '.dll' if ext == '.lib': lib = prefix + lib return lib return False def _find_libs(self, lib_dirs, libs, exts): # make sure we preserve the order of libs, as it can be important found_dirs, found_libs = [], [] for lib in libs: for lib_dir in lib_dirs: found_lib = self._find_lib(lib_dir, lib, exts) if found_lib: found_libs.append(found_lib) if lib_dir not in found_dirs: found_dirs.append(lib_dir) break return found_dirs, found_libs def _check_libs(self, lib_dirs, libs, opt_libs, exts): """Find mandatory and optional libs in expected paths. Missing optional libraries are silently forgotten. """ if not is_sequence(lib_dirs): lib_dirs = [lib_dirs] # First, try to find the mandatory libraries found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) if len(found_libs) > 0 and len(found_libs) == len(libs): # Now, check for optional libraries opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) found_libs.extend(opt_found_libs) for lib_dir in opt_found_dirs: if lib_dir not in found_dirs: found_dirs.append(lib_dir) info = {'libraries': found_libs, 'library_dirs': found_dirs} return info else: return None def combine_paths(self, *args): """Return a list of existing paths composed by all combinations of items from the arguments. 
""" return combine_paths(*args) class fft_opt_info(system_info): def calc_info(self): info = {} fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') djbfft_info = get_info('djbfft') if fftw_info: dict_append(info, **fftw_info) if djbfft_info: dict_append(info, **djbfft_info) self.set_info(**info) return class fftw_info(system_info): #variables to override section = 'fftw' dir_env_var = 'FFTW' notfounderror = FFTWNotFoundError ver_info = [{'name':'fftw3', 'libs':['fftw3'], 'includes':['fftw3.h'], 'macros':[('SCIPY_FFTW3_H', None)]}, {'name':'fftw2', 'libs':['rfftw', 'fftw'], 'includes':['fftw.h', 'rfftw.h'], 'macros':[('SCIPY_FFTW_H', None)]}] def calc_ver_info(self, ver_param): """Returns True on successful version detection, else False""" lib_dirs = self.get_lib_dirs() incl_dirs = self.get_include_dirs() opt = self.get_option_single(self.section + '_libs', 'libraries') libs = self.get_libs(opt, ver_param['libs']) info = self.check_libs(lib_dirs, libs) if info is not None: flag = 0 for d in incl_dirs: if len(self.combine_paths(d, ver_param['includes'])) \ == len(ver_param['includes']): dict_append(info, include_dirs=[d]) flag = 1 break if flag: dict_append(info, define_macros=ver_param['macros']) else: info = None if info is not None: self.set_info(**info) return True else: log.info(' %s not found' % (ver_param['name'])) return False def calc_info(self): for i in self.ver_info: if self.calc_ver_info(i): break class fftw2_info(fftw_info): #variables to override section = 'fftw' dir_env_var = 'FFTW' notfounderror = FFTWNotFoundError ver_info = [{'name':'fftw2', 'libs':['rfftw', 'fftw'], 'includes':['fftw.h', 'rfftw.h'], 'macros':[('SCIPY_FFTW_H', None)]} ] class fftw3_info(fftw_info): #variables to override section = 'fftw3' dir_env_var = 'FFTW3' notfounderror = FFTWNotFoundError ver_info = [{'name':'fftw3', 'libs':['fftw3'], 'includes':['fftw3.h'], 'macros':[('SCIPY_FFTW3_H', None)]}, ] class fftw3_armpl_info(fftw_info): section = 'fftw3' dir_env_var = 'ARMPL_DIR' notfounderror = FFTWNotFoundError ver_info = [{'name': 'fftw3', 'libs': ['armpl_lp64_mp'], 'includes': ['fftw3.h'], 'macros': [('SCIPY_FFTW3_H', None)]}] class dfftw_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'dfftw', 'libs':['drfftw', 'dfftw'], 'includes':['dfftw.h', 'drfftw.h'], 'macros':[('SCIPY_DFFTW_H', None)]}] class sfftw_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'sfftw', 'libs':['srfftw', 'sfftw'], 'includes':['sfftw.h', 'srfftw.h'], 'macros':[('SCIPY_SFFTW_H', None)]}] class fftw_threads_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'fftw threads', 'libs':['rfftw_threads', 'fftw_threads'], 'includes':['fftw_threads.h', 'rfftw_threads.h'], 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] class dfftw_threads_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'dfftw threads', 'libs':['drfftw_threads', 'dfftw_threads'], 'includes':['dfftw_threads.h', 'drfftw_threads.h'], 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] class sfftw_threads_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'sfftw threads', 'libs':['srfftw_threads', 'sfftw_threads'], 'includes':['sfftw_threads.h', 'srfftw_threads.h'], 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] class djbfft_info(system_info): section = 'djbfft' dir_env_var = 'DJBFFT' notfounderror = DJBFFTNotFoundError def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: 
dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): lib_dirs = self.get_lib_dirs() incl_dirs = self.get_include_dirs() info = None for d in lib_dirs: p = self.combine_paths(d, ['djbfft.a']) if p: info = {'extra_objects': p} break p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) if p: info = {'libraries': ['djbfft'], 'library_dirs': [d]} break if info is None: return for d in incl_dirs: if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: dict_append(info, include_dirs=[d], define_macros=[('SCIPY_DJBFFT_H', None)]) self.set_info(**info) return return class mkl_info(system_info): section = 'mkl' dir_env_var = 'MKLROOT' _lib_mkl = ['mkl_rt'] def get_mkl_rootdir(self): mklroot = os.environ.get('MKLROOT', None) if mklroot is not None: return mklroot paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) ld_so_conf = '/etc/ld.so.conf' if os.path.isfile(ld_so_conf): with open(ld_so_conf, 'r') as f: for d in f: d = d.strip() if d: paths.append(d) intel_mkl_dirs = [] for path in paths: path_atoms = path.split(os.sep) for m in path_atoms: if m.startswith('mkl'): d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) intel_mkl_dirs.append(d) break for d in paths: dirs = glob(os.path.join(d, 'mkl', '*')) dirs += glob(os.path.join(d, 'mkl*')) for sub_dir in dirs: if os.path.isdir(os.path.join(sub_dir, 'lib')): return sub_dir return None def __init__(self): mklroot = self.get_mkl_rootdir() if mklroot is None: system_info.__init__(self) else: from .cpuinfo import cpu if cpu.is_Itanium(): plt = '64' elif cpu.is_Intel() and cpu.is_64bit(): plt = 'intel64' else: plt = '32' system_info.__init__( self, default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], default_include_dirs=[os.path.join(mklroot, 'include')]) def calc_info(self): lib_dirs = self.get_lib_dirs() incl_dirs = self.get_include_dirs() opt = self.get_option_single('mkl_libs', 'libraries') mkl_libs = self.get_libs(opt, self._lib_mkl) info = self.check_libs2(lib_dirs, mkl_libs) if info is None: return dict_append(info, define_macros=[('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)], include_dirs=incl_dirs) if sys.platform == 'win32': pass # win32 has no pthread library else: dict_append(info, libraries=['pthread']) self.set_info(**info) class lapack_mkl_info(mkl_info): pass class blas_mkl_info(mkl_info): pass class armpl_info(system_info): section = 'armpl' dir_env_var = 'ARMPL_DIR' _lib_armpl = ['armpl_lp64_mp'] def calc_info(self): lib_dirs = self.get_lib_dirs() incl_dirs = self.get_include_dirs() armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) info = self.check_libs2(lib_dirs, armpl_libs) if info is None: return dict_append(info, define_macros=[('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)], include_dirs=incl_dirs) self.set_info(**info) class lapack_armpl_info(armpl_info): pass class blas_armpl_info(armpl_info): pass class atlas_info(system_info): section = 'atlas' dir_env_var = 'ATLAS' _lib_names = ['f77blas', 'cblas'] if sys.platform[:7] == 'freebsd': _lib_atlas = ['atlas_r'] _lib_lapack = ['alapack_r'] else: _lib_atlas = ['atlas'] _lib_lapack = ['lapack'] notfounderror = AtlasNotFoundError def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', 'sse', '3dnow', 'sse2']) + [d]) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): lib_dirs = self.get_lib_dirs() info = {} opt = self.get_option_single('atlas_libs', 
'libraries') atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) atlas = None lapack = None atlas_1 = None for d in lib_dirs: atlas = self.check_libs2(d, atlas_libs, []) if atlas is not None: lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) lapack = self.check_libs2(lib_dirs2, lapack_libs, []) if lapack is not None: break if atlas: atlas_1 = atlas log.info(self.__class__) if atlas is None: atlas = atlas_1 if atlas is None: return include_dirs = self.get_include_dirs() h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) h = h[0] if h: h = os.path.dirname(h) dict_append(info, include_dirs=[h]) info['language'] = 'c' if lapack is not None: dict_append(info, **lapack) dict_append(info, **atlas) elif 'lapack_atlas' in atlas['libraries']: dict_append(info, **atlas) dict_append(info, define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) self.set_info(**info) return else: dict_append(info, **atlas) dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) message = textwrap.dedent(""" ********************************************************************* Could not find lapack library within the ATLAS installation. ********************************************************************* """) warnings.warn(message, stacklevel=2) self.set_info(**info) return # Check if lapack library is complete, only warn if it is not. lapack_dir = lapack['library_dirs'][0] lapack_name = lapack['libraries'][0] lapack_lib = None lib_prefixes = ['lib'] if sys.platform == 'win32': lib_prefixes.append('') for e in self.library_extensions(): for prefix in lib_prefixes: fn = os.path.join(lapack_dir, prefix + lapack_name + e) if os.path.exists(fn): lapack_lib = fn break if lapack_lib: break if lapack_lib is not None: sz = os.stat(lapack_lib)[6] if sz <= 4000 * 1024: message = textwrap.dedent(""" ********************************************************************* Lapack library (from ATLAS) is probably incomplete: size of %s is %sk (expected >4000k) Follow the instructions in the KNOWN PROBLEMS section of the file numpy/INSTALL.txt. 
********************************************************************* """) % (lapack_lib, sz / 1024) warnings.warn(message, stacklevel=2) else: info['language'] = 'f77' atlas_version, atlas_extra_info = get_atlas_version(**atlas) dict_append(info, **atlas_extra_info) self.set_info(**info) class atlas_blas_info(atlas_info): _lib_names = ['f77blas', 'cblas'] def calc_info(self): lib_dirs = self.get_lib_dirs() info = {} opt = self.get_option_single('atlas_libs', 'libraries') atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) atlas = self.check_libs2(lib_dirs, atlas_libs, []) if atlas is None: return include_dirs = self.get_include_dirs() h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) h = h[0] if h: h = os.path.dirname(h) dict_append(info, include_dirs=[h]) info['language'] = 'c' info['define_macros'] = [('HAVE_CBLAS', None)] atlas_version, atlas_extra_info = get_atlas_version(**atlas) dict_append(atlas, **atlas_extra_info) dict_append(info, **atlas) self.set_info(**info) return class atlas_threads_info(atlas_info): dir_env_var = ['PTATLAS', 'ATLAS'] _lib_names = ['ptf77blas', 'ptcblas'] class atlas_blas_threads_info(atlas_blas_info): dir_env_var = ['PTATLAS', 'ATLAS'] _lib_names = ['ptf77blas', 'ptcblas'] class lapack_atlas_info(atlas_info): _lib_names = ['lapack_atlas'] + atlas_info._lib_names class lapack_atlas_threads_info(atlas_threads_info): _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names class atlas_3_10_info(atlas_info): _lib_names = ['satlas'] _lib_atlas = _lib_names _lib_lapack = _lib_names class atlas_3_10_blas_info(atlas_3_10_info): _lib_names = ['satlas'] def calc_info(self): lib_dirs = self.get_lib_dirs() info = {} opt = self.get_option_single('atlas_lib', 'libraries') atlas_libs = self.get_libs(opt, self._lib_names) atlas = self.check_libs2(lib_dirs, atlas_libs, []) if atlas is None: return include_dirs = self.get_include_dirs() h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) h = h[0] if h: h = os.path.dirname(h) dict_append(info, include_dirs=[h]) info['language'] = 'c' info['define_macros'] = [('HAVE_CBLAS', None)] atlas_version, atlas_extra_info = get_atlas_version(**atlas) dict_append(atlas, **atlas_extra_info) dict_append(info, **atlas) self.set_info(**info) return class atlas_3_10_threads_info(atlas_3_10_info): dir_env_var = ['PTATLAS', 'ATLAS'] _lib_names = ['tatlas'] _lib_atlas = _lib_names _lib_lapack = _lib_names class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): dir_env_var = ['PTATLAS', 'ATLAS'] _lib_names = ['tatlas'] class lapack_atlas_3_10_info(atlas_3_10_info): pass class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): pass class lapack_info(system_info): section = 'lapack' dir_env_var = 'LAPACK' _lib_names = ['lapack'] notfounderror = LapackNotFoundError def calc_info(self): lib_dirs = self.get_lib_dirs() opt = self.get_option_single('lapack_libs', 'libraries') lapack_libs = self.get_libs(opt, self._lib_names) info = self.check_libs(lib_dirs, lapack_libs, []) if info is None: return info['language'] = 'f77' self.set_info(**info) class lapack_src_info(system_info): # LAPACK_SRC is deprecated, please do not use this! # Build or install a BLAS library via your package manager or from # source separately. 
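    # A hedged sketch of the preferred alternative (the paths below are
    # placeholders, not recommendations): point the build at a prebuilt
    # LAPACK instead of LAPACK_SRC, e.g. in site.cfg
    #
    #     [lapack]
    #     library_dirs = /opt/lapack/lib
    #     lapack_libs = lapack
    #
    # or by exporting LAPACK=/opt/lapack/lib, which lapack_info picks up
    # through its dir_env_var.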
section = 'lapack_src' dir_env_var = 'LAPACK_SRC' notfounderror = LapackSrcNotFoundError def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): src_dirs = self.get_src_dirs() src_dir = '' for d in src_dirs: if os.path.isfile(os.path.join(d, 'dgesv.f')): src_dir = d break if not src_dir: #XXX: Get sources from netlib. May be ask first. return # The following is extracted from LAPACK-3.0/SRC/Makefile. # Added missing names from lapack-lite-3.1.1/SRC/Makefile # while keeping removed names for Lapack-3.0 compatibility. allaux = ''' ilaenv ieeeck lsame lsamen xerbla iparmq ''' # *.f laux = ''' bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf stebz stedc steqr sterf larra larrc larrd larr larrk larrj larrr laneg laisnan isnan lazq3 lazq4 ''' # [s|d]*.f lasrc = ''' gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv lartv larz larzb larzt laswp lasyf latbs latdf latps latrd latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs tzrqf tzrzf lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 ''' # [s|c|d|z]*.f sd_lasrc = ''' laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd sygvx sytd2 sytrd ''' # [s|d]*.f cz_lasrc = ''' bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 unmlq unmql unmqr unmr2 unmr3 unmrq unmrz 
unmtr upgtr upmtr ''' # [c|z]*.f ####### sclaux = laux + ' econd ' # s*.f dzlaux = laux + ' secnd ' # d*.f slasrc = lasrc + sd_lasrc # s*.f dlasrc = lasrc + sd_lasrc # d*.f clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f oclasrc = ' icmax1 scsum1 ' # *.f ozlasrc = ' izmax1 dzsum1 ' # *.f sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ + ['c%s.f' % f for f in (clasrc).split()] \ + ['z%s.f' % f for f in (zlasrc).split()] \ + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()] sources = [os.path.join(src_dir, f) for f in sources] # Lapack 3.1: src_dir2 = os.path.join(src_dir, '..', 'INSTALL') sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] # Lapack 3.2.1: sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] # Should we check here actual existence of source files? # Yes, the file listing is different between 3.0 and 3.1 # versions. sources = [f for f in sources if os.path.isfile(f)] info = {'sources': sources, 'language': 'f77'} self.set_info(**info) atlas_version_c_text = r''' /* This file is generated from numpy/distutils/system_info.py */ void ATL_buildinfo(void); int main(void) { ATL_buildinfo(); return 0; } ''' _cached_atlas_version = {} def get_atlas_version(**config): libraries = config.get('libraries', []) library_dirs = config.get('library_dirs', []) key = (tuple(libraries), tuple(library_dirs)) if key in _cached_atlas_version: return _cached_atlas_version[key] c = cmd_config(Distribution()) atlas_version = None info = {} try: s, o = c.get_output(atlas_version_c_text, libraries=libraries, library_dirs=library_dirs, ) if s and re.search(r'undefined reference to `_gfortran', o, re.M): s, o = c.get_output(atlas_version_c_text, libraries=libraries + ['gfortran'], library_dirs=library_dirs, ) if not s: warnings.warn(textwrap.dedent(""" ***************************************************** Linkage with ATLAS requires gfortran. Use python setup.py config_fc --fcompiler=gnu95 ... when building extension libraries that use ATLAS. Make sure that -lgfortran is used for C++ extensions. 
***************************************************** """), stacklevel=2) dict_append(info, language='f90', define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) except Exception: # failed to get version from file -- maybe on Windows # look at directory name for o in library_dirs: m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o) if m: atlas_version = m.group('version') if atlas_version is not None: break # final choice --- look at ATLAS_VERSION environment # variable if atlas_version is None: atlas_version = os.environ.get('ATLAS_VERSION', None) if atlas_version: dict_append(info, define_macros=[( 'ATLAS_INFO', _c_string_literal(atlas_version)) ]) else: dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) return atlas_version or '?.?.?', info if not s: m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o) if m: atlas_version = m.group('version') if atlas_version is None: if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): atlas_version = '3.2.1_pre3.3.6' else: log.info('Status: %d', s) log.info('Output: %s', o) elif atlas_version == '3.2.1_pre3.3.6': dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) else: dict_append(info, define_macros=[( 'ATLAS_INFO', _c_string_literal(atlas_version)) ]) result = _cached_atlas_version[key] = atlas_version, info return result class lapack_opt_info(system_info): notfounderror = LapackNotFoundError # List of all known LAPACK libraries, in the default order lapack_order = ['armpl', 'mkl', 'openblas', 'flame', 'accelerate', 'atlas', 'lapack'] order_env_var_name = 'NPY_LAPACK_ORDER' def _calc_info_armpl(self): info = get_info('lapack_armpl') if info: self.set_info(**info) return True return False def _calc_info_mkl(self): info = get_info('lapack_mkl') if info: self.set_info(**info) return True return False def _calc_info_openblas(self): info = get_info('openblas_lapack') if info: self.set_info(**info) return True info = get_info('openblas_clapack') if info: self.set_info(**info) return True return False def _calc_info_flame(self): info = get_info('flame') if info: self.set_info(**info) return True return False def _calc_info_atlas(self): info = get_info('atlas_3_10_threads') if not info: info = get_info('atlas_3_10') if not info: info = get_info('atlas_threads') if not info: info = get_info('atlas') if info: # Figure out if ATLAS has lapack... # If not we need the lapack library, but not BLAS! l = info.get('define_macros', []) if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ or ('ATLAS_WITHOUT_LAPACK', None) in l: # Get LAPACK (with possible warnings) # If not found we don't accept anything # since we can't use ATLAS with LAPACK! 
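                # Illustrative sketch (values are hypothetical, not taken from
                # a real build): an ATLAS installation without its own LAPACK
                # reports something like
                #     {'libraries': ['f77blas', 'cblas', 'atlas'],
                #      'define_macros': [('ATLAS_WITHOUT_LAPACK', None)], ...}
                # so a plain netlib LAPACK has to be located and merged in via
                # dict_append() before the combination is usable.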
lapack_info = self._get_info_lapack() if not lapack_info: return False dict_append(info, **lapack_info) self.set_info(**info) return True return False def _calc_info_accelerate(self): info = get_info('accelerate') if info: self.set_info(**info) return True return False def _get_info_blas(self): # Default to get the optimized BLAS implementation info = get_info('blas_opt') if not info: warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) info_src = get_info('blas_src') if not info_src: warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) return {} dict_append(info, libraries=[('fblas_src', info_src)]) return info def _get_info_lapack(self): info = get_info('lapack') if not info: warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) info_src = get_info('lapack_src') if not info_src: warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) return {} dict_append(info, libraries=[('flapack_src', info_src)]) return info def _calc_info_lapack(self): info = self._get_info_lapack() if info: info_blas = self._get_info_blas() dict_append(info, **info_blas) dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) self.set_info(**info) return True return False def _calc_info_from_envvar(self): info = {} info['language'] = 'f77' info['libraries'] = [] info['include_dirs'] = [] info['define_macros'] = [] info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() self.set_info(**info) return True def _calc_info(self, name): return getattr(self, '_calc_info_{}'.format(name))() def calc_info(self): lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name) if len(unknown_order) > 0: raise ValueError("lapack_opt_info user defined " "LAPACK order has unacceptable " "values: {}".format(unknown_order)) if 'NPY_LAPACK_LIBS' in os.environ: # Bypass autodetection, set language to F77 and use env var linker # flags directly self._calc_info_from_envvar() return for lapack in lapack_order: if self._calc_info(lapack): return if 'lapack' not in lapack_order: # Since the user may request *not* to use any library, we still need # to raise warnings to signal missing packages! 
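            # Hedged usage sketch (shell values are illustrative only): the
            # search loop above is steered by NPY_LAPACK_ORDER, e.g.
            #     NPY_LAPACK_ORDER="mkl,openblas,lapack" python setup.py build
            # restricts which candidates _calc_info() tries, while
            #     NPY_LAPACK_LIBS="-L/opt/lapack/lib -llapack"
            # skips detection entirely via _calc_info_from_envvar() above.
            # Reaching this branch means the netlib fallback was excluded and
            # nothing else was found, so warnings are the only signal left.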
warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) class _ilp64_opt_info_mixin: symbol_suffix = None symbol_prefix = None def _check_info(self, info): macros = dict(info.get('define_macros', [])) prefix = macros.get('BLAS_SYMBOL_PREFIX', '') suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') if self.symbol_prefix not in (None, prefix): return False if self.symbol_suffix not in (None, suffix): return False return bool(info) class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): notfounderror = LapackILP64NotFoundError lapack_order = ['openblas64_', 'openblas_ilp64'] order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' def _calc_info(self, name): info = get_info(name + '_lapack') if self._check_info(info): self.set_info(**info) return True return False class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): # Same as lapack_ilp64_opt_info, but fix symbol names symbol_prefix = '' symbol_suffix = '' class lapack64__opt_info(lapack_ilp64_opt_info): symbol_prefix = '' symbol_suffix = '64_' class blas_opt_info(system_info): notfounderror = BlasNotFoundError # List of all known BLAS libraries, in the default order blas_order = ['armpl', 'mkl', 'blis', 'openblas', 'accelerate', 'atlas', 'blas'] order_env_var_name = 'NPY_BLAS_ORDER' def _calc_info_armpl(self): info = get_info('blas_armpl') if info: self.set_info(**info) return True return False def _calc_info_mkl(self): info = get_info('blas_mkl') if info: self.set_info(**info) return True return False def _calc_info_blis(self): info = get_info('blis') if info: self.set_info(**info) return True return False def _calc_info_openblas(self): info = get_info('openblas') if info: self.set_info(**info) return True return False def _calc_info_atlas(self): info = get_info('atlas_3_10_blas_threads') if not info: info = get_info('atlas_3_10_blas') if not info: info = get_info('atlas_blas_threads') if not info: info = get_info('atlas_blas') if info: self.set_info(**info) return True return False def _calc_info_accelerate(self): info = get_info('accelerate') if info: self.set_info(**info) return True return False def _calc_info_blas(self): # Warn about a non-optimized BLAS library warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) info = {} dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) blas = get_info('blas') if blas: dict_append(info, **blas) else: # Not even BLAS was found! 
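            # Hedged sketch of the source fallback taken below (paths are
            # hypothetical): get_info('blas_src') yields a build-from-source
            # recipe such as
            #     {'sources': ['/path/to/blas/daxpy.f', ...], 'language': 'f77'}
            # which gets attached as the ('fblas_src', blas_src) pseudo-library
            # so the reference BLAS is compiled along with the extension.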
warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) blas_src = get_info('blas_src') if not blas_src: warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) return False dict_append(info, libraries=[('fblas_src', blas_src)]) self.set_info(**info) return True def _calc_info_from_envvar(self): info = {} info['language'] = 'f77' info['libraries'] = [] info['include_dirs'] = [] info['define_macros'] = [] info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() if 'NPY_CBLAS_LIBS' in os.environ: info['define_macros'].append(('HAVE_CBLAS', None)) info['extra_link_args'].extend( os.environ['NPY_CBLAS_LIBS'].split()) self.set_info(**info) return True def _calc_info(self, name): return getattr(self, '_calc_info_{}'.format(name))() def calc_info(self): blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name) if len(unknown_order) > 0: raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) if 'NPY_BLAS_LIBS' in os.environ: # Bypass autodetection, set language to F77 and use env var linker # flags directly self._calc_info_from_envvar() return for blas in blas_order: if self._calc_info(blas): return if 'blas' not in blas_order: # Since the user may request *not* to use any library, we still need # to raise warnings to signal missing packages! warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): notfounderror = BlasILP64NotFoundError blas_order = ['openblas64_', 'openblas_ilp64'] order_env_var_name = 'NPY_BLAS_ILP64_ORDER' def _calc_info(self, name): info = get_info(name) if self._check_info(info): self.set_info(**info) return True return False class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): symbol_prefix = '' symbol_suffix = '' class blas64__opt_info(blas_ilp64_opt_info): symbol_prefix = '' symbol_suffix = '64_' class cblas_info(system_info): section = 'cblas' dir_env_var = 'CBLAS' # No default as it's used only in blas_info _lib_names = [] notfounderror = BlasNotFoundError class blas_info(system_info): section = 'blas' dir_env_var = 'BLAS' _lib_names = ['blas'] notfounderror = BlasNotFoundError def calc_info(self): lib_dirs = self.get_lib_dirs() opt = self.get_option_single('blas_libs', 'libraries') blas_libs = self.get_libs(opt, self._lib_names) info = self.check_libs(lib_dirs, blas_libs, []) if info is None: return else: info['include_dirs'] = self.get_include_dirs() if platform.system() == 'Windows': # The check for windows is needed because get_cblas_libs uses the # same compiler that was used to compile Python and msvc is # often not installed when mingw is being used. This rough # treatment is not desirable, but windows is tricky. info['language'] = 'f77' # XXX: is it generally true? # If cblas is given as an option, use those cblas_info_obj = cblas_info() cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries') cblas_libs = cblas_info_obj.get_libs(cblas_opt, None) if cblas_libs: info['libraries'] = cblas_libs + blas_libs info['define_macros'] = [('HAVE_CBLAS', None)] else: lib = self.get_cblas_libs(info) if lib is not None: info['language'] = 'c' info['libraries'] = lib info['define_macros'] = [('HAVE_CBLAS', None)] self.set_info(**info) def get_cblas_libs(self, info): """ Check whether we can link with CBLAS interface This method will search through several combinations of libraries to check whether CBLAS is present: 1. 
Libraries in ``info['libraries']``, as is 2. As 1. but also explicitly adding ``'cblas'`` as a library 3. As 1. but also explicitly adding ``'blas'`` as a library 4. Check only library ``'cblas'`` 5. Check only library ``'blas'`` Parameters ---------- info : dict system information dictionary for compilation and linking Returns ------- libraries : list of str or None a list of libraries that enables the use of CBLAS interface. Returns None if not found or a compilation error occurs. Since 1.17 returns a list. """ # primitive cblas check by looking for the header and trying to link # cblas or blas c = customized_ccompiler() tmpdir = tempfile.mkdtemp() s = textwrap.dedent("""\ #include <cblas.h> int main(int argc, const char *argv[]) { double a[4] = {1,2,3,4}; double b[4] = {5,6,7,8}; return cblas_ddot(4, a, 1, b, 1) > 10; }""") src = os.path.join(tmpdir, 'source.c') try: with open(src, 'wt') as f: f.write(s) try: # check we can compile (find headers) obj = c.compile([src], output_dir=tmpdir, include_dirs=self.get_include_dirs()) except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): return None # check we can link (find library) # some systems have separate cblas and blas libs. for libs in [info['libraries'], ['cblas'] + info['libraries'], ['blas'] + info['libraries'], ['cblas'], ['blas']]: try: c.link_executable(obj, os.path.join(tmpdir, "a.out"), libraries=libs, library_dirs=info['library_dirs'], extra_postargs=info.get('extra_link_args', [])) return libs except distutils.ccompiler.LinkError: pass finally: shutil.rmtree(tmpdir) return None class openblas_info(blas_info): section = 'openblas' dir_env_var = 'OPENBLAS' _lib_names = ['openblas'] _require_symbols = [] notfounderror = BlasNotFoundError @property def symbol_prefix(self): try: return self.cp.get(self.section, 'symbol_prefix') except NoOptionError: return '' @property def symbol_suffix(self): try: return self.cp.get(self.section, 'symbol_suffix') except NoOptionError: return '' def _calc_info(self): c = customized_ccompiler() lib_dirs = self.get_lib_dirs() # Prefer to use libraries over openblas_libs opt = self.get_option_single('openblas_libs', 'libraries') openblas_libs = self.get_libs(opt, self._lib_names) info = self.check_libs(lib_dirs, openblas_libs, []) if c.compiler_type == "msvc" and info is None: from numpy.distutils.fcompiler import new_fcompiler f = new_fcompiler(c_compiler=c) if f and f.compiler_type == 'gnu95': # Try gfortran-compatible library files info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) # Skip lapack check, we'd need build_ext to do it skip_symbol_check = True elif info: skip_symbol_check = False info['language'] = 'c' if info is None: return None # Add extra info for OpenBLAS extra_info = self.calc_extra_info() dict_append(info, **extra_info) if not (skip_symbol_check or self.check_symbols(info)): return None info['define_macros'] = [('HAVE_CBLAS', None)] if self.symbol_prefix: info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] if self.symbol_suffix: info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)] return info def calc_info(self): info = self._calc_info() if info is not None: self.set_info(**info) def check_msvc_gfortran_libs(self, library_dirs, libraries): # First, find the full path to each library directory library_paths = [] for library in libraries: for library_dir in library_dirs: # MinGW static ext will be .a fullpath = os.path.join(library_dir, library + '.a') if os.path.isfile(fullpath): library_paths.append(fullpath) break 
else: return None # Generate numpy.distutils virtual static library file basename = self.__class__.__name__ tmpdir = os.path.join(os.getcwd(), 'build', basename) if not os.path.isdir(tmpdir): os.makedirs(tmpdir) info = {'library_dirs': [tmpdir], 'libraries': [basename], 'language': 'f77'} fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') with open(fake_lib_file, 'w') as f: f.write("\n".join(library_paths)) with open(fake_clib_file, 'w') as f: pass return info def check_symbols(self, info): res = False c = customized_ccompiler() tmpdir = tempfile.mkdtemp() prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, symbol_name, self.symbol_suffix) for symbol_name in self._require_symbols) calls = "\n".join("%s%s%s();" % (self.symbol_prefix, symbol_name, self.symbol_suffix) for symbol_name in self._require_symbols) s = textwrap.dedent("""\ %(prototypes)s int main(int argc, const char *argv[]) { %(calls)s return 0; }""") % dict(prototypes=prototypes, calls=calls) src = os.path.join(tmpdir, 'source.c') out = os.path.join(tmpdir, 'a.out') # Add the additional "extra" arguments try: extra_args = info['extra_link_args'] except Exception: extra_args = [] try: with open(src, 'wt') as f: f.write(s) obj = c.compile([src], output_dir=tmpdir) try: c.link_executable(obj, out, libraries=info['libraries'], library_dirs=info['library_dirs'], extra_postargs=extra_args) res = True except distutils.ccompiler.LinkError: res = False finally: shutil.rmtree(tmpdir) return res class openblas_lapack_info(openblas_info): section = 'openblas' dir_env_var = 'OPENBLAS' _lib_names = ['openblas'] _require_symbols = ['zungqr_'] notfounderror = BlasNotFoundError class openblas_clapack_info(openblas_lapack_info): _lib_names = ['openblas', 'lapack'] class openblas_ilp64_info(openblas_info): section = 'openblas_ilp64' dir_env_var = 'OPENBLAS_ILP64' _lib_names = ['openblas64'] _require_symbols = ['dgemm_', 'cblas_dgemm'] notfounderror = BlasILP64NotFoundError def _calc_info(self): info = super()._calc_info() if info is not None: info['define_macros'] += [('HAVE_BLAS_ILP64', None)] return info class openblas_ilp64_lapack_info(openblas_ilp64_info): _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] def _calc_info(self): info = super()._calc_info() if info: info['define_macros'] += [('HAVE_LAPACKE', None)] return info class openblas64__info(openblas_ilp64_info): # ILP64 Openblas, with default symbol suffix section = 'openblas64_' dir_env_var = 'OPENBLAS64_' _lib_names = ['openblas64_'] symbol_suffix = '64_' symbol_prefix = '' class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): pass class blis_info(blas_info): section = 'blis' dir_env_var = 'BLIS' _lib_names = ['blis'] notfounderror = BlasNotFoundError def calc_info(self): lib_dirs = self.get_lib_dirs() opt = self.get_option_single('blis_libs', 'libraries') blis_libs = self.get_libs(opt, self._lib_names) info = self.check_libs2(lib_dirs, blis_libs, []) if info is None: return # Add include dirs incl_dirs = self.get_include_dirs() dict_append(info, language='c', define_macros=[('HAVE_CBLAS', None)], include_dirs=incl_dirs) self.set_info(**info) class flame_info(system_info): """ Usage of libflame for LAPACK operations This requires libflame to be compiled with lapack wrappers: ./configure --enable-lapack2flame ... Be aware that libflame 5.1.0 has some missing names in the shared library, so if you have problems, try the static flame library. 
""" section = 'flame' _lib_names = ['flame'] notfounderror = FlameNotFoundError def check_embedded_lapack(self, info): """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ c = customized_ccompiler() tmpdir = tempfile.mkdtemp() s = textwrap.dedent("""\ void zungqr_(); int main(int argc, const char *argv[]) { zungqr_(); return 0; }""") src = os.path.join(tmpdir, 'source.c') out = os.path.join(tmpdir, 'a.out') # Add the additional "extra" arguments extra_args = info.get('extra_link_args', []) try: with open(src, 'wt') as f: f.write(s) obj = c.compile([src], output_dir=tmpdir) try: c.link_executable(obj, out, libraries=info['libraries'], library_dirs=info['library_dirs'], extra_postargs=extra_args) return True except distutils.ccompiler.LinkError: return False finally: shutil.rmtree(tmpdir) def calc_info(self): lib_dirs = self.get_lib_dirs() flame_libs = self.get_libs('libraries', self._lib_names) info = self.check_libs2(lib_dirs, flame_libs, []) if info is None: return # Add the extra flag args to info extra_info = self.calc_extra_info() dict_append(info, **extra_info) if self.check_embedded_lapack(info): # check if the user has supplied all information required self.set_info(**info) else: # Try and get the BLAS lib to see if we can get it to work blas_info = get_info('blas_opt') if not blas_info: # since we already failed once, this ain't going to work either return # Now we need to merge the two dictionaries for key in blas_info: if isinstance(blas_info[key], list): info[key] = info.get(key, []) + blas_info[key] elif isinstance(blas_info[key], tuple): info[key] = info.get(key, ()) + blas_info[key] else: info[key] = info.get(key, '') + blas_info[key] # Now check again if self.check_embedded_lapack(info): self.set_info(**info) class accelerate_info(system_info): section = 'accelerate' _lib_names = ['accelerate', 'veclib'] notfounderror = BlasNotFoundError def calc_info(self): # Make possible to enable/disable from config file/env var libraries = os.environ.get('ACCELERATE') if libraries: libraries = [libraries] else: libraries = self.get_libs('libraries', self._lib_names) libraries = [lib.strip().lower() for lib in libraries] if (sys.platform == 'darwin' and not os.getenv('_PYTHON_HOST_PLATFORM', None)): # Use the system BLAS from Accelerate or vecLib under OSX args = [] link_args = [] if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ 'x86_64' in get_platform() or \ 'i386' in platform.platform(): intel = 1 else: intel = 0 if (os.path.exists('/System/Library/Frameworks' '/Accelerate.framework/') and 'accelerate' in libraries): if intel: args.extend(['-msse3']) args.extend([ '-I/System/Library/Frameworks/vecLib.framework/Headers']) link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) elif (os.path.exists('/System/Library/Frameworks' '/vecLib.framework/') and 'veclib' in libraries): if intel: args.extend(['-msse3']) args.extend([ '-I/System/Library/Frameworks/vecLib.framework/Headers']) link_args.extend(['-Wl,-framework', '-Wl,vecLib']) if args: self.set_info(extra_compile_args=args, extra_link_args=link_args, define_macros=[('NO_ATLAS_INFO', 3), ('HAVE_CBLAS', None)]) return class blas_src_info(system_info): # BLAS_SRC is deprecated, please do not use this! # Build or install a BLAS library via your package manager or from # source separately. 
section = 'blas_src' dir_env_var = 'BLAS_SRC' notfounderror = BlasSrcNotFoundError def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend([d] + self.combine_paths(d, ['blas'])) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): src_dirs = self.get_src_dirs() src_dir = '' for d in src_dirs: if os.path.isfile(os.path.join(d, 'daxpy.f')): src_dir = d break if not src_dir: #XXX: Get sources from netlib. May be ask first. return blas1 = ''' caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap scabs1 ''' blas2 = ''' cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv ''' blas3 = ''' cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm ''' sources = [os.path.join(src_dir, f + '.f') \ for f in (blas1 + blas2 + blas3).split()] #XXX: should we check here actual existence of source files? sources = [f for f in sources if os.path.isfile(f)] info = {'sources': sources, 'language': 'f77'} self.set_info(**info) class x11_info(system_info): section = 'x11' notfounderror = X11NotFoundError _lib_names = ['X11'] def __init__(self): system_info.__init__(self, default_lib_dirs=default_x11_lib_dirs, default_include_dirs=default_x11_include_dirs) def calc_info(self): if sys.platform in ['win32']: return lib_dirs = self.get_lib_dirs() include_dirs = self.get_include_dirs() opt = self.get_option_single('x11_libs', 'libraries') x11_libs = self.get_libs(opt, self._lib_names) info = self.check_libs(lib_dirs, x11_libs, []) if info is None: return inc_dir = None for d in include_dirs: if self.combine_paths(d, 'X11/X.h'): inc_dir = d break if inc_dir is not None: dict_append(info, include_dirs=[inc_dir]) self.set_info(**info) class _numpy_info(system_info): section = 'Numeric' modulename = 'Numeric' notfounderror = NumericNotFoundError def __init__(self): include_dirs = [] try: module = __import__(self.modulename) prefix = [] for name in module.__file__.split(os.sep): if name == 'lib': break prefix.append(name) # Ask numpy for its own include path before attempting # anything else try: include_dirs.append(getattr(module, 'get_include')()) except AttributeError: pass include_dirs.append(sysconfig.get_path('include')) except ImportError: pass py_incl_dir = sysconfig.get_path('include') include_dirs.append(py_incl_dir) py_pincl_dir = sysconfig.get_path('platinclude') if py_pincl_dir not in include_dirs: include_dirs.append(py_pincl_dir) for d in default_include_dirs: d = os.path.join(d, os.path.basename(py_incl_dir)) if d not in include_dirs: include_dirs.append(d) system_info.__init__(self, default_lib_dirs=[], default_include_dirs=include_dirs) def calc_info(self): try: module = __import__(self.modulename) except ImportError: return info = {} macros = [] for v in ['__version__', 'version']: vrs = getattr(module, v, 
None) if vrs is None: continue macros = [(self.modulename.upper() + '_VERSION', _c_string_literal(vrs)), (self.modulename.upper(), None)] break dict_append(info, define_macros=macros) include_dirs = self.get_include_dirs() inc_dir = None for d in include_dirs: if self.combine_paths(d, os.path.join(self.modulename, 'arrayobject.h')): inc_dir = d break if inc_dir is not None: dict_append(info, include_dirs=[inc_dir]) if info: self.set_info(**info) return class numarray_info(_numpy_info): section = 'numarray' modulename = 'numarray' class Numeric_info(_numpy_info): section = 'Numeric' modulename = 'Numeric' class numpy_info(_numpy_info): section = 'numpy' modulename = 'numpy' class numerix_info(system_info): section = 'numerix' def calc_info(self): which = None, None if os.getenv("NUMERIX"): which = os.getenv("NUMERIX"), "environment var" # If all the above fail, default to numpy. if which[0] is None: which = "numpy", "defaulted" try: import numpy # noqa: F401 which = "numpy", "defaulted" except ImportError as e: msg1 = str(e) try: import Numeric # noqa: F401 which = "numeric", "defaulted" except ImportError as e: msg2 = str(e) try: import numarray # noqa: F401 which = "numarray", "defaulted" except ImportError as e: msg3 = str(e) log.info(msg1) log.info(msg2) log.info(msg3) which = which[0].strip().lower(), which[1] if which[0] not in ["numeric", "numarray", "numpy"]: raise ValueError("numerix selector must be either 'Numeric' " "or 'numarray' or 'numpy' but the value obtained" " from the %s was '%s'." % (which[1], which[0])) os.environ['NUMERIX'] = which[0] self.set_info(**get_info(which[0])) class f2py_info(system_info): def calc_info(self): try: import numpy.f2py as f2py except ImportError: return f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], include_dirs=[f2py_dir]) return class boost_python_info(system_info): section = 'boost_python' dir_env_var = 'BOOST' def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend([d] + self.combine_paths(d, ['boost*'])) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): src_dirs = self.get_src_dirs() src_dir = '' for d in src_dirs: if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', 'module.cpp')): src_dir = d break if not src_dir: return py_incl_dirs = [sysconfig.get_path('include')] py_pincl_dir = sysconfig.get_path('platinclude') if py_pincl_dir not in py_incl_dirs: py_incl_dirs.append(py_pincl_dir) srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) info = {'libraries': [('boost_python_src', {'include_dirs': [src_dir] + py_incl_dirs, 'sources':bpl_srcs} )], 'include_dirs': [src_dir], } if info: self.set_info(**info) return class agg2_info(system_info): section = 'agg2' dir_env_var = 'AGG2' def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend([d] + self.combine_paths(d, ['agg2*'])) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): src_dirs = self.get_src_dirs() src_dir = '' for d in src_dirs: if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): src_dir = d break if not src_dir: return if sys.platform == 'win32': agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', 'win32', 'agg_win32_bmp.cpp')) else: agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) 
agg2_srcs += [os.path.join(src_dir, 'src', 'platform', 'X11', 'agg_platform_support.cpp')] info = {'libraries': [('agg2_src', {'sources': agg2_srcs, 'include_dirs': [os.path.join(src_dir, 'include')], } )], 'include_dirs': [os.path.join(src_dir, 'include')], } if info: self.set_info(**info) return class _pkg_config_info(system_info): section = None config_env_var = 'PKG_CONFIG' default_config_exe = 'pkg-config' append_config_exe = '' version_macro_name = None release_macro_name = None version_flag = '--modversion' cflags_flag = '--cflags' def get_config_exe(self): if self.config_env_var in os.environ: return os.environ[self.config_env_var] return self.default_config_exe def get_config_output(self, config_exe, option): cmd = config_exe + ' ' + self.append_config_exe + ' ' + option try: o = subprocess.check_output(cmd) except (OSError, subprocess.CalledProcessError): pass else: o = filepath_from_subprocess_output(o) return o def calc_info(self): config_exe = find_executable(self.get_config_exe()) if not config_exe: log.warn('File not found: %s. Cannot determine %s info.' \ % (config_exe, self.section)) return info = {} macros = [] libraries = [] library_dirs = [] include_dirs = [] extra_link_args = [] extra_compile_args = [] version = self.get_config_output(config_exe, self.version_flag) if version: macros.append((self.__class__.__name__.split('.')[-1].upper(), _c_string_literal(version))) if self.version_macro_name: macros.append((self.version_macro_name + '_%s' % (version.replace('.', '_')), None)) if self.release_macro_name: release = self.get_config_output(config_exe, '--release') if release: macros.append((self.release_macro_name + '_%s' % (release.replace('.', '_')), None)) opts = self.get_config_output(config_exe, '--libs') if opts: for opt in opts.split(): if opt[:2] == '-l': libraries.append(opt[2:]) elif opt[:2] == '-L': library_dirs.append(opt[2:]) else: extra_link_args.append(opt) opts = self.get_config_output(config_exe, self.cflags_flag) if opts: for opt in opts.split(): if opt[:2] == '-I': include_dirs.append(opt[2:]) elif opt[:2] == '-D': if '=' in opt: n, v = opt[2:].split('=') macros.append((n, v)) else: macros.append((opt[2:], None)) else: extra_compile_args.append(opt) if macros: dict_append(info, define_macros=macros) if libraries: dict_append(info, libraries=libraries) if library_dirs: dict_append(info, library_dirs=library_dirs) if include_dirs: dict_append(info, include_dirs=include_dirs) if extra_link_args: dict_append(info, extra_link_args=extra_link_args) if extra_compile_args: dict_append(info, extra_compile_args=extra_compile_args) if info: self.set_info(**info) return class wx_info(_pkg_config_info): section = 'wx' config_env_var = 'WX_CONFIG' default_config_exe = 'wx-config' append_config_exe = '' version_macro_name = 'WX_VERSION' release_macro_name = 'WX_RELEASE' version_flag = '--version' cflags_flag = '--cxxflags' class gdk_pixbuf_xlib_2_info(_pkg_config_info): section = 'gdk_pixbuf_xlib_2' append_config_exe = 'gdk-pixbuf-xlib-2.0' version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' class gdk_pixbuf_2_info(_pkg_config_info): section = 'gdk_pixbuf_2' append_config_exe = 'gdk-pixbuf-2.0' version_macro_name = 'GDK_PIXBUF_VERSION' class gdk_x11_2_info(_pkg_config_info): section = 'gdk_x11_2' append_config_exe = 'gdk-x11-2.0' version_macro_name = 'GDK_X11_VERSION' class gdk_2_info(_pkg_config_info): section = 'gdk_2' append_config_exe = 'gdk-2.0' version_macro_name = 'GDK_VERSION' class gdk_info(_pkg_config_info): section = 'gdk' append_config_exe = 'gdk' 
version_macro_name = 'GDK_VERSION' class gtkp_x11_2_info(_pkg_config_info): section = 'gtkp_x11_2' append_config_exe = 'gtk+-x11-2.0' version_macro_name = 'GTK_X11_VERSION' class gtkp_2_info(_pkg_config_info): section = 'gtkp_2' append_config_exe = 'gtk+-2.0' version_macro_name = 'GTK_VERSION' class xft_info(_pkg_config_info): section = 'xft' append_config_exe = 'xft' version_macro_name = 'XFT_VERSION' class freetype2_info(_pkg_config_info): section = 'freetype2' append_config_exe = 'freetype2' version_macro_name = 'FREETYPE2_VERSION' class amd_info(system_info): section = 'amd' dir_env_var = 'AMD' _lib_names = ['amd'] def calc_info(self): lib_dirs = self.get_lib_dirs() opt = self.get_option_single('amd_libs', 'libraries') amd_libs = self.get_libs(opt, self._lib_names) info = self.check_libs(lib_dirs, amd_libs, []) if info is None: return include_dirs = self.get_include_dirs() inc_dir = None for d in include_dirs: p = self.combine_paths(d, 'amd.h') if p: inc_dir = os.path.dirname(p[0]) break if inc_dir is not None: dict_append(info, include_dirs=[inc_dir], define_macros=[('SCIPY_AMD_H', None)], swig_opts=['-I' + inc_dir]) self.set_info(**info) return class umfpack_info(system_info): section = 'umfpack' dir_env_var = 'UMFPACK' notfounderror = UmfpackNotFoundError _lib_names = ['umfpack'] def calc_info(self): lib_dirs = self.get_lib_dirs() opt = self.get_option_single('umfpack_libs', 'libraries') umfpack_libs = self.get_libs(opt, self._lib_names) info = self.check_libs(lib_dirs, umfpack_libs, []) if info is None: return include_dirs = self.get_include_dirs() inc_dir = None for d in include_dirs: p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') if p: inc_dir = os.path.dirname(p[0]) break if inc_dir is not None: dict_append(info, include_dirs=[inc_dir], define_macros=[('SCIPY_UMFPACK_H', None)], swig_opts=['-I' + inc_dir]) dict_append(info, **get_info('amd')) self.set_info(**info) return def combine_paths(*args, **kws): """ Return a list of existing paths composed by all combinations of items from arguments. 
""" r = [] for a in args: if not a: continue if is_string(a): a = [a] r.append(a) args = r if not args: return [] if len(args) == 1: result = reduce(lambda a, b: a + b, map(glob, args[0]), []) elif len(args) == 2: result = [] for a0 in args[0]: for a1 in args[1]: result.extend(glob(os.path.join(a0, a1))) else: result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) log.debug('(paths: %s)', ','.join(result)) return result language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} def dict_append(d, **kws): languages = [] for k, v in kws.items(): if k == 'language': languages.append(v) continue if k in d: if k in ['library_dirs', 'include_dirs', 'extra_compile_args', 'extra_link_args', 'runtime_library_dirs', 'define_macros']: [d[k].append(vv) for vv in v if vv not in d[k]] else: d[k].extend(v) else: d[k] = v if languages: l = inv_language_map[max([language_map.get(l, 0) for l in languages])] d['language'] = l return def parseCmdLine(argv=(None,)): import optparse parser = optparse.OptionParser("usage: %prog [-v] [info objs]") parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='be verbose and print more messages') opts, args = parser.parse_args(args=argv[1:]) return opts, args def show_all(argv=None): import inspect if argv is None: argv = sys.argv opts, args = parseCmdLine(argv) if opts.verbose: log.set_threshold(log.DEBUG) else: log.set_threshold(log.INFO) show_only = [] for n in args: if n[-5:] != '_info': n = n + '_info' show_only.append(n) show_all = not show_only _gdict_ = globals().copy() for name, c in _gdict_.items(): if not inspect.isclass(c): continue if not issubclass(c, system_info) or c is system_info: continue if not show_all: if name not in show_only: continue del show_only[show_only.index(name)] conf = c() conf.verbosity = 2 # we don't need the result, but we want # the side effect of printing diagnostics conf.get_info() if show_only: log.info('Info classes not defined: %s', ','.join(show_only)) if __name__ == "__main__": show_all()
111,028
Python
33.991806
120
0.549348
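The `dict_append` and `combine_paths` helpers defined at the end of system_info.py above are the primitives the *_info classes use to assemble build information. A minimal sketch of how they behave, assuming numpy.distutils is importable; the library names and paths are illustrative placeholders, not checked against any real system:

from numpy.distutils.system_info import dict_append, combine_paths

info = {'libraries': ['amd'], 'library_dirs': ['/usr/lib']}
dict_append(info,
            libraries=['umfpack'],                  # plain list keys are extended
            library_dirs=['/usr/lib'],              # known list keys skip duplicates
            define_macros=[('SCIPY_UMFPACK_H', None)],
            language='c')
print(info['libraries'])       # ['amd', 'umfpack']
print(info['library_dirs'])    # ['/usr/lib']

# combine_paths globs the combinations of its arguments and returns only
# the paths that exist, so the output depends on the local filesystem.
print(combine_paths('/usr/include', 'umfpack*.h'))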
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/msvc9compiler.py
import os
from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler

from .system_info import platform_bits


def _merge(old, new):
    """Concatenate two environment paths avoiding repeats.

    Here `old` is the environment string before the base class initialize
    function is called and `new` is the string after the call. The new string
    will be a fixed string if it is not obtained from the current environment,
    or the same as the old string if obtained from the same environment. The aim
    here is not to append the new string if it is already contained in the old
    string so as to limit the growth of the environment string.

    Parameters
    ----------
    old : string
        Previous environment string.
    new : string
        New environment string.

    Returns
    -------
    ret : string
        Updated environment string.

    """
    if not old:
        return new
    if new in old:
        return old

    # Neither new nor old is empty. Give old priority.
    return ';'.join([old, new])


class MSVCCompiler(_MSVCCompiler):
    def __init__(self, verbose=0, dry_run=0, force=0):
        _MSVCCompiler.__init__(self, verbose, dry_run, force)

    def initialize(self, plat_name=None):
        # The 'lib' and 'include' variables may be overwritten
        # by MSVCCompiler.initialize, so save them for later merge.
        environ_lib = os.getenv('lib')
        environ_include = os.getenv('include')
        _MSVCCompiler.initialize(self, plat_name)

        # Merge current and previous values of 'lib' and 'include'
        os.environ['lib'] = _merge(environ_lib, os.environ['lib'])
        os.environ['include'] = _merge(environ_include, os.environ['include'])

        # msvc9 building for 32 bits requires SSE2 to work around a
        # compiler bug.
        if platform_bits == 32:
            self.compile_options += ['/arch:SSE2']
            self.compile_options_debug += ['/arch:SSE2']

    def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
        ld_args.append('/MANIFEST')
        _MSVCCompiler.manifest_setup_ldargs(self, output_filename,
                                            build_temp, ld_args)
2,192
Python
33.265624
80
0.641423
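A behavioral sketch of the `_merge` helper above. The body is copied here only so the assertions run anywhere; importing numpy.distutils.msvc9compiler itself requires distutils.msvc9compiler, which only exists on Windows with older Python versions:

# from numpy.distutils.msvc9compiler import _merge   # Windows-only import
def _merge(old, new):          # copied logic, for illustration only
    if not old:
        return new
    if new in old:
        return old
    return ';'.join([old, new])

assert _merge('', r'C:\new\lib') == r'C:\new\lib'                      # empty old -> new
assert _merge(r'C:\old\lib', r'C:\old\lib') == r'C:\old\lib'           # no duplication
assert _merge(r'C:\old\lib', r'C:\new\lib') == r'C:\old\lib;C:\new\lib'  # old keeps priority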
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/extension.py
"""distutils.extension Provides the Extension class, used to describe C/C++ extension modules in setup scripts. Overridden to support f2py. """ import re from distutils.extension import Extension as old_Extension cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match class Extension(old_Extension): """ Parameters ---------- name : str Extension name. sources : list of str List of source file locations relative to the top directory of the package. extra_compile_args : list of str Extra command line arguments to pass to the compiler. extra_f77_compile_args : list of str Extra command line arguments to pass to the fortran77 compiler. extra_f90_compile_args : list of str Extra command line arguments to pass to the fortran90 compiler. """ def __init__( self, name, sources, include_dirs=None, define_macros=None, undef_macros=None, library_dirs=None, libraries=None, runtime_library_dirs=None, extra_objects=None, extra_compile_args=None, extra_link_args=None, export_symbols=None, swig_opts=None, depends=None, language=None, f2py_options=None, module_dirs=None, extra_c_compile_args=None, extra_cxx_compile_args=None, extra_f77_compile_args=None, extra_f90_compile_args=None,): old_Extension.__init__( self, name, [], include_dirs=include_dirs, define_macros=define_macros, undef_macros=undef_macros, library_dirs=library_dirs, libraries=libraries, runtime_library_dirs=runtime_library_dirs, extra_objects=extra_objects, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, export_symbols=export_symbols) # Avoid assert statements checking that sources contains strings: self.sources = sources # Python 2.4 distutils new features self.swig_opts = swig_opts or [] # swig_opts is assumed to be a list. Here we handle the case where it # is specified as a string instead. if isinstance(self.swig_opts, str): import warnings msg = "swig_opts is specified as a string instead of a list" warnings.warn(msg, SyntaxWarning, stacklevel=2) self.swig_opts = self.swig_opts.split() # Python 2.3 distutils new features self.depends = depends or [] self.language = language # numpy_distutils features self.f2py_options = f2py_options or [] self.module_dirs = module_dirs or [] self.extra_c_compile_args = extra_c_compile_args or [] self.extra_cxx_compile_args = extra_cxx_compile_args or [] self.extra_f77_compile_args = extra_f77_compile_args or [] self.extra_f90_compile_args = extra_f90_compile_args or [] return def has_cxx_sources(self): for source in self.sources: if cxx_ext_re(str(source)): return True return False def has_f2py_sources(self): for source in self.sources: if fortran_pyf_ext_re(source): return True return False # class Extension
3,568
Python
32.046296
81
0.589406
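A minimal sketch of the overridden Extension class above. The package and file names are placeholders and need not exist, because the constructor only records the source list:

from numpy.distutils.extension import Extension

ext = Extension(
    name='mypkg._flib',
    sources=['mypkg/flib.pyf', 'mypkg/flib.f90', 'mypkg/helpers.c'],
    extra_f90_compile_args=['-O3'],
)
print(ext.has_f2py_sources())   # True: the .pyf/.f90 sources select the f2py build path
print(ext.has_cxx_sources())    # False: no .cpp/.cxx/.cc sources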
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/cpuinfo.py
#!/usr/bin/env python3 """ cpuinfo Copyright 2002 Pearu Peterson all rights reserved, Pearu Peterson <[email protected]> Permission to use, modify, and distribute this software is given under the terms of the NumPy (BSD style) license. See LICENSE.txt that came with this distribution for specifics. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. Pearu Peterson """ __all__ = ['cpu'] import os import platform import re import sys import types import warnings from subprocess import getstatusoutput def getoutput(cmd, successful_status=(0,), stacklevel=1): try: status, output = getstatusoutput(cmd) except OSError as e: warnings.warn(str(e), UserWarning, stacklevel=stacklevel) return False, "" if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: return True, output return False, output def command_info(successful_status=(0,), stacklevel=1, **kw): info = {} for key in kw: ok, output = getoutput(kw[key], successful_status=successful_status, stacklevel=stacklevel+1) if ok: info[key] = output.strip() return info def command_by_line(cmd, successful_status=(0,), stacklevel=1): ok, output = getoutput(cmd, successful_status=successful_status, stacklevel=stacklevel+1) if not ok: return for line in output.splitlines(): yield line.strip() def key_value_from_command(cmd, sep, successful_status=(0,), stacklevel=1): d = {} for line in command_by_line(cmd, successful_status=successful_status, stacklevel=stacklevel+1): l = [s.strip() for s in line.split(sep, 1)] if len(l) == 2: d[l[0]] = l[1] return d class CPUInfoBase: """Holds CPU information and provides methods for requiring the availability of various CPU features. """ def _try_call(self, func): try: return func() except Exception: pass def __getattr__(self, name): if not name.startswith('_'): if hasattr(self, '_'+name): attr = getattr(self, '_'+name) if isinstance(attr, types.MethodType): return lambda func=self._try_call,attr=attr : func(attr) else: return lambda : None raise AttributeError(name) def _getNCPUs(self): return 1 def __get_nbits(self): abits = platform.architecture()[0] nbits = re.compile(r'(\d+)bit').search(abits).group(1) return nbits def _is_32bit(self): return self.__get_nbits() == '32' def _is_64bit(self): return self.__get_nbits() == '64' class LinuxCPUInfo(CPUInfoBase): info = None def __init__(self): if self.info is not None: return info = [ {} ] ok, output = getoutput('uname -m') if ok: info[0]['uname_m'] = output.strip() try: fo = open('/proc/cpuinfo') except OSError as e: warnings.warn(str(e), UserWarning, stacklevel=2) else: for line in fo: name_value = [s.strip() for s in line.split(':', 1)] if len(name_value) != 2: continue name, value = name_value if not info or name in info[-1]: # next processor info.append({}) info[-1][name] = value fo.close() self.__class__.info = info def _not_impl(self): pass # Athlon def _is_AMD(self): return self.info[0]['vendor_id']=='AuthenticAMD' def _is_AthlonK6_2(self): return self._is_AMD() and self.info[0]['model'] == '2' def _is_AthlonK6_3(self): return self._is_AMD() and self.info[0]['model'] == '3' def _is_AthlonK6(self): return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None def _is_AthlonK7(self): return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None def _is_AthlonMP(self): return re.match(r'.*?Athlon\(tm\) MP\b', self.info[0]['model name']) is not None def _is_AMD64(self): return self.is_AMD() and self.info[0]['family'] == '15' def _is_Athlon64(self): return re.match(r'.*?Athlon\(tm\) 64\b', self.info[0]['model name']) is not 
None def _is_AthlonHX(self): return re.match(r'.*?Athlon HX\b', self.info[0]['model name']) is not None def _is_Opteron(self): return re.match(r'.*?Opteron\b', self.info[0]['model name']) is not None def _is_Hammer(self): return re.match(r'.*?Hammer\b', self.info[0]['model name']) is not None # Alpha def _is_Alpha(self): return self.info[0]['cpu']=='Alpha' def _is_EV4(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' def _is_EV5(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' def _is_EV56(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' def _is_PCA56(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' # Intel #XXX _is_i386 = _not_impl def _is_Intel(self): return self.info[0]['vendor_id']=='GenuineIntel' def _is_i486(self): return self.info[0]['cpu']=='i486' def _is_i586(self): return self.is_Intel() and self.info[0]['cpu family'] == '5' def _is_i686(self): return self.is_Intel() and self.info[0]['cpu family'] == '6' def _is_Celeron(self): return re.match(r'.*?Celeron', self.info[0]['model name']) is not None def _is_Pentium(self): return re.match(r'.*?Pentium', self.info[0]['model name']) is not None def _is_PentiumII(self): return re.match(r'.*?Pentium.*?II\b', self.info[0]['model name']) is not None def _is_PentiumPro(self): return re.match(r'.*?PentiumPro\b', self.info[0]['model name']) is not None def _is_PentiumMMX(self): return re.match(r'.*?Pentium.*?MMX\b', self.info[0]['model name']) is not None def _is_PentiumIII(self): return re.match(r'.*?Pentium.*?III\b', self.info[0]['model name']) is not None def _is_PentiumIV(self): return re.match(r'.*?Pentium.*?(IV|4)\b', self.info[0]['model name']) is not None def _is_PentiumM(self): return re.match(r'.*?Pentium.*?M\b', self.info[0]['model name']) is not None def _is_Prescott(self): return self.is_PentiumIV() and self.has_sse3() def _is_Nocona(self): return (self.is_Intel() and (self.info[0]['cpu family'] == '6' or self.info[0]['cpu family'] == '15') and (self.has_sse3() and not self.has_ssse3()) and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) def _is_Core2(self): return (self.is_64bit() and self.is_Intel() and re.match(r'.*?Core\(TM\)2\b', self.info[0]['model name']) is not None) def _is_Itanium(self): return re.match(r'.*?Itanium\b', self.info[0]['family']) is not None def _is_XEON(self): return re.match(r'.*?XEON\b', self.info[0]['model name'], re.IGNORECASE) is not None _is_Xeon = _is_XEON # Varia def _is_singleCPU(self): return len(self.info) == 1 def _getNCPUs(self): return len(self.info) def _has_fdiv_bug(self): return self.info[0]['fdiv_bug']=='yes' def _has_f00f_bug(self): return self.info[0]['f00f_bug']=='yes' def _has_mmx(self): return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None def _has_sse(self): return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None def _has_sse2(self): return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None def _has_sse3(self): return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None def _has_ssse3(self): return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None def _has_3dnow(self): return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None def _has_3dnowext(self): return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None class IRIXCPUInfo(CPUInfoBase): info = None def __init__(self): if self.info is not None: return info = key_value_from_command('sysconf', sep=' ', successful_status=(0, 1)) self.__class__.info = info def 
_not_impl(self): pass def _is_singleCPU(self): return self.info.get('NUM_PROCESSORS') == '1' def _getNCPUs(self): return int(self.info.get('NUM_PROCESSORS', 1)) def __cputype(self, n): return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) def _is_r2000(self): return self.__cputype(2000) def _is_r3000(self): return self.__cputype(3000) def _is_r3900(self): return self.__cputype(3900) def _is_r4000(self): return self.__cputype(4000) def _is_r4100(self): return self.__cputype(4100) def _is_r4300(self): return self.__cputype(4300) def _is_r4400(self): return self.__cputype(4400) def _is_r4600(self): return self.__cputype(4600) def _is_r4650(self): return self.__cputype(4650) def _is_r5000(self): return self.__cputype(5000) def _is_r6000(self): return self.__cputype(6000) def _is_r8000(self): return self.__cputype(8000) def _is_r10000(self): return self.__cputype(10000) def _is_r12000(self): return self.__cputype(12000) def _is_rorion(self): return self.__cputype('orion') def get_ip(self): try: return self.info.get('MACHINE') except Exception: pass def __machine(self, n): return self.info.get('MACHINE').lower() == 'ip%s' % (n) def _is_IP19(self): return self.__machine(19) def _is_IP20(self): return self.__machine(20) def _is_IP21(self): return self.__machine(21) def _is_IP22(self): return self.__machine(22) def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() def _is_IP24(self): return self.__machine(24) def _is_IP25(self): return self.__machine(25) def _is_IP26(self): return self.__machine(26) def _is_IP27(self): return self.__machine(27) def _is_IP28(self): return self.__machine(28) def _is_IP30(self): return self.__machine(30) def _is_IP32(self): return self.__machine(32) def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() class DarwinCPUInfo(CPUInfoBase): info = None def __init__(self): if self.info is not None: return info = command_info(arch='arch', machine='machine') info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') self.__class__.info = info def _not_impl(self): pass def _getNCPUs(self): return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) def _is_Power_Macintosh(self): return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' def _is_i386(self): return self.info['arch']=='i386' def _is_ppc(self): return self.info['arch']=='ppc' def __machine(self, n): return self.info['machine'] == 'ppc%s'%n def _is_ppc601(self): return self.__machine(601) def _is_ppc602(self): return self.__machine(602) def _is_ppc603(self): return self.__machine(603) def _is_ppc603e(self): return self.__machine('603e') def _is_ppc604(self): return self.__machine(604) def _is_ppc604e(self): return self.__machine('604e') def _is_ppc620(self): return self.__machine(620) def _is_ppc630(self): return self.__machine(630) def _is_ppc740(self): return self.__machine(740) def _is_ppc7400(self): return self.__machine(7400) def _is_ppc7450(self): return self.__machine(7450) def _is_ppc750(self): return self.__machine(750) def _is_ppc403(self): return self.__machine(403) def _is_ppc505(self): return self.__machine(505) def _is_ppc801(self): return self.__machine(801) def _is_ppc821(self): return self.__machine(821) def _is_ppc823(self): return self.__machine(823) def _is_ppc860(self): return self.__machine(860) class SunOSCPUInfo(CPUInfoBase): info = None def __init__(self): if self.info is not None: return info = 
command_info(arch='arch', mach='mach', uname_i='uname_i', isainfo_b='isainfo -b', isainfo_n='isainfo -n', ) info['uname_X'] = key_value_from_command('uname -X', sep='=') for line in command_by_line('psrinfo -v 0'): m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line) if m: info['processor'] = m.group('p') break self.__class__.info = info def _not_impl(self): pass def _is_i386(self): return self.info['isainfo_n']=='i386' def _is_sparc(self): return self.info['isainfo_n']=='sparc' def _is_sparcv9(self): return self.info['isainfo_n']=='sparcv9' def _getNCPUs(self): return int(self.info['uname_X'].get('NumCPU', 1)) def _is_sun4(self): return self.info['arch']=='sun4' def _is_SUNW(self): return re.match(r'SUNW', self.info['uname_i']) is not None def _is_sparcstation5(self): return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None def _is_ultra1(self): return re.match(r'.*Ultra-1', self.info['uname_i']) is not None def _is_ultra250(self): return re.match(r'.*Ultra-250', self.info['uname_i']) is not None def _is_ultra2(self): return re.match(r'.*Ultra-2', self.info['uname_i']) is not None def _is_ultra30(self): return re.match(r'.*Ultra-30', self.info['uname_i']) is not None def _is_ultra4(self): return re.match(r'.*Ultra-4', self.info['uname_i']) is not None def _is_ultra5_10(self): return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None def _is_ultra5(self): return re.match(r'.*Ultra-5', self.info['uname_i']) is not None def _is_ultra60(self): return re.match(r'.*Ultra-60', self.info['uname_i']) is not None def _is_ultra80(self): return re.match(r'.*Ultra-80', self.info['uname_i']) is not None def _is_ultraenterprice(self): return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None def _is_ultraenterprice10k(self): return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None def _is_sunfire(self): return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None def _is_ultra(self): return re.match(r'.*Ultra', self.info['uname_i']) is not None def _is_cpusparcv7(self): return self.info['processor']=='sparcv7' def _is_cpusparcv8(self): return self.info['processor']=='sparcv8' def _is_cpusparcv9(self): return self.info['processor']=='sparcv9' class Win32CPUInfo(CPUInfoBase): info = None pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" # XXX: what does the value of # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 # mean? def __init__(self): if self.info is not None: return info = [] try: #XXX: Bad style to use so long `try:...except:...`. Fix it! 
import winreg prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)" r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE) chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey) pnum=0 while True: try: proc=winreg.EnumKey(chnd, pnum) except winreg.error: break else: pnum+=1 info.append({"Processor":proc}) phnd=winreg.OpenKey(chnd, proc) pidx=0 while True: try: name, value, vtpe=winreg.EnumValue(phnd, pidx) except winreg.error: break else: pidx=pidx+1 info[-1][name]=value if name=="Identifier": srch=prgx.search(value) if srch: info[-1]["Family"]=int(srch.group("FML")) info[-1]["Model"]=int(srch.group("MDL")) info[-1]["Stepping"]=int(srch.group("STP")) except Exception as e: print(e, '(ignoring)') self.__class__.info = info def _not_impl(self): pass # Athlon def _is_AMD(self): return self.info[0]['VendorIdentifier']=='AuthenticAMD' def _is_Am486(self): return self.is_AMD() and self.info[0]['Family']==4 def _is_Am5x86(self): return self.is_AMD() and self.info[0]['Family']==4 def _is_AMDK5(self): return self.is_AMD() and self.info[0]['Family']==5 \ and self.info[0]['Model'] in [0, 1, 2, 3] def _is_AMDK6(self): return self.is_AMD() and self.info[0]['Family']==5 \ and self.info[0]['Model'] in [6, 7] def _is_AMDK6_2(self): return self.is_AMD() and self.info[0]['Family']==5 \ and self.info[0]['Model']==8 def _is_AMDK6_3(self): return self.is_AMD() and self.info[0]['Family']==5 \ and self.info[0]['Model']==9 def _is_AMDK7(self): return self.is_AMD() and self.info[0]['Family'] == 6 # To reliably distinguish between the different types of AMD64 chips # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would # require looking at the 'brand' from cpuid def _is_AMD64(self): return self.is_AMD() and self.info[0]['Family'] == 15 # Intel def _is_Intel(self): return self.info[0]['VendorIdentifier']=='GenuineIntel' def _is_i386(self): return self.info[0]['Family']==3 def _is_i486(self): return self.info[0]['Family']==4 def _is_i586(self): return self.is_Intel() and self.info[0]['Family']==5 def _is_i686(self): return self.is_Intel() and self.info[0]['Family']==6 def _is_Pentium(self): return self.is_Intel() and self.info[0]['Family']==5 def _is_PentiumMMX(self): return self.is_Intel() and self.info[0]['Family']==5 \ and self.info[0]['Model']==4 def _is_PentiumPro(self): return self.is_Intel() and self.info[0]['Family']==6 \ and self.info[0]['Model']==1 def _is_PentiumII(self): return self.is_Intel() and self.info[0]['Family']==6 \ and self.info[0]['Model'] in [3, 5, 6] def _is_PentiumIII(self): return self.is_Intel() and self.info[0]['Family']==6 \ and self.info[0]['Model'] in [7, 8, 9, 10, 11] def _is_PentiumIV(self): return self.is_Intel() and self.info[0]['Family']==15 def _is_PentiumM(self): return self.is_Intel() and self.info[0]['Family'] == 6 \ and self.info[0]['Model'] in [9, 13, 14] def _is_Core2(self): return self.is_Intel() and self.info[0]['Family'] == 6 \ and self.info[0]['Model'] in [15, 16, 17] # Varia def _is_singleCPU(self): return len(self.info) == 1 def _getNCPUs(self): return len(self.info) def _has_mmx(self): if self.is_Intel(): return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ or (self.info[0]['Family'] in [6, 15]) elif self.is_AMD(): return self.info[0]['Family'] in [5, 6, 15] else: return False def _has_sse(self): if self.is_Intel(): return ((self.info[0]['Family']==6 and self.info[0]['Model'] in [7, 8, 9, 10, 11]) or self.info[0]['Family']==15) elif self.is_AMD(): return ((self.info[0]['Family']==6 and self.info[0]['Model'] in [6, 7, 8, 10]) or 
self.info[0]['Family']==15) else: return False def _has_sse2(self): if self.is_Intel(): return self.is_Pentium4() or self.is_PentiumM() \ or self.is_Core2() elif self.is_AMD(): return self.is_AMD64() else: return False def _has_3dnow(self): return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] def _has_3dnowext(self): return self.is_AMD() and self.info[0]['Family'] in [6, 15] if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) cpuinfo = LinuxCPUInfo elif sys.platform.startswith('irix'): cpuinfo = IRIXCPUInfo elif sys.platform == 'darwin': cpuinfo = DarwinCPUInfo elif sys.platform.startswith('sunos'): cpuinfo = SunOSCPUInfo elif sys.platform.startswith('win32'): cpuinfo = Win32CPUInfo elif sys.platform.startswith('cygwin'): cpuinfo = LinuxCPUInfo #XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. else: cpuinfo = CPUInfoBase cpu = cpuinfo() #if __name__ == "__main__": # # cpu.is_blaa() # cpu.is_Intel() # cpu.is_Alpha() # # print('CPU information:'), # for name in dir(cpuinfo): # if name[0]=='_' and name[1]!='_': # r = getattr(cpu,name[1:])() # if r: # if r!=1: # print('%s=%s' %(name[1:],r)) # else: # print(name[1:]), # print()
22,639
Python
32.099415
86
0.543708
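A minimal sketch of querying the module-level `cpu` singleton defined at the bottom of cpuinfo.py above. The printed values depend on the host, and probes that cannot be answered on the current platform simply return a false-ish value via CPUInfoBase.__getattr__:

from numpy.distutils.cpuinfo import cpu

print(cpu.getNCPUs())     # processor count reported for this platform
print(cpu.is_64bit())     # derived from platform.architecture()
print(cpu.is_Intel())     # vendor probe; None/False when it cannot be determined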
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/_shell_utils.py
""" Helper functions for interacting with the shell, and consuming shell-style parameters provided in config files. """ import os import shlex import subprocess try: from shlex import quote except ImportError: from pipes import quote __all__ = ['WindowsParser', 'PosixParser', 'NativeParser'] class CommandLineParser: """ An object that knows how to split and join command-line arguments. It must be true that ``argv == split(join(argv))`` for all ``argv``. The reverse neednt be true - `join(split(cmd))` may result in the addition or removal of unnecessary escaping. """ @staticmethod def join(argv): """ Join a list of arguments into a command line string """ raise NotImplementedError @staticmethod def split(cmd): """ Split a command line string into a list of arguments """ raise NotImplementedError class WindowsParser: """ The parsing behavior used by `subprocess.call("string")` on Windows, which matches the Microsoft C/C++ runtime. Note that this is _not_ the behavior of cmd. """ @staticmethod def join(argv): # note that list2cmdline is specific to the windows syntax return subprocess.list2cmdline(argv) @staticmethod def split(cmd): import ctypes # guarded import for systems without ctypes try: ctypes.windll except AttributeError: raise NotImplementedError # Windows has special parsing rules for the executable (no quotes), # that we do not care about - insert a dummy element if not cmd: return [] cmd = 'dummy ' + cmd CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p) CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int)) nargs = ctypes.c_int() lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs)) args = [lpargs[i] for i in range(nargs.value)] assert not ctypes.windll.kernel32.LocalFree(lpargs) # strip the element we inserted assert args[0] == "dummy" return args[1:] class PosixParser: """ The parsing behavior used by `subprocess.call("string", shell=True)` on Posix. """ @staticmethod def join(argv): return ' '.join(quote(arg) for arg in argv) @staticmethod def split(cmd): return shlex.split(cmd, posix=True) if os.name == 'nt': NativeParser = WindowsParser elif os.name == 'posix': NativeParser = PosixParser
2,613
Python
27.413043
86
0.652889
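A minimal sketch of the round-trip guarantee documented in CommandLineParser above (``argv == split(join(argv))``); the example arguments are arbitrary:

from numpy.distutils._shell_utils import PosixParser, NativeParser

argv = ['gcc', '-I/opt/include dir', '-DNAME="quoted value"']
cmd = PosixParser.join(argv)
assert PosixParser.split(cmd) == argv    # holds for any argv

# NativeParser is PosixParser on posix and WindowsParser on nt;
# WindowsParser.split needs ctypes.windll, so it is Windows-only.
print(NativeParser.join(argv))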
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/__init__.py
""" An enhanced distutils, providing support for Fortran compilers, for BLAS, LAPACK and other common libraries for numerical computing, and more. Public submodules are:: misc_util system_info cpu_info log exec_command For details, please see the *Packaging* and *NumPy Distutils User Guide* sections of the NumPy Reference Guide. For configuring the preference for and location of libraries like BLAS and LAPACK, and for setting include paths and similar build options, please see ``site.cfg.example`` in the root of the NumPy repository or sdist. """ import warnings # Must import local ccompiler ASAP in order to get # customized CCompiler.spawn effective. from . import ccompiler from . import unixccompiler from .npy_pkg_config import * warnings.warn("\n\n" " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n" " of the deprecation of `distutils` itself. It will be removed for\n" " Python >= 3.12. For older Python versions it will remain present.\n" " It is recommended to use `setuptools < 60.0` for those Python versions.\n" " For more details, see:\n" " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n", DeprecationWarning, stacklevel=2 ) del warnings # If numpy is installed, add distutils.test() try: from . import __config__ # Normally numpy is installed if the above import works, but an interrupted # in-place build could also have left a __config__.py. In that case the # next import may still fail, so keep it inside the try block. from numpy._pytesttester import PytestTester test = PytestTester(__name__) del PytestTester except ImportError: pass def customized_fcompiler(plat=None, compiler=None): from numpy.distutils.fcompiler import new_fcompiler c = new_fcompiler(plat=plat, compiler=compiler) c.customize() return c def customized_ccompiler(plat=None, compiler=None, verbose=1): c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose) c.customize('') return c
2,074
Python
30.923076
83
0.725651
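A minimal sketch of the convenience constructors above. Importing numpy.distutils emits the DeprecationWarning shown in the module, so it is silenced here, and the reported compiler class depends on the platform:

import warnings

with warnings.catch_warnings():
    warnings.simplefilter('ignore', DeprecationWarning)
    from numpy.distutils import customized_ccompiler

cc = customized_ccompiler()      # platform-default C compiler, already customize()'d
print(type(cc).__name__)         # e.g. UnixCCompiler on Linux, MSVCCompiler on Windows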
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/intelccompiler.py
import platform from distutils.unixccompiler import UnixCCompiler from numpy.distutils.exec_command import find_executable from numpy.distutils.ccompiler import simple_version_match if platform.system() == 'Windows': from numpy.distutils.msvc9compiler import MSVCCompiler class IntelCCompiler(UnixCCompiler): """A modified Intel compiler compatible with a GCC-built Python.""" compiler_type = 'intel' cc_exe = 'icc' cc_args = 'fPIC' def __init__(self, verbose=0, dry_run=0, force=0): UnixCCompiler.__init__(self, verbose, dry_run, force) v = self.get_version() mpopt = 'openmp' if v and v < '15' else 'qopenmp' self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' '-fomit-frame-pointer -{}').format(mpopt) compiler = self.cc_exe if platform.system() == 'Darwin': shared_flag = '-Wl,-undefined,dynamic_lookup' else: shared_flag = '-shared' self.set_executables(compiler=compiler, compiler_so=compiler, compiler_cxx=compiler, archiver='xiar' + ' cru', linker_exe=compiler + ' -shared-intel', linker_so=compiler + ' ' + shared_flag + ' -shared-intel') class IntelItaniumCCompiler(IntelCCompiler): compiler_type = 'intele' # On Itanium, the Intel Compiler used to be called ecc, let's search for # it (now it's also icc, so ecc is last in the search). for cc_exe in map(find_executable, ['icc', 'ecc']): if cc_exe: break class IntelEM64TCCompiler(UnixCCompiler): """ A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. """ compiler_type = 'intelem' cc_exe = 'icc -m64' cc_args = '-fPIC' def __init__(self, verbose=0, dry_run=0, force=0): UnixCCompiler.__init__(self, verbose, dry_run, force) v = self.get_version() mpopt = 'openmp' if v and v < '15' else 'qopenmp' self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' '-fomit-frame-pointer -{}').format(mpopt) compiler = self.cc_exe if platform.system() == 'Darwin': shared_flag = '-Wl,-undefined,dynamic_lookup' else: shared_flag = '-shared' self.set_executables(compiler=compiler, compiler_so=compiler, compiler_cxx=compiler, archiver='xiar' + ' cru', linker_exe=compiler + ' -shared-intel', linker_so=compiler + ' ' + shared_flag + ' -shared-intel') if platform.system() == 'Windows': class IntelCCompilerW(MSVCCompiler): """ A modified Intel compiler compatible with an MSVC-built Python. """ compiler_type = 'intelw' compiler_cxx = 'icl' def __init__(self, verbose=0, dry_run=0, force=0): MSVCCompiler.__init__(self, verbose, dry_run, force) version_match = simple_version_match(start=r'Intel\(R\).*?32,') self.__version = version_match def initialize(self, plat_name=None): MSVCCompiler.initialize(self, plat_name) self.cc = self.find_exe('icl.exe') self.lib = self.find_exe('xilib') self.linker = self.find_exe('xilink') self.compile_options = ['/nologo', '/O3', '/MD', '/W3', '/Qstd=c99'] self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/Qstd=c99', '/Z7', '/D_DEBUG'] class IntelEM64TCCompilerW(IntelCCompilerW): """ A modified Intel x86_64 compiler compatible with a 64bit MSVC-built Python. """ compiler_type = 'intelemw' def __init__(self, verbose=0, dry_run=0, force=0): MSVCCompiler.__init__(self, verbose, dry_run, force) version_match = simple_version_match(start=r'Intel\(R\).*?64,') self.__version = version_match
4,234
Python
36.8125
78
0.541096
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/core.py
import sys from distutils.core import Distribution if 'setuptools' in sys.modules: have_setuptools = True from setuptools import setup as old_setup # easy_install imports math, it may be picked up from cwd from setuptools.command import easy_install try: # very old versions of setuptools don't have this from setuptools.command import bdist_egg except ImportError: have_setuptools = False else: from distutils.core import setup as old_setup have_setuptools = False import warnings import distutils.core import distutils.dist from numpy.distutils.extension import Extension # noqa: F401 from numpy.distutils.numpy_distribution import NumpyDistribution from numpy.distutils.command import config, config_compiler, \ build, build_py, build_ext, build_clib, build_src, build_scripts, \ sdist, install_data, install_headers, install, bdist_rpm, \ install_clib from numpy.distutils.misc_util import is_sequence, is_string numpy_cmdclass = {'build': build.build, 'build_src': build_src.build_src, 'build_scripts': build_scripts.build_scripts, 'config_cc': config_compiler.config_cc, 'config_fc': config_compiler.config_fc, 'config': config.config, 'build_ext': build_ext.build_ext, 'build_py': build_py.build_py, 'build_clib': build_clib.build_clib, 'sdist': sdist.sdist, 'install_data': install_data.install_data, 'install_headers': install_headers.install_headers, 'install_clib': install_clib.install_clib, 'install': install.install, 'bdist_rpm': bdist_rpm.bdist_rpm, } if have_setuptools: # Use our own versions of develop and egg_info to ensure that build_src is # handled appropriately. from numpy.distutils.command import develop, egg_info numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg numpy_cmdclass['develop'] = develop.develop numpy_cmdclass['easy_install'] = easy_install.easy_install numpy_cmdclass['egg_info'] = egg_info.egg_info def _dict_append(d, **kws): for k, v in kws.items(): if k not in d: d[k] = v continue dv = d[k] if isinstance(dv, tuple): d[k] = dv + tuple(v) elif isinstance(dv, list): d[k] = dv + list(v) elif isinstance(dv, dict): _dict_append(dv, **v) elif is_string(dv): d[k] = dv + v else: raise TypeError(repr(type(dv))) def _command_line_ok(_cache=None): """ Return True if command line does not contain any help or display requests. """ if _cache: return _cache[0] elif _cache is None: _cache = [] ok = True display_opts = ['--'+n for n in Distribution.display_option_names] for o in Distribution.display_options: if o[1]: display_opts.append('-'+o[1]) for arg in sys.argv: if arg.startswith('--help') or arg=='-h' or arg in display_opts: ok = False break _cache.append(ok) return ok def get_distribution(always=False): dist = distutils.core._setup_distribution # XXX Hack to get numpy installable with easy_install. # The problem is easy_install runs it's own setup(), which # sets up distutils.core._setup_distribution. However, # when our setup() runs, that gets overwritten and lost. # We can't use isinstance, as the DistributionWithoutHelpCommands # class is local to a function in setuptools.command.easy_install if dist is not None and \ 'DistributionWithoutHelpCommands' in repr(dist): dist = None if always and dist is None: dist = NumpyDistribution() return dist def setup(**attr): cmdclass = numpy_cmdclass.copy() new_attr = attr.copy() if 'cmdclass' in new_attr: cmdclass.update(new_attr['cmdclass']) new_attr['cmdclass'] = cmdclass if 'configuration' in new_attr: # To avoid calling configuration if there are any errors # or help request in command in the line. 
configuration = new_attr.pop('configuration') old_dist = distutils.core._setup_distribution old_stop = distutils.core._setup_stop_after distutils.core._setup_distribution = None distutils.core._setup_stop_after = "commandline" try: dist = setup(**new_attr) finally: distutils.core._setup_distribution = old_dist distutils.core._setup_stop_after = old_stop if dist.help or not _command_line_ok(): # probably displayed help, skip running any commands return dist # create setup dictionary and append to new_attr config = configuration() if hasattr(config, 'todict'): config = config.todict() _dict_append(new_attr, **config) # Move extension source libraries to libraries libraries = [] for ext in new_attr.get('ext_modules', []): new_libraries = [] for item in ext.libraries: if is_sequence(item): lib_name, build_info = item _check_append_ext_library(libraries, lib_name, build_info) new_libraries.append(lib_name) elif is_string(item): new_libraries.append(item) else: raise TypeError("invalid description of extension module " "library %r" % (item,)) ext.libraries = new_libraries if libraries: if 'libraries' not in new_attr: new_attr['libraries'] = [] for item in libraries: _check_append_library(new_attr['libraries'], item) # sources in ext_modules or libraries may contain header files if ('ext_modules' in new_attr or 'libraries' in new_attr) \ and 'headers' not in new_attr: new_attr['headers'] = [] # Use our custom NumpyDistribution class instead of distutils' one new_attr['distclass'] = NumpyDistribution return old_setup(**new_attr) def _check_append_library(libraries, item): for libitem in libraries: if is_sequence(libitem): if is_sequence(item): if item[0]==libitem[0]: if item[1] is libitem[1]: return warnings.warn("[0] libraries list contains %r with" " different build_info" % (item[0],), stacklevel=2) break else: if item==libitem[0]: warnings.warn("[1] libraries list contains %r with" " no build_info" % (item[0],), stacklevel=2) break else: if is_sequence(item): if item[0]==libitem: warnings.warn("[2] libraries list contains %r with" " no build_info" % (item[0],), stacklevel=2) break else: if item==libitem: return libraries.append(item) def _check_append_ext_library(libraries, lib_name, build_info): for item in libraries: if is_sequence(item): if item[0]==lib_name: if item[1] is build_info: return warnings.warn("[3] libraries list contains %r with" " different build_info" % (lib_name,), stacklevel=2) break elif item==lib_name: warnings.warn("[4] libraries list contains %r with" " no build_info" % (lib_name,), stacklevel=2) break libraries.append((lib_name, build_info))
8,173
Python
36.842592
78
0.558913
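The `setup()` wrapper above special-cases a `configuration` keyword. A minimal sketch of the usual configuration-driven setup.py, with placeholder package and source names:

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('mypkg', parent_package, top_path)
    config.add_extension('_flib', sources=['mypkg/flib.pyf', 'mypkg/flib.f90'])
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)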
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/setup.py
#!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('distutils', parent_package, top_path)
    config.add_subpackage('command')
    config.add_subpackage('fcompiler')
    config.add_subpackage('tests')
    config.add_data_files('site.cfg')
    config.add_data_files('mingw/gfortran_vs2003_hack.c')
    config.add_data_dir('checks')
    config.add_data_files('*.pyi')
    config.make_config_py()
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
634
Python
34.277776
65
0.695584
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/misc_util.py
import os import re import sys import copy import glob import atexit import tempfile import subprocess import shutil import multiprocessing import textwrap import importlib.util from threading import local as tlocal from functools import reduce import distutils from distutils.errors import DistutilsError # stores temporary directory of each thread to only create one per thread _tdata = tlocal() # store all created temporary directories so they can be deleted on exit _tmpdirs = [] def clean_up_temporary_directory(): if _tmpdirs is not None: for d in _tmpdirs: try: shutil.rmtree(d) except OSError: pass atexit.register(clean_up_temporary_directory) __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', 'dict_append', 'appendpath', 'generate_config_py', 'get_cmd', 'allpath', 'get_mathlibs', 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', 'has_f_sources', 'has_cxx_sources', 'filter_sources', 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', 'get_script_files', 'get_lib_source_files', 'get_data_files', 'dot_join', 'get_frame', 'minrelpath', 'njoin', 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', 'get_build_architecture', 'get_info', 'get_pkg_info', 'get_num_build_jobs', 'sanitize_cxx_flags', 'exec_mod_from_location'] class InstallableLib: """ Container to hold information on an installable library. Parameters ---------- name : str Name of the installed library. build_info : dict Dictionary holding build information. target_dir : str Absolute path specifying where to install the library. See Also -------- Configuration.add_installed_library Notes ----- The three parameters are stored as attributes with the same names. """ def __init__(self, name, build_info, target_dir): self.name = name self.build_info = build_info self.target_dir = target_dir def get_num_build_jobs(): """ Get number of parallel build jobs set by the --parallel command line argument of setup.py If the command did not receive a setting the environment variable NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of processors on the system, with a maximum of 8 (to prevent overloading the system if there a lot of CPUs). Returns ------- out : int number of parallel jobs that can be run """ from numpy.distutils.core import get_distribution try: cpu_count = len(os.sched_getaffinity(0)) except AttributeError: cpu_count = multiprocessing.cpu_count() cpu_count = min(cpu_count, 8) envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) dist = get_distribution() # may be None during configuration if dist is None: return envjobs # any of these three may have the job set, take the largest cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), getattr(dist.get_command_obj('build_ext'), 'parallel', None), getattr(dist.get_command_obj('build_clib'), 'parallel', None)) if all(x is None for x in cmdattr): return envjobs else: return max(x for x in cmdattr if x is not None) def quote_args(args): """Quote list of arguments. .. deprecated:: 1.22. """ import warnings warnings.warn('"quote_args" is deprecated.', DeprecationWarning, stacklevel=2) # don't used _nt_quote_args as it does not check if # args items already have quotes or not. args = list(args) for i in range(len(args)): a = args[i] if ' ' in a and a[0] not in '"\'': args[i] = '"%s"' % (a) return args def allpath(name): "Convert a /-separated pathname to one using the OS's path separator." 
split = name.split('/') return os.path.join(*split) def rel_path(path, parent_path): """Return path relative to parent_path.""" # Use realpath to avoid issues with symlinked dirs (see gh-7707) pd = os.path.realpath(os.path.abspath(parent_path)) apath = os.path.realpath(os.path.abspath(path)) if len(apath) < len(pd): return path if apath == pd: return '' if pd == apath[:len(pd)]: assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) path = apath[len(pd)+1:] return path def get_path_from_frame(frame, parent_path=None): """Return path of the module given a frame object from the call stack. Returned path is relative to parent_path when given, otherwise it is absolute path. """ # First, try to find if the file name is in the frame. try: caller_file = eval('__file__', frame.f_globals, frame.f_locals) d = os.path.dirname(os.path.abspath(caller_file)) except NameError: # __file__ is not defined, so let's try __name__. We try this second # because setuptools spoofs __name__ to be '__main__' even though # sys.modules['__main__'] might be something else, like easy_install(1). caller_name = eval('__name__', frame.f_globals, frame.f_locals) __import__(caller_name) mod = sys.modules[caller_name] if hasattr(mod, '__file__'): d = os.path.dirname(os.path.abspath(mod.__file__)) else: # we're probably running setup.py as execfile("setup.py") # (likely we're building an egg) d = os.path.abspath('.') if parent_path is not None: d = rel_path(d, parent_path) return d or '.' def njoin(*path): """Join two or more pathname components + - convert a /-separated pathname to one using the OS's path separator. - resolve `..` and `.` from path. Either passing n arguments as in njoin('a','b'), or a sequence of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. """ paths = [] for p in path: if is_sequence(p): # njoin(['a', 'b'], 'c') paths.append(njoin(*p)) else: assert is_string(p) paths.append(p) path = paths if not path: # njoin() joined = '' else: # njoin('a', 'b') joined = os.path.join(*path) if os.path.sep != '/': joined = joined.replace('/', os.path.sep) return minrelpath(joined) def get_mathlibs(path=None): """Return the MATHLIB line from numpyconfig.h """ if path is not None: config_file = os.path.join(path, '_numpyconfig.h') else: # Look for the file in each of the numpy include directories. dirs = get_numpy_include_dirs() for path in dirs: fn = os.path.join(path, '_numpyconfig.h') if os.path.exists(fn): config_file = fn break else: raise DistutilsError('_numpyconfig.h not found in numpy include ' 'dirs %r' % (dirs,)) with open(config_file) as fid: mathlibs = [] s = '#define MATHLIB' for line in fid: if line.startswith(s): value = line[len(s):].strip() if value: mathlibs.extend(value.split(',')) return mathlibs def minrelpath(path): """Resolve `..` and '.' from path. """ if not is_string(path): return path if '.' not in path: return path l = path.split(os.sep) while l: try: i = l.index('.', 1) except ValueError: break del l[i] j = 1 while l: try: i = l.index('..', j) except ValueError: break if l[i-1]=='..': j += 1 else: del l[i], l[i-1] j = 1 if not l: return '' return os.sep.join(l) def sorted_glob(fileglob): """sorts output of python glob for https://bugs.python.org/issue30461 to allow extensions to have reproducible build results""" return sorted(glob.glob(fileglob)) def _fix_paths(paths, local_path, include_non_existing): assert is_sequence(paths), repr(type(paths)) new_paths = [] assert not is_string(paths), repr(paths) for n in paths: if is_string(n): if '*' in n or '?' 
in n: p = sorted_glob(n) p2 = sorted_glob(njoin(local_path, n)) if p2: new_paths.extend(p2) elif p: new_paths.extend(p) else: if include_non_existing: new_paths.append(n) print('could not resolve pattern in %r: %r' % (local_path, n)) else: n2 = njoin(local_path, n) if os.path.exists(n2): new_paths.append(n2) else: if os.path.exists(n): new_paths.append(n) elif include_non_existing: new_paths.append(n) if not os.path.exists(n): print('non-existing path in %r: %r' % (local_path, n)) elif is_sequence(n): new_paths.extend(_fix_paths(n, local_path, include_non_existing)) else: new_paths.append(n) return [minrelpath(p) for p in new_paths] def gpaths(paths, local_path='', include_non_existing=True): """Apply glob to paths and prepend local_path if needed. """ if is_string(paths): paths = (paths,) return _fix_paths(paths, local_path, include_non_existing) def make_temp_file(suffix='', prefix='', text=True): if not hasattr(_tdata, 'tempdir'): _tdata.tempdir = tempfile.mkdtemp() _tmpdirs.append(_tdata.tempdir) fid, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=_tdata.tempdir, text=text) fo = os.fdopen(fid, 'w') return fo, name # Hooks for colored terminal output. # See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle def terminal_has_colors(): if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: # Avoid importing curses that causes illegal operation # with a message: # PYTHON2 caused an invalid page fault in # module CYGNURSES7.DLL as 015f:18bbfc28 # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] # ssh to Win32 machine from debian # curses.version is 2.2 # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) return 0 if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): try: import curses curses.setupterm() if (curses.tigetnum("colors") >= 0 and curses.tigetnum("pairs") >= 0 and ((curses.tigetstr("setf") is not None and curses.tigetstr("setb") is not None) or (curses.tigetstr("setaf") is not None and curses.tigetstr("setab") is not None) or curses.tigetstr("scp") is not None)): return 1 except Exception: pass return 0 if terminal_has_colors(): _colour_codes = dict(black=0, red=1, green=2, yellow=3, blue=4, magenta=5, cyan=6, white=7, default=9) def colour_text(s, fg=None, bg=None, bold=False): seq = [] if bold: seq.append('1') if fg: fgcode = 30 + _colour_codes.get(fg.lower(), 0) seq.append(str(fgcode)) if bg: bgcode = 40 + _colour_codes.get(fg.lower(), 7) seq.append(str(bgcode)) if seq: return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) else: return s else: def colour_text(s, fg=None, bg=None): return s def default_text(s): return colour_text(s, 'default') def red_text(s): return colour_text(s, 'red') def green_text(s): return colour_text(s, 'green') def yellow_text(s): return colour_text(s, 'yellow') def cyan_text(s): return colour_text(s, 'cyan') def blue_text(s): return colour_text(s, 'blue') ######################### def cyg2win32(path: str) -> str: """Convert a path from Cygwin-native to Windows-native. Uses the cygpath utility (part of the Base install) to do the actual conversion. Falls back to returning the original path if this fails. 
Handles the default ``/cygdrive`` mount prefix as well as the ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or ``/home/username`` Parameters ---------- path : str The path to convert Returns ------- converted_path : str The converted path Notes ----- Documentation for cygpath utility: https://cygwin.com/cygwin-ug-net/cygpath.html Documentation for the C function it wraps: https://cygwin.com/cygwin-api/func-cygwin-conv-path.html """ if sys.platform != "cygwin": return path return subprocess.check_output( ["/usr/bin/cygpath", "--windows", path], universal_newlines=True ) def mingw32(): """Return true when using mingw32 environment. """ if sys.platform=='win32': if os.environ.get('OSTYPE', '')=='msys': return True if os.environ.get('MSYSTEM', '')=='MINGW32': return True return False def msvc_runtime_version(): "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" msc_pos = sys.version.find('MSC v.') if msc_pos != -1: msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) else: msc_ver = None return msc_ver def msvc_runtime_library(): "Return name of MSVC runtime library if Python was built with MSVC >= 7" ver = msvc_runtime_major () if ver: if ver < 140: return "msvcr%i" % ver else: return "vcruntime%i" % ver else: return None def msvc_runtime_major(): "Return major version of MSVC runtime coded like get_build_msvc_version" major = {1300: 70, # MSVC 7.0 1310: 71, # MSVC 7.1 1400: 80, # MSVC 8 1500: 90, # MSVC 9 (aka 2008) 1600: 100, # MSVC 10 (aka 2010) 1900: 140, # MSVC 14 (aka 2015) }.get(msvc_runtime_version(), None) return major ######################### #XXX need support for .C that is also C++ cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match def _get_f90_modules(source): """Return a list of Fortran f90 module names that given source file defines. """ if not f90_ext_match(source): return [] modules = [] with open(source, 'r') as f: for line in f: m = f90_module_name_match(line) if m: name = m.group('name') modules.append(name) # break # XXX can we assume that there is one module per file? return modules def is_string(s): return isinstance(s, str) def all_strings(lst): """Return True if all items in lst are string objects. """ for item in lst: if not is_string(item): return False return True def is_sequence(seq): if is_string(seq): return False try: len(seq) except Exception: return False return True def is_glob_pattern(s): return is_string(s) and ('*' in s or '?' 
in s) def as_list(seq): if is_sequence(seq): return list(seq) else: return [seq] def get_language(sources): # not used in numpy/scipy packages, use build_ext.detect_language instead """Determine language value (c,f77,f90) from sources """ language = None for source in sources: if isinstance(source, str): if f90_ext_match(source): language = 'f90' break elif fortran_ext_match(source): language = 'f77' return language def has_f_sources(sources): """Return True if sources contains Fortran files """ for source in sources: if fortran_ext_match(source): return True return False def has_cxx_sources(sources): """Return True if sources contains C++ files """ for source in sources: if cxx_ext_match(source): return True return False def filter_sources(sources): """Return four lists of filenames containing C, C++, Fortran, and Fortran 90 module sources, respectively. """ c_sources = [] cxx_sources = [] f_sources = [] fmodule_sources = [] for source in sources: if fortran_ext_match(source): modules = _get_f90_modules(source) if modules: fmodule_sources.append(source) else: f_sources.append(source) elif cxx_ext_match(source): cxx_sources.append(source) else: c_sources.append(source) return c_sources, cxx_sources, f_sources, fmodule_sources def _get_headers(directory_list): # get *.h files from list of directories headers = [] for d in directory_list: head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? headers.extend(head) return headers def _get_directories(list_of_sources): # get unique directories from list of sources. direcs = [] for f in list_of_sources: d = os.path.split(f) if d[0] != '' and not d[0] in direcs: direcs.append(d[0]) return direcs def _commandline_dep_string(cc_args, extra_postargs, pp_opts): """ Return commandline representation used to determine if a file needs to be recompiled """ cmdline = 'commandline: ' cmdline += ' '.join(cc_args) cmdline += ' '.join(extra_postargs) cmdline += ' '.join(pp_opts) + '\n' return cmdline def get_dependencies(sources): #XXX scan sources for include statements return _get_headers(_get_directories(sources)) def is_local_src_dir(directory): """Return true if directory is local directory. """ if not is_string(directory): return False abs_dir = os.path.abspath(directory) c = os.path.commonprefix([os.getcwd(), abs_dir]) new_dir = abs_dir[len(c):].split(os.sep) if new_dir and not new_dir[0]: new_dir = new_dir[1:] if new_dir and new_dir[0]=='build': return False new_dir = os.sep.join(new_dir) return os.path.isdir(new_dir) def general_source_files(top_path): pruned_directories = {'CVS':1, '.svn':1, 'build':1} prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): pruned = [ d for d in dirnames if d not in pruned_directories ] dirnames[:] = pruned for f in filenames: if not prune_file_pat.search(f): yield os.path.join(dirpath, f) def general_source_directories_files(top_path): """Return a directory name relative to top_path and files contained. 
""" pruned_directories = ['CVS', '.svn', 'build'] prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): pruned = [ d for d in dirnames if d not in pruned_directories ] dirnames[:] = pruned for d in dirnames: dpath = os.path.join(dirpath, d) rpath = rel_path(dpath, top_path) files = [] for f in os.listdir(dpath): fn = os.path.join(dpath, f) if os.path.isfile(fn) and not prune_file_pat.search(fn): files.append(fn) yield rpath, files dpath = top_path rpath = rel_path(dpath, top_path) filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ if not prune_file_pat.search(f)] files = [f for f in filenames if os.path.isfile(f)] yield rpath, files def get_ext_source_files(ext): # Get sources and any include files in the same directory. filenames = [] sources = [_m for _m in ext.sources if is_string(_m)] filenames.extend(sources) filenames.extend(get_dependencies(sources)) for d in ext.depends: if is_local_src_dir(d): filenames.extend(list(general_source_files(d))) elif os.path.isfile(d): filenames.append(d) return filenames def get_script_files(scripts): scripts = [_m for _m in scripts if is_string(_m)] return scripts def get_lib_source_files(lib): filenames = [] sources = lib[1].get('sources', []) sources = [_m for _m in sources if is_string(_m)] filenames.extend(sources) filenames.extend(get_dependencies(sources)) depends = lib[1].get('depends', []) for d in depends: if is_local_src_dir(d): filenames.extend(list(general_source_files(d))) elif os.path.isfile(d): filenames.append(d) return filenames def get_shared_lib_extension(is_python_ext=False): """Return the correct file extension for shared libraries. Parameters ---------- is_python_ext : bool, optional Whether the shared library is a Python extension. Default is False. Returns ------- so_ext : str The shared library extension. Notes ----- For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on POSIX systems according to PEP 3149. """ confvars = distutils.sysconfig.get_config_vars() so_ext = confvars.get('EXT_SUFFIX', '') if not is_python_ext: # hardcode known values, config vars (including SHLIB_SUFFIX) are # unreliable (see #3182) # darwin, windows and debug linux are wrong in 3.3.1 and older if (sys.platform.startswith('linux') or sys.platform.startswith('gnukfreebsd')): so_ext = '.so' elif sys.platform.startswith('darwin'): so_ext = '.dylib' elif sys.platform.startswith('win'): so_ext = '.dll' else: # fall back to config vars for unknown platforms # fix long extension for Python >=3.2, see PEP 3149. if 'SOABI' in confvars: # Does nothing unless SOABI config var exists so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) return so_ext def get_data_files(data): if is_string(data): return [data] sources = data[1] filenames = [] for s in sources: if hasattr(s, '__call__'): continue if is_local_src_dir(s): filenames.extend(list(general_source_files(s))) elif is_string(s): if os.path.isfile(s): filenames.append(s) else: print('Not existing data file:', s) else: raise TypeError(repr(s)) return filenames def dot_join(*args): return '.'.join([a for a in args if a]) def get_frame(level=0): """Return frame object from call stack with given level. 
""" try: return sys._getframe(level+1) except AttributeError: frame = sys.exc_info()[2].tb_frame for _ in range(level+1): frame = frame.f_back return frame ###################### class Configuration: _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', 'libraries', 'headers', 'scripts', 'py_modules', 'installed_libraries', 'define_macros'] _dict_keys = ['package_dir', 'installed_pkg_config'] _extra_keys = ['name', 'version'] numpy_include_dirs = [] def __init__(self, package_name=None, parent_name=None, top_path=None, package_path=None, caller_level=1, setup_name='setup.py', **attrs): """Construct configuration instance of a package. package_name -- name of the package Ex.: 'distutils' parent_name -- name of the parent package Ex.: 'numpy' top_path -- directory of the toplevel package Ex.: the directory where the numpy package source sits package_path -- directory of package. Will be computed by magic from the directory of the caller module if not specified Ex.: the directory where numpy.distutils is caller_level -- frame level to caller namespace, internal parameter. """ self.name = dot_join(parent_name, package_name) self.version = None caller_frame = get_frame(caller_level) self.local_path = get_path_from_frame(caller_frame, top_path) # local_path -- directory of a file (usually setup.py) that # defines a configuration() function. # local_path -- directory of a file (usually setup.py) that # defines a configuration() function. if top_path is None: top_path = self.local_path self.local_path = '' if package_path is None: package_path = self.local_path elif os.path.isdir(njoin(self.local_path, package_path)): package_path = njoin(self.local_path, package_path) if not os.path.isdir(package_path or '.'): raise ValueError("%r is not a directory" % (package_path,)) self.top_path = top_path self.package_path = package_path # this is the relative path in the installed package self.path_in_package = os.path.join(*self.name.split('.')) self.list_keys = self._list_keys[:] self.dict_keys = self._dict_keys[:] for n in self.list_keys: v = copy.copy(attrs.get(n, [])) setattr(self, n, as_list(v)) for n in self.dict_keys: v = copy.copy(attrs.get(n, {})) setattr(self, n, v) known_keys = self.list_keys + self.dict_keys self.extra_keys = self._extra_keys[:] for n in attrs.keys(): if n in known_keys: continue a = attrs[n] setattr(self, n, a) if isinstance(a, list): self.list_keys.append(n) elif isinstance(a, dict): self.dict_keys.append(n) else: self.extra_keys.append(n) if os.path.exists(njoin(package_path, '__init__.py')): self.packages.append(self.name) self.package_dir[self.name] = package_path self.options = dict( ignore_setup_xxx_py = False, assume_default_configuration = False, delegate_options_to_subpackages = False, quiet = False, ) caller_instance = None for i in range(1, 3): try: f = get_frame(i) except ValueError: break try: caller_instance = eval('self', f.f_globals, f.f_locals) break except NameError: pass if isinstance(caller_instance, self.__class__): if caller_instance.options['delegate_options_to_subpackages']: self.set_options(**caller_instance.options) self.setup_name = setup_name def todict(self): """ Return a dictionary compatible with the keyword arguments of distutils setup function. 
Examples -------- >>> setup(**config.todict()) #doctest: +SKIP """ self._optimize_data_files() d = {} known_keys = self.list_keys + self.dict_keys + self.extra_keys for n in known_keys: a = getattr(self, n) if a: d[n] = a return d def info(self, message): if not self.options['quiet']: print(message) def warn(self, message): sys.stderr.write('Warning: %s\n' % (message,)) def set_options(self, **options): """ Configure Configuration instance. The following options are available: - ignore_setup_xxx_py - assume_default_configuration - delegate_options_to_subpackages - quiet """ for key, value in options.items(): if key in self.options: self.options[key] = value else: raise ValueError('Unknown option: '+key) def get_distribution(self): """Return the distutils distribution object for self.""" from numpy.distutils.core import get_distribution return get_distribution() def _wildcard_get_subpackage(self, subpackage_name, parent_name, caller_level = 1): l = subpackage_name.split('.') subpackage_path = njoin([self.local_path]+l) dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] config_list = [] for d in dirs: if not os.path.isfile(njoin(d, '__init__.py')): continue if 'build' in d.split(os.sep): continue n = '.'.join(d.split(os.sep)[-len(l):]) c = self.get_subpackage(n, parent_name = parent_name, caller_level = caller_level+1) config_list.extend(c) return config_list def _get_configuration_from_setup_py(self, setup_py, subpackage_name, subpackage_path, parent_name, caller_level = 1): # In case setup_py imports local modules: sys.path.insert(0, os.path.dirname(setup_py)) try: setup_name = os.path.splitext(os.path.basename(setup_py))[0] n = dot_join(self.name, subpackage_name, setup_name) setup_module = exec_mod_from_location( '_'.join(n.split('.')), setup_py) if not hasattr(setup_module, 'configuration'): if not self.options['assume_default_configuration']: self.warn('Assuming default configuration '\ '(%s does not define configuration())'\ % (setup_module)) config = Configuration(subpackage_name, parent_name, self.top_path, subpackage_path, caller_level = caller_level + 1) else: pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) args = (pn,) if setup_module.configuration.__code__.co_argcount > 1: args = args + (self.top_path,) config = setup_module.configuration(*args) if config.name!=dot_join(parent_name, subpackage_name): self.warn('Subpackage %r configuration returned as %r' % \ (dot_join(parent_name, subpackage_name), config.name)) finally: del sys.path[0] return config def get_subpackage(self,subpackage_name, subpackage_path=None, parent_name=None, caller_level = 1): """Return list of subpackage configurations. Parameters ---------- subpackage_name : str or None Name of the subpackage to get the configuration. '*' in subpackage_name is handled as a wildcard. subpackage_path : str If None, then the path is assumed to be the local path plus the subpackage_name. If a setup.py file is not found in the subpackage_path, then a default configuration is used. parent_name : str Parent name. 
""" if subpackage_name is None: if subpackage_path is None: raise ValueError( "either subpackage_name or subpackage_path must be specified") subpackage_name = os.path.basename(subpackage_path) # handle wildcards l = subpackage_name.split('.') if subpackage_path is None and '*' in subpackage_name: return self._wildcard_get_subpackage(subpackage_name, parent_name, caller_level = caller_level+1) assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) if subpackage_path is None: subpackage_path = njoin([self.local_path] + l) else: subpackage_path = njoin([subpackage_path] + l[:-1]) subpackage_path = self.paths([subpackage_path])[0] setup_py = njoin(subpackage_path, self.setup_name) if not self.options['ignore_setup_xxx_py']: if not os.path.isfile(setup_py): setup_py = njoin(subpackage_path, 'setup_%s.py' % (subpackage_name)) if not os.path.isfile(setup_py): if not self.options['assume_default_configuration']: self.warn('Assuming default configuration '\ '(%s/{setup_%s,setup}.py was not found)' \ % (os.path.dirname(setup_py), subpackage_name)) config = Configuration(subpackage_name, parent_name, self.top_path, subpackage_path, caller_level = caller_level+1) else: config = self._get_configuration_from_setup_py( setup_py, subpackage_name, subpackage_path, parent_name, caller_level = caller_level + 1) if config: return [config] else: return [] def add_subpackage(self,subpackage_name, subpackage_path=None, standalone = False): """Add a sub-package to the current Configuration instance. This is useful in a setup.py script for adding sub-packages to a package. Parameters ---------- subpackage_name : str name of the subpackage subpackage_path : str if given, the subpackage path such as the subpackage is in subpackage_path / subpackage_name. If None,the subpackage is assumed to be located in the local path / subpackage_name. standalone : bool """ if standalone: parent_name = None else: parent_name = self.name config_list = self.get_subpackage(subpackage_name, subpackage_path, parent_name = parent_name, caller_level = 2) if not config_list: self.warn('No configuration returned, assuming unavailable.') for config in config_list: d = config if isinstance(config, Configuration): d = config.todict() assert isinstance(d, dict), repr(type(d)) self.info('Appending %s configuration to %s' \ % (d.get('name'), self.name)) self.dict_append(**d) dist = self.get_distribution() if dist is not None: self.warn('distutils distribution has been initialized,'\ ' it may be too late to add a subpackage '+ subpackage_name) def add_data_dir(self, data_path): """Recursively add files under data_path to data_files list. Recursively add files under data_path to the list of data_files to be installed (and distributed). The data_path can be either a relative path-name, or an absolute path-name, or a 2-tuple where the first argument shows where in the install directory the data directory should be installed to. Parameters ---------- data_path : seq or str Argument can be either * 2-sequence (<datadir suffix>, <path to data directory>) * path to data directory where python datadir suffix defaults to package dir. 
Notes ----- Rules for installation paths:: foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar (gun, foo/bar) -> parent/gun foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun (gun/*, foo/*) -> parent/gun/a, parent/gun/b /foo/bar -> (bar, /foo/bar) -> parent/bar (gun, /foo/bar) -> parent/gun (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar Examples -------- For example suppose the source directory contains fun/foo.dat and fun/bar/car.dat: >>> self.add_data_dir('fun') #doctest: +SKIP >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP Will install data-files to the locations:: <package install directory>/ fun/ foo.dat bar/ car.dat sun/ foo.dat bar/ car.dat gun/ foo.dat car.dat """ if is_sequence(data_path): d, data_path = data_path else: d = None if is_sequence(data_path): [self.add_data_dir((d, p)) for p in data_path] return if not is_string(data_path): raise TypeError("not a string: %r" % (data_path,)) if d is None: if os.path.isabs(data_path): return self.add_data_dir((os.path.basename(data_path), data_path)) return self.add_data_dir((data_path, data_path)) paths = self.paths(data_path, include_non_existing=False) if is_glob_pattern(data_path): if is_glob_pattern(d): pattern_list = allpath(d).split(os.sep) pattern_list.reverse() # /a/*//b/ -> /a/*/b rl = list(range(len(pattern_list)-1)); rl.reverse() for i in rl: if not pattern_list[i]: del pattern_list[i] # for path in paths: if not os.path.isdir(path): print('Not a directory, skipping', path) continue rpath = rel_path(path, self.local_path) path_list = rpath.split(os.sep) path_list.reverse() target_list = [] i = 0 for s in pattern_list: if is_glob_pattern(s): if i>=len(path_list): raise ValueError('cannot fill pattern %r with %r' \ % (d, path)) target_list.append(path_list[i]) else: assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) target_list.append(s) i += 1 if path_list[i:]: self.warn('mismatch of pattern_list=%s and path_list=%s'\ % (pattern_list, path_list)) target_list.reverse() self.add_data_dir((os.sep.join(target_list), path)) else: for path in paths: self.add_data_dir((d, path)) return assert not is_glob_pattern(d), repr(d) dist = self.get_distribution() if dist is not None and dist.data_files is not None: data_files = dist.data_files else: data_files = self.data_files for path in paths: for d1, f in list(general_source_directories_files(path)): target_path = os.path.join(self.path_in_package, d, d1) data_files.append((target_path, f)) def _optimize_data_files(self): data_dict = {} for p, files in self.data_files: if p not in data_dict: data_dict[p] = set() for f in files: data_dict[p].add(f) self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] def add_data_files(self,*files): """Add data files to configuration data_files. Parameters ---------- files : sequence Argument(s) can be either * 2-sequence (<datadir prefix>,<path to data file(s)>) * paths to data files where python datadir prefix defaults to package dir. Notes ----- The form of each element of the files sequence is very flexible allowing many combinations of where to get the files from the package and where they should ultimately be installed on the system. The most basic usage is for an element of the files argument sequence to be a simple filename. This will cause that file from the local path to be installed to the installation path of the self.name package (package path). 
The file argument can also be a relative path in which case the entire relative path will be installed into the package directory. Finally, the file can be an absolute path name in which case the file will be found at the absolute path name but installed to the package path. This basic behavior can be augmented by passing a 2-tuple in as the file argument. The first element of the tuple should specify the relative path (under the package install directory) where the remaining sequence of files should be installed to (it has nothing to do with the file-names in the source distribution). The second element of the tuple is the sequence of files that should be installed. The files in this sequence can be filenames, relative paths, or absolute paths. For absolute paths the file will be installed in the top-level package installation directory (regardless of the first argument). Filenames and relative path names will be installed in the package install directory under the path name given as the first element of the tuple. Rules for installation paths: #. file.txt -> (., file.txt)-> parent/file.txt #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt #. ``*``.txt -> parent/a.txt, parent/b.txt #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt #. (sun, file.txt) -> parent/sun/file.txt #. (sun, bar/file.txt) -> parent/sun/file.txt #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt An additional feature is that the path to a data-file can actually be a function that takes no arguments and returns the actual path(s) to the data-files. This is useful when the data files are generated while building the package. Examples -------- Add files to the list of data_files to be included with the package. >>> self.add_data_files('foo.dat', ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), ... 'bar/cat.dat', ... '/full/path/to/can.dat') #doctest: +SKIP will install these data files to:: <package install directory>/ foo.dat fun/ gun.dat nun/ pun.dat sun.dat bar/ car.dat can.dat where <package install directory> is the package (or sub-package) directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C: \\Python2.4 \\Lib \\site-packages \\mypackage') or '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C: \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage'). 
""" if len(files)>1: for f in files: self.add_data_files(f) return assert len(files)==1 if is_sequence(files[0]): d, files = files[0] else: d = None if is_string(files): filepat = files elif is_sequence(files): if len(files)==1: filepat = files[0] else: for f in files: self.add_data_files((d, f)) return else: raise TypeError(repr(type(files))) if d is None: if hasattr(filepat, '__call__'): d = '' elif os.path.isabs(filepat): d = '' else: d = os.path.dirname(filepat) self.add_data_files((d, files)) return paths = self.paths(filepat, include_non_existing=False) if is_glob_pattern(filepat): if is_glob_pattern(d): pattern_list = d.split(os.sep) pattern_list.reverse() for path in paths: path_list = path.split(os.sep) path_list.reverse() path_list.pop() # filename target_list = [] i = 0 for s in pattern_list: if is_glob_pattern(s): target_list.append(path_list[i]) i += 1 else: target_list.append(s) target_list.reverse() self.add_data_files((os.sep.join(target_list), path)) else: self.add_data_files((d, paths)) return assert not is_glob_pattern(d), repr((d, filepat)) dist = self.get_distribution() if dist is not None and dist.data_files is not None: data_files = dist.data_files else: data_files = self.data_files data_files.append((os.path.join(self.path_in_package, d), paths)) ### XXX Implement add_py_modules def add_define_macros(self, macros): """Add define macros to configuration Add the given sequence of macro name and value duples to the beginning of the define_macros list This list will be visible to all extension modules of the current package. """ dist = self.get_distribution() if dist is not None: if not hasattr(dist, 'define_macros'): dist.define_macros = [] dist.define_macros.extend(macros) else: self.define_macros.extend(macros) def add_include_dirs(self,*paths): """Add paths to configuration include directories. Add the given sequence of paths to the beginning of the include_dirs list. This list will be visible to all extension modules of the current package. """ include_dirs = self.paths(paths) dist = self.get_distribution() if dist is not None: if dist.include_dirs is None: dist.include_dirs = [] dist.include_dirs.extend(include_dirs) else: self.include_dirs.extend(include_dirs) def add_headers(self,*files): """Add installable headers to configuration. Add the given sequence of files to the beginning of the headers list. By default, headers will be installed under <python- include>/<self.name.replace('.','/')>/ directory. If an item of files is a tuple, then its first argument specifies the actual installation location relative to the <python-include> path. Parameters ---------- files : str or seq Argument(s) can be either: * 2-sequence (<includedir suffix>,<path to header file(s)>) * path(s) to header file(s) where python includedir suffix will default to package name. """ headers = [] for path in files: if is_string(path): [headers.append((self.name, p)) for p in self.paths(path)] else: if not isinstance(path, (tuple, list)) or len(path) != 2: raise TypeError(repr(path)) [headers.append((path[0], p)) for p in self.paths(path[1])] dist = self.get_distribution() if dist is not None: if dist.headers is None: dist.headers = [] dist.headers.extend(headers) else: self.headers.extend(headers) def paths(self,*paths,**kws): """Apply glob to paths and prepend local_path if needed. Applies glob.glob(...) to each path in the sequence (if needed) and pre-pends the local_path if needed. 
Because this is called on all source lists, this allows wildcard characters to be specified in lists of sources for extension modules and libraries and scripts and allows path-names be relative to the source directory. """ include_non_existing = kws.get('include_non_existing', True) return gpaths(paths, local_path = self.local_path, include_non_existing=include_non_existing) def _fix_paths_dict(self, kw): for k in kw.keys(): v = kw[k] if k in ['sources', 'depends', 'include_dirs', 'library_dirs', 'module_dirs', 'extra_objects']: new_v = self.paths(v) kw[k] = new_v def add_extension(self,name,sources,**kw): """Add extension to configuration. Create and add an Extension instance to the ext_modules list. This method also takes the following optional keyword arguments that are passed on to the Extension constructor. Parameters ---------- name : str name of the extension sources : seq list of the sources. The list of sources may contain functions (called source generators) which must take an extension instance and a build directory as inputs and return a source file or list of source files or None. If None is returned then no sources are generated. If the Extension instance has no sources after processing all source generators, then no extension module is built. include_dirs : define_macros : undef_macros : library_dirs : libraries : runtime_library_dirs : extra_objects : extra_compile_args : extra_link_args : extra_f77_compile_args : extra_f90_compile_args : export_symbols : swig_opts : depends : The depends list contains paths to files or directories that the sources of the extension module depend on. If any path in the depends list is newer than the extension module, then the module will be rebuilt. language : f2py_options : module_dirs : extra_info : dict or list dict or list of dict of keywords to be appended to keywords. Notes ----- The self.paths(...) method is applied to all lists that may contain paths. """ ext_args = copy.copy(kw) ext_args['name'] = dot_join(self.name, name) ext_args['sources'] = sources if 'extra_info' in ext_args: extra_info = ext_args['extra_info'] del ext_args['extra_info'] if isinstance(extra_info, dict): extra_info = [extra_info] for info in extra_info: assert isinstance(info, dict), repr(info) dict_append(ext_args,**info) self._fix_paths_dict(ext_args) # Resolve out-of-tree dependencies libraries = ext_args.get('libraries', []) libnames = [] ext_args['libraries'] = [] for libname in libraries: if isinstance(libname, tuple): self._fix_paths_dict(libname[1]) # Handle library names of the form libname@relative/path/to/library if '@' in libname: lname, lpath = libname.split('@', 1) lpath = os.path.abspath(njoin(self.local_path, lpath)) if os.path.isdir(lpath): c = self.get_subpackage(None, lpath, caller_level = 2) if isinstance(c, Configuration): c = c.todict() for l in [l[0] for l in c.get('libraries', [])]: llname = l.split('__OF__', 1)[0] if llname == lname: c.pop('name', None) dict_append(ext_args,**c) break continue libnames.append(libname) ext_args['libraries'] = libnames + ext_args['libraries'] ext_args['define_macros'] = \ self.define_macros + ext_args.get('define_macros', []) from numpy.distutils.core import Extension ext = Extension(**ext_args) self.ext_modules.append(ext) dist = self.get_distribution() if dist is not None: self.warn('distutils distribution has been initialized,'\ ' it may be too late to add an extension '+name) return ext def add_library(self,name,sources,**build_info): """ Add library to configuration. 
Parameters ---------- name : str Name of the extension. sources : sequence List of the sources. The list of sources may contain functions (called source generators) which must take an extension instance and a build directory as inputs and return a source file or list of source files or None. If None is returned then no sources are generated. If the Extension instance has no sources after processing all source generators, then no extension module is built. build_info : dict, optional The following keys are allowed: * depends * macros * include_dirs * extra_compiler_args * extra_f77_compile_args * extra_f90_compile_args * f2py_options * language """ self._add_library(name, sources, None, build_info) dist = self.get_distribution() if dist is not None: self.warn('distutils distribution has been initialized,'\ ' it may be too late to add a library '+ name) def _add_library(self, name, sources, install_dir, build_info): """Common implementation for add_library and add_installed_library. Do not use directly""" build_info = copy.copy(build_info) build_info['sources'] = sources # Sometimes, depends is not set up to an empty list by default, and if # depends is not given to add_library, distutils barfs (#1134) if not 'depends' in build_info: build_info['depends'] = [] self._fix_paths_dict(build_info) # Add to libraries list so that it is build with build_clib self.libraries.append((name, build_info)) def add_installed_library(self, name, sources, install_dir, build_info=None): """ Similar to add_library, but the specified library is installed. Most C libraries used with `distutils` are only used to build python extensions, but libraries built through this method will be installed so that they can be reused by third-party packages. Parameters ---------- name : str Name of the installed library. sources : sequence List of the library's source files. See `add_library` for details. install_dir : str Path to install the library, relative to the current sub-package. build_info : dict, optional The following keys are allowed: * depends * macros * include_dirs * extra_compiler_args * extra_f77_compile_args * extra_f90_compile_args * f2py_options * language Returns ------- None See Also -------- add_library, add_npy_pkg_config, get_info Notes ----- The best way to encode the options required to link against the specified C libraries is to use a "libname.ini" file, and use `get_info` to retrieve the required options (see `add_npy_pkg_config` for more information). """ if not build_info: build_info = {} install_dir = os.path.join(self.package_path, install_dir) self._add_library(name, sources, install_dir, build_info) self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) def add_npy_pkg_config(self, template, install_dir, subst_dict=None): """ Generate and install a npy-pkg config file from a template. The config file generated from `template` is installed in the given install directory, using `subst_dict` for variable substitution. Parameters ---------- template : str The path of the template, relatively to the current package path. install_dir : str Where to install the npy-pkg config file, relatively to the current package path. subst_dict : dict, optional If given, any string of the form ``@key@`` will be replaced by ``subst_dict[key]`` in the template file when installed. The install prefix is always available through the variable ``@prefix@``, since the install prefix is not easy to get reliably from setup.py. 
See also -------- add_installed_library, get_info Notes ----- This works for both standard installs and in-place builds, i.e. the ``@prefix@`` refer to the source directory for in-place builds. Examples -------- :: config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) Assuming the foo.ini.in file has the following content:: [meta] Name=@foo@ Version=1.0 Description=dummy description [default] Cflags=-I@prefix@/include Libs= The generated file will have the following content:: [meta] Name=bar Version=1.0 Description=dummy description [default] Cflags=-Iprefix_dir/include Libs= and will be installed as foo.ini in the 'lib' subpath. When cross-compiling with numpy distutils, it might be necessary to use modified npy-pkg-config files. Using the default/generated files will link with the host libraries (i.e. libnpymath.a). For cross-compilation you of-course need to link with target libraries, while using the host Python installation. You can copy out the numpy/core/lib/npy-pkg-config directory, add a pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment variable to point to the directory with the modified npy-pkg-config files. Example npymath.ini modified for cross-compilation:: [meta] Name=npymath Description=Portable, core math library implementing C99 standard Version=0.1 [variables] pkgname=numpy.core pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core prefix=${pkgdir} libdir=${prefix}/lib includedir=${prefix}/include [default] Libs=-L${libdir} -lnpymath Cflags=-I${includedir} Requires=mlib [msvc] Libs=/LIBPATH:${libdir} npymath.lib Cflags=/INCLUDE:${includedir} Requires=mlib """ if subst_dict is None: subst_dict = {} template = os.path.join(self.package_path, template) if self.name in self.installed_pkg_config: self.installed_pkg_config[self.name].append((template, install_dir, subst_dict)) else: self.installed_pkg_config[self.name] = [(template, install_dir, subst_dict)] def add_scripts(self,*files): """Add scripts to configuration. Add the sequence of files to the beginning of the scripts list. Scripts will be installed under the <prefix>/bin/ directory. """ scripts = self.paths(files) dist = self.get_distribution() if dist is not None: if dist.scripts is None: dist.scripts = [] dist.scripts.extend(scripts) else: self.scripts.extend(scripts) def dict_append(self,**dict): for key in self.list_keys: a = getattr(self, key) a.extend(dict.get(key, [])) for key in self.dict_keys: a = getattr(self, key) a.update(dict.get(key, {})) known_keys = self.list_keys + self.dict_keys + self.extra_keys for key in dict.keys(): if key not in known_keys: a = getattr(self, key, None) if a and a==dict[key]: continue self.warn('Inheriting attribute %r=%r from %r' \ % (key, dict[key], dict.get('name', '?'))) setattr(self, key, dict[key]) self.extra_keys.append(key) elif key in self.extra_keys: self.info('Ignoring attempt to set %r (from %r to %r)' \ % (key, getattr(self, key), dict[key])) elif key in known_keys: # key is already processed above pass else: raise ValueError("Don't know about key=%r" % (key)) def __str__(self): from pprint import pformat known_keys = self.list_keys + self.dict_keys + self.extra_keys s = '<'+5*'-' + '\n' s += 'Configuration of '+self.name+':\n' known_keys.sort() for k in known_keys: a = getattr(self, k, None) if a: s += '%s = %s\n' % (k, pformat(a)) s += 5*'-' + '>' return s def get_config_cmd(self): """ Returns the numpy.distutils config command instance. 
""" cmd = get_cmd('config') cmd.ensure_finalized() cmd.dump_source = 0 cmd.noisy = 0 old_path = os.environ.get('PATH') if old_path: path = os.pathsep.join(['.', old_path]) os.environ['PATH'] = path return cmd def get_build_temp_dir(self): """ Return a path to a temporary directory where temporary files should be placed. """ cmd = get_cmd('build') cmd.ensure_finalized() return cmd.build_temp def have_f77c(self): """Check for availability of Fortran 77 compiler. Use it inside source generating function to ensure that setup distribution instance has been initialized. Notes ----- True if a Fortran 77 compiler is available (because a simple Fortran 77 code was able to be compiled successfully). """ simple_fortran_subroutine = ''' subroutine simple end ''' config_cmd = self.get_config_cmd() flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') return flag def have_f90c(self): """Check for availability of Fortran 90 compiler. Use it inside source generating function to ensure that setup distribution instance has been initialized. Notes ----- True if a Fortran 90 compiler is available (because a simple Fortran 90 code was able to be compiled successfully) """ simple_fortran_subroutine = ''' subroutine simple end ''' config_cmd = self.get_config_cmd() flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') return flag def append_to(self, extlib): """Append libraries, include_dirs to extension or library item. """ if is_sequence(extlib): lib_name, build_info = extlib dict_append(build_info, libraries=self.libraries, include_dirs=self.include_dirs) else: from numpy.distutils.core import Extension assert isinstance(extlib, Extension), repr(extlib) extlib.libraries.extend(self.libraries) extlib.include_dirs.extend(self.include_dirs) def _get_svn_revision(self, path): """Return path's SVN revision number. """ try: output = subprocess.check_output(['svnversion'], cwd=path) except (subprocess.CalledProcessError, OSError): pass else: m = re.match(rb'(?P<revision>\d+)', output) if m: return int(m.group('revision')) if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): entries = njoin(path, '_svn', 'entries') else: entries = njoin(path, '.svn', 'entries') if os.path.isfile(entries): with open(entries) as f: fstr = f.read() if fstr[:5] == '<?xml': # pre 1.4 m = re.search(r'revision="(?P<revision>\d+)"', fstr) if m: return int(m.group('revision')) else: # non-xml entries file --- check to be sure that m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr) if m: return int(m.group('revision')) return None def _get_hg_revision(self, path): """Return path's Mercurial revision number. """ try: output = subprocess.check_output( ['hg', 'identify', '--num'], cwd=path) except (subprocess.CalledProcessError, OSError): pass else: m = re.match(rb'(?P<revision>\d+)', output) if m: return int(m.group('revision')) branch_fn = njoin(path, '.hg', 'branch') branch_cache_fn = njoin(path, '.hg', 'branch.cache') if os.path.isfile(branch_fn): branch0 = None with open(branch_fn) as f: revision0 = f.read().strip() branch_map = {} with open(branch_cache_fn, 'r') as f: for line in f: branch1, revision1 = line.split()[:2] if revision1==revision0: branch0 = branch1 try: revision1 = int(revision1) except ValueError: continue branch_map[branch1] = revision1 return branch_map.get(branch0) return None def get_version(self, version_file=None, version_variable=None): """Try to get version string of a package. 
Return a version string of the current package or None if the version information could not be detected. Notes ----- This method scans files named __version__.py, <packagename>_version.py, version.py, and __svn_version__.py for string variables version, __version__, and <packagename>_version, until a version number is found. """ version = getattr(self, 'version', None) if version is not None: return version # Get version from version file. if version_file is None: files = ['__version__.py', self.name.split('.')[-1]+'_version.py', 'version.py', '__svn_version__.py', '__hg_version__.py'] else: files = [version_file] if version_variable is None: version_vars = ['version', '__version__', self.name.split('.')[-1]+'_version'] else: version_vars = [version_variable] for f in files: fn = njoin(self.local_path, f) if os.path.isfile(fn): info = ('.py', 'U', 1) name = os.path.splitext(os.path.basename(fn))[0] n = dot_join(self.name, name) try: version_module = exec_mod_from_location( '_'.join(n.split('.')), fn) except ImportError as e: self.warn(str(e)) version_module = None if version_module is None: continue for a in version_vars: version = getattr(version_module, a, None) if version is not None: break # Try if versioneer module try: version = version_module.get_versions()['version'] except AttributeError: pass if version is not None: break if version is not None: self.version = version return version # Get version as SVN or Mercurial revision number revision = self._get_svn_revision(self.local_path) if revision is None: revision = self._get_hg_revision(self.local_path) if revision is not None: version = str(revision) self.version = version return version def make_svn_version_py(self, delete=True): """Appends a data function to the data_files list that will generate __svn_version__.py file to the current package directory. Generate package __svn_version__.py file from SVN revision number, it will be removed after python exits but will be available when sdist, etc commands are executed. Notes ----- If __svn_version__.py existed before, nothing is done. This is intended for working with source directories that are in an SVN repository. """ target = njoin(self.local_path, '__svn_version__.py') revision = self._get_svn_revision(self.local_path) if os.path.isfile(target) or revision is None: return else: def generate_svn_version_py(): if not os.path.isfile(target): version = str(revision) self.info('Creating %s (version=%r)' % (target, version)) with open(target, 'w') as f: f.write('version = %r\n' % (version)) def rm_file(f=target,p=self.info): if delete: try: os.remove(f); p('removed '+f) except OSError: pass try: os.remove(f+'c'); p('removed '+f+'c') except OSError: pass atexit.register(rm_file) return target self.add_data_files(('', generate_svn_version_py())) def make_hg_version_py(self, delete=True): """Appends a data function to the data_files list that will generate __hg_version__.py file to the current package directory. Generate package __hg_version__.py file from Mercurial revision, it will be removed after python exits but will be available when sdist, etc commands are executed. Notes ----- If __hg_version__.py existed before, nothing is done. This is intended for working with source directories that are in an Mercurial repository. 
""" target = njoin(self.local_path, '__hg_version__.py') revision = self._get_hg_revision(self.local_path) if os.path.isfile(target) or revision is None: return else: def generate_hg_version_py(): if not os.path.isfile(target): version = str(revision) self.info('Creating %s (version=%r)' % (target, version)) with open(target, 'w') as f: f.write('version = %r\n' % (version)) def rm_file(f=target,p=self.info): if delete: try: os.remove(f); p('removed '+f) except OSError: pass try: os.remove(f+'c'); p('removed '+f+'c') except OSError: pass atexit.register(rm_file) return target self.add_data_files(('', generate_hg_version_py())) def make_config_py(self,name='__config__'): """Generate package __config__.py file containing system_info information used during building the package. This file is installed to the package installation directory. """ self.py_modules.append((self.name, name, generate_config_py)) def get_info(self,*names): """Get resources information. Return information (from system_info.get_info) for all of the names in the argument list in a single dictionary. """ from .system_info import get_info, dict_append info_dict = {} for a in names: dict_append(info_dict,**get_info(a)) return info_dict def get_cmd(cmdname, _cache={}): if cmdname not in _cache: import distutils.core dist = distutils.core._setup_distribution if dist is None: from distutils.errors import DistutilsInternalError raise DistutilsInternalError( 'setup distribution instance not initialized') cmd = dist.get_command_obj(cmdname) _cache[cmdname] = cmd return _cache[cmdname] def get_numpy_include_dirs(): # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] include_dirs = Configuration.numpy_include_dirs[:] if not include_dirs: import numpy include_dirs = [ numpy.get_include() ] # else running numpy/core/setup.py return include_dirs def get_npy_pkg_dir(): """Return the path where to find the npy-pkg-config directory. If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that is returned. Otherwise, a path inside the location of the numpy module is returned. The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining customized npy-pkg-config .ini files for the cross-compilation environment, and using them when cross-compiling. """ d = os.environ.get('NPY_PKG_CONFIG_PATH') if d is not None: return d spec = importlib.util.find_spec('numpy') d = os.path.join(os.path.dirname(spec.origin), 'core', 'lib', 'npy-pkg-config') return d def get_pkg_info(pkgname, dirs=None): """ Return library info for the given package. Parameters ---------- pkgname : str Name of the package (should match the name of the .ini file, without the extension, e.g. foo for the file foo.ini). dirs : sequence, optional If given, should be a sequence of additional directories where to look for npy-pkg-config files. Those directories are searched prior to the NumPy directory. Returns ------- pkginfo : class instance The `LibraryInfo` instance containing the build information. Raises ------ PkgNotFound If the package is not found. See Also -------- Configuration.add_npy_pkg_config, Configuration.add_installed_library, get_info """ from numpy.distutils.npy_pkg_config import read_config if dirs: dirs.append(get_npy_pkg_dir()) else: dirs = [get_npy_pkg_dir()] return read_config(pkgname, dirs) def get_info(pkgname, dirs=None): """ Return an info dict for a given C library. The info dict contains the necessary options to use the C library. 
Parameters ---------- pkgname : str Name of the package (should match the name of the .ini file, without the extension, e.g. foo for the file foo.ini). dirs : sequence, optional If given, should be a sequence of additional directories where to look for npy-pkg-config files. Those directories are searched prior to the NumPy directory. Returns ------- info : dict The dictionary with build information. Raises ------ PkgNotFound If the package is not found. See Also -------- Configuration.add_npy_pkg_config, Configuration.add_installed_library, get_pkg_info Examples -------- To get the necessary information for the npymath library from NumPy: >>> npymath_info = np.distutils.misc_util.get_info('npymath') >>> npymath_info #doctest: +SKIP {'define_macros': [], 'libraries': ['npymath'], 'library_dirs': ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']} This info dict can then be used as input to a `Configuration` instance:: config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info) """ from numpy.distutils.npy_pkg_config import parse_flags pkg_info = get_pkg_info(pkgname, dirs) # Translate LibraryInfo instance into a build_info dict info = parse_flags(pkg_info.cflags()) for k, v in parse_flags(pkg_info.libs()).items(): info[k].extend(v) # add_extension extra_info argument is ANAL info['define_macros'] = info['macros'] del info['macros'] del info['ignored'] return info def is_bootstrapping(): import builtins try: builtins.__NUMPY_SETUP__ return True except AttributeError: return False ######################### def default_config_dict(name = None, parent_name = None, local_path=None): """Return a configuration dictionary for usage in configuration() function defined in file setup_<name>.py. """ import warnings warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ 'deprecated default_config_dict(%r,%r,%r)' % (name, parent_name, local_path, name, parent_name, local_path, ), stacklevel=2) c = Configuration(name, parent_name, local_path) return c.todict() def dict_append(d, **kws): for k, v in kws.items(): if k in d: ov = d[k] if isinstance(ov, str): d[k] = v else: d[k].extend(v) else: d[k] = v def appendpath(prefix, path): if os.path.sep != '/': prefix = prefix.replace('/', os.path.sep) path = path.replace('/', os.path.sep) drive = '' if os.path.isabs(path): drive = os.path.splitdrive(prefix)[0] absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] pathdrive, path = os.path.splitdrive(path) d = os.path.commonprefix([absprefix, path]) if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ or os.path.join(path[:len(d)], path[len(d):]) != path: # Handle invalid paths d = os.path.dirname(d) subpath = path[len(d):] if os.path.isabs(subpath): subpath = subpath[1:] else: subpath = path return os.path.normpath(njoin(drive + prefix, subpath)) def generate_config_py(target): """Generate config.py file containing system_info information used during building the package. 
Usage: config['py_modules'].append((packagename, '__config__',generate_config_py)) """ from numpy.distutils.system_info import system_info from distutils.dir_util import mkpath mkpath(os.path.dirname(target)) with open(target, 'w') as f: f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) f.write('# It contains system_info results at the time of building this package.\n') f.write('__all__ = ["get_info","show"]\n\n') # For gfortran+msvc combination, extra shared libraries may exist f.write(textwrap.dedent(""" import os import sys extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): os.add_dll_directory(extra_dll_dir) """)) for k, i in system_info.saved_results.items(): f.write('%s=%r\n' % (k, i)) f.write(textwrap.dedent(r''' def get_info(name): g = globals() return g.get(name, g.get(name + "_info", {})) def show(): """ Show libraries in the system on which NumPy was built. Print information about various resources (libraries, library directories, include directories, etc.) in the system on which NumPy was built. See Also -------- get_include : Returns the directory containing NumPy C header files. Notes ----- 1. Classes specifying the information to be printed are defined in the `numpy.distutils.system_info` module. Information may include: * ``language``: language used to write the libraries (mostly C or f77) * ``libraries``: names of libraries found in the system * ``library_dirs``: directories containing the libraries * ``include_dirs``: directories containing library header files * ``src_dirs``: directories containing library source files * ``define_macros``: preprocessor macros used by ``distutils.setup`` * ``baseline``: minimum CPU features required * ``found``: dispatched features supported in the system * ``not found``: dispatched features that are not supported in the system 2. NumPy BLAS/LAPACK Installation Notes Installing a numpy wheel (``pip install numpy`` or force it via ``pip install numpy --only-binary :numpy: numpy``) includes an OpenBLAS implementation of the BLAS and LAPACK linear algebra APIs. In this case, ``library_dirs`` reports the original build time configuration as compiled with gcc/gfortran; at run time the OpenBLAS library is in ``site-packages/numpy.libs/`` (linux), or ``site-packages/numpy/.dylibs/`` (macOS), or ``site-packages/numpy/.libs/`` (windows). Installing numpy from source (``pip install numpy --no-binary numpy``) searches for BLAS and LAPACK dynamic link libraries at build time as influenced by environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER; or the optional file ``~/.numpy-site.cfg``. NumPy remembers those locations and expects to load the same libraries at run-time. In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS library) is in the default build-time search order after 'openblas'. Examples -------- >>> import numpy as np >>> np.show_config() blas_opt_info: language = c define_macros = [('HAVE_CBLAS', None)] libraries = ['openblas', 'openblas'] library_dirs = ['/usr/local/lib'] """ from numpy.core._multiarray_umath import ( __cpu_features__, __cpu_baseline__, __cpu_dispatch__ ) for name,info_dict in globals().items(): if name[0] == "_" or type(info_dict) is not type({}): continue print(name + ":") if not info_dict: print(" NOT AVAILABLE") for k,v in info_dict.items(): v = str(v) if k == "sources" and len(v) > 200: v = v[:60] + " ...\n... 
" + v[-60:] print(" %s = %s" % (k,v)) features_found, features_not_found = [], [] for feature in __cpu_dispatch__: if __cpu_features__[feature]: features_found.append(feature) else: features_not_found.append(feature) print("Supported SIMD extensions in this NumPy install:") print(" baseline = %s" % (','.join(__cpu_baseline__))) print(" found = %s" % (','.join(features_found))) print(" not found = %s" % (','.join(features_not_found))) ''')) return target def msvc_version(compiler): """Return version major and minor of compiler instance if it is MSVC, raise an exception otherwise.""" if not compiler.compiler_type == "msvc": raise ValueError("Compiler instance is not msvc (%s)"\ % compiler.compiler_type) return compiler._MSVCCompiler__version def get_build_architecture(): # Importing distutils.msvccompiler triggers a warning on non-Windows # systems, so delay the import to here. from distutils.msvccompiler import get_build_architecture return get_build_architecture() _cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'} def sanitize_cxx_flags(cxxflags): ''' Some flags are valid for C but not C++. Prune them. ''' return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] def exec_mod_from_location(modname, modfile): ''' Use importlib machinery to import a module `modname` from the file `modfile`. Depending on the `spec.loader`, the module may not be registered in sys.modules. ''' spec = importlib.util.spec_from_file_location(modname, modfile) foo = importlib.util.module_from_spec(spec) spec.loader.exec_module(foo) return foo
89,383
Python
34.839615
102
0.544824
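The get_pkg_info/get_info helpers above turn installed npy-pkg-config (*.ini) files into build options. A hedged sketch of the intended round trip, following the get_info docstring; 'npymath' is the library name used in that docstring, while the extension name and source file are hypothetical placeholders:

    from numpy.distutils.misc_util import Configuration, get_info

    def configuration(parent_package='', top_path=None):
        config = Configuration('mypkg', parent_package, top_path)
        # Link options (libraries, library_dirs, include_dirs, define_macros)
        # for npymath, resolved from its npy-pkg-config file.
        npymath_info = get_info('npymath')
        # extra_info entries are appended to the Extension keyword arguments.
        config.add_extension('_fastmath', sources=['fastmath.c'],
                             extra_info=npymath_info)
        return config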
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/conv_template.py
#!/usr/bin/env python3 """ takes templated file .xxx.src and produces .xxx file where .xxx is .i or .c or .h, using the following template rules /**begin repeat -- on a line by itself marks the start of a repeated code segment /**end repeat**/ -- on a line by itself marks it's end After the /**begin repeat and before the */, all the named templates are placed these should all have the same number of replacements Repeat blocks can be nested, with each nested block labeled with its depth, i.e. /**begin repeat1 *.... */ /**end repeat1**/ When using nested loops, you can optionally exclude particular combinations of the variables using (inside the comment portion of the inner loop): :exclude: var1=value1, var2=value2, ... This will exclude the pattern where var1 is value1 and var2 is value2 when the result is being generated. In the main body each replace will use one entry from the list of named replacements Note that all #..# forms in a block must have the same number of comma-separated entries. Example: An input file containing /**begin repeat * #a = 1,2,3# * #b = 1,2,3# */ /**begin repeat1 * #c = ted, jim# */ @a@, @b@, @c@ /**end repeat1**/ /**end repeat**/ produces line 1 "template.c.src" /* ********************************************************************* ** This file was autogenerated from a template DO NOT EDIT!!** ** Changes should be made to the original source (.src) file ** ********************************************************************* */ #line 9 1, 1, ted #line 9 1, 1, jim #line 9 2, 2, ted #line 9 2, 2, jim #line 9 3, 3, ted #line 9 3, 3, jim """ __all__ = ['process_str', 'process_file'] import os import sys import re # names for replacement that are already global. global_names = {} # header placed at the front of head processed file header =\ """ /* ***************************************************************************** ** This file was autogenerated from a template DO NOT EDIT!!!! ** ** Changes should be made to the original source (.src) file ** ***************************************************************************** */ """ # Parse string for repeat loops def parse_structure(astr, level): """ The returned line number is from the beginning of the string, starting at zero. Returns an empty list if no loops found. """ if level == 0 : loopbeg = "/**begin repeat" loopend = "/**end repeat**/" else : loopbeg = "/**begin repeat%d" % level loopend = "/**end repeat%d**/" % level ind = 0 line = 0 spanlist = [] while True: start = astr.find(loopbeg, ind) if start == -1: break start2 = astr.find("*/", start) start2 = astr.find("\n", start2) fini1 = astr.find(loopend, start2) fini2 = astr.find("\n", fini1) line += astr.count("\n", ind, start2+1) spanlist.append((start, start2+1, fini1, fini2+1, line)) line += astr.count("\n", start2+1, fini2) ind = fini2 spanlist.sort() return spanlist def paren_repl(obj): torep = obj.group(1) numrep = obj.group(2) return ','.join([torep]*int(numrep)) parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") plainrep = re.compile(r"([^*]+)\*(\d+)") def parse_values(astr): # replaces all occurrences of '(a,b,c)*4' in astr # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate # empty values, i.e., ()*4 yields ',,,'. The result is # split at ',' and a list of values returned. 
astr = parenrep.sub(paren_repl, astr) # replaces occurrences of xxx*3 with xxx, xxx, xxx astr = ','.join([plainrep.sub(paren_repl, x.strip()) for x in astr.split(',')]) return astr.split(',') stripast = re.compile(r"\n\s*\*?") named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") exclude_vars_re = re.compile(r"(\w*)=(\w*)") exclude_re = re.compile(":exclude:") def parse_loop_header(loophead) : """Find all named replacements in the header Returns a list of dictionaries, one for each loop iteration, where each key is a name to be substituted and the corresponding value is the replacement string. Also return a list of exclusions. The exclusions are dictionaries of key value pairs. There can be more than one exclusion. [{'var1':'value1', 'var2', 'value2'[,...]}, ...] """ # Strip out '\n' and leading '*', if any, in continuation lines. # This should not effect code previous to this change as # continuation lines were not allowed. loophead = stripast.sub("", loophead) # parse out the names and lists of values names = [] reps = named_re.findall(loophead) nsub = None for rep in reps: name = rep[0] vals = parse_values(rep[1]) size = len(vals) if nsub is None : nsub = size elif nsub != size : msg = "Mismatch in number of values, %d != %d\n%s = %s" raise ValueError(msg % (nsub, size, name, vals)) names.append((name, vals)) # Find any exclude variables excludes = [] for obj in exclude_re.finditer(loophead): span = obj.span() # find next newline endline = loophead.find('\n', span[1]) substr = loophead[span[1]:endline] ex_names = exclude_vars_re.findall(substr) excludes.append(dict(ex_names)) # generate list of dictionaries, one for each template iteration dlist = [] if nsub is None : raise ValueError("No substitution variables found") for i in range(nsub): tmp = {name: vals[i] for name, vals in names} dlist.append(tmp) return dlist replace_re = re.compile(r"@(\w+)@") def parse_string(astr, env, level, line) : lineno = "#line %d\n" % line # local function for string replacement, uses env def replace(match): name = match.group(1) try : val = env[name] except KeyError: msg = 'line %d: no definition of key "%s"'%(line, name) raise ValueError(msg) from None return val code = [lineno] struct = parse_structure(astr, level) if struct : # recurse over inner loops oldend = 0 newlevel = level + 1 for sub in struct: pref = astr[oldend:sub[0]] head = astr[sub[0]:sub[1]] text = astr[sub[1]:sub[2]] oldend = sub[3] newline = line + sub[4] code.append(replace_re.sub(replace, pref)) try : envlist = parse_loop_header(head) except ValueError as e: msg = "line %d: %s" % (newline, e) raise ValueError(msg) for newenv in envlist : newenv.update(env) newcode = parse_string(text, newenv, newlevel, newline) code.extend(newcode) suff = astr[oldend:] code.append(replace_re.sub(replace, suff)) else : # replace keys code.append(replace_re.sub(replace, astr)) code.append('\n') return ''.join(code) def process_str(astr): code = [header] code.extend(parse_string(astr, global_names, 0, 1)) return ''.join(code) include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I) def resolve_includes(source): d = os.path.dirname(source) with open(source) as fid: lines = [] for line in fid: m = include_src_re.match(line) if m: fn = m.group('name') if not os.path.isabs(fn): fn = os.path.join(d, fn) if os.path.isfile(fn): lines.extend(resolve_includes(fn)) else: lines.append(line) else: lines.append(line) return lines def process_file(source): lines = resolve_includes(source) sourcefile = 
os.path.normcase(source).replace("\\", "\\\\") try: code = process_str(''.join(lines)) except ValueError as e: raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None return '#line 1 "%s"\n%s' % (sourcefile, code) def unique_key(adict): # this obtains a unique key given a dictionary # currently it works by appending together n of the letters of the # current keys and increasing n until a unique key is found # -- not particularly quick allkeys = list(adict.keys()) done = False n = 1 while not done: newkey = "".join([x[:n] for x in allkeys]) if newkey in allkeys: n += 1 else: done = True return newkey def main(): try: file = sys.argv[1] except IndexError: fid = sys.stdin outfile = sys.stdout else: fid = open(file, 'r') (base, ext) = os.path.splitext(file) newname = base outfile = open(newname, 'w') allstr = fid.read() try: writestr = process_str(allstr) except ValueError as e: raise ValueError("In %s loop at %s" % (file, e)) from None outfile.write(writestr) if __name__ == "__main__": main()
9,536
Python
27.9
84
0.543624
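A minimal sketch of driving the repeat-block processor above. The import path is an assumption (NumPy ships this processor as numpy.distutils.conv_template), and the template text is a made-up illustration:

from numpy.distutils.conv_template import process_str   # assumed import path

# Hypothetical template: one repeat block with two named replacements,
# which must have the same number of comma-separated values.
template = """/**begin repeat
 * #type = float, double#
 * #sfx  = f, d#
 */
static @type@ add_@sfx@(@type@ a, @type@ b) { return a + b; }
/**end repeat**/
"""

# process_str prepends the "autogenerated" header and emits one copy of the
# body per value, substituting @type@ / @sfx@ and adding #line markers.
print(process_str(template))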
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/from_template.py
#!/usr/bin/env python3 """ process_file(filename) takes templated file .xxx.src and produces .xxx file where .xxx is .pyf .f90 or .f using the following template rules: '<..>' denotes a template. All function and subroutine blocks in a source file with names that contain '<..>' will be replicated according to the rules in '<..>'. The number of comma-separated words in '<..>' will determine the number of replicates. '<..>' may have two different forms, named and short. For example, named: <p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with 'd', 's', 'z', and 'c' for each replicate of the block. <_c> is already defined: <_c=s,d,c,z> <_t> is already defined: <_t=real,double precision,complex,double complex> short: <s,d,c,z>, a short form of the named, useful when no <p> appears inside a block. In general, '<..>' contains a comma separated list of arbitrary expressions. If these expression must contain a comma|leftarrow|rightarrow, then prepend the comma|leftarrow|rightarrow with a backslash. If an expression matches '\\<index>' then it will be replaced by <index>-th expression. Note that all '<..>' forms in a block must have the same number of comma-separated entries. Predefined named template rules: <prefix=s,d,c,z> <ftype=real,double precision,complex,double complex> <ftypereal=real,double precision,\\0,\\1> <ctype=float,double,complex_float,complex_double> <ctypereal=float,double,\\0,\\1> """ __all__ = ['process_str', 'process_file'] import os import sys import re routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I) routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I) function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I) def parse_structure(astr): """ Return a list of tuples for each function or subroutine each tuple is the start and end of a subroutine or function to be expanded. 
""" spanlist = [] ind = 0 while True: m = routine_start_re.search(astr, ind) if m is None: break start = m.start() if function_start_re.match(astr, start, m.end()): while True: i = astr.rfind('\n', ind, start) if i==-1: break start = i if astr[i:i+7]!='\n $': break start += 1 m = routine_end_re.search(astr, m.end()) ind = end = m and m.end()-1 or len(astr) spanlist.append((start, end)) return spanlist template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>") named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>") list_re = re.compile(r"<\s*((.*?))\s*>") def find_repl_patterns(astr): reps = named_re.findall(astr) names = {} for rep in reps: name = rep[0].strip() or unique_key(names) repl = rep[1].replace(r'\,', '@comma@') thelist = conv(repl) names[name] = thelist return names def find_and_remove_repl_patterns(astr): names = find_repl_patterns(astr) astr = re.subn(named_re, '', astr)[0] return astr, names item_re = re.compile(r"\A\\(?P<index>\d+)\Z") def conv(astr): b = astr.split(',') l = [x.strip() for x in b] for i in range(len(l)): m = item_re.match(l[i]) if m: j = int(m.group('index')) l[i] = l[j] return ','.join(l) def unique_key(adict): """ Obtain a unique key given a dictionary.""" allkeys = list(adict.keys()) done = False n = 1 while not done: newkey = '__l%s' % (n) if newkey in allkeys: n += 1 else: done = True return newkey template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z') def expand_sub(substr, names): substr = substr.replace(r'\>', '@rightarrow@') substr = substr.replace(r'\<', '@leftarrow@') lnames = find_repl_patterns(substr) substr = named_re.sub(r"<\1>", substr) # get rid of definition templates def listrepl(mobj): thelist = conv(mobj.group(1).replace(r'\,', '@comma@')) if template_name_re.match(thelist): return "<%s>" % (thelist) name = None for key in lnames.keys(): # see if list is already in dictionary if lnames[key] == thelist: name = key if name is None: # this list is not in the dictionary yet name = unique_key(lnames) lnames[name] = thelist return "<%s>" % name substr = list_re.sub(listrepl, substr) # convert all lists to named templates # newnames are constructed as needed numsubs = None base_rule = None rules = {} for r in template_re.findall(substr): if r not in rules: thelist = lnames.get(r, names.get(r, None)) if thelist is None: raise ValueError('No replicates found for <%s>' % (r)) if r not in names and not thelist.startswith('_'): names[r] = thelist rule = [i.replace('@comma@', ',') for i in thelist.split(',')] num = len(rule) if numsubs is None: numsubs = num rules[r] = rule base_rule = r elif num == numsubs: rules[r] = rule else: print("Mismatch in number of replacements (base <%s=%s>)" " for <%s=%s>. Ignoring." 
% (base_rule, ','.join(rules[base_rule]), r, thelist)) if not rules: return substr def namerepl(mobj): name = mobj.group(1) return rules.get(name, (k+1)*[name])[k] newstr = '' for k in range(numsubs): newstr += template_re.sub(namerepl, substr) + '\n\n' newstr = newstr.replace('@rightarrow@', '>') newstr = newstr.replace('@leftarrow@', '<') return newstr def process_str(allstr): newstr = allstr writestr = '' struct = parse_structure(newstr) oldend = 0 names = {} names.update(_special_names) for sub in struct: cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]]) writestr += cleanedstr names.update(defs) writestr += expand_sub(newstr[sub[0]:sub[1]], names) oldend = sub[1] writestr += newstr[oldend:] return writestr include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I) def resolve_includes(source): d = os.path.dirname(source) with open(source) as fid: lines = [] for line in fid: m = include_src_re.match(line) if m: fn = m.group('name') if not os.path.isabs(fn): fn = os.path.join(d, fn) if os.path.isfile(fn): lines.extend(resolve_includes(fn)) else: lines.append(line) else: lines.append(line) return lines def process_file(source): lines = resolve_includes(source) return process_str(''.join(lines)) _special_names = find_repl_patterns(''' <_c=s,d,c,z> <_t=real,double precision,complex,double complex> <prefix=s,d,c,z> <ftype=real,double precision,complex,double complex> <ctype=float,double,complex_float,complex_double> <ftypereal=real,double precision,\\0,\\1> <ctypereal=float,double,\\0,\\1> ''') def main(): try: file = sys.argv[1] except IndexError: fid = sys.stdin outfile = sys.stdout else: fid = open(file, 'r') (base, ext) = os.path.splitext(file) newname = base outfile = open(newname, 'w') allstr = fid.read() writestr = process_str(allstr) outfile.write(writestr) if __name__ == "__main__": main()
7,913
Python
29.206107
94
0.560091
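A matching sketch for the Fortran-oriented variant above, importable per its file_path as numpy.distutils.from_template; the templated subroutine is a made-up illustration that relies only on the predefined <prefix> and <ftype> rules:

from numpy.distutils.from_template import process_str

# Hypothetical templated routine: every '<..>' in the block is expanded,
# producing one replicate per value of the predefined rules
# (<prefix> = s,d,c,z and <ftype> = real, double precision, complex,
#  double complex), i.e. sscale, dscale, cscale and zscale.
src = """
      subroutine <prefix>scale(n, x)
      <ftype> x(n)
      end subroutine <prefix>scale
"""

print(process_str(src))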
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/__init__.pyi
from typing import Any # TODO: remove when the full numpy namespace is defined def __getattr__(name: str) -> Any: ...
119
unknown
22.999995
55
0.697479
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/ccompiler_opt.py
"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware optimization, starting from parsing the command arguments, to managing the relation between the CPU baseline and dispatch-able features, also generating the required C headers and ending with compiling the sources with proper compiler's flags. `CCompilerOpt` doesn't provide runtime detection for the CPU features, instead only focuses on the compiler side, but it creates abstract C headers that can be used later for the final runtime dispatching process.""" import atexit import inspect import os import pprint import re import subprocess import textwrap # These flags are used to compile any C++ source within Numpy. # They are chosen to have very few runtime dependencies. NPY_CXX_FLAGS = [ '-std=c++11', # Minimal standard version '-D__STDC_VERSION__=0', # for compatibility with C headers '-fno-exceptions', # no exception support '-fno-rtti'] # no runtime type information class _Config: """An abstract class holds all configurable attributes of `CCompilerOpt`, these class attributes can be used to change the default behavior of `CCompilerOpt` in order to fit other requirements. Attributes ---------- conf_nocache : bool Set True to disable memory and file cache. Default is False. conf_noopt : bool Set True to forces the optimization to be disabled, in this case `CCompilerOpt` tends to generate all expected headers in order to 'not' break the build. Default is False. conf_cache_factors : list Add extra factors to the primary caching factors. The caching factors are utilized to determine if there are changes had happened that requires to discard the cache and re-updating it. The primary factors are the arguments of `CCompilerOpt` and `CCompiler`'s properties(type, flags, etc). Default is list of two items, containing the time of last modification of `ccompiler_opt` and value of attribute "conf_noopt" conf_tmp_path : str, The path of temporary directory. Default is auto-created temporary directory via ``tempfile.mkdtemp()``. conf_check_path : str The path of testing files. Each added CPU feature must have a **C** source file contains at least one intrinsic or instruction that related to this feature, so it can be tested against the compiler. Default is ``./distutils/checks``. conf_target_groups : dict Extra tokens that can be reached from dispatch-able sources through the special mark ``@targets``. Default is an empty dictionary. **Notes**: - case-insensitive for tokens and group names - sign '#' must stick in the begin of group name and only within ``@targets`` **Example**: .. code-block:: console $ "@targets #avx_group other_tokens" > group_inside.c >>> CCompilerOpt.conf_target_groups["avx_group"] = \\ "$werror $maxopt avx2 avx512f avx512_skx" >>> cco = CCompilerOpt(cc_instance) >>> cco.try_dispatch(["group_inside.c"]) conf_c_prefix : str The prefix of public C definitions. Default is ``"NPY_"``. conf_c_prefix_ : str The prefix of internal C definitions. Default is ``"NPY__"``. conf_cc_flags : dict Nested dictionaries defining several compiler flags that linked to some major functions, the main key represent the compiler name and sub-keys represent flags names. Default is already covers all supported **C** compilers. Sub-keys explained as follows: "native": str or None used by argument option `native`, to detect the current machine support via the compiler. 
"werror": str or None utilized to treat warning as errors during testing CPU features against the compiler and also for target's policy `$werror` via dispatch-able sources. "maxopt": str or None utilized for target's policy '$maxopt' and the value should contains the maximum acceptable optimization by the compiler. e.g. in gcc `'-O3'` **Notes**: * case-sensitive for compiler names and flags * use space to separate multiple flags * any flag will tested against the compiler and it will skipped if it's not applicable. conf_min_features : dict A dictionary defines the used CPU features for argument option `'min'`, the key represent the CPU architecture name e.g. `'x86'`. Default values provide the best effort on wide range of users platforms. **Note**: case-sensitive for architecture names. conf_features : dict Nested dictionaries used for identifying the CPU features. the primary key is represented as a feature name or group name that gathers several features. Default values covers all supported features but without the major options like "flags", these undefined options handle it by method `conf_features_partial()`. Default value is covers almost all CPU features for *X86*, *IBM/Power64* and *ARM 7/8*. Sub-keys explained as follows: "implies" : str or list, optional, List of CPU feature names to be implied by it, the feature name must be defined within `conf_features`. Default is None. "flags": str or list, optional List of compiler flags. Default is None. "detect": str or list, optional List of CPU feature names that required to be detected in runtime. By default, its the feature name or features in "group" if its specified. "implies_detect": bool, optional If True, all "detect" of implied features will be combined. Default is True. see `feature_detect()`. "group": str or list, optional Same as "implies" but doesn't require the feature name to be defined within `conf_features`. "interest": int, required a key for sorting CPU features "headers": str or list, optional intrinsics C header file "disable": str, optional force disable feature, the string value should contains the reason of disabling. "autovec": bool or None, optional True or False to declare that CPU feature can be auto-vectorized by the compiler. By default(None), treated as True if the feature contains at least one applicable flag. see `feature_can_autovec()` "extra_checks": str or list, optional Extra test case names for the CPU feature that need to be tested against the compiler. Each test case must have a C file named ``extra_xxxx.c``, where ``xxxx`` is the case name in lower case, under 'conf_check_path'. It should contain at least one intrinsic or function related to the test case. If the compiler able to successfully compile the C file then `CCompilerOpt` will add a C ``#define`` for it into the main dispatch header, e.g. ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case. **NOTES**: * space can be used as separator with options that supports "str or list" * case-sensitive for all values and feature name must be in upper-case. 
* if flags aren't applicable, its will skipped rather than disable the CPU feature * the CPU feature will disabled if the compiler fail to compile the test file """ conf_nocache = False conf_noopt = False conf_cache_factors = None conf_tmp_path = None conf_check_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "checks" ) conf_target_groups = {} conf_c_prefix = 'NPY_' conf_c_prefix_ = 'NPY__' conf_cc_flags = dict( gcc = dict( # native should always fail on arm and ppc64, # native usually works only with x86 native = '-march=native', opt = '-O3', werror = '-Werror', ), clang = dict( native = '-march=native', opt = "-O3", # One of the following flags needs to be applicable for Clang to # guarantee the sanity of the testing process, however in certain # cases `-Werror` gets skipped during the availability test due to # "unused arguments" warnings. # see https://github.com/numpy/numpy/issues/19624 werror = '-Werror=switch -Werror', ), icc = dict( native = '-xHost', opt = '-O3', werror = '-Werror', ), iccw = dict( native = '/QxHost', opt = '/O3', werror = '/Werror', ), msvc = dict( native = None, opt = '/O2', werror = '/WX', ) ) conf_min_features = dict( x86 = "SSE SSE2", x64 = "SSE SSE2 SSE3", ppc64 = '', # play it safe ppc64le = "VSX VSX2", s390x = '', armhf = '', # play it safe aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" ) conf_features = dict( # X86 SSE = dict( interest=1, headers="xmmintrin.h", # enabling SSE without SSE2 is useless also # it's non-optional for x86_64 implies="SSE2" ), SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"), SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"), SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"), SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"), POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"), SSE42 = dict(interest=7, implies="POPCNT"), AVX = dict( interest=8, implies="SSE42", headers="immintrin.h", implies_detect=False ), XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"), FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"), F16C = dict(interest=11, implies="AVX"), FMA3 = dict(interest=12, implies="F16C"), AVX2 = dict(interest=13, implies="F16C"), AVX512F = dict( interest=20, implies="FMA3 AVX2", implies_detect=False, extra_checks="AVX512F_REDUCE" ), AVX512CD = dict(interest=21, implies="AVX512F"), AVX512_KNL = dict( interest=40, implies="AVX512CD", group="AVX512ER AVX512PF", detect="AVX512_KNL", implies_detect=False ), AVX512_KNM = dict( interest=41, implies="AVX512_KNL", group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ", detect="AVX512_KNM", implies_detect=False ), AVX512_SKX = dict( interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", detect="AVX512_SKX", implies_detect=False, extra_checks="AVX512BW_MASK AVX512DQ_MASK" ), AVX512_CLX = dict( interest=43, implies="AVX512_SKX", group="AVX512VNNI", detect="AVX512_CLX" ), AVX512_CNL = dict( interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI", detect="AVX512_CNL", implies_detect=False ), AVX512_ICL = dict( interest=45, implies="AVX512_CLX AVX512_CNL", group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ", detect="AVX512_ICL", implies_detect=False ), # IBM/Power ## Power7/ISA 2.06 VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), ## Power8/ISA 2.07 VSX2 = dict(interest=2, implies="VSX", implies_detect=False), ## Power9/ISA 3.00 VSX3 = dict(interest=3, implies="VSX2", implies_detect=False), ## Power10/ISA 3.1 VSX4 = dict(interest=4, implies="VSX3", 
implies_detect=False, extra_checks="VSX4_MMA"), # IBM/Z ## VX(z13) support VX = dict(interest=1, headers="vecintrin.h"), ## Vector-Enhancements Facility VXE = dict(interest=2, implies="VX", implies_detect=False), ## Vector-Enhancements Facility 2 VXE2 = dict(interest=3, implies="VXE", implies_detect=False), # ARM NEON = dict(interest=1, headers="arm_neon.h"), NEON_FP16 = dict(interest=2, implies="NEON"), ## FMA NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"), ## Advanced SIMD ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False), ## ARMv8.2 half-precision & vector arithm ASIMDHP = dict(interest=5, implies="ASIMD"), ## ARMv8.2 dot product ASIMDDP = dict(interest=6, implies="ASIMD"), ## ARMv8.2 Single & half-precision Multiply ASIMDFHM = dict(interest=7, implies="ASIMDHP"), ) def conf_features_partial(self): """Return a dictionary of supported CPU features by the platform, and accumulate the rest of undefined options in `conf_features`, the returned dict has same rules and notes in class attribute `conf_features`, also its override any options that been set in 'conf_features'. """ if self.cc_noopt: # optimization is disabled return {} on_x86 = self.cc_on_x86 or self.cc_on_x64 is_unix = self.cc_is_gcc or self.cc_is_clang if on_x86 and is_unix: return dict( SSE = dict(flags="-msse"), SSE2 = dict(flags="-msse2"), SSE3 = dict(flags="-msse3"), SSSE3 = dict(flags="-mssse3"), SSE41 = dict(flags="-msse4.1"), POPCNT = dict(flags="-mpopcnt"), SSE42 = dict(flags="-msse4.2"), AVX = dict(flags="-mavx"), F16C = dict(flags="-mf16c"), XOP = dict(flags="-mxop"), FMA4 = dict(flags="-mfma4"), FMA3 = dict(flags="-mfma"), AVX2 = dict(flags="-mavx2"), AVX512F = dict(flags="-mavx512f -mno-mmx"), AVX512CD = dict(flags="-mavx512cd"), AVX512_KNL = dict(flags="-mavx512er -mavx512pf"), AVX512_KNM = dict( flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq" ), AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"), AVX512_CLX = dict(flags="-mavx512vnni"), AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"), AVX512_ICL = dict( flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq" ) ) if on_x86 and self.cc_is_icc: return dict( SSE = dict(flags="-msse"), SSE2 = dict(flags="-msse2"), SSE3 = dict(flags="-msse3"), SSSE3 = dict(flags="-mssse3"), SSE41 = dict(flags="-msse4.1"), POPCNT = {}, SSE42 = dict(flags="-msse4.2"), AVX = dict(flags="-mavx"), F16C = {}, XOP = dict(disable="Intel Compiler doesn't support it"), FMA4 = dict(disable="Intel Compiler doesn't support it"), # Intel Compiler doesn't support AVX2 or FMA3 independently FMA3 = dict( implies="F16C AVX2", flags="-march=core-avx2" ), AVX2 = dict(implies="FMA3", flags="-march=core-avx2"), # Intel Compiler doesn't support AVX512F or AVX512CD independently AVX512F = dict( implies="AVX2 AVX512CD", flags="-march=common-avx512" ), AVX512CD = dict( implies="AVX2 AVX512F", flags="-march=common-avx512" ), AVX512_KNL = dict(flags="-xKNL"), AVX512_KNM = dict(flags="-xKNM"), AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"), AVX512_CLX = dict(flags="-xCASCADELAKE"), AVX512_CNL = dict(flags="-xCANNONLAKE"), AVX512_ICL = dict(flags="-xICELAKE-CLIENT"), ) if on_x86 and self.cc_is_iccw: return dict( SSE = dict(flags="/arch:SSE"), SSE2 = dict(flags="/arch:SSE2"), SSE3 = dict(flags="/arch:SSE3"), SSSE3 = dict(flags="/arch:SSSE3"), SSE41 = dict(flags="/arch:SSE4.1"), POPCNT = {}, SSE42 = dict(flags="/arch:SSE4.2"), AVX = dict(flags="/arch:AVX"), F16C = {}, XOP = dict(disable="Intel Compiler doesn't support it"), FMA4 = dict(disable="Intel Compiler doesn't 
support it"), # Intel Compiler doesn't support FMA3 or AVX2 independently FMA3 = dict( implies="F16C AVX2", flags="/arch:CORE-AVX2" ), AVX2 = dict( implies="FMA3", flags="/arch:CORE-AVX2" ), # Intel Compiler doesn't support AVX512F or AVX512CD independently AVX512F = dict( implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512" ), AVX512CD = dict( implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512" ), AVX512_KNL = dict(flags="/Qx:KNL"), AVX512_KNM = dict(flags="/Qx:KNM"), AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"), AVX512_CLX = dict(flags="/Qx:CASCADELAKE"), AVX512_CNL = dict(flags="/Qx:CANNONLAKE"), AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT") ) if on_x86 and self.cc_is_msvc: return dict( SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {}, SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {}, SSE3 = {}, SSSE3 = {}, SSE41 = {}, POPCNT = dict(headers="nmmintrin.h"), SSE42 = {}, AVX = dict(flags="/arch:AVX"), F16C = {}, XOP = dict(headers="ammintrin.h"), FMA4 = dict(headers="ammintrin.h"), # MSVC doesn't support FMA3 or AVX2 independently FMA3 = dict( implies="F16C AVX2", flags="/arch:AVX2" ), AVX2 = dict( implies="F16C FMA3", flags="/arch:AVX2" ), # MSVC doesn't support AVX512F or AVX512CD independently, # always generate instructions belong to (VL/VW/DQ) AVX512F = dict( implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512" ), AVX512CD = dict( implies="AVX512F AVX512_SKX", flags="/arch:AVX512" ), AVX512_KNL = dict( disable="MSVC compiler doesn't support it" ), AVX512_KNM = dict( disable="MSVC compiler doesn't support it" ), AVX512_SKX = dict(flags="/arch:AVX512"), AVX512_CLX = {}, AVX512_CNL = {}, AVX512_ICL = {} ) on_power = self.cc_on_ppc64le or self.cc_on_ppc64 if on_power: partial = dict( VSX = dict( implies=("VSX2" if self.cc_on_ppc64le else ""), flags="-mvsx" ), VSX2 = dict( flags="-mcpu=power8", implies_detect=False ), VSX3 = dict( flags="-mcpu=power9 -mtune=power9", implies_detect=False ), VSX4 = dict( flags="-mcpu=power10 -mtune=power10", implies_detect=False ) ) if self.cc_is_clang: partial["VSX"]["flags"] = "-maltivec -mvsx" partial["VSX2"]["flags"] = "-mpower8-vector" partial["VSX3"]["flags"] = "-mpower9-vector" partial["VSX4"]["flags"] = "-mpower10-vector" return partial on_zarch = self.cc_on_s390x if on_zarch: partial = dict( VX = dict( flags="-march=arch11 -mzvector" ), VXE = dict( flags="-march=arch12", implies_detect=False ), VXE2 = dict( flags="-march=arch13", implies_detect=False ) ) return partial if self.cc_on_aarch64 and is_unix: return dict( NEON = dict( implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True ), NEON_FP16 = dict( implies="NEON NEON_VFPV4 ASIMD", autovec=True ), NEON_VFPV4 = dict( implies="NEON NEON_FP16 ASIMD", autovec=True ), ASIMD = dict( implies="NEON NEON_FP16 NEON_VFPV4", autovec=True ), ASIMDHP = dict( flags="-march=armv8.2-a+fp16" ), ASIMDDP = dict( flags="-march=armv8.2-a+dotprod" ), ASIMDFHM = dict( flags="-march=armv8.2-a+fp16fml" ), ) if self.cc_on_armhf and is_unix: return dict( NEON = dict( flags="-mfpu=neon" ), NEON_FP16 = dict( flags="-mfpu=neon-fp16 -mfp16-format=ieee" ), NEON_VFPV4 = dict( flags="-mfpu=neon-vfpv4", ), ASIMD = dict( flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd", ), ASIMDHP = dict( flags="-march=armv8.2-a+fp16" ), ASIMDDP = dict( flags="-march=armv8.2-a+dotprod", ), ASIMDFHM = dict( flags="-march=armv8.2-a+fp16fml" ) ) # TODO: ARM MSVC return {} def __init__(self): if self.conf_tmp_path is None: import shutil import tempfile tmp = tempfile.mkdtemp() def rm_temp(): try: shutil.rmtree(tmp) except OSError: pass 
atexit.register(rm_temp) self.conf_tmp_path = tmp if self.conf_cache_factors is None: self.conf_cache_factors = [ os.path.getmtime(__file__), self.conf_nocache ] class _Distutils: """A helper class that provides a collection of fundamental methods implemented in a top of Python and NumPy Distutils. The idea behind this class is to gather all methods that it may need to override in case of reuse 'CCompilerOpt' in environment different than of what NumPy has. Parameters ---------- ccompiler : `CCompiler` The generate instance that returned from `distutils.ccompiler.new_compiler()`. """ def __init__(self, ccompiler): self._ccompiler = ccompiler def dist_compile(self, sources, flags, ccompiler=None, **kwargs): """Wrap CCompiler.compile()""" assert(isinstance(sources, list)) assert(isinstance(flags, list)) flags = kwargs.pop("extra_postargs", []) + flags if not ccompiler: ccompiler = self._ccompiler return ccompiler.compile(sources, extra_postargs=flags, **kwargs) def dist_test(self, source, flags, macros=[]): """Return True if 'CCompiler.compile()' able to compile a source file with certain flags. """ assert(isinstance(source, str)) from distutils.errors import CompileError cc = self._ccompiler; bk_spawn = getattr(cc, 'spawn', None) if bk_spawn: cc_type = getattr(self._ccompiler, "compiler_type", "") if cc_type in ("msvc",): setattr(cc, 'spawn', self._dist_test_spawn_paths) else: setattr(cc, 'spawn', self._dist_test_spawn) test = False try: self.dist_compile( [source], flags, macros=macros, output_dir=self.conf_tmp_path ) test = True except CompileError as e: self.dist_log(str(e), stderr=True) if bk_spawn: setattr(cc, 'spawn', bk_spawn) return test def dist_info(self): """ Return a tuple containing info about (platform, compiler, extra_args), required by the abstract class '_CCompiler' for discovering the platform environment. This is also used as a cache factor in order to detect any changes happening from outside. 
""" if hasattr(self, "_dist_info"): return self._dist_info cc_type = getattr(self._ccompiler, "compiler_type", '') if cc_type in ("intelem", "intelemw"): platform = "x86_64" elif cc_type in ("intel", "intelw", "intele"): platform = "x86" else: from distutils.util import get_platform platform = get_platform() cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) if not cc_type or cc_type == "unix": if hasattr(cc_info, "__iter__"): compiler = cc_info[0] else: compiler = str(cc_info) else: compiler = cc_type if hasattr(cc_info, "__iter__") and len(cc_info) > 1: extra_args = ' '.join(cc_info[1:]) else: extra_args = os.environ.get("CFLAGS", "") extra_args += os.environ.get("CPPFLAGS", "") self._dist_info = (platform, compiler, extra_args) return self._dist_info @staticmethod def dist_error(*args): """Raise a compiler error""" from distutils.errors import CompileError raise CompileError(_Distutils._dist_str(*args)) @staticmethod def dist_fatal(*args): """Raise a distutils error""" from distutils.errors import DistutilsError raise DistutilsError(_Distutils._dist_str(*args)) @staticmethod def dist_log(*args, stderr=False): """Print a console message""" from numpy.distutils import log out = _Distutils._dist_str(*args) if stderr: log.warn(out) else: log.info(out) @staticmethod def dist_load_module(name, path): """Load a module from file, required by the abstract class '_Cache'.""" from .misc_util import exec_mod_from_location try: return exec_mod_from_location(name, path) except Exception as e: _Distutils.dist_log(e, stderr=True) return None @staticmethod def _dist_str(*args): """Return a string to print by log and errors.""" def to_str(arg): if not isinstance(arg, str) and hasattr(arg, '__iter__'): ret = [] for a in arg: ret.append(to_str(a)) return '('+ ' '.join(ret) + ')' return str(arg) stack = inspect.stack()[2] start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) out = ' '.join([ to_str(a) for a in (*args,) ]) return start + out def _dist_test_spawn_paths(self, cmd, display=None): """ Fix msvc SDK ENV path same as distutils do without it we get c1: fatal error C1356: unable to find mspdbcore.dll """ if not hasattr(self._ccompiler, "_paths"): self._dist_test_spawn(cmd) return old_path = os.getenv("path") try: os.environ["path"] = self._ccompiler._paths self._dist_test_spawn(cmd) finally: os.environ["path"] = old_path _dist_warn_regex = re.compile( # intel and msvc compilers don't raise # fatal errors when flags are wrong or unsupported ".*(" "warning D9002|" # msvc, it should be work with any language. "invalid argument for option" # intel ").*" ) @staticmethod def _dist_test_spawn(cmd, display=None): try: o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True) if o and re.match(_Distutils._dist_warn_regex, o): _Distutils.dist_error( "Flags in command", cmd ,"aren't supported by the compiler" ", output -> \n%s" % o ) except subprocess.CalledProcessError as exc: o = exc.output s = exc.returncode except OSError as e: o = e s = 127 else: return None _Distutils.dist_error( "Command", cmd, "failed with exit status %d output -> \n%s" % ( s, o )) _share_cache = {} class _Cache: """An abstract class handles caching functionality, provides two levels of caching, in-memory by share instances attributes among each other and by store attributes into files. **Note**: any attributes that start with ``_`` or ``conf_`` will be ignored. 
Parameters ---------- cache_path : str or None The path of cache file, if None then cache in file will disabled. *factors : The caching factors that need to utilize next to `conf_cache_factors`. Attributes ---------- cache_private : set Hold the attributes that need be skipped from "in-memory cache". cache_infile : bool Utilized during initializing this class, to determine if the cache was able to loaded from the specified cache path in 'cache_path'. """ # skip attributes from cache _cache_ignore = re.compile("^(_|conf_)") def __init__(self, cache_path=None, *factors): self.cache_me = {} self.cache_private = set() self.cache_infile = False self._cache_path = None if self.conf_nocache: self.dist_log("cache is disabled by `Config`") return self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) self._cache_path = cache_path if cache_path: if os.path.exists(cache_path): self.dist_log("load cache from file ->", cache_path) cache_mod = self.dist_load_module("cache", cache_path) if not cache_mod: self.dist_log( "unable to load the cache file as a module", stderr=True ) elif not hasattr(cache_mod, "hash") or \ not hasattr(cache_mod, "data"): self.dist_log("invalid cache file", stderr=True) elif self._cache_hash == cache_mod.hash: self.dist_log("hit the file cache") for attr, val in cache_mod.data.items(): setattr(self, attr, val) self.cache_infile = True else: self.dist_log("miss the file cache") if not self.cache_infile: other_cache = _share_cache.get(self._cache_hash) if other_cache: self.dist_log("hit the memory cache") for attr, val in other_cache.__dict__.items(): if attr in other_cache.cache_private or \ re.match(self._cache_ignore, attr): continue setattr(self, attr, val) _share_cache[self._cache_hash] = self atexit.register(self.cache_flush) def __del__(self): for h, o in _share_cache.items(): if o == self: _share_cache.pop(h) break def cache_flush(self): """ Force update the cache. """ if not self._cache_path: return # TODO: don't write if the cache doesn't change self.dist_log("write cache to path ->", self._cache_path) cdict = self.__dict__.copy() for attr in self.__dict__.keys(): if re.match(self._cache_ignore, attr): cdict.pop(attr) d = os.path.dirname(self._cache_path) if not os.path.exists(d): os.makedirs(d) repr_dict = pprint.pformat(cdict, compact=True) with open(self._cache_path, "w") as f: f.write(textwrap.dedent("""\ # AUTOGENERATED DON'T EDIT # Please make changes to the code generator \ (distutils/ccompiler_opt.py) hash = {} data = \\ """).format(self._cache_hash)) f.write(repr_dict) def cache_hash(self, *factors): # is there a built-in non-crypto hash? # sdbm chash = 0 for f in factors: for char in str(f): chash = ord(char) + (chash << 6) + (chash << 16) - chash chash &= 0xFFFFFFFF return chash @staticmethod def me(cb): """ A static method that can be treated as a decorator to dynamically cache certain methods. """ def cache_wrap_me(self, *args, **kwargs): # good for normal args cache_key = str(( cb.__name__, *args, *kwargs.keys(), *kwargs.values() )) if cache_key in self.cache_me: return self.cache_me[cache_key] ccb = cb(self, *args, **kwargs) self.cache_me[cache_key] = ccb return ccb return cache_wrap_me class _CCompiler: """A helper class for `CCompilerOpt` containing all utilities that related to the fundamental compiler's functions. 
Attributes ---------- cc_on_x86 : bool True when the target architecture is 32-bit x86 cc_on_x64 : bool True when the target architecture is 64-bit x86 cc_on_ppc64 : bool True when the target architecture is 64-bit big-endian powerpc cc_on_ppc64le : bool True when the target architecture is 64-bit litle-endian powerpc cc_on_s390x : bool True when the target architecture is IBM/ZARCH on linux cc_on_armhf : bool True when the target architecture is 32-bit ARMv7+ cc_on_aarch64 : bool True when the target architecture is 64-bit Armv8-a+ cc_on_noarch : bool True when the target architecture is unknown or not supported cc_is_gcc : bool True if the compiler is GNU or if the compiler is unknown cc_is_clang : bool True if the compiler is Clang cc_is_icc : bool True if the compiler is Intel compiler (unix like) cc_is_iccw : bool True if the compiler is Intel compiler (msvc like) cc_is_nocc : bool True if the compiler isn't supported directly, Note: that cause a fail-back to gcc cc_has_debug : bool True if the compiler has debug flags cc_has_native : bool True if the compiler has native flags cc_noopt : bool True if the compiler has definition 'DISABLE_OPT*', or 'cc_on_noarch' is True cc_march : str The target architecture name, or "unknown" if the architecture isn't supported cc_name : str The compiler name, or "unknown" if the compiler isn't supported cc_flags : dict Dictionary containing the initialized flags of `_Config.conf_cc_flags` """ def __init__(self): if hasattr(self, "cc_is_cached"): return # attr regex compiler-expression detect_arch = ( ("cc_on_x64", ".*(x|x86_|amd)64.*", ""), ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""), ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*", ""), ("cc_on_ppc64", ".*(powerpc|ppc)64.*", ""), ("cc_on_aarch64", ".*(aarch64|arm64).*", ""), ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || " "defined(__ARM_ARCH_7A__)"), ("cc_on_s390x", ".*s390x.*", ""), # undefined platform ("cc_on_noarch", "", ""), ) detect_compiler = ( ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""), ("cc_is_clang", ".*clang.*", ""), # intel msvc like ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""), ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like ("cc_is_msvc", ".*msvc.*", ""), # undefined compiler will be treat it as gcc ("cc_is_nocc", "", ""), ) detect_args = ( ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""), ("cc_has_native", ".*(-march=native|-xHost|/QxHost).*", ""), # in case if the class run with -DNPY_DISABLE_OPTIMIZATION ("cc_noopt", ".*DISABLE_OPT.*", ""), ) dist_info = self.dist_info() platform, compiler_info, extra_args = dist_info # set False to all attrs for section in (detect_arch, detect_compiler, detect_args): for attr, rgex, cexpr in section: setattr(self, attr, False) for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): for attr, rgex, cexpr in detect: if rgex and not re.match(rgex, searchin, re.IGNORECASE): continue if cexpr and not self.cc_test_cexpr(cexpr): continue setattr(self, attr, True) break for attr, rgex, cexpr in detect_args: if rgex and not re.match(rgex, extra_args, re.IGNORECASE): continue if cexpr and not self.cc_test_cexpr(cexpr): continue setattr(self, attr, True) if self.cc_on_noarch: self.dist_log( "unable to detect CPU architecture which lead to disable the optimization. 
" f"check dist_info:<<\n{dist_info}\n>>", stderr=True ) self.cc_noopt = True if self.conf_noopt: self.dist_log("Optimization is disabled by the Config", stderr=True) self.cc_noopt = True if self.cc_is_nocc: """ mingw can be treated as a gcc, and also xlc even if it based on clang, but still has the same gcc optimization flags. """ self.dist_log( "unable to detect compiler type which leads to treating it as GCC. " "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." f"check dist_info:<<\n{dist_info}\n>>", stderr=True ) self.cc_is_gcc = True self.cc_march = "unknown" for arch in ("x86", "x64", "ppc64", "ppc64le", "armhf", "aarch64", "s390x"): if getattr(self, "cc_on_" + arch): self.cc_march = arch break self.cc_name = "unknown" for name in ("gcc", "clang", "iccw", "icc", "msvc"): if getattr(self, "cc_is_" + name): self.cc_name = name break self.cc_flags = {} compiler_flags = self.conf_cc_flags.get(self.cc_name) if compiler_flags is None: self.dist_fatal( "undefined flag for compiler '%s', " "leave an empty dict instead" % self.cc_name ) for name, flags in compiler_flags.items(): self.cc_flags[name] = nflags = [] if flags: assert(isinstance(flags, str)) flags = flags.split() for f in flags: if self.cc_test_flags([f]): nflags.append(f) self.cc_is_cached = True @_Cache.me def cc_test_flags(self, flags): """ Returns True if the compiler supports 'flags'. """ assert(isinstance(flags, list)) self.dist_log("testing flags", flags) test_path = os.path.join(self.conf_check_path, "test_flags.c") test = self.dist_test(test_path, flags) if not test: self.dist_log("testing failed", stderr=True) return test @_Cache.me def cc_test_cexpr(self, cexpr, flags=[]): """ Same as the above but supports compile-time expressions. """ self.dist_log("testing compiler expression", cexpr) test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c") with open(test_path, "w") as fd: fd.write(textwrap.dedent(f"""\ #if !({cexpr}) #error "unsupported expression" #endif int dummy; """)) test = self.dist_test(test_path, flags) if not test: self.dist_log("testing failed", stderr=True) return test def cc_normalize_flags(self, flags): """ Remove the conflicts that caused due gathering implied features flags. Parameters ---------- 'flags' list, compiler flags flags should be sorted from the lowest to the highest interest. Returns ------- list, filtered from any conflicts. 
Examples -------- >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) ['armv8.2-a+fp16+dotprod'] >>> self.cc_normalize_flags( ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] ) ['-march=core-avx2'] """ assert(isinstance(flags, list)) if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: return self._cc_normalize_unix(flags) if self.cc_is_msvc or self.cc_is_iccw: return self._cc_normalize_win(flags) return flags _cc_normalize_unix_mrgx = re.compile( # 1- to check the highest of r"^(-mcpu=|-march=|-x[A-Z0-9\-])" ) _cc_normalize_unix_frgx = re.compile( # 2- to remove any flags starts with # -march, -mcpu, -x(INTEL) and '-m' without '=' r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|" # exclude: r"(?:-mzvector)" ) _cc_normalize_unix_krgx = re.compile( # 3- keep only the highest of r"^(-mfpu|-mtune)" ) _cc_normalize_arch_ver = re.compile( r"[0-9.]" ) def _cc_normalize_unix(self, flags): def ver_flags(f): # arch ver subflag # -march=armv8.2-a+fp16fml tokens = f.split('+') ver = float('0' + ''.join( re.findall(self._cc_normalize_arch_ver, tokens[0]) )) return ver, tokens[0], tokens[1:] if len(flags) <= 1: return flags # get the highest matched flag for i, cur_flag in enumerate(reversed(flags)): if not re.match(self._cc_normalize_unix_mrgx, cur_flag): continue lower_flags = flags[:-(i+1)] upper_flags = flags[-i:] filterd = list(filter( self._cc_normalize_unix_frgx.search, lower_flags )) # gather subflags ver, arch, subflags = ver_flags(cur_flag) if ver > 0 and len(subflags) > 0: for xflag in lower_flags: xver, _, xsubflags = ver_flags(xflag) if ver == xver: subflags = xsubflags + subflags cur_flag = arch + '+' + '+'.join(subflags) flags = filterd + [cur_flag] if i > 0: flags += upper_flags break # to remove overridable flags final_flags = [] matched = set() for f in reversed(flags): match = re.match(self._cc_normalize_unix_krgx, f) if not match: pass elif match[0] in matched: continue else: matched.add(match[0]) final_flags.insert(0, f) return final_flags _cc_normalize_win_frgx = re.compile( r"^(?!(/arch\:|/Qx\:))" ) _cc_normalize_win_mrgx = re.compile( r"^(/arch|/Qx:)" ) def _cc_normalize_win(self, flags): for i, f in enumerate(reversed(flags)): if not re.match(self._cc_normalize_win_mrgx, f): continue i += 1 return list(filter( self._cc_normalize_win_frgx.search, flags[:-i] )) + flags[-i:] return flags class _Feature: """A helper class for `CCompilerOpt` that managing CPU features. Attributes ---------- feature_supported : dict Dictionary containing all CPU features that supported by the platform, according to the specified values in attribute `_Config.conf_features` and `_Config.conf_features_partial()` feature_min : set The minimum support of CPU features, according to the specified values in attribute `_Config.conf_min_features`. 
""" def __init__(self): if hasattr(self, "feature_is_cached"): return self.feature_supported = pfeatures = self.conf_features_partial() for feature_name in list(pfeatures.keys()): feature = pfeatures[feature_name] cfeature = self.conf_features[feature_name] feature.update({ k:v for k,v in cfeature.items() if k not in feature }) disabled = feature.get("disable") if disabled is not None: pfeatures.pop(feature_name) self.dist_log( "feature '%s' is disabled," % feature_name, disabled, stderr=True ) continue # list is used internally for these options for option in ( "implies", "group", "detect", "headers", "flags", "extra_checks" ) : oval = feature.get(option) if isinstance(oval, str): feature[option] = oval.split() self.feature_min = set() min_f = self.conf_min_features.get(self.cc_march, "") for F in min_f.upper().split(): if F in self.feature_supported: self.feature_min.add(F) self.feature_is_cached = True def feature_names(self, names=None, force_flags=None, macros=[]): """ Returns a set of CPU feature names that supported by platform and the **C** compiler. Parameters ---------- names : sequence or None, optional Specify certain CPU features to test it against the **C** compiler. if None(default), it will test all current supported features. **Note**: feature names must be in upper-case. force_flags : list or None, optional If None(default), default compiler flags for every CPU feature will be used during the test. macros : list of tuples, optional A list of C macro definitions. """ assert( names is None or ( not isinstance(names, str) and hasattr(names, "__iter__") ) ) assert(force_flags is None or isinstance(force_flags, list)) if names is None: names = self.feature_supported.keys() supported_names = set() for f in names: if self.feature_is_supported( f, force_flags=force_flags, macros=macros ): supported_names.add(f) return supported_names def feature_is_exist(self, name): """ Returns True if a certain feature is exist and covered within `_Config.conf_features`. Parameters ---------- 'name': str feature name in uppercase. """ assert(name.isupper()) return name in self.conf_features def feature_sorted(self, names, reverse=False): """ Sort a list of CPU features ordered by the lowest interest. Parameters ---------- 'names': sequence sequence of supported feature names in uppercase. 'reverse': bool, optional If true, the sorted features is reversed. (highest interest) Returns ------- list, sorted CPU features """ def sort_cb(k): if isinstance(k, str): return self.feature_supported[k]["interest"] # multiple features rank = max([self.feature_supported[f]["interest"] for f in k]) # FIXME: that's not a safe way to increase the rank for # multi targets rank += len(k) -1 return rank return sorted(names, reverse=reverse, key=sort_cb) def feature_implies(self, names, keep_origins=False): """ Return a set of CPU features that implied by 'names' Parameters ---------- names : str or sequence of str CPU feature name(s) in uppercase. keep_origins : bool if False(default) then the returned set will not contain any features from 'names'. This case happens only when two features imply each other. 
Examples -------- >>> self.feature_implies("SSE3") {'SSE', 'SSE2'} >>> self.feature_implies("SSE2") {'SSE'} >>> self.feature_implies("SSE2", keep_origins=True) # 'SSE2' found here since 'SSE' and 'SSE2' imply each other {'SSE', 'SSE2'} """ def get_implies(name, _caller=set()): implies = set() d = self.feature_supported[name] for i in d.get("implies", []): implies.add(i) if i in _caller: # infinity recursive guard since # features can imply each other continue _caller.add(name) implies = implies.union(get_implies(i, _caller)) return implies if isinstance(names, str): implies = get_implies(names) names = [names] else: assert(hasattr(names, "__iter__")) implies = set() for n in names: implies = implies.union(get_implies(n)) if not keep_origins: implies.difference_update(names) return implies def feature_implies_c(self, names): """same as feature_implies() but combining 'names'""" if isinstance(names, str): names = set((names,)) else: names = set(names) return names.union(self.feature_implies(names)) def feature_ahead(self, names): """ Return list of features in 'names' after remove any implied features and keep the origins. Parameters ---------- 'names': sequence sequence of CPU feature names in uppercase. Returns ------- list of CPU features sorted as-is 'names' Examples -------- >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"]) ["SSE41"] # assume AVX2 and FMA3 implies each other and AVX2 # is the highest interest >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) ["AVX2"] # assume AVX2 and FMA3 don't implies each other >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) ["AVX2", "FMA3"] """ assert( not isinstance(names, str) and hasattr(names, '__iter__') ) implies = self.feature_implies(names, keep_origins=True) ahead = [n for n in names if n not in implies] if len(ahead) == 0: # return the highest interested feature # if all features imply each other ahead = self.feature_sorted(names, reverse=True)[:1] return ahead def feature_untied(self, names): """ same as 'feature_ahead()' but if both features implied each other and keep the highest interest. Parameters ---------- 'names': sequence sequence of CPU feature names in uppercase. Returns ------- list of CPU features sorted as-is 'names' Examples -------- >>> self.feature_untied(["SSE2", "SSE3", "SSE41"]) ["SSE2", "SSE3", "SSE41"] # assume AVX2 and FMA3 implies each other >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"]) ["SSE2", "SSE3", "SSE41", "AVX2"] """ assert( not isinstance(names, str) and hasattr(names, '__iter__') ) final = [] for n in names: implies = self.feature_implies(n) tied = [ nn for nn in final if nn in implies and n in self.feature_implies(nn) ] if tied: tied = self.feature_sorted(tied + [n]) if n not in tied[1:]: continue final.remove(tied[:1][0]) final.append(n) return final def feature_get_til(self, names, keyisfalse): """ same as `feature_implies_c()` but stop collecting implied features when feature's option that provided through parameter 'keyisfalse' is False, also sorting the returned features. 
""" def til(tnames): # sort from highest to lowest interest then cut if "key" is False tnames = self.feature_implies_c(tnames) tnames = self.feature_sorted(tnames, reverse=True) for i, n in enumerate(tnames): if not self.feature_supported[n].get(keyisfalse, True): tnames = tnames[:i+1] break return tnames if isinstance(names, str) or len(names) <= 1: names = til(names) # normalize the sort names.reverse() return names names = self.feature_ahead(names) names = {t for n in names for t in til(n)} return self.feature_sorted(names) def feature_detect(self, names): """ Return a list of CPU features that required to be detected sorted from the lowest to highest interest. """ names = self.feature_get_til(names, "implies_detect") detect = [] for n in names: d = self.feature_supported[n] detect += d.get("detect", d.get("group", [n])) return detect @_Cache.me def feature_flags(self, names): """ Return a list of CPU features flags sorted from the lowest to highest interest. """ names = self.feature_sorted(self.feature_implies_c(names)) flags = [] for n in names: d = self.feature_supported[n] f = d.get("flags", []) if not f or not self.cc_test_flags(f): continue flags += f return self.cc_normalize_flags(flags) @_Cache.me def feature_test(self, name, force_flags=None, macros=[]): """ Test a certain CPU feature against the compiler through its own check file. Parameters ---------- name : str Supported CPU feature name. force_flags : list or None, optional If None(default), the returned flags from `feature_flags()` will be used. macros : list of tuples, optional A list of C macro definitions. """ if force_flags is None: force_flags = self.feature_flags(name) self.dist_log( "testing feature '%s' with flags (%s)" % ( name, ' '.join(force_flags) )) # Each CPU feature must have C source code contains at # least one intrinsic or instruction related to this feature. test_path = os.path.join( self.conf_check_path, "cpu_%s.c" % name.lower() ) if not os.path.exists(test_path): self.dist_fatal("feature test file is not exist", test_path) test = self.dist_test( test_path, force_flags + self.cc_flags["werror"], macros=macros ) if not test: self.dist_log("testing failed", stderr=True) return test @_Cache.me def feature_is_supported(self, name, force_flags=None, macros=[]): """ Check if a certain CPU feature is supported by the platform and compiler. Parameters ---------- name : str CPU feature name in uppercase. force_flags : list or None, optional If None(default), default compiler flags for every CPU feature will be used during test. macros : list of tuples, optional A list of C macro definitions. """ assert(name.isupper()) assert(force_flags is None or isinstance(force_flags, list)) supported = name in self.feature_supported if supported: for impl in self.feature_implies(name): if not self.feature_test(impl, force_flags, macros=macros): return False if not self.feature_test(name, force_flags, macros=macros): return False return supported @_Cache.me def feature_can_autovec(self, name): """ check if the feature can be auto-vectorized by the compiler """ assert(isinstance(name, str)) d = self.feature_supported[name] can = d.get("autovec", None) if can is None: valid_flags = [ self.cc_test_flags([f]) for f in d.get("flags", []) ] can = valid_flags and any(valid_flags) return can @_Cache.me def feature_extra_checks(self, name): """ Return a list of supported extra checks after testing them against the compiler. Parameters ---------- names : str CPU feature name in uppercase. 
""" assert isinstance(name, str) d = self.feature_supported[name] extra_checks = d.get("extra_checks", []) if not extra_checks: return [] self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks) flags = self.feature_flags(name) available = [] not_available = [] for chk in extra_checks: test_path = os.path.join( self.conf_check_path, "extra_%s.c" % chk.lower() ) if not os.path.exists(test_path): self.dist_fatal("extra check file does not exist", test_path) is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"]) if is_supported: available.append(chk) else: not_available.append(chk) if not_available: self.dist_log("testing failed for checks", not_available, stderr=True) return available def feature_c_preprocessor(self, feature_name, tabs=0): """ Generate C preprocessor definitions and include headers of a CPU feature. Parameters ---------- 'feature_name': str CPU feature name in uppercase. 'tabs': int if > 0, align the generated strings to the right depend on number of tabs. Returns ------- str, generated C preprocessor Examples -------- >>> self.feature_c_preprocessor("SSE3") /** SSE3 **/ #define NPY_HAVE_SSE3 1 #include <pmmintrin.h> """ assert(feature_name.isupper()) feature = self.feature_supported.get(feature_name) assert(feature is not None) prepr = [ "/** %s **/" % feature_name, "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name) ] prepr += [ "#include <%s>" % h for h in feature.get("headers", []) ] extra_defs = feature.get("group", []) extra_defs += self.feature_extra_checks(feature_name) for edef in extra_defs: # Guard extra definitions in case of duplicate with # another feature prepr += [ "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef), "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef), "#endif", ] if tabs > 0: prepr = [('\t'*tabs) + l for l in prepr] return '\n'.join(prepr) class _Parse: """A helper class that parsing main arguments of `CCompilerOpt`, also parsing configuration statements in dispatch-able sources. Parameters ---------- cpu_baseline : str or None minimal set of required CPU features or special options. cpu_dispatch : str or None dispatched set of additional CPU features or special options. Special options can be: - **MIN**: Enables the minimum CPU features that utilized via `_Config.conf_min_features` - **MAX**: Enables all supported CPU features by the Compiler and platform. - **NATIVE**: Enables all CPU features that supported by the current machine. - **NONE**: Enables nothing - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**. NOTE: operand + is only added for nominal reason. NOTES: - Case-insensitive among all CPU features and special options. - Comma or space can be used as a separator. - If the CPU feature is not supported by the user platform or compiler, it will be skipped rather than raising a fatal error. - Any specified CPU features to 'cpu_dispatch' will be skipped if its part of CPU baseline features - 'cpu_baseline' force enables implied features. Attributes ---------- parse_baseline_names : list Final CPU baseline's feature names(sorted from low to high) parse_baseline_flags : list Compiler flags of baseline features parse_dispatch_names : list Final CPU dispatch-able feature names(sorted from low to high) parse_target_groups : dict Dictionary containing initialized target groups that configured through class attribute `conf_target_groups`. 
The key is represent the group name and value is a tuple contains three items : - bool, True if group has the 'baseline' option. - list, list of CPU features. - list, list of extra compiler flags. """ def __init__(self, cpu_baseline, cpu_dispatch): self._parse_policies = dict( # POLICY NAME, (HAVE, NOT HAVE, [DEB]) KEEP_BASELINE = ( None, self._parse_policy_not_keepbase, [] ), KEEP_SORT = ( self._parse_policy_keepsort, self._parse_policy_not_keepsort, [] ), MAXOPT = ( self._parse_policy_maxopt, None, [] ), WERROR = ( self._parse_policy_werror, None, [] ), AUTOVEC = ( self._parse_policy_autovec, None, ["MAXOPT"] ) ) if hasattr(self, "parse_is_cached"): return self.parse_baseline_names = [] self.parse_baseline_flags = [] self.parse_dispatch_names = [] self.parse_target_groups = {} if self.cc_noopt: # skip parsing baseline and dispatch args and keep parsing target groups cpu_baseline = cpu_dispatch = None self.dist_log("check requested baseline") if cpu_baseline is not None: cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline) baseline_names = self.feature_names(cpu_baseline) self.parse_baseline_flags = self.feature_flags(baseline_names) self.parse_baseline_names = self.feature_sorted( self.feature_implies_c(baseline_names) ) self.dist_log("check requested dispatch-able features") if cpu_dispatch is not None: cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch) cpu_dispatch = { f for f in cpu_dispatch_ if f not in self.parse_baseline_names } conflict_baseline = cpu_dispatch_.difference(cpu_dispatch) self.parse_dispatch_names = self.feature_sorted( self.feature_names(cpu_dispatch) ) if len(conflict_baseline) > 0: self.dist_log( "skip features", conflict_baseline, "since its part of baseline" ) self.dist_log("initialize targets groups") for group_name, tokens in self.conf_target_groups.items(): self.dist_log("parse target group", group_name) GROUP_NAME = group_name.upper() if not tokens or not tokens.strip(): # allow empty groups, useful in case if there's a need # to disable certain group since '_parse_target_tokens()' # requires at least one valid target self.parse_target_groups[GROUP_NAME] = ( False, [], [] ) continue has_baseline, features, extra_flags = \ self._parse_target_tokens(tokens) self.parse_target_groups[GROUP_NAME] = ( has_baseline, features, extra_flags ) self.parse_is_cached = True def parse_targets(self, source): """ Fetch and parse configuration statements that required for defining the targeted CPU features, statements should be declared in the top of source in between **C** comment and start with a special mark **@targets**. Configuration statements are sort of keywords representing CPU features names, group of statements and policies, combined together to determine the required optimization. Parameters ---------- source : str the path of **C** source file. Returns ------- - bool, True if group has the 'baseline' option - list, list of CPU features - list, list of extra compiler flags """ self.dist_log("looking for '@targets' inside -> ", source) # get lines between /*@targets and */ with open(source) as fd: tokens = "" max_to_reach = 1000 # good enough, isn't? 
start_with = "@targets" start_pos = -1 end_with = "*/" end_pos = -1 for current_line, line in enumerate(fd): if current_line == max_to_reach: self.dist_fatal("reached the max of lines") break if start_pos == -1: start_pos = line.find(start_with) if start_pos == -1: continue start_pos += len(start_with) tokens += line end_pos = line.find(end_with) if end_pos != -1: end_pos += len(tokens) - len(line) break if start_pos == -1: self.dist_fatal("expected to find '%s' within a C comment" % start_with) if end_pos == -1: self.dist_fatal("expected to end with '%s'" % end_with) tokens = tokens[start_pos:end_pos] return self._parse_target_tokens(tokens) _parse_regex_arg = re.compile(r'\s|,|([+-])') def _parse_arg_features(self, arg_name, req_features): if not isinstance(req_features, str): self.dist_fatal("expected a string in '%s'" % arg_name) final_features = set() # space and comma can be used as a separator tokens = list(filter(None, re.split(self._parse_regex_arg, req_features))) append = True # append is the default for tok in tokens: if tok[0] in ("#", "$"): self.dist_fatal( arg_name, "target groups and policies " "aren't allowed from arguments, " "only from dispatch-able sources" ) if tok == '+': append = True continue if tok == '-': append = False continue TOK = tok.upper() # we use upper-case internally features_to = set() if TOK == "NONE": pass elif TOK == "NATIVE": native = self.cc_flags["native"] if not native: self.dist_fatal(arg_name, "native option isn't supported by the compiler" ) features_to = self.feature_names( force_flags=native, macros=[("DETECT_FEATURES", 1)] ) elif TOK == "MAX": features_to = self.feature_supported.keys() elif TOK == "MIN": features_to = self.feature_min else: if TOK in self.feature_supported: features_to.add(TOK) else: if not self.feature_is_exist(TOK): self.dist_fatal(arg_name, ", '%s' isn't a known feature or option" % tok ) if append: final_features = final_features.union(features_to) else: final_features = final_features.difference(features_to) append = True # back to default return final_features _parse_regex_target = re.compile(r'\s|[*,/]|([()])') def _parse_target_tokens(self, tokens): assert(isinstance(tokens, str)) final_targets = [] # to keep it sorted as specified extra_flags = [] has_baseline = False skipped = set() policies = set() multi_target = None tokens = list(filter(None, re.split(self._parse_regex_target, tokens))) if not tokens: self.dist_fatal("expected one token at least") for tok in tokens: TOK = tok.upper() ch = tok[0] if ch in ('+', '-'): self.dist_fatal( "+/- are 'not' allowed from target's groups or @targets, " "only from cpu_baseline and cpu_dispatch parms" ) elif ch == '$': if multi_target is not None: self.dist_fatal( "policies aren't allowed inside multi-target '()'" ", only CPU features" ) policies.add(self._parse_token_policy(TOK)) elif ch == '#': if multi_target is not None: self.dist_fatal( "target groups aren't allowed inside multi-target '()'" ", only CPU features" ) has_baseline, final_targets, extra_flags = \ self._parse_token_group(TOK, has_baseline, final_targets, extra_flags) elif ch == '(': if multi_target is not None: self.dist_fatal("unclosed multi-target, missing ')'") multi_target = set() elif ch == ')': if multi_target is None: self.dist_fatal("multi-target opener '(' wasn't found") targets = self._parse_multi_target(multi_target) if targets is None: skipped.add(tuple(multi_target)) else: if len(targets) == 1: targets = targets[0] if targets and targets not in final_targets: final_targets.append(targets) 
multi_target = None # back to default else: if TOK == "BASELINE": if multi_target is not None: self.dist_fatal("baseline isn't allowed inside multi-target '()'") has_baseline = True continue if multi_target is not None: multi_target.add(TOK) continue if not self.feature_is_exist(TOK): self.dist_fatal("invalid target name '%s'" % TOK) is_enabled = ( TOK in self.parse_baseline_names or TOK in self.parse_dispatch_names ) if is_enabled: if TOK not in final_targets: final_targets.append(TOK) continue skipped.add(TOK) if multi_target is not None: self.dist_fatal("unclosed multi-target, missing ')'") if skipped: self.dist_log( "skip targets", skipped, "not part of baseline or dispatch-able features" ) final_targets = self.feature_untied(final_targets) # add polices dependencies for p in list(policies): _, _, deps = self._parse_policies[p] for d in deps: if d in policies: continue self.dist_log( "policy '%s' force enables '%s'" % ( p, d )) policies.add(d) # release policies filtrations for p, (have, nhave, _) in self._parse_policies.items(): func = None if p in policies: func = have self.dist_log("policy '%s' is ON" % p) else: func = nhave if not func: continue has_baseline, final_targets, extra_flags = func( has_baseline, final_targets, extra_flags ) return has_baseline, final_targets, extra_flags def _parse_token_policy(self, token): """validate policy token""" if len(token) <= 1 or token[-1:] == token[0]: self.dist_fatal("'$' must stuck in the begin of policy name") token = token[1:] if token not in self._parse_policies: self.dist_fatal( "'%s' is an invalid policy name, available policies are" % token, self._parse_policies.keys() ) return token def _parse_token_group(self, token, has_baseline, final_targets, extra_flags): """validate group token""" if len(token) <= 1 or token[-1:] == token[0]: self.dist_fatal("'#' must stuck in the begin of group name") token = token[1:] ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get( token, (False, None, []) ) if gtargets is None: self.dist_fatal( "'%s' is an invalid target group name, " % token + \ "available target groups are", self.parse_target_groups.keys() ) if ghas_baseline: has_baseline = True # always keep sorting as specified final_targets += [f for f in gtargets if f not in final_targets] extra_flags += [f for f in gextra_flags if f not in extra_flags] return has_baseline, final_targets, extra_flags def _parse_multi_target(self, targets): """validate multi targets that defined between parentheses()""" # remove any implied features and keep the origins if not targets: self.dist_fatal("empty multi-target '()'") if not all([ self.feature_is_exist(tar) for tar in targets ]) : self.dist_fatal("invalid target name in multi-target", targets) if not all([ ( tar in self.parse_baseline_names or tar in self.parse_dispatch_names ) for tar in targets ]) : return None targets = self.feature_ahead(targets) if not targets: return None # force sort multi targets, so it can be comparable targets = self.feature_sorted(targets) targets = tuple(targets) # hashable return targets def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags): """skip all baseline features""" skipped = [] for tar in final_targets[:]: is_base = False if isinstance(tar, str): is_base = tar in self.parse_baseline_names else: # multi targets is_base = all([ f in self.parse_baseline_names for f in tar ]) if is_base: skipped.append(tar) final_targets.remove(tar) if skipped: self.dist_log("skip baseline features", skipped) return has_baseline, final_targets, 
extra_flags def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags): """leave a notice that $keep_sort is on""" self.dist_log( "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n" "are 'not' sorted depend on the highest interest but" "as specified in the dispatch-able source or the extra group" ) return has_baseline, final_targets, extra_flags def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags): """sorted depend on the highest interest""" final_targets = self.feature_sorted(final_targets, reverse=True) return has_baseline, final_targets, extra_flags def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags): """append the compiler optimization flags""" if self.cc_has_debug: self.dist_log("debug mode is detected, policy 'maxopt' is skipped.") elif self.cc_noopt: self.dist_log("optimization is disabled, policy 'maxopt' is skipped.") else: flags = self.cc_flags["opt"] if not flags: self.dist_log( "current compiler doesn't support optimization flags, " "policy 'maxopt' is skipped", stderr=True ) else: extra_flags += flags return has_baseline, final_targets, extra_flags def _parse_policy_werror(self, has_baseline, final_targets, extra_flags): """force warnings to treated as errors""" flags = self.cc_flags["werror"] if not flags: self.dist_log( "current compiler doesn't support werror flags, " "warnings will 'not' treated as errors", stderr=True ) else: self.dist_log("compiler warnings are treated as errors") extra_flags += flags return has_baseline, final_targets, extra_flags def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags): """skip features that has no auto-vectorized support by compiler""" skipped = [] for tar in final_targets[:]: if isinstance(tar, str): can = self.feature_can_autovec(tar) else: # multiple target can = all([ self.feature_can_autovec(t) for t in tar ]) if not can: final_targets.remove(tar) skipped.append(tar) if skipped: self.dist_log("skip non auto-vectorized features", skipped) return has_baseline, final_targets, extra_flags class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse): """ A helper class for `CCompiler` aims to provide extra build options to effectively control of compiler optimizations that are directly related to CPU features. """ def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None): _Config.__init__(self) _Distutils.__init__(self, ccompiler) _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch) _CCompiler.__init__(self) _Feature.__init__(self) if not self.cc_noopt and self.cc_has_native: self.dist_log( "native flag is specified through environment variables. 
" "force cpu-baseline='native'" ) cpu_baseline = "native" _Parse.__init__(self, cpu_baseline, cpu_dispatch) # keep the requested features untouched, need it later for report # and trace purposes self._requested_baseline = cpu_baseline self._requested_dispatch = cpu_dispatch # key is the dispatch-able source and value is a tuple # contains two items (has_baseline[boolean], dispatched-features[list]) self.sources_status = getattr(self, "sources_status", {}) # every instance should has a separate one self.cache_private.add("sources_status") # set it at the end to make sure the cache writing was done after init # this class self.hit_cache = hasattr(self, "hit_cache") def is_cached(self): """ Returns True if the class loaded from the cache file """ return self.cache_infile and self.hit_cache def cpu_baseline_flags(self): """ Returns a list of final CPU baseline compiler flags """ return self.parse_baseline_flags def cpu_baseline_names(self): """ return a list of final CPU baseline feature names """ return self.parse_baseline_names def cpu_dispatch_names(self): """ return a list of final CPU dispatch feature names """ return self.parse_dispatch_names def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): """ Compile one or more dispatch-able sources and generates object files, also generates abstract C config headers and macros that used later for the final runtime dispatching process. The mechanism behind it is to takes each source file that specified in 'sources' and branching it into several files depend on special configuration statements that must be declared in the top of each source which contains targeted CPU features, then it compiles every branched source with the proper compiler flags. Parameters ---------- sources : list Must be a list of dispatch-able sources file paths, and configuration statements must be declared inside each file. src_dir : str Path of parent directory for the generated headers and wrapped sources. If None(default) the files will generated in-place. ccompiler : CCompiler Distutils `CCompiler` instance to be used for compilation. If None (default), the provided instance during the initialization will be used instead. **kwargs : any Arguments to pass on to the `CCompiler.compile()` Returns ------- list : generated object files Raises ------ CompileError Raises by `CCompiler.compile()` on compiling failure. DistutilsError Some errors during checking the sanity of configuration statements. See Also -------- parse_targets : Parsing the configuration statements of dispatch-able sources. 
""" to_compile = {} baseline_flags = self.cpu_baseline_flags() include_dirs = kwargs.setdefault("include_dirs", []) for src in sources: output_dir = os.path.dirname(src) if src_dir: if not output_dir.startswith(src_dir): output_dir = os.path.join(src_dir, output_dir) if output_dir not in include_dirs: # To allow including the generated config header(*.dispatch.h) # by the dispatch-able sources include_dirs.append(output_dir) has_baseline, targets, extra_flags = self.parse_targets(src) nochange = self._generate_config(output_dir, src, targets, has_baseline) for tar in targets: tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange) flags = tuple(extra_flags + self.feature_flags(tar)) to_compile.setdefault(flags, []).append(tar_src) if has_baseline: flags = tuple(extra_flags + baseline_flags) to_compile.setdefault(flags, []).append(src) self.sources_status[src] = (has_baseline, targets) # For these reasons, the sources are compiled in a separate loop: # - Gathering all sources with the same flags to benefit from # the parallel compiling as much as possible. # - To generate all config headers of the dispatchable sources, # before the compilation in case if there are dependency relationships # among them. objects = [] for flags, srcs in to_compile.items(): objects += self.dist_compile( srcs, list(flags), ccompiler=ccompiler, **kwargs ) return objects def generate_dispatch_header(self, header_path): """ Generate the dispatch header which contains the #definitions and headers for platform-specific instruction-sets for the enabled CPU baseline and dispatch-able features. Its highly recommended to take a look at the generated header also the generated source files via `try_dispatch()` in order to get the full picture. """ self.dist_log("generate CPU dispatch header: (%s)" % header_path) baseline_names = self.cpu_baseline_names() dispatch_names = self.cpu_dispatch_names() baseline_len = len(baseline_names) dispatch_len = len(dispatch_names) header_dir = os.path.dirname(header_path) if not os.path.exists(header_dir): self.dist_log( f"dispatch header dir {header_dir} does not exist, creating it", stderr=True ) os.makedirs(header_dir) with open(header_path, 'w') as f: baseline_calls = ' \\\n'.join([ ( "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" ) % (self.conf_c_prefix, f) for f in baseline_names ]) dispatch_calls = ' \\\n'.join([ ( "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" ) % (self.conf_c_prefix, f) for f in dispatch_names ]) f.write(textwrap.dedent("""\ /* * AUTOGENERATED DON'T EDIT * Please make changes to the code generator (distutils/ccompiler_opt.py) */ #define {pfx}WITH_CPU_BASELINE "{baseline_str}" #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}" #define {pfx}WITH_CPU_BASELINE_N {baseline_len} #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len} #define {pfx}WITH_CPU_EXPAND_(X) X #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\ {baseline_calls} #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) 
\\ {dispatch_calls} """).format( pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names), dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len, dispatch_len=dispatch_len, baseline_calls=baseline_calls, dispatch_calls=dispatch_calls )) baseline_pre = '' for name in baseline_names: baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n' dispatch_pre = '' for name in dispatch_names: dispatch_pre += textwrap.dedent("""\ #ifdef {pfx}CPU_TARGET_{name} {pre} #endif /*{pfx}CPU_TARGET_{name}*/ """).format( pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor( name, tabs=1 )) f.write(textwrap.dedent("""\ /******* baseline features *******/ {baseline_pre} /******* dispatch features *******/ {dispatch_pre} """).format( pfx=self.conf_c_prefix_, baseline_pre=baseline_pre, dispatch_pre=dispatch_pre )) def report(self, full=False): report = [] platform_rows = [] baseline_rows = [] dispatch_rows = [] report.append(("Platform", platform_rows)) report.append(("", "")) report.append(("CPU baseline", baseline_rows)) report.append(("", "")) report.append(("CPU dispatch", dispatch_rows)) ########## platform ########## platform_rows.append(("Architecture", ( "unsupported" if self.cc_on_noarch else self.cc_march) )) platform_rows.append(("Compiler", ( "unix-like" if self.cc_is_nocc else self.cc_name) )) ########## baseline ########## if self.cc_noopt: baseline_rows.append(("Requested", "optimization disabled")) else: baseline_rows.append(("Requested", repr(self._requested_baseline))) baseline_names = self.cpu_baseline_names() baseline_rows.append(( "Enabled", (' '.join(baseline_names) if baseline_names else "none") )) baseline_flags = self.cpu_baseline_flags() baseline_rows.append(( "Flags", (' '.join(baseline_flags) if baseline_flags else "none") )) extra_checks = [] for name in baseline_names: extra_checks += self.feature_extra_checks(name) baseline_rows.append(( "Extra checks", (' '.join(extra_checks) if extra_checks else "none") )) ########## dispatch ########## if self.cc_noopt: baseline_rows.append(("Requested", "optimization disabled")) else: dispatch_rows.append(("Requested", repr(self._requested_dispatch))) dispatch_names = self.cpu_dispatch_names() dispatch_rows.append(( "Enabled", (' '.join(dispatch_names) if dispatch_names else "none") )) ########## Generated ########## # TODO: # - collect object names from 'try_dispatch()' # then get size of each object and printed # - give more details about the features that not # generated due compiler support # - find a better output's design. 
# target_sources = {} for source, (_, targets) in self.sources_status.items(): for tar in targets: target_sources.setdefault(tar, []).append(source) if not full or not target_sources: generated = "" for tar in self.feature_sorted(target_sources): sources = target_sources[tar] name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) generated += name + "[%d] " % len(sources) dispatch_rows.append(("Generated", generated[:-1] if generated else "none")) else: dispatch_rows.append(("Generated", '')) for tar in self.feature_sorted(target_sources): sources = target_sources[tar] pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) flags = ' '.join(self.feature_flags(tar)) implies = ' '.join(self.feature_sorted(self.feature_implies(tar))) detect = ' '.join(self.feature_detect(tar)) extra_checks = [] for name in ((tar,) if isinstance(tar, str) else tar): extra_checks += self.feature_extra_checks(name) extra_checks = (' '.join(extra_checks) if extra_checks else "none") dispatch_rows.append(('', '')) dispatch_rows.append((pretty_name, implies)) dispatch_rows.append(("Flags", flags)) dispatch_rows.append(("Extra checks", extra_checks)) dispatch_rows.append(("Detect", detect)) for src in sources: dispatch_rows.append(("", src)) ############################### # TODO: add support for 'markdown' format text = [] secs_len = [len(secs) for secs, _ in report] cols_len = [len(col) for _, rows in report for col, _ in rows] tab = ' ' * 2 pad = max(max(secs_len), max(cols_len)) for sec, rows in report: if not sec: text.append("") # empty line continue sec += ' ' * (pad - len(sec)) text.append(sec + tab + ': ') for col, val in rows: col += ' ' * (pad - len(col)) text.append(tab + col + ': ' + val) return '\n'.join(text) def _wrap_target(self, output_dir, dispatch_src, target, nochange=False): assert(isinstance(target, (str, tuple))) if isinstance(target, str): ext_name = target_name = target else: # multi-target ext_name = '.'.join(target) target_name = '__'.join(target) wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src)) wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower()) if nochange and os.path.exists(wrap_path): return wrap_path self.dist_log("wrap dispatch-able target -> ", wrap_path) # sorting for readability features = self.feature_sorted(self.feature_implies_c(target)) target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_ target_defs = [target_join + f for f in features] target_defs = '\n'.join(target_defs) with open(wrap_path, "w") as fd: fd.write(textwrap.dedent("""\ /** * AUTOGENERATED DON'T EDIT * Please make changes to the code generator \ (distutils/ccompiler_opt.py) */ #define {pfx}CPU_TARGET_MODE #define {pfx}CPU_TARGET_CURRENT {target_name} {target_defs} #include "{path}" """).format( pfx=self.conf_c_prefix_, target_name=target_name, path=os.path.abspath(dispatch_src), target_defs=target_defs )) return wrap_path def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False): config_path = os.path.basename(dispatch_src) config_path = os.path.splitext(config_path)[0] + '.h' config_path = os.path.join(output_dir, config_path) # check if targets didn't change to avoid recompiling cache_hash = self.cache_hash(targets, has_baseline) try: with open(config_path) as f: last_hash = f.readline().split("cache_hash:") if len(last_hash) == 2 and int(last_hash[1]) == cache_hash: return True except OSError: pass os.makedirs(os.path.dirname(config_path), exist_ok=True) self.dist_log("generate dispatched config -> ", 
config_path) dispatch_calls = [] for tar in targets: if isinstance(tar, str): target_name = tar else: # multi target target_name = '__'.join([t for t in tar]) req_detect = self.feature_detect(tar) req_detect = '&&'.join([ "CHK(%s)" % f for f in req_detect ]) dispatch_calls.append( "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % ( self.conf_c_prefix_, req_detect, target_name )) dispatch_calls = ' \\\n'.join(dispatch_calls) if has_baseline: baseline_calls = ( "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))" ) % self.conf_c_prefix_ else: baseline_calls = '' with open(config_path, "w") as fd: fd.write(textwrap.dedent("""\ // cache_hash:{cache_hash} /** * AUTOGENERATED DON'T EDIT * Please make changes to the code generator (distutils/ccompiler_opt.py) */ #ifndef {pfx}CPU_DISPATCH_EXPAND_ #define {pfx}CPU_DISPATCH_EXPAND_(X) X #endif #undef {pfx}CPU_DISPATCH_BASELINE_CALL #undef {pfx}CPU_DISPATCH_CALL #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\ {baseline_calls} #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\ {dispatch_calls} """).format( pfx=self.conf_c_prefix_, baseline_calls=baseline_calls, dispatch_calls=dispatch_calls, cache_hash=cache_hash )) return False def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs): """ Create a new instance of 'CCompilerOpt' and generate the dispatch header which contains the #definitions and headers of platform-specific instruction-sets for the enabled CPU baseline and dispatch-able features. Parameters ---------- compiler : CCompiler instance dispatch_hpath : str path of the dispatch header **kwargs: passed as-is to `CCompilerOpt(...)` Returns ------- new instance of CCompilerOpt """ opt = CCompilerOpt(compiler, **kwargs) if not os.path.exists(dispatch_hpath) or not opt.is_cached(): opt.generate_dispatch_header(dispatch_hpath) return opt
99,751
Python
36.557229
107
0.53427
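The CCompilerOpt class above is normally driven from numpy's own build commands; the following is a minimal illustrative sketch (not part of the corpus) of using new_ccompiler_opt() and try_dispatch() directly. The file name "_simd.dispatch.c", the output paths, and the example "/*@targets ...*/" line are assumptions, shown only to illustrate the comment syntax that parse_targets() expects.

# Hypothetical driver for CCompilerOpt, assuming a dispatch-able C source whose
# first lines contain a comment such as:
#     /*@targets $maxopt baseline sse42 avx2 avx512_skx */
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from numpy.distutils.ccompiler_opt import new_ccompiler_opt

cc = new_compiler()
customize_compiler(cc)
# generates the dispatch header and returns a configured CCompilerOpt instance
opt = new_ccompiler_opt(cc, "build/npy_cpu_dispatch_config.h",
                        cpu_baseline="min", cpu_dispatch="max")
# branch, wrap and compile the source once per enabled target
objects = opt.try_dispatch(["_simd.dispatch.c"], src_dir="build")
print(opt.report(full=True))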
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/ccompiler.py
import os import re import sys import shlex import time import subprocess from copy import copy from distutils import ccompiler from distutils.ccompiler import ( compiler_class, gen_lib_options, get_default_compiler, new_compiler, CCompiler ) from distutils.errors import ( DistutilsExecError, DistutilsModuleError, DistutilsPlatformError, CompileError, UnknownFileError ) from distutils.sysconfig import customize_compiler from distutils.version import LooseVersion from numpy.distutils import log from numpy.distutils.exec_command import ( filepath_from_subprocess_output, forward_bytes_to_stdout ) from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ get_num_build_jobs, \ _commandline_dep_string, \ sanitize_cxx_flags # globals for parallel build management import threading _job_semaphore = None _global_lock = threading.Lock() _processing_files = set() def _needs_build(obj, cc_args, extra_postargs, pp_opts): """ Check if an objects needs to be rebuild based on its dependencies Parameters ---------- obj : str object file Returns ------- bool """ # defined in unixcompiler.py dep_file = obj + '.d' if not os.path.exists(dep_file): return True # dep_file is a makefile containing 'object: dependencies' # formatted like posix shell (spaces escaped, \ line continuations) # the last line contains the compiler commandline arguments as some # projects may compile an extension multiple times with different # arguments with open(dep_file, "r") as f: lines = f.readlines() cmdline =_commandline_dep_string(cc_args, extra_postargs, pp_opts) last_cmdline = lines[-1] if last_cmdline != cmdline: return True contents = ''.join(lines[:-1]) deps = [x for x in shlex.split(contents, posix=True) if x != "\n" and not x.endswith(":")] try: t_obj = os.stat(obj).st_mtime # check if any of the dependencies is newer than the object # the dependencies includes the source used to create the object for f in deps: if os.stat(f).st_mtime > t_obj: return True except OSError: # no object counts as newer (shouldn't happen if dep_file exists) return True return False def replace_method(klass, method_name, func): # Py3k does not have unbound method anymore, MethodType does not work m = lambda self, *args, **kw: func(self, *args, **kw) setattr(klass, method_name, m) ###################################################################### ## Method that subclasses may redefine. But don't call this method, ## it i private to CCompiler class and may return unexpected ## results if used elsewhere. So, you have been warned.. def CCompiler_find_executables(self): """ Does nothing here, but is called by the get_version method and can be overridden by subclasses. In particular it is redefined in the `FCompiler` class where more documentation can be found. """ pass replace_method(CCompiler, 'find_executables', CCompiler_find_executables) # Using customized CCompiler.spawn. def CCompiler_spawn(self, cmd, display=None, env=None): """ Execute a command in a sub-process. Parameters ---------- cmd : str The command to execute. display : str or sequence of str, optional The text to add to the log file kept by `numpy.distutils`. If not given, `display` is equal to `cmd`. env : a dictionary for environment variables, optional Returns ------- None Raises ------ DistutilsExecError If the command failed, i.e. the exit status was not 0. 
""" env = env if env is not None else dict(os.environ) if display is None: display = cmd if is_sequence(display): display = ' '.join(list(display)) log.info(display) try: if self.verbose: subprocess.check_output(cmd, env=env) else: subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) except subprocess.CalledProcessError as exc: o = exc.output s = exc.returncode except OSError as e: # OSError doesn't have the same hooks for the exception # output, but exec_command() historically would use an # empty string for EnvironmentError (base class for # OSError) # o = b'' # still that would make the end-user lost in translation! o = f"\n\n{e}\n\n\n" try: o = o.encode(sys.stdout.encoding) except AttributeError: o = o.encode('utf8') # status previously used by exec_command() for parent # of OSError s = 127 else: # use a convenience return here so that any kind of # caught exception will execute the default code after the # try / except block, which handles various exceptions return None if is_sequence(cmd): cmd = ' '.join(list(cmd)) if self.verbose: forward_bytes_to_stdout(o) if re.search(b'Too many open files', o): msg = '\nTry rerunning setup command until build succeeds.' else: msg = '' raise DistutilsExecError('Command "%s" failed with exit status %d%s' % (cmd, s, msg)) replace_method(CCompiler, 'spawn', CCompiler_spawn) def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): """ Return the name of the object files for the given source files. Parameters ---------- source_filenames : list of str The list of paths to source files. Paths can be either relative or absolute, this is handled transparently. strip_dir : bool, optional Whether to strip the directory from the returned paths. If True, the file name prepended by `output_dir` is returned. Default is False. output_dir : str, optional If given, this path is prepended to the returned paths to the object files. Returns ------- obj_names : list of str The list of paths to the object files corresponding to the source files in `source_filenames`. """ if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: base, ext = os.path.splitext(os.path.normpath(src_name)) base = os.path.splitdrive(base)[1] # Chop off the drive base = base[os.path.isabs(base):] # If abs, chop off leading / if base.startswith('..'): # Resolve starting relative path components, middle ones # (if any) have been handled by os.path.normpath above. i = base.rfind('..')+2 d = base[:i] d = os.path.basename(os.path.abspath(d)) base = d + base[i:] if ext not in self.src_extensions: raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) if strip_dir: base = os.path.basename(base) obj_name = os.path.join(output_dir, base + self.obj_extension) obj_names.append(obj_name) return obj_names replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) def CCompiler_compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): """ Compile one or more source files. Please refer to the Python distutils API reference for more details. Parameters ---------- sources : list of str A list of filenames output_dir : str, optional Path to the output directory. macros : list of tuples A list of macro definitions. include_dirs : list of str, optional The directories to add to the default include file search path for this compilation only. 
debug : bool, optional Whether or not to output debug symbols in or alongside the object file(s). extra_preargs, extra_postargs : ? Extra pre- and post-arguments. depends : list of str, optional A list of file names that all targets depend on. Returns ------- objects : list of str A list of object file names, one per source file `sources`. Raises ------ CompileError If compilation fails. """ global _job_semaphore jobs = get_num_build_jobs() # setup semaphore to not exceed number of compile jobs when parallelized at # extension level (python >= 3.5) with _global_lock: if _job_semaphore is None: _job_semaphore = threading.Semaphore(jobs) if not sources: return [] from numpy.distutils.fcompiler import (FCompiler, is_f_file, has_f90_header) if isinstance(self, FCompiler): display = [] for fc in ['f77', 'f90', 'fix']: fcomp = getattr(self, 'compiler_'+fc) if fcomp is None: continue display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) display = '\n'.join(display) else: ccomp = self.compiler_so display = "C compiler: %s\n" % (' '.join(ccomp),) log.info(display) macros, objects, extra_postargs, pp_opts, build = \ self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) display = "compile options: '%s'" % (' '.join(cc_args)) if extra_postargs: display += "\nextra options: '%s'" % (' '.join(extra_postargs)) log.info(display) def single_compile(args): obj, (src, ext) = args if not _needs_build(obj, cc_args, extra_postargs, pp_opts): return # check if we are currently already processing the same object # happens when using the same source in multiple extensions while True: # need explicit lock as there is no atomic check and add with GIL with _global_lock: # file not being worked on, start working if obj not in _processing_files: _processing_files.add(obj) break # wait for the processing to end time.sleep(0.1) try: # retrieve slot from our #job semaphore and build with _job_semaphore: self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) finally: # register being done processing with _global_lock: _processing_files.remove(obj) if isinstance(self, FCompiler): objects_to_build = list(build.keys()) f77_objects, other_objects = [], [] for obj in objects: if obj in objects_to_build: src, ext = build[obj] if self.compiler_type=='absoft': obj = cyg2win32(obj) src = cyg2win32(src) if is_f_file(src) and not has_f90_header(src): f77_objects.append((obj, (src, ext))) else: other_objects.append((obj, (src, ext))) # f77 objects can be built in parallel build_items = f77_objects # build f90 modules serial, module files are generated during # compilation and may be used by files later in the list so the # ordering is important for o in other_objects: single_compile(o) else: build_items = build.items() if len(build) > 1 and jobs > 1: # build parallel from concurrent.futures import ThreadPoolExecutor with ThreadPoolExecutor(jobs) as pool: res = pool.map(single_compile, build_items) list(res) # access result to raise errors else: # build serial for o in build_items: single_compile(o) # Return *all* object filenames, not just the ones we just built. return objects replace_method(CCompiler, 'compile', CCompiler_compile) def CCompiler_customize_cmd(self, cmd, ignore=()): """ Customize compiler using distutils command. Parameters ---------- cmd : class instance An instance inheriting from `distutils.cmd.Command`. 
ignore : sequence of str, optional List of `CCompiler` commands (without ``'set_'``) that should not be altered. Strings that are checked for are: ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', 'rpath', 'link_objects')``. Returns ------- None """ log.info('customize %s using %s' % (self.__class__.__name__, cmd.__class__.__name__)) if hasattr(self, 'compiler') and 'clang' in self.compiler[0]: # clang defaults to a non-strict floating error point model. # Since NumPy and most Python libs give warnings for these, override: self.compiler.append('-ftrapping-math') self.compiler_so.append('-ftrapping-math') def allow(attr): return getattr(cmd, attr, None) is not None and attr not in ignore if allow('include_dirs'): self.set_include_dirs(cmd.include_dirs) if allow('define'): for (name, value) in cmd.define: self.define_macro(name, value) if allow('undef'): for macro in cmd.undef: self.undefine_macro(macro) if allow('libraries'): self.set_libraries(self.libraries + cmd.libraries) if allow('library_dirs'): self.set_library_dirs(self.library_dirs + cmd.library_dirs) if allow('rpath'): self.set_runtime_library_dirs(cmd.rpath) if allow('link_objects'): self.set_link_objects(cmd.link_objects) replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) def _compiler_to_string(compiler): props = [] mx = 0 keys = list(compiler.executables.keys()) for key in ['version', 'libraries', 'library_dirs', 'object_switch', 'compile_switch', 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: if key not in keys: keys.append(key) for key in keys: if hasattr(compiler, key): v = getattr(compiler, key) mx = max(mx, len(key)) props.append((key, repr(v))) fmt = '%-' + repr(mx+1) + 's = %s' lines = [fmt % prop for prop in props] return '\n'.join(lines) def CCompiler_show_customization(self): """ Print the compiler customizations to stdout. Parameters ---------- None Returns ------- None Notes ----- Printing is only done if the distutils log threshold is < 2. """ try: self.get_version() except Exception: pass if log._global_log.threshold<2: print('*'*80) print(self.__class__) print(_compiler_to_string(self)) print('*'*80) replace_method(CCompiler, 'show_customization', CCompiler_show_customization) def CCompiler_customize(self, dist, need_cxx=0): """ Do any platform-specific customization of a compiler instance. This method calls `distutils.sysconfig.customize_compiler` for platform-specific customization, as well as optionally remove a flag to suppress spurious warnings in case C++ code is being compiled. Parameters ---------- dist : object This parameter is not used for anything. need_cxx : bool, optional Whether or not C++ has to be compiled. If so (True), the ``"-Wstrict-prototypes"`` option is removed to prevent spurious warnings. Default is False. Returns ------- None Notes ----- All the default options used by distutils can be extracted with:: from distutils import sysconfig sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', 'CCSHARED', 'LDSHARED', 'SO') """ # See FCompiler.customize for suggested usage. log.info('customize %s' % (self.__class__.__name__)) customize_compiler(self) if need_cxx: # In general, distutils uses -Wstrict-prototypes, but this option is # not valid for C++ code, only for C. Remove it if it's there to # avoid a spurious warning on every compilation. 
try: self.compiler_so.remove('-Wstrict-prototypes') except (AttributeError, ValueError): pass if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: if not self.compiler_cxx: if self.compiler[0].startswith('gcc'): a, b = 'gcc', 'g++' else: a, b = 'cc', 'c++' self.compiler_cxx = [self.compiler[0].replace(a, b)]\ + self.compiler[1:] else: if hasattr(self, 'compiler'): log.warn("#### %s #######" % (self.compiler,)) if not hasattr(self, 'compiler_cxx'): log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) # check if compiler supports gcc style automatic dependencies # run on every extension so skip for known good compilers if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or 'g++' in self.compiler[0] or 'clang' in self.compiler[0]): self._auto_depends = True elif os.name == 'posix': import tempfile import shutil tmpdir = tempfile.mkdtemp() try: fn = os.path.join(tmpdir, "file.c") with open(fn, "w") as f: f.write("int a;\n") self.compile([fn], output_dir=tmpdir, extra_preargs=['-MMD', '-MF', fn + '.d']) self._auto_depends = True except CompileError: self._auto_depends = False finally: shutil.rmtree(tmpdir) return replace_method(CCompiler, 'customize', CCompiler_customize) def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): """ Simple matching of version numbers, for use in CCompiler and FCompiler. Parameters ---------- pat : str, optional A regular expression matching version numbers. Default is ``r'[-.\\d]+'``. ignore : str, optional A regular expression matching patterns to skip. Default is ``''``, in which case nothing is skipped. start : str, optional A regular expression matching the start of where to start looking for version numbers. Default is ``''``, in which case searching is started at the beginning of the version string given to `matcher`. Returns ------- matcher : callable A function that is appropriate to use as the ``.version_match`` attribute of a `CCompiler` class. `matcher` takes a single parameter, a version string. """ def matcher(self, version_string): # version string may appear in the second line, so getting rid # of new lines: version_string = version_string.replace('\n', ' ') pos = 0 if start: m = re.match(start, version_string) if not m: return None pos = m.end() while True: m = re.search(pat, version_string[pos:]) if not m: return None if ignore and re.match(ignore, m.group(0)): pos = m.end() continue break return m.group(0) return matcher def CCompiler_get_version(self, force=False, ok_status=[0]): """ Return compiler version, or None if compiler is not available. Parameters ---------- force : bool, optional If True, force a new determination of the version, even if the compiler already has a version attribute. Default is False. ok_status : list of int, optional The list of status values returned by the version look-up process for which a version string is returned. If the status value is not in `ok_status`, None is returned. Default is ``[0]``. Returns ------- version : str or None Version string, in the format of `distutils.version.LooseVersion`. 
""" if not force and hasattr(self, 'version'): return self.version self.find_executables() try: version_cmd = self.version_cmd except AttributeError: return None if not version_cmd or not version_cmd[0]: return None try: matcher = self.version_match except AttributeError: try: pat = self.version_pattern except AttributeError: return None def matcher(version_string): m = re.match(pat, version_string) if not m: return None version = m.group('version') return version try: output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as exc: output = exc.output status = exc.returncode except OSError: # match the historical returns for a parent # exception class caught by exec_command() status = 127 output = b'' else: # output isn't actually a filepath but we do this # for now to match previous distutils behavior output = filepath_from_subprocess_output(output) status = 0 version = None if status in ok_status: version = matcher(output) if version: version = LooseVersion(version) self.version = version return version replace_method(CCompiler, 'get_version', CCompiler_get_version) def CCompiler_cxx_compiler(self): """ Return the C++ compiler. Parameters ---------- None Returns ------- cxx : class instance The C++ compiler, as a `CCompiler` instance. """ if self.compiler_type in ('msvc', 'intelw', 'intelemw'): return self cxx = copy(self) cxx.compiler_cxx = cxx.compiler_cxx cxx.compiler_so = [cxx.compiler_cxx[0]] + \ sanitize_cxx_flags(cxx.compiler_so[1:]) if (sys.platform.startswith(('aix', 'os400')) and 'ld_so_aix' in cxx.linker_so[0]): # AIX needs the ld_so_aix script included with Python cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ + cxx.linker_so[2:] if sys.platform.startswith('os400'): #This is required by i 7.4 and prievous for PRId64 in printf() call. cxx.compiler_so.append('-D__STDC_FORMAT_MACROS') #This a bug of gcc10.3, which failed to handle the TLS init. cxx.compiler_so.append('-fno-extern-tls-init') cxx.linker_so.append('-fno-extern-tls-init') else: cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] return cxx replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', "Intel C Compiler for 32-bit applications") compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', "Intel C Itanium Compiler for Itanium-based applications") compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', "Intel C Compiler for 64-bit applications") compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', "Intel C Compiler for 32-bit applications on Windows") compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', "Intel C Compiler for 64-bit applications on Windows") compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', "PathScale Compiler for SiCortex-based applications") compiler_class['arm'] = ('armccompiler', 'ArmCCompiler', "Arm C Compiler") ccompiler._default_compilers += (('linux.*', 'intel'), ('linux.*', 'intele'), ('linux.*', 'intelem'), ('linux.*', 'pathcc'), ('nt', 'intelw'), ('nt', 'intelemw')) if sys.platform == 'win32': compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', "Mingw32 port of GNU C Compiler for Win32"\ "(for MSC built Python)") if mingw32(): # On windows platforms, we want to default to mingw32 (gcc) # because msvc can't build blitz stuff. 
log.info('Setting mingw32 as default compiler for nt.') ccompiler._default_compilers = (('nt', 'mingw32'),) \ + ccompiler._default_compilers _distutils_new_compiler = new_compiler def new_compiler (plat=None, compiler=None, verbose=None, dry_run=0, force=0): # Try first C compilers from numpy.distutils. if verbose is None: verbose = log.get_threshold() <= log.INFO if plat is None: plat = os.name try: if compiler is None: compiler = get_default_compiler(plat) (module_name, class_name, long_description) = compiler_class[compiler] except KeyError: msg = "don't know how to compile C/C++ code on platform '%s'" % plat if compiler is not None: msg = msg + " with '%s' compiler" % compiler raise DistutilsPlatformError(msg) module_name = "numpy.distutils." + module_name try: __import__ (module_name) except ImportError as e: msg = str(e) log.info('%s in numpy.distutils; trying from distutils', str(msg)) module_name = module_name[6:] try: __import__(module_name) except ImportError as e: msg = str(e) raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ module_name) try: module = sys.modules[module_name] klass = vars(module)[class_name] except KeyError: raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + "in module '%s'") % (class_name, module_name)) compiler = klass(None, dry_run, force) compiler.verbose = verbose log.debug('new_compiler returns %s' % (klass)) return compiler ccompiler.new_compiler = new_compiler _distutils_gen_lib_options = gen_lib_options def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): # the version of this function provided by CPython allows the following # to return lists, which are unpacked automatically: # - compiler.runtime_library_dir_option # our version extends the behavior to: # - compiler.library_dir_option # - compiler.library_option # - compiler.find_library_file r = _distutils_gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries) lib_opts = [] for i in r: if is_sequence(i): lib_opts.extend(list(i)) else: lib_opts.append(i) return lib_opts ccompiler.gen_lib_options = gen_lib_options # Also fix up the various compiler modules, which do # from distutils.ccompiler import gen_lib_options # Don't bother with mwerks, as we don't support Classic Mac. for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: _m = sys.modules.get('distutils.' + _cc + 'compiler') if _m is not None: setattr(_m, 'gen_lib_options', gen_lib_options)
28,126
Python
33.511656
97
0.588246
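Because the module above patches helper methods onto distutils' CCompiler at import time, an ordinary compiler instance obtained through its new_compiler() gains the numpy-specific behaviour. A small illustrative sketch, not part of the corpus; which concrete compiler class is returned depends on the platform:

import numpy.distutils.ccompiler as np_ccompiler   # importing applies the CCompiler patches

cc = np_ccompiler.new_compiler(verbose=1)   # platform default compiler
cc.customize(None)           # patched CCompiler_customize; the dist argument is unused
print(cc.get_version())      # patched CCompiler_get_version, may print None
cc.show_customization()      # patched CCompiler_show_customization

# simple_version_match() builds a function intended to be assigned as a class
# attribute of a compiler class, e.g. on a hypothetical subclass:
#     MyCompiler.version_match = np_ccompiler.simple_version_match(start=r'GCC')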
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/lib2def.py
import re import sys import subprocess __doc__ = """This module generates a DEF file from the symbols in an MSVC-compiled DLL import library. It correctly discriminates between data and functions. The data is collected from the output of the program nm(1). Usage: python lib2def.py [libname.lib] [output.def] or python lib2def.py [libname.lib] > output.def libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout Author: Robert Kern <[email protected]> Last Update: April 30, 1999 """ __version__ = '0.1a' py_ver = "%d%d" % tuple(sys.version_info[:2]) DEFAULT_NM = ['nm', '-Cs'] DEF_HEADER = """LIBRARY python%s.dll ;CODE PRELOAD MOVEABLE DISCARDABLE ;DATA PRELOAD SINGLE EXPORTS """ % py_ver # the header of the DEF file FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) def parse_cmd(): """Parses the command-line arguments. libfile, deffile = parse_cmd()""" if len(sys.argv) == 3: if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': libfile, deffile = sys.argv[1:] elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': deffile, libfile = sys.argv[1:] else: print("I'm assuming that your first argument is the library") print("and the second is the DEF file.") elif len(sys.argv) == 2: if sys.argv[1][-4:] == '.def': deffile = sys.argv[1] libfile = 'python%s.lib' % py_ver elif sys.argv[1][-4:] == '.lib': deffile = None libfile = sys.argv[1] else: libfile = 'python%s.lib' % py_ver deffile = None return libfile, deffile def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): """Returns the output of nm_cmd via a pipe. nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) nm_output, nm_err = p.communicate() if p.returncode != 0: raise RuntimeError('failed to run "%s": "%s"' % ( ' '.join(nm_cmd), nm_err)) return nm_output def parse_nm(nm_output): """Returns a tuple of lists: dlist for the list of data symbols and flist for the list of function symbols. dlist, flist = parse_nm(nm_output)""" data = DATA_RE.findall(nm_output) func = FUNC_RE.findall(nm_output) flist = [] for sym in data: if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): flist.append(sym) dlist = [] for sym in data: if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): dlist.append(sym) dlist.sort() flist.sort() return dlist, flist def output_def(dlist, flist, header, file = sys.stdout): """Outputs the final DEF file to a file defaulting to stdout. output_def(dlist, flist, header, file = sys.stdout)""" for data_sym in dlist: header = header + '\t%s DATA\n' % data_sym header = header + '\n' # blank line for func_sym in flist: header = header + '\t%s\n' % func_sym file.write(header) if __name__ == '__main__': libfile, deffile = parse_cmd() if deffile is None: deffile = sys.stdout else: deffile = open(deffile, 'w') nm_cmd = DEFAULT_NM + [str(libfile)] nm_output = getnm(nm_cmd, shell=False) dlist, flist = parse_nm(nm_output) output_def(dlist, flist, DEF_HEADER, deffile)
3,644
Python
30.153846
86
0.587267
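A short sketch of driving lib2def programmatically instead of through its command line; the import-library and output names below are placeholders, not files referenced by the original source.

from numpy.distutils import lib2def

# 'python39.lib' is a hypothetical MSVC import library on the current directory
nm_output = lib2def.getnm(lib2def.DEFAULT_NM + ["python39.lib"], shell=False)
dlist, flist = lib2def.parse_nm(nm_output)
with open("python39.def", "w") as f:
    lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, f)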
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/armccompiler.py
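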
from __future__ import division, absolute_import, print_function

from distutils.unixccompiler import UnixCCompiler

class ArmCCompiler(UnixCCompiler):
    """
    Arm compiler.
    """
    compiler_type = 'arm'
    cc_exe = 'armclang'
    cxx_exe = 'armclang++'

    def __init__(self, verbose=0, dry_run=0, force=0):
        UnixCCompiler.__init__(self, verbose, dry_run, force)
        cc_compiler = self.cc_exe
        cxx_compiler = self.cxx_exe
        self.set_executables(compiler=cc_compiler + ' -O3 -fPIC',
                             compiler_so=cc_compiler + ' -O3 -fPIC',
                             compiler_cxx=cxx_compiler + ' -O3 -fPIC',
                             linker_exe=cc_compiler + ' -lamath',
                             linker_so=cc_compiler + ' -lamath -shared')
1,043
Python
34.999999
79
0.422819
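ArmCCompiler is exposed through the compiler_class['arm'] registration in ccompiler.py above, so it can be requested by name. A hedged sketch, assuming armclang/armclang++ are on PATH; "hello.c" is a hypothetical source file.

from numpy.distutils.ccompiler import new_compiler

cc = new_compiler(compiler="arm", verbose=1)
cc.customize(None)
objects = cc.compile(["hello.c"], output_dir="build")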
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/line_endings.py
""" Functions for converting from DOS to UNIX line endings """ import os import re import sys def dos2unix(file): "Replace CRLF with LF in argument files. Print names of changed files." if os.path.isdir(file): print(file, "Directory!") return with open(file, "rb") as fp: data = fp.read() if '\0' in data: print(file, "Binary!") return newdata = re.sub("\r\n", "\n", data) if newdata != data: print('dos2unix:', file) with open(file, "wb") as f: f.write(newdata) return file else: print(file, 'ok') def dos2unix_one_dir(modified_files, dir_name, file_names): for file in file_names: full_path = os.path.join(dir_name, file) file = dos2unix(full_path) if file is not None: modified_files.append(file) def dos2unix_dir(dir_name): modified_files = [] os.path.walk(dir_name, dos2unix_one_dir, modified_files) return modified_files #---------------------------------- def unix2dos(file): "Replace LF with CRLF in argument files. Print names of changed files." if os.path.isdir(file): print(file, "Directory!") return with open(file, "rb") as fp: data = fp.read() if '\0' in data: print(file, "Binary!") return newdata = re.sub("\r\n", "\n", data) newdata = re.sub("\n", "\r\n", newdata) if newdata != data: print('unix2dos:', file) with open(file, "wb") as f: f.write(newdata) return file else: print(file, 'ok') def unix2dos_one_dir(modified_files, dir_name, file_names): for file in file_names: full_path = os.path.join(dir_name, file) unix2dos(full_path) if file is not None: modified_files.append(file) def unix2dos_dir(dir_name): modified_files = [] os.path.walk(dir_name, unix2dos_one_dir, modified_files) return modified_files if __name__ == "__main__": dos2unix_dir(sys.argv[1])
2,032
Python
25.064102
76
0.57185
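A minimal usage sketch for the converters above; the directory path is an illustrative placeholder.

from numpy.distutils.line_endings import dos2unix_dir

changed = dos2unix_dir("src/")   # 'src/' is a hypothetical directory
print(changed)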
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/extra_avx512bw_mask.c
#include <immintrin.h>
/**
 * Test BW mask operations due to:
 *  - MSVC has supported it since vs2019 see,
 *    https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
 *  - Clang >= v8.0
 *  - GCC >= v7.1
 */
int main(void)
{
    __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
    m64 = _kor_mask64(m64, m64);
    m64 = _kxor_mask64(m64, m64);
    m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
    m64 = _mm512_kunpackd(m64, m64);
    m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
    return (int)_cvtmask64_u64(m64);
}
636
C
32.526314
110
0.644654
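The cpu_*.c and extra_*.c sources in this part of the corpus are the probe programs that CCompilerOpt.feature_extra_checks() and dist_test() (shown earlier) try to compile in order to decide whether a feature or extra check can be enabled. A rough standalone equivalent, for illustration only; the check path and the GCC/Clang-style flags are assumptions.

import os
from distutils.ccompiler import new_compiler
from distutils.errors import CompileError

cc = new_compiler()
check = os.path.join("numpy", "distutils", "checks", "extra_avx512bw_mask.c")
try:
    # mirrors what dist_test() does: compile the probe with the candidate flags
    cc.compile([check], output_dir="build/checks",
               extra_postargs=["-mavx512f", "-mavx512bw", "-Werror"])
    print("AVX512BW mask intrinsics usable")
except CompileError:
    print("check failed; feature left disabled")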
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_sse2.c
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
    /*
     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
     * whether or not the build options for those features are specified.
     * Therefore, we must test #definitions of CPU features when option native/host
     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
     * the test will be broken and leads to enable all possible features.
     */
    #ifndef __SSE2__
        #error "HOST/ARCH doesn't support SSE2"
    #endif
#endif

#include <emmintrin.h>

int main(void)
{
    __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
    return _mm_cvtsi128_si32(a);
}
697
C
32.238094
83
0.674319
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_vxe2.c
#if (__VEC__ < 10303) || (__ARCH__ < 13)
    #error VXE2 not supported
#endif

#include <vecintrin.h>

int main(int argc, char **argv)
{
    int val;
    __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' };
    __vector signed short search = { 'g', 'h', 'g', 'o' };
    __vector unsigned char len = { 0 };
    __vector unsigned char res = vec_search_string_cc(large, search, len, &val);
    __vector float x = vec_xl(argc, (float*)argv);
    __vector int i = vec_signed(x);

    i = vec_srdb(vec_sldb(i, i, 2), i, 3);
    val += (int)vec_extract(res, 1);
    val += vec_extract(i, 0);
    return val;
}
624
C
27.40909
80
0.536859
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_ssse3.c
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
    /*
     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
     * whether or not the build options for those features are specified.
     * Therefore, we must test #definitions of CPU features when option native/host
     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
     * the test will be broken and leads to enable all possible features.
     */
    #ifndef __SSSE3__
        #error "HOST/ARCH doesn't support SSSE3"
    #endif
#endif

#include <tmmintrin.h>

int main(void)
{
    __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
    return (int)_mm_cvtsi128_si32(a);
}
705
C
32.619046
83
0.675177
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_neon_vfpv4.c
#ifdef _MSC_VER
    #include <Intrin.h>
#endif
#include <arm_neon.h>

int main(int argc, char **argv)
{
    float *src = (float*)argv[argc-1];
    float32x4_t v1 = vdupq_n_f32(src[0]);
    float32x4_t v2 = vdupq_n_f32(src[1]);
    float32x4_t v3 = vdupq_n_f32(src[2]);
    int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
#ifdef __aarch64__
    double *src2 = (double*)argv[argc-2];
    float64x2_t vd1 = vdupq_n_f64(src2[0]);
    float64x2_t vd2 = vdupq_n_f64(src2[1]);
    float64x2_t vd3 = vdupq_n_f64(src2[2]);
    ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
#endif
    return ret;
}
609
C
26.727272
60
0.604269
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_avx512_skx.c
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
    /*
     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
     * whether or not the build options for those features are specified.
     * Therefore, we must test #definitions of CPU features when option native/host
     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
     * the test will be broken and leads to enable all possible features.
     */
    #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
        #error "HOST/ARCH doesn't support SkyLake AVX512 features"
    #endif
#endif

#include <immintrin.h>

int main(int argc, char **argv)
{
    __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
    /* VL */
    __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
    /* DQ */
    __m512i b = _mm512_broadcast_i32x8(a);
    /* BW */
    b = _mm512_abs_epi16(b);
    return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
}
1,010
C
36.444443
84
0.648515
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_neon_fp16.c
#ifdef _MSC_VER
    #include <Intrin.h>
#endif
#include <arm_neon.h>

int main(int argc, char **argv)
{
    short *src = (short*)argv[argc-1];
    float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src));
    return (int)vgetq_lane_f32(v_z4, 0);
}
251
C
19.999998
64
0.625498
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_avx512_cnl.c
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
    /*
     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
     * whether or not the build options for those features are specified.
     * Therefore, we must test #definitions of CPU features when option native/host
     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
     * the test will be broken and leads to enable all possible features.
     */
    #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
        #error "HOST/ARCH doesn't support CannonLake AVX512 features"
    #endif
#endif

#include <immintrin.h>

int main(int argc, char **argv)
{
    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
    /* IFMA */
    a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
    /* VMBI */
    a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
}
948
C
36.959999
83
0.663502
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_xop.c
#include <immintrin.h>
#ifdef _MSC_VER
    #include <ammintrin.h>
#else
    #include <x86intrin.h>
#endif

int main(void)
{
    __m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
    return _mm_cvtsi128_si32(a);
}
234
C
17.076922
74
0.636752
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_vsx4.c
#ifndef __VSX__
    #error "VSX is not supported"
#endif
#include <altivec.h>

typedef __vector unsigned int v_uint32x4;

int main(void)
{
    v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16};
    v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2};
    v_uint32x4 v3 = vec_mod(v1, v2);
    return (int)vec_extractm(v3);
}
305
C
19.399999
46
0.613115
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_vsx3.c
#ifndef __VSX__
    #error "VSX is not supported"
#endif
#include <altivec.h>

typedef __vector unsigned int v_uint32x4;

int main(void)
{
    v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
    z4 = vec_absd(z4, z4);
    return (int)vec_extract(z4, 0);
}
250
C
16.92857
45
0.616
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_asimdhp.c
#ifdef _MSC_VER
    #include <Intrin.h>
#endif
#include <arm_neon.h>

int main(int argc, char **argv)
{
    float16_t *src = (float16_t*)argv[argc-1];
    float16x8_t vhp = vdupq_n_f16(src[0]);
    float16x4_t vlhp = vdup_n_f16(src[1]);

    int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
    ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
    return ret;
}
379
C
22.749999
60
0.593668
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_avx2.c
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
    /*
     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
     * whether or not the build options for those features are specified.
     * Therefore, we must test #definitions of CPU features when option native/host
     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
     * the test will be broken and leads to enable all possible features.
     */
    #ifndef __AVX2__
        #error "HOST/ARCH doesn't support AVX2"
    #endif
#endif

#include <immintrin.h>

int main(int argc, char **argv)
{
    __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
    return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}
749
C
34.714284
83
0.679573
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_avx512_knl.c
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
    /*
     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
     * whether or not the build options for those features are specified.
     * Therefore, we must test #definitions of CPU features when option native/host
     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
     * the test will be broken and leads to enable all possible features.
     */
    #if !defined(__AVX512ER__) || !defined(__AVX512PF__)
        #error "HOST/ARCH doesn't support Knights Landing AVX512 features"
    #endif
#endif

#include <immintrin.h>

int main(int argc, char **argv)
{
    int base[128];
    __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
    /* ER */
    __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
    /* PF */
    _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
    return base[0];
}
956
C
35.807691
95
0.655858
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_popcnt.c
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
    /*
     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
     * whether or not the build options for those features are specified.
     * Therefore, we must test #definitions of CPU features when option native/host
     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
     * the test will be broken and leads to enable all possible features.
     */
    #if !defined(__SSE4_2__) && !defined(__POPCNT__)
        #error "HOST/ARCH doesn't support POPCNT"
    #endif
#endif

#ifdef _MSC_VER
    #include <nmmintrin.h>
#else
    #include <popcntintrin.h>
#endif

int main(int argc, char **argv)
{
    // make sure popcnt instructions are generated
    // and tested against the assembler
    unsigned long long a = *((unsigned long long*)argv[argc-1]);
    unsigned int b = *((unsigned int*)argv[argc-2]);

#if defined(_M_X64) || defined(__x86_64__)
    a = _mm_popcnt_u64(a);
#endif
    b = _mm_popcnt_u32(b);
    return (int)a + b;
}
1,049
C
30.818181
83
0.654909
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_asimdfhm.c
#ifdef _MSC_VER
    #include <Intrin.h>
#endif
#include <arm_neon.h>

int main(int argc, char **argv)
{
    float16_t *src = (float16_t*)argv[argc-1];
    float *src2 = (float*)argv[argc-2];
    float16x8_t vhp = vdupq_n_f16(src[0]);
    float16x4_t vlhp = vdup_n_f16(src[1]);
    float32x4_t vf = vdupq_n_f32(src2[0]);
    float32x2_t vlf = vdup_n_f32(src2[1]);

    int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0);
    ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0);
    return ret;
}
529
C
25.499999
70
0.597353
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_fma4.c
#include <immintrin.h>
#ifdef _MSC_VER
    #include <ammintrin.h>
#else
    #include <x86intrin.h>
#endif

int main(int argc, char **argv)
{
    __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
    a = _mm256_macc_ps(a, a, a);
    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
}
301
C
20.571427
59
0.607973
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/extra_avx512f_reduce.c
#include <immintrin.h>
/**
 * The following intrinsics don't have direct native support but compilers
 * tend to emulate them.
 * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
 */
int main(void)
{
    __m512 one_ps = _mm512_set1_ps(1.0f);
    __m512d one_pd = _mm512_set1_pd(1.0);
    __m512i one_i64 = _mm512_set1_epi64(1);
    // add
    float sum_ps = _mm512_reduce_add_ps(one_ps);
    double sum_pd = _mm512_reduce_add_pd(one_pd);
    int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
    sum_int += (int)_mm512_reduce_add_epi32(one_i64);
    // mul
    sum_ps += _mm512_reduce_mul_ps(one_ps);
    sum_pd += _mm512_reduce_mul_pd(one_pd);
    sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
    sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
    // min
    sum_ps += _mm512_reduce_min_ps(one_ps);
    sum_pd += _mm512_reduce_min_pd(one_pd);
    sum_int += (int)_mm512_reduce_min_epi32(one_i64);
    sum_int += (int)_mm512_reduce_min_epu32(one_i64);
    sum_int += (int)_mm512_reduce_min_epi64(one_i64);
    // max
    sum_ps += _mm512_reduce_max_ps(one_ps);
    sum_pd += _mm512_reduce_max_pd(one_pd);
    sum_int += (int)_mm512_reduce_max_epi32(one_i64);
    sum_int += (int)_mm512_reduce_max_epu32(one_i64);
    sum_int += (int)_mm512_reduce_max_epi64(one_i64);
    // and
    sum_int += (int)_mm512_reduce_and_epi32(one_i64);
    sum_int += (int)_mm512_reduce_and_epi64(one_i64);
    // or
    sum_int += (int)_mm512_reduce_or_epi32(one_i64);
    sum_int += (int)_mm512_reduce_or_epi64(one_i64);
    return (int)sum_ps + (int)sum_pd + sum_int;
}
1,595
C
36.999999
74
0.603135
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_vsx2.c
#ifndef __VSX__
    #error "VSX is not supported"
#endif
#include <altivec.h>

typedef __vector unsigned long long v_uint64x2;

int main(void)
{
    v_uint64x2 z2 = (v_uint64x2){0, 0};
    z2 = (v_uint64x2)vec_cmpeq(z2, z2);
    return (int)vec_extract(z2, 0);
}
263
C
17.857142
47
0.634981
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/extra_vsx_asm.c
/**
 * Testing ASM VSX register number fixer '%x<n>'
 *
 * Old versions of Clang don't support %x<n> in the inline asm template,
 * which fixes the register number when using any of the register constraints wa, wd, wf.
 *
 * xref:
 * - https://bugs.llvm.org/show_bug.cgi?id=31837
 * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
 */
#ifndef __VSX__
    #error "VSX is not supported"
#endif
#include <altivec.h>

#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
    #define vsx_ld vec_vsx_ld
    #define vsx_st vec_vsx_st
#else
    #define vsx_ld vec_xl
    #define vsx_st vec_xst
#endif

int main(void)
{
    float z4[] = {0, 0, 0, 0};
    signed int zout[] = {0, 0, 0, 0};

    __vector float vz4 = vsx_ld(0, z4);
    __vector signed int asm_ret = vsx_ld(0, zout);

    __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret));

    vsx_st(asm_ret, 0, zout);
    return zout[0];
}
945
C
24.567567
89
0.602116
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/extra_avx512dq_mask.c
#include <immintrin.h>
/**
 * Test DQ mask operations due to:
 *  - MSVC has supported it since vs2019 see,
 *    https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
 *  - Clang >= v8.0
 *  - GCC >= v7.1
 */
int main(void)
{
    __mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1));
    m8 = _kor_mask8(m8, m8);
    m8 = _kxor_mask8(m8, m8);
    m8 = _cvtu32_mask8(_cvtmask8_u32(m8));
    return (int)_cvtmask8_u32(m8);
}
504
C
28.705881
110
0.642857
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_asimd.c
#ifdef _MSC_VER
    #include <Intrin.h>
#endif
#include <arm_neon.h>

int main(int argc, char **argv)
{
    float *src = (float*)argv[argc-1];
    float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
    /* MAXMIN */
    int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
    ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
    /* ROUNDING */
    ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
#ifdef __aarch64__
    {
        double *src2 = (double*)argv[argc-1];
        float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
        /* MAXMIN */
        ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
        ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
        /* ROUNDING */
        ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
    }
#endif
    return ret;
}
818
C
28.249999
75
0.550122
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_asimddp.c
#ifdef _MSC_VER
    #include <Intrin.h>
#endif
#include <arm_neon.h>

int main(int argc, char **argv)
{
    unsigned char *src = (unsigned char*)argv[argc-1];
    uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]);
    uint32x4_t va = vdupq_n_u32(3);
    int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
#ifdef __aarch64__
    ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
#endif
    return ret;
}
432
C
24.470587
66
0.606481
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_neon.c
#ifdef _MSC_VER
    #include <Intrin.h>
#endif
#include <arm_neon.h>

int main(int argc, char **argv)
{
    // passing from untraced pointers to avoid optimizing out any constants
    // so we can test against the linker.
    float *src = (float*)argv[argc-1];
    float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
    int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
#ifdef __aarch64__
    double *src2 = (double*)argv[argc-2];
    float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
    ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
#endif
    return ret;
}
600
C
29.049999
75
0.628333
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_vx.c
#if (__VEC__ < 10301) || (__ARCH__ < 11)
    #error VX not supported
#endif

#include <vecintrin.h>

int main(int argc, char **argv)
{
    __vector double x = vec_abs(vec_xl(argc, (double*)argv));
    __vector double y = vec_load_len((double*)argv, (unsigned int)argc);

    x = vec_round(vec_ceil(x) + vec_floor(y));
    __vector bool long long m = vec_cmpge(x, y);
    __vector long long i = vec_signed(vec_sel(x, y, m));

    return (int)vec_extract(i, 0);
}
461
C
26.176469
72
0.59436
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/extra_vsx4_mma.c
#ifndef __VSX__
    #error "VSX is not supported"
#endif
#include <altivec.h>

typedef __vector float fv4sf_t;
typedef __vector unsigned char vec_t;

int main(void)
{
    __vector_quad acc0;
    float a[4] = {0,1,2,3};
    float b[4] = {0,1,2,3};
    vec_t *va = (vec_t *) a;
    vec_t *vb = (vec_t *) b;
    __builtin_mma_xvf32ger(&acc0, va[0], vb[0]);
    fv4sf_t result[4];
    __builtin_mma_disassemble_acc((void *)result, &acc0);
    fv4sf_t c0 = result[0];
    return (int)((float*)&c0)[0];
}
499
C
21.727272
57
0.569138
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_avx512_knm.c
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
    /*
     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
     * whether or not the build options for those features are specified.
     * Therefore, we must test #definitions of CPU features when option native/host
     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
     * the test will be broken and leads to enable all possible features.
     */
    #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
        #error "HOST/ARCH doesn't support Knights Mill AVX512 features"
    #endif
#endif

#include <immintrin.h>

int main(int argc, char **argv)
{
    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
    __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);

    /* 4FMAPS */
    b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
    /* 4VNNIW */
    a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
    /* VPOPCNTDQ */
    a = _mm512_popcnt_epi64(a);

    a = _mm512_add_epi32(a, _mm512_castps_si512(b));
    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
}
1,132
C
35.548386
97
0.640459
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_vxe.c
#if (__VEC__ < 10302) || (__ARCH__ < 12)
    #error VXE not supported
#endif

#include <vecintrin.h>

int main(int argc, char **argv)
{
    __vector float x = vec_nabs(vec_xl(argc, (float*)argv));
    __vector float y = vec_load_len((float*)argv, (unsigned int)argc);

    x = vec_round(vec_ceil(x) + vec_floor(y));
    __vector bool int m = vec_cmpge(x, y);
    x = vec_sel(x, y, m);

    // need to test for the existence of the intrinsic "vflls", since
    // vec_doublee maps to the wrong intrinsic "vfll".
    // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100871
#if defined(__GNUC__) && !defined(__clang__)
    __vector long long i = vec_signed(__builtin_s390_vflls(x));
#else
    __vector long long i = vec_signed(vec_doublee(x));
#endif

    return (int)vec_extract(i, 0);
}
788
C
29.346153
70
0.609137
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/checks/cpu_vsx.c
#ifndef __VSX__
    #error "VSX is not supported"
#endif
#include <altivec.h>

#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
    #define vsx_ld vec_vsx_ld
    #define vsx_st vec_vsx_st
#else
    #define vsx_ld vec_xl
    #define vsx_st vec_xst
#endif

int main(void)
{
    unsigned int zout[4];
    unsigned int z4[] = {0, 0, 0, 0};
    __vector unsigned int v_z4 = vsx_ld(0, z4);
    vsx_st(v_z4, 0, zout);
    return zout[0];
}
478
C
20.772726
89
0.575314
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/mingw/gfortran_vs2003_hack.c
int _get_output_format(void)
{
    return 0;
}

int _imp____lc_codepage = 0;
77
C
10.142856
28
0.597403
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/compaq.py
#http://www.compaq.com/fortran/docs/ import os import sys from numpy.distutils.fcompiler import FCompiler from distutils.errors import DistutilsPlatformError compilers = ['CompaqFCompiler'] if os.name != 'posix' or sys.platform[:6] == 'cygwin' : # Otherwise we'd get a false positive on posix systems with # case-insensitive filesystems (like darwin), because we'll pick # up /bin/df compilers.append('CompaqVisualFCompiler') class CompaqFCompiler(FCompiler): compiler_type = 'compaq' description = 'Compaq Fortran Compiler' version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*' if sys.platform[:5]=='linux': fc_exe = 'fort' else: fc_exe = 'f90' executables = { 'version_cmd' : ['<F90>', "-version"], 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"], 'compiler_fix' : [fc_exe, "-fixed"], 'compiler_f90' : [fc_exe], 'linker_so' : ['<F90>'], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } module_dir_switch = '-module ' # not tested module_include_switch = '-I' def get_flags(self): return ['-assume no2underscore', '-nomixed_str_len_arg'] def get_flags_debug(self): return ['-g', '-check bounds'] def get_flags_opt(self): return ['-O4', '-align dcommons', '-assume bigarrays', '-assume nozsize', '-math_library fast'] def get_flags_arch(self): return ['-arch host', '-tune host'] def get_flags_linker_so(self): if sys.platform[:5]=='linux': return ['-shared'] return ['-shared', '-Wl,-expect_unresolved,*'] class CompaqVisualFCompiler(FCompiler): compiler_type = 'compaqv' description = 'DIGITAL or Compaq Visual Fortran Compiler' version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler' r' Version (?P<version>[^\s]*).*') compile_switch = '/compile_only' object_switch = '/object:' library_switch = '/OUT:' #No space after /OUT:! static_lib_extension = ".lib" static_lib_format = "%s%s" module_dir_switch = '/module:' module_include_switch = '/I' ar_exe = 'lib.exe' fc_exe = 'DF' if sys.platform=='win32': from numpy.distutils.msvccompiler import MSVCCompiler try: m = MSVCCompiler() m.initialize() ar_exe = m.lib except DistutilsPlatformError: pass except AttributeError as e: if '_MSVCCompiler__root' in str(e): print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e)) else: raise except OSError as e: if not "vcvarsall.bat" in str(e): print("Unexpected OSError in", __file__) raise except ValueError as e: if not "'path'" in str(e): print("Unexpected ValueError in", __file__) raise executables = { 'version_cmd' : ['<F90>', "/what"], 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"], 'compiler_fix' : [fc_exe, "/fixed"], 'compiler_f90' : [fc_exe], 'linker_so' : ['<F90>'], 'archiver' : [ar_exe, "/OUT:"], 'ranlib' : None } def get_flags(self): return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)', '/names:lowercase', '/assume:underscore'] def get_flags_opt(self): return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast'] def get_flags_arch(self): return ['/threads'] def get_flags_debug(self): return ['/debug'] if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils import customized_fcompiler print(customized_fcompiler(compiler='compaq').get_version())
3,903
Python
31.264463
81
0.555214
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/pathf95.py
from numpy.distutils.fcompiler import FCompiler

compilers = ['PathScaleFCompiler']

class PathScaleFCompiler(FCompiler):

    compiler_type = 'pathf95'
    description = 'PathScale Fortran Compiler'
    version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'

    executables = {
        'version_cmd'  : ["pathf95", "-version"],
        'compiler_f77' : ["pathf95", "-fixedform"],
        'compiler_fix' : ["pathf95", "-fixedform"],
        'compiler_f90' : ["pathf95"],
        'linker_so'    : ["pathf95", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
    }
    pic_flags = ['-fPIC']
    module_dir_switch = '-module '  # Don't remove ending space!
    module_include_switch = '-I'

    def get_flags_opt(self):
        return ['-O3']

    def get_flags_debug(self):
        return ['-g']

if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='pathf95').get_version())
1,061
Python
30.235293
85
0.592837
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/environment.py
import os from distutils.dist import Distribution __metaclass__ = type class EnvironmentConfig: def __init__(self, distutils_section='ALL', **kw): self._distutils_section = distutils_section self._conf_keys = kw self._conf = None self._hook_handler = None def dump_variable(self, name): conf_desc = self._conf_keys[name] hook, envvar, confvar, convert, append = conf_desc if not convert: convert = lambda x : x print('%s.%s:' % (self._distutils_section, name)) v = self._hook_handler(name, hook) print(' hook : %s' % (convert(v),)) if envvar: v = os.environ.get(envvar, None) print(' environ: %s' % (convert(v),)) if confvar and self._conf: v = self._conf.get(confvar, (None, None))[1] print(' config : %s' % (convert(v),)) def dump_variables(self): for name in self._conf_keys: self.dump_variable(name) def __getattr__(self, name): try: conf_desc = self._conf_keys[name] except KeyError: raise AttributeError( f"'EnvironmentConfig' object has no attribute '{name}'" ) from None return self._get_var(name, conf_desc) def get(self, name, default=None): try: conf_desc = self._conf_keys[name] except KeyError: return default var = self._get_var(name, conf_desc) if var is None: var = default return var def _get_var(self, name, conf_desc): hook, envvar, confvar, convert, append = conf_desc if convert is None: convert = lambda x: x var = self._hook_handler(name, hook) if envvar is not None: envvar_contents = os.environ.get(envvar) if envvar_contents is not None: envvar_contents = convert(envvar_contents) if var and append: if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1': var.extend(envvar_contents) else: # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 # to keep old (overwrite flags rather than append to # them) behavior var = envvar_contents else: var = envvar_contents if confvar is not None and self._conf: if confvar in self._conf: source, confvar_contents = self._conf[confvar] var = convert(confvar_contents) return var def clone(self, hook_handler): ec = self.__class__(distutils_section=self._distutils_section, **self._conf_keys) ec._hook_handler = hook_handler return ec def use_distribution(self, dist): if isinstance(dist, Distribution): self._conf = dist.get_option_dict(self._distutils_section) else: self._conf = dist
3,080
Python
33.617977
80
0.530844
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/sun.py
from numpy.distutils.ccompiler import simple_version_match from numpy.distutils.fcompiler import FCompiler compilers = ['SunFCompiler'] class SunFCompiler(FCompiler): compiler_type = 'sun' description = 'Sun or Forte Fortran 95 Compiler' # ex: # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28 version_match = simple_version_match( start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95') executables = { 'version_cmd' : ["<F90>", "-V"], 'compiler_f77' : ["f90"], 'compiler_fix' : ["f90", "-fixed"], 'compiler_f90' : ["f90"], 'linker_so' : ["<F90>", "-Bdynamic", "-G"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } module_dir_switch = '-moddir=' module_include_switch = '-M' pic_flags = ['-xcode=pic32'] def get_flags_f77(self): ret = ["-ftrap=%none"] if (self.get_version() or '') >= '7': ret.append("-f77") else: ret.append("-fixed") return ret def get_opt(self): return ['-fast', '-dalign'] def get_arch(self): return ['-xtarget=generic'] def get_libraries(self): opt = [] opt.extend(['fsu', 'sunmath', 'mvec']) return opt def runtime_library_dir_option(self, dir): return '-R%s' % dir if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils import customized_fcompiler print(customized_fcompiler(compiler='sun').get_version())
1,577
Python
29.346153
76
0.554217
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/mips.py
from numpy.distutils.cpuinfo import cpu from numpy.distutils.fcompiler import FCompiler compilers = ['MIPSFCompiler'] class MIPSFCompiler(FCompiler): compiler_type = 'mips' description = 'MIPSpro Fortran Compiler' version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)' executables = { 'version_cmd' : ["<F90>", "-version"], 'compiler_f77' : ["f77", "-f77"], 'compiler_fix' : ["f90", "-fixedform"], 'compiler_f90' : ["f90"], 'linker_so' : ["f90", "-shared"], 'archiver' : ["ar", "-cr"], 'ranlib' : None } module_dir_switch = None #XXX: fix me module_include_switch = None #XXX: fix me pic_flags = ['-KPIC'] def get_flags(self): return self.pic_flags + ['-n32'] def get_flags_opt(self): return ['-O3'] def get_flags_arch(self): opt = [] for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split(): if getattr(cpu, 'is_IP%s'%a)(): opt.append('-TARG:platform=IP%s' % a) break return opt def get_flags_arch_f77(self): r = None if cpu.is_r10000(): r = 10000 elif cpu.is_r12000(): r = 12000 elif cpu.is_r8000(): r = 8000 elif cpu.is_r5000(): r = 5000 elif cpu.is_r4000(): r = 4000 if r is not None: return ['r%s' % (r)] return [] def get_flags_arch_f90(self): r = self.get_flags_arch_f77() if r: r[0] = '-' + r[0] return r if __name__ == '__main__': from numpy.distutils import customized_fcompiler print(customized_fcompiler(compiler='mips').get_version())
1,714
Python
30.181818
79
0.529755
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/none.py
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils import customized_fcompiler

compilers = ['NoneFCompiler']

class NoneFCompiler(FCompiler):

    compiler_type = 'none'
    description = 'Fake Fortran compiler'

    executables = {'compiler_f77': None,
                   'compiler_f90': None,
                   'compiler_fix': None,
                   'linker_so': None,
                   'linker_exe': None,
                   'archiver': None,
                   'ranlib': None,
                   'version_cmd': None,
                   }

    def find_executables(self):
        pass


if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    print(customized_fcompiler(compiler='none').get_version())
758
Python
25.172413
62
0.550132
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/absoft.py
# http://www.absoft.com/literature/osxuserguide.pdf # http://www.absoft.com/documentation.html # Notes: # - when using -g77 then use -DUNDERSCORE_G77 to compile f2py # generated extension modules (works for f2py v2.45.241_1936 and up) import os from numpy.distutils.cpuinfo import cpu from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file from numpy.distutils.misc_util import cyg2win32 compilers = ['AbsoftFCompiler'] class AbsoftFCompiler(FCompiler): compiler_type = 'absoft' description = 'Absoft Corp Fortran Compiler' #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp' version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\ r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)' # on windows: f90 -V -c dummy.f # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16 # samt5735(8)$ f90 -V -c dummy.f # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0 # Note that fink installs g77 as f77, so need to use f90 for detection. executables = { 'version_cmd' : None, # set by update_executables 'compiler_f77' : ["f77"], 'compiler_fix' : ["f90"], 'compiler_f90' : ["f90"], 'linker_so' : ["<F90>"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } if os.name=='nt': library_switch = '/out:' #No space after /out:! module_dir_switch = None module_include_switch = '-p' def update_executables(self): f = cyg2win32(dummy_fortran_file()) self.executables['version_cmd'] = ['<F90>', '-V', '-c', f+'.f', '-o', f+'.o'] def get_flags_linker_so(self): if os.name=='nt': opt = ['/dll'] # The "-K shared" switches are being left in for pre-9.0 versions # of Absoft though I don't think versions earlier than 9 can # actually be used to build shared libraries. In fact, version # 8 of Absoft doesn't recognize "-K shared" and will fail. 
elif self.get_version() >= '9.0': opt = ['-shared'] else: opt = ["-K", "shared"] return opt def library_dir_option(self, dir): if os.name=='nt': return ['-link', '/PATH:%s' % (dir)] return "-L" + dir def library_option(self, lib): if os.name=='nt': return '%s.lib' % (lib) return "-l" + lib def get_library_dirs(self): opt = FCompiler.get_library_dirs(self) d = os.environ.get('ABSOFT') if d: if self.get_version() >= '10.0': # use shared libraries, the static libraries were not compiled -fPIC prefix = 'sh' else: prefix = '' if cpu.is_64bit(): suffix = '64' else: suffix = '' opt.append(os.path.join(d, '%slib%s' % (prefix, suffix))) return opt def get_libraries(self): opt = FCompiler.get_libraries(self) if self.get_version() >= '11.0': opt.extend(['af90math', 'afio', 'af77math', 'amisc']) elif self.get_version() >= '10.0': opt.extend(['af90math', 'afio', 'af77math', 'U77']) elif self.get_version() >= '8.0': opt.extend(['f90math', 'fio', 'f77math', 'U77']) else: opt.extend(['fio', 'f90math', 'fmath', 'U77']) if os.name =='nt': opt.append('COMDLG32') return opt def get_flags(self): opt = FCompiler.get_flags(self) if os.name != 'nt': opt.extend(['-s']) if self.get_version(): if self.get_version()>='8.2': opt.append('-fpic') return opt def get_flags_f77(self): opt = FCompiler.get_flags_f77(self) opt.extend(['-N22', '-N90', '-N110']) v = self.get_version() if os.name == 'nt': if v and v>='8.0': opt.extend(['-f', '-N15']) else: opt.append('-f') if v: if v<='4.6': opt.append('-B108') else: # Though -N15 is undocumented, it works with # Absoft 8.0 on Linux opt.append('-N15') return opt def get_flags_f90(self): opt = FCompiler.get_flags_f90(self) opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) if self.get_version(): if self.get_version()>'4.6': opt.extend(["-YDEALLOC=ALL"]) return opt def get_flags_fix(self): opt = FCompiler.get_flags_fix(self) opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX", "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"]) opt.extend(["-f", "fixed"]) return opt def get_flags_opt(self): opt = ['-O'] return opt if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils import customized_fcompiler print(customized_fcompiler(compiler='absoft').get_version())
5,499
Python
34.031847
155
0.525186
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/lahey.py
import os

from numpy.distutils.fcompiler import FCompiler

compilers = ['LaheyFCompiler']

class LaheyFCompiler(FCompiler):

    compiler_type = 'lahey'
    description = 'Lahey/Fujitsu Fortran 95 Compiler'
    version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'

    executables = {
        'version_cmd'  : ["<F90>", "--version"],
        'compiler_f77' : ["lf95", "--fix"],
        'compiler_fix' : ["lf95", "--fix"],
        'compiler_f90' : ["lf95"],
        'linker_so'    : ["lf95", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
    }

    module_dir_switch = None      # XXX Fix me
    module_include_switch = None  # XXX Fix me

    def get_flags_opt(self):
        return ['-O']

    def get_flags_debug(self):
        return ['-g', '--chk', '--chkglobal']

    def get_library_dirs(self):
        opt = []
        d = os.environ.get('LAHEY')
        if d:
            opt.append(os.path.join(d, 'lib'))
        return opt

    def get_libraries(self):
        opt = []
        opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
        return opt

if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='lahey').get_version())
1,327
Python
27.869565
88
0.557649
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/vast.py
import os from numpy.distutils.fcompiler.gnu import GnuFCompiler compilers = ['VastFCompiler'] class VastFCompiler(GnuFCompiler): compiler_type = 'vast' compiler_aliases = () description = 'Pacific-Sierra Research Fortran 90 Compiler' version_pattern = (r'\s*Pacific-Sierra Research vf90 ' r'(Personal|Professional)\s+(?P<version>[^\s]*)') # VAST f90 does not support -o with -c. So, object files are created # to the current directory and then moved to build directory object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile ' executables = { 'version_cmd' : ["vf90", "-v"], 'compiler_f77' : ["g77"], 'compiler_fix' : ["f90", "-Wv,-ya"], 'compiler_f90' : ["f90"], 'linker_so' : ["<F90>"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } module_dir_switch = None #XXX Fix me module_include_switch = None #XXX Fix me def find_executables(self): pass def get_version_cmd(self): f90 = self.compiler_f90[0] d, b = os.path.split(f90) vf90 = os.path.join(d, 'v'+b) return vf90 def get_flags_arch(self): vast_version = self.get_version() gnu = GnuFCompiler() gnu.customize(None) self.version = gnu.get_version() opt = GnuFCompiler.get_flags_arch(self) self.version = vast_version return opt if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils import customized_fcompiler print(customized_fcompiler(compiler='vast').get_version())
1,667
Python
30.471698
83
0.584883
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/intel.py
# http://developer.intel.com/software/products/compilers/flin/ import sys from numpy.distutils.ccompiler import simple_version_match from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file compilers = ['IntelFCompiler', 'IntelVisualFCompiler', 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler', 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler'] def intel_version_match(type): # Match against the important stuff in the version string return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)) class BaseIntelFCompiler(FCompiler): def update_executables(self): f = dummy_fortran_file() self.executables['version_cmd'] = ['<F77>', '-FI', '-V', '-c', f + '.f', '-o', f + '.o'] def runtime_library_dir_option(self, dir): # TODO: could use -Xlinker here, if it's supported assert "," not in dir return '-Wl,-rpath=%s' % dir class IntelFCompiler(BaseIntelFCompiler): compiler_type = 'intel' compiler_aliases = ('ifort',) description = 'Intel Fortran Compiler for 32-bit apps' version_match = intel_version_match('32-bit|IA-32') possible_executables = ['ifort', 'ifc'] executables = { 'version_cmd' : None, # set by update_executables 'compiler_f77' : [None, "-72", "-w90", "-w95"], 'compiler_f90' : [None], 'compiler_fix' : [None, "-FI"], 'linker_so' : ["<F90>", "-shared"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } pic_flags = ['-fPIC'] module_dir_switch = '-module ' # Don't remove ending space! module_include_switch = '-I' def get_flags_free(self): return ['-FR'] def get_flags(self): return ['-fPIC'] def get_flags_opt(self): # Scipy test failures with -O2 v = self.get_version() mpopt = 'openmp' if v and v < '15' else 'qopenmp' return ['-fp-model', 'strict', '-O1', '-assume', 'minus0', '-{}'.format(mpopt)] def get_flags_arch(self): return [] def get_flags_linker_so(self): opt = FCompiler.get_flags_linker_so(self) v = self.get_version() if v and v >= '8.0': opt.append('-nofor_main') if sys.platform == 'darwin': # Here, it's -dynamiclib try: idx = opt.index('-shared') opt.remove('-shared') except ValueError: idx = 0 opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup'] return opt class IntelItaniumFCompiler(IntelFCompiler): compiler_type = 'intele' compiler_aliases = () description = 'Intel Fortran Compiler for Itanium apps' version_match = intel_version_match('Itanium|IA-64') possible_executables = ['ifort', 'efort', 'efc'] executables = { 'version_cmd' : None, 'compiler_f77' : [None, "-FI", "-w90", "-w95"], 'compiler_fix' : [None, "-FI"], 'compiler_f90' : [None], 'linker_so' : ['<F90>', "-shared"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } class IntelEM64TFCompiler(IntelFCompiler): compiler_type = 'intelem' compiler_aliases = () description = 'Intel Fortran Compiler for 64-bit apps' version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit') possible_executables = ['ifort', 'efort', 'efc'] executables = { 'version_cmd' : None, 'compiler_f77' : [None, "-FI"], 'compiler_fix' : [None, "-FI"], 'compiler_f90' : [None], 'linker_so' : ['<F90>', "-shared"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } # Is there no difference in the version string between the above compilers # and the Visual compilers? 
class IntelVisualFCompiler(BaseIntelFCompiler): compiler_type = 'intelv' description = 'Intel Visual Fortran Compiler for 32-bit apps' version_match = intel_version_match('32-bit|IA-32') def update_executables(self): f = dummy_fortran_file() self.executables['version_cmd'] = ['<F77>', '/FI', '/c', f + '.f', '/o', f + '.o'] ar_exe = 'lib.exe' possible_executables = ['ifort', 'ifl'] executables = { 'version_cmd' : None, 'compiler_f77' : [None], 'compiler_fix' : [None], 'compiler_f90' : [None], 'linker_so' : [None], 'archiver' : [ar_exe, "/verbose", "/OUT:"], 'ranlib' : None } compile_switch = '/c ' object_switch = '/Fo' # No space after /Fo! library_switch = '/OUT:' # No space after /OUT:! module_dir_switch = '/module:' # No space after /module: module_include_switch = '/I' def get_flags(self): opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', '/assume:underscore', '/fpp'] return opt def get_flags_free(self): return [] def get_flags_debug(self): return ['/4Yb', '/d2'] def get_flags_opt(self): return ['/O1', '/assume:minus0'] # Scipy test failures with /O2 def get_flags_arch(self): return ["/arch:IA32", "/QaxSSE3"] def runtime_library_dir_option(self, dir): raise NotImplementedError class IntelItaniumVisualFCompiler(IntelVisualFCompiler): compiler_type = 'intelev' description = 'Intel Visual Fortran Compiler for Itanium apps' version_match = intel_version_match('Itanium') possible_executables = ['efl'] # XXX this is a wild guess ar_exe = IntelVisualFCompiler.ar_exe executables = { 'version_cmd' : None, 'compiler_f77' : [None, "-FI", "-w90", "-w95"], 'compiler_fix' : [None, "-FI", "-4L72", "-w"], 'compiler_f90' : [None], 'linker_so' : ['<F90>', "-shared"], 'archiver' : [ar_exe, "/verbose", "/OUT:"], 'ranlib' : None } class IntelEM64VisualFCompiler(IntelVisualFCompiler): compiler_type = 'intelvem' description = 'Intel Visual Fortran Compiler for 64-bit apps' version_match = simple_version_match(start=r'Intel\(R\).*?64,') def get_flags_arch(self): return [] if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils import customized_fcompiler print(customized_fcompiler(compiler='intel').get_version())
6,570
Python
29.995283
86
0.560426
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/__init__.py
"""numpy.distutils.fcompiler Contains FCompiler, an abstract base class that defines the interface for the numpy.distutils Fortran compiler abstraction model. Terminology: To be consistent, where the term 'executable' is used, it means the single file, like 'gcc', that is executed, and should be a string. In contrast, 'command' means the entire command line, like ['gcc', '-c', 'file.c'], and should be a list. But note that FCompiler.executables is actually a dictionary of commands. """ __all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers', 'dummy_fortran_file'] import os import sys import re from distutils.sysconfig import get_python_lib from distutils.fancy_getopt import FancyGetopt from distutils.errors import DistutilsModuleError, \ DistutilsExecError, CompileError, LinkError, DistutilsPlatformError from distutils.util import split_quoted, strtobool from numpy.distutils.ccompiler import CCompiler, gen_lib_options from numpy.distutils import log from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \ make_temp_file, get_shared_lib_extension from numpy.distutils.exec_command import find_executable from numpy.distutils import _shell_utils from .environment import EnvironmentConfig __metaclass__ = type class CompilerNotFound(Exception): pass def flaglist(s): if is_string(s): return split_quoted(s) else: return s def str2bool(s): if is_string(s): return strtobool(s) return bool(s) def is_sequence_of_strings(seq): return is_sequence(seq) and all_strings(seq) class FCompiler(CCompiler): """Abstract base class to define the interface that must be implemented by real Fortran compiler classes. Methods that subclasses may redefine: update_executables(), find_executables(), get_version() get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug() get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(), get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(), get_flags_arch_f90(), get_flags_debug_f90(), get_flags_fix(), get_flags_linker_so() DON'T call these methods (except get_version) after constructing a compiler instance or inside any other method. All methods, except update_executables() and find_executables(), may call the get_version() method. After constructing a compiler instance, always call customize(dist=None) method that finalizes compiler construction and makes the following attributes available: compiler_f77 compiler_f90 compiler_fix linker_so archiver ranlib libraries library_dirs """ # These are the environment variables and distutils keys used. # Each configuration description is # (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>, <append>) # The hook names are handled by the self._environment_hook method. # - names starting with 'self.' call methods in this class # - names starting with 'exe.' return the key in the executables dict # - names like 'flags.YYY' return self.get_flag_YYY() # convert is either None or a function to convert a string to the # appropriate type used. 
distutils_vars = EnvironmentConfig( distutils_section='config_fc', noopt = (None, None, 'noopt', str2bool, False), noarch = (None, None, 'noarch', str2bool, False), debug = (None, None, 'debug', str2bool, False), verbose = (None, None, 'verbose', str2bool, False), ) command_vars = EnvironmentConfig( distutils_section='config_fc', compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False), compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False), compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False), version_cmd = ('exe.version_cmd', None, None, None, False), linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False), linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False), archiver = (None, 'AR', 'ar', None, False), ranlib = (None, 'RANLIB', 'ranlib', None, False), ) flag_vars = EnvironmentConfig( distutils_section='config_fc', f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True), f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True), free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True), fix = ('flags.fix', None, None, flaglist, False), opt = ('flags.opt', 'FOPT', 'opt', flaglist, True), opt_f77 = ('flags.opt_f77', None, None, flaglist, False), opt_f90 = ('flags.opt_f90', None, None, flaglist, False), arch = ('flags.arch', 'FARCH', 'arch', flaglist, False), arch_f77 = ('flags.arch_f77', None, None, flaglist, False), arch_f90 = ('flags.arch_f90', None, None, flaglist, False), debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True), debug_f77 = ('flags.debug_f77', None, None, flaglist, False), debug_f90 = ('flags.debug_f90', None, None, flaglist, False), flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True), linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True), linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True), ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True), ) language_map = {'.f': 'f77', '.for': 'f77', '.F': 'f77', # XXX: needs preprocessor '.ftn': 'f77', '.f77': 'f77', '.f90': 'f90', '.F90': 'f90', # XXX: needs preprocessor '.f95': 'f90', } language_order = ['f90', 'f77'] # These will be set by the subclass compiler_type = None compiler_aliases = () version_pattern = None possible_executables = [] executables = { 'version_cmd': ["f77", "-v"], 'compiler_f77': ["f77"], 'compiler_f90': ["f90"], 'compiler_fix': ["f90", "-fixed"], 'linker_so': ["f90", "-shared"], 'linker_exe': ["f90"], 'archiver': ["ar", "-cr"], 'ranlib': None, } # If compiler does not support compiling Fortran 90 then it can # suggest using another compiler. For example, gnu would suggest # gnu95 compiler type when there are F90 sources. suggested_f90_compiler = None compile_switch = "-c" object_switch = "-o " # Ending space matters! It will be stripped # but if it is missing then object_switch # will be prefixed to object file name by # string concatenation. library_switch = "-o " # Ditto! # Switch to specify where module files are created and searched # for USE statement. Normally it is a string and also here ending # space matters. See above. module_dir_switch = None # Switch to specify where module files are searched for USE statement. 
module_include_switch = '-I' pic_flags = [] # Flags to create position-independent code src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR'] obj_extension = ".o" shared_lib_extension = get_shared_lib_extension() static_lib_extension = ".a" # or .lib static_lib_format = "lib%s%s" # or %s%s shared_lib_format = "%s%s" exe_extension = "" _exe_cache = {} _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90', 'compiler_fix', 'linker_so', 'linker_exe', 'archiver', 'ranlib'] # This will be set by new_fcompiler when called in # command/{build_ext.py, build_clib.py, config.py} files. c_compiler = None # extra_{f77,f90}_compile_args are set by build_ext.build_extension method extra_f77_compile_args = [] extra_f90_compile_args = [] def __init__(self, *args, **kw): CCompiler.__init__(self, *args, **kw) self.distutils_vars = self.distutils_vars.clone(self._environment_hook) self.command_vars = self.command_vars.clone(self._environment_hook) self.flag_vars = self.flag_vars.clone(self._environment_hook) self.executables = self.executables.copy() for e in self._executable_keys: if e not in self.executables: self.executables[e] = None # Some methods depend on .customize() being called first, so # this keeps track of whether that's happened yet. self._is_customised = False def __copy__(self): obj = self.__new__(self.__class__) obj.__dict__.update(self.__dict__) obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook) obj.command_vars = obj.command_vars.clone(obj._environment_hook) obj.flag_vars = obj.flag_vars.clone(obj._environment_hook) obj.executables = obj.executables.copy() return obj def copy(self): return self.__copy__() # Use properties for the attributes used by CCompiler. Setting them # as attributes from the self.executables dictionary is error-prone, # so we get them from there each time. def _command_property(key): def fget(self): assert self._is_customised return self.executables[key] return property(fget=fget) version_cmd = _command_property('version_cmd') compiler_f77 = _command_property('compiler_f77') compiler_f90 = _command_property('compiler_f90') compiler_fix = _command_property('compiler_fix') linker_so = _command_property('linker_so') linker_exe = _command_property('linker_exe') archiver = _command_property('archiver') ranlib = _command_property('ranlib') # Make our terminology consistent. def set_executable(self, key, value): self.set_command(key, value) def set_commands(self, **kw): for k, v in kw.items(): self.set_command(k, v) def set_command(self, key, value): if not key in self._executable_keys: raise ValueError( "unknown executable '%s' for class %s" % (key, self.__class__.__name__)) if is_string(value): value = split_quoted(value) assert value is None or is_sequence_of_strings(value[1:]), (key, value) self.executables[key] = value ###################################################################### ## Methods that subclasses may redefine. But don't call these methods! ## They are private to FCompiler class and may return unexpected ## results if used elsewhere. So, you have been warned.. def find_executables(self): """Go through the self.executables dictionary, and attempt to find and assign appropriate executables. Executable names are looked for in the environment (environment variables, the distutils.cfg, and command line), the 0th-element of the command list, and the self.possible_executables list. 
Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77 or the Fortran 90 compiler executable is used, unless overridden by an environment setting. Subclasses should call this if overridden. """ assert self._is_customised exe_cache = self._exe_cache def cached_find_executable(exe): if exe in exe_cache: return exe_cache[exe] fc_exe = find_executable(exe) exe_cache[exe] = exe_cache[fc_exe] = fc_exe return fc_exe def verify_command_form(name, value): if value is not None and not is_sequence_of_strings(value): raise ValueError( "%s value %r is invalid in class %s" % (name, value, self.__class__.__name__)) def set_exe(exe_key, f77=None, f90=None): cmd = self.executables.get(exe_key, None) if not cmd: return None # Note that we get cmd[0] here if the environment doesn't # have anything set exe_from_environ = getattr(self.command_vars, exe_key) if not exe_from_environ: possibles = [f90, f77] + self.possible_executables else: possibles = [exe_from_environ] + self.possible_executables seen = set() unique_possibles = [] for e in possibles: if e == '<F77>': e = f77 elif e == '<F90>': e = f90 if not e or e in seen: continue seen.add(e) unique_possibles.append(e) for exe in unique_possibles: fc_exe = cached_find_executable(exe) if fc_exe: cmd[0] = fc_exe return fc_exe self.set_command(exe_key, None) return None ctype = self.compiler_type f90 = set_exe('compiler_f90') if not f90: f77 = set_exe('compiler_f77') if f77: log.warn('%s: no Fortran 90 compiler found' % ctype) else: raise CompilerNotFound('%s: f90 nor f77' % ctype) else: f77 = set_exe('compiler_f77', f90=f90) if not f77: log.warn('%s: no Fortran 77 compiler found' % ctype) set_exe('compiler_fix', f90=f90) set_exe('linker_so', f77=f77, f90=f90) set_exe('linker_exe', f77=f77, f90=f90) set_exe('version_cmd', f77=f77, f90=f90) set_exe('archiver') set_exe('ranlib') def update_executables(self): """Called at the beginning of customisation. Subclasses should override this if they need to set up the executables dictionary. Note that self.find_executables() is run afterwards, so the self.executables dictionary values can contain <F77> or <F90> as the command, which will be replaced by the found F77 or F90 compiler. """ pass def get_flags(self): """List of flags common to all compiler types.""" return [] + self.pic_flags def _get_command_flags(self, key): cmd = self.executables.get(key, None) if cmd is None: return [] return cmd[1:] def get_flags_f77(self): """List of Fortran 77 specific flags.""" return self._get_command_flags('compiler_f77') def get_flags_f90(self): """List of Fortran 90 specific flags.""" return self._get_command_flags('compiler_f90') def get_flags_free(self): """List of Fortran 90 free format specific flags.""" return [] def get_flags_fix(self): """List of Fortran 90 fixed format specific flags.""" return self._get_command_flags('compiler_fix') def get_flags_linker_so(self): """List of linker flags to build a shared library.""" return self._get_command_flags('linker_so') def get_flags_linker_exe(self): """List of linker flags to build an executable.""" return self._get_command_flags('linker_exe') def get_flags_ar(self): """List of archiver flags. 
""" return self._get_command_flags('archiver') def get_flags_opt(self): """List of architecture independent compiler flags.""" return [] def get_flags_arch(self): """List of architecture dependent compiler flags.""" return [] def get_flags_debug(self): """List of compiler flags to compile with debugging information.""" return [] get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug def get_libraries(self): """List of compiler libraries.""" return self.libraries[:] def get_library_dirs(self): """List of compiler library directories.""" return self.library_dirs[:] def get_version(self, force=False, ok_status=[0]): assert self._is_customised version = CCompiler.get_version(self, force=force, ok_status=ok_status) if version is None: raise CompilerNotFound() return version ############################################################ ## Public methods: def customize(self, dist = None): """Customize Fortran compiler. This method gets Fortran compiler specific information from (i) class definition, (ii) environment, (iii) distutils config files, and (iv) command line (later overrides earlier). This method should be always called after constructing a compiler instance. But not in __init__ because Distribution instance is needed for (iii) and (iv). """ log.info('customize %s' % (self.__class__.__name__)) self._is_customised = True self.distutils_vars.use_distribution(dist) self.command_vars.use_distribution(dist) self.flag_vars.use_distribution(dist) self.update_executables() # find_executables takes care of setting the compiler commands, # version_cmd, linker_so, linker_exe, ar, and ranlib self.find_executables() noopt = self.distutils_vars.get('noopt', False) noarch = self.distutils_vars.get('noarch', noopt) debug = self.distutils_vars.get('debug', False) f77 = self.command_vars.compiler_f77 f90 = self.command_vars.compiler_f90 f77flags = [] f90flags = [] freeflags = [] fixflags = [] if f77: f77 = _shell_utils.NativeParser.split(f77) f77flags = self.flag_vars.f77 if f90: f90 = _shell_utils.NativeParser.split(f90) f90flags = self.flag_vars.f90 freeflags = self.flag_vars.free # XXX Assuming that free format is default for f90 compiler. 
fix = self.command_vars.compiler_fix # NOTE: this and similar examples are probably just # excluding --coverage flag when F90 = gfortran --coverage # instead of putting that flag somewhere more appropriate # this and similar examples where a Fortran compiler # environment variable has been customized by CI or a user # should perhaps eventually be more thoroughly tested and more # robustly handled if fix: fix = _shell_utils.NativeParser.split(fix) fixflags = self.flag_vars.fix + f90flags oflags, aflags, dflags = [], [], [] # examine get_flags_<tag>_<compiler> for extra flags # only add them if the method is different from get_flags_<tag> def get_flags(tag, flags): # note that self.flag_vars.<tag> calls self.get_flags_<tag>() flags.extend(getattr(self.flag_vars, tag)) this_get = getattr(self, 'get_flags_' + tag) for name, c, flagvar in [('f77', f77, f77flags), ('f90', f90, f90flags), ('f90', fix, fixflags)]: t = '%s_%s' % (tag, name) if c and this_get is not getattr(self, 'get_flags_' + t): flagvar.extend(getattr(self.flag_vars, t)) if not noopt: get_flags('opt', oflags) if not noarch: get_flags('arch', aflags) if debug: get_flags('debug', dflags) fflags = self.flag_vars.flags + dflags + oflags + aflags if f77: self.set_commands(compiler_f77=f77+f77flags+fflags) if f90: self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags) if fix: self.set_commands(compiler_fix=fix+fixflags+fflags) #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS linker_so = self.linker_so if linker_so: linker_so_flags = self.flag_vars.linker_so if sys.platform.startswith('aix'): python_lib = get_python_lib(standard_lib=1) ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') python_exp = os.path.join(python_lib, 'config', 'python.exp') linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] if sys.platform.startswith('os400'): from distutils.sysconfig import get_config_var python_config = get_config_var('LIBPL') ld_so_aix = os.path.join(python_config, 'ld_so_aix') python_exp = os.path.join(python_config, 'python.exp') linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp] self.set_commands(linker_so=linker_so+linker_so_flags) linker_exe = self.linker_exe if linker_exe: linker_exe_flags = self.flag_vars.linker_exe self.set_commands(linker_exe=linker_exe+linker_exe_flags) ar = self.command_vars.archiver if ar: arflags = self.flag_vars.ar self.set_commands(archiver=[ar]+arflags) self.set_library_dirs(self.get_library_dirs()) self.set_libraries(self.get_libraries()) def dump_properties(self): """Print out the attributes of a compiler instance.""" props = [] for key in list(self.executables.keys()) + \ ['version', 'libraries', 'library_dirs', 'object_switch', 'compile_switch']: if hasattr(self, key): v = getattr(self, key) props.append((key, None, '= '+repr(v))) props.sort() pretty_printer = FancyGetopt(props) for l in pretty_printer.generate_help("%s instance properties:" \ % (self.__class__.__name__)): if l[:4]==' --': l = ' ' + l[4:] print(l) ################### def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): """Compile 'src' to product 'obj'.""" src_flags = {} if is_f_file(src) and not has_f90_header(src): flavor = ':f77' compiler = self.compiler_f77 src_flags = get_f77flags(src) extra_compile_args = self.extra_f77_compile_args or [] elif is_free_format(src): flavor = ':f90' compiler = self.compiler_f90 if compiler is None: raise DistutilsExecError('f90 not supported by %s needed for %s'\ % (self.__class__.__name__, src)) extra_compile_args = self.extra_f90_compile_args or [] 
else: flavor = ':fix' compiler = self.compiler_fix if compiler is None: raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\ % (self.__class__.__name__, src)) extra_compile_args = self.extra_f90_compile_args or [] if self.object_switch[-1]==' ': o_args = [self.object_switch.strip(), obj] else: o_args = [self.object_switch.strip()+obj] assert self.compile_switch.strip() s_args = [self.compile_switch, src] if extra_compile_args: log.info('extra %s options: %r' \ % (flavor[1:], ' '.join(extra_compile_args))) extra_flags = src_flags.get(self.compiler_type, []) if extra_flags: log.info('using compile options from source: %r' \ % ' '.join(extra_flags)) command = compiler + cc_args + extra_flags + s_args + o_args \ + extra_postargs + extra_compile_args display = '%s: %s' % (os.path.basename(compiler[0]) + flavor, src) try: self.spawn(command, display=display) except DistutilsExecError as e: msg = str(e) raise CompileError(msg) from None def module_options(self, module_dirs, module_build_dir): options = [] if self.module_dir_switch is not None: if self.module_dir_switch[-1]==' ': options.extend([self.module_dir_switch.strip(), module_build_dir]) else: options.append(self.module_dir_switch.strip()+module_build_dir) else: print('XXX: module_build_dir=%r option ignored' % (module_build_dir)) print('XXX: Fix module_dir_switch for ', self.__class__.__name__) if self.module_include_switch is not None: for d in [module_build_dir]+module_dirs: options.append('%s%s' % (self.module_include_switch, d)) else: print('XXX: module_dirs=%r option ignored' % (module_dirs)) print('XXX: Fix module_include_switch for ', self.__class__.__name__) return options def library_option(self, lib): return "-l" + lib def library_dir_option(self, dir): return "-L" + dir def link(self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): objects, output_dir = self._fix_object_args(objects, output_dir) libraries, library_dirs, runtime_library_dirs = \ self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) if is_string(output_dir): output_filename = os.path.join(output_dir, output_filename) elif output_dir is not None: raise TypeError("'output_dir' must be a string or None") if self._need_link(objects, output_filename): if self.library_switch[-1]==' ': o_args = [self.library_switch.strip(), output_filename] else: o_args = [self.library_switch.strip()+output_filename] if is_string(self.objects): ld_args = objects + [self.objects] else: ld_args = objects + self.objects ld_args = ld_args + lib_opts + o_args if debug: ld_args[:0] = ['-g'] if extra_preargs: ld_args[:0] = extra_preargs if extra_postargs: ld_args.extend(extra_postargs) self.mkpath(os.path.dirname(output_filename)) if target_desc == CCompiler.EXECUTABLE: linker = self.linker_exe[:] else: linker = self.linker_so[:] command = linker + ld_args try: self.spawn(command) except DistutilsExecError as e: msg = str(e) raise LinkError(msg) from None else: log.debug("skipping %s (up-to-date)", output_filename) def _environment_hook(self, name, hook_name): if hook_name is None: return None if is_string(hook_name): if hook_name.startswith('self.'): hook_name = hook_name[5:] hook = getattr(self, hook_name) return hook() elif hook_name.startswith('exe.'): hook_name = hook_name[4:] var = 
self.executables[hook_name] if var: return var[0] else: return None elif hook_name.startswith('flags.'): hook_name = hook_name[6:] hook = getattr(self, 'get_flags_' + hook_name) return hook() else: return hook_name() def can_ccompiler_link(self, ccompiler): """ Check if the given C compiler can link objects produced by this compiler. """ return True def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): """ Convert a set of object files that are not compatible with the default linker, to a file that is compatible. Parameters ---------- objects : list List of object files to include. output_dir : str Output directory to place generated object files. extra_dll_dir : str Output directory to place extra DLL files that need to be included on Windows. Returns ------- converted_objects : list of str List of converted object files. Note that the number of output files is not necessarily the same as inputs. """ raise NotImplementedError() ## class FCompiler _default_compilers = ( # sys.platform mappings ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95', 'intelvem', 'intelem', 'flang')), ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')), ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor', 'fujitsu')), ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')), ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')), ('irix.*', ('mips', 'gnu', 'gnu95',)), ('aix.*', ('ibm', 'gnu', 'gnu95',)), # os.name mappings ('posix', ('gnu', 'gnu95',)), ('nt', ('gnu', 'gnu95',)), ('mac', ('gnu95', 'gnu', 'pg')), ) fcompiler_class = None fcompiler_aliases = None def load_all_fcompiler_classes(): """Cache all the FCompiler classes found in modules in the numpy.distutils.fcompiler package. """ from glob import glob global fcompiler_class, fcompiler_aliases if fcompiler_class is not None: return pys = os.path.join(os.path.dirname(__file__), '*.py') fcompiler_class = {} fcompiler_aliases = {} for fname in glob(pys): module_name, ext = os.path.splitext(os.path.basename(fname)) module_name = 'numpy.distutils.fcompiler.' + module_name __import__ (module_name) module = sys.modules[module_name] if hasattr(module, 'compilers'): for cname in module.compilers: klass = getattr(module, cname) desc = (klass.compiler_type, klass, klass.description) fcompiler_class[klass.compiler_type] = desc for alias in klass.compiler_aliases: if alias in fcompiler_aliases: raise ValueError("alias %r defined for both %s and %s" % (alias, klass.__name__, fcompiler_aliases[alias][1].__name__)) fcompiler_aliases[alias] = desc def _find_existing_fcompiler(compiler_types, osname=None, platform=None, requiref90=False, c_compiler=None): from numpy.distutils.core import get_distribution dist = get_distribution(always=True) for compiler_type in compiler_types: v = None try: c = new_fcompiler(plat=platform, compiler=compiler_type, c_compiler=c_compiler) c.customize(dist) v = c.get_version() if requiref90 and c.compiler_f90 is None: v = None new_compiler = c.suggested_f90_compiler if new_compiler: log.warn('Trying %r compiler as suggested by %r ' 'compiler for f90 support.' 
% (compiler_type, new_compiler)) c = new_fcompiler(plat=platform, compiler=new_compiler, c_compiler=c_compiler) c.customize(dist) v = c.get_version() if v is not None: compiler_type = new_compiler if requiref90 and c.compiler_f90 is None: raise ValueError('%s does not support compiling f90 codes, ' 'skipping.' % (c.__class__.__name__)) except DistutilsModuleError: log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type) except CompilerNotFound: log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type) if v is not None: return compiler_type return None def available_fcompilers_for_platform(osname=None, platform=None): if osname is None: osname = os.name if platform is None: platform = sys.platform matching_compiler_types = [] for pattern, compiler_type in _default_compilers: if re.match(pattern, platform) or re.match(pattern, osname): for ct in compiler_type: if ct not in matching_compiler_types: matching_compiler_types.append(ct) if not matching_compiler_types: matching_compiler_types.append('gnu') return matching_compiler_types def get_default_fcompiler(osname=None, platform=None, requiref90=False, c_compiler=None): """Determine the default Fortran compiler to use for the given platform.""" matching_compiler_types = available_fcompilers_for_platform(osname, platform) log.info("get_default_fcompiler: matching types: '%s'", matching_compiler_types) compiler_type = _find_existing_fcompiler(matching_compiler_types, osname=osname, platform=platform, requiref90=requiref90, c_compiler=c_compiler) return compiler_type # Flag to avoid rechecking for Fortran compiler every time failed_fcompilers = set() def new_fcompiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0, requiref90=False, c_compiler = None): """Generate an instance of some FCompiler subclass for the supplied platform/compiler combination. """ global failed_fcompilers fcompiler_key = (plat, compiler) if fcompiler_key in failed_fcompilers: return None load_all_fcompiler_classes() if plat is None: plat = os.name if compiler is None: compiler = get_default_fcompiler(plat, requiref90=requiref90, c_compiler=c_compiler) if compiler in fcompiler_class: module_name, klass, long_description = fcompiler_class[compiler] elif compiler in fcompiler_aliases: module_name, klass, long_description = fcompiler_aliases[compiler] else: msg = "don't know how to compile Fortran code on platform '%s'" % plat if compiler is not None: msg = msg + " with '%s' compiler." % compiler msg = msg + " Supported compilers are: %s)" \ % (','.join(fcompiler_class.keys())) log.warn(msg) failed_fcompilers.add(fcompiler_key) return None compiler = klass(verbose=verbose, dry_run=dry_run, force=force) compiler.c_compiler = c_compiler return compiler def show_fcompilers(dist=None): """Print list of available compilers (used by the "--help-fcompiler" option to "config_fc"). 
""" if dist is None: from distutils.dist import Distribution from numpy.distutils.command.config_compiler import config_fc dist = Distribution() dist.script_name = os.path.basename(sys.argv[0]) dist.script_args = ['config_fc'] + sys.argv[1:] try: dist.script_args.remove('--help-fcompiler') except ValueError: pass dist.cmdclass['config_fc'] = config_fc dist.parse_config_files() dist.parse_command_line() compilers = [] compilers_na = [] compilers_ni = [] if not fcompiler_class: load_all_fcompiler_classes() platform_compilers = available_fcompilers_for_platform() for compiler in platform_compilers: v = None log.set_verbosity(-2) try: c = new_fcompiler(compiler=compiler, verbose=dist.verbose) c.customize(dist) v = c.get_version() except (DistutilsModuleError, CompilerNotFound) as e: log.debug("show_fcompilers: %s not found" % (compiler,)) log.debug(repr(e)) if v is None: compilers_na.append(("fcompiler="+compiler, None, fcompiler_class[compiler][2])) else: c.dump_properties() compilers.append(("fcompiler="+compiler, None, fcompiler_class[compiler][2] + ' (%s)' % v)) compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers)) compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2]) for fc in compilers_ni] compilers.sort() compilers_na.sort() compilers_ni.sort() pretty_printer = FancyGetopt(compilers) pretty_printer.print_help("Fortran compilers found:") pretty_printer = FancyGetopt(compilers_na) pretty_printer.print_help("Compilers available for this " "platform, but not found:") if compilers_ni: pretty_printer = FancyGetopt(compilers_ni) pretty_printer.print_help("Compilers not available on this platform:") print("For compiler details, run 'config_fc --verbose' setup command.") def dummy_fortran_file(): fo, name = make_temp_file(suffix='.f') fo.write(" subroutine dummy()\n end\n") fo.close() return name[:-2] is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match _has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search _has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search _has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search _free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match def is_free_format(file): """Check if file is in free format Fortran.""" # f90 allows both fixed and free format, assuming fixed unless # signs of free format are detected. result = 0 with open(file, encoding='latin1') as f: line = f.readline() n = 10000 # the number of non-comment lines to scan for hints if _has_f_header(line) or _has_fix_header(line): n = 0 elif _has_f90_header(line): n = 0 result = 1 while n>0 and line: line = line.rstrip() if line and line[0]!='!': n -= 1 if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&': result = 1 break line = f.readline() return result def has_f90_header(src): with open(src, encoding='latin1') as f: line = f.readline() return _has_f90_header(line) or _has_fix_header(line) _f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I) def get_f77flags(src): """ Search the first 20 lines of fortran 77 code for line pattern `CF77FLAGS(<fcompiler type>)=<f77 flags>` Return a dictionary {<fcompiler type>:<f77 flags>}. 
""" flags = {} with open(src, encoding='latin1') as f: i = 0 for line in f: i += 1 if i>20: break m = _f77flags_re.match(line) if not m: continue fcname = m.group('fcname').strip() fflags = m.group('fflags').strip() flags[fcname] = split_quoted(fflags) return flags # TODO: implement get_f90flags and use it in _compile similarly to get_f77flags if __name__ == '__main__': show_fcompilers()
40,535
Python
38.317168
112
0.566671
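A minimal usage sketch (not part of the dumped file) of the fcompiler machinery above, mirroring the new_fcompiler -> customize -> get_version sequence that _find_existing_fcompiler and show_fcompilers themselves follow; the 'gnu95' compiler type is only an example and a working gfortran on the PATH is assumed.
# Illustrative sketch only; assumes numpy.distutils is importable and gfortran exists.
from numpy.distutils.fcompiler import (
    new_fcompiler, available_fcompilers_for_platform)

print(available_fcompilers_for_platform())   # compiler types tried on this platform
fc = new_fcompiler(compiler='gnu95')         # returns None if the type cannot be loaded
if fc is not None:
    fc.customize()             # merge class defaults, environment, config files and CLI
    print(fc.get_version())    # raises CompilerNotFound when the binary is absent
    print(fc.compiler_f90)     # fully assembled f90 command line after customize()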
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/hpux.py
from numpy.distutils.fcompiler import FCompiler compilers = ['HPUXFCompiler'] class HPUXFCompiler(FCompiler): compiler_type = 'hpux' description = 'HP Fortran 90 Compiler' version_pattern = r'HP F90 (?P<version>[^\s*,]*)' executables = { 'version_cmd' : ["f90", "+version"], 'compiler_f77' : ["f90"], 'compiler_fix' : ["f90"], 'compiler_f90' : ["f90"], 'linker_so' : ["ld", "-b"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } module_dir_switch = None #XXX: fix me module_include_switch = None #XXX: fix me pic_flags = ['+Z'] def get_flags(self): return self.pic_flags + ['+ppu', '+DD64'] def get_flags_opt(self): return ['-O3'] def get_libraries(self): return ['m'] def get_library_dirs(self): opt = ['/usr/lib/hpux64'] return opt def get_version(self, force=0, ok_status=[256, 0, 1]): # XXX status==256 may indicate 'unrecognized option' or # 'no input file'. So, version_cmd needs more work. return FCompiler.get_version(self, force, ok_status) if __name__ == '__main__': from distutils import log log.set_verbosity(10) from numpy.distutils import customized_fcompiler print(customized_fcompiler(compiler='hpux').get_version())
1,353
Python
31.238094
63
0.571323
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/gnu.py
import re import os import sys import warnings import platform import tempfile import hashlib import base64 import subprocess from subprocess import Popen, PIPE, STDOUT from numpy.distutils.exec_command import filepath_from_subprocess_output from numpy.distutils.fcompiler import FCompiler from distutils.version import LooseVersion compilers = ['GnuFCompiler', 'Gnu95FCompiler'] TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)") # XXX: handle cross compilation def is_win64(): return sys.platform == "win32" and platform.architecture()[0] == "64bit" class GnuFCompiler(FCompiler): compiler_type = 'gnu' compiler_aliases = ('g77', ) description = 'GNU Fortran 77 compiler' def gnu_version_match(self, version_string): """Handle the different versions of GNU fortran compilers""" # Strip warning(s) that may be emitted by gfortran while version_string.startswith('gfortran: warning'): version_string =\ version_string[version_string.find('\n') + 1:].strip() # Gfortran versions from after 2010 will output a simple string # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older # gfortrans may still return long version strings (``-dumpversion`` was # an alias for ``--version``) if len(version_string) <= 20: # Try to find a valid version string m = re.search(r'([0-9.]+)', version_string) if m: # g77 provides a longer version string that starts with GNU # Fortran if version_string.startswith('GNU Fortran'): return ('g77', m.group(1)) # gfortran only outputs a version string such as #.#.#, so check # if the match is at the start of the string elif m.start() == 0: return ('gfortran', m.group(1)) else: # Output probably from --version, try harder: m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string) if m: return ('gfortran', m.group(1)) m = re.search( r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string) if m: v = m.group(1) if v.startswith('0') or v.startswith('2') or v.startswith('3'): # the '0' is for early g77's return ('g77', v) else: # at some point in the 4.x series, the ' 95' was dropped # from the version string return ('gfortran', v) # If still nothing, raise an error to make the problem easy to find. err = 'A valid Fortran version was not found in this string:\n' raise ValueError(err + version_string) def version_match(self, version_string): v = self.gnu_version_match(version_string) if not v or v[0] != 'g77': return None return v[1] possible_executables = ['g77', 'f77'] executables = { 'version_cmd' : [None, "-dumpversion"], 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"], 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes 'compiler_fix' : None, 'linker_so' : [None, "-g", "-Wall"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"], 'linker_exe' : [None, "-g", "-Wall"] } module_dir_switch = None module_include_switch = None # Cygwin: f771: warning: -fPIC ignored for target (all code is # position independent) if os.name != 'nt' and sys.platform != 'cygwin': pic_flags = ['-fPIC'] # use -mno-cygwin for g77 when Python is not Cygwin-Python if sys.platform == 'win32': for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']: executables[key].append('-mno-cygwin') g2c = 'g2c' suggested_f90_compiler = 'gnu95' def get_flags_linker_so(self): opt = self.linker_so[1:] if sys.platform == 'darwin': target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value # and leave it alone. 
But, distutils will complain if the # environment's value is different from the one in the Python # Makefile used to build Python. We let distutils handle this # error checking. if not target: # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, # we try to get it first from sysconfig and then # fall back to setting it to 10.9 This is a reasonable default # even when using the official Python dist and those derived # from it. import sysconfig target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') if not target: target = '10.9' s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}' warnings.warn(s, stacklevel=2) os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target) opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) else: opt.append("-shared") if sys.platform.startswith('sunos'): # SunOS often has dynamically loaded symbols defined in the # static library libg2c.a The linker doesn't like this. To # ignore the problem, use the -mimpure-text flag. It isn't # the safest thing, but seems to work. 'man gcc' says: # ".. Instead of using -mimpure-text, you should compile all # source code with -fpic or -fPIC." opt.append('-mimpure-text') return opt def get_libgcc_dir(self): try: output = subprocess.check_output(self.compiler_f77 + ['-print-libgcc-file-name']) except (OSError, subprocess.CalledProcessError): pass else: output = filepath_from_subprocess_output(output) return os.path.dirname(output) return None def get_libgfortran_dir(self): if sys.platform[:5] == 'linux': libgfortran_name = 'libgfortran.so' elif sys.platform == 'darwin': libgfortran_name = 'libgfortran.dylib' else: libgfortran_name = None libgfortran_dir = None if libgfortran_name: find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)] try: output = subprocess.check_output( self.compiler_f77 + find_lib_arg) except (OSError, subprocess.CalledProcessError): pass else: output = filepath_from_subprocess_output(output) libgfortran_dir = os.path.dirname(output) return libgfortran_dir def get_library_dirs(self): opt = [] if sys.platform[:5] != 'linux': d = self.get_libgcc_dir() if d: # if windows and not cygwin, libg2c lies in a different folder if sys.platform == 'win32' and not d.startswith('/usr/lib'): d = os.path.normpath(d) path = os.path.join(d, "lib%s.a" % self.g2c) if not os.path.exists(path): root = os.path.join(d, *((os.pardir, ) * 4)) d2 = os.path.abspath(os.path.join(root, 'lib')) path = os.path.join(d2, "lib%s.a" % self.g2c) if os.path.exists(path): opt.append(d2) opt.append(d) # For Macports / Linux, libgfortran and libgcc are not co-located lib_gfortran_dir = self.get_libgfortran_dir() if lib_gfortran_dir: opt.append(lib_gfortran_dir) return opt def get_libraries(self): opt = [] d = self.get_libgcc_dir() if d is not None: g2c = self.g2c + '-pic' f = self.static_lib_format % (g2c, self.static_lib_extension) if not os.path.isfile(os.path.join(d, f)): g2c = self.g2c else: g2c = self.g2c if g2c is not None: opt.append(g2c) c_compiler = self.c_compiler if sys.platform == 'win32' and c_compiler and \ c_compiler.compiler_type == 'msvc': opt.append('gcc') if sys.platform == 'darwin': opt.append('cc_dynamic') return opt def get_flags_debug(self): return ['-g'] def get_flags_opt(self): v = self.get_version() if v and v <= '3.3.3': # With this compiler version building Fortran BLAS/LAPACK # with -O3 caused failures in lib.lapack heevr,syevr tests. 
opt = ['-O2'] else: opt = ['-O3'] opt.append('-funroll-loops') return opt def _c_arch_flags(self): """ Return detected arch flags from CFLAGS """ import sysconfig try: cflags = sysconfig.get_config_vars()['CFLAGS'] except KeyError: return [] arch_re = re.compile(r"-arch\s+(\w+)") arch_flags = [] for arch in arch_re.findall(cflags): arch_flags += ['-arch', arch] return arch_flags def get_flags_arch(self): return [] def runtime_library_dir_option(self, dir): if sys.platform == 'win32' or sys.platform == 'cygwin': # Linux/Solaris/Unix support RPATH, Windows does not raise NotImplementedError # TODO: could use -Xlinker here, if it's supported assert "," not in dir if sys.platform == 'darwin': return f'-Wl,-rpath,{dir}' elif sys.platform.startswith(('aix', 'os400')): # AIX RPATH is called LIBPATH return f'-Wl,-blibpath:{dir}' else: return f'-Wl,-rpath={dir}' class Gnu95FCompiler(GnuFCompiler): compiler_type = 'gnu95' compiler_aliases = ('gfortran', ) description = 'GNU Fortran 95 compiler' def version_match(self, version_string): v = self.gnu_version_match(version_string) if not v or v[0] != 'gfortran': return None v = v[1] if LooseVersion(v) >= "4": # gcc-4 series releases do not support -mno-cygwin option pass else: # use -mno-cygwin flag for gfortran when Python is not # Cygwin-Python if sys.platform == 'win32': for key in [ 'version_cmd', 'compiler_f77', 'compiler_f90', 'compiler_fix', 'linker_so', 'linker_exe' ]: self.executables[key].append('-mno-cygwin') return v possible_executables = ['gfortran', 'f95'] executables = { 'version_cmd' : ["<F90>", "-dumpversion"], 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form", "-fno-second-underscore"], 'compiler_f90' : [None, "-Wall", "-g", "-fno-second-underscore"], 'compiler_fix' : [None, "-Wall", "-g","-ffixed-form", "-fno-second-underscore"], 'linker_so' : ["<F90>", "-Wall", "-g"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"], 'linker_exe' : [None, "-Wall"] } module_dir_switch = '-J' module_include_switch = '-I' if sys.platform.startswith(('aix', 'os400')): executables['linker_so'].append('-lpthread') if platform.architecture()[0][:2] == '64': for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']: executables[key].append('-maix64') g2c = 'gfortran' def _universal_flags(self, cmd): """Return a list of -arch flags for every supported architecture.""" if not sys.platform == 'darwin': return [] arch_flags = [] # get arches the C compiler gets. 
c_archs = self._c_arch_flags() if "i386" in c_archs: c_archs[c_archs.index("i386")] = "i686" # check the arches the Fortran compiler supports, and compare with # arch flags from C compiler for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]: if _can_target(cmd, arch) and arch in c_archs: arch_flags.extend(["-arch", arch]) return arch_flags def get_flags(self): flags = GnuFCompiler.get_flags(self) arch_flags = self._universal_flags(self.compiler_f90) if arch_flags: flags[:0] = arch_flags return flags def get_flags_linker_so(self): flags = GnuFCompiler.get_flags_linker_so(self) arch_flags = self._universal_flags(self.linker_so) if arch_flags: flags[:0] = arch_flags return flags def get_library_dirs(self): opt = GnuFCompiler.get_library_dirs(self) if sys.platform == 'win32': c_compiler = self.c_compiler if c_compiler and c_compiler.compiler_type == "msvc": target = self.get_target() if target: d = os.path.normpath(self.get_libgcc_dir()) root = os.path.join(d, *((os.pardir, ) * 4)) path = os.path.join(root, "lib") mingwdir = os.path.normpath(path) if os.path.exists(os.path.join(mingwdir, "libmingwex.a")): opt.append(mingwdir) # For Macports / Linux, libgfortran and libgcc are not co-located lib_gfortran_dir = self.get_libgfortran_dir() if lib_gfortran_dir: opt.append(lib_gfortran_dir) return opt def get_libraries(self): opt = GnuFCompiler.get_libraries(self) if sys.platform == 'darwin': opt.remove('cc_dynamic') if sys.platform == 'win32': c_compiler = self.c_compiler if c_compiler and c_compiler.compiler_type == "msvc": if "gcc" in opt: i = opt.index("gcc") opt.insert(i + 1, "mingwex") opt.insert(i + 1, "mingw32") c_compiler = self.c_compiler if c_compiler and c_compiler.compiler_type == "msvc": return [] else: pass return opt def get_target(self): try: p = subprocess.Popen( self.compiler_f77 + ['-v'], stdin=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = p.communicate() output = (stdout or b"") + (stderr or b"") except (OSError, subprocess.CalledProcessError): pass else: output = filepath_from_subprocess_output(output) m = TARGET_R.search(output) if m: return m.group(1) return "" def _hash_files(self, filenames): h = hashlib.sha1() for fn in filenames: with open(fn, 'rb') as f: while True: block = f.read(131072) if not block: break h.update(block) text = base64.b32encode(h.digest()) text = text.decode('ascii') return text.rstrip('=') def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir, chained_dlls, is_archive): """Create a wrapper shared library for the given objects Return an MSVC-compatible lib """ c_compiler = self.c_compiler if c_compiler.compiler_type != "msvc": raise ValueError("This method only supports MSVC") object_hash = self._hash_files(list(objects) + list(chained_dlls)) if is_win64(): tag = 'win_amd64' else: tag = 'win32' basename = 'lib' + os.path.splitext( os.path.basename(objects[0]))[0][:8] root_name = basename + '.' 
+ object_hash + '.gfortran-' + tag dll_name = root_name + '.dll' def_name = root_name + '.def' lib_name = root_name + '.lib' dll_path = os.path.join(extra_dll_dir, dll_name) def_path = os.path.join(output_dir, def_name) lib_path = os.path.join(output_dir, lib_name) if os.path.isfile(lib_path): # Nothing to do return lib_path, dll_path if is_archive: objects = (["-Wl,--whole-archive"] + list(objects) + ["-Wl,--no-whole-archive"]) self.link_shared_object( objects, dll_name, output_dir=extra_dll_dir, extra_postargs=list(chained_dlls) + [ '-Wl,--allow-multiple-definition', '-Wl,--output-def,' + def_path, '-Wl,--export-all-symbols', '-Wl,--enable-auto-import', '-static', '-mlong-double-64', ]) # No PowerPC! if is_win64(): specifier = '/MACHINE:X64' else: specifier = '/MACHINE:X86' # MSVC specific code lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier] if not c_compiler.initialized: c_compiler.initialize() c_compiler.spawn([c_compiler.lib] + lib_args) return lib_path, dll_path def can_ccompiler_link(self, compiler): # MSVC cannot link objects compiled by GNU fortran return compiler.compiler_type not in ("msvc", ) def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir): """ Convert a set of object files that are not compatible with the default linker, to a file that is compatible. """ if self.c_compiler.compiler_type == "msvc": # Compile a DLL and return the lib for the DLL as # the object. Also keep track of previous DLLs that # we have compiled so that we can link against them. # If there are .a archives, assume they are self-contained # static libraries, and build separate DLLs for each archives = [] plain_objects = [] for obj in objects: if obj.lower().endswith('.a'): archives.append(obj) else: plain_objects.append(obj) chained_libs = [] chained_dlls = [] for archive in archives[::-1]: lib, dll = self._link_wrapper_lib( [archive], output_dir, extra_dll_dir, chained_dlls=chained_dlls, is_archive=True) chained_libs.insert(0, lib) chained_dlls.insert(0, dll) if not plain_objects: return chained_libs lib, dll = self._link_wrapper_lib( plain_objects, output_dir, extra_dll_dir, chained_dlls=chained_dlls, is_archive=False) return [lib] + chained_libs else: raise ValueError("Unsupported C compiler") def _can_target(cmd, arch): """Return true if the architecture supports the -arch flag""" newcmd = cmd[:] fid, filename = tempfile.mkstemp(suffix=".f") os.close(fid) try: d = os.path.dirname(filename) output = os.path.splitext(filename)[0] + ".o" try: newcmd.extend(["-arch", arch, "-c", filename]) p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d) p.communicate() return p.returncode == 0 finally: if os.path.exists(output): os.remove(output) finally: os.remove(filename) if __name__ == '__main__': from distutils import log from numpy.distutils import customized_fcompiler log.set_verbosity(2) print(customized_fcompiler('gnu').get_version()) try: print(customized_fcompiler('g95').get_version()) except Exception as e: print(e)
20,502
Python
35.875899
97
0.526339
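A short, hedged sketch (not part of the dumped file) of how the gfortran wrapper above is typically obtained and which flags it reports; customized_fcompiler is the same helper the file's own __main__ block uses, and the printed values assume a reasonably recent gfortran is installed.
# Illustrative sketch only; fails if no gfortran (or g77) binary is installed.
from numpy.distutils import customized_fcompiler

fc = customized_fcompiler(compiler='gnu95')   # new_fcompiler + customize in one call
print(fc.get_version())       # e.g. '11.4.0' -- whatever `gfortran -dumpversion` reports
print(fc.get_flags_opt())     # ['-O3', '-funroll-loops'] for releases newer than 3.3.3
print(fc.get_flags_debug())   # ['-g']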
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/nag.py
import sys import re from numpy.distutils.fcompiler import FCompiler compilers = ['NAGFCompiler', 'NAGFORCompiler'] class BaseNAGFCompiler(FCompiler): version_pattern = r'NAG.* Release (?P<version>[^(\s]*)' def version_match(self, version_string): m = re.search(self.version_pattern, version_string) if m: return m.group('version') else: return None def get_flags_linker_so(self): return ["-Wl,-shared"] def get_flags_opt(self): return ['-O4'] def get_flags_arch(self): return [] class NAGFCompiler(BaseNAGFCompiler): compiler_type = 'nag' description = 'NAGWare Fortran 95 Compiler' executables = { 'version_cmd' : ["<F90>", "-V"], 'compiler_f77' : ["f95", "-fixed"], 'compiler_fix' : ["f95", "-fixed"], 'compiler_f90' : ["f95"], 'linker_so' : ["<F90>"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } def get_flags_linker_so(self): if sys.platform == 'darwin': return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] return BaseNAGFCompiler.get_flags_linker_so(self) def get_flags_arch(self): version = self.get_version() if version and version < '5.1': return ['-target=native'] else: return BaseNAGFCompiler.get_flags_arch(self) def get_flags_debug(self): return ['-g', '-gline', '-g90', '-nan', '-C'] class NAGFORCompiler(BaseNAGFCompiler): compiler_type = 'nagfor' description = 'NAG Fortran Compiler' executables = { 'version_cmd' : ["nagfor", "-V"], 'compiler_f77' : ["nagfor", "-fixed"], 'compiler_fix' : ["nagfor", "-fixed"], 'compiler_f90' : ["nagfor"], 'linker_so' : ["nagfor"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } def get_flags_linker_so(self): if sys.platform == 'darwin': return ['-unsharedrts', '-Wl,-bundle,-flat_namespace,-undefined,suppress'] return BaseNAGFCompiler.get_flags_linker_so(self) def get_flags_debug(self): version = self.get_version() if version and version > '6.1': return ['-g', '-u', '-nan', '-C=all', '-thread_safe', '-kind=unique', '-Warn=allocation', '-Warn=subnormal'] else: return ['-g', '-nan', '-C=all', '-u', '-thread_safe'] if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils import customized_fcompiler compiler = customized_fcompiler(compiler='nagfor') print(compiler.get_version()) print(compiler.get_flags_debug())
2,777
Python
30.568181
86
0.550954
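A small regex check (not part of the dumped file) of the shared BaseNAGFCompiler.version_pattern; the banner string is invented purely for illustration -- real `nagfor -V` output differs in detail -- and only demonstrates why the `[^(\s]*` class stops the captured version before the parenthesised codename.
# Illustrative sketch only; the banner is a hypothetical example, not real nagfor output.
import re

pattern = r'NAG.* Release (?P<version>[^(\s]*)'
banner = 'NAG Fortran Compiler Release 7.1(Hanzomon) Build 7101'   # hypothetical text
m = re.search(pattern, banner)
print(m.group('version'))   # -> '7.1'; [^(\s]* stops before the codename parenthesis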
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/arm.py
from __future__ import division, absolute_import, print_function import sys from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file from sys import platform from os.path import join, dirname, normpath compilers = ['ArmFlangCompiler'] import functools class ArmFlangCompiler(FCompiler): compiler_type = 'arm' description = 'Arm Compiler' version_pattern = r'\s*Arm.*version (?P<version>[\d.-]+).*' ar_exe = 'lib.exe' possible_executables = ['armflang'] executables = { 'version_cmd': ["", "--version"], 'compiler_f77': ["armflang", "-fPIC"], 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"], 'compiler_f90': ["armflang", "-fPIC"], 'linker_so': ["armflang", "-fPIC", "-shared"], 'archiver': ["ar", "-cr"], 'ranlib': None } pic_flags = ["-fPIC", "-DPIC"] c_compiler = 'arm' module_dir_switch = '-module ' # Don't remove ending space! def get_libraries(self): opt = FCompiler.get_libraries(self) opt.extend(['flang', 'flangrti', 'ompstub']) return opt @functools.lru_cache(maxsize=128) def get_library_dirs(self): """List of compiler library directories.""" opt = FCompiler.get_library_dirs(self) flang_dir = dirname(self.executables['compiler_f77'][0]) opt.append(normpath(join(flang_dir, '..', 'lib'))) return opt def get_flags(self): return [] def get_flags_free(self): return [] def get_flags_debug(self): return ['-g'] def get_flags_opt(self): return ['-O3'] def get_flags_arch(self): return [] def runtime_library_dir_option(self, dir): return '-Wl,-rpath=%s' % dir if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils import customized_fcompiler print(customized_fcompiler(compiler='armflang').get_version())
2,235
Python
29.216216
79
0.524385
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/pg.py
# http://www.pgroup.com import sys from numpy.distutils.fcompiler import FCompiler from sys import platform from os.path import join, dirname, normpath compilers = ['PGroupFCompiler', 'PGroupFlangCompiler'] class PGroupFCompiler(FCompiler): compiler_type = 'pg' description = 'Portland Group Fortran Compiler' version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*' if platform == 'darwin': executables = { 'version_cmd': ["<F77>", "-V"], 'compiler_f77': ["pgfortran", "-dynamiclib"], 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"], 'compiler_f90': ["pgfortran", "-dynamiclib"], 'linker_so': ["libtool"], 'archiver': ["ar", "-cr"], 'ranlib': ["ranlib"] } pic_flags = [''] else: executables = { 'version_cmd': ["<F77>", "-V"], 'compiler_f77': ["pgfortran"], 'compiler_fix': ["pgfortran", "-Mfixed"], 'compiler_f90': ["pgfortran"], 'linker_so': ["<F90>"], 'archiver': ["ar", "-cr"], 'ranlib': ["ranlib"] } pic_flags = ['-fpic'] module_dir_switch = '-module ' module_include_switch = '-I' def get_flags(self): opt = ['-Minform=inform', '-Mnosecond_underscore'] return self.pic_flags + opt def get_flags_opt(self): return ['-fast'] def get_flags_debug(self): return ['-g'] if platform == 'darwin': def get_flags_linker_so(self): return ["-dynamic", '-undefined', 'dynamic_lookup'] else: def get_flags_linker_so(self): return ["-shared", '-fpic'] def runtime_library_dir_option(self, dir): return '-R%s' % dir import functools class PGroupFlangCompiler(FCompiler): compiler_type = 'flang' description = 'Portland Group Fortran LLVM Compiler' version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*' ar_exe = 'lib.exe' possible_executables = ['flang'] executables = { 'version_cmd': ["<F77>", "--version"], 'compiler_f77': ["flang"], 'compiler_fix': ["flang"], 'compiler_f90': ["flang"], 'linker_so': [None], 'archiver': [ar_exe, "/verbose", "/OUT:"], 'ranlib': None } library_switch = '/OUT:' # No space after /OUT:! module_dir_switch = '-module ' # Don't remove ending space! def get_libraries(self): opt = FCompiler.get_libraries(self) opt.extend(['flang', 'flangrti', 'ompstub']) return opt @functools.lru_cache(maxsize=128) def get_library_dirs(self): """List of compiler library directories.""" opt = FCompiler.get_library_dirs(self) flang_dir = dirname(self.executables['compiler_f77'][0]) opt.append(normpath(join(flang_dir, '..', 'lib'))) return opt def get_flags(self): return [] def get_flags_free(self): return [] def get_flags_debug(self): return ['-g'] def get_flags_opt(self): return ['-O3'] def get_flags_arch(self): return [] def runtime_library_dir_option(self, dir): raise NotImplementedError if __name__ == '__main__': from distutils import log log.set_verbosity(2) from numpy.distutils import customized_fcompiler if 'flang' in sys.argv: print(customized_fcompiler(compiler='flang').get_version()) else: print(customized_fcompiler(compiler='pg').get_version())
3,568
Python
26.666666
74
0.557175
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/g95.py
# http://g95.sourceforge.net/ from numpy.distutils.fcompiler import FCompiler compilers = ['G95FCompiler'] class G95FCompiler(FCompiler): compiler_type = 'g95' description = 'G95 Fortran Compiler' # version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*' # $ g95 --version # G95 (GCC 4.0.3 (g95!) May 22 2006) version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*' # $ g95 --version # G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006) executables = { 'version_cmd' : ["<F90>", "--version"], 'compiler_f77' : ["g95", "-ffixed-form"], 'compiler_fix' : ["g95", "-ffixed-form"], 'compiler_f90' : ["g95"], 'linker_so' : ["<F90>", "-shared"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } pic_flags = ['-fpic'] module_dir_switch = '-fmod=' module_include_switch = '-I' def get_flags(self): return ['-fno-second-underscore'] def get_flags_opt(self): return ['-O'] def get_flags_debug(self): return ['-g'] if __name__ == '__main__': from distutils import log from numpy.distutils import customized_fcompiler log.set_verbosity(2) print(customized_fcompiler('g95').get_version())
1,330
Python
29.953488
105
0.539098
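A quick check (not part of the dumped file) of G95FCompiler.version_pattern against the sample `g95 --version` banner quoted in the file's own comments; nothing beyond the standard library is assumed.
# Illustrative sketch only; the banner is the sample output quoted in g95.py itself.
import re

pattern = (r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) '
           r'\(g95 (?P<version>.*)!\) (?P<date>.*)\).*')
banner = 'G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)'
m = re.match(pattern, banner)
print(m.group('version'), m.group('gccversion'))   # -> 0.90 4.0.3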
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/ibm.py
import os import re import sys import subprocess from numpy.distutils.fcompiler import FCompiler from numpy.distutils.exec_command import find_executable from numpy.distutils.misc_util import make_temp_file from distutils import log compilers = ['IBMFCompiler'] class IBMFCompiler(FCompiler): compiler_type = 'ibm' description = 'IBM XL Fortran Compiler' version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)' #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004 executables = { 'version_cmd' : ["<F77>", "-qversion"], 'compiler_f77' : ["xlf"], 'compiler_fix' : ["xlf90", "-qfixed"], 'compiler_f90' : ["xlf90"], 'linker_so' : ["xlf95"], 'archiver' : ["ar", "-cr"], 'ranlib' : ["ranlib"] } def get_version(self,*args,**kwds): version = FCompiler.get_version(self,*args,**kwds) if version is None and sys.platform.startswith('aix'): # use lslpp to find out xlf version lslpp = find_executable('lslpp') xlf = find_executable('xlf') if os.path.exists(xlf) and os.path.exists(lslpp): try: o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp']) except (OSError, subprocess.CalledProcessError): pass else: m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o) if m: version = m.group('version') xlf_dir = '/etc/opt/ibmcmp/xlf' if version is None and os.path.isdir(xlf_dir): # linux: # If the output of xlf does not contain version info # (that's the case with xlf 8.1, for instance) then # let's try another method: l = sorted(os.listdir(xlf_dir)) l.reverse() l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))] if l: from distutils.version import LooseVersion self.version = version = LooseVersion(l[0]) return version def get_flags(self): return ['-qextname'] def get_flags_debug(self): return ['-g'] def get_flags_linker_so(self): opt = [] if sys.platform=='darwin': opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress') else: opt.append('-bshared') version = self.get_version(ok_status=[0, 40]) if version is not None: if sys.platform.startswith('aix'): xlf_cfg = '/etc/xlf.cfg' else: xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version fo, new_cfg = make_temp_file(suffix='_xlf.cfg') log.info('Creating '+new_cfg) with open(xlf_cfg, 'r') as fi: crt1_match = re.compile(r'\s*crt\s*=\s*(?P<path>.*)/crt1.o').match for line in fi: m = crt1_match(line) if m: fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) else: fo.write(line) fo.close() opt.append('-F'+new_cfg) return opt def get_flags_opt(self): return ['-O3'] if __name__ == '__main__': from numpy.distutils import customized_fcompiler log.set_verbosity(2) print(customized_fcompiler(compiler='ibm').get_version())
3,539
Python
35.122449
137
0.532354