File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/pathf95.py

from numpy.distutils.fcompiler import FCompiler
compilers = ['PathScaleFCompiler']
class PathScaleFCompiler(FCompiler):
compiler_type = 'pathf95'
description = 'PathScale Fortran Compiler'
version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'
executables = {
'version_cmd' : ["pathf95", "-version"],
'compiler_f77' : ["pathf95", "-fixedform"],
'compiler_fix' : ["pathf95", "-fixedform"],
'compiler_f90' : ["pathf95"],
'linker_so' : ["pathf95", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fPIC']
module_dir_switch = '-module ' # Don't remove ending space!
module_include_switch = '-I'
def get_flags_opt(self):
return ['-O3']
def get_flags_debug(self):
return ['-g']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='pathf95').get_version())
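An illustrative sketch (not part of the file above; the banner text is invented) of how version_pattern is matched against compiler output:

import re
pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'
banner = 'PathScale(TM) Compiler Suite: Version 3.2'  # hypothetical banner
m = re.match(pattern, banner)
print(m.group('version'))  # -> 3.2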

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/environment.py

import os
from distutils.dist import Distribution
__metaclass__ = type
class EnvironmentConfig:
def __init__(self, distutils_section='ALL', **kw):
self._distutils_section = distutils_section
self._conf_keys = kw
self._conf = None
self._hook_handler = None
def dump_variable(self, name):
conf_desc = self._conf_keys[name]
hook, envvar, confvar, convert, append = conf_desc
if not convert:
convert = lambda x : x
print('%s.%s:' % (self._distutils_section, name))
v = self._hook_handler(name, hook)
print(' hook : %s' % (convert(v),))
if envvar:
v = os.environ.get(envvar, None)
print(' environ: %s' % (convert(v),))
if confvar and self._conf:
v = self._conf.get(confvar, (None, None))[1]
print(' config : %s' % (convert(v),))
def dump_variables(self):
for name in self._conf_keys:
self.dump_variable(name)
def __getattr__(self, name):
try:
conf_desc = self._conf_keys[name]
except KeyError:
raise AttributeError(
f"'EnvironmentConfig' object has no attribute '{name}'"
) from None
return self._get_var(name, conf_desc)
def get(self, name, default=None):
try:
conf_desc = self._conf_keys[name]
except KeyError:
return default
var = self._get_var(name, conf_desc)
if var is None:
var = default
return var
def _get_var(self, name, conf_desc):
hook, envvar, confvar, convert, append = conf_desc
if convert is None:
convert = lambda x: x
var = self._hook_handler(name, hook)
if envvar is not None:
envvar_contents = os.environ.get(envvar)
if envvar_contents is not None:
envvar_contents = convert(envvar_contents)
if var and append:
if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1':
var.extend(envvar_contents)
else:
# NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0
# to keep old (overwrite flags rather than append to
# them) behavior
var = envvar_contents
else:
var = envvar_contents
if confvar is not None and self._conf:
if confvar in self._conf:
source, confvar_contents = self._conf[confvar]
var = convert(confvar_contents)
return var
def clone(self, hook_handler):
ec = self.__class__(distutils_section=self._distutils_section,
**self._conf_keys)
ec._hook_handler = hook_handler
return ec
def use_distribution(self, dist):
if isinstance(dist, Distribution):
self._conf = dist.get_option_dict(self._distutils_section)
else:
self._conf = dist
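
A minimal sketch (not part of the file above) of how the (hook, envvar, confvar, convert, append) descriptions are consumed; the hook handler and the FOPT value are invented, and only the environment-variable slot is exercised:

import os
from numpy.distutils.fcompiler.environment import EnvironmentConfig

cfg = EnvironmentConfig(
    distutils_section='config_fc',
    opt=(None, 'FOPT', 'opt', str.split, True),  # hook=None: no hooked default
)
# clone() installs the hook handler; returning None means "no hooked value",
# so the environment variable (when set) wins inside _get_var().
cfg = cfg.clone(lambda name, hook: None)

os.environ['FOPT'] = '-O2 -funroll-loops'
print(cfg.get('opt'))          # -> ['-O2', '-funroll-loops']
print(cfg.get('missing', []))  # -> [] (default for unknown names)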

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/sun.py

from numpy.distutils.ccompiler import simple_version_match
from numpy.distutils.fcompiler import FCompiler
compilers = ['SunFCompiler']
class SunFCompiler(FCompiler):
compiler_type = 'sun'
description = 'Sun or Forte Fortran 95 Compiler'
# ex:
# f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28
version_match = simple_version_match(
start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95')
executables = {
'version_cmd' : ["<F90>", "-V"],
'compiler_f77' : ["f90"],
'compiler_fix' : ["f90", "-fixed"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>", "-Bdynamic", "-G"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = '-moddir='
module_include_switch = '-M'
pic_flags = ['-xcode=pic32']
def get_flags_f77(self):
ret = ["-ftrap=%none"]
if (self.get_version() or '') >= '7':
ret.append("-f77")
else:
ret.append("-fixed")
return ret
def get_opt(self):
return ['-fast', '-dalign']
def get_arch(self):
return ['-xtarget=generic']
def get_libraries(self):
opt = []
opt.extend(['fsu', 'sunmath', 'mvec'])
return opt
def runtime_library_dir_option(self, dir):
return '-R%s' % dir
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='sun').get_version())

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/mips.py

from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler
compilers = ['MIPSFCompiler']
class MIPSFCompiler(FCompiler):
compiler_type = 'mips'
description = 'MIPSpro Fortran Compiler'
version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)'
executables = {
'version_cmd' : ["<F90>", "-version"],
'compiler_f77' : ["f77", "-f77"],
'compiler_fix' : ["f90", "-fixedform"],
'compiler_f90' : ["f90"],
'linker_so' : ["f90", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : None
}
module_dir_switch = None #XXX: fix me
module_include_switch = None #XXX: fix me
pic_flags = ['-KPIC']
def get_flags(self):
return self.pic_flags + ['-n32']
def get_flags_opt(self):
return ['-O3']
def get_flags_arch(self):
opt = []
for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split():
if getattr(cpu, 'is_IP%s'%a)():
opt.append('-TARG:platform=IP%s' % a)
break
return opt
def get_flags_arch_f77(self):
r = None
if cpu.is_r10000(): r = 10000
elif cpu.is_r12000(): r = 12000
elif cpu.is_r8000(): r = 8000
elif cpu.is_r5000(): r = 5000
elif cpu.is_r4000(): r = 4000
if r is not None:
return ['r%s' % (r)]
return []
def get_flags_arch_f90(self):
r = self.get_flags_arch_f77()
if r:
r[0] = '-' + r[0]
return r
if __name__ == '__main__':
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='mips').get_version())

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/none.py

from numpy.distutils.fcompiler import FCompiler
from numpy.distutils import customized_fcompiler
compilers = ['NoneFCompiler']
class NoneFCompiler(FCompiler):
compiler_type = 'none'
description = 'Fake Fortran compiler'
executables = {'compiler_f77': None,
'compiler_f90': None,
'compiler_fix': None,
'linker_so': None,
'linker_exe': None,
'archiver': None,
'ranlib': None,
'version_cmd': None,
}
def find_executables(self):
pass
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
print(customized_fcompiler(compiler='none').get_version())

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/absoft.py
# http://www.absoft.com/literature/osxuserguide.pdf
# http://www.absoft.com/documentation.html
# Notes:
# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
# generated extension modules (works for f2py v2.45.241_1936 and up)
import os
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
from numpy.distutils.misc_util import cyg2win32
compilers = ['AbsoftFCompiler']
class AbsoftFCompiler(FCompiler):
compiler_type = 'absoft'
description = 'Absoft Corp Fortran Compiler'
#version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
# on windows: f90 -V -c dummy.f
# f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16
# samt5735(8)$ f90 -V -c dummy.f
# f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
# Note that fink installs g77 as f77, so need to use f90 for detection.
executables = {
'version_cmd' : None, # set by update_executables
'compiler_f77' : ["f77"],
'compiler_fix' : ["f90"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
if os.name=='nt':
library_switch = '/out:' #No space after /out:!
module_dir_switch = None
module_include_switch = '-p'
def update_executables(self):
f = cyg2win32(dummy_fortran_file())
self.executables['version_cmd'] = ['<F90>', '-V', '-c',
f+'.f', '-o', f+'.o']
def get_flags_linker_so(self):
if os.name=='nt':
opt = ['/dll']
# The "-K shared" switches are being left in for pre-9.0 versions
# of Absoft though I don't think versions earlier than 9 can
# actually be used to build shared libraries. In fact, version
# 8 of Absoft doesn't recognize "-K shared" and will fail.
elif self.get_version() >= '9.0':
opt = ['-shared']
else:
opt = ["-K", "shared"]
return opt
def library_dir_option(self, dir):
if os.name=='nt':
return ['-link', '/PATH:%s' % (dir)]
return "-L" + dir
def library_option(self, lib):
if os.name=='nt':
return '%s.lib' % (lib)
return "-l" + lib
def get_library_dirs(self):
opt = FCompiler.get_library_dirs(self)
d = os.environ.get('ABSOFT')
if d:
if self.get_version() >= '10.0':
# use shared libraries, the static libraries were not compiled -fPIC
prefix = 'sh'
else:
prefix = ''
if cpu.is_64bit():
suffix = '64'
else:
suffix = ''
opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))
return opt
def get_libraries(self):
opt = FCompiler.get_libraries(self)
if self.get_version() >= '11.0':
opt.extend(['af90math', 'afio', 'af77math', 'amisc'])
elif self.get_version() >= '10.0':
opt.extend(['af90math', 'afio', 'af77math', 'U77'])
elif self.get_version() >= '8.0':
opt.extend(['f90math', 'fio', 'f77math', 'U77'])
else:
opt.extend(['fio', 'f90math', 'fmath', 'U77'])
if os.name =='nt':
opt.append('COMDLG32')
return opt
def get_flags(self):
opt = FCompiler.get_flags(self)
if os.name != 'nt':
opt.extend(['-s'])
if self.get_version():
if self.get_version()>='8.2':
opt.append('-fpic')
return opt
def get_flags_f77(self):
opt = FCompiler.get_flags_f77(self)
opt.extend(['-N22', '-N90', '-N110'])
v = self.get_version()
if os.name == 'nt':
if v and v>='8.0':
opt.extend(['-f', '-N15'])
else:
opt.append('-f')
if v:
if v<='4.6':
opt.append('-B108')
else:
# Though -N15 is undocumented, it works with
# Absoft 8.0 on Linux
opt.append('-N15')
return opt
def get_flags_f90(self):
opt = FCompiler.get_flags_f90(self)
opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
"-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
if self.get_version():
if self.get_version()>'4.6':
opt.extend(["-YDEALLOC=ALL"])
return opt
def get_flags_fix(self):
opt = FCompiler.get_flags_fix(self)
opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
"-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
opt.extend(["-f", "fixed"])
return opt
def get_flags_opt(self):
opt = ['-O']
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='absoft').get_version())

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/lahey.py

import os
from numpy.distutils.fcompiler import FCompiler
compilers = ['LaheyFCompiler']
class LaheyFCompiler(FCompiler):
compiler_type = 'lahey'
description = 'Lahey/Fujitsu Fortran 95 Compiler'
version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : ["lf95", "--fix"],
'compiler_fix' : ["lf95", "--fix"],
'compiler_f90' : ["lf95"],
'linker_so' : ["lf95", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX Fix me
module_include_switch = None #XXX Fix me
def get_flags_opt(self):
return ['-O']
def get_flags_debug(self):
return ['-g', '--chk', '--chkglobal']
def get_library_dirs(self):
opt = []
d = os.environ.get('LAHEY')
if d:
opt.append(os.path.join(d, 'lib'))
return opt
def get_libraries(self):
opt = []
opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='lahey').get_version())

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/vast.py

import os
from numpy.distutils.fcompiler.gnu import GnuFCompiler
compilers = ['VastFCompiler']
class VastFCompiler(GnuFCompiler):
compiler_type = 'vast'
compiler_aliases = ()
description = 'Pacific-Sierra Research Fortran 90 Compiler'
version_pattern = (r'\s*Pacific-Sierra Research vf90 '
r'(Personal|Professional)\s+(?P<version>[^\s]*)')
    # VAST f90 does not support -o with -c. So, object files are created
    # in the current directory and then moved to the build directory
object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '
executables = {
'version_cmd' : ["vf90", "-v"],
'compiler_f77' : ["g77"],
'compiler_fix' : ["f90", "-Wv,-ya"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX Fix me
module_include_switch = None #XXX Fix me
def find_executables(self):
pass
def get_version_cmd(self):
f90 = self.compiler_f90[0]
d, b = os.path.split(f90)
vf90 = os.path.join(d, 'v'+b)
return vf90
def get_flags_arch(self):
vast_version = self.get_version()
gnu = GnuFCompiler()
gnu.customize(None)
self.version = gnu.get_version()
opt = GnuFCompiler.get_flags_arch(self)
self.version = vast_version
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='vast').get_version())

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/intel.py

# http://developer.intel.com/software/products/compilers/flin/
import sys
from numpy.distutils.ccompiler import simple_version_match
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
compilers = ['IntelFCompiler', 'IntelVisualFCompiler',
'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler',
'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler']
def intel_version_match(type):
# Match against the important stuff in the version string
return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,))
class BaseIntelFCompiler(FCompiler):
def update_executables(self):
f = dummy_fortran_file()
self.executables['version_cmd'] = ['<F77>', '-FI', '-V', '-c',
f + '.f', '-o', f + '.o']
def runtime_library_dir_option(self, dir):
# TODO: could use -Xlinker here, if it's supported
assert "," not in dir
return '-Wl,-rpath=%s' % dir
class IntelFCompiler(BaseIntelFCompiler):
compiler_type = 'intel'
compiler_aliases = ('ifort',)
description = 'Intel Fortran Compiler for 32-bit apps'
version_match = intel_version_match('32-bit|IA-32')
possible_executables = ['ifort', 'ifc']
executables = {
'version_cmd' : None, # set by update_executables
'compiler_f77' : [None, "-72", "-w90", "-w95"],
'compiler_f90' : [None],
'compiler_fix' : [None, "-FI"],
'linker_so' : ["<F90>", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fPIC']
module_dir_switch = '-module ' # Don't remove ending space!
module_include_switch = '-I'
def get_flags_free(self):
return ['-FR']
def get_flags(self):
return ['-fPIC']
def get_flags_opt(self): # Scipy test failures with -O2
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
return ['-fp-model', 'strict', '-O1',
'-assume', 'minus0', '-{}'.format(mpopt)]
def get_flags_arch(self):
return []
def get_flags_linker_so(self):
opt = FCompiler.get_flags_linker_so(self)
v = self.get_version()
if v and v >= '8.0':
opt.append('-nofor_main')
if sys.platform == 'darwin':
# Here, it's -dynamiclib
try:
idx = opt.index('-shared')
opt.remove('-shared')
except ValueError:
idx = 0
opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup']
return opt
class IntelItaniumFCompiler(IntelFCompiler):
compiler_type = 'intele'
compiler_aliases = ()
description = 'Intel Fortran Compiler for Itanium apps'
version_match = intel_version_match('Itanium|IA-64')
possible_executables = ['ifort', 'efort', 'efc']
executables = {
'version_cmd' : None,
'compiler_f77' : [None, "-FI", "-w90", "-w95"],
'compiler_fix' : [None, "-FI"],
'compiler_f90' : [None],
'linker_so' : ['<F90>', "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
class IntelEM64TFCompiler(IntelFCompiler):
compiler_type = 'intelem'
compiler_aliases = ()
description = 'Intel Fortran Compiler for 64-bit apps'
version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit')
possible_executables = ['ifort', 'efort', 'efc']
executables = {
'version_cmd' : None,
'compiler_f77' : [None, "-FI"],
'compiler_fix' : [None, "-FI"],
'compiler_f90' : [None],
'linker_so' : ['<F90>', "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
# Is there no difference in the version string between the above compilers
# and the Visual compilers?
class IntelVisualFCompiler(BaseIntelFCompiler):
compiler_type = 'intelv'
description = 'Intel Visual Fortran Compiler for 32-bit apps'
version_match = intel_version_match('32-bit|IA-32')
def update_executables(self):
f = dummy_fortran_file()
self.executables['version_cmd'] = ['<F77>', '/FI', '/c',
f + '.f', '/o', f + '.o']
ar_exe = 'lib.exe'
possible_executables = ['ifort', 'ifl']
executables = {
'version_cmd' : None,
'compiler_f77' : [None],
'compiler_fix' : [None],
'compiler_f90' : [None],
'linker_so' : [None],
'archiver' : [ar_exe, "/verbose", "/OUT:"],
'ranlib' : None
}
compile_switch = '/c '
object_switch = '/Fo' # No space after /Fo!
library_switch = '/OUT:' # No space after /OUT:!
module_dir_switch = '/module:' # No space after /module:
module_include_switch = '/I'
def get_flags(self):
opt = ['/nologo', '/MD', '/nbs', '/names:lowercase',
'/assume:underscore', '/fpp']
return opt
def get_flags_free(self):
return []
def get_flags_debug(self):
return ['/4Yb', '/d2']
def get_flags_opt(self):
return ['/O1', '/assume:minus0'] # Scipy test failures with /O2
def get_flags_arch(self):
return ["/arch:IA32", "/QaxSSE3"]
def runtime_library_dir_option(self, dir):
raise NotImplementedError
class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
compiler_type = 'intelev'
description = 'Intel Visual Fortran Compiler for Itanium apps'
version_match = intel_version_match('Itanium')
possible_executables = ['efl'] # XXX this is a wild guess
ar_exe = IntelVisualFCompiler.ar_exe
executables = {
'version_cmd' : None,
'compiler_f77' : [None, "-FI", "-w90", "-w95"],
'compiler_fix' : [None, "-FI", "-4L72", "-w"],
'compiler_f90' : [None],
'linker_so' : ['<F90>', "-shared"],
'archiver' : [ar_exe, "/verbose", "/OUT:"],
'ranlib' : None
}
class IntelEM64VisualFCompiler(IntelVisualFCompiler):
compiler_type = 'intelvem'
description = 'Intel Visual Fortran Compiler for 64-bit apps'
version_match = simple_version_match(start=r'Intel\(R\).*?64,')
def get_flags_arch(self):
return []
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='intel').get_version())
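An illustrative sketch (not part of the file above; the banner text is invented) of the matcher that intel_version_match builds. The returned function is normally bound as a method, so a placeholder stands in for self:

from numpy.distutils.fcompiler.intel import intel_version_match

match = intel_version_match('32-bit|IA-32')
banner = 'Intel(R) Fortran IA-32 Compiler Professional, Version 11.1'
print(match(None, banner))  # -> '11.1'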

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/__init__.py

"""numpy.distutils.fcompiler
Contains FCompiler, an abstract base class that defines the interface
for the numpy.distutils Fortran compiler abstraction model.
Terminology:
To be consistent, where the term 'executable' is used, it means the single
file, like 'gcc', that is executed, and should be a string. In contrast,
'command' means the entire command line, like ['gcc', '-c', 'file.c'], and
should be a list.
But note that FCompiler.executables is actually a dictionary of commands.
"""
__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',
'dummy_fortran_file']
import os
import sys
import re
from distutils.sysconfig import get_python_lib
from distutils.fancy_getopt import FancyGetopt
from distutils.errors import DistutilsModuleError, \
DistutilsExecError, CompileError, LinkError, DistutilsPlatformError
from distutils.util import split_quoted, strtobool
from numpy.distutils.ccompiler import CCompiler, gen_lib_options
from numpy.distutils import log
from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
make_temp_file, get_shared_lib_extension
from numpy.distutils.exec_command import find_executable
from numpy.distutils import _shell_utils
from .environment import EnvironmentConfig
__metaclass__ = type
class CompilerNotFound(Exception):
pass
def flaglist(s):
if is_string(s):
return split_quoted(s)
else:
return s
def str2bool(s):
if is_string(s):
return strtobool(s)
return bool(s)
def is_sequence_of_strings(seq):
return is_sequence(seq) and all_strings(seq)
class FCompiler(CCompiler):
"""Abstract base class to define the interface that must be implemented
by real Fortran compiler classes.
Methods that subclasses may redefine:
update_executables(), find_executables(), get_version()
get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
get_flags_arch_f90(), get_flags_debug_f90(),
get_flags_fix(), get_flags_linker_so()
DON'T call these methods (except get_version) after
constructing a compiler instance or inside any other method.
All methods, except update_executables() and find_executables(),
may call the get_version() method.
After constructing a compiler instance, always call customize(dist=None)
method that finalizes compiler construction and makes the following
attributes available:
compiler_f77
compiler_f90
compiler_fix
linker_so
archiver
ranlib
libraries
library_dirs
"""
# These are the environment variables and distutils keys used.
# Each configuration description is
# (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>, <append>)
# The hook names are handled by the self._environment_hook method.
# - names starting with 'self.' call methods in this class
# - names starting with 'exe.' return the key in the executables dict
# - names like 'flags.YYY' return self.get_flag_YYY()
# convert is either None or a function to convert a string to the
# appropriate type used.
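    # For example, the flag_vars entry below,
    #     opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),
    # means: call self.get_flags_opt() for the hooked default, let the
    # FOPT environment variable extend it (append=True), accept an 'opt'
    # key in the [config_fc] section, and parse string values with
    # flaglist().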
distutils_vars = EnvironmentConfig(
distutils_section='config_fc',
noopt = (None, None, 'noopt', str2bool, False),
noarch = (None, None, 'noarch', str2bool, False),
debug = (None, None, 'debug', str2bool, False),
verbose = (None, None, 'verbose', str2bool, False),
)
command_vars = EnvironmentConfig(
distutils_section='config_fc',
compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),
compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),
compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),
version_cmd = ('exe.version_cmd', None, None, None, False),
linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),
linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),
archiver = (None, 'AR', 'ar', None, False),
ranlib = (None, 'RANLIB', 'ranlib', None, False),
)
flag_vars = EnvironmentConfig(
distutils_section='config_fc',
f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),
f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),
free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),
fix = ('flags.fix', None, None, flaglist, False),
opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),
opt_f77 = ('flags.opt_f77', None, None, flaglist, False),
opt_f90 = ('flags.opt_f90', None, None, flaglist, False),
arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),
arch_f77 = ('flags.arch_f77', None, None, flaglist, False),
arch_f90 = ('flags.arch_f90', None, None, flaglist, False),
debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),
debug_f77 = ('flags.debug_f77', None, None, flaglist, False),
debug_f90 = ('flags.debug_f90', None, None, flaglist, False),
flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),
linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),
linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),
ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),
)
language_map = {'.f': 'f77',
'.for': 'f77',
'.F': 'f77', # XXX: needs preprocessor
'.ftn': 'f77',
'.f77': 'f77',
'.f90': 'f90',
'.F90': 'f90', # XXX: needs preprocessor
'.f95': 'f90',
}
language_order = ['f90', 'f77']
# These will be set by the subclass
compiler_type = None
compiler_aliases = ()
version_pattern = None
possible_executables = []
executables = {
'version_cmd': ["f77", "-v"],
'compiler_f77': ["f77"],
'compiler_f90': ["f90"],
'compiler_fix': ["f90", "-fixed"],
'linker_so': ["f90", "-shared"],
'linker_exe': ["f90"],
'archiver': ["ar", "-cr"],
'ranlib': None,
}
# If compiler does not support compiling Fortran 90 then it can
# suggest using another compiler. For example, gnu would suggest
# gnu95 compiler type when there are F90 sources.
suggested_f90_compiler = None
compile_switch = "-c"
object_switch = "-o " # Ending space matters! It will be stripped
# but if it is missing then object_switch
# will be prefixed to object file name by
# string concatenation.
library_switch = "-o " # Ditto!
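    # For example, object_switch = "-o " produces ["-o", "obj.o"] in the
    # spawned command, while a switch without a trailing space, such as
    # the "/Fo" used by IntelVisualFCompiler, produces ["/Foobj.o"].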
# Switch to specify where module files are created and searched
# for USE statement. Normally it is a string and also here ending
# space matters. See above.
module_dir_switch = None
# Switch to specify where module files are searched for USE statement.
module_include_switch = '-I'
pic_flags = [] # Flags to create position-independent code
src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR']
obj_extension = ".o"
shared_lib_extension = get_shared_lib_extension()
static_lib_extension = ".a" # or .lib
static_lib_format = "lib%s%s" # or %s%s
shared_lib_format = "%s%s"
exe_extension = ""
_exe_cache = {}
_executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe', 'archiver',
'ranlib']
# This will be set by new_fcompiler when called in
# command/{build_ext.py, build_clib.py, config.py} files.
c_compiler = None
# extra_{f77,f90}_compile_args are set by build_ext.build_extension method
extra_f77_compile_args = []
extra_f90_compile_args = []
def __init__(self, *args, **kw):
CCompiler.__init__(self, *args, **kw)
self.distutils_vars = self.distutils_vars.clone(self._environment_hook)
self.command_vars = self.command_vars.clone(self._environment_hook)
self.flag_vars = self.flag_vars.clone(self._environment_hook)
self.executables = self.executables.copy()
for e in self._executable_keys:
if e not in self.executables:
self.executables[e] = None
# Some methods depend on .customize() being called first, so
# this keeps track of whether that's happened yet.
self._is_customised = False
def __copy__(self):
obj = self.__new__(self.__class__)
obj.__dict__.update(self.__dict__)
obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)
obj.command_vars = obj.command_vars.clone(obj._environment_hook)
obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)
obj.executables = obj.executables.copy()
return obj
def copy(self):
return self.__copy__()
# Use properties for the attributes used by CCompiler. Setting them
# as attributes from the self.executables dictionary is error-prone,
# so we get them from there each time.
def _command_property(key):
def fget(self):
assert self._is_customised
return self.executables[key]
return property(fget=fget)
version_cmd = _command_property('version_cmd')
compiler_f77 = _command_property('compiler_f77')
compiler_f90 = _command_property('compiler_f90')
compiler_fix = _command_property('compiler_fix')
linker_so = _command_property('linker_so')
linker_exe = _command_property('linker_exe')
archiver = _command_property('archiver')
ranlib = _command_property('ranlib')
# Make our terminology consistent.
def set_executable(self, key, value):
self.set_command(key, value)
def set_commands(self, **kw):
for k, v in kw.items():
self.set_command(k, v)
def set_command(self, key, value):
if not key in self._executable_keys:
raise ValueError(
"unknown executable '%s' for class %s" %
(key, self.__class__.__name__))
if is_string(value):
value = split_quoted(value)
assert value is None or is_sequence_of_strings(value[1:]), (key, value)
self.executables[key] = value
######################################################################
## Methods that subclasses may redefine. But don't call these methods!
## They are private to FCompiler class and may return unexpected
    ## results if used elsewhere. So, you have been warned.
def find_executables(self):
"""Go through the self.executables dictionary, and attempt to
find and assign appropriate executables.
Executable names are looked for in the environment (environment
variables, the distutils.cfg, and command line), the 0th-element of
the command list, and the self.possible_executables list.
Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
or the Fortran 90 compiler executable is used, unless overridden
by an environment setting.
Subclasses should call this if overridden.
"""
assert self._is_customised
exe_cache = self._exe_cache
def cached_find_executable(exe):
if exe in exe_cache:
return exe_cache[exe]
fc_exe = find_executable(exe)
exe_cache[exe] = exe_cache[fc_exe] = fc_exe
return fc_exe
def verify_command_form(name, value):
if value is not None and not is_sequence_of_strings(value):
raise ValueError(
"%s value %r is invalid in class %s" %
(name, value, self.__class__.__name__))
def set_exe(exe_key, f77=None, f90=None):
cmd = self.executables.get(exe_key, None)
if not cmd:
return None
# Note that we get cmd[0] here if the environment doesn't
# have anything set
exe_from_environ = getattr(self.command_vars, exe_key)
if not exe_from_environ:
possibles = [f90, f77] + self.possible_executables
else:
possibles = [exe_from_environ] + self.possible_executables
seen = set()
unique_possibles = []
for e in possibles:
if e == '<F77>':
e = f77
elif e == '<F90>':
e = f90
if not e or e in seen:
continue
seen.add(e)
unique_possibles.append(e)
for exe in unique_possibles:
fc_exe = cached_find_executable(exe)
if fc_exe:
cmd[0] = fc_exe
return fc_exe
self.set_command(exe_key, None)
return None
ctype = self.compiler_type
f90 = set_exe('compiler_f90')
if not f90:
f77 = set_exe('compiler_f77')
if f77:
log.warn('%s: no Fortran 90 compiler found' % ctype)
else:
                raise CompilerNotFound('%s: neither f90 nor f77 compiler found' % ctype)
else:
f77 = set_exe('compiler_f77', f90=f90)
if not f77:
log.warn('%s: no Fortran 77 compiler found' % ctype)
set_exe('compiler_fix', f90=f90)
set_exe('linker_so', f77=f77, f90=f90)
set_exe('linker_exe', f77=f77, f90=f90)
set_exe('version_cmd', f77=f77, f90=f90)
set_exe('archiver')
set_exe('ranlib')
def update_executables(self):
"""Called at the beginning of customisation. Subclasses should
override this if they need to set up the executables dictionary.
Note that self.find_executables() is run afterwards, so the
self.executables dictionary values can contain <F77> or <F90> as
the command, which will be replaced by the found F77 or F90
compiler.
"""
pass
def get_flags(self):
"""List of flags common to all compiler types."""
return [] + self.pic_flags
def _get_command_flags(self, key):
cmd = self.executables.get(key, None)
if cmd is None:
return []
return cmd[1:]
def get_flags_f77(self):
"""List of Fortran 77 specific flags."""
return self._get_command_flags('compiler_f77')
def get_flags_f90(self):
"""List of Fortran 90 specific flags."""
return self._get_command_flags('compiler_f90')
def get_flags_free(self):
"""List of Fortran 90 free format specific flags."""
return []
def get_flags_fix(self):
"""List of Fortran 90 fixed format specific flags."""
return self._get_command_flags('compiler_fix')
def get_flags_linker_so(self):
"""List of linker flags to build a shared library."""
return self._get_command_flags('linker_so')
def get_flags_linker_exe(self):
"""List of linker flags to build an executable."""
return self._get_command_flags('linker_exe')
def get_flags_ar(self):
"""List of archiver flags. """
return self._get_command_flags('archiver')
def get_flags_opt(self):
"""List of architecture independent compiler flags."""
return []
def get_flags_arch(self):
"""List of architecture dependent compiler flags."""
return []
def get_flags_debug(self):
"""List of compiler flags to compile with debugging information."""
return []
get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt
get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch
get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug
def get_libraries(self):
"""List of compiler libraries."""
return self.libraries[:]
def get_library_dirs(self):
"""List of compiler library directories."""
return self.library_dirs[:]
def get_version(self, force=False, ok_status=[0]):
assert self._is_customised
version = CCompiler.get_version(self, force=force, ok_status=ok_status)
if version is None:
raise CompilerNotFound()
return version
############################################################
## Public methods:
def customize(self, dist = None):
"""Customize Fortran compiler.
This method gets Fortran compiler specific information from
(i) class definition, (ii) environment, (iii) distutils config
files, and (iv) command line (later overrides earlier).
This method should be always called after constructing a
compiler instance. But not in __init__ because Distribution
instance is needed for (iii) and (iv).
"""
log.info('customize %s' % (self.__class__.__name__))
self._is_customised = True
self.distutils_vars.use_distribution(dist)
self.command_vars.use_distribution(dist)
self.flag_vars.use_distribution(dist)
self.update_executables()
# find_executables takes care of setting the compiler commands,
# version_cmd, linker_so, linker_exe, ar, and ranlib
self.find_executables()
noopt = self.distutils_vars.get('noopt', False)
noarch = self.distutils_vars.get('noarch', noopt)
debug = self.distutils_vars.get('debug', False)
f77 = self.command_vars.compiler_f77
f90 = self.command_vars.compiler_f90
f77flags = []
f90flags = []
freeflags = []
fixflags = []
if f77:
f77 = _shell_utils.NativeParser.split(f77)
f77flags = self.flag_vars.f77
if f90:
f90 = _shell_utils.NativeParser.split(f90)
f90flags = self.flag_vars.f90
freeflags = self.flag_vars.free
# XXX Assuming that free format is default for f90 compiler.
fix = self.command_vars.compiler_fix
        # NOTE: this and similar examples are probably just
        # excluding the --coverage flag when F90 = "gfortran --coverage",
        # instead of putting that flag somewhere more appropriate.
        # Cases like this, where a Fortran compiler environment variable
        # has been customized by CI or a user, should perhaps eventually
        # be more thoroughly tested and more robustly handled.
if fix:
fix = _shell_utils.NativeParser.split(fix)
fixflags = self.flag_vars.fix + f90flags
oflags, aflags, dflags = [], [], []
# examine get_flags_<tag>_<compiler> for extra flags
# only add them if the method is different from get_flags_<tag>
def get_flags(tag, flags):
# note that self.flag_vars.<tag> calls self.get_flags_<tag>()
flags.extend(getattr(self.flag_vars, tag))
this_get = getattr(self, 'get_flags_' + tag)
for name, c, flagvar in [('f77', f77, f77flags),
('f90', f90, f90flags),
('f90', fix, fixflags)]:
t = '%s_%s' % (tag, name)
if c and this_get is not getattr(self, 'get_flags_' + t):
flagvar.extend(getattr(self.flag_vars, t))
if not noopt:
get_flags('opt', oflags)
if not noarch:
get_flags('arch', aflags)
if debug:
get_flags('debug', dflags)
fflags = self.flag_vars.flags + dflags + oflags + aflags
if f77:
self.set_commands(compiler_f77=f77+f77flags+fflags)
if f90:
self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
if fix:
self.set_commands(compiler_fix=fix+fixflags+fflags)
#XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
linker_so = self.linker_so
if linker_so:
linker_so_flags = self.flag_vars.linker_so
if sys.platform.startswith('aix'):
python_lib = get_python_lib(standard_lib=1)
ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
python_exp = os.path.join(python_lib, 'config', 'python.exp')
linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
if sys.platform.startswith('os400'):
from distutils.sysconfig import get_config_var
python_config = get_config_var('LIBPL')
ld_so_aix = os.path.join(python_config, 'ld_so_aix')
python_exp = os.path.join(python_config, 'python.exp')
linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
self.set_commands(linker_so=linker_so+linker_so_flags)
linker_exe = self.linker_exe
if linker_exe:
linker_exe_flags = self.flag_vars.linker_exe
self.set_commands(linker_exe=linker_exe+linker_exe_flags)
ar = self.command_vars.archiver
if ar:
arflags = self.flag_vars.ar
self.set_commands(archiver=[ar]+arflags)
self.set_library_dirs(self.get_library_dirs())
self.set_libraries(self.get_libraries())
def dump_properties(self):
"""Print out the attributes of a compiler instance."""
props = []
for key in list(self.executables.keys()) + \
['version', 'libraries', 'library_dirs',
'object_switch', 'compile_switch']:
if hasattr(self, key):
v = getattr(self, key)
props.append((key, None, '= '+repr(v)))
props.sort()
pretty_printer = FancyGetopt(props)
for l in pretty_printer.generate_help("%s instance properties:" \
% (self.__class__.__name__)):
if l[:4]==' --':
l = ' ' + l[4:]
print(l)
###################
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
src_flags = {}
if is_f_file(src) and not has_f90_header(src):
flavor = ':f77'
compiler = self.compiler_f77
src_flags = get_f77flags(src)
extra_compile_args = self.extra_f77_compile_args or []
elif is_free_format(src):
flavor = ':f90'
compiler = self.compiler_f90
if compiler is None:
raise DistutilsExecError('f90 not supported by %s needed for %s'\
% (self.__class__.__name__, src))
extra_compile_args = self.extra_f90_compile_args or []
else:
flavor = ':fix'
compiler = self.compiler_fix
if compiler is None:
raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\
% (self.__class__.__name__, src))
extra_compile_args = self.extra_f90_compile_args or []
if self.object_switch[-1]==' ':
o_args = [self.object_switch.strip(), obj]
else:
o_args = [self.object_switch.strip()+obj]
assert self.compile_switch.strip()
s_args = [self.compile_switch, src]
if extra_compile_args:
log.info('extra %s options: %r' \
% (flavor[1:], ' '.join(extra_compile_args)))
extra_flags = src_flags.get(self.compiler_type, [])
if extra_flags:
log.info('using compile options from source: %r' \
% ' '.join(extra_flags))
command = compiler + cc_args + extra_flags + s_args + o_args \
+ extra_postargs + extra_compile_args
display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
src)
try:
self.spawn(command, display=display)
except DistutilsExecError as e:
msg = str(e)
raise CompileError(msg) from None
def module_options(self, module_dirs, module_build_dir):
options = []
if self.module_dir_switch is not None:
if self.module_dir_switch[-1]==' ':
options.extend([self.module_dir_switch.strip(), module_build_dir])
else:
options.append(self.module_dir_switch.strip()+module_build_dir)
else:
print('XXX: module_build_dir=%r option ignored' % (module_build_dir))
print('XXX: Fix module_dir_switch for ', self.__class__.__name__)
if self.module_include_switch is not None:
for d in [module_build_dir]+module_dirs:
options.append('%s%s' % (self.module_include_switch, d))
else:
print('XXX: module_dirs=%r option ignored' % (module_dirs))
print('XXX: Fix module_include_switch for ', self.__class__.__name__)
return options
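    # For example, with module_dir_switch = '-module ' and
    # module_include_switch = '-I' (the IntelFCompiler settings),
    # module_options(['/opt/mods'], '/tmp/build') returns
    # ['-module', '/tmp/build', '-I/tmp/build', '-I/opt/mods'].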
def library_option(self, lib):
return "-l" + lib
def library_dir_option(self, dir):
return "-L" + dir
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = \
self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if is_string(output_dir):
output_filename = os.path.join(output_dir, output_filename)
elif output_dir is not None:
raise TypeError("'output_dir' must be a string or None")
if self._need_link(objects, output_filename):
if self.library_switch[-1]==' ':
o_args = [self.library_switch.strip(), output_filename]
else:
o_args = [self.library_switch.strip()+output_filename]
if is_string(self.objects):
ld_args = objects + [self.objects]
else:
ld_args = objects + self.objects
ld_args = ld_args + lib_opts + o_args
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
command = linker + ld_args
try:
self.spawn(command)
except DistutilsExecError as e:
msg = str(e)
raise LinkError(msg) from None
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _environment_hook(self, name, hook_name):
if hook_name is None:
return None
if is_string(hook_name):
if hook_name.startswith('self.'):
hook_name = hook_name[5:]
hook = getattr(self, hook_name)
return hook()
elif hook_name.startswith('exe.'):
hook_name = hook_name[4:]
var = self.executables[hook_name]
if var:
return var[0]
else:
return None
elif hook_name.startswith('flags.'):
hook_name = hook_name[6:]
hook = getattr(self, 'get_flags_' + hook_name)
return hook()
else:
return hook_name()
def can_ccompiler_link(self, ccompiler):
"""
Check if the given C compiler can link objects produced by
this compiler.
"""
return True
def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
"""
Convert a set of object files that are not compatible with the default
linker, to a file that is compatible.
Parameters
----------
objects : list
List of object files to include.
output_dir : str
Output directory to place generated object files.
extra_dll_dir : str
Output directory to place extra DLL files that need to be
included on Windows.
Returns
-------
converted_objects : list of str
List of converted object files.
Note that the number of output files is not necessarily
the same as inputs.
"""
raise NotImplementedError()
## class FCompiler
_default_compilers = (
# sys.platform mappings
('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',
'intelvem', 'intelem', 'flang')),
('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),
('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag',
'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95',
'pathf95', 'nagfor', 'fujitsu')),
('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu',
'g95', 'pg')),
('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),
('irix.*', ('mips', 'gnu', 'gnu95',)),
('aix.*', ('ibm', 'gnu', 'gnu95',)),
# os.name mappings
('posix', ('gnu', 'gnu95',)),
('nt', ('gnu', 'gnu95',)),
('mac', ('gnu95', 'gnu', 'pg')),
)
fcompiler_class = None
fcompiler_aliases = None
def load_all_fcompiler_classes():
"""Cache all the FCompiler classes found in modules in the
numpy.distutils.fcompiler package.
"""
from glob import glob
global fcompiler_class, fcompiler_aliases
if fcompiler_class is not None:
return
pys = os.path.join(os.path.dirname(__file__), '*.py')
fcompiler_class = {}
fcompiler_aliases = {}
for fname in glob(pys):
module_name, ext = os.path.splitext(os.path.basename(fname))
module_name = 'numpy.distutils.fcompiler.' + module_name
__import__ (module_name)
module = sys.modules[module_name]
if hasattr(module, 'compilers'):
for cname in module.compilers:
klass = getattr(module, cname)
desc = (klass.compiler_type, klass, klass.description)
fcompiler_class[klass.compiler_type] = desc
for alias in klass.compiler_aliases:
if alias in fcompiler_aliases:
raise ValueError("alias %r defined for both %s and %s"
% (alias, klass.__name__,
fcompiler_aliases[alias][1].__name__))
fcompiler_aliases[alias] = desc
def _find_existing_fcompiler(compiler_types,
osname=None, platform=None,
requiref90=False,
c_compiler=None):
from numpy.distutils.core import get_distribution
dist = get_distribution(always=True)
for compiler_type in compiler_types:
v = None
try:
c = new_fcompiler(plat=platform, compiler=compiler_type,
c_compiler=c_compiler)
c.customize(dist)
v = c.get_version()
if requiref90 and c.compiler_f90 is None:
v = None
new_compiler = c.suggested_f90_compiler
if new_compiler:
                    log.warn('Trying %r compiler as suggested by %r '
                             'compiler for f90 support.' % (new_compiler,
                                                            compiler_type))
c = new_fcompiler(plat=platform, compiler=new_compiler,
c_compiler=c_compiler)
c.customize(dist)
v = c.get_version()
if v is not None:
compiler_type = new_compiler
if requiref90 and c.compiler_f90 is None:
raise ValueError('%s does not support compiling f90 codes, '
'skipping.' % (c.__class__.__name__))
except DistutilsModuleError:
log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)
except CompilerNotFound:
log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)
if v is not None:
return compiler_type
return None
def available_fcompilers_for_platform(osname=None, platform=None):
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
matching_compiler_types = []
for pattern, compiler_type in _default_compilers:
if re.match(pattern, platform) or re.match(pattern, osname):
for ct in compiler_type:
if ct not in matching_compiler_types:
matching_compiler_types.append(ct)
if not matching_compiler_types:
matching_compiler_types.append('gnu')
return matching_compiler_types
def get_default_fcompiler(osname=None, platform=None, requiref90=False,
c_compiler=None):
"""Determine the default Fortran compiler to use for the given
platform."""
matching_compiler_types = available_fcompilers_for_platform(osname,
platform)
log.info("get_default_fcompiler: matching types: '%s'",
matching_compiler_types)
compiler_type = _find_existing_fcompiler(matching_compiler_types,
osname=osname,
platform=platform,
requiref90=requiref90,
c_compiler=c_compiler)
return compiler_type
# Flag to avoid rechecking for Fortran compiler every time
failed_fcompilers = set()
def new_fcompiler(plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0,
requiref90=False,
c_compiler = None):
"""Generate an instance of some FCompiler subclass for the supplied
platform/compiler combination.
"""
global failed_fcompilers
fcompiler_key = (plat, compiler)
if fcompiler_key in failed_fcompilers:
return None
load_all_fcompiler_classes()
if plat is None:
plat = os.name
if compiler is None:
compiler = get_default_fcompiler(plat, requiref90=requiref90,
c_compiler=c_compiler)
if compiler in fcompiler_class:
module_name, klass, long_description = fcompiler_class[compiler]
elif compiler in fcompiler_aliases:
module_name, klass, long_description = fcompiler_aliases[compiler]
else:
msg = "don't know how to compile Fortran code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler." % compiler
        msg = msg + " Supported compilers are: %s." \
              % (','.join(fcompiler_class.keys()))
log.warn(msg)
failed_fcompilers.add(fcompiler_key)
return None
compiler = klass(verbose=verbose, dry_run=dry_run, force=force)
compiler.c_compiler = c_compiler
return compiler
def show_fcompilers(dist=None):
"""Print list of available compilers (used by the "--help-fcompiler"
option to "config_fc").
"""
if dist is None:
from distutils.dist import Distribution
from numpy.distutils.command.config_compiler import config_fc
dist = Distribution()
dist.script_name = os.path.basename(sys.argv[0])
dist.script_args = ['config_fc'] + sys.argv[1:]
try:
dist.script_args.remove('--help-fcompiler')
except ValueError:
pass
dist.cmdclass['config_fc'] = config_fc
dist.parse_config_files()
dist.parse_command_line()
compilers = []
compilers_na = []
compilers_ni = []
if not fcompiler_class:
load_all_fcompiler_classes()
platform_compilers = available_fcompilers_for_platform()
for compiler in platform_compilers:
v = None
log.set_verbosity(-2)
try:
c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
c.customize(dist)
v = c.get_version()
except (DistutilsModuleError, CompilerNotFound) as e:
log.debug("show_fcompilers: %s not found" % (compiler,))
log.debug(repr(e))
if v is None:
compilers_na.append(("fcompiler="+compiler, None,
fcompiler_class[compiler][2]))
else:
c.dump_properties()
compilers.append(("fcompiler="+compiler, None,
fcompiler_class[compiler][2] + ' (%s)' % v))
compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))
compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])
for fc in compilers_ni]
compilers.sort()
compilers_na.sort()
compilers_ni.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("Fortran compilers found:")
pretty_printer = FancyGetopt(compilers_na)
pretty_printer.print_help("Compilers available for this "
"platform, but not found:")
if compilers_ni:
pretty_printer = FancyGetopt(compilers_ni)
pretty_printer.print_help("Compilers not available on this platform:")
print("For compiler details, run 'config_fc --verbose' setup command.")
def dummy_fortran_file():
fo, name = make_temp_file(suffix='.f')
fo.write(" subroutine dummy()\n end\n")
fo.close()
return name[:-2]
is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search
_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search
_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search
_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
with open(file, encoding='latin1') as f:
line = f.readline()
n = 10000 # the number of non-comment lines to scan for hints
if _has_f_header(line) or _has_fix_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n>0 and line:
line = line.rstrip()
if line and line[0]!='!':
n -= 1
if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
result = 1
break
line = f.readline()
return result
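# Illustrative behaviour (hypothetical file contents): a first
# non-comment line such as "      x = 1" leaves the result at 0 (fixed
# form), while "program main" or any line ending in '&' flips it to 1
# (free form); a "-*- f90 -*-" header line short-circuits to free form.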
def has_f90_header(src):
with open(src, encoding='latin1') as f:
line = f.readline()
return _has_f90_header(line) or _has_fix_header(line)
_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
def get_f77flags(src):
"""
Search the first 20 lines of fortran 77 code for line pattern
`CF77FLAGS(<fcompiler type>)=<f77 flags>`
Return a dictionary {<fcompiler type>:<f77 flags>}.
"""
flags = {}
with open(src, encoding='latin1') as f:
i = 0
for line in f:
i += 1
if i>20: break
m = _f77flags_re.match(line)
if not m: continue
fcname = m.group('fcname').strip()
fflags = m.group('fflags').strip()
flags[fcname] = split_quoted(fflags)
return flags
# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
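# Illustrative, hypothetical usage of get_f77flags(): for a source file
# whose first 20 lines contain
#     CF77FLAGS(gnu) = -fno-automatic -finit-local-zero
# it returns {'gnu': ['-fno-automatic', '-finit-local-zero']}, and
# _compile() appends those flags when the active compiler_type is 'gnu'.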
if __name__ == '__main__':
show_fcompilers()
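
A minimal usage sketch for the module above, assuming numpy.distutils is importable and gfortran is installed (new_fcompiler returns None when the compiler type is unknown for the platform):

from numpy.distutils.fcompiler import new_fcompiler

fc = new_fcompiler(compiler='gnu95')
if fc is not None:
    fc.customize()           # required before most other methods
    print(fc.get_version())  # e.g. '10.2.0'
    print(fc.compiler_f90)   # full command list, e.g. ['/usr/bin/gfortran', ...]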

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/hpux.py

from numpy.distutils.fcompiler import FCompiler
compilers = ['HPUXFCompiler']
class HPUXFCompiler(FCompiler):
compiler_type = 'hpux'
description = 'HP Fortran 90 Compiler'
version_pattern = r'HP F90 (?P<version>[^\s*,]*)'
executables = {
'version_cmd' : ["f90", "+version"],
'compiler_f77' : ["f90"],
'compiler_fix' : ["f90"],
'compiler_f90' : ["f90"],
'linker_so' : ["ld", "-b"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX: fix me
module_include_switch = None #XXX: fix me
pic_flags = ['+Z']
def get_flags(self):
return self.pic_flags + ['+ppu', '+DD64']
def get_flags_opt(self):
return ['-O3']
def get_libraries(self):
return ['m']
def get_library_dirs(self):
opt = ['/usr/lib/hpux64']
return opt
def get_version(self, force=0, ok_status=[256, 0, 1]):
# XXX status==256 may indicate 'unrecognized option' or
# 'no input file'. So, version_cmd needs more work.
return FCompiler.get_version(self, force, ok_status)
if __name__ == '__main__':
from distutils import log
log.set_verbosity(10)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='hpux').get_version())

File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/gnu.py

import re
import os
import sys
import warnings
import platform
import tempfile
import hashlib
import base64
import subprocess
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.fcompiler import FCompiler
from distutils.version import LooseVersion
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77', )
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
version_string =\
version_string[version_string.find('\n') + 1:].strip()
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
# gfortrans may still return long version strings (``-dumpversion`` was
# an alias for ``--version``)
if len(version_string) <= 20:
# Try to find a valid version string
m = re.search(r'([0-9.]+)', version_string)
if m:
# g77 provides a longer version string that starts with GNU
# Fortran
if version_string.startswith('GNU Fortran'):
return ('g77', m.group(1))
# gfortran only outputs a version string such as #.#.#, so check
# if the match is at the start of the string
elif m.start() == 0:
return ('gfortran', m.group(1))
else:
# Output probably from --version, try harder:
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.search(
r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
# If still nothing, raise an error to make the problem easy to find.
err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
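    # For instance (hypothetical banners):
    #   gnu_version_match('GNU Fortran 0.5.25')  ->  ('g77', '0.5.25')
    #   gnu_version_match('10.2.0')              ->  ('gfortran', '10.2.0')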
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
# Makefile used to build Python. We let distutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from sysconfig and then
                # fall back to setting it to 10.9. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import sysconfig
target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if not target:
target = '10.9'
s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}'
warnings.warn(s, stacklevel=2)
os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
            # static library libg2c.a. The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
try:
output = subprocess.check_output(self.compiler_f77 +
['-print-libgcc-file-name'])
except (OSError, subprocess.CalledProcessError):
pass
else:
output = filepath_from_subprocess_output(output)
return os.path.dirname(output)
return None
def get_libgfortran_dir(self):
if sys.platform[:5] == 'linux':
libgfortran_name = 'libgfortran.so'
elif sys.platform == 'darwin':
libgfortran_name = 'libgfortran.dylib'
else:
libgfortran_name = None
libgfortran_dir = None
if libgfortran_name:
find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)]
try:
output = subprocess.check_output(
self.compiler_f77 + find_lib_arg)
except (OSError, subprocess.CalledProcessError):
pass
else:
output = filepath_from_subprocess_output(output)
libgfortran_dir = os.path.dirname(output)
return libgfortran_dir
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
root = os.path.join(d, *((os.pardir, ) * 4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
opt.append(d2)
opt.append(d)
# For Macports / Linux, libgfortran and libgcc are not co-located
lib_gfortran_dir = self.get_libgfortran_dir()
if lib_gfortran_dir:
opt.append(lib_gfortran_dir)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d, f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type == 'msvc':
opt.append('gcc')
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
else:
opt = ['-O3']
opt.append('-funroll-loops')
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
if sys.platform == 'win32' or sys.platform == 'cygwin':
# Linux/Solaris/Unix support RPATH, Windows does not
raise NotImplementedError
# TODO: could use -Xlinker here, if it's supported
assert "," not in dir
if sys.platform == 'darwin':
return f'-Wl,-rpath,{dir}'
elif sys.platform.startswith(('aix', 'os400')):
# AIX RPATH is called LIBPATH
return f'-Wl,-blibpath:{dir}'
else:
return f'-Wl,-rpath={dir}'
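    # Illustrative results for runtime_library_dir_option('/opt/lib'):
    #   darwin       -> '-Wl,-rpath,/opt/lib'
    #   aix/os400    -> '-Wl,-blibpath:/opt/lib'
    #   other posix  -> '-Wl,-rpath=/opt/lib'
    # (win32/cygwin raise NotImplementedError, as above)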
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran', )
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if LooseVersion(v) >= "4":
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
for key in [
'version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe'
]:
self.executables[key].append('-mno-cygwin')
return v
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
"-fno-second-underscore"],
'compiler_f90' : [None, "-Wall", "-g",
"-fno-second-underscore"],
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
"-fno-second-underscore"],
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
if sys.platform.startswith(('aix', 'os400')):
executables['linker_so'].append('-lpthread')
if platform.architecture()[0][:2] == '64':
for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']:
executables[key].append('-maix64')
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir, ) * 4))
path = os.path.join(root, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
# For Macports / Linux, libgfortran and libgcc are not co-located
lib_gfortran_dir = self.get_libgfortran_dir()
if lib_gfortran_dir:
opt.append(lib_gfortran_dir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i + 1, "mingwex")
opt.insert(i + 1, "mingw32")
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
else:
pass
return opt
def get_target(self):
try:
p = subprocess.Popen(
self.compiler_f77 + ['-v'],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate()
output = (stdout or b"") + (stderr or b"")
except (OSError, subprocess.CalledProcessError):
pass
else:
output = filepath_from_subprocess_output(output)
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
def _hash_files(self, filenames):
h = hashlib.sha1()
for fn in filenames:
with open(fn, 'rb') as f:
while True:
block = f.read(131072)
if not block:
break
h.update(block)
text = base64.b32encode(h.digest())
text = text.decode('ascii')
return text.rstrip('=')
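    # Note: a SHA-1 digest is 20 bytes, which base32-encodes to exactly 32
    # characters with no '=' padding, so the rstrip above is a safety no-op.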
def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir,
chained_dlls, is_archive):
"""Create a wrapper shared library for the given objects
Return an MSVC-compatible lib
"""
c_compiler = self.c_compiler
if c_compiler.compiler_type != "msvc":
raise ValueError("This method only supports MSVC")
object_hash = self._hash_files(list(objects) + list(chained_dlls))
if is_win64():
tag = 'win_amd64'
else:
tag = 'win32'
basename = 'lib' + os.path.splitext(
os.path.basename(objects[0]))[0][:8]
root_name = basename + '.' + object_hash + '.gfortran-' + tag
dll_name = root_name + '.dll'
def_name = root_name + '.def'
lib_name = root_name + '.lib'
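        # e.g. (illustrative) objects=['foo.o'] on 64-bit Windows gives
        # 'libfoo.<32-char-hash>.gfortran-win_amd64.dll' (plus .def/.lib)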
dll_path = os.path.join(extra_dll_dir, dll_name)
def_path = os.path.join(output_dir, def_name)
lib_path = os.path.join(output_dir, lib_name)
if os.path.isfile(lib_path):
# Nothing to do
return lib_path, dll_path
if is_archive:
objects = (["-Wl,--whole-archive"] + list(objects) +
["-Wl,--no-whole-archive"])
self.link_shared_object(
objects,
dll_name,
output_dir=extra_dll_dir,
extra_postargs=list(chained_dlls) + [
'-Wl,--allow-multiple-definition',
'-Wl,--output-def,' + def_path,
'-Wl,--export-all-symbols',
'-Wl,--enable-auto-import',
'-static',
'-mlong-double-64',
])
# No PowerPC!
if is_win64():
specifier = '/MACHINE:X64'
else:
specifier = '/MACHINE:X86'
# MSVC specific code
lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier]
if not c_compiler.initialized:
c_compiler.initialize()
c_compiler.spawn([c_compiler.lib] + lib_args)
return lib_path, dll_path
def can_ccompiler_link(self, compiler):
# MSVC cannot link objects compiled by GNU fortran
return compiler.compiler_type not in ("msvc", )
def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
"""
Convert a set of object files that are not compatible with the default
linker, to a file that is compatible.
"""
if self.c_compiler.compiler_type == "msvc":
# Compile a DLL and return the lib for the DLL as
# the object. Also keep track of previous DLLs that
# we have compiled so that we can link against them.
# If there are .a archives, assume they are self-contained
# static libraries, and build separate DLLs for each
archives = []
plain_objects = []
for obj in objects:
if obj.lower().endswith('.a'):
archives.append(obj)
else:
plain_objects.append(obj)
chained_libs = []
chained_dlls = []
for archive in archives[::-1]:
lib, dll = self._link_wrapper_lib(
[archive],
output_dir,
extra_dll_dir,
chained_dlls=chained_dlls,
is_archive=True)
chained_libs.insert(0, lib)
chained_dlls.insert(0, dll)
if not plain_objects:
return chained_libs
lib, dll = self._link_wrapper_lib(
plain_objects,
output_dir,
extra_dll_dir,
chained_dlls=chained_dlls,
is_archive=False)
return [lib] + chained_libs
else:
raise ValueError("Unsupported C compiler")
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
os.close(fid)
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
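# Illustrative (assumed) use: _can_target(['gfortran'], 'x86_64') returns True
# iff compiling a trivial temporary .f file with '-arch x86_64' succeeds.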
if __name__ == '__main__':
from distutils import log
from numpy.distutils import customized_fcompiler
log.set_verbosity(2)
print(customized_fcompiler('gnu').get_version())
try:
print(customized_fcompiler('g95').get_version())
except Exception as e:
print(e)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/nag.py
import sys
import re
from numpy.distutils.fcompiler import FCompiler
compilers = ['NAGFCompiler', 'NAGFORCompiler']
class BaseNAGFCompiler(FCompiler):
version_pattern = r'NAG.* Release (?P<version>[^(\s]*)'
def version_match(self, version_string):
m = re.search(self.version_pattern, version_string)
if m:
return m.group('version')
else:
return None
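    # Illustrative (assumed) strings matched by version_pattern:
    #   'NAGWare Fortran 95 compiler Release 5.1(327)'         -> '5.1'
    #   'NAG Fortran Compiler Release 6.2(Chiyoda) Build 6214' -> '6.2'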
def get_flags_linker_so(self):
return ["-Wl,-shared"]
def get_flags_opt(self):
return ['-O4']
def get_flags_arch(self):
return []
class NAGFCompiler(BaseNAGFCompiler):
compiler_type = 'nag'
description = 'NAGWare Fortran 95 Compiler'
executables = {
'version_cmd' : ["<F90>", "-V"],
'compiler_f77' : ["f95", "-fixed"],
'compiler_fix' : ["f95", "-fixed"],
'compiler_f90' : ["f95"],
'linker_so' : ["<F90>"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
def get_flags_linker_so(self):
if sys.platform == 'darwin':
return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress']
return BaseNAGFCompiler.get_flags_linker_so(self)
def get_flags_arch(self):
version = self.get_version()
if version and version < '5.1':
return ['-target=native']
else:
return BaseNAGFCompiler.get_flags_arch(self)
def get_flags_debug(self):
return ['-g', '-gline', '-g90', '-nan', '-C']
class NAGFORCompiler(BaseNAGFCompiler):
compiler_type = 'nagfor'
description = 'NAG Fortran Compiler'
executables = {
'version_cmd' : ["nagfor", "-V"],
'compiler_f77' : ["nagfor", "-fixed"],
'compiler_fix' : ["nagfor", "-fixed"],
'compiler_f90' : ["nagfor"],
'linker_so' : ["nagfor"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
def get_flags_linker_so(self):
if sys.platform == 'darwin':
return ['-unsharedrts',
'-Wl,-bundle,-flat_namespace,-undefined,suppress']
return BaseNAGFCompiler.get_flags_linker_so(self)
def get_flags_debug(self):
version = self.get_version()
if version and version > '6.1':
return ['-g', '-u', '-nan', '-C=all', '-thread_safe',
'-kind=unique', '-Warn=allocation', '-Warn=subnormal']
else:
return ['-g', '-nan', '-C=all', '-u', '-thread_safe']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
compiler = customized_fcompiler(compiler='nagfor')
print(compiler.get_version())
print(compiler.get_flags_debug())
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/arm.py
from __future__ import division, absolute_import, print_function
import sys
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
from sys import platform
from os.path import join, dirname, normpath
compilers = ['ArmFlangCompiler']
import functools
class ArmFlangCompiler(FCompiler):
compiler_type = 'arm'
description = 'Arm Compiler'
version_pattern = r'\s*Arm.*version (?P<version>[\d.-]+).*'
ar_exe = 'lib.exe'
possible_executables = ['armflang']
executables = {
'version_cmd': ["", "--version"],
'compiler_f77': ["armflang", "-fPIC"],
'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"],
'compiler_f90': ["armflang", "-fPIC"],
'linker_so': ["armflang", "-fPIC", "-shared"],
'archiver': ["ar", "-cr"],
'ranlib': None
}
pic_flags = ["-fPIC", "-DPIC"]
c_compiler = 'arm'
module_dir_switch = '-module ' # Don't remove ending space!
def get_libraries(self):
opt = FCompiler.get_libraries(self)
opt.extend(['flang', 'flangrti', 'ompstub'])
return opt
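    # Note: lru_cache on a method includes 'self' in the cache key, so the
    # directory list below is cached per compiler instance.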
@functools.lru_cache(maxsize=128)
def get_library_dirs(self):
"""List of compiler library directories."""
opt = FCompiler.get_library_dirs(self)
flang_dir = dirname(self.executables['compiler_f77'][0])
opt.append(normpath(join(flang_dir, '..', 'lib')))
return opt
def get_flags(self):
return []
def get_flags_free(self):
return []
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
return ['-O3']
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
return '-Wl,-rpath=%s' % dir
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='armflang').get_version())
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/pg.py
# http://www.pgroup.com
import sys
from numpy.distutils.fcompiler import FCompiler
from sys import platform
from os.path import join, dirname, normpath
compilers = ['PGroupFCompiler', 'PGroupFlangCompiler']
class PGroupFCompiler(FCompiler):
compiler_type = 'pg'
description = 'Portland Group Fortran Compiler'
version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*'
if platform == 'darwin':
executables = {
'version_cmd': ["<F77>", "-V"],
'compiler_f77': ["pgfortran", "-dynamiclib"],
'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"],
'compiler_f90': ["pgfortran", "-dynamiclib"],
'linker_so': ["libtool"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
pic_flags = ['']
else:
executables = {
'version_cmd': ["<F77>", "-V"],
'compiler_f77': ["pgfortran"],
'compiler_fix': ["pgfortran", "-Mfixed"],
'compiler_f90': ["pgfortran"],
'linker_so': ["<F90>"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
pic_flags = ['-fpic']
module_dir_switch = '-module '
module_include_switch = '-I'
def get_flags(self):
opt = ['-Minform=inform', '-Mnosecond_underscore']
return self.pic_flags + opt
def get_flags_opt(self):
return ['-fast']
def get_flags_debug(self):
return ['-g']
if platform == 'darwin':
def get_flags_linker_so(self):
return ["-dynamic", '-undefined', 'dynamic_lookup']
else:
def get_flags_linker_so(self):
return ["-shared", '-fpic']
def runtime_library_dir_option(self, dir):
return '-R%s' % dir
import functools
class PGroupFlangCompiler(FCompiler):
compiler_type = 'flang'
description = 'Portland Group Fortran LLVM Compiler'
version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*'
ar_exe = 'lib.exe'
possible_executables = ['flang']
executables = {
'version_cmd': ["<F77>", "--version"],
'compiler_f77': ["flang"],
'compiler_fix': ["flang"],
'compiler_f90': ["flang"],
'linker_so': [None],
'archiver': [ar_exe, "/verbose", "/OUT:"],
'ranlib': None
}
library_switch = '/OUT:' # No space after /OUT:!
module_dir_switch = '-module ' # Don't remove ending space!
def get_libraries(self):
opt = FCompiler.get_libraries(self)
opt.extend(['flang', 'flangrti', 'ompstub'])
return opt
@functools.lru_cache(maxsize=128)
def get_library_dirs(self):
"""List of compiler library directories."""
opt = FCompiler.get_library_dirs(self)
flang_dir = dirname(self.executables['compiler_f77'][0])
opt.append(normpath(join(flang_dir, '..', 'lib')))
return opt
def get_flags(self):
return []
def get_flags_free(self):
return []
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
return ['-O3']
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
raise NotImplementedError
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
if 'flang' in sys.argv:
print(customized_fcompiler(compiler='flang').get_version())
else:
print(customized_fcompiler(compiler='pg').get_version())
| 3,568 | Python | 26.666666 | 74 | 0.557175 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/g95.py | # http://g95.sourceforge.net/
from numpy.distutils.fcompiler import FCompiler
compilers = ['G95FCompiler']
class G95FCompiler(FCompiler):
compiler_type = 'g95'
description = 'G95 Fortran Compiler'
# version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
# $ g95 --version
# G95 (GCC 4.0.3 (g95!) May 22 2006)
version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
# $ g95 --version
# G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : ["g95", "-ffixed-form"],
'compiler_fix' : ["g95", "-ffixed-form"],
'compiler_f90' : ["g95"],
'linker_so' : ["<F90>", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-fpic']
module_dir_switch = '-fmod='
module_include_switch = '-I'
def get_flags(self):
return ['-fno-second-underscore']
def get_flags_opt(self):
return ['-O']
def get_flags_debug(self):
return ['-g']
if __name__ == '__main__':
from distutils import log
from numpy.distutils import customized_fcompiler
log.set_verbosity(2)
print(customized_fcompiler('g95').get_version())
| 1,330 | Python | 29.953488 | 105 | 0.539098 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/ibm.py | import os
import re
import sys
import subprocess
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils.misc_util import make_temp_file
from distutils import log
compilers = ['IBMFCompiler']
class IBMFCompiler(FCompiler):
compiler_type = 'ibm'
description = 'IBM XL Fortran Compiler'
version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
#IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004
executables = {
'version_cmd' : ["<F77>", "-qversion"],
'compiler_f77' : ["xlf"],
'compiler_fix' : ["xlf90", "-qfixed"],
'compiler_f90' : ["xlf90"],
'linker_so' : ["xlf95"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
    def get_version(self, *args, **kwds):
version = FCompiler.get_version(self,*args,**kwds)
if version is None and sys.platform.startswith('aix'):
# use lslpp to find out xlf version
lslpp = find_executable('lslpp')
xlf = find_executable('xlf')
if os.path.exists(xlf) and os.path.exists(lslpp):
try:
o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
except (OSError, subprocess.CalledProcessError):
pass
                else:
                    # check_output returns bytes; decode before regex matching
                    o = o.decode('utf-8', errors='replace')
                    m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
                    if m:
                        version = m.group('version')
xlf_dir = '/etc/opt/ibmcmp/xlf'
if version is None and os.path.isdir(xlf_dir):
# linux:
# If the output of xlf does not contain version info
# (that's the case with xlf 8.1, for instance) then
# let's try another method:
l = sorted(os.listdir(xlf_dir))
l.reverse()
l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
if l:
from distutils.version import LooseVersion
self.version = version = LooseVersion(l[0])
return version
def get_flags(self):
return ['-qextname']
def get_flags_debug(self):
return ['-g']
def get_flags_linker_so(self):
opt = []
        if sys.platform == 'darwin':
opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
else:
opt.append('-bshared')
version = self.get_version(ok_status=[0, 40])
if version is not None:
if sys.platform.startswith('aix'):
xlf_cfg = '/etc/xlf.cfg'
else:
xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
log.info('Creating '+new_cfg)
with open(xlf_cfg, 'r') as fi:
crt1_match = re.compile(r'\s*crt\s*=\s*(?P<path>.*)/crt1.o').match
for line in fi:
m = crt1_match(line)
if m:
fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
else:
fo.write(line)
fo.close()
opt.append('-F'+new_cfg)
return opt
def get_flags_opt(self):
return ['-O3']
if __name__ == '__main__':
from numpy.distutils import customized_fcompiler
log.set_verbosity(2)
print(customized_fcompiler(compiler='ibm').get_version())
| 3,539 | Python | 35.122449 | 137 | 0.532354 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/nv.py | from numpy.distutils.fcompiler import FCompiler
compilers = ['NVHPCFCompiler']
class NVHPCFCompiler(FCompiler):
""" NVIDIA High Performance Computing (HPC) SDK Fortran Compiler
https://developer.nvidia.com/hpc-sdk
    Since August 2020 the NVIDIA HPC SDK has included the compilers formerly known as the Portland Group compilers,
https://www.pgroup.com/index.htm.
See also `numpy.distutils.fcompiler.pg`.
"""
compiler_type = 'nv'
description = 'NVIDIA HPC SDK'
version_pattern = r'\s*(nvfortran|(pg(f77|f90|fortran)) \(aka nvfortran\)) (?P<version>[\d.-]+).*'
executables = {
'version_cmd': ["<F90>", "-V"],
'compiler_f77': ["nvfortran"],
'compiler_fix': ["nvfortran", "-Mfixed"],
'compiler_f90': ["nvfortran"],
'linker_so': ["<F90>"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
pic_flags = ['-fpic']
module_dir_switch = '-module '
module_include_switch = '-I'
def get_flags(self):
opt = ['-Minform=inform', '-Mnosecond_underscore']
return self.pic_flags + opt
def get_flags_opt(self):
return ['-fast']
def get_flags_debug(self):
return ['-g']
def get_flags_linker_so(self):
return ["-shared", '-fpic']
def runtime_library_dir_option(self, dir):
return '-R%s' % dir
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='nv').get_version())
| 1,560 | Python | 27.907407 | 111 | 0.603205 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/fcompiler/fujitsu.py | """
fujitsu
Supports Fujitsu compiler function.
This compiler is developed by Fujitsu and is used in A64FX on Fugaku.
"""
from numpy.distutils.fcompiler import FCompiler
compilers = ['FujitsuFCompiler']
class FujitsuFCompiler(FCompiler):
compiler_type = 'fujitsu'
description = 'Fujitsu Fortran Compiler'
possible_executables = ['frt']
version_pattern = r'frt \(FRT\) (?P<version>[a-z\d.]+)'
# $ frt --version
# frt (FRT) x.x.x yyyymmdd
executables = {
'version_cmd' : ["<F77>", "--version"],
'compiler_f77' : ["frt", "-Fixed"],
'compiler_fix' : ["frt", "-Fixed"],
'compiler_f90' : ["frt"],
'linker_so' : ["frt", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
pic_flags = ['-KPIC']
module_dir_switch = '-M'
module_include_switch = '-I'
def get_flags_opt(self):
return ['-O3']
def get_flags_debug(self):
return ['-g']
def runtime_library_dir_option(self, dir):
return f'-Wl,-rpath={dir}'
def get_libraries(self):
return ['fj90f', 'fj90i', 'fjsrcinfo']
if __name__ == '__main__':
from distutils import log
from numpy.distutils import customized_fcompiler
log.set_verbosity(2)
print(customized_fcompiler('fujitsu').get_version())
| 1,333 | Python | 27.382978 | 69 | 0.581395 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_ccompiler_opt.py | import re, textwrap, os
from os import sys, path
from distutils.errors import DistutilsError
is_standalone = __name__ == '__main__' and __package__ is None
if is_standalone:
import unittest, contextlib, tempfile, shutil
sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
from ccompiler_opt import CCompilerOpt
# from numpy/testing/_private/utils.py
@contextlib.contextmanager
def tempdir(*args, **kwargs):
tmpdir = tempfile.mkdtemp(*args, **kwargs)
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
def assert_(expr, msg=''):
if not expr:
raise AssertionError(msg)
else:
from numpy.distutils.ccompiler_opt import CCompilerOpt
from numpy.testing import assert_, tempdir
# architectures and compilers to test
arch_compilers = dict(
x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
ppc64 = ("gcc", "clang"),
ppc64le = ("gcc", "clang"),
armhf = ("gcc", "clang"),
aarch64 = ("gcc", "clang"),
s390x = ("gcc", "clang"),
noarch = ("gcc",)
)
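# One TestCCompilerOpt_<arch>_<cc> class per pair above is generated at import
# time via new_test() at the bottom of this file, e.g. TestCCompilerOpt_x86_gcc.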
class FakeCCompilerOpt(CCompilerOpt):
fake_info = ""
def __init__(self, trap_files="", trap_flags="", *args, **kwargs):
self.fake_trap_files = trap_files
self.fake_trap_flags = trap_flags
CCompilerOpt.__init__(self, None, **kwargs)
def __repr__(self):
return textwrap.dedent("""\
<<<<
march : {}
compiler : {}
----------------
{}
>>>>
""").format(self.cc_march, self.cc_name, self.report())
def dist_compile(self, sources, flags, **kwargs):
assert(isinstance(sources, list))
assert(isinstance(flags, list))
if self.fake_trap_files:
for src in sources:
if re.match(self.fake_trap_files, src):
self.dist_error("source is trapped by a fake interface")
if self.fake_trap_flags:
for f in flags:
if re.match(self.fake_trap_flags, f):
self.dist_error("flag is trapped by a fake interface")
# fake objects
return zip(sources, [' '.join(flags)] * len(sources))
def dist_info(self):
return FakeCCompilerOpt.fake_info
@staticmethod
def dist_log(*args, stderr=False):
pass
class _Test_CCompilerOpt:
arch = None # x86_64
cc = None # gcc
def setup_class(self):
FakeCCompilerOpt.conf_nocache = True
self._opt = None
def nopt(self, *args, **kwargs):
FakeCCompilerOpt.fake_info = (self.arch, self.cc, "")
return FakeCCompilerOpt(*args, **kwargs)
def opt(self):
if not self._opt:
self._opt = self.nopt()
return self._opt
def march(self):
return self.opt().cc_march
def cc_name(self):
return self.opt().cc_name
def get_targets(self, targets, groups, **kwargs):
FakeCCompilerOpt.conf_target_groups = groups
opt = self.nopt(
cpu_baseline=kwargs.get("baseline", "min"),
cpu_dispatch=kwargs.get("dispatch", "max"),
trap_files=kwargs.get("trap_files", ""),
trap_flags=kwargs.get("trap_flags", "")
)
with tempdir() as tmpdir:
file = os.path.join(tmpdir, "test_targets.c")
with open(file, 'w') as f:
f.write(targets)
gtargets = []
gflags = {}
fake_objects = opt.try_dispatch([file])
for source, flags in fake_objects:
gtar = path.basename(source).split('.')[1:-1]
glen = len(gtar)
if glen == 0:
gtar = "baseline"
elif glen == 1:
gtar = gtar[0].upper()
else:
                # convert a multi-target into parenthesized string format to
                # match the configuration statement syntax.
gtar = ('('+' '.join(gtar)+')').upper()
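                # e.g. (illustrative) 'test_targets.avx2.fma3.c' -> '(AVX2 FMA3)'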
gtargets.append(gtar)
gflags[gtar] = flags
has_baseline, targets = opt.sources_status[file]
targets = targets + ["baseline"] if has_baseline else targets
        # convert tuples that represent multi-targets into parenthesized str format
targets = [
'('+' '.join(tar)+')' if isinstance(tar, tuple) else tar
for tar in targets
]
if len(targets) != len(gtargets) or not all(t in gtargets for t in targets):
raise AssertionError(
"'sources_status' returns different targets than the compiled targets\n"
"%s != %s" % (targets, gtargets)
)
        # return targets from 'sources_status' since the order matters
return targets, gflags
def arg_regex(self, **kwargs):
map2origin = dict(
x64 = "x86",
ppc64le = "ppc64",
aarch64 = "armhf",
clang = "gcc",
)
march = self.march(); cc_name = self.cc_name()
map_march = map2origin.get(march, march)
map_cc = map2origin.get(cc_name, cc_name)
for key in (
march, cc_name, map_march, map_cc,
march + '_' + cc_name,
map_march + '_' + cc_name,
march + '_' + map_cc,
map_march + '_' + map_cc,
) :
regex = kwargs.pop(key, None)
if regex is not None:
break
if regex:
if isinstance(regex, dict):
for k, v in regex.items():
if v[-1:] not in ')}$?\\.+*':
regex[k] = v + '$'
else:
assert(isinstance(regex, str))
if regex[-1:] not in ')}$?\\.+*':
regex += '$'
return regex
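    # Key lookup order used above (illustrative, march='x64', cc='clang'):
    #   'x64', 'clang', 'x86', 'gcc',
    #   'x64_clang', 'x86_clang', 'x64_gcc', 'x86_gcc'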
def expect(self, dispatch, baseline="", **kwargs):
match = self.arg_regex(**kwargs)
if match is None:
return
opt = self.nopt(
cpu_baseline=baseline, cpu_dispatch=dispatch,
trap_files=kwargs.get("trap_files", ""),
trap_flags=kwargs.get("trap_flags", "")
)
features = ' '.join(opt.cpu_dispatch_names())
if not match:
if len(features) != 0:
raise AssertionError(
'expected empty features, not "%s"' % features
)
return
if not re.match(match, features, re.IGNORECASE):
raise AssertionError(
'dispatch features "%s" not match "%s"' % (features, match)
)
def expect_baseline(self, baseline, dispatch="", **kwargs):
match = self.arg_regex(**kwargs)
if match is None:
return
opt = self.nopt(
cpu_baseline=baseline, cpu_dispatch=dispatch,
trap_files=kwargs.get("trap_files", ""),
trap_flags=kwargs.get("trap_flags", "")
)
features = ' '.join(opt.cpu_baseline_names())
if not match:
if len(features) != 0:
raise AssertionError(
'expected empty features, not "%s"' % features
)
return
if not re.match(match, features, re.IGNORECASE):
raise AssertionError(
'baseline features "%s" not match "%s"' % (features, match)
)
def expect_flags(self, baseline, dispatch="", **kwargs):
match = self.arg_regex(**kwargs)
if match is None:
return
opt = self.nopt(
cpu_baseline=baseline, cpu_dispatch=dispatch,
trap_files=kwargs.get("trap_files", ""),
trap_flags=kwargs.get("trap_flags", "")
)
flags = ' '.join(opt.cpu_baseline_flags())
if not match:
if len(flags) != 0:
raise AssertionError(
'expected empty flags not "%s"' % flags
)
return
if not re.match(match, flags):
raise AssertionError(
'flags "%s" not match "%s"' % (flags, match)
)
def expect_targets(self, targets, groups={}, **kwargs):
match = self.arg_regex(**kwargs)
if match is None:
return
targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs)
targets = ' '.join(targets)
if not match:
if len(targets) != 0:
raise AssertionError(
'expected empty targets, not "%s"' % targets
)
return
if not re.match(match, targets, re.IGNORECASE):
raise AssertionError(
'targets "%s" not match "%s"' % (targets, match)
)
def expect_target_flags(self, targets, groups={}, **kwargs):
match_dict = self.arg_regex(**kwargs)
if match_dict is None:
return
assert(isinstance(match_dict, dict))
_, tar_flags = self.get_targets(targets=targets, groups=groups)
for match_tar, match_flags in match_dict.items():
if match_tar not in tar_flags:
raise AssertionError(
'expected to find target "%s"' % match_tar
)
flags = tar_flags[match_tar]
if not match_flags:
if len(flags) != 0:
raise AssertionError(
'expected to find empty flags in target "%s"' % match_tar
)
if not re.match(match_flags, flags):
raise AssertionError(
'"%s" flags "%s" not match "%s"' % (match_tar, flags, match_flags)
)
def test_interface(self):
wrong_arch = "ppc64" if self.arch != "ppc64" else "x86"
wrong_cc = "clang" if self.cc != "clang" else "icc"
opt = self.opt()
assert_(getattr(opt, "cc_on_" + self.arch))
assert_(not getattr(opt, "cc_on_" + wrong_arch))
assert_(getattr(opt, "cc_is_" + self.cc))
assert_(not getattr(opt, "cc_is_" + wrong_cc))
def test_args_empty(self):
for baseline, dispatch in (
("", "none"),
(None, ""),
("none +none", "none - none"),
("none -max", "min - max"),
("+vsx2 -VSX2", "vsx avx2 avx512f -max"),
("max -vsx - avx + avx512f neon -MAX ",
"min -min + max -max -vsx + avx2 -avx2 +NONE")
) :
opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
assert(len(opt.cpu_baseline_names()) == 0)
assert(len(opt.cpu_dispatch_names()) == 0)
def test_args_validation(self):
if self.march() == "unknown":
return
        # sanity-check the argument validation
for baseline, dispatch in (
("unkown_feature - max +min", "unknown max min"), # unknowing features
("#avx2", "$vsx") # groups and polices aren't acceptable
) :
try:
self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
raise AssertionError("excepted an exception for invalid arguments")
except DistutilsError:
pass
def test_skip(self):
        # only takes what the platform supports and skips the others
        # without causing exceptions
self.expect(
"sse vsx neon",
x86="sse", ppc64="vsx", armhf="neon", unknown=""
)
self.expect(
"sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd",
x86 = "sse41 avx avx2",
ppc64 = "vsx2 vsx3",
armhf = "neon_vfpv4 asimd",
unknown = ""
)
        # any feature in cpu_dispatch must be ignored if it's part of the baseline
self.expect(
"sse neon vsx", baseline="sse neon vsx",
x86="", ppc64="", armhf=""
)
self.expect(
"avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp",
x86="", ppc64="", armhf=""
)
def test_implies(self):
        # the baseline combines implied features, so we rely on it
        # instead of testing 'feature_implies()' directly
self.expect_baseline(
"fma3 avx2 asimd vsx3",
# .* between two spaces can validate features in between
x86 = "sse .* sse41 .* fma3.*avx2",
ppc64 = "vsx vsx2 vsx3",
armhf = "neon neon_fp16 neon_vfpv4 asimd"
)
"""
special cases
"""
        # in icc and msvc, FMA3 and AVX2 can't be separated:
        # each must imply the other; the same holds for avx512f & cd
for f0, f1 in (
("fma3", "avx2"),
("avx512f", "avx512cd"),
):
diff = ".* sse42 .* %s .*%s$" % (f0, f1)
self.expect_baseline(f0,
x86_gcc=".* sse42 .* %s$" % f0,
x86_icc=diff, x86_iccw=diff
)
self.expect_baseline(f1,
x86_gcc=".* avx .* %s$" % f1,
x86_icc=diff, x86_iccw=diff
)
# in msvc, following features can't be separated too
for f in (("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")):
for ff in f:
self.expect_baseline(ff,
x86_msvc=".*%s" % ' '.join(f)
)
# in ppc64le VSX and VSX2 can't be separated
self.expect_baseline("vsx", ppc64le="vsx vsx2")
# in aarch64 following features can't be separated
for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"):
self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd")
def test_args_options(self):
# max & native
for o in ("max", "native"):
if o == "native" and self.cc_name() == "msvc":
continue
self.expect(o,
trap_files=".*cpu_(sse|vsx|neon|vx).c",
x86="", ppc64="", armhf="", s390x=""
)
self.expect(o,
trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c",
x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16",
aarch64="", ppc64le="", s390x="vx"
)
self.expect(o,
trap_files=".*cpu_(popcnt|vsx3).c",
x86="sse .* sse41", ppc64="vsx vsx2",
armhf="neon neon_fp16 .* asimd .*",
s390x="vx vxe vxe2"
)
self.expect(o,
x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*",
                # in icc, xop and fma4 aren't supported
x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
# in msvc, avx512_knl avx512_knm aren't supported
x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*",
armhf=".* asimd asimdhp asimddp .*",
ppc64="vsx vsx2 vsx3 vsx4.*",
s390x="vx vxe vxe2.*"
)
# min
self.expect("min",
x86="sse sse2", x64="sse sse2 sse3",
armhf="", aarch64="neon neon_fp16 .* asimd",
ppc64="", ppc64le="vsx vsx2", s390x=""
)
self.expect(
"min", trap_files=".*cpu_(sse2|vsx2).c",
x86="", ppc64le=""
)
        # an exception must be triggered if the native flag isn't supported
        # when the "native" option is activated through the args
try:
self.expect("native",
trap_flags=".*(-march=native|-xHost|/QxHost).*",
x86=".*", ppc64=".*", armhf=".*", s390x=".*"
)
if self.march() != "unknown":
raise AssertionError(
"excepted an exception for %s" % self.march()
)
except DistutilsError:
if self.march() == "unknown":
raise AssertionError("excepted no exceptions")
def test_flags(self):
self.expect_flags(
"sse sse2 vsx vsx2 neon neon_fp16 vx vxe",
x86_gcc="-msse -msse2", x86_icc="-msse -msse2",
x86_iccw="/arch:SSE2",
x86_msvc="/arch:SSE2" if self.march() == "x86" else "",
ppc64_gcc= "-mcpu=power8",
ppc64_clang="-maltivec -mvsx -mpower8-vector",
armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee",
aarch64="",
s390x="-mzvector -march=arch12"
)
# testing normalize -march
self.expect_flags(
"asimd",
aarch64="",
armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd"
)
self.expect_flags(
"asimdhp",
aarch64_gcc=r"-march=armv8.2-a\+fp16",
armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16"
)
self.expect_flags(
"asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod"
)
self.expect_flags(
# asimdfhm implies asimdhp
"asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml"
)
self.expect_flags(
"asimddp asimdhp asimdfhm",
aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml"
)
self.expect_flags(
"vx vxe vxe2",
s390x=r"-mzvector -march=arch13"
)
def test_targets_exceptions(self):
for targets in (
"bla bla", "/*@targets",
"/*@targets */",
"/*@targets unknown */",
"/*@targets $unknown_policy avx2 */",
"/*@targets #unknown_group avx2 */",
"/*@targets $ */",
"/*@targets # vsx */",
"/*@targets #$ vsx */",
"/*@targets vsx avx2 ) */",
"/*@targets vsx avx2 (avx2 */",
"/*@targets vsx avx2 () */",
"/*@targets vsx avx2 ($autovec) */", # no features
"/*@targets vsx avx2 (xxx) */",
"/*@targets vsx avx2 (baseline) */",
) :
try:
self.expect_targets(
targets,
x86="", armhf="", ppc64="", s390x=""
)
if self.march() != "unknown":
raise AssertionError(
"excepted an exception for %s" % self.march()
)
except DistutilsError:
if self.march() == "unknown":
raise AssertionError("excepted no exceptions")
def test_targets_syntax(self):
for targets in (
"/*@targets $keep_baseline sse vsx neon vx*/",
"/*@targets,$keep_baseline,sse,vsx,neon vx*/",
"/*@targets*$keep_baseline*sse*vsx*neon*vx*/",
"""
/*
** @targets
** $keep_baseline, sse vsx,neon, vx
*/
""",
"""
/*
************@targets****************
** $keep_baseline, sse vsx, neon, vx
************************************
*/
""",
"""
/*
/////////////@targets/////////////////
//$keep_baseline//sse//vsx//neon//vx
/////////////////////////////////////
*/
""",
"""
/*
@targets
$keep_baseline
SSE VSX NEON VX*/
"""
) :
self.expect_targets(targets,
x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown=""
)
def test_targets(self):
# test skipping baseline features
self.expect_targets(
"""
/*@targets
sse sse2 sse41 avx avx2 avx512f
vsx vsx2 vsx3 vsx4
neon neon_fp16 asimdhp asimddp
vx vxe vxe2
*/
""",
baseline="avx vsx2 asimd vx vxe",
x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3",
s390x="vxe2"
)
# test skipping non-dispatch features
self.expect_targets(
"""
/*@targets
sse41 avx avx2 avx512f
vsx2 vsx3 vsx4
asimd asimdhp asimddp
vx vxe vxe2
*/
""",
baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2",
x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2"
)
        # test skipping features that aren't supported
self.expect_targets(
"""
/*@targets
sse2 sse41 avx2 avx512f
vsx2 vsx3 vsx4
neon asimdhp asimddp
vx vxe vxe2
*/
""",
baseline="",
trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c",
x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon",
s390x="vxe vx"
)
        # test skipping features that imply each other
self.expect_targets(
"""
/*@targets
sse sse2 avx fma3 avx2 avx512f avx512cd
vsx vsx2 vsx3
neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp
asimddp asimdfhm
*/
""",
baseline="",
x86_gcc="avx512cd avx512f avx2 fma3 avx sse2",
x86_msvc="avx512cd avx2 avx sse2",
x86_icc="avx512cd avx2 avx sse2",
x86_iccw="avx512cd avx2 avx sse2",
ppc64="vsx3 vsx2 vsx",
ppc64le="vsx3 vsx2",
armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon",
aarch64="asimdfhm asimddp asimdhp asimd"
)
def test_targets_policies(self):
# 'keep_baseline', generate objects for baseline features
self.expect_targets(
"""
/*@targets
$keep_baseline
sse2 sse42 avx2 avx512f
vsx2 vsx3
neon neon_vfpv4 asimd asimddp
vx vxe vxe2
*/
""",
baseline="sse41 avx2 vsx2 asimd vsx3 vxe",
x86="avx512f avx2 sse42 sse2",
ppc64="vsx3 vsx2",
armhf="asimddp asimd neon_vfpv4 neon",
            # neon, neon_vfpv4, asimd imply each other
aarch64="asimddp asimd",
s390x="vxe2 vxe vx"
)
# 'keep_sort', leave the sort as-is
self.expect_targets(
"""
/*@targets
$keep_baseline $keep_sort
avx512f sse42 avx2 sse2
vsx2 vsx3
asimd neon neon_vfpv4 asimddp
vxe vxe2
*/
""",
x86="avx512f sse42 avx2 sse2",
ppc64="vsx2 vsx3",
armhf="asimd neon neon_vfpv4 asimddp",
            # neon, neon_vfpv4, asimd imply each other
aarch64="asimd asimddp",
s390x="vxe vxe2"
)
# 'autovec', skipping features that can't be
# vectorized by the compiler
self.expect_targets(
"""
/*@targets
$keep_baseline $keep_sort $autovec
avx512f avx2 sse42 sse41 sse2
vsx3 vsx2
asimddp asimd neon_vfpv4 neon
*/
""",
x86_gcc="avx512f avx2 sse42 sse41 sse2",
x86_icc="avx512f avx2 sse42 sse41 sse2",
x86_iccw="avx512f avx2 sse42 sse41 sse2",
x86_msvc="avx512f avx2 sse2"
if self.march() == 'x86' else "avx512f avx2",
ppc64="vsx3 vsx2",
armhf="asimddp asimd neon_vfpv4 neon",
            # neon, neon_vfpv4, asimd imply each other
aarch64="asimddp asimd"
)
for policy in ("$maxopt", "$autovec"):
# 'maxopt' and autovec set the max acceptable optimization flags
self.expect_target_flags(
"/*@targets baseline %s */" % policy,
gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"},
iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"},
unknown={"baseline":".*"}
)
# 'werror', force compilers to treat warnings as errors
self.expect_target_flags(
"/*@targets baseline $werror */",
gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"},
iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"},
unknown={"baseline":".*"}
)
def test_targets_groups(self):
self.expect_targets(
"""
/*@targets $keep_baseline baseline #test_group */
""",
groups=dict(
test_group=("""
$keep_baseline
asimddp sse2 vsx2 avx2 vsx3
avx512f asimdhp
""")
),
x86="avx512f avx2 sse2 baseline",
ppc64="vsx3 vsx2 baseline",
armhf="asimddp asimdhp baseline"
)
        # test skipping duplicates and sorting
self.expect_targets(
"""
/*@targets
* sse42 avx avx512f
* #test_group_1
* vsx2
* #test_group_2
* asimddp asimdfhm
*/
""",
groups=dict(
test_group_1=("""
VSX2 vsx3 asimd avx2 SSE41
"""),
test_group_2=("""
vsx2 vsx3 asImd aVx2 sse41
""")
),
x86="avx512f avx2 avx sse42 sse41",
ppc64="vsx3 vsx2",
            # vsx2 is part of the default baseline of ppc64le, option ("min")
ppc64le="vsx3",
armhf="asimdfhm asimddp asimd",
            # asimd is part of the default baseline of aarch64, option ("min")
aarch64="asimdfhm asimddp"
)
def test_targets_multi(self):
self.expect_targets(
"""
/*@targets
(avx512_clx avx512_cnl) (asimdhp asimddp)
*/
""",
x86=r"\(avx512_clx avx512_cnl\)",
armhf=r"\(asimdhp asimddp\)",
)
# test skipping implied features and auto-sort
self.expect_targets(
"""
/*@targets
f16c (sse41 avx sse42) (sse3 avx2 avx512f)
vsx2 (vsx vsx3 vsx2)
(neon neon_vfpv4 asimd asimdhp asimddp)
*/
""",
x86="avx512f f16c avx",
ppc64="vsx3 vsx2",
ppc64le="vsx3", # vsx2 part of baseline
armhf=r"\(asimdhp asimddp\)",
)
# test skipping implied features and keep sort
self.expect_targets(
"""
/*@targets $keep_sort
(sse41 avx sse42) (sse3 avx2 avx512f)
(vsx vsx3 vsx2)
(asimddp neon neon_vfpv4 asimd asimdhp)
(vx vxe vxe2)
*/
""",
x86="avx avx512f",
ppc64="vsx3",
armhf=r"\(asimdhp asimddp\)",
s390x="vxe2"
)
        # test compiler variety and avoiding duplicates
self.expect_targets(
"""
/*@targets $keep_sort
fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3
*/
""",
x86_gcc=r"fma3 avx2 \(fma3 avx2\)",
x86_icc="avx2", x86_iccw="avx2",
x86_msvc="avx2"
)
def new_test(arch, cc):
if is_standalone: return textwrap.dedent("""\
class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase):
arch = '{arch}'
cc = '{cc}'
def __init__(self, methodName="runTest"):
unittest.TestCase.__init__(self, methodName)
self.setup_class()
""").format(
class_name=arch + '_' + cc, arch=arch, cc=cc
)
return textwrap.dedent("""\
class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt):
arch = '{arch}'
cc = '{cc}'
""").format(
class_name=arch + '_' + cc, arch=arch, cc=cc
)
"""
if 1 and is_standalone:
FakeCCompilerOpt.fake_info = "x86_icc"
cco = FakeCCompilerOpt(None, cpu_baseline="avx2")
print(' '.join(cco.cpu_baseline_names()))
print(cco.cpu_baseline_flags())
unittest.main()
sys.exit()
"""
for arch, compilers in arch_compilers.items():
for cc in compilers:
exec(new_test(arch, cc))
if is_standalone:
unittest.main()
| 28,763 | Python | 34.555006 | 90 | 0.481208 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_exec_command.py | import os
import sys
from tempfile import TemporaryFile
from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
from numpy.testing import tempdir, assert_, assert_warns
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
from io import StringIO
class redirect_stdout:
"""Context manager to redirect stdout for exec_command test."""
def __init__(self, stdout=None):
self._stdout = stdout or sys.stdout
def __enter__(self):
self.old_stdout = sys.stdout
sys.stdout = self._stdout
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush()
sys.stdout = self.old_stdout
# note: closing sys.stdout won't close it.
self._stdout.close()
class redirect_stderr:
"""Context manager to redirect stderr for exec_command test."""
def __init__(self, stderr=None):
self._stderr = stderr or sys.stderr
def __enter__(self):
self.old_stderr = sys.stderr
sys.stderr = self._stderr
def __exit__(self, exc_type, exc_value, traceback):
self._stderr.flush()
sys.stderr = self.old_stderr
# note: closing sys.stderr won't close it.
self._stderr.close()
class emulate_nonposix:
"""Context manager to emulate os.name != 'posix' """
def __init__(self, osname='non-posix'):
self._new_name = osname
def __enter__(self):
self._old_name = os.name
os.name = self._new_name
def __exit__(self, exc_type, exc_value, traceback):
os.name = self._old_name
def test_exec_command_stdout():
# Regression test for gh-2999 and gh-2915.
# There are several packages (nose, scipy.weave.inline, Sage inline
# Fortran) that replace stdout, in which case it doesn't have a fileno
# method. This is tested here, with a do-nothing command that fails if the
# presence of fileno() is assumed in exec_command.
# The code has a special case for posix systems, so if we are on posix test
# both that the special case works and that the generic code works.
# Test posix version:
with redirect_stdout(StringIO()):
with redirect_stderr(TemporaryFile()):
with assert_warns(DeprecationWarning):
exec_command.exec_command("cd '.'")
if os.name == 'posix':
# Test general (non-posix) version:
with emulate_nonposix():
with redirect_stdout(StringIO()):
with redirect_stderr(TemporaryFile()):
with assert_warns(DeprecationWarning):
exec_command.exec_command("cd '.'")
def test_exec_command_stderr():
# Test posix version:
with redirect_stdout(TemporaryFile(mode='w+')):
with redirect_stderr(StringIO()):
with assert_warns(DeprecationWarning):
exec_command.exec_command("cd '.'")
if os.name == 'posix':
# Test general (non-posix) version:
with emulate_nonposix():
with redirect_stdout(TemporaryFile()):
with redirect_stderr(StringIO()):
with assert_warns(DeprecationWarning):
exec_command.exec_command("cd '.'")
class TestExecCommand:
def setup_method(self):
self.pyexe = get_pythonexe()
def check_nt(self, **kws):
s, o = exec_command.exec_command('cmd /C echo path=%path%')
assert_(s == 0)
assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
assert_(s == 0)
assert_(o == 'win32')
def check_posix(self, **kws):
s, o = exec_command.exec_command("echo Hello", **kws)
assert_(s == 0)
assert_(o == 'Hello')
s, o = exec_command.exec_command('echo $AAA', **kws)
assert_(s == 0)
assert_(o == '')
s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
assert_(s == 0)
assert_(o == 'Tere')
s, o = exec_command.exec_command('echo "$AAA"', **kws)
assert_(s == 0)
assert_(o == '')
if 'BBB' not in os.environ:
os.environ['BBB'] = 'Hi'
s, o = exec_command.exec_command('echo "$BBB"', **kws)
assert_(s == 0)
assert_(o == 'Hi')
s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
assert_(s == 0)
assert_(o == 'Hey')
s, o = exec_command.exec_command('echo "$BBB"', **kws)
assert_(s == 0)
assert_(o == 'Hi')
del os.environ['BBB']
s, o = exec_command.exec_command('echo "$BBB"', **kws)
assert_(s == 0)
assert_(o == '')
s, o = exec_command.exec_command('this_is_not_a_command', **kws)
assert_(s != 0)
assert_(o != '')
s, o = exec_command.exec_command('echo path=$PATH', **kws)
assert_(s == 0)
assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
self.pyexe, **kws)
assert_(s == 0)
assert_(o == 'posix')
    def check_basic(self, **kws):
s, o = exec_command.exec_command(
'"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
assert_(s != 0)
assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(\'0\');'
'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
self.pyexe, **kws)
assert_(s == 0)
assert_(o == '012')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
assert_(s == 15)
assert_(o == '')
s, o = exec_command.exec_command(
'"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws)
assert_(s == 0)
assert_(o == 'Heipa')
def check_execute_in(self, **kws):
with tempdir() as tmpdir:
fn = "file"
tmpfile = os.path.join(tmpdir, fn)
with open(tmpfile, 'w') as f:
f.write('Hello')
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
(self.pyexe, fn), **kws)
assert_(s != 0)
assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
assert_(s == 0)
assert_(o == 'Hello')
def test_basic(self):
with redirect_stdout(StringIO()):
with redirect_stderr(StringIO()):
with assert_warns(DeprecationWarning):
if os.name == "posix":
self.check_posix(use_tee=0)
self.check_posix(use_tee=1)
elif os.name == "nt":
self.check_nt(use_tee=0)
self.check_nt(use_tee=1)
self.check_execute_in(use_tee=0)
self.check_execute_in(use_tee=1)
| 7,308 | Python | 32.995349 | 79 | 0.52422 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_log.py | import io
import re
from contextlib import redirect_stdout
import pytest
from numpy.distutils import log
def setup_module():
f = io.StringIO() # changing verbosity also logs here, capture that
with redirect_stdout(f):
log.set_verbosity(2, force=True) # i.e. DEBUG
def teardown_module():
log.set_verbosity(0, force=True) # the default
r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
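# strips ANSI escape sequences,
# e.g. r_ansi.sub("", "\x1b[31mERROR\x1b[0m: msg") -> "ERROR: msg"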
@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"])
def test_log_prefix(func_name):
func = getattr(log, func_name)
msg = f"{func_name} message"
f = io.StringIO()
with redirect_stdout(f):
func(msg)
out = f.getvalue()
assert out # sanity check
clean_out = r_ansi.sub("", out)
line = next(line for line in clean_out.splitlines())
assert line == f"{func_name.upper()}: {msg}"
| 868 | Python | 23.828571 | 73 | 0.625576 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_from_template.py |
from numpy.distutils.from_template import process_str
from numpy.testing import assert_equal
pyf_src = """
python module foo
<_rd=real,double precision>
interface
subroutine <s,d>foosub(tol)
<_rd>, intent(in,out) :: tol
end subroutine <s,d>foosub
end interface
end python module foo
"""
expected_pyf = """
python module foo
interface
subroutine sfoosub(tol)
real, intent(in,out) :: tol
end subroutine sfoosub
subroutine dfoosub(tol)
double precision, intent(in,out) :: tol
end subroutine dfoosub
end interface
end python module foo
"""
def normalize_whitespace(s):
"""
Remove leading and trailing whitespace, and convert internal
stretches of whitespace to a single space.
"""
return ' '.join(s.split())
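# e.g. normalize_whitespace("  a\n   b\tc ") -> "a b c"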
def test_from_template():
"""Regression test for gh-10712."""
pyf = process_str(pyf_src)
normalized_pyf = normalize_whitespace(pyf)
normalized_expected_pyf = normalize_whitespace(expected_pyf)
assert_equal(normalized_pyf, normalized_expected_pyf)
| 1,103 | Python | 23.533333 | 64 | 0.660925 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_system_info.py | import os
import shutil
import pytest
from tempfile import mkstemp, mkdtemp
from subprocess import Popen, PIPE
from distutils.errors import DistutilsError
from numpy.testing import assert_, assert_equal, assert_raises
from numpy.distutils import ccompiler, customized_ccompiler
from numpy.distutils.system_info import system_info, ConfigParser, mkl_info
from numpy.distutils.system_info import AliasedOptionError
from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
from numpy.distutils import _shell_utils
def get_class(name, notfound_action=1):
"""
notfound_action:
0 - do nothing
1 - display warning message
2 - raise error
"""
cl = {'temp1': Temp1Info,
'temp2': Temp2Info,
'duplicate_options': DuplicateOptionInfo,
}.get(name.lower(), _system_info)
return cl()
simple_site = """
[ALL]
library_dirs = {dir1:s}{pathsep:s}{dir2:s}
libraries = {lib1:s},{lib2:s}
extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os
runtime_library_dirs = {dir1:s}
[temp1]
library_dirs = {dir1:s}
libraries = {lib1:s}
runtime_library_dirs = {dir1:s}
[temp2]
library_dirs = {dir2:s}
libraries = {lib2:s}
extra_link_args = -Wl,-rpath={lib2_escaped:s}
rpath = {dir2:s}
[duplicate_options]
mylib_libs = {lib1:s}
libraries = {lib2:s}
"""
site_cfg = simple_site
fakelib_c_text = """
/* This file is generated from numpy/distutils/testing/test_system_info.py */
#include<stdio.h>
void foo(void) {
printf("Hello foo");
}
void bar(void) {
printf("Hello bar");
}
"""
def have_compiler():
""" Return True if there appears to be an executable compiler
"""
compiler = customized_ccompiler()
try:
cmd = compiler.compiler # Unix compilers
except AttributeError:
try:
if not compiler.initialized:
compiler.initialize() # MSVC is different
except (DistutilsError, ValueError):
return False
cmd = [compiler.cc]
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
p.stdout.close()
p.stderr.close()
p.wait()
except OSError:
return False
return True
HAVE_COMPILER = have_compiler()
class _system_info(system_info):
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
defaults = {'library_dirs': '',
'include_dirs': '',
'runtime_library_dirs': '',
'rpath': '',
'src_dirs': '',
'search_static_first': "0",
'extra_compile_args': '',
'extra_link_args': ''}
self.cp = ConfigParser(defaults)
# We have to parse the config files afterwards
# to have a consistent temporary filepath
def _check_libs(self, lib_dirs, libs, opt_libs, exts):
"""Override _check_libs to return with all dirs """
info = {'libraries': libs, 'library_dirs': lib_dirs}
return info
class Temp1Info(_system_info):
"""For testing purposes"""
section = 'temp1'
class Temp2Info(_system_info):
"""For testing purposes"""
section = 'temp2'
class DuplicateOptionInfo(_system_info):
"""For testing purposes"""
section = 'duplicate_options'
class TestSystemInfoReading:
def setup_method(self):
""" Create the libraries """
# Create 2 sources and 2 libraries
self._dir1 = mkdtemp()
self._src1 = os.path.join(self._dir1, 'foo.c')
self._lib1 = os.path.join(self._dir1, 'libfoo.so')
self._dir2 = mkdtemp()
self._src2 = os.path.join(self._dir2, 'bar.c')
self._lib2 = os.path.join(self._dir2, 'libbar.so')
# Update local site.cfg
global simple_site, site_cfg
site_cfg = simple_site.format(**{
'dir1': self._dir1,
'lib1': self._lib1,
'dir2': self._dir2,
'lib2': self._lib2,
'pathsep': os.pathsep,
'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])
})
# Write site.cfg
fd, self._sitecfg = mkstemp()
os.close(fd)
with open(self._sitecfg, 'w') as fd:
fd.write(site_cfg)
# Write the sources
with open(self._src1, 'w') as fd:
fd.write(fakelib_c_text)
with open(self._src2, 'w') as fd:
fd.write(fakelib_c_text)
# We create all class-instances
def site_and_parse(c, site_cfg):
c.files = [site_cfg]
c.parse_config_files()
return c
self.c_default = site_and_parse(get_class('default'), self._sitecfg)
self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)
self.c_dup_options = site_and_parse(get_class('duplicate_options'),
self._sitecfg)
def teardown_method(self):
# Do each removal separately
try:
shutil.rmtree(self._dir1)
except Exception:
pass
try:
shutil.rmtree(self._dir2)
except Exception:
pass
try:
os.remove(self._sitecfg)
except Exception:
pass
def test_all(self):
# Read in all information in the ALL block
tsi = self.c_default
assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
extra = tsi.calc_extra_info()
assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os'])
def test_temp1(self):
# Read in all information in the temp1 block
tsi = self.c_temp1
assert_equal(tsi.get_lib_dirs(), [self._dir1])
assert_equal(tsi.get_libraries(), [self._lib1])
assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
def test_temp2(self):
# Read in all information in the temp2 block
tsi = self.c_temp2
assert_equal(tsi.get_lib_dirs(), [self._dir2])
assert_equal(tsi.get_libraries(), [self._lib2])
# Now from rpath and not runtime_library_dirs
assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])
extra = tsi.calc_extra_info()
assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])
def test_duplicate_options(self):
# Ensure that duplicates are raising an AliasedOptionError
tsi = self.c_dup_options
assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries")
assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1])
assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2])
@pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
def test_compile1(self):
# Compile source and link the first source
c = customized_ccompiler()
previousDir = os.getcwd()
try:
# Change directory to not screw up directories
os.chdir(self._dir1)
c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
# Ensure that the object exists
assert_(os.path.isfile(self._src1.replace('.c', '.o')) or
os.path.isfile(self._src1.replace('.c', '.obj')))
finally:
os.chdir(previousDir)
@pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
@pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),
reason="Fails with MSVC compiler ")
def test_compile2(self):
# Compile source and link the second source
tsi = self.c_temp2
c = customized_ccompiler()
extra_link_args = tsi.calc_extra_info()['extra_link_args']
previousDir = os.getcwd()
try:
# Change directory to not screw up directories
os.chdir(self._dir2)
c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
extra_postargs=extra_link_args)
# Ensure that the object exists
assert_(os.path.isfile(self._src2.replace('.c', '.o')))
finally:
os.chdir(previousDir)
HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", [])
@pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if "
"numpy is built with MKL support"))
def test_overrides(self):
previousDir = os.getcwd()
cfg = os.path.join(self._dir1, 'site.cfg')
shutil.copy(self._sitecfg, cfg)
try:
os.chdir(self._dir1)
# Check that the '[ALL]' section does not override
# missing values from other sections
info = mkl_info()
lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep)
assert info.get_lib_dirs() != lib_dirs
# But if we copy the values to a '[mkl]' section the value
# is correct
with open(cfg, 'r') as fid:
mkl = fid.read().replace('[ALL]', '[mkl]', 1)
with open(cfg, 'w') as fid:
fid.write(mkl)
info = mkl_info()
assert info.get_lib_dirs() == lib_dirs
# Also, the values will be taken from a section named '[DEFAULT]'
with open(cfg, 'r') as fid:
dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1)
with open(cfg, 'w') as fid:
fid.write(dflt)
info = mkl_info()
assert info.get_lib_dirs() == lib_dirs
finally:
os.chdir(previousDir)
def test_distutils_parse_env_order(monkeypatch):
from numpy.distutils.system_info import _parse_env_order
env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER'
base_order = list('abcdef')
monkeypatch.setenv(env, 'b,i,e,f')
order, unknown = _parse_env_order(base_order, env)
assert len(order) == 3
assert order == list('bef')
assert len(unknown) == 1
# For when LAPACK/BLAS optimization is disabled
monkeypatch.setenv(env, '')
order, unknown = _parse_env_order(base_order, env)
assert len(order) == 0
assert len(unknown) == 0
for prefix in '^!':
monkeypatch.setenv(env, f'{prefix}b,i,e')
order, unknown = _parse_env_order(base_order, env)
assert len(order) == 4
assert order == list('acdf')
assert len(unknown) == 1
with pytest.raises(ValueError):
monkeypatch.setenv(env, 'b,^e,i')
_parse_env_order(base_order, env)
with pytest.raises(ValueError):
monkeypatch.setenv(env, '!b,^e,i')
_parse_env_order(base_order, env)
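# A minimal sketch (assumed semantics, mirroring only the assertions above)
# of such an order-override variable: "b,e,f" keeps the listed known names in
# that order, while a single leading '^' or '!' excludes the listed names.
def _sketch_parse_order(base, value):
    tokens = [t for t in value.split(',') if t]
    negate = bool(tokens) and tokens[0][:1] in '^!'
    if negate:
        tokens = [tokens[0][1:]] + tokens[1:]
    if any(t[:1] in '^!' for t in tokens):
        raise ValueError("'^'/'!' may only prefix the first entry")
    unknown = [t for t in tokens if t not in base]
    if negate:
        return [b for b in base if b not in tokens], unknown
    return [t for t in tokens if t in base], unknown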
| 11,009 | Python | 32.981481 | 102 | 0.578708 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_fcompiler_intel.py | import numpy.distutils.fcompiler
from numpy.testing import assert_
intel_32bit_version_strings = [
("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"
"running on Intel(R) 32, Version 11.1", '11.1'),
]
intel_64bit_version_strings = [
("Intel(R) Fortran IA-64 Compiler Professional for applications"
"running on IA-64, Version 11.0", '11.0'),
("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"
"running on Intel(R) 64, Version 11.1", '11.1')
]
class TestIntelFCompilerVersions:
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
class TestIntelEM64TFCompilerVersions:
def test_64bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
for vs, version in intel_64bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
| 1,058 | Python | 33.161289 | 78 | 0.672023 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_fcompiler_nagfor.py | from numpy.testing import assert_
import numpy.distutils.fcompiler
nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release '
'6.2(Chiyoda) Build 6200', '6.2'),
('nagfor', 'NAG Fortran Compiler Release '
'6.1(Tozai) Build 6136', '6.1'),
('nagfor', 'NAG Fortran Compiler Release '
'6.0(Hibiya) Build 1021', '6.0'),
('nagfor', 'NAG Fortran Compiler Release '
'5.3.2(971)', '5.3.2'),
('nag', 'NAGWare Fortran 95 compiler Release 5.1'
'(347,355-367,375,380-383,389,394,399,401-402,407,'
'431,435,437,446,459-460,463,472,494,496,503,508,'
'511,517,529,555,557,565)', '5.1')]
class TestNagFCompilerVersions:
def test_version_match(self):
for comp, vs, version in nag_version_strings:
fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp)
v = fc.version_match(vs)
assert_(v == version)
| 1,102 | Python | 46.95652 | 75 | 0.50726 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_mingw32ccompiler.py | import shutil
import subprocess
import sys
import pytest
from numpy.distutils import mingw32ccompiler
@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test')
def test_build_import():
'''Test the mingw32ccompiler.build_import_library, which builds a
`python.a` from the MSVC `python.lib`
'''
# make sure `nm.exe` exists and supports the current python version. This
# can get mixed up when the PATH has a 64-bit nm but the python is 32-bit
try:
out = subprocess.check_output(['nm.exe', '--help'])
except FileNotFoundError:
pytest.skip("'nm.exe' not on path, is mingw installed?")
supported = out[out.find(b'supported targets:'):]
if sys.maxsize < 2**32:
if b'pe-i386' not in supported:
raise ValueError("'nm.exe' found but it does not support 32-bit "
"dlls when using 32-bit python. Supported "
"formats: '%s'" % supported)
elif b'pe-x86-64' not in supported:
raise ValueError("'nm.exe' found but it does not support 64-bit "
"dlls when using 64-bit python. Supported "
"formats: '%s'" % supported)
# Hide the import library to force a build
has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib()
if has_import_lib:
shutil.move(fullpath, fullpath + '.bak')
try:
# Whew, now we can actually test the function
mingw32ccompiler.build_import_library()
finally:
if has_import_lib:
shutil.move(fullpath + '.bak', fullpath)
| 1,609 | Python | 36.44186 | 77 | 0.61964 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_build_ext.py | '''Tests for numpy.distutils.build_ext.'''
import os
import subprocess
import sys
from textwrap import indent, dedent
import pytest
@pytest.mark.slow
def test_multi_fortran_libs_link(tmp_path):
'''
Ensures multiple "fake" static libraries are correctly linked.
see gh-18295
'''
# We need to make sure we actually have an f77 compiler.
# This is nontrivial, so we'll borrow the utilities
# from f2py tests:
from numpy.f2py.tests.util import has_f77_compiler
if not has_f77_compiler():
pytest.skip('No F77 compiler found')
# make some dummy sources
with open(tmp_path / '_dummy1.f', 'w') as fid:
fid.write(indent(dedent('''\
FUNCTION dummy_one()
RETURN
END FUNCTION'''), prefix=' '*6))
with open(tmp_path / '_dummy2.f', 'w') as fid:
fid.write(indent(dedent('''\
FUNCTION dummy_two()
RETURN
END FUNCTION'''), prefix=' '*6))
with open(tmp_path / '_dummy.c', 'w') as fid:
# doesn't need to load - just needs to exist
fid.write('int PyInit_dummyext;')
# make a setup file
with open(tmp_path / 'setup.py', 'w') as fid:
srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..')
fid.write(dedent(f'''\
def configuration(parent_package="", top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration("", parent_package, top_path)
config.add_library("dummy1", sources=["_dummy1.f"])
config.add_library("dummy2", sources=["_dummy2.f"])
config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"])
return config
if __name__ == "__main__":
import sys
sys.path.insert(0, r"{srctree}")
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())'''))
    # build the test extension and "install" it into a temporary directory
build_dir = tmp_path
subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
'--prefix', str(tmp_path / 'installdir'),
'--record', str(tmp_path / 'tmp_install_log.txt'),
],
cwd=str(build_dir),
)
# get the path to the so
so = None
with open(tmp_path /'tmp_install_log.txt') as fid:
for line in fid:
if 'dummyext' in line:
so = line.strip()
break
assert so is not None
| 2,664 | Python | 35.506849 | 102 | 0.546547 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_ccompiler_opt_conf.py | import unittest
from os import sys, path
is_standalone = __name__ == '__main__' and __package__ is None
if is_standalone:
sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
from ccompiler_opt import CCompilerOpt
else:
from numpy.distutils.ccompiler_opt import CCompilerOpt
arch_compilers = dict(
x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
ppc64 = ("gcc", "clang"),
ppc64le = ("gcc", "clang"),
armhf = ("gcc", "clang"),
aarch64 = ("gcc", "clang"),
narch = ("gcc",)
)
class FakeCCompilerOpt(CCompilerOpt):
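    # This fake short-circuits anything that would touch a real compiler:
    # dist_compile() returns the sources unchanged and dist_info() reports
    # the (arch, compiler, extra_args) triple currently under test.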
fake_info = ("arch", "compiler", "extra_args")
def __init__(self, *args, **kwargs):
CCompilerOpt.__init__(self, None, **kwargs)
def dist_compile(self, sources, flags, **kwargs):
return sources
def dist_info(self):
return FakeCCompilerOpt.fake_info
@staticmethod
def dist_log(*args, stderr=False):
pass
class _TestConfFeatures(FakeCCompilerOpt):
"""A hook to check the sanity of configured features
    - before it is called by the abstract class '_Feature'
"""
def conf_features_partial(self):
conf_all = self.conf_features
for feature_name, feature in conf_all.items():
self.test_feature(
"attribute conf_features",
conf_all, feature_name, feature
)
conf_partial = FakeCCompilerOpt.conf_features_partial(self)
for feature_name, feature in conf_partial.items():
self.test_feature(
"conf_features_partial()",
conf_partial, feature_name, feature
)
return conf_partial
def test_feature(self, log, search_in, feature_name, feature_dict):
error_msg = (
"during validate '{}' within feature '{}', "
"march '{}' and compiler '{}'\n>> "
).format(log, feature_name, self.cc_march, self.cc_name)
if not feature_name.isupper():
raise AssertionError(error_msg + "feature name must be in uppercase")
for option, val in feature_dict.items():
self.test_option_types(error_msg, option, val)
self.test_duplicates(error_msg, option, val)
self.test_implies(error_msg, search_in, feature_name, feature_dict)
self.test_group(error_msg, search_in, feature_name, feature_dict)
self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)
def test_option_types(self, error_msg, option, val):
for tp, available in (
((str, list), (
"implies", "headers", "flags", "group", "detect", "extra_checks"
)),
((str,), ("disable",)),
((int,), ("interest",)),
((bool,), ("implies_detect",)),
((bool, type(None)), ("autovec",)),
        ):
found_it = option in available
if not found_it:
continue
if not isinstance(val, tp):
error_tp = [t.__name__ for t in (*tp,)]
error_tp = ' or '.join(error_tp)
raise AssertionError(error_msg +
"expected '%s' type for option '%s' not '%s'" % (
error_tp, option, type(val).__name__
))
break
if not found_it:
raise AssertionError(error_msg + "invalid option name '%s'" % option)
def test_duplicates(self, error_msg, option, val):
if option not in (
"implies", "headers", "flags", "group", "detect", "extra_checks"
        ):
            return
if isinstance(val, str):
val = val.split()
if len(val) != len(set(val)):
raise AssertionError(error_msg + "duplicated values in option '%s'" % option)
def test_implies(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
return
implies = feature_dict.get("implies", "")
if not implies:
return
if isinstance(implies, str):
implies = implies.split()
if feature_name in implies:
raise AssertionError(error_msg + "feature implies itself")
for impl in implies:
impl_dict = search_in.get(impl)
if impl_dict is not None:
if "disable" in impl_dict:
raise AssertionError(error_msg + "implies disabled feature '%s'" % impl)
continue
raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl)
def test_group(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
return
group = feature_dict.get("group", "")
if not group:
return
if isinstance(group, str):
group = group.split()
for f in group:
impl_dict = search_in.get(f)
if not impl_dict or "disable" in impl_dict:
continue
raise AssertionError(error_msg +
"in option 'group', '%s' already exists as a feature name" % f
)
def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
return
extra_checks = feature_dict.get("extra_checks", "")
if not extra_checks:
return
if isinstance(extra_checks, str):
extra_checks = extra_checks.split()
for f in extra_checks:
impl_dict = search_in.get(f)
if not impl_dict or "disable" in impl_dict:
continue
raise AssertionError(error_msg +
"in option 'extra_checks', extra test case '%s' already exists as a feature name" % f
)
class TestConfFeatures(unittest.TestCase):
def __init__(self, methodName="runTest"):
unittest.TestCase.__init__(self, methodName)
self._setup()
def _setup(self):
FakeCCompilerOpt.conf_nocache = True
def test_features(self):
for arch, compilers in arch_compilers.items():
for cc in compilers:
FakeCCompilerOpt.fake_info = (arch, cc, "")
_TestConfFeatures()
if is_standalone:
unittest.main()
| 6,347 | Python | 34.864407 | 101 | 0.557271 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_fcompiler_gnu.py | from numpy.testing import assert_
import numpy.distutils.fcompiler
g77_version_strings = [
('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'),
('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'),
('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'),
('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'),
('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2'
' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'),
]
gfortran_version_strings = [
('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))',
'4.0.3'),
('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'),
('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'),
('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'),
('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'),
('4.8.0', '4.8.0'),
('4.0.3-7', '4.0.3'),
("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1",
'4.9.1'),
("gfortran: warning: couldn't understand kern.osversion '14.1.0\n"
"gfortran: warning: yet another warning\n4.9.1",
'4.9.1'),
('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0')
]
class TestG77Versions:
def test_g77_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, version in g77_version_strings:
v = fc.version_match(vs)
assert_(v == version, (vs, v))
def test_not_g77(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, _ in gfortran_version_strings:
v = fc.version_match(vs)
assert_(v is None, (vs, v))
class TestGFortranVersions:
def test_gfortran_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, version in gfortran_version_strings:
v = fc.version_match(vs)
assert_(v == version, (vs, v))
def test_not_gfortran(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, _ in g77_version_strings:
v = fc.version_match(vs)
assert_(v is None, (vs, v))
| 2,136 | Python | 37.160714 | 76 | 0.589888 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_shell_utils.py | import pytest
import subprocess
import json
import sys
from numpy.distutils import _shell_utils
argv_cases = [
[r'exe'],
[r'path/exe'],
[r'path\exe'],
[r'\\server\path\exe'],
[r'path to/exe'],
[r'path to\exe'],
[r'exe', '--flag'],
[r'path/exe', '--flag'],
[r'path\exe', '--flag'],
[r'path to/exe', '--flag'],
[r'path to\exe', '--flag'],
# flags containing literal quotes in their name
[r'path to/exe', '--flag-"quoted"'],
[r'path to\exe', '--flag-"quoted"'],
[r'path to/exe', '"--flag-quoted"'],
[r'path to\exe', '"--flag-quoted"'],
]
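# Note: WindowsParser and PosixParser implement platform-specific quoting;
# NativeParser (used by `runner` below) is whichever of the two matches the
# host OS, which is why non-native parsers are skipped in that fixture.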
@pytest.fixture(params=[
_shell_utils.WindowsParser,
_shell_utils.PosixParser
])
def Parser(request):
return request.param
@pytest.fixture
def runner(Parser):
if Parser != _shell_utils.NativeParser:
pytest.skip('Unable to run with non-native parser')
if Parser == _shell_utils.WindowsParser:
return lambda cmd: subprocess.check_output(cmd)
elif Parser == _shell_utils.PosixParser:
# posix has no non-shell string parsing
return lambda cmd: subprocess.check_output(cmd, shell=True)
else:
raise NotImplementedError
@pytest.mark.parametrize('argv', argv_cases)
def test_join_matches_subprocess(Parser, runner, argv):
"""
Test that join produces strings understood by subprocess
"""
# invoke python to return its arguments as json
cmd = [
sys.executable, '-c',
'import json, sys; print(json.dumps(sys.argv[1:]))'
]
joined = Parser.join(cmd + argv)
json_out = runner(joined).decode()
assert json.loads(json_out) == argv
@pytest.mark.parametrize('argv', argv_cases)
def test_roundtrip(Parser, argv):
"""
Test that split is the inverse operation of join
"""
try:
joined = Parser.join(argv)
assert argv == Parser.split(joined)
except NotImplementedError:
pytest.skip("Not implemented")
| 1,954 | Python | 24.38961 | 67 | 0.622313 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_misc_util.py | from os.path import join, sep, dirname
from numpy.distutils.misc_util import (
appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
)
from numpy.testing import (
assert_, assert_equal
)
ajoin = lambda *paths: join(*((sep,)+paths))
class TestAppendpath:
def test_1(self):
assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))
assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))
assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))
def test_2(self):
assert_equal(appendpath('prefix/sub', 'name'),
join('prefix', 'sub', 'name'))
assert_equal(appendpath('prefix/sub', 'sup/name'),
join('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub', '/prefix/name'),
ajoin('prefix', 'sub', 'name'))
def test_3(self):
assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),
ajoin('prefix', 'sub', 'sup', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
class TestMinrelpath:
def test_1(self):
n = lambda path: path.replace('/', sep)
assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))
assert_equal(minrelpath('..'), '..')
assert_equal(minrelpath(n('aa/..')), '')
assert_equal(minrelpath(n('aa/../bb')), 'bb')
assert_equal(minrelpath(n('aa/bb/..')), 'aa')
assert_equal(minrelpath(n('aa/bb/../..')), '')
assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))
assert_equal(minrelpath(n('.././..')), n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
class TestGpaths:
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__), '..'))
ls = gpaths('command/*.py', local_path)
assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))
f = gpaths('system_info.py', local_path)
assert_(join(local_path, 'system_info.py') == f[0], repr(f))
class TestSharedExtension:
def test_get_shared_lib_extension(self):
import sys
ext = get_shared_lib_extension(is_python_ext=False)
if sys.platform.startswith('linux'):
assert_equal(ext, '.so')
elif sys.platform.startswith('gnukfreebsd'):
assert_equal(ext, '.so')
elif sys.platform.startswith('darwin'):
assert_equal(ext, '.dylib')
elif sys.platform.startswith('win'):
assert_equal(ext, '.dll')
# just check for no crash
assert_(get_shared_lib_extension(is_python_ext=True))
def test_installed_npymath_ini():
# Regression test for gh-7707. If npymath.ini wasn't installed, then this
# will give an error.
info = get_info('npymath')
assert isinstance(info, dict)
assert "define_macros" in info
| 3,218 | Python | 37.783132 | 78 | 0.569608 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_fcompiler.py | from numpy.testing import assert_
import numpy.distutils.fcompiler
customizable_flags = [
('f77', 'F77FLAGS'),
('f90', 'F90FLAGS'),
('free', 'FREEFLAGS'),
('arch', 'FARCH'),
('debug', 'FDEBUG'),
('flags', 'FFLAGS'),
('linker_so', 'LDFLAGS'),
]
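# Behavior under test: with NPY_DISTUTILS_APPEND_FLAGS=0 an environment
# variable such as FFLAGS *replaces* the compiler's default flags; with the
# default value of 1 it is appended to them instead.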
def test_fcompiler_flags(monkeypatch):
monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0')
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
monkeypatch.delenv(envvar)
assert_(new_flags == [new_flag])
monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1')
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
monkeypatch.delenv(envvar)
if prev_flags is None:
assert_(new_flags == [new_flag])
else:
assert_(new_flags == prev_flags + [new_flag])
| 1,277 | Python | 28.045454 | 65 | 0.60924 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/distutils/tests/test_npy_pkg_config.py | import os
from numpy.distutils.npy_pkg_config import read_config, parse_flags
from numpy.testing import temppath, assert_
simple = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[default]
cflags = -I/usr/include
libs = -L/usr/lib
"""
simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib',
'version': '0.1', 'name': 'foo'}
simple_variable = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[variables]
prefix = /foo/bar
libdir = ${prefix}/lib
includedir = ${prefix}/include
[default]
cflags = -I${includedir}
libs = -L${libdir}
"""
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
'version': '0.1', 'name': 'foo'}
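# In the variable form above, ${prefix}-style references from the [variables]
# section are expanded when the config is read, so cflags/libs resolve against
# prefix=/foo/bar as captured in simple_variable_d; test_simple_variable also
# shows that reassigning out.vars['prefix'] re-resolves them.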
class TestLibraryInfo:
def test_simple(self):
with temppath('foo.ini') as path:
with open(path, 'w') as f:
f.write(simple)
pkg = os.path.splitext(path)[0]
out = read_config(pkg)
assert_(out.cflags() == simple_d['cflags'])
assert_(out.libs() == simple_d['libflags'])
assert_(out.name == simple_d['name'])
assert_(out.version == simple_d['version'])
def test_simple_variable(self):
with temppath('foo.ini') as path:
with open(path, 'w') as f:
f.write(simple_variable)
pkg = os.path.splitext(path)[0]
out = read_config(pkg)
assert_(out.cflags() == simple_variable_d['cflags'])
assert_(out.libs() == simple_variable_d['libflags'])
assert_(out.name == simple_variable_d['name'])
assert_(out.version == simple_variable_d['version'])
out.vars['prefix'] = '/Users/david'
assert_(out.cflags() == '-I/Users/david/include')
class TestParseFlags:
def test_simple_cflags(self):
d = parse_flags("-I/usr/include")
assert_(d['include_dirs'] == ['/usr/include'])
d = parse_flags("-I/usr/include -DFOO")
assert_(d['include_dirs'] == ['/usr/include'])
assert_(d['macros'] == ['FOO'])
d = parse_flags("-I /usr/include -DFOO")
assert_(d['include_dirs'] == ['/usr/include'])
assert_(d['macros'] == ['FOO'])
def test_simple_lflags(self):
d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
assert_(d['libraries'] == ['foo', 'bar'])
d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
assert_(d['libraries'] == ['foo', 'bar'])
| 2,557 | Python | 29.094117 | 82 | 0.559249 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/doc/constants.py | """
=========
Constants
=========
.. currentmodule:: numpy
NumPy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
import re
import textwrap
# Maintain same format as in numpy.add_newdocs
constants = []
def add_newdoc(module, name, doc):
constants.append((name, doc))
add_newdoc('numpy', 'pi',
"""
``pi = 3.1415926535897932384626433...``
References
----------
https://en.wikipedia.org/wiki/Pi
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
""")
add_newdoc('numpy', 'euler_gamma',
"""
``γ = 0.5772156649015328606065120900824024310421...``
References
----------
https://en.wikipedia.org/wiki/Euler-Mascheroni_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity,
    and that positive infinity is not equivalent to negative infinity; but
    infinity is equivalent to positive infinity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True])
>>> np.isnan([np.NZERO])
array([False])
>>> np.isinf([np.NZERO])
array([False])
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True])
>>> np.isnan([np.PZERO])
array([False])
>>> np.isinf([np.PZERO])
array([False])
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity,
    and that positive infinity is not equivalent to negative infinity; but
    infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
if __doc__:
constants_str = []
constants.sort()
for name, doc in constants:
s = textwrap.dedent(doc).replace("\n", "\n ")
# Replace sections by rubrics
lines = s.split("\n")
new_lines = []
for line in lines:
m = re.match(r'^(\s+)[-=]+\s*$', line)
if m and new_lines:
prev = textwrap.dedent(new_lines.pop())
new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
new_lines.append('')
else:
new_lines.append(line)
s = "\n".join(new_lines)
# Done.
constants_str.append(""".. data:: %s\n %s""" % (name, s))
constants_str = "\n".join(constants_str)
__doc__ = __doc__ % dict(constant_list=constants_str)
del constants_str, name, doc
del line, lines, new_lines, m, s, prev
del constants, add_newdoc
| 9,154 | Python | 21.16707 | 75 | 0.582915 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/doc/__init__.py | import os
ref_dir = os.path.join(os.path.dirname(__file__))
__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and
not f.startswith('__'))
for f in __all__:
__import__(__name__ + '.' + f)
del f, ref_dir
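# Importing every submodule above is what makes ``help(np.doc.<topic>)``
# work: each topic module's docstring *is* the documentation.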
__doc__ = """\
Topical documentation
=====================
The following topics are available:
%s
You can view them by
>>> help(np.doc.TOPIC) #doctest: +SKIP
""" % '\n- '.join([''] + __all__)
__all__.extend(['__doc__'])
| 508 | Python | 17.851851 | 77 | 0.488189 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/doc/ufuncs.py | """
===================
Universal Functions
===================
Ufuncs are, generally speaking, mathematical functions or operations that are
applied element-by-element to the contents of an array. That is, the result
in each output array element only depends on the value in the corresponding
input array (or arrays) and on no other array elements. NumPy comes with a
large suite of ufuncs, and scipy extends that suite substantially. The simplest
example is the addition operator: ::
>>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
array([1, 3, 2, 6])
The ufunc module lists all the available ufuncs in numpy. Documentation on
the specific ufuncs may be found in those modules. This documentation is
intended to address the more general aspects of ufuncs common to most of
them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
have equivalent functions defined (e.g. add() for +)
Type coercion
=============
What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of
two different types? What is the type of the result? Typically, the result is
the higher of the two types. For example: ::
float32 + float64 -> float64
int8 + int32 -> int32
int16 + float32 -> float32
float32 + complex64 -> complex64
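These rules can be checked directly, e.g.::

  >>> (np.ones(2, dtype=np.float32) + np.ones(2, dtype=np.float64)).dtype
  dtype('float64')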
There are some less obvious cases generally involving mixes of types
(e.g. uints, ints and floats) where equal bit sizes for each are not
capable of saving all the information in a different type of equivalent
bit size. Some examples are int32 vs float32 or uint32 vs int32.
Generally, the result is the higher type of larger size than both
(if available). So: ::
int32 + float32 -> float64
uint32 + int32 -> int64
Finally, the type coercion behavior when expressions involve Python
scalars is different than that seen for arrays. Since Python has a
limited number of types, combining a Python int with a dtype=np.int8
array does not coerce to the higher type but instead, the type of the
array prevails. So the rules for Python scalars combined with arrays is
that the result will be that of the array equivalent the Python scalar
if the Python scalar is of a higher 'kind' than the array (e.g., float
vs. int), otherwise the resultant type will be that of the array.
For example: ::
Python int + int8 -> int8
Python float + int8 -> float64
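For instance (NumPy 1.x value-based promotion is assumed here)::

  >>> (np.ones(2, dtype=np.int8) + 3).dtype
  dtype('int8')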
ufunc methods
=============
Binary ufuncs support 4 methods.
**.reduce(arr)** applies the binary operator to elements of the array in
sequence. For example: ::
>>> np.add.reduce(np.arange(10)) # adds all elements of array
45
For multidimensional arrays, the first dimension is reduced by default: ::
>>> np.add.reduce(np.arange(10).reshape(2,5))
array([ 5, 7, 9, 11, 13])
The axis keyword can be used to specify different axes to reduce: ::
>>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
array([10, 35])
**.accumulate(arr)** applies the binary operator and generates an
equivalently shaped array that includes the accumulated amount for each
element of the array. A couple examples: ::
>>> np.add.accumulate(np.arange(10))
array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
>>> np.multiply.accumulate(np.arange(1,9))
array([ 1, 2, 6, 24, 120, 720, 5040, 40320])
The behavior for multidimensional arrays is the same as for .reduce(),
as is the use of the axis keyword.
**.reduceat(arr,indices)** allows one to apply reduce to selected parts
of an array. It is a difficult method to understand; see the
ufunc.reduceat documentation for the full details.
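A short illustration::

  >>> np.add.reduceat(np.arange(8), [0, 4, 6])
  array([ 6,  9, 13])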
**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
arr2. It will work on multidimensional arrays (the shape of the result is
the concatenation of the two input shapes): ::
>>> np.multiply.outer(np.arange(3),np.arange(4))
array([[0, 0, 0, 0],
[0, 1, 2, 3],
[0, 2, 4, 6]])
Output arguments
================
All ufuncs accept an optional output array. The array must be of the expected
output shape. Beware that if the output array is of a different (and lower)
type than the computed result, the values may be silently truncated
or otherwise corrupted in the downcast to the lower type. This usage is useful
when one wants to avoid creating large temporary arrays and instead allows one
to reuse the same array memory repeatedly (at the expense of not being able to
use more convenient operator notation in expressions). Note that when the
output argument is used, the ufunc still returns a reference to the result.
>>> x = np.arange(2)
>>> np.add(np.arange(2),np.arange(2.),x)
array([0, 2])
>>> x
array([0, 2])
and & or as ufuncs
==================
Invariably people try to use the python 'and' and 'or' as logical operators
(and quite understandably). But these operators do not behave as normal
operators since Python treats these quite differently. They cannot be
overloaded with array equivalents. Thus using 'and' or 'or' with an array
results in an error. There are two alternatives:
1) use the ufunc functions logical_and() and logical_or().
2) use the bitwise operators & and \\|. The drawback of these is that if
the arguments to these operators are not boolean arrays, the result is
likely incorrect. On the other hand, most usages of logical_and and
logical_or are with boolean arrays. As long as one is careful, this is
a convenient way to apply these operators.
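For example::

  >>> a = np.array([True, False])
  >>> b = np.array([True, True])
  >>> np.logical_and(a, b)
  array([ True, False])
  >>> a & b
  array([ True, False])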
"""
| 5,357 | Python | 37.826087 | 79 | 0.720179 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/multiarray.py | """
Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.
"""
import functools
from . import overrides
from . import _multiarray_umath
from ._multiarray_umath import * # noqa: F403
# These imports are needed for backward compatibility,
# do not change them. issue gh-15518
# _get_ndarray_c_version is semi-public, on purpose not added to __all__
from ._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, from_dlpack, _insert, _reconstruct,
_vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version,
_get_madvise_hugepage, _set_madvise_hugepage,
)
__all__ = [
'_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
'_flagdict', 'from_dlpack', '_insert', '_reconstruct', '_vec_string',
'_monotonicity', 'add_docstring', 'arange', 'array', 'asarray',
'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount',
'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
'frombuffer', 'fromfile', 'fromiter', 'fromstring',
'get_handler_name', 'get_handler_version', 'inner', 'interp',
'interp_complex', 'is_busday', 'lexsort', 'matmul', 'may_share_memory',
'min_scalar_type', 'ndarray', 'nditer', 'nested_iters',
'normalize_axis_index', 'packbits', 'promote_types', 'putmask',
'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function',
'set_legacy_print_mode', 'set_numeric_ops', 'set_string_function',
'set_typeDict', 'shares_memory', 'tracemalloc_domain', 'typeinfo',
'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
scalar.__module__ = 'numpy.core.multiarray'
from_dlpack.__module__ = 'numpy'
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
asarray.__module__ = 'numpy'
asanyarray.__module__ = 'numpy'
ascontiguousarray.__module__ = 'numpy'
asfortranarray.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
geterrobj.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'
# We can't verify dispatcher signatures because NumPy's C functions don't
# support introspection.
array_function_from_c_func_and_dispatcher = functools.partial(
overrides.array_function_from_dispatcher,
module='numpy', docs_from_dispatcher=True, verify=False)
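# Each function below is a dispatcher: its body returns only the tuple of
# array-like arguments that may implement __array_function__; the wrapped C
# function does the real work, and these docstrings are attached to it
# (docs_from_dispatcher=True above).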
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
"""
empty_like(prototype, dtype=None, order='K', subok=True, shape=None)
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : array_like
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `prototype` is Fortran
contiguous, 'C' otherwise. 'K' means match the layout of `prototype`
as closely as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `prototype`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], # uninitialized
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
"""
concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
dtype : str or dtype
If provided, the destination array will have this dtype. Cannot be
provided together with `out`.
.. versionadded:: 1.20.0
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'same_kind'.
.. versionadded:: 1.20.0
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
block : Assemble arrays from blocks.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
column_stack : Stack 1-D arrays as columns into a 2-D array.
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
>>> np.concatenate((a, b), axis=None)
array([1, 2, 3, 4, 5, 6])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data=[0, --, 2],
mask=[False, True, False],
fill_value=999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data=[0, 1, 2, 2, 3, 4],
mask=False,
fill_value=999999)
>>> np.ma.concatenate([a, b])
masked_array(data=[0, --, 2, 2, 3, 4],
mask=[False, True, False, False, False, False],
fill_value=999999)
"""
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
"""
inner(a, b, /)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
``out.shape = (*a.shape[:-1], *b.shape[:-1])``
Raises
------
ValueError
If both `a` and `b` are nonscalar and their last dimensions have
different sizes.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-2,j0,...,js-2]
= sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
Some multidimensional examples:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> c = np.inner(a, b)
>>> c.shape
(2, 3)
>>> c
array([[ 14, 38, 62],
[ 86, 110, 134]])
>>> a = np.arange(2).reshape((1,1,2))
>>> b = np.arange(6).reshape((3,2))
>>> c = np.inner(a, b)
>>> c.shape
(1, 1, 3)
>>> c
array([[[1, 3, 5]]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[7., 0.],
[0., 7.]])
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
"""
where(condition, [x, y], /)
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
documentation covers only the case where all three arguments are
provided.
Parameters
----------
condition : array_like, bool
Where True, yield `x`, otherwise yield `y`.
x, y : array_like
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
See Also
--------
choose
nonzero : The function that is called when x and y are omitted
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.where(a < 5, a, 10*a)
array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
This can be used on multidimensional arrays too:
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = np.ogrid[:3, :4]
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]])
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0, 1, 2],
[ 0, 2, -1],
[ 0, 3, -1]])
"""
return (condition, x, y)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
def lexsort(keys, axis=None):
"""
lexsort(keys, axis=-1)
Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> ind
array([2, 0, 4, 6, 5, 3, 1])
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
"""
if isinstance(keys, tuple):
return keys
else:
return (keys,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
"""
can_cast(from_, to, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from_ : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
to : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
.. versionchanged:: 1.17.0
Casting between a simple data type and a structured one is possible only
for "unsafe" casting. Casting to multiple fields is allowed, but
casting from multiple fields is not.
.. versionchanged:: 1.9.0
Casting from numeric to string types in 'safe' casting mode requires
that the string dtype length is long enough to store the maximum
integer/float value converted.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, complex)
True
>>> np.can_cast(complex, float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
"""
return (from_,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
"""
min_scalar_type(a, /)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
    array ``a``, returns the array's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
    kind of all the arrays and the scalars is.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
"""
return arrays_and_dtypes
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
"""
dot(a, b, out=None)
Dot product of two arrays. Specifically,
- If both `a` and `b` are 1-D arrays, it is inner product of vectors
(without complex conjugation).
- If both `a` and `b` are 2-D arrays, it is matrix multiplication,
but using :func:`matmul` or ``a @ b`` is preferred.
- If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
- If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
- If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
linalg.multi_dot : Chained dot product.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
"""
return (a, b, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
def vdot(a, b):
"""
vdot(a, b, /)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
def bincount(x, weights=None, minlength=None):
"""
bincount(x, /, weights=None, minlength=0)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
    value ``n`` is found at position ``i``, ``out[n] += weights[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
...
TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
according to the rule 'safe'
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
return (x, weights)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
"""
return multi_index
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None):
"""
unravel_index(indices, shape, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``shape``. Before version 1.6.0,
this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling ``indices``.
.. versionchanged:: 1.16.0
Renamed from ``dims`` to ``shape``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
"""
return (indices,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
"""
copyto(dst, src, casting='same_kind', where=True)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
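    Examples
    --------
    >>> A = np.array([4, 5, 6])
    >>> B = [1, 2, 3]
    >>> np.copyto(A, B)
    >>> A
    array([1, 2, 3])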
"""
return (dst, src, where)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
def putmask(a, mask, values):
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : ndarray
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
"""
return (a, mask, values)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def packbits(a, axis=None, bitorder='big'):
"""
packbits(a, /, axis=None, bitorder='big')
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
a : array_like
An array of integers or booleans whose elements should be packed to
bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
bitorder : {'big', 'little'}, optional
The order of the input bits. 'big' will mimic bin(val),
``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
Defaults to 'big'.
.. versionadded:: 1.17.0
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],
[ 64]],
[[192],
[ 32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
"""
unpackbits(a, /, axis=None, count=None, bitorder='big')
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `a` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is
either 1-D (if `axis` is ``None``) or the same shape as the input
array with unpacking done along the axis specified.
Parameters
----------
a : ndarray, uint8 type
Input array.
axis : int, optional
The dimension over which bit-unpacking is done.
``None`` implies unpacking the flattened array.
count : int or None, optional
The number of elements to unpack along `axis`, provided as a way
of undoing the effect of packing a size that is not a multiple
of eight. A non-negative number means to only unpack `count`
bits. A negative number means to trim off that many bits from
the end. ``None`` means to unpack the entire array (the
default). Counts larger than the available number of bits will
add zero padding to the output. Negative counts must not
exceed the available number of bits.
.. versionadded:: 1.17.0
bitorder : {'big', 'little'}, optional
The order of the returned bits. 'big' will mimic bin(val),
``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
Defaults to 'big'.
.. versionadded:: 1.17.0
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in
a uint8 array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
>>> c = np.unpackbits(a, axis=1, count=-3)
>>> c
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0]], dtype=uint8)
>>> p = np.packbits(b, axis=0)
>>> np.unpackbits(p, axis=0)
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))
True
"""
return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
def shares_memory(a, b, max_work=None):
"""
shares_memory(a, b, /, max_work=None)
Determine if two arrays share memory.
.. warning::
This function can be exponentially slow for some inputs, unless
`max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``.
If in doubt, use `numpy.may_share_memory` instead.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays. Finding
the exact solution may take extremely long in some cases.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> np.shares_memory(x, np.array([5, 6, 7]))
False
>>> np.shares_memory(x[::2], x)
True
>>> np.shares_memory(x[::2], x[1::2])
False
Checking whether two arrays share memory is NP-complete, and
runtime may increase exponentially in the number of
dimensions. Hence, `max_work` should generally be set to a finite
number, as it is possible to construct examples that take
extremely long to run:
>>> from numpy.lib.stride_tricks import as_strided
>>> x = np.zeros([192163377], dtype=np.int8)
>>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
>>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
>>> np.shares_memory(x1, x2, max_work=1000)
Traceback (most recent call last):
...
numpy.TooHardError: Exceeded max_work
Running ``np.shares_memory(x1, x2)`` without `max_work` set takes
    around 1 minute for this case. It is possible to construct problems
    that take significantly longer still.
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
def may_share_memory(a, b, max_work=None):
"""
may_share_memory(a, b, /, max_work=None)
    Determine if two arrays might share memory.
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
"""
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar : An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True])
"""
return (dates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar : An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23')
"""
return (dates, offsets, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
def busday_count(begindates, enddates, weekmask=None, holidays=None,
busdaycal=None, out=None):
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar : An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
>>> np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
"""
return (begindates, enddates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(
_multiarray_umath.datetime_as_string)
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
"""
datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
Convert an array of datetimes into an array of strings.
Parameters
----------
arr : array_like of datetime64
The array of UTC timestamps to format.
unit : str
One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
timezone : {'naive', 'UTC', 'local'} or tzinfo
Timezone information to use when displaying the datetime. If 'UTC', end
with a Z to indicate UTC time. If 'local', convert to the local timezone
first, and suffix with a +-#### timezone offset. If a tzinfo object,
then do as with 'local', but use the specified timezone.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
Casting to allow when changing between datetime units.
Returns
-------
str_arr : ndarray
An array of strings the same shape as `arr`.
Examples
--------
>>> import pytz
>>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
>>> d
array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
'2002-10-27T07:30'], dtype='datetime64[m]')
Setting the timezone to UTC shows the same information, but with a Z suffix
>>> np.datetime_as_string(d, timezone='UTC')
array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
'2002-10-27T07:30Z'], dtype='<U35')
Note that we picked datetimes that cross a DST boundary. Passing in a
``pytz`` timezone object will print the appropriate offset
>>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
'2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
Passing in a unit will change the precision
>>> np.datetime_as_string(d, unit='h')
array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
dtype='<U32')
>>> np.datetime_as_string(d, unit='s')
array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
'2002-10-27T07:30:00'], dtype='<U38')
'casting' can be used to specify whether precision can be changed
>>> np.datetime_as_string(d, unit='h', casting='safe')
Traceback (most recent call last):
...
TypeError: Cannot create a datetime string as units 'h' from a NumPy
datetime with units 'm' according to the rule 'safe'
"""
return (arr,)
| 55,434 | Python | 31.782377 | 128 | 0.604449 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/setup_common.py | # Code common to build tools
import copy
import pathlib
import sys
import textwrap
import warnings
from numpy.distutils.misc_util import mingw32
#-------------------
# Versioning support
#-------------------
# How to change C_API_VERSION ?
# - increase C_API_VERSION value
# - record the hash for the new C API with the cversions.py script
# and add the hash to cversions.txt
# The hash values are used to remind developers when the C API number was not
# updated - generates a MismatchCAPIWarning warning which is turned into an
# exception for released version.
# Binary compatibility version number. This number is increased whenever the
# C-API is changed such that binary compatibility is broken, i.e. whenever a
# recompile of extension modules is needed.
C_ABI_VERSION = 0x01000009
# Minor API version. This number is increased whenever a change is made to the
# C-API -- whether it breaks binary compatibility or not. Some changes, such
# as adding a function pointer to the end of the function table, can be made
# without breaking binary compatibility. In this case, only the C_API_VERSION
# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
#
# 0x00000008 - 1.7.x
# 0x00000009 - 1.8.x
# 0x00000009 - 1.9.x
# 0x0000000a - 1.10.x
# 0x0000000a - 1.11.x
# 0x0000000a - 1.12.x
# 0x0000000b - 1.13.x
# 0x0000000c - 1.14.x
# 0x0000000c - 1.15.x
# 0x0000000d - 1.16.x
# 0x0000000d - 1.19.x
# 0x0000000e - 1.20.x
# 0x0000000e - 1.21.x
# 0x0000000f - 1.22.x
# 0x00000010 - 1.23.x
C_API_VERSION = 0x00000010
class MismatchCAPIWarning(Warning):
pass
def get_api_versions(apiversion, codegen_dir):
"""
Return current C API checksum and the recorded checksum.
Return current C API checksum and the recorded checksum for the given
version of the C API version.
"""
# Compute the hash of the current API as defined in the .txt files in
# code_generators
sys.path.insert(0, codegen_dir)
try:
m = __import__('genapi')
numpy_api = __import__('numpy_api')
curapi_hash = m.fullapi_hash(numpy_api.full_api)
apis_hash = m.get_versions_hash()
finally:
del sys.path[0]
return curapi_hash, apis_hash[apiversion]
def check_api_version(apiversion, codegen_dir):
"""Emits a MismatchCAPIWarning if the C API version needs updating."""
curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
# If different hash, it means that the api .txt files in
# codegen_dir have been updated without the API version being
# updated. Any modification in those .txt files should be reflected
# in the api and eventually abi versions.
# To compute the checksum of the current API, use numpy/core/cversions.py
if not curapi_hash == api_hash:
msg = ("API mismatch detected, the C API version "
"numbers have to be updated. Current C api version is %d, "
"with checksum %s, but recorded checksum for C API version %d "
"in core/codegen_dir/cversions.txt is %s. If functions were "
"added in the C API, you have to update C_API_VERSION in %s."
)
warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
__file__),
MismatchCAPIWarning, stacklevel=2)
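# Hedged usage sketch (directory illustrative): numpy's core setup script
# is expected to call this before code generation, e.g.
#
#     check_api_version(C_API_VERSION, 'numpy/core/code_generators')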
FUNC_CALL_ARGS = {}
def set_sig(sig):
prefix, _, args = sig.partition("(")
args = args.rpartition(")")[0]
funcname = prefix.rpartition(" ")[-1]
args = [arg.strip() for arg in args.split(",")]
FUNC_CALL_ARGS[funcname] = ", ".join("(%s) 0" % arg for arg in args)
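# Worked example (illustrative): a prototype line such as
#     "double exp(double)"
# stores FUNC_CALL_ARGS["exp"] = "(double) 0", i.e. a comma-separated
# string of zero-valued casts matching the declared argument types, for
# use in the compile-time feature checks.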
for file in [
"feature_detection_locale.h",
"feature_detection_math.h",
"feature_detection_misc.h",
"feature_detection_stdio.h",
]:
with open(pathlib.Path(__file__).parent / file) as f:
for line in f:
if line.startswith("#"):
continue
if not line.strip():
continue
set_sig(line)
# Mandatory functions: if not found, fail the build
MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
"floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
"acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
"rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
"copysign", "nextafter", "strtoll", "strtoull", "cbrt"]
OPTIONAL_LOCALE_FUNCS = ["strtold_l"]
OPTIONAL_FILE_FUNCS = ["ftello", "fseeko", "fallocate"]
OPTIONAL_MISC_FUNCS = ["backtrace", "madvise"]
OPTIONAL_HEADERS = [
# sse headers only enabled automatically on amd64/x32 builds
"xmmintrin.h", # SSE
"emmintrin.h", # SSE2
"immintrin.h", # AVX
"features.h", # for glibc version linux
"xlocale.h", # see GH#8367
"dlfcn.h", # dladdr
"execinfo.h", # backtrace
"libunwind.h", # backtrace for LLVM/Clang using libunwind
"sys/mman.h", #madvise
]
# optional gcc compiler builtins and their call arguments and optional a
# required header and definition name (HAVE_ prepended)
# call arguments are required as the compiler will do strict signature checking
OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
("__builtin_isinf", '5.'),
("__builtin_isfinite", '5.'),
("__builtin_bswap32", '5u'),
("__builtin_bswap64", '5u'),
("__builtin_expect", '5, 0'),
("__builtin_mul_overflow", '5, 5, (int*)5'),
# MMX only needed for icc, but some clangs don't have it
("_m_from_int64", '0', "emmintrin.h"),
("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
"xmmintrin.h"), # SSE
("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
("__builtin_prefetch", "(float*)0, 0, 3"),
# check that the linker can handle avx
("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"',
"stdio.h", "LINK_AVX"),
("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"',
"stdio.h", "LINK_AVX2"),
("__asm__ volatile", '"vpaddd %zmm1, %zmm2, %zmm3"',
"stdio.h", "LINK_AVX512F"),
("__asm__ volatile", '"vfpclasspd $0x40, %zmm15, %k6\\n"\
"vmovdqu8 %xmm0, %xmm1\\n"\
"vpbroadcastmb2q %k0, %xmm0\\n"',
"stdio.h", "LINK_AVX512_SKX"),
("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"),
]
# function attributes
# tested via "int %s %s(void *);" % (attribute, name)
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
'attribute_optimize_unroll_loops'),
('__attribute__((optimize("O3")))',
'attribute_optimize_opt_3'),
('__attribute__((optimize("O2")))',
'attribute_optimize_opt_2'),
('__attribute__((nonnull (1)))',
'attribute_nonnull'),
('__attribute__((target ("avx")))',
'attribute_target_avx'),
('__attribute__((target ("avx2")))',
'attribute_target_avx2'),
('__attribute__((target ("avx512f")))',
'attribute_target_avx512f'),
('__attribute__((target ("avx512f,avx512dq,avx512bw,avx512vl,avx512cd")))',
'attribute_target_avx512_skx'),
]
# function attributes with intrinsics
# To ensure your compiler can compile avx intrinsics with just the attributes
# (gcc 4.8.4 supports the attributes but not the intrinsics)
# tested via "#include<%s> int %s %s(void *){%s; return 0;};" % (header, attribute, name, code)
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
# The _mm512_castps_si512 instruction is specific check for AVX-512F support
# in gcc-4.9 which is missing a subset of intrinsics. See
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61878
OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))',
'attribute_target_avx2_with_intrinsics',
'__m256 temp = _mm256_set1_ps(1.0); temp = \
_mm256_fmadd_ps(temp, temp, temp)',
'immintrin.h'),
('__attribute__((target("avx512f")))',
'attribute_target_avx512f_with_intrinsics',
'__m512i temp = _mm512_castps_si512(_mm512_set1_ps(1.0))',
'immintrin.h'),
('__attribute__((target ("avx512f,avx512dq,avx512bw,avx512vl,avx512cd")))',
'attribute_target_avx512_skx_with_intrinsics',
'__mmask8 temp = _mm512_fpclass_pd_mask(_mm512_set1_pd(1.0), 0x01);\
__m512i unused_temp = \
_mm512_castps_si512(_mm512_set1_ps(1.0));\
_mm_mask_storeu_epi8(NULL, 0xFF, _mm_broadcastmb_epi64(temp))',
'immintrin.h'),
]
# variable attributes tested via "int %s a" % attribute
OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
OPTIONAL_STDFUNCS_MAYBE = [
"expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign",
"ftello", "fseeko"
]
# C99 functions: float and long double versions
C99_FUNCS = [
"sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil",
"rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1",
"asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2",
"pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign",
"nextafter", "cbrt"
]
C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
C99_COMPLEX_TYPES = [
'complex double', 'complex float', 'complex long double'
]
C99_COMPLEX_FUNCS = [
"cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan",
"catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow",
"cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh"
]
def fname2def(name):
return "HAVE_%s" % name.upper()
def sym2def(symbol):
define = symbol.replace(' ', '')
return define.upper()
def type2def(symbol):
define = symbol.replace(' ', '_')
return define.upper()
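# Illustrative mappings produced by the three helpers above:
#     fname2def("expm1")         -> "HAVE_EXPM1"
#     sym2def("complex double")  -> "COMPLEXDOUBLE"
#     type2def("complex double") -> "COMPLEX_DOUBLE"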
# Code to detect long double representation taken from MPFR m4 macro
def check_long_double_representation(cmd):
cmd._check_compiler()
body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
# Disable whole program optimization (the default on vs2015, with python 3.5+)
# which generates intermediary object files and prevents checking the
# float representation.
if sys.platform == "win32" and not mingw32():
try:
cmd.compiler.compile_options.remove("/GL")
except (AttributeError, ValueError):
pass
# Disable multi-file interprocedural optimization in the Intel compiler on Linux
# which generates intermediary object files and prevents checking the
# float representation.
elif (sys.platform != "win32"
and cmd.compiler.compiler_type.startswith('intel')
and '-ipo' in cmd.compiler.cc_exe):
newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '')
cmd.compiler.set_executables(
compiler=newcompiler,
compiler_so=newcompiler,
compiler_cxx=newcompiler,
linker_exe=newcompiler,
linker_so=newcompiler + ' -shared'
)
# We need to use _compile because we need the object filename
src, obj = cmd._compile(body, None, None, 'c')
try:
ltype = long_double_representation(pyod(obj))
return ltype
except ValueError:
# try linking to support CC="gcc -flto" or icc -ipo
# struct needs to be volatile so it isn't optimized away
# additionally "clang -flto" requires the foo struct to be used
body = body.replace('struct', 'volatile struct')
body += "int main(void) { return foo.before[0]; }\n"
src, obj = cmd._compile(body, None, None, 'c')
cmd.temp_files.append("_configtest")
cmd.compiler.link_executable([obj], "_configtest")
ltype = long_double_representation(pyod("_configtest"))
return ltype
finally:
cmd._clean()
LONG_DOUBLE_REPRESENTATION_SRC = r"""
/* "before" is 16 bytes to ensure there's no padding between it and "x".
* We're not expecting any "long double" bigger than 16 bytes or with
* alignment requirements stricter than 16 bytes. */
typedef %(type)s test_type;
struct {
char before[16];
test_type x;
char after[8];
} foo = {
{ '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
'\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
-123456789.0,
{ '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
};
"""
def pyod(filename):
"""Python implementation of the od UNIX utility (od -b, more exactly).
Parameters
----------
filename : str
name of the file to get the dump from.
Returns
-------
out : seq
list of lines of od output
Notes
-----
We only implement enough to get the necessary information for long double
    representation; this is not intended as a compatible replacement for od.
"""
out = []
with open(filename, 'rb') as fid:
yo2 = [oct(o)[2:] for o in fid.read()]
for i in range(0, len(yo2), 16):
line = ['%07d' % int(oct(i)[2:])]
line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
out.append(" ".join(line))
return out
_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
'001', '043', '105', '147', '211', '253', '315', '357']
_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']
_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
'031', '300', '000', '000']
_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
'031', '300', '000', '000', '000', '000', '000', '000']
_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
'242', '240', '000', '000', '000', '000']
_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
'000', '000', '000', '000', '000', '000', '000', '000']
_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
['000'] * 8)
_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
['000'] * 8)
def long_double_representation(lines):
"""Given a binary dump as given by GNU od -b, look for long double
representation."""
# Read contains a list of 32 items, each item is a byte (in octal
# representation, as a string). We 'slide' over the output until read is of
# the form before_seq + content + after_sequence, where content is the long double
# representation:
# - content is 12 bytes: 80 bits Intel representation
# - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision
# - content is 8 bytes: same as double (not implemented yet)
read = [''] * 32
saw = None
for line in lines:
# we skip the first word, as od -b output an index at the beginning of
# each line
for w in line.split()[1:]:
read.pop(0)
read.append(w)
# If the end of read is equal to the after_sequence, read contains
# the long double
if read[-8:] == _AFTER_SEQ:
saw = copy.copy(read)
# if the content was 12 bytes, we only have 32 - 8 - 12 = 12
# "before" bytes. In other words the first 4 "before" bytes went
# past the sliding window.
if read[:12] == _BEFORE_SEQ[4:]:
if read[12:-8] == _INTEL_EXTENDED_12B:
return 'INTEL_EXTENDED_12_BYTES_LE'
if read[12:-8] == _MOTOROLA_EXTENDED_12B:
return 'MOTOROLA_EXTENDED_12_BYTES_BE'
# if the content was 16 bytes, we are left with 32-8-16 = 16
# "before" bytes, so 8 went past the sliding window.
elif read[:8] == _BEFORE_SEQ[8:]:
if read[8:-8] == _INTEL_EXTENDED_16B:
return 'INTEL_EXTENDED_16_BYTES_LE'
elif read[8:-8] == _IEEE_QUAD_PREC_BE:
return 'IEEE_QUAD_BE'
elif read[8:-8] == _IEEE_QUAD_PREC_LE:
return 'IEEE_QUAD_LE'
elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE:
return 'IBM_DOUBLE_DOUBLE_LE'
elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE:
return 'IBM_DOUBLE_DOUBLE_BE'
# if the content was 8 bytes, left with 32-8-8 = 16 bytes
elif read[:16] == _BEFORE_SEQ:
if read[16:-8] == _IEEE_DOUBLE_LE:
return 'IEEE_DOUBLE_LE'
elif read[16:-8] == _IEEE_DOUBLE_BE:
return 'IEEE_DOUBLE_BE'
if saw is not None:
raise ValueError("Unrecognized format (%s)" % saw)
else:
# We never detected the after_sequence
raise ValueError("Could not lock sequences (%s)" % saw)
def check_for_right_shift_internal_compiler_error(cmd):
"""
On our arm CI, this fails with an internal compilation error
The failure looks like the following, and can be reproduced on ARM64 GCC 5.4:
<source>: In function 'right_shift':
<source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349
ip1[i] = ip1[i] >> in2;
^
Please submit a full bug report,
with preprocessed source if appropriate.
See <http://gcc.gnu.org/bugs.html> for instructions.
Compiler returned: 1
This function returns True if this compiler bug is present, and we need to
turn off optimization for the function
"""
cmd._check_compiler()
has_optimize = cmd.try_compile(textwrap.dedent("""\
__attribute__((optimize("O3"))) void right_shift() {}
"""), None, None)
if not has_optimize:
return False
no_err = cmd.try_compile(textwrap.dedent("""\
typedef long the_type; /* fails also for unsigned and long long */
__attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) {
for (int i = 0; i < n; i++) {
if (in2 < (the_type)sizeof(the_type) * 8) {
ip1[i] = ip1[i] >> in2;
}
}
}
"""), None, None)
return not no_err
| 20,393 | Python | 41.844538 | 107 | 0.544402 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/generate_numpy_api.py | import os
import genapi
from genapi import \
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
import numpy_api
# use annotated api when running under cpychecker
h_template = r"""
#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
typedef struct {
PyObject_HEAD
npy_bool obval;
} PyBoolScalarObject;
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
%s
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern void **PyArray_API;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
void **PyArray_API;
#else
static void **PyArray_API=NULL;
#endif
#endif
%s
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
int st;
PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
PyObject *c_api = NULL;
if (numpy == NULL) {
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
return -1;
}
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
return -1;
}
/* Perform runtime check of C API version */
if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"ABI version 0x%%x but this version of numpy is 0x%%x", \
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
return -1;
}
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"API version 0x%%x but this version of numpy is 0x%%x", \
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
return -1;
}
/*
* Perform runtime check of endianness and check it matches the one set by
* the headers (npy_endian.h) as a safeguard
*/
st = PyArray_GetEndianness();
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
return -1;
}
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"big endian, but detected different endianness at runtime");
return -1;
}
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"little endian, but detected different endianness at runtime");
return -1;
}
#endif
return 0;
}
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
#endif
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyArray_API[] = {
%s
};
"""
c_api_header = """
===========
NumPy C-API
===========
"""
def generate_api(output_dir, force=False):
basename = 'multiarray_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
d_file = os.path.join(output_dir, '%s.txt' % basename)
targets = (h_file, c_file, d_file)
sources = numpy_api.multiarray_api
if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
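# Hedged usage sketch (path illustrative): the build step calls this with
# the include output directory and compiles/installs the results:
#
#     h_file, c_file, doc_file = generate_api(
#         'build/src/numpy/core/include/numpy')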
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
doc_file = targets[2]
global_vars = sources[0]
scalar_bool_values = sources[1]
types_api = sources[2]
multiarray_funcs = sources[3]
multiarray_api = sources[:]
module_list = []
extension_list = []
init_list = []
# Check multiarray api indexes
multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
multiarray_funcs)
# Create dict name -> *Api instance
api_name = 'PyArray_API'
multiarray_api_dict = {}
for f in numpyapi_list:
name = f.name
index = multiarray_funcs[name][0]
annotations = multiarray_funcs[name][1:]
multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
f.return_type,
f.args, api_name)
for name, val in global_vars.items():
index, type = val
multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
for name, val in scalar_bool_values.items():
index = val[0]
multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
for name, val in types_api.items():
index = val[0]
internal_type = None if len(val) == 1 else val[1]
multiarray_api_dict[name] = TypeApi(
name, index, 'PyTypeObject', api_name, internal_type)
if len(multiarray_api_dict) != len(multiarray_api_index):
keys_dict = set(multiarray_api_dict.keys())
keys_index = set(multiarray_api_index.keys())
raise AssertionError(
"Multiarray API size mismatch - "
"index has extra keys {}, dict has extra keys {}"
.format(keys_index - keys_dict, keys_dict - keys_index)
)
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
api_item = multiarray_api_dict[name]
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
genapi.write_file(header_file, s)
# Write to c-code
s = c_template % ',\n'.join(init_list)
genapi.write_file(c_file, s)
# write to documentation
s = c_api_header
for func in numpyapi_list:
s += func.to_ReST()
s += '\n\n'
genapi.write_file(doc_file, s)
return targets
| 7,007 | Python | 28.56962 | 162 | 0.632082 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/records.py | """
Record Arrays
=============
Record arrays expose the fields of structured arrays as properties.
Most commonly, ndarrays contain elements of a single type, e.g. floats,
integers, bools etc. However, it is possible for elements to be combinations
of these using structured types, such as::
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)])
>>> a
array([(1, 2.), (1, 2.)], dtype=[('x', '<i8'), ('y', '<f8')])
Here, each element consists of two fields: x (an int), and y (a float).
This is known as a structured array. The different fields are analogous
to columns in a spreadsheet. The different fields can be accessed as
one would a dictionary::
>>> a['x']
array([1, 1])
>>> a['y']
array([2., 2.])
Record arrays allow us to access fields as properties::
>>> ar = np.rec.array(a)
>>> ar.x
array([1, 1])
>>> ar.y
array([2., 2.])
"""
import warnings
from collections import Counter
from contextlib import nullcontext
from . import numeric as sb
from . import numerictypes as nt
from numpy.compat import os_fspath
from numpy.core.overrides import set_module
from .arrayprint import _get_legacy_print_mode
# All of the functions allow formats to be a dtype
__all__ = [
'record', 'recarray', 'format_parser',
'fromarrays', 'fromrecords', 'fromstring', 'fromfile', 'array',
]
ndarray = sb.ndarray
_byteorderconv = {'b':'>',
'l':'<',
'n':'=',
'B':'>',
'L':'<',
'N':'=',
'S':'s',
's':'s',
'>':'>',
'<':'<',
'=':'=',
'|':'|',
'I':'|',
'i':'|'}
# formats regular expression
# allows a multidimensional spec with a tuple syntax in front
# of the letter code, e.g. '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
numfmt = nt.sctypeDict
def find_duplicate(list):
"""Find duplication in a list, return a list of duplicated elements"""
return [
item
for item, counts in Counter(list).items()
if counts > 1
]
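# Illustrative sketch (not part of the upstream module): each repeated
# element is reported once, e.g.
#
#     >>> find_duplicate(['a', 'b', 'a', 'a'])
#     ['a']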
@set_module('numpy')
class format_parser:
"""
Class to convert formats, names, titles description to a dtype.
After constructing the format_parser object, the dtype attribute is
the converted data-type:
``dtype = format_parser(formats, names, titles).dtype``
Attributes
----------
dtype : dtype
The converted data-type.
Parameters
----------
formats : str or list of str
The format description, either specified as a string with
comma-separated format descriptions in the form ``'f8, i4, a5'``, or
a list of format description strings in the form
``['f8', 'i4', 'a5']``.
names : str or list/tuple of str
The field names, either specified as a comma-separated string in the
form ``'col1, col2, col3'``, or as a list or tuple of strings in the
form ``['col1', 'col2', 'col3']``.
An empty list can be used, in that case default field names
('f0', 'f1', ...) are used.
titles : sequence
Sequence of title strings. An empty list can be used to leave titles
out.
aligned : bool, optional
If True, align the fields by padding as the C-compiler would.
Default is False.
byteorder : str, optional
If specified, all the fields will be changed to the
provided byte-order. Otherwise, the default byte-order is
used. For all available string specifiers, see `dtype.newbyteorder`.
See Also
--------
dtype, typename, sctype2char
Examples
--------
>>> np.format_parser(['<f8', '<i4', '<a5'], ['col1', 'col2', 'col3'],
... ['T1', 'T2', 'T3']).dtype
dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')])
`names` and/or `titles` can be empty lists. If `titles` is an empty list,
titles will simply not appear. If `names` is empty, default field names
will be used.
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
... []).dtype
dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '<S5')])
>>> np.format_parser(['<f8', '<i4', '<a5'], [], []).dtype
dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])
"""
def __init__(self, formats, names, titles, aligned=False, byteorder=None):
self._parseFormats(formats, aligned)
self._setfieldnames(names, titles)
self._createdtype(byteorder)
def _parseFormats(self, formats, aligned=False):
""" Parse the field formats """
if formats is None:
raise ValueError("Need formats argument")
if isinstance(formats, list):
dtype = sb.dtype(
[('f{}'.format(i), format_) for i, format_ in enumerate(formats)],
aligned,
)
else:
dtype = sb.dtype(formats, aligned)
fields = dtype.fields
if fields is None:
dtype = sb.dtype([('f1', dtype)], aligned)
fields = dtype.fields
keys = dtype.names
self._f_formats = [fields[key][0] for key in keys]
self._offsets = [fields[key][1] for key in keys]
self._nfields = len(keys)
def _setfieldnames(self, names, titles):
"""convert input field names into a list and assign to the _names
attribute """
if names:
if type(names) in [list, tuple]:
pass
elif isinstance(names, str):
names = names.split(',')
else:
raise NameError("illegal input names %s" % repr(names))
self._names = [n.strip() for n in names[:self._nfields]]
else:
self._names = []
# if the names are not specified, they will be assigned as
# "f0, f1, f2,..."
        # if not enough names are specified, they will be assigned as "f[n],
        # f[n+1], ...", where n is the number of specified names
self._names += ['f%d' % i for i in range(len(self._names),
self._nfields)]
# check for redundant names
_dup = find_duplicate(self._names)
if _dup:
raise ValueError("Duplicate field names: %s" % _dup)
if titles:
self._titles = [n.strip() for n in titles[:self._nfields]]
else:
self._titles = []
titles = []
if self._nfields > len(titles):
self._titles += [None] * (self._nfields - len(titles))
def _createdtype(self, byteorder):
dtype = sb.dtype({
'names': self._names,
'formats': self._f_formats,
'offsets': self._offsets,
'titles': self._titles,
})
if byteorder is not None:
byteorder = _byteorderconv[byteorder[0]]
dtype = dtype.newbyteorder(byteorder)
self.dtype = dtype
class record(nt.void):
"""A data-type scalar that allows field access as attribute lookup.
"""
# manually set name and module so that this class's type shows up
# as numpy.record when printed
__name__ = 'record'
__module__ = 'numpy'
def __repr__(self):
if _get_legacy_print_mode() <= 113:
return self.__str__()
return super().__repr__()
def __str__(self):
if _get_legacy_print_mode() <= 113:
return str(self.item())
return super().__str__()
def __getattribute__(self, attr):
if attr in ('setfield', 'getfield', 'dtype'):
return nt.void.__getattribute__(self, attr)
try:
return nt.void.__getattribute__(self, attr)
except AttributeError:
pass
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
obj = self.getfield(*res[:2])
# if it has fields return a record,
# otherwise return the object
try:
dt = obj.dtype
except AttributeError:
#happens if field is Object type
return obj
if dt.names is not None:
return obj.view((self.__class__, obj.dtype))
return obj
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def __setattr__(self, attr, val):
if attr in ('setfield', 'getfield', 'dtype'):
raise AttributeError("Cannot set '%s' attribute" % attr)
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
return self.setfield(val, *res[:2])
else:
if getattr(self, attr, None):
return nt.void.__setattr__(self, attr, val)
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def __getitem__(self, indx):
obj = nt.void.__getitem__(self, indx)
# copy behavior of record.__getattribute__,
if isinstance(obj, nt.void) and obj.dtype.names is not None:
return obj.view((self.__class__, obj.dtype))
else:
# return a single element
return obj
def pprint(self):
"""Pretty-print all fields."""
# pretty-print all fields
names = self.dtype.names
maxlen = max(len(name) for name in names)
fmt = '%% %ds: %%s' % maxlen
rows = [fmt % (name, getattr(self, name)) for name in names]
return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already) The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
"""Construct an ndarray that allows field access using attributes.
    Arrays may have a data-type containing fields, analogous
    to columns in a spreadsheet.  An example is ``[(x, int), (y, float)]``,
where each entry in the array is a pair of ``(int, float)``. Normally,
these attributes are accessed using dictionary lookups such as ``arr['x']``
and ``arr['y']``. Record arrays allow the fields to be accessed as members
of the array, using ``arr.x`` and ``arr.y``.
Parameters
----------
shape : tuple
Shape of output array.
dtype : data-type, optional
The desired data-type. By default, the data-type is determined
from `formats`, `names`, `titles`, `aligned` and `byteorder`.
formats : list of data-types, optional
A list containing the data-types for the different columns, e.g.
``['i4', 'f8', 'i4']``. `formats` does *not* support the new
convention of using types directly, i.e. ``(int, float, int)``.
Note that `formats` must be a list, not a tuple.
Given that `formats` is somewhat limited, we recommend specifying
`dtype` instead.
names : tuple of str, optional
The name of each column, e.g. ``('x', 'y', 'z')``.
buf : buffer, optional
By default, a new array is created of the given shape and data-type.
If `buf` is specified and is an object exposing the buffer interface,
the array will use the memory from the existing buffer. In this case,
the `offset` and `strides` keywords are available.
Other Parameters
----------------
titles : tuple of str, optional
Aliases for column names. For example, if `names` were
``('x', 'y', 'z')`` and `titles` is
``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
byteorder : {'<', '>', '='}, optional
Byte-order for all fields.
aligned : bool, optional
Align the fields in memory as the C-compiler would.
strides : tuple of ints, optional
Buffer (`buf`) is interpreted according to these strides (strides
define how many bytes each array element, row, column, etc.
occupy in memory).
offset : int, optional
Start reading buffer (`buf`) from this offset onwards.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Returns
-------
rec : recarray
Empty array of the given shape and type.
See Also
--------
core.records.fromrecords : Construct a record array from data.
record : fundamental data-type for `recarray`.
format_parser : determine a data-type from formats, names, titles.
Notes
-----
This constructor can be compared to ``empty``: it creates a new record
array but does not fill it with data. To create a record array from data,
use one of the following methods:
1. Create a standard ndarray and convert it to a record array,
using ``arr.view(np.recarray)``
2. Use the `buf` keyword.
3. Use `np.rec.fromrecords`.
Examples
--------
Create an array with two fields, ``x`` and ``y``:
>>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', '<i8')])
>>> x
array([(1., 2), (3., 4)], dtype=[('x', '<f8'), ('y', '<i8')])
>>> x['x']
array([1., 3.])
View the array as a record array:
>>> x = x.view(np.recarray)
>>> x.x
array([1., 3.])
>>> x.y
array([2, 4])
Create a new, empty record array:
>>> np.recarray((2,),
... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
(3471280, 1.2134086255804012e-316, 0)],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
"""
# manually set name and module so that this class's type shows
# up as "numpy.recarray" when printed
__name__ = 'recarray'
__module__ = 'numpy'
def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False, order='C'):
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
if buf is None:
self = ndarray.__new__(subtype, shape, (record, descr), order=order)
else:
self = ndarray.__new__(subtype, shape, (record, descr),
buffer=buf, offset=offset,
strides=strides, order=order)
return self
def __array_finalize__(self, obj):
if self.dtype.type is not record and self.dtype.names is not None:
# if self.dtype is not np.record, invoke __setattr__ which will
# convert it to a record if it is a void dtype.
self.dtype = self.dtype
def __getattribute__(self, attr):
# See if ndarray has this attr, and return it if so. (note that this
# means a field with the same name as an ndarray attr cannot be
# accessed by attribute).
try:
return object.__getattribute__(self, attr)
except AttributeError: # attr must be a fieldname
pass
# look for a field with this name
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError) as e:
raise AttributeError("recarray has no attribute %s" % attr) from e
obj = self.getfield(*res)
# At this point obj will always be a recarray, since (see
# PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
# non-structured, convert it to an ndarray. Then if obj is structured
# with void type convert it to the same dtype.type (eg to preserve
# numpy.record type if present), since nested structured fields do not
# inherit type. Don't do this for non-void structures though.
if obj.dtype.names is not None:
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
else:
return obj.view(ndarray)
# Save the dictionary.
# If the attr is a field name and not in the saved dictionary
# Undo any "setting" of the attribute and do a setfield
# Thus, you can't create attributes on-the-fly that are field names.
def __setattr__(self, attr, val):
# Automatically convert (void) structured types to records
# (but not non-void structures, subarrays, or non-structured voids)
if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None:
val = sb.dtype((record, val))
newattr = attr not in self.__dict__
try:
ret = object.__setattr__(self, attr, val)
except Exception:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
raise
else:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
return ret
if newattr:
# We just added this one or this setattr worked on an
# internal attribute.
try:
object.__delattr__(self, attr)
except Exception:
return ret
try:
res = fielddict[attr][:2]
except (TypeError, KeyError) as e:
raise AttributeError(
"record array has no attribute %s" % attr
) from e
return self.setfield(val, *res)
def __getitem__(self, indx):
obj = super().__getitem__(indx)
# copy behavior of getattr, except that here
# we might also be returning a single element
if isinstance(obj, ndarray):
if obj.dtype.names is not None:
obj = obj.view(type(self))
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
else:
return obj.view(type=ndarray)
else:
# return a single element
return obj
def __repr__(self):
repr_dtype = self.dtype
if self.dtype.type is record or not issubclass(self.dtype.type, nt.void):
# If this is a full record array (has numpy.record dtype),
# or if it has a scalar (non-void) dtype with no records,
# represent it using the rec.array function. Since rec.array
# converts dtype to a numpy.record for us, convert back
# to non-record before printing
if repr_dtype.type is record:
repr_dtype = sb.dtype((nt.void, repr_dtype))
prefix = "rec.array("
fmt = 'rec.array(%s,%sdtype=%s)'
else:
# otherwise represent it using np.array plus a view
# This should only happen if the user is playing
# strange games with dtypes.
prefix = "array("
fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'
# get data/shape string. logic taken from numeric.array_repr
if self.size > 0 or self.shape == (0,):
lst = sb.array2string(
self, separator=', ', prefix=prefix, suffix=',')
else:
# show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(self.shape),)
lf = '\n'+' '*len(prefix)
if _get_legacy_print_mode() <= 113:
lf = ' ' + lf # trailing space
return fmt % (lst, lf, repr_dtype)
def field(self, attr, val=None):
if isinstance(attr, int):
names = ndarray.__getattribute__(self, 'dtype').names
attr = names[attr]
fielddict = ndarray.__getattribute__(self, 'dtype').fields
res = fielddict[attr][:2]
if val is None:
obj = self.getfield(*res)
if obj.dtype.names is not None:
return obj
return obj.view(ndarray)
else:
return self.setfield(val, *res)
def _deprecate_shape_0_as_None(shape):
if shape == 0:
warnings.warn(
"Passing `shape=0` to have the shape be inferred is deprecated, "
"and in future will be equivalent to `shape=(0,)`. To infer "
"the shape and suppress this warning, pass `shape=None` instead.",
FutureWarning, stacklevel=3)
return None
else:
return shape
@set_module("numpy.rec")
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create a record array from a (flat) list of arrays
Parameters
----------
arrayList : list or tuple
List of array-like objects (such as lists, tuples,
and ndarrays).
dtype : data-type, optional
valid dtype for all arrays
shape : int or tuple of ints, optional
Shape of the resulting array. If not provided, inferred from
``arrayList[0]``.
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation.
Returns
-------
np.recarray
Record array consisting of given arrayList columns.
Examples
--------
>>> x1=np.array([1,2,3,4])
>>> x2=np.array(['a','dd','xyz','12'])
>>> x3=np.array([1.1,2,3,4])
>>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
>>> print(r[1])
(2, 'dd', 2.0) # may vary
>>> x1[1]=34
>>> r.a
array([1, 2, 3, 4])
>>> x1 = np.array([1, 2, 3, 4])
>>> x2 = np.array(['a', 'dd', 'xyz', '12'])
>>> x3 = np.array([1.1, 2, 3,4])
>>> r = np.core.records.fromarrays(
... [x1, x2, x3],
... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)]))
>>> r
rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ),
(4, b'12', 4. )],
dtype=[('a', '<i4'), ('b', 'S3'), ('c', '<f4')])
"""
arrayList = [sb.asarray(x) for x in arrayList]
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape is None:
shape = arrayList[0].shape
elif isinstance(shape, int):
shape = (shape,)
if formats is None and dtype is None:
# go through each object in the list to see if it is an ndarray
# and determine the formats.
formats = [obj.dtype for obj in arrayList]
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
_names = descr.names
# Determine shape from data-type.
if len(descr) != len(arrayList):
raise ValueError("mismatch between the number of fields "
"and the number of arrays")
d0 = descr[0].shape
nn = len(d0)
if nn > 0:
shape = shape[:-nn]
_array = recarray(shape, descr)
# populate the record array (makes a copy)
for k, obj in enumerate(arrayList):
nn = descr[k].ndim
testshape = obj.shape[:obj.ndim - nn]
name = _names[k]
if testshape != shape:
raise ValueError(f'array-shape mismatch in array {k} ("{name}")')
_array[name] = obj
return _array
@set_module("numpy.rec")
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None):
"""Create a recarray from a list of records in text form.
Parameters
----------
recList : sequence
data in the same field may be heterogeneous - they will be promoted
to the highest data type.
dtype : data-type, optional
valid dtype for all arrays
shape : int or tuple of ints, optional
shape of each array.
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation.
If both `formats` and `dtype` are None, then this will auto-detect
formats. Use list of tuples rather than list of lists for faster
processing.
Returns
-------
np.recarray
record array consisting of given recList rows.
Examples
--------
>>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
... names='col1,col2,col3')
>>> print(r[0])
(456, 'dbe', 1.2)
>>> r.col1
array([456, 2])
>>> r.col2
array(['dbe', 'de'], dtype='<U3')
>>> import pickle
>>> pickle.loads(pickle.dumps(r))
rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)],
dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')])
"""
if formats is None and dtype is None: # slower
obj = sb.array(recList, dtype=object)
arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])]
return fromarrays(arrlist, formats=formats, shape=shape, names=names,
titles=titles, aligned=aligned, byteorder=byteorder)
if dtype is not None:
descr = sb.dtype((record, dtype))
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
try:
retval = sb.array(recList, dtype=descr)
except (TypeError, ValueError):
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape is None:
shape = len(recList)
if isinstance(shape, int):
shape = (shape,)
if len(shape) > 1:
raise ValueError("Can only deal with 1-d array.")
_array = recarray(shape, descr)
for k in range(_array.size):
_array[k] = tuple(recList[k])
# list of lists instead of list of tuples ?
# 2018-02-07, 1.14.1
warnings.warn(
"fromrecords expected a list of tuples, may have received a list "
"of lists instead. In the future that will raise an error",
FutureWarning, stacklevel=2)
return _array
else:
if shape is not None and retval.shape != shape:
retval.shape = shape
res = retval.view(recarray)
return res
@set_module("numpy.rec")
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
r"""Create a record array from binary data
Note that despite the name of this function it does not accept `str`
instances.
Parameters
----------
datastring : bytes-like
Buffer of binary data
dtype : data-type, optional
Valid dtype for all arrays
shape : int or tuple of ints, optional
Shape of each array.
offset : int, optional
Position in the buffer to start reading from.
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation.
Returns
-------
np.recarray
Record array view into the data in datastring. This will be readonly
if `datastring` is readonly.
See Also
--------
numpy.frombuffer
Examples
--------
>>> a = b'\x01\x02\x03abc'
>>> np.core.records.fromstring(a, dtype='u1,u1,u1,S3')
rec.array([(1, 2, 3, b'abc')],
dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')])
>>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64),
... ('GradeLevel', np.int32)]
>>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5),
... ('Aadi', 66.6, 6)], dtype=grades_dtype)
>>> np.core.records.fromstring(grades_array.tobytes(), dtype=grades_dtype)
rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)],
dtype=[('Name', '<U10'), ('Marks', '<f8'), ('GradeLevel', '<i4')])
>>> s = '\x01\x02\x03abc'
>>> np.core.records.fromstring(s, dtype='u1,u1,u1,S3')
    Traceback (most recent call last):
...
TypeError: a bytes-like object is required, not 'str'
"""
if dtype is None and formats is None:
raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
itemsize = descr.itemsize
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape in (None, -1):
shape = (len(datastring) - offset) // itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
def get_remaining_size(fd):
pos = fd.tell()
try:
fd.seek(0, 2)
return fd.tell() - pos
finally:
fd.seek(pos, 0)
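# Illustrative sketch (not part of the upstream module): the remaining byte
# count is measured without disturbing the current file position, e.g.
#
#     >>> import io
#     >>> f = io.BytesIO(b'abcdef')
#     >>> _ = f.seek(2)
#     >>> get_remaining_size(f)
#     4
#     >>> f.tell()
#     2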
@set_module("numpy.rec")
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create an array from binary file data
Parameters
----------
fd : str or file type
If file is a string or a path-like object then that file is opened,
else it is assumed to be a file object. The file object must
support random access (i.e. it must have tell and seek methods).
dtype : data-type, optional
valid dtype for all arrays
shape : int or tuple of ints, optional
shape of each array.
offset : int, optional
Position in the file to start reading from.
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation
Returns
-------
np.recarray
record array consisting of data enclosed in file.
Examples
--------
>>> from tempfile import TemporaryFile
>>> a = np.empty(10,dtype='f8,i4,a5')
>>> a[5] = (0.5,10,'abcde')
>>>
>>> fd=TemporaryFile()
>>> a = a.newbyteorder('<')
>>> a.tofile(fd)
>>>
>>> _ = fd.seek(0)
>>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
... byteorder='<')
>>> print(r[5])
(0.5, 10, 'abcde')
>>> r.shape
(10,)
"""
if dtype is None and formats is None:
raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
# NumPy 1.19.0, 2020-01-01
shape = _deprecate_shape_0_as_None(shape)
if shape is None:
shape = (-1,)
elif isinstance(shape, int):
shape = (shape,)
if hasattr(fd, 'readinto'):
# GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase interface.
# Example of fd: gzip, BytesIO, BufferedReader
# file already opened
ctx = nullcontext(fd)
else:
# open file
ctx = open(os_fspath(fd), 'rb')
with ctx as fd:
if offset > 0:
fd.seek(offset, 1)
size = get_remaining_size(fd)
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder).dtype
itemsize = descr.itemsize
shapeprod = sb.array(shape).prod(dtype=nt.intp)
shapesize = shapeprod * itemsize
if shapesize < 0:
shape = list(shape)
shape[shape.index(-1)] = size // -shapesize
shape = tuple(shape)
shapeprod = sb.array(shape).prod(dtype=nt.intp)
nbytes = shapeprod * itemsize
if nbytes > size:
raise ValueError(
"Not enough bytes left in file for specified shape and type")
# create the array
_array = recarray(shape, descr)
nbytesread = fd.readinto(_array.data)
if nbytesread != nbytes:
raise OSError("Didn't read as many bytes as expected")
return _array
@set_module("numpy.rec")
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None, copy=True):
"""
    Construct a record array from a wide variety of objects.
A general-purpose record array constructor that dispatches to the
appropriate `recarray` creation function based on the inputs (see Notes).
Parameters
----------
obj : any
Input object. See Notes for details on how various input types are
treated.
dtype : data-type, optional
Valid dtype for array.
shape : int or tuple of ints, optional
Shape of each array.
offset : int, optional
Position in the file or buffer to start reading from.
strides : tuple of ints, optional
Buffer (`buf`) is interpreted according to these strides (strides
define how many bytes each array element, row, column, etc.
occupy in memory).
formats, names, titles, aligned, byteorder :
If `dtype` is ``None``, these arguments are passed to
`numpy.format_parser` to construct a dtype. See that function for
detailed documentation.
copy : bool, optional
Whether to copy the input object (True), or to use a reference instead.
This option only applies when the input is an ndarray or recarray.
Defaults to True.
Returns
-------
np.recarray
Record array created from the specified object.
Notes
-----
If `obj` is ``None``, then call the `~numpy.recarray` constructor. If
`obj` is a string, then call the `fromstring` constructor. If `obj` is a
list or a tuple, then if the first object is an `~numpy.ndarray`, call
`fromarrays`, otherwise call `fromrecords`. If `obj` is a
`~numpy.recarray`, then make a copy of the data in the recarray
(if ``copy=True``) and use the new formats, names, and titles. If `obj`
is a file, then call `fromfile`. Finally, if obj is an `ndarray`, then
return ``obj.view(recarray)``, making a copy of the data if ``copy=True``.
Examples
--------
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> a
    array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.core.records.array(a)
rec.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
dtype=int32)
>>> b = [(1, 1), (2, 4), (3, 9)]
>>> c = np.core.records.array(b, formats = ['i2', 'f2'], names = ('x', 'y'))
>>> c
rec.array([(1, 1.0), (2, 4.0), (3, 9.0)],
dtype=[('x', '<i2'), ('y', '<f2')])
>>> c.x
rec.array([1, 2, 3], dtype=int16)
>>> c.y
rec.array([ 1.0, 4.0, 9.0], dtype=float16)
>>> r = np.rec.array(['abc','def'], names=['col1','col2'])
>>> print(r.col1)
abc
>>> r.col1
array('abc', dtype='<U3')
>>> r.col2
array('def', dtype='<U3')
"""
if ((isinstance(obj, (type(None), str)) or hasattr(obj, 'readinto')) and
formats is None and dtype is None):
raise ValueError("Must define formats (or dtype) if object is "
"None, string, or an open file")
kwds = {}
if dtype is not None:
dtype = sb.dtype(dtype)
elif formats is not None:
dtype = format_parser(formats, names, titles,
aligned, byteorder).dtype
else:
kwds = {'formats': formats,
'names': names,
'titles': titles,
'aligned': aligned,
'byteorder': byteorder
}
if obj is None:
if shape is None:
raise ValueError("Must define a shape if obj is None")
return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
elif isinstance(obj, bytes):
return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
elif isinstance(obj, (list, tuple)):
if isinstance(obj[0], (tuple, list)):
return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
else:
return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
elif isinstance(obj, recarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new
elif hasattr(obj, 'readinto'):
return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
elif isinstance(obj, ndarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new.view(recarray)
else:
interface = getattr(obj, "__array_interface__", None)
if interface is None or not isinstance(interface, dict):
raise ValueError("Unknown input type")
obj = sb.array(obj)
if dtype is not None and (obj.dtype != dtype):
obj = obj.view(dtype)
return obj.view(recarray)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_internal.py
"""
A place for internal code
Some things are more easily handled in Python.
"""
import ast
import re
import sys
import platform
import warnings
from .multiarray import dtype, array, ndarray, promote_types
try:
import ctypes
except ImportError:
ctypes = None
IS_PYPY = platform.python_implementation() == 'PyPy'
if sys.byteorder == 'little':
_nbo = '<'
else:
_nbo = '>'
def _makenames_list(adict, align):
allfields = []
for fname, obj in adict.items():
n = len(obj)
if not isinstance(obj, tuple) or n not in (2, 3):
raise ValueError("entry not a 2- or 3- tuple")
if n > 2 and obj[2] == fname:
continue
num = int(obj[1])
if num < 0:
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
if n > 2:
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
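# Illustrative sketch (not part of the upstream module): entries are sorted
# by offset before the parallel lists are returned, e.g.
#
#     >>> _makenames_list({'a': ('f8', 8), 'b': ('i4', 0)}, align=False)
#     (['b', 'a'], [dtype('int32'), dtype('float64')], [0, 8], [None, None])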
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict, align)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if len(res) > 2:
titles.append(res[2])
else:
titles.append(None)
return dtype({"names": names,
"formats": formats,
"offsets": offsets,
"titles": titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
if new:
return (descriptor.str, new)
else:
return descriptor.str
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('', f'|V{num}'))
offset += num
elif field[1] < offset:
raise ValueError(
"dtype.descr is not defined for types with overlapping or "
"out-of-order fields")
if len(field) > 3:
name = (field[2], field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
if descriptor.itemsize > offset:
num = descriptor.itemsize - offset
result.append(('', f'|V{num}'))
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
format_re = re.compile(r'(?P<order1>[<>|=]?)'
r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
r'(?P<order2>[<>|=]?)'
r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')
# astr is a string (perhaps comma separated)
_convorder = {'=': _nbo}
def _commastring(astr):
startindex = 0
result = []
while startindex < len(astr):
mo = format_re.match(astr, pos=startindex)
try:
(order1, repeats, order2, dtype) = mo.groups()
except (TypeError, AttributeError):
raise ValueError(
f'format number {len(result)+1} of "{astr}" is not recognized'
) from None
startindex = mo.end()
# Separator or ending padding
if startindex < len(astr):
if space_re.match(astr, pos=startindex):
startindex = len(astr)
else:
mo = sep_re.match(astr, pos=startindex)
if not mo:
raise ValueError(
'format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
if order2 == '':
order = order1
elif order1 == '':
order = order2
else:
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
raise ValueError(
'inconsistent byte-order specification %s and %s' %
(order1, order2))
order = order1
if order in ('|', '=', _nbo):
order = ''
dtype = order + dtype
if (repeats == ''):
newitem = dtype
else:
newitem = (dtype, ast.literal_eval(repeats))
result.append(newitem)
return result
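# Illustrative sketch (not part of the upstream module): repeat counts and
# shapes are split off into (dtype, repeats) tuples, e.g.
#
#     >>> _commastring('2i4, (2,3)f8')
#     [('i4', 2), ('f8', (2, 3))]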
class dummy_ctype:
def __init__(self, cls):
self._cls = cls
def __mul__(self, other):
return self
def __call__(self, *other):
return self._cls(other)
def __eq__(self, other):
return self._cls == other._cls
def __ne__(self, other):
return self._cls != other._cls
def _getintp_ctype():
val = _getintp_ctype.cache
if val is not None:
return val
if ctypes is None:
import numpy as np
val = dummy_ctype(np.intp)
else:
char = dtype('p').char
if char == 'i':
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes:
def cast(self, num, obj):
return num.value
class c_void_p:
def __init__(self, ptr):
self.value = ptr
class _ctypes:
def __init__(self, array, ptr=None):
self._arr = array
if ctypes:
self._ctypes = ctypes
self._data = self._ctypes.c_void_p(ptr)
else:
# fake a pointer-like object that holds onto the reference
self._ctypes = _missing_ctypes()
self._data = self._ctypes.c_void_p(ptr)
self._data._objects = array
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
"""
Return the data pointer cast to a particular c-types object.
For example, calling ``self._as_parameter_`` is equivalent to
``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
``self.data_as(ctypes.POINTER(ctypes.c_double))``.
The returned pointer will keep a reference to the array.
"""
# _ctypes.cast function causes a circular reference of self._data in
# self._data._objects. Attributes of self._data cannot be released
# until gc.collect is called. Make a copy of the pointer first then let
# it hold the array reference. This is a workaround to circumvent the
# CPython bug https://bugs.python.org/issue12836
ptr = self._ctypes.cast(self._data, obj)
ptr._arr = self._arr
return ptr
def shape_as(self, obj):
"""
Return the shape tuple as an array of some other c-types
type. For example: ``self.shape_as(ctypes.c_short)``.
"""
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
"""
Return the strides tuple as an array of some other
c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
"""
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
@property
def data(self):
"""
A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as ``self._array_interface_['data'][0]``.
Note that unlike ``data_as``, a reference will not be kept to the array:
code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
pointer to a deallocated array, and should be spelt
``(a + b).ctypes.data_as(ctypes.c_void_p)``
"""
return self._data.value
@property
def shape(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to ``dtype('p')`` on this
platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
`ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
the platform. The ctypes array contains the shape of
the underlying array.
"""
return self.shape_as(_getintp_ctype())
@property
def strides(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
"""
return self.strides_as(_getintp_ctype())
@property
def _as_parameter_(self):
"""
Overrides the ctypes semi-magic method
Enables `c_func(some_array.ctypes)`
"""
return self.data_as(ctypes.c_void_p)
# Numpy 1.21.0, 2021-05-18
def get_data(self):
"""Deprecated getter for the `_ctypes.data` property.
.. deprecated:: 1.21
"""
warnings.warn('"get_data" is deprecated. Use "data" instead',
DeprecationWarning, stacklevel=2)
return self.data
def get_shape(self):
"""Deprecated getter for the `_ctypes.shape` property.
.. deprecated:: 1.21
"""
warnings.warn('"get_shape" is deprecated. Use "shape" instead',
DeprecationWarning, stacklevel=2)
return self.shape
def get_strides(self):
"""Deprecated getter for the `_ctypes.strides` property.
.. deprecated:: 1.21
"""
warnings.warn('"get_strides" is deprecated. Use "strides" instead',
DeprecationWarning, stacklevel=2)
return self.strides
def get_as_parameter(self):
"""Deprecated getter for the `_ctypes._as_parameter_` property.
.. deprecated:: 1.21
"""
warnings.warn(
'"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
DeprecationWarning, stacklevel=2,
)
return self._as_parameter_
def _newnames(datatype, order):
"""
Given a datatype and an order object, return a new names tuple, with the
order indicated
"""
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
seen = set()
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
if name in seen:
raise ValueError(f"duplicate field name: {name}") from None
else:
raise ValueError(f"unknown field name: {name}") from None
seen.add(name)
return tuple(list(order) + nameslist)
raise ValueError(f"unsupported order value: {order}")
def _copy_fields(ary):
"""Return copy of structured array with padding between fields removed.
Parameters
----------
ary : ndarray
Structured array from which to remove padding bytes
Returns
-------
ary_copy : ndarray
Copy of ary with padding bytes removed
"""
dt = ary.dtype
copy_dtype = {'names': dt.names,
'formats': [dt.fields[name][0] for name in dt.names]}
return array(ary, dtype=copy_dtype, copy=True)
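# Illustrative sketch (not part of the upstream module): padding bytes are
# dropped from the copy, e.g.
#
#     >>> padded = dtype({'names': ['a'], 'formats': ['i1'], 'itemsize': 8})
#     >>> _copy_fields(array([(1,), (2,)], dtype=padded)).dtype.itemsize
#     1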
def _promote_fields(dt1, dt2):
""" Perform type promotion for two structured dtypes.
Parameters
----------
dt1 : structured dtype
First dtype.
dt2 : structured dtype
Second dtype.
Returns
-------
out : dtype
The promoted dtype
Notes
-----
If one of the inputs is aligned, the result will be. The titles of
both descriptors must match (point to the same field).
"""
# Both must be structured and have the same names in the same order
if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
raise TypeError("invalid type promotion")
# if both are identical, we can (maybe!) just return the same dtype.
identical = dt1 is dt2
new_fields = []
for name in dt1.names:
field1 = dt1.fields[name]
field2 = dt2.fields[name]
new_descr = promote_types(field1[0], field2[0])
identical = identical and new_descr is field1[0]
# Check that the titles match (if given):
if field1[2:] != field2[2:]:
raise TypeError("invalid type promotion")
if len(field1) == 2:
new_fields.append((name, new_descr))
else:
new_fields.append(((field1[2], name), new_descr))
res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)
# Might as well preserve identity (and metadata) if the dtype is identical
# and the itemsize, offsets are also unmodified. This could probably be
# sped up, but also probably just be removed entirely.
if identical and res.itemsize == dt1.itemsize:
for name in dt1.names:
if dt1.fields[name][1] != res.fields[name][1]:
return res # the dtype changed.
return dt1
return res
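# Illustrative sketch (not part of the upstream module; a little-endian
# platform is assumed for the repr):
#
#     >>> _promote_fields(dtype([('a', 'i4')]), dtype([('a', 'f4')]))
#     dtype([('a', '<f8')])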
def _getfield_is_safe(oldtype, newtype, offset):
""" Checks safety of getfield for object arrays.
As in _view_is_safe, we need to check that memory containing objects is not
reinterpreted as a non-object datatype and vice versa.
Parameters
----------
oldtype : data-type
Data type of the original ndarray.
newtype : data-type
Data type of the field being accessed by ndarray.getfield
offset : int
Offset of the field being accessed by ndarray.getfield
Raises
------
TypeError
If the field access is invalid
"""
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
if oldtype.names is not None:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
return
raise TypeError("Cannot get/set field of an object array")
return
def _view_is_safe(oldtype, newtype):
""" Checks safety of a view involving object arrays, for example when
doing::
np.zeros(10, dtype=oldtype).view(newtype)
Parameters
----------
oldtype : data-type
Data type of original ndarray
newtype : data-type
Data type of the view
Raises
------
TypeError
If the new type is incompatible with the old type.
"""
# if the types are equivalent, there is no problem.
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
if oldtype == newtype:
return
if newtype.hasobject or oldtype.hasobject:
raise TypeError("Cannot change data-type for object array.")
return
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype
_pep3118_native_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'h',
'H': 'H',
'i': 'i',
'I': 'I',
'l': 'l',
'L': 'L',
'q': 'q',
'Q': 'Q',
'e': 'e',
'f': 'f',
'd': 'd',
'g': 'g',
'Zf': 'F',
'Zd': 'D',
'Zg': 'G',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
_pep3118_standard_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'i2',
'H': 'u2',
'i': 'i4',
'I': 'u4',
'l': 'i4',
'L': 'u4',
'q': 'i8',
'Q': 'u8',
'e': 'f2',
'f': 'f',
'd': 'd',
'Zf': 'F',
'Zd': 'D',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
_pep3118_unsupported_map = {
'u': 'UCS-2 strings',
'&': 'pointers',
't': 'bitfields',
'X': 'function pointers',
}
class _Stream:
def __init__(self, s):
self.s = s
self.byteorder = '@'
def advance(self, n):
res = self.s[:n]
self.s = self.s[n:]
return res
def consume(self, c):
if self.s[:len(c)] == c:
self.advance(len(c))
return True
return False
def consume_until(self, c):
if callable(c):
i = 0
while i < len(self.s) and not c(self.s[i]):
i = i + 1
return self.advance(i)
else:
i = self.s.index(c)
res = self.advance(i)
self.advance(len(c))
return res
@property
def next(self):
return self.s[0]
def __bool__(self):
return bool(self.s)
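# Illustrative sketch (not part of the upstream module): the stream consumes
# a PEP 3118 spec piecewise, e.g. for the fragment '10i:x:':
#
#     >>> s = _Stream('10i:x:')
#     >>> s.consume_until(lambda c: not c.isdigit())   # item size
#     '10'
#     >>> s.advance(1)                                 # type character
#     'i'
#     >>> s.consume(':') and s.consume_until(':')      # field name
#     'x'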
def _dtype_from_pep3118(spec):
stream = _Stream(spec)
dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
return dtype
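# Illustrative sketch (not part of the upstream module): unnamed fields get
# the default f0, f1, ... names, e.g.
#
#     >>> _dtype_from_pep3118('<if')
#     dtype([('f0', '<i4'), ('f1', '<f4')])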
def __dtype_from_pep3118(stream, is_subdtype):
field_spec = dict(
names=[],
formats=[],
offsets=[],
itemsize=0
)
offset = 0
common_alignment = 1
is_padding = False
# Parse spec
while stream:
value = None
# End of structure, bail out to upper level
if stream.consume('}'):
break
# Sub-arrays (1)
shape = None
if stream.consume('('):
shape = stream.consume_until(')')
shape = tuple(map(int, shape.split(',')))
# Byte order
if stream.next in ('@', '=', '<', '>', '^', '!'):
byteorder = stream.advance(1)
if byteorder == '!':
byteorder = '>'
stream.byteorder = byteorder
# Byte order characters also control native vs. standard type sizes
if stream.byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize_str = stream.consume_until(lambda c: not c.isdigit())
if itemsize_str:
itemsize = int(itemsize_str)
else:
itemsize = 1
# Data types
is_padding = False
if stream.consume('T{'):
value, align = __dtype_from_pep3118(
stream, is_subdtype=True)
elif stream.next in type_map_chars:
if stream.next == 'Z':
typechar = stream.advance(2)
else:
typechar = stream.advance(1)
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(
stream.byteorder, stream.byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
elif stream.next in _pep3118_unsupported_map:
desc = _pep3118_unsupported_map[stream.next]
raise NotImplementedError(
"Unrepresentable PEP 3118 data type {!r} ({})"
.format(stream.next, desc))
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
#
# Native alignment may require padding
#
        # Here we assume that the presence of a '@' character implies
# that the start of the array is *already* aligned.
#
extra_offset = 0
if stream.byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = _lcm(align, common_alignment)
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
if stream.consume(':'):
name = stream.consume_until(':')
else:
name = None
if not (is_padding and name is None):
if name is not None and name in field_spec['names']:
raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format")
field_spec['names'].append(name)
field_spec['formats'].append(value)
field_spec['offsets'].append(offset)
offset += value.itemsize
offset += extra_offset
field_spec['itemsize'] = offset
# extra final padding for aligned types
if stream.byteorder == '@':
field_spec['itemsize'] += (-offset) % common_alignment
# Check if this was a simple 1-item type, and unwrap it
if (field_spec['names'] == [None]
and field_spec['offsets'][0] == 0
and field_spec['itemsize'] == field_spec['formats'][0].itemsize
and not is_subdtype):
ret = field_spec['formats'][0]
else:
_fix_names(field_spec)
ret = dtype(field_spec)
# Finished
return ret, common_alignment
def _fix_names(field_spec):
""" Replace names which are None with the next unused f%d name """
names = field_spec['names']
for i, name in enumerate(names):
if name is not None:
continue
j = 0
while True:
name = f'f{j}'
if name not in names:
break
j = j + 1
names[i] = name
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
if value.fields is None:
field_spec = dict(
names=['f0'],
formats=[value],
offsets=[0],
itemsize=value.itemsize
)
else:
fields = value.fields
names = value.names
field_spec = dict(
names=names,
formats=[fields[name][0] for name in names],
offsets=[fields[name][1] for name in names],
itemsize=value.itemsize
)
field_spec['itemsize'] += padding
return dtype(field_spec)
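# Illustrative sketch (not part of the upstream module):
#
#     >>> _add_trailing_padding(dtype('i4'), 4).itemsize
#     8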
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a % b
return a
def _lcm(a, b):
return a // _gcd(a, b) * b
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
""" Format the error message for when __array_ufunc__ gives up. """
args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
['{}={!r}'.format(k, v)
for k, v in kwargs.items()])
args = inputs + kwargs.get('out', ())
types_string = ', '.join(repr(type(arg).__name__) for arg in args)
return ('operand type(s) all returned NotImplemented from '
'__array_ufunc__({!r}, {!r}, {}): {}'
.format(ufunc, method, args_string, types_string))
def array_function_errmsg_formatter(public_api, types):
""" Format the error message for when __array_ufunc__ gives up. """
func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
return ("no implementation found for '{}' on types that implement "
'__array_function__: {}'.format(func_name, list(types)))
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
This is used to construct the first line of the docstring
"""
# input arguments are simple
if ufunc.nin == 1:
in_args = 'x'
else:
in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
# output arguments are both keyword or positional
if ufunc.nout == 0:
out_args = ', /, out=()'
elif ufunc.nout == 1:
out_args = ', /, out=None'
else:
out_args = '[, {positional}], / [, out={default}]'.format(
positional=', '.join(
'out{}'.format(i+1) for i in range(ufunc.nout)),
default=repr((None,)*ufunc.nout)
)
# keyword only args depend on whether this is a gufunc
kwargs = (
", casting='same_kind'"
", order='K'"
", dtype=None"
", subok=True"
)
# NOTE: gufuncs may or may not support the `axis` parameter
if ufunc.signature is None:
kwargs = f", where=True{kwargs}[, signature, extobj]"
else:
kwargs += "[, signature, extobj, axes, axis]"
# join all the parts together
return '{name}({in_args}{out_args}, *{kwargs})'.format(
name=ufunc.__name__,
in_args=in_args,
out_args=out_args,
kwargs=kwargs
)
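# Illustrative sketch (not part of the upstream module; assumes
# ``import numpy as np``):
#
#     >>> _ufunc_doc_signature_formatter(np.add)
#     "add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj])"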
def npy_ctypes_check(cls):
# determine if a class comes from ctypes, in order to work around
# a bug in the buffer protocol for those objects, bpo-10746
try:
        # ctypes classes are new-style, so they have an __mro__. This probably
        # fails
# for ctypes classes with multiple inheritance.
if IS_PYPY:
# (..., _ctypes.basics._CData, Bufferable, object)
ctype_base = cls.__mro__[-3]
else:
            # (..., _ctypes._CData, object)
ctype_base = cls.__mro__[-2]
# right now, they're part of the _ctypes module
return '_ctypes' in ctype_base.__module__
except Exception:
return False
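# Illustrative sketch (not part of the upstream module):
#
#     >>> import ctypes
#     >>> npy_ctypes_check(ctypes.c_int * 3)
#     True
#     >>> npy_ctypes_check(int)
#     False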
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/umath_tests.py
"""
Shim for _umath_tests to allow a deprecation period for the new name.
"""
import warnings
# 2018-04-04, numpy 1.15.0
warnings.warn(("numpy.core.umath_tests is an internal NumPy "
"module and should not be imported. It will "
"be removed in a future NumPy release."),
category=DeprecationWarning, stacklevel=2)
from ._umath_tests import *
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/umath.py
"""
Create the numpy.core.umath namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.
"""
from . import _multiarray_umath
from ._multiarray_umath import * # noqa: F403
# These imports are needed for backward compatibility,
# do not change them. issue gh-11862
# _ones_like is semi-public, on purpose not added to __all__
from ._multiarray_umath import _UFUNC_API, _add_newdoc_ufunc, _ones_like
__all__ = [
'_UFUNC_API', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG',
'ERR_PRINT', 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT',
'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN',
'NINF', 'NZERO', 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID',
'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT',
'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', 'absolute', 'add',
'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj',
'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide',
'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2', 'expm1', 'fabs',
'floor', 'floor_divide', 'float_power', 'fmax', 'fmin', 'fmod', 'frexp',
'frompyfunc', 'gcd', 'geterrobj', 'greater', 'greater_equal', 'heaviside',
'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp',
'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2',
'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative',
'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians',
'reciprocal', 'remainder', 'right_shift', 'rint', 'seterrobj', 'sign',
'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan',
'tanh', 'true_divide', 'trunc']
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/overrides.py
"""Implementation of __array_function__ overrides from NEP-18."""
import collections
import functools
import os
from numpy.core._multiarray_umath import (
add_docstring, implement_array_function, _get_implementing_args)
from numpy.compat._inspect import getargspec
ARRAY_FUNCTION_ENABLED = bool(
int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
array_function_like_doc = (
"""like : array_like, optional
Reference object to allow the creation of arrays which are not
NumPy arrays. If an array-like passed in as ``like`` supports
the ``__array_function__`` protocol, the result will be defined
by it. In this case, it ensures the creation of an array object
compatible with that passed in via this argument."""
)
def set_array_function_like_doc(public_api):
if public_api.__doc__ is not None:
public_api.__doc__ = public_api.__doc__.replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
)
return public_api
add_docstring(
implement_array_function,
"""
Implement a function with checks for __array_function__ overrides.
All arguments are required, and can only be passed by position.
Parameters
----------
implementation : function
Function that implements the operation on NumPy array without
overrides when called like ``implementation(*args, **kwargs)``.
public_api : function
Function exposed by NumPy's public API originally called like
``public_api(*args, **kwargs)`` on which arguments are now being
checked.
relevant_args : iterable
Iterable of arguments to check for __array_function__ methods.
args : tuple
Arbitrary positional arguments originally passed into ``public_api``.
kwargs : dict
Arbitrary keyword arguments originally passed into ``public_api``.
Returns
-------
Result from calling ``implementation()`` or an ``__array_function__``
method, as appropriate.
Raises
------
TypeError : if no implementation is found.
""")
# exposed for testing purposes; used internally by implement_array_function
add_docstring(
_get_implementing_args,
"""
Collect arguments on which to call __array_function__.
Parameters
----------
relevant_args : iterable of array-like
Iterable of possibly array-like arguments to check for
__array_function__ methods.
Returns
-------
Sequence of arguments with __array_function__ methods, in the order in
which they should be called.
""")
ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
def verify_matching_signatures(implementation, dispatcher):
"""Verify that a dispatcher function has the right signature."""
implementation_spec = ArgSpec(*getargspec(implementation))
dispatcher_spec = ArgSpec(*getargspec(dispatcher))
if (implementation_spec.args != dispatcher_spec.args or
implementation_spec.varargs != dispatcher_spec.varargs or
implementation_spec.keywords != dispatcher_spec.keywords or
(bool(implementation_spec.defaults) !=
bool(dispatcher_spec.defaults)) or
(implementation_spec.defaults is not None and
len(implementation_spec.defaults) !=
len(dispatcher_spec.defaults))):
raise RuntimeError('implementation and dispatcher for %s have '
'different function signatures' % implementation)
if implementation_spec.defaults is not None:
if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
raise RuntimeError('dispatcher functions can only use None for '
'default argument values')
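# Illustrative sketch (hypothetical pair): signatures that satisfy the check
# above -- the same argument names in the same order, with every dispatcher
# default set to None.
def _verify_example_dispatcher(a, axis=None):
    return (a,)
def _verify_example_impl(a, axis=-1):
    return a
# verify_matching_signatures(_verify_example_impl, _verify_example_dispatcher)
# passes; renaming ``axis`` or giving the dispatcher a non-None default
# would raise RuntimeError.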
def set_module(module):
"""Decorator for overriding __module__ on a function or class.
Example usage::
@set_module('numpy')
def example():
pass
assert example.__module__ == 'numpy'
"""
def decorator(func):
if module is not None:
func.__module__ = module
return func
return decorator
def array_function_dispatch(dispatcher, module=None, verify=True,
docs_from_dispatcher=False):
"""Decorator for adding dispatch with the __array_function__ protocol.
See NEP-18 for example usage.
Parameters
----------
dispatcher : callable
Function that when called like ``dispatcher(*args, **kwargs)`` with
arguments from the NumPy function call returns an iterable of
array-like arguments to check for ``__array_function__``.
module : str, optional
__module__ attribute to set on new function, e.g., ``module='numpy'``.
By default, module is copied from the decorated function.
verify : bool, optional
        If True, verify that the signatures of the dispatcher and decorated
        function match exactly: all required and optional arguments
should appear in order with the same names, but the default values for
all optional arguments should be ``None``. Only disable verification
if the dispatcher's signature needs to deviate for some particular
reason, e.g., because the function has a signature like
``func(*args, **kwargs)``.
docs_from_dispatcher : bool, optional
If True, copy docs from the dispatcher function onto the dispatched
function, rather than from the implementation. This is useful for
functions defined in C, which otherwise don't have docstrings.
Returns
-------
Function suitable for decorating the implementation of a NumPy function.
"""
if not ARRAY_FUNCTION_ENABLED:
def decorator(implementation):
if docs_from_dispatcher:
add_docstring(implementation, dispatcher.__doc__)
if module is not None:
implementation.__module__ = module
return implementation
return decorator
def decorator(implementation):
if verify:
verify_matching_signatures(implementation, dispatcher)
if docs_from_dispatcher:
add_docstring(implementation, dispatcher.__doc__)
@functools.wraps(implementation)
def public_api(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
return implement_array_function(
implementation, public_api, relevant_args, args, kwargs)
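        # Rewriting the wrapper's code object makes tracebacks and profiler
        # output show the implementation's own name under the synthetic
        # '<__array_function__ internals>' filename rather than this
        # generic wrapper.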
public_api.__code__ = public_api.__code__.replace(
co_name=implementation.__name__,
co_filename='<__array_function__ internals>')
if module is not None:
public_api.__module__ = module
public_api._implementation = implementation
return public_api
return decorator
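# Illustrative sketch (hypothetical names): how a NumPy-style function is
# typically wired up with the decorator above.  The dispatcher mirrors the
# signature and returns only the array-like arguments.
def _example_concat_dispatcher(arrays, axis=None):
    return arrays
@array_function_dispatch(_example_concat_dispatcher, module='numpy')
def _example_concat(arrays, axis=None):
    """Hypothetical overridable function; ``arrays`` may contain objects
    implementing ``__array_function__``."""
    return list(arrays)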
def array_function_from_dispatcher(
implementation, module=None, verify=True, docs_from_dispatcher=True):
"""Like array_function_dispatcher, but with function arguments flipped."""
def decorator(dispatcher):
return array_function_dispatch(
dispatcher, module, verify=verify,
docs_from_dispatcher=docs_from_dispatcher)(implementation)
return decorator
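# Illustrative sketch (hypothetical names): the flipped form above suits
# C-implemented functions, where the implementation exists first and the
# Python-level dispatcher is written afterwards.  ``docs_from_dispatcher``
# is disabled here only because this stand-in implementation is a plain
# Python function that already has a docstring.
def _flipped_example_impl(a, axis=None):
    """Stand-in for a C implementation."""
    return a
@array_function_from_dispatcher(_flipped_example_impl,
                                docs_from_dispatcher=False)
def _flipped_example(a, axis=None):
    return (a,)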
| 7,297 | Python | 34.6 | 79 | 0.651638 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_ufunc_config.py | """
Functions for changing global ufunc configuration
This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`
"""
import collections.abc
import contextlib
from .overrides import set_module
from .umath import (
UFUNC_BUFSIZE_DEFAULT,
ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT,
SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID,
)
from . import umath
__all__ = [
"seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
"errstate",
]
_errdict = {"ignore": ERR_IGNORE,
"warn": ERR_WARN,
"raise": ERR_RAISE,
"call": ERR_CALL,
"print": ERR_PRINT,
"log": ERR_LOG}
_errdict_rev = {value: key for key, value in _errdict.items()}
@set_module('numpy')
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall, errstate
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]_:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] https://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> old_settings = np.seterr(all='ignore') #seterr to known value
>>> np.seterr(over='raise')
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
>>> np.seterr(**old_settings) # reset to default
{'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}
>>> np.int16(32000) * np.int16(3)
30464
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in short_scalars
>>> old_settings = np.seterr(all='print')
>>> np.geterr()
{'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
>>> np.int16(32000) * np.int16(3)
30464
"""
pyvals = umath.geterrobj()
old = geterr()
if divide is None:
divide = all or old['divide']
if over is None:
over = all or old['over']
if under is None:
under = all or old['under']
if invalid is None:
invalid = all or old['invalid']
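    # The four modes are packed into non-overlapping 3-bit fields of a
    # single integer via the SHIFT_* offsets; geterr() unpacks the same
    # fields with a 0b111 mask.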
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
(_errdict[over] << SHIFT_OVERFLOW) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
pyvals[1] = maskvalue
umath.seterrobj(pyvals)
return old
@set_module('numpy')
def geterr():
"""
Get the current way of handling floating-point errors.
Returns
-------
res : dict
A dictionary with keys "divide", "over", "under", and "invalid",
whose values are from the strings "ignore", "print", "log", "warn",
"raise", and "call". The keys represent possible floating-point
exceptions, and the values define how these exceptions are handled.
See Also
--------
geterrcall, seterr, seterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterr()
{'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
>>> np.arange(3.) / np.arange(3.)
array([nan, 1., 1.])
>>> oldsettings = np.seterr(all='warn', over='raise')
>>> np.geterr()
{'divide': 'warn', 'over': 'raise', 'under': 'warn', 'invalid': 'warn'}
>>> np.arange(3.) / np.arange(3.)
array([nan, 1., 1.])
"""
maskvalue = umath.geterrobj()[1]
mask = 7
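    # 0b111 isolates one 3-bit error-mode field after shifting it down,
    # mirroring the packing done in seterr() above.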
res = {}
val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
res['divide'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_OVERFLOW) & mask
res['over'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_UNDERFLOW) & mask
res['under'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_INVALID) & mask
res['invalid'] = _errdict_rev[val]
return res
@set_module('numpy')
def setbufsize(size):
"""
Set the size of the buffer used in ufuncs.
Parameters
----------
size : int
Size of buffer.
"""
if size > 10e6:
raise ValueError("Buffer size, %s, is too big." % size)
if size < 5:
raise ValueError("Buffer size, %s, is too small." % size)
if size % 16 != 0:
raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
pyvals = umath.geterrobj()
old = getbufsize()
pyvals[0] = size
umath.seterrobj(pyvals)
return old
@set_module('numpy')
def getbufsize():
"""
Return the size of the buffer used in ufuncs.
Returns
-------
getbufsize : int
Size of ufunc buffer in bytes.
"""
return umath.geterrobj()[0]
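# Usage sketch for the two functions above: temporarily enlarge the ufunc
# buffer, then restore the previous size.  The 2 ** 16 figure is an
# arbitrary multiple of 16 inside the accepted range.
def _bufsize_example(tmp_size=2 ** 16):
    old = setbufsize(tmp_size)  # returns the previous size
    try:
        return getbufsize()  # reflects tmp_size while the block runs
    finally:
        setbufsize(old)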
@set_module('numpy')
def seterrcall(func):
"""
Set the floating-point error callback function or log object.
There are two ways to capture floating-point error messages. The first
is to set the error-handler to 'call', using `seterr`. Then, set
the function to call using this function.
The second is to set the error-handler to 'log', using `seterr`.
Floating-point errors then trigger a call to the 'write' method of
the provided object.
Parameters
----------
func : callable f(err, flag) or object with write method
Function to call upon floating-point errors ('call'-mode) or
object whose 'write' method is used to log such message ('log'-mode).
The call function takes two arguments. The first is a string describing
the type of error (such as "divide by zero", "overflow", "underflow",
or "invalid value"), and the second is the status flag. The flag is a
byte, whose four least-significant bits indicate the type of error, one
of "divide", "over", "under", "invalid"::
[0 0 0 0 divide over under invalid]
In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
If an object is provided, its write method should take one argument,
a string.
Returns
-------
h : callable, log instance or None
The old error handler.
See Also
--------
seterr, geterr, geterrcall
Examples
--------
Callback upon error:
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> saved_handler = np.seterrcall(err_handler)
>>> save_err = np.seterr(all='call')
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([inf, inf, inf])
>>> np.seterrcall(saved_handler)
<function err_handler at 0x...>
>>> np.seterr(**save_err)
{'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'}
Log error message:
>>> class Log:
... def write(self, msg):
... print("LOG: %s" % msg)
...
>>> log = Log()
>>> saved_handler = np.seterrcall(log)
>>> save_err = np.seterr(all='log')
>>> np.array([1, 2, 3]) / 0.0
LOG: Warning: divide by zero encountered in divide
array([inf, inf, inf])
>>> np.seterrcall(saved_handler)
<numpy.core.numeric.Log object at 0x...>
>>> np.seterr(**save_err)
{'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'}
"""
if func is not None and not isinstance(func, collections.abc.Callable):
if (not hasattr(func, 'write') or
not isinstance(func.write, collections.abc.Callable)):
raise ValueError("Only callable can be used as callback")
pyvals = umath.geterrobj()
old = geterrcall()
pyvals[2] = func
umath.seterrobj(pyvals)
return old
@set_module('numpy')
def geterrcall():
"""
Return the current callback function used on floating-point errors.
When the error handling for a floating-point error (one of "divide",
"over", "under", or "invalid") is set to 'call' or 'log', the function
that is called or the log instance that is written to is returned by
`geterrcall`. This function or log instance has been set with
`seterrcall`.
Returns
-------
errobj : callable, log instance or None
The current error handler. If no handler was set through `seterrcall`,
``None`` is returned.
See Also
--------
seterrcall, seterr, geterr
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrcall() # we did not yet set a handler, returns None
>>> oldsettings = np.seterr(all='call')
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
>>> oldhandler = np.seterrcall(err_handler)
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([inf, inf, inf])
>>> cur_handler = np.geterrcall()
>>> cur_handler is err_handler
True
"""
return umath.geterrobj()[2]
class _unspecified:
pass
_Unspecified = _unspecified()
@set_module('numpy')
class errstate(contextlib.ContextDecorator):
"""
errstate(**kwargs)
Context manager for floating-point error handling.
Using an instance of `errstate` as a context manager allows statements in
that context to execute with a known error handling behavior. Upon entering
the context the error handling is set with `seterr` and `seterrcall`, and
upon exiting it is reset to what it was before.
.. versionchanged:: 1.17.0
`errstate` is also usable as a function decorator, saving
a level of indentation if an entire function is wrapped.
See :py:class:`contextlib.ContextDecorator` for more information.
Parameters
----------
kwargs : {divide, over, under, invalid}
Keyword arguments. The valid keywords are the possible floating-point
exceptions. Each keyword should have a string value that defines the
treatment for the particular error. Possible values are
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
See Also
--------
seterr, geterr, seterrcall, geterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
>>> np.arange(3) / 0.
array([nan, inf, inf])
>>> with np.errstate(divide='warn'):
... np.arange(3) / 0.
array([nan, inf, inf])
>>> np.sqrt(-1)
nan
>>> with np.errstate(invalid='raise'):
... np.sqrt(-1)
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
FloatingPointError: invalid value encountered in sqrt
Outside the context the error handling behavior has not changed:
>>> np.geterr()
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
"""
def __init__(self, *, call=_Unspecified, **kwargs):
self.call = call
self.kwargs = kwargs
def __enter__(self):
self.oldstate = seterr(**self.kwargs)
if self.call is not _Unspecified:
self.oldcall = seterrcall(self.call)
def __exit__(self, *exc_info):
seterr(**self.oldstate)
if self.call is not _Unspecified:
seterrcall(self.oldcall)
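# Sketch of the decorator form mentioned in the errstate docstring above:
# the wrapped (hypothetical) function runs with invalid='ignore', and the
# previous error state is restored when it returns.
@errstate(invalid='ignore')
def _quiet_sqrt_example(x):
    return umath.sqrt(x)  # NaN for negative input, with no warning raised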
def _setdef():
defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
umath.seterrobj(defval)
# set the default values
_setdef()
| 13,382 | Python | 28.939597 | 82 | 0.607831 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/fromnumeric.py | """Module containing non-deprecated functions borrowed from Numeric.
"""
import functools
import types
import warnings
import numpy as np
from . import multiarray as mu
from . import overrides
from . import umath as um
from . import numerictypes as nt
from .multiarray import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
_gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# A TypeError occurs if the object does have such a method in its
# class, but its signature is not identical to that of NumPy's. This
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
passkwargs = {k: v for k, v in kwargs.items()
if v is not np._NoValue}
if type(obj) is not mu.ndarray:
try:
reduction = getattr(obj, method)
except AttributeError:
pass
else:
# This branch is needed for reductions like any which don't
# support a dtype.
if dtype is not None:
return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
else:
return reduction(axis=axis, out=out, **passkwargs)
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
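# Note: kwargs still equal to np._NoValue are stripped above so that
# third-party ``sum``/``any``-style methods are never handed keyword
# arguments the caller did not explicitly pass.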
def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
return (a, out)
@array_function_dispatch(_take_dispatcher)
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : array_like (Ni..., M, Nk...)
The source array.
indices : array_like (Nj...)
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional (Ni..., Nj..., Nk...)
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if `mode='raise'`; use other modes for better performance.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray (Ni..., Nj..., Nk...)
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
take_along_axis : Take elements by matching the array and the index arrays
Notes
-----
By eliminating the inner loop in the description above, and using `s_` to
build simple slice objects, `take` can be expressed in terms of applying
fancy indexing to each 1-d slice::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
            for kk in ndindex(Nk):
out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
For this reason, it is equivalent to (but faster than) the following use
of `apply_along_axis`::
out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
def _reshape_dispatcher(a, newshape, order=None):
return (a,)
# not deprecated --- copy if necessary, view otherwise
@array_function_dispatch(_reshape_dispatcher)
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised when the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose makes the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
Traceback (most recent call last):
...
AttributeError: Incompatible shape for in-place modification. Use
`.reshape()` to make a copy with the desired shape.
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
def _choose_dispatcher(a, choices, out=None, mode=None):
yield a
yield from choices
yield out
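# Note: dispatchers may be generators; this one lazily yields the index
# array, every choice array, and ``out`` as override candidates.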
@array_function_dispatch(_choose_dispatcher)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a list of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of ``n`` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each ``i``. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode='raise'`` (the default), then, first of all, each element of
``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose
that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)``
position in ``Ba`` - then the value at the same position in the new array
is the value in ``Bchoices[i]`` at that same position;
* if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed)
integer; negative integers are mapped to 0; values greater than ``n-1``
are mapped to ``n-1``; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in ``[0, n-1]``, where ``n`` is the
number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
cases any integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if ``mode='raise'``; use other modes for better performance.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside ``[0, n-1]`` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod ``n``
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
numpy.take_along_axis : Preferable if `choices` is an array
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
def _repeat_dispatcher(a, repeats, axis=None):
return (a,)
@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
unique : Find the unique elements of an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def _put_dispatcher(a, ind, v, mode=None):
return (a, ind, v)
@array_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers. In 'raise' mode,
if an exception occurs the target array may still be modified.
See Also
--------
putmask, place
put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError as e:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__)) from e
return put(ind, v, mode=mode)
def _swapaxes_dispatcher(a, axis1, axis2):
return (a,)
@array_function_dispatch(_swapaxes_dispatcher)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def _transpose_dispatcher(a, axes=None):
return (a,)
@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
"""
Reverse or permute the axes of an array; returns the modified array.
For an array a with two axes, transpose(a) gives the matrix transpose.
Refer to `numpy.ndarray.transpose` for full documentation.
Parameters
----------
a : array_like
Input array.
axes : tuple or list of ints, optional
If specified, it must be a tuple or list which contains a permutation of
[0,1,..,N-1] where N is the number of axes of a. The i'th axis of the
returned array will correspond to the axis numbered ``axes[i]`` of the
input. If not specified, defaults to ``range(a.ndim)[::-1]``, which
reverses the order of the axes.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
ndarray.transpose : Equivalent method
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
>>> x = np.ones((2, 3, 4, 5))
>>> np.transpose(x).shape
(5, 4, 3, 2)
"""
return _wrapfunc(a, 'transpose', axes)
def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_partition_dispatcher)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The k-th value of the element
will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
it. The order of all elements in the partitions is undefined. If
        provided with a sequence of k-th values, it will partition all
        elements indexed by them into their sorted positions at once.
.. deprecated:: 1.22.0
Passing booleans as index is deprecated.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argpartition_dispatcher)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
        before it and all larger elements behind it. The order of all
        elements in the partitions is undefined. If provided with a
        sequence of k-th values, it will partition all of them into their
        sorted positions at once.
.. deprecated:: 1.22.0
Passing booleans as index is deprecated.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
More generally, ``np.take_along_axis(a, index_array, axis)``
always yields the partitioned `a`, irrespective of dimensionality.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort.
take_along_axis : Apply ``index_array`` from argpartition
to an array as if by calling partition.
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
Multi-dimensional array:
>>> x = np.array([[3, 4, 2], [1, 3, 1]])
>>> index_array = np.argpartition(x, kth=1, axis=-1)
>>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)
array([[2, 3, 4],
[1, 1, 3]])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
def _sort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_sort_dispatcher)
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'heapsort' 3 O(n*log(n)) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
`heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted.
It, along with 'mergesort' is currently mapped to
`timsort <https://en.wikipedia.org/wiki/Timsort>`_
or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
.. versionchanged:: 1.18.0
NaT now sorts to the end of arrays for consistency with NaN.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
def _argsort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argsort_dispatcher)
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
take_along_axis : Apply ``index_array`` from argsort
to an array as if by calling sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def _argmax_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
return (a, out)
@array_function_dispatch(_argmax_dispatcher)
def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
.. versionadded:: 1.22.0
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed. If `keepdims` is set to True,
then the size of `axis` will be 1 with the resulting array having same
shape as `a.shape`.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmax to an array as if by calling max.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indexes of the maximal elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmax(x, axis=-1)
>>> # Same as np.amax(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[4],
[3]])
>>> # Same as np.amax(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([4, 3])
Setting `keepdims` to `True`,
>>> x = np.arange(24).reshape((2, 3, 4))
>>> res = np.argmax(x, axis=1, keepdims=True)
>>> res.shape
(2, 1, 4)
"""
kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
return _wrapfunc(a, 'argmax', axis=axis, out=out, **kwds)
def _argmin_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
return (a, out)
@array_function_dispatch(_argmin_dispatcher)
def argmin(a, axis=None, out=None, *, keepdims=np._NoValue):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
.. versionadded:: 1.22.0
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed. If `keepdims` is set to True,
then the size of `axis` will be 1 with the resulting array having same
shape as `a.shape`.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmin to an array as if by calling min.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmin(x, axis=-1)
>>> # Same as np.amin(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[2],
[0]])
    >>> # Same as np.amin(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([2, 0])
Setting `keepdims` to `True`,
>>> x = np.arange(24).reshape((2, 3, 4))
>>> res = np.argmin(x, axis=1, keepdims=True)
>>> res.shape
(2, 1, 4)
"""
kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
return _wrapfunc(a, 'argmin', axis=axis, out=out, **kwds)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
return (a, v, sorter)
@array_function_dispatch(_searchsorted_dispatcher)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : int or array of ints
Array of insertion points with the same shape as `v`,
or an integer if `v` is a scalar.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
    This function uses the same algorithm as the built-in Python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
def _resize_dispatcher(a, new_shape):
return (a,)
@array_function_dispatch(_resize_dispatcher)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated iterating over the array in C-order.
See Also
--------
numpy.reshape : Reshape an array without changing the total size.
numpy.pad : Enlarge and pad an array.
numpy.repeat : Repeat elements of an array.
ndarray.resize : resize an array in-place.
Notes
-----
    When the total size of the array does not change, `~numpy.reshape` should
be used. In most other cases either indexing (to reduce the size)
or padding (to increase the size) may be a more appropriate solution.
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, iterating
over `a` in C-order, disregarding axes (and cycling back from the start if
the new shape is larger). This functionality is therefore not suitable to
resize images, or data where each axis represents a separate and distinct
entity.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
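    In contrast, the in-place `ndarray.resize` method pads with zeros rather
    than repeating the data (shown on a copy, since it modifies the array
    in place; ``refcheck=False`` avoids reference-count errors in doctests):
    >>> c = a.copy()
    >>> c.resize((2, 3), refcheck=False)
    >>> c
    array([[0, 1, 2],
           [3, 0, 0]])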
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
new_size = 1
for dim_length in new_shape:
new_size *= dim_length
if dim_length < 0:
raise ValueError('all elements of `new_shape` must be non-negative')
if a.size == 0 or new_size == 0:
# First case must zero fill. The second would have repeats == 0.
return np.zeros_like(a, shape=new_shape)
repeats = -(-new_size // a.size) # ceil division
a = concatenate((a,) * repeats)[:new_size]
return reshape(a, new_shape)
def _squeeze_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_squeeze_dispatcher)
def squeeze(a, axis=None):
"""
Remove axes of length one from `a`.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the entries of length one in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`. Note that if all axes are squeezed,
the result is a 0d array and not a scalar.
Raises
------
ValueError
If `axis` is not None, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding entries of length one
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
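    A tuple of axes removes several length-one dimensions at once:
    >>> np.squeeze(x, axis=(0, 2)).shape
    (3,)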
>>> x = np.array([[1234]])
>>> x.shape
(1, 1)
>>> np.squeeze(x)
array(1234) # 0d array
>>> np.squeeze(x).shape
()
>>> np.squeeze(x)[()]
1234
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze', axis=axis)
if axis is None:
return squeeze()
else:
return squeeze(axis=axis)
def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
return (a,)
@array_function_dispatch(_diagonal_dispatcher)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, then a 1-D array containing the diagonal and of the
same type as `a` is returned unless `a` is a `matrix`, in which case
a 1-D array rather than a (2-D) `matrix` is returned in order to
maintain backward compatibility.
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
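    A negative offset selects a diagonal below the main one:
    >>> a.diagonal(-1)
    array([2])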
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
The anti-diagonal can be obtained by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.fliplr(a).diagonal() # Horizontal flip
array([2, 4, 6])
>>> np.flipud(a).diagonal() # Vertical flip
array([6, 4, 2])
Note that the order in which the diagonal is retrieved varies depending
on the flip function.
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
def _trace_dispatcher(
a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_trace_dispatcher)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
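    An ``offset`` shifts which diagonal is summed:
    >>> np.trace(np.arange(9).reshape(3, 3), offset=1)
    6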
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
def _ravel_dispatcher(a, order=None):
return (a,)
@array_function_dispatch(_ravel_dispatcher)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
    As of NumPy 1.10, the returned array will have the same type as the input
    array (for example, a masked array will be returned for a masked array
    input).
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
y is an array of the same subtype as `a`, with shape ``(a.size,)``.
Note that matrices are special cased for backward compatibility, if `a`
is a matrix, then y is a 1-D ndarray.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ravel(x)
array([1, 2, 3, 4, 5, 6])
>>> x.reshape(-1)
array([1, 2, 3, 4, 5, 6])
>>> np.ravel(x, order='F')
array([1, 4, 2, 5, 3, 6])
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> np.ravel(x.T)
array([1, 4, 2, 5, 3, 6])
>>> np.ravel(x.T, order='A')
array([1, 2, 3, 4, 5, 6])
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
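    Subclasses are preserved; for instance, a masked array stays masked
    (repr shown for the default fill value):
    >>> m = np.ma.array([1, 2, 3], mask=[False, True, False])
    >>> np.ravel(m)
    masked_array(data=[1, --, 3],
                 mask=[False,  True, False],
           fill_value=999999)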
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
def _nonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_nonzero_dispatcher)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
.. note::
When called on a zero-d array or scalar, ``nonzero(a)`` is treated
as ``nonzero(atleast_1d(a))``.
.. deprecated:: 1.17.0
Use `atleast_1d` explicitly if this behavior is deliberate.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]])
    A common use for ``nonzero`` is to find the indices of an array where
    a condition is True. Given an array `a`, the condition ``a > 3`` is
    a boolean array and since False is interpreted as 0, ``np.nonzero(a > 3)``
    yields the indices of `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9])
>>> a[a > 3] # prefer this spelling
array([4, 5, 6, 7, 8, 9])
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
def _shape_dispatcher(a):
return (a,)
@array_function_dispatch(_shape_dispatcher)
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with
``N>=1``.
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 3]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4), (5, 6)],
... dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(3,)
>>> a.shape
(3,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def _compress_dispatcher(condition, a, axis=None, out=None):
return (condition, a, out)
@array_function_dispatch(_compress_dispatcher)
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
extract : Equivalent method when working on 1-D arrays
:ref:`ufuncs-output-type`
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
return (a, a_min, a_max)
@array_function_dispatch(_clip_dispatcher)
def clip(a, a_min, a_max, out=None, **kwargs):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``.
No check is performed to ensure ``a_min < a_max``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min, a_max : array_like or None
Minimum and maximum value. If ``None``, clipping is not performed on
the corresponding edge. Only one of `a_min` and `a_max` may be
``None``. Both are broadcast against `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.17.0
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
When `a_min` is greater than `a_max`, `clip` returns an
array in which all values are equal to `a_max`,
as shown in the second example.
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> np.clip(a, 8, 1)
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
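    Passing ``None`` for one bound clips on the other side only:
    >>> np.clip(a, None, 7)
    array([0, 1, 2, 3, 4, 5, 6, 7, 7, 7])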
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_sum_dispatcher)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
Starting value for the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
add.reduce : Equivalent functionality of `add`.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
    For floating point numbers, the numerical precision of sum (and
    ``np.add.reduce``) is in general limited by directly adding each number
    individually to the result, causing rounding errors at every step.
    However, numpy will often use a numerically better approach (partial
    pairwise summation), leading to improved precision in many use-cases.
This improved precision is always provided when no ``axis`` is given.
When ``axis`` is given, it will depend on which axis is summed.
Technically, to provide the best speed possible, the improved precision
is only used when the summation is along the fast axis in memory.
Note that the exact precision may vary depending on other parameters.
In contrast to NumPy, Python's ``math.fsum`` function uses a slower but
more precise approach to summation.
Especially when summing a large number of lower precision floating point
numbers, such as ``float32``, numerical errors can become significant.
In such cases it can be advisable to use `dtype="float64"` to use a higher
precision for the output.
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
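    A tuple of axes reduces over several axes at once:
    >>> np.sum(np.arange(24).reshape(2, 3, 4), axis=(0, 2))
    array([ 60,  92, 124])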
>>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
array([1., 5.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
You can also start the sum with a value other than zero:
>>> np.sum([10], initial=5)
15
"""
if isinstance(a, _gentype):
# 2018-02-25, 1.15.0
warnings.warn(
"Calling np.sum(generator) is deprecated, and in the future will give a different result. "
"Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
DeprecationWarning, stacklevel=3)
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
initial=initial, where=where)
def _any_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=np._NoValue):
return (a, where, out)
@array_function_dispatch(_any_dispatcher)
def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
    Returns a single boolean if `axis` is ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (``axis=None``) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for any `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False])
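    With ``keepdims=True`` the reduced axis is retained with size one:
    >>> np.any([[True, False], [False, False]], axis=1, keepdims=True)
    array([[ True],
           [False]])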
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> np.any([[True, False], [False, False]], where=[[False], [True]])
False
>>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
return _wrapreduction(a, np.logical_or, 'any', axis, None, out,
keepdims=keepdims, where=where)
def _all_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_all_dispatcher)
def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (``axis=None``) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more
details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for all `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False])
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> np.all([[True, True], [False, True]], where=[[True], [False]])
True
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
"""
return _wrapreduction(a, np.logical_and, 'all', axis, None, out,
keepdims=keepdims, where=where)
def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumsum_dispatcher)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point
values since ``sum`` may use a pairwise summation routine, reducing
the roundoff-error. See `sum` for more information.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
    ``cumsum(b)[-1]`` may not be equal to ``sum(b)``:
>>> b = np.array([1, 2e-9, 3e-9] * 1000000)
>>> b.cumsum()[-1]
1000000.0050045159
>>> b.sum()
1000000.0050000029
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
return (a, out)
@array_function_dispatch(_ptp_dispatcher)
def ptp(a, axis=None, out=None, keepdims=np._NoValue):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
.. warning::
`ptp` preserves the data type of the array. This means the
return value for an input of signed integers with n bits
(e.g. `np.int8`, `np.int16`, etc) is also a signed integer
with n bits. In that case, peak-to-peak values greater than
``2**(n-1)-1`` will be returned as negative values. An example
with a work-around is shown below.
Parameters
----------
a : array_like
Input values.
axis : None or int or tuple of ints, optional
Axis along which to find the peaks. By default, flatten the
array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.15.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `ptp` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.array([[4, 9, 2, 10],
... [6, 9, 7, 12]])
>>> np.ptp(x, axis=1)
array([8, 6])
>>> np.ptp(x, axis=0)
array([2, 0, 5, 2])
>>> np.ptp(x)
10
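    With ``keepdims=True`` the result broadcasts against the input:
    >>> np.ptp(x, axis=1, keepdims=True)
    array([[8],
           [6]])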
This example shows that a negative value can be returned when
the input is an array of signed integers.
>>> y = np.array([[1, 127],
... [0, 127],
... [-1, 127],
... [-2, 127]], dtype=np.int8)
>>> np.ptp(y, axis=1)
array([ 126, 127, -128, -127], dtype=int8)
A work-around is to use the `view()` method to view the result as
unsigned integers with the same bit width:
>>> np.ptp(y, axis=1).view(np.uint8)
array([126, 127, 128, 129], dtype=uint8)
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
ptp = a.ptp
except AttributeError:
pass
else:
return ptp(axis=axis, out=out, **kwargs)
return _methods._ptp(a, axis=axis, out=out, **kwargs)
def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amax_dispatcher)
def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
    initial : scalar, optional
        The minimum value of an output element. Must be present to allow
        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the maximum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> np.amax(a, where=[False, True], initial=-1, axis=0)
array([-1, 3])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.amax(b, where=~np.isnan(b), initial=-1)
4.0
>>> np.nanmax(b)
4.0
You can use an initial value to compute the maximum of an empty slice, or
to initialize it to a different value:
>>> np.amax([[-50], [10]], axis=-1, initial=0)
array([ 0, 10])
    Notice that the initial value is used as one of the elements for which
    the maximum is determined, unlike the default argument of Python's
    ``max`` function, which is only used for empty iterables.
>>> np.amax([5], initial=6)
6
>>> max([5], default=6)
5
"""
return _wrapreduction(a, np.maximum, 'max', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amin_dispatcher)
def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
    initial : scalar, optional
        The maximum value of an output element. Must be present to allow
        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the minimum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> np.amin(a, where=[False, True], initial=10, axis=0)
array([10, 1])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.amin(b, where=~np.isnan(b), initial=10)
0.0
>>> np.nanmin(b)
0.0
>>> np.amin([[-50], [10]], axis=-1, initial=0)
array([-50, 0])
    Notice that the initial value is used as one of the elements for which
    the minimum is determined, unlike the default argument of Python's
    ``min`` function, which is only used for empty iterables.
>>> np.amin([6], initial=5)
5
>>> min([6], default=5)
6
"""
return _wrapreduction(a, np.minimum, 'min', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_prod_dispatcher)
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x)
16 # may vary
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
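    Or over the first axis, multiplying down each column:
    >>> np.prod([[1.,2.],[3.,4.]], axis=0)
    array([3., 8.])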
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
3.0
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
"""
return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
keepdims=keepdims, initial=initial, where=where)
def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumprod_dispatcher)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def _ndim_dispatcher(a):
return (a,)
@array_function_dispatch(_ndim_dispatcher)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def _size_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_size_dispatcher)
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def _around_dispatcher(a, decimals=None, out=None):
return (a, out)
@array_function_dispatch(_around_dispatcher)
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See :ref:`ufuncs-output-type` for more
details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
``np.around`` uses a fast but sometimes inexact algorithm to round
floating-point datatypes. For positive `decimals` it is equivalent to
``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
error due to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling by powers
of ten. For instance, note the extra "1" in the following:
>>> np.round(56294995342131.5, 3)
56294995342131.51
If your goal is to print such values with a fixed number of decimals, it is
preferable to use numpy's float printing routines to limit the number of
printed decimals:
>>> np.format_float_positional(56294995342131.5, precision=3)
'56294995342131.5'
The float printing routines use an accurate but much more computationally
demanding algorithm to compute the number of digits after the decimal
point.
Alternatively, Python's builtin `round` function uses a more accurate
but slower algorithm for 64-bit floating point values:
>>> round(56294995342131.5, 3)
56294995342131.5
>>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
(16.06, 16.05)
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
Examples
--------
>>> np.around([0.37, 1.64])
array([0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_mean_dispatcher)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([2., 3.])
>>> np.mean(a, axis=1)
array([1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806 # may vary
Specifying a where argument:
>>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]])
>>> np.mean(a)
12.0
>>> np.mean(a, where=[[True], [False], [False]])
9.0
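    With ``keepdims=True`` the reduced axis is kept with size one, so the
    result broadcasts against `a`:
    >>> np.mean(a, axis=1, keepdims=True)
    array([[ 9.],
           [12.],
           [15.]])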
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_std_dispatcher)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor used in calculations
        is ``N - ddof``, where ``N`` represents the number of elements.
        By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the standard deviation.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
``x = abs(a - a.mean())**2``.
The average squared deviation is typically calculated as ``x.sum() / N``,
where ``N = len(x)``. If, however, `ddof` is specified, the divisor
``N - ddof`` is used instead. In standard statistical practice, ``ddof=1``
provides an unbiased estimator of the variance of the infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables. The standard deviation computed in this
function is the square root of the estimated variance, so even with
``ddof=1``, it will not be an unbiased estimate of the standard deviation
per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177 # may vary
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.std(a)
2.614064523559687 # may vary
>>> np.std(a, where=[[True], [True], [False]])
2.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
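# Illustrative sketch (not part of the library): ``ddof`` switches the
# divisor from N to N - ddof. For x = [1, 2, 3, 4] (mean 2.5, squared
# deviations summing to 5):
#
#   >>> x = np.array([1., 2., 3., 4.])
#   >>> np.std(x)          # sqrt(5 / 4)
#   1.118033988749895
#   >>> np.std(x, ddof=1)  # sqrt(5 / 3)
#   1.2909944487358056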
def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_var_dispatcher)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`; however, any non-default value will be. If the
sub-class's method does not implement `keepdims`, any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the variance. See `~numpy.ufunc.reduce` for
details.
.. versionadded:: 1.20.0
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``.
The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759 # may vary
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.var(a)
6.833333333333333 # may vary
>>> np.var(a, where=[[True], [True], [False]])
4.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
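# Illustrative sketch (not part of the library): for real input, ``var``
# is the square of ``std`` computed with the same ``ddof``:
#
#   >>> x = np.array([1., 2., 3., 4.])
#   >>> np.var(x, ddof=1)
#   1.6666666666666667
#   >>> np.isclose(np.std(x, ddof=1) ** 2, np.var(x, ddof=1))
#   True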
# Aliases of other functions. These have their own definitions only so that
# they can have unique docstrings.
@array_function_dispatch(_around_dispatcher)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return around(a, decimals=decimals, out=out)
@array_function_dispatch(_prod_dispatcher, verify=False)
def product(*args, **kwargs):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
return prod(*args, **kwargs)
@array_function_dispatch(_cumprod_dispatcher, verify=False)
def cumproduct(*args, **kwargs):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return cumprod(*args, **kwargs)
@array_function_dispatch(_any_dispatcher, verify=False)
def sometrue(*args, **kwargs):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function; see for details.
"""
return any(*args, **kwargs)
@array_function_dispatch(_all_dispatcher, verify=False)
def alltrue(*args, **kwargs):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
return all(*args, **kwargs)
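# Illustrative sketch (not part of the library): each alias above simply
# forwards to its modern counterpart, so the results are identical:
#
#   >>> a = np.array([[True, True], [True, False]])
#   >>> np.alltrue(a) == np.all(a)
#   True
#   >>> np.sometrue(a) == np.any(a)
#   True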
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/arrayprint.py
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
"set_printoptions", "get_printoptions", "printoptions",
"format_float_positional", "format_float_scientific"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <[email protected]>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
# scalars but for different purposes. scalartypes.c.src has str/reprs for when
# the scalar is printed on its own, while arrayprint.py has strs for when
# scalars are printed inside an ndarray. Only the latter strs are currently
# user-customizable.
import functools
import numbers
import sys
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
import numpy as np
from . import numerictypes as _nt
from .umath import absolute, isinf, isfinite, isnat
from . import multiarray
from .multiarray import (array, dragon4_positional, dragon4_scientific,
datetime_as_string, datetime_data, ndarray,
set_legacy_print_mode)
from .fromnumeric import any
from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
from .overrides import array_function_dispatch, set_module
import operator
import warnings
import contextlib
_format_options = {
'edgeitems': 3, # repr N leading and trailing items of each dimension
'threshold': 1000, # total items > triggers array summarization
'floatmode': 'maxprec',
'precision': 8, # precision of floating point representations
'suppress': False, # suppress printing small floating values in exp format
'linewidth': 75,
'nanstr': 'nan',
'infstr': 'inf',
'sign': '-',
'formatter': None,
# Internally stored as an int to simplify comparisons; converted from/to
# str/False on the way in/out.
'legacy': sys.maxsize}
def _make_options_dict(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
sign=None, formatter=None, floatmode=None, legacy=None):
"""
Make a dictionary out of the non-None arguments, plus conversion of
*legacy* and sanity checks.
"""
options = {k: v for k, v in locals().items() if v is not None}
if suppress is not None:
options['suppress'] = bool(suppress)
modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
if floatmode not in modes + [None]:
raise ValueError("floatmode option must be one of " +
", ".join('"{}"'.format(m) for m in modes))
if sign not in [None, '-', '+', ' ']:
raise ValueError("sign option must be one of ' ', '+', or '-'")
if legacy == False:
options['legacy'] = sys.maxsize
elif legacy == '1.13':
options['legacy'] = 113
elif legacy == '1.21':
options['legacy'] = 121
elif legacy is None:
pass # OK, do nothing.
else:
warnings.warn(
"legacy printing option can currently only be '1.13', '1.21', or "
"`False`", stacklevel=3)
if threshold is not None:
# forbid the bad threshold arg suggested by stack overflow, gh-12351
if not isinstance(threshold, numbers.Number):
raise TypeError("threshold must be numeric")
if np.isnan(threshold):
raise ValueError("threshold must be non-NAN, try "
"sys.maxsize for untruncated representation")
if precision is not None:
# forbid the bad precision arg as suggested by issue #18254
try:
options['precision'] = operator.index(precision)
except TypeError as e:
raise TypeError('precision must be an integer') from e
return options
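# Illustrative sketch (not part of the module): the *legacy* string is
# stored as an int so that print modes can be compared with ``<=``:
#
#   >>> _make_options_dict(legacy='1.13')['legacy']
#   113
#   >>> _make_options_dict(legacy=False)['legacy'] == sys.maxsize
#   True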
@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
formatter=None, sign=None, floatmode=None, *, legacy=None):
"""
Set printing options.
These options determine the way floating point numbers, arrays and
other NumPy objects are displayed.
Parameters
----------
precision : int or None, optional
Number of digits of precision for floating point output (default 8).
May be None if `floatmode` is not `fixed`, to print as many digits as
necessary to uniquely specify the value.
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
To always use the full repr without summarization, pass `sys.maxsize`.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
linewidth : int, optional
The number of characters per line for the purpose of inserting
line breaks (default 75).
suppress : bool, optional
If True, always print floating point numbers using fixed point
notation, in which case numbers equal to zero in the current precision
will print as zero. If False, then scientific notation is used when
absolute value of the smallest number is < 1e-4 or the ratio of the
maximum absolute value to the minimum is > 1e3. The default is False.
nanstr : str, optional
String representation of floating point not-a-number (default nan).
infstr : str, optional
String representation of floating point infinity (default inf).
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values. (default '-')
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are:
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'object' : `np.object_` arrays
Other keys that can be used to set a group of types at once are:
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'numpystr'
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values
(default maxprec_equal):
* 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
* 'unique': Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
* 'maxprec': Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
* 'maxprec_equal': Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'`, this enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. This also
enables 1.21 legacy printing mode (described below).
If set to the string `'1.21'`, this enables 1.21 legacy printing mode. This
approximates numpy 1.21 print output of complex structured dtypes
by not inserting spaces after commas that separate fields and after
colons.
If set to `False`, disables legacy mode.
Unrecognized strings will be ignored with a warning for forward
compatibility.
.. versionadded:: 1.14.0
.. versionchanged:: 1.22.0
See Also
--------
get_printoptions, printoptions, set_string_function, array2string
Notes
-----
`formatter` is always reset with a call to `set_printoptions`.
Use `printoptions` as a context manager to set the values temporarily.
Examples
--------
Floating point precision can be set:
>>> np.set_printoptions(precision=4)
>>> np.array([1.123456789])
array([1.1235])
Long arrays can be summarised:
>>> np.set_printoptions(threshold=5)
>>> np.arange(10)
array([0, 1, 2, ..., 7, 8, 9])
Small results can be suppressed:
>>> eps = np.finfo(float).eps
>>> x = np.arange(4.)
>>> x**2 - (x + eps)**2
array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
>>> np.set_printoptions(suppress=True)
>>> x**2 - (x + eps)**2
array([-0., -0., 0., 0.])
A custom formatter can be used to display array elements as desired:
>>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
>>> x = np.arange(3)
>>> x
array([int: 0, int: -1, int: -2])
>>> np.set_printoptions() # formatter gets reset
>>> x
array([0, 1, 2])
To put back the default options, you can use:
>>> np.set_printoptions(edgeitems=3, infstr='inf',
... linewidth=75, nanstr='nan', precision=8,
... suppress=False, threshold=1000, formatter=None)
Also to temporarily override options, use `printoptions` as a context manager:
>>> with np.printoptions(precision=2, suppress=True, threshold=5):
... np.linspace(0, 10, 10)
array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ])
"""
opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
suppress, nanstr, infstr, sign, formatter,
floatmode, legacy)
# formatter is always reset
opt['formatter'] = formatter
_format_options.update(opt)
# set the C variable for legacy mode
if _format_options['legacy'] == 113:
set_legacy_print_mode(113)
# reset the sign option in legacy mode to avoid confusion
_format_options['sign'] = '-'
elif _format_options['legacy'] == 121:
set_legacy_print_mode(121)
elif _format_options['legacy'] == sys.maxsize:
set_legacy_print_mode(0)
@set_module('numpy')
def get_printoptions():
"""
Return the current print options.
Returns
-------
print_opts : dict
Dictionary of current print options with keys
- precision : int
- threshold : int
- edgeitems : int
- linewidth : int
- suppress : bool
- nanstr : str
- infstr : str
- formatter : dict of callables
- sign : str
For a full description of these options, see `set_printoptions`.
See Also
--------
set_printoptions, printoptions, set_string_function
"""
opts = _format_options.copy()
opts['legacy'] = {
113: '1.13', 121: '1.21', sys.maxsize: False,
}[opts['legacy']]
return opts
def _get_legacy_print_mode():
"""Return the legacy print mode as an int."""
return _format_options['legacy']
@set_module('numpy')
@contextlib.contextmanager
def printoptions(*args, **kwargs):
"""Context manager for setting print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `set_printoptions` for the full description of
available options.
Examples
--------
>>> from numpy.testing import assert_equal
>>> with np.printoptions(precision=2):
... np.array([2.0]) / 3
array([0.67])
The `as`-clause of the `with`-statement gives the current print options:
>>> with np.printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
See Also
--------
set_printoptions, get_printoptions
"""
opts = np.get_printoptions()
try:
np.set_printoptions(*args, **kwargs)
yield np.get_printoptions()
finally:
np.set_printoptions(**opts)
def _leading_trailing(a, edgeitems, index=()):
"""
Keep only the N-D corners (leading and trailing edges) of an array.
Should be passed a base-class ndarray, since it makes no guarantees about
preserving subclasses.
"""
axis = len(index)
if axis == a.ndim:
return a[index]
if a.shape[axis] > 2*edgeitems:
return concatenate((
_leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
_leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
), axis=axis)
else:
return _leading_trailing(a, edgeitems, index + np.index_exp[:])
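# Illustrative sketch (not part of the module): _leading_trailing keeps
# only ``edgeitems`` entries from each end of every dimension:
#
#   >>> _leading_trailing(np.arange(10), 2)
#   array([0, 1, 8, 9])
#   >>> _leading_trailing(np.arange(10), 6)  # too short to trim
#   array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])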
def _object_format(o):
""" Object arrays containing lists should be printed unambiguously """
if type(o) is list:
fmt = 'list({!r})'
else:
fmt = '{!r}'
return fmt.format(o)
def repr_format(x):
return repr(x)
def str_format(x):
return str(x)
def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
formatter, **kwargs):
# note: extra arguments in kwargs are ignored
# wrapped in lambdas to avoid taking a code path with the wrong type of data
formatdict = {
'bool': lambda: BoolFormat(data),
'int': lambda: IntegerFormat(data),
'float': lambda: FloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'longfloat': lambda: FloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'complexfloat': lambda: ComplexFloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'longcomplexfloat': lambda: ComplexFloatingFormat(
data, precision, floatmode, suppress, sign, legacy=legacy),
'datetime': lambda: DatetimeFormat(data, legacy=legacy),
'timedelta': lambda: TimedeltaFormat(data),
'object': lambda: _object_format,
'void': lambda: str_format,
'numpystr': lambda: repr_format}
# we need to wrap values in `formatter` in a lambda, so that the interface
# is the same as the above values.
def indirect(x):
return lambda: x
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
for key in formatdict.keys():
formatdict[key] = indirect(formatter['all'])
if 'int_kind' in fkeys:
for key in ['int']:
formatdict[key] = indirect(formatter['int_kind'])
if 'float_kind' in fkeys:
for key in ['float', 'longfloat']:
formatdict[key] = indirect(formatter['float_kind'])
if 'complex_kind' in fkeys:
for key in ['complexfloat', 'longcomplexfloat']:
formatdict[key] = indirect(formatter['complex_kind'])
if 'str_kind' in fkeys:
formatdict['numpystr'] = indirect(formatter['str_kind'])
for key in formatdict.keys():
if key in fkeys:
formatdict[key] = indirect(formatter[key])
return formatdict
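# Illustrative sketch (not part of the module): a formatter entry under
# the 'all' key is wrapped and applied to every type-specific slot:
#
#   >>> fd = _get_formatdict(np.arange(3), precision=8, floatmode='maxprec',
#   ...                      suppress=False, sign='-', legacy=sys.maxsize,
#   ...                      formatter={'all': lambda x: '<%s>' % x})
#   >>> fd['int']()(7)
#   '<7>'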
def _get_format_function(data, **options):
"""
Find the right formatting function for the array's dtype.
"""
dtype_ = data.dtype
dtypeobj = dtype_.type
formatdict = _get_formatdict(data, **options)
if dtypeobj is None:
return formatdict["numpystr"]()
elif issubclass(dtypeobj, _nt.bool_):
return formatdict['bool']()
elif issubclass(dtypeobj, _nt.integer):
if issubclass(dtypeobj, _nt.timedelta64):
return formatdict['timedelta']()
else:
return formatdict['int']()
elif issubclass(dtypeobj, _nt.floating):
if issubclass(dtypeobj, _nt.longfloat):
return formatdict['longfloat']()
else:
return formatdict['float']()
elif issubclass(dtypeobj, _nt.complexfloating):
if issubclass(dtypeobj, _nt.clongfloat):
return formatdict['longcomplexfloat']()
else:
return formatdict['complexfloat']()
elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
return formatdict['numpystr']()
elif issubclass(dtypeobj, _nt.datetime64):
return formatdict['datetime']()
elif issubclass(dtypeobj, _nt.object_):
return formatdict['object']()
elif issubclass(dtypeobj, _nt.void):
if dtype_.names is not None:
return StructuredVoidFormat.from_data(data, **options)
else:
return formatdict['void']()
else:
return formatdict['numpystr']()
def _recursive_guard(fillvalue='...'):
"""
Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
Decorates a function such that if it calls itself with the same first
argument, it returns `fillvalue` instead of recursing.
Largely copied from reprlib.recursive_repr
"""
def decorating_function(f):
repr_running = set()
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
return f(self, *args, **kwargs)
finally:
repr_running.discard(key)
return wrapper
return decorating_function
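# Illustrative sketch (not part of the module): the guard is what keeps
# the repr of a self-containing object array from recursing forever:
#
#   >>> a = np.array([None, None])
#   >>> a[1] = a
#   >>> repr(a)
#   'array([None, array(..., dtype=object)], dtype=object)'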
# gracefully handle recursive calls, when object arrays contain themselves
@_recursive_guard()
def _array2string(a, options, separator=' ', prefix=""):
# The formatter __init__s in _get_format_function cannot deal with
# subclasses yet, and we also need to avoid recursion issues in
# _formatArray with subclasses which return 0d arrays in place of scalars
data = asarray(a)
if a.shape == ():
a = data
if a.size > options['threshold']:
summary_insert = "..."
data = _leading_trailing(data, options['edgeitems'])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = _get_format_function(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " "*len(prefix)
lst = _formatArray(a, format_function, options['linewidth'],
next_line_prefix, separator, options['edgeitems'],
summary_insert, options['legacy'])
return lst
def _array2string_dispatcher(
a, max_line_width=None, precision=None,
suppress_small=None, separator=None, prefix=None,
style=None, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix=None,
*, legacy=None):
return (a,)
@array_function_dispatch(_array2string_dispatcher, module='numpy')
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix="",
*, legacy=None):
"""
Return a string representation of an array.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int or None, optional
Floating point precision.
Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
Defaults to ``numpy.get_printoptions()['suppress']``.
separator : str, optional
Inserted between elements.
prefix : str, optional
suffix : str, optional
The lengths of the prefix and suffix strings are used respectively to
align and wrap the output. An array is typically printed as::
prefix + array2string(a) + suffix
The output is left-padded by the length of the prefix string, and
wrapping is forced at the column ``max_line_width - len(suffix)``.
Note that the contents of the prefix and suffix strings are not
included in the output.
style : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are:
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'void' : type `numpy.void`
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
Other keys that can be used to set a group of types at once are:
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
Defaults to ``numpy.get_printoptions()['threshold']``.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
Defaults to ``numpy.get_printoptions()['edgeitems']``.
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values.
Defaults to ``numpy.get_printoptions()['sign']``.
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types.
Defaults to ``numpy.get_printoptions()['floatmode']``.
Can take the following values:
- 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
- 'unique': Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
- 'maxprec': Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
- 'maxprec_equal': Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'`, this enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. If set to
`False`, disables legacy mode. Unrecognized strings will be ignored
with a warning for forward compatibility.
.. versionadded:: 1.14.0
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> np.array2string(x, precision=2, separator=',',
... suppress_small=True)
'[0.,1.,2.,3.]'
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0 0x1 0x2]'
"""
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
sign, formatter, floatmode, legacy)
options = _format_options.copy()
options.update(overrides)
if options['legacy'] <= 113:
if style is np._NoValue:
style = repr
if a.shape == () and a.dtype.names is None:
return style(a.item())
elif style is not np._NoValue:
# Deprecation 11-9-2017 v1.14
warnings.warn("'style' argument is deprecated and no longer functional"
" except in 1.13 'legacy' mode",
DeprecationWarning, stacklevel=3)
if options['legacy'] > 113:
options['linewidth'] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
needs_wrap = len(line) + len(word) > line_width
if legacy > 113:
# don't wrap lines if it won't help
if len(line) <= len(next_line_prefix):
needs_wrap = False
if needs_wrap:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
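# Illustrative sketch (not part of the module): _extendLine starts a new
# line only when the next word would overflow the width budget:
#
#   >>> s, line = _extendLine('', 'array([ 10, ', '2000, ', 15,
#   ...                       '       ', legacy=sys.maxsize)
#   >>> s
#   'array([ 10,\n'
#   >>> line
#   '       2000, '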
def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):
"""
Extends line with nicely formatted (possibly multi-line) string ``word``.
"""
words = word.splitlines()
if len(words) == 1 or legacy <= 113:
return _extendLine(s, line, word, line_width, next_line_prefix, legacy)
max_word_length = max(len(word) for word in words)
if (len(line) + max_word_length > line_width and
len(line) > len(next_line_prefix)):
s += line.rstrip() + '\n'
line = next_line_prefix + words[0]
indent = next_line_prefix
else:
indent = len(line)*' '
line += words[0]
for word in words[1::]:
s += line.rstrip() + '\n'
line = indent + word
suffix_length = max_word_length - len(words[-1])
line += suffix_length*' '
return s, line
def _formatArray(a, format_function, line_width, next_line_prefix,
separator, edge_items, summary_insert, legacy):
"""formatArray is designed for two modes of operation:
1. Full output
2. Summarized output
"""
def recurser(index, hanging_indent, curr_width):
"""
By using this local function, we don't need to recurse with all the
arguments. Since this function is not created recursively, the cost is
not significant
"""
axis = len(index)
axes_left = a.ndim - axis
if axes_left == 0:
return format_function(a[index])
# when recursing, add a space to align with the [ added, and reduce the
# length of the line by 1
next_hanging_indent = hanging_indent + ' '
if legacy <= 113:
next_width = curr_width
else:
next_width = curr_width - len(']')
a_len = a.shape[axis]
show_summary = summary_insert and 2*edge_items < a_len
if show_summary:
leading_items = edge_items
trailing_items = edge_items
else:
leading_items = 0
trailing_items = a_len
# stringify the array with the hanging indent on the first line too
s = ''
# last axis (rows) - wrap elements if they would not fit on one line
if axes_left == 1:
# the length up until the beginning of the separator / bracket
if legacy <= 113:
elem_width = curr_width - len(separator.rstrip())
else:
elem_width = curr_width - max(len(separator.rstrip()), len(']'))
line = hanging_indent
for i in range(leading_items):
word = recurser(index + (i,), next_hanging_indent, next_width)
s, line = _extendLine_pretty(
s, line, word, elem_width, hanging_indent, legacy)
line += separator
if show_summary:
s, line = _extendLine(
s, line, summary_insert, elem_width, hanging_indent, legacy)
if legacy <= 113:
line += ", "
else:
line += separator
for i in range(trailing_items, 1, -1):
word = recurser(index + (-i,), next_hanging_indent, next_width)
s, line = _extendLine_pretty(
s, line, word, elem_width, hanging_indent, legacy)
line += separator
if legacy <= 113:
# width of the separator is not considered on 1.13
elem_width = curr_width
word = recurser(index + (-1,), next_hanging_indent, next_width)
s, line = _extendLine_pretty(
s, line, word, elem_width, hanging_indent, legacy)
s += line
# other axes - insert newlines between rows
else:
s = ''
line_sep = separator.rstrip() + '\n'*(axes_left - 1)
for i in range(leading_items):
nested = recurser(index + (i,), next_hanging_indent, next_width)
s += hanging_indent + nested + line_sep
if show_summary:
if legacy <= 113:
# trailing space, fixed nbr of newlines, and fixed separator
s += hanging_indent + summary_insert + ", \n"
else:
s += hanging_indent + summary_insert + line_sep
for i in range(trailing_items, 1, -1):
nested = recurser(index + (-i,), next_hanging_indent,
next_width)
s += hanging_indent + nested + line_sep
nested = recurser(index + (-1,), next_hanging_indent, next_width)
s += hanging_indent + nested
# remove the hanging indent, and wrap in []
s = '[' + s[len(hanging_indent):] + ']'
return s
try:
# invoke the recursive part with an initial index and prefix
return recurser(index=(),
hanging_indent=next_line_prefix,
curr_width=line_width)
finally:
# recursive closures have a cyclic reference to themselves, which
# requires gc to collect (gh-10620). To avoid this problem, for
# performance and PyPy friendliness, we break the cycle:
recurser = None
def _none_or_positive_arg(x, name):
if x is None:
return -1
if x < 0:
raise ValueError("{} must be >= 0".format(name))
return x
class FloatingFormat:
""" Formatter for subtypes of np.floating """
def __init__(self, data, precision, floatmode, suppress_small, sign=False,
*, legacy=None):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
self._legacy = legacy
if self._legacy <= 113:
# when not 0d, legacy does not support '-'
if data.shape != () and sign == '-':
sign = ' '
self.floatmode = floatmode
if floatmode == 'unique':
self.precision = None
else:
self.precision = precision
self.precision = _none_or_positive_arg(self.precision, 'precision')
self.suppress_small = suppress_small
self.sign = sign
self.exp_format = False
self.large_exponent = False
self.fillFormat(data)
def fillFormat(self, data):
# only the finite values are used to compute the number of digits
finite_vals = data[isfinite(data)]
# choose exponential mode based on the non-zero finite values:
abs_non_zero = absolute(finite_vals[finite_vals != 0])
if len(abs_non_zero) != 0:
max_val = np.max(abs_non_zero)
min_val = np.min(abs_non_zero)
with errstate(over='ignore'): # division can overflow
if max_val >= 1.e8 or (not self.suppress_small and
(min_val < 0.0001 or max_val/min_val > 1000.)):
self.exp_format = True
# do a first pass of printing all the numbers, to determine sizes
if len(finite_vals) == 0:
self.pad_left = 0
self.pad_right = 0
self.trim = '.'
self.exp_size = -1
self.unique = True
self.min_digits = None
elif self.exp_format:
trim, unique = '.', True
if self.floatmode == 'fixed' or self._legacy <= 113:
trim, unique = 'k', False
strs = (dragon4_scientific(x, precision=self.precision,
unique=unique, trim=trim, sign=self.sign == '+')
for x in finite_vals)
frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
self.exp_size = max(len(s) for s in exp_strs) - 1
self.trim = 'k'
self.precision = max(len(s) for s in frac_part)
self.min_digits = self.precision
self.unique = unique
# for back-compat with np 1.13, use 2 spaces & sign and full prec
if self._legacy <= 113:
self.pad_left = 3
else:
# this should be only 1 or 2. Can be calculated from sign.
self.pad_left = max(len(s) for s in int_part)
# pad_right is only needed for nan length calculation
self.pad_right = self.exp_size + 2 + self.precision
else:
trim, unique = '.', True
if self.floatmode == 'fixed':
trim, unique = 'k', False
strs = (dragon4_positional(x, precision=self.precision,
fractional=True,
unique=unique, trim=trim,
sign=self.sign == '+')
for x in finite_vals)
int_part, frac_part = zip(*(s.split('.') for s in strs))
if self._legacy <= 113:
self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
else:
self.pad_left = max(len(s) for s in int_part)
self.pad_right = max(len(s) for s in frac_part)
self.exp_size = -1
self.unique = unique
if self.floatmode in ['fixed', 'maxprec_equal']:
self.precision = self.min_digits = self.pad_right
self.trim = 'k'
else:
self.trim = '.'
self.min_digits = 0
if self._legacy > 113:
# account for sign = ' ' by adding one to pad_left
if self.sign == ' ' and not any(np.signbit(finite_vals)):
self.pad_left += 1
# if there are non-finite values, may need to increase pad_left
if data.size != finite_vals.size:
neginf = self.sign != '-' or any(data[isinf(data)] < 0)
nanlen = len(_format_options['nanstr'])
inflen = len(_format_options['infstr']) + neginf
offset = self.pad_right + 1 # +1 for decimal pt
self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
def __call__(self, x):
if not np.isfinite(x):
with errstate(invalid='ignore'):
if np.isnan(x):
sign = '+' if self.sign == '+' else ''
ret = sign + _format_options['nanstr']
else: # isinf
sign = '-' if x < 0 else '+' if self.sign == '+' else ''
ret = sign + _format_options['infstr']
return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
if self.exp_format:
return dragon4_scientific(x,
precision=self.precision,
min_digits=self.min_digits,
unique=self.unique,
trim=self.trim,
sign=self.sign == '+',
pad_left=self.pad_left,
exp_digits=self.exp_size)
else:
return dragon4_positional(x,
precision=self.precision,
min_digits=self.min_digits,
unique=self.unique,
fractional=True,
trim=self.trim,
sign=self.sign == '+',
pad_left=self.pad_left,
pad_right=self.pad_right)
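# Illustrative sketch (not part of the module): FloatingFormat pads every
# element of a column to a common width:
#
#   >>> ff = FloatingFormat(np.array([0.5, -12.25]), precision=8,
#   ...                     floatmode='maxprec', suppress_small=False,
#   ...                     legacy=sys.maxsize)
#   >>> [ff(x) for x in (0.5, -12.25)]
#   ['  0.5 ', '-12.25']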
@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
sign=False, pad_left=None, exp_digits=None,
min_digits=None):
"""
Format a floating-point scalar as a decimal string in scientific notation.
Provides control over rounding, trimming and padding. Uses and assumes
IEEE unbiased rounding. Uses the "Dragon4" algorithm.
Parameters
----------
x : python float or numpy floating scalar
Value to format.
precision : non-negative integer or None, optional
Maximum number of digits to print. May be None if `unique` is
`True`, but must be an integer if unique is `False`.
unique : boolean, optional
If `True`, use a digit-generation strategy which gives the shortest
representation which uniquely identifies the floating-point number from
other values of the same type, by judicious rounding. If `precision`
is given, fewer digits than necessary can be printed. If `min_digits`
is given, more can be printed, in which case the last digit is rounded
with unbiased rounding.
If `False`, digits are generated as if printing an infinite-precision
value and stopping after `precision` digits, rounding the remaining
value with unbiased rounding.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
* 'k' : keep trailing zeros, keep decimal point (no trimming)
* '.' : trim all trailing zeros, leave decimal point
* '0' : trim all but the zero before the decimal point. Insert the
zero if it is missing.
* '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
Pad the left side of the string with whitespace until at least that
many characters are to the left of the decimal point.
exp_digits : non-negative integer, optional
Pad the exponent with zeros until it contains at least this many digits.
If omitted, the exponent will be at least 2 digits.
min_digits : non-negative integer or None, optional
Minimum number of digits to print. This only has an effect for
`unique=True`. In that case more digits than necessary to uniquely
identify the value may be printed and rounded unbiased.
.. versionadded:: 1.21.0
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_positional
Examples
--------
>>> np.format_float_scientific(np.float32(np.pi))
'3.1415927e+00'
>>> s = np.float32(1.23e24)
>>> np.format_float_scientific(s, unique=False, precision=15)
'1.230000071797338e+24'
>>> np.format_float_scientific(s, exp_digits=4)
'1.23e+0024'
"""
precision = _none_or_positive_arg(precision, 'precision')
pad_left = _none_or_positive_arg(pad_left, 'pad_left')
exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
min_digits = _none_or_positive_arg(min_digits, 'min_digits')
if min_digits > 0 and precision > 0 and min_digits > precision:
raise ValueError("min_digits must be less than or equal to precision")
return dragon4_scientific(x, precision=precision, unique=unique,
trim=trim, sign=sign, pad_left=pad_left,
exp_digits=exp_digits, min_digits=min_digits)
@set_module('numpy')
def format_float_positional(x, precision=None, unique=True,
fractional=True, trim='k', sign=False,
pad_left=None, pad_right=None, min_digits=None):
"""
Format a floating-point scalar as a decimal string in positional notation.
Provides control over rounding, trimming and padding. Uses and assumes
IEEE unbiased rounding. Uses the "Dragon4" algorithm.
Parameters
----------
x : python float or numpy floating scalar
Value to format.
precision : non-negative integer or None, optional
Maximum number of digits to print. May be None if `unique` is
`True`, but must be an integer if unique is `False`.
unique : boolean, optional
If `True`, use a digit-generation strategy which gives the shortest
representation which uniquely identifies the floating-point number from
other values of the same type, by judicious rounding. If `precision`
is given, fewer digits than necessary can be printed, or if `min_digits`
is given, more can be printed, in which case the last digit is rounded
with unbiased rounding.
If `False`, digits are generated as if printing an infinite-precision
value and stopping after `precision` digits, rounding the remaining
value with unbiased rounding.
fractional : boolean, optional
If `True`, the cutoffs of `precision` and `min_digits` refer to the
total number of digits after the decimal point, including leading
zeros.
If `False`, `precision` and `min_digits` refer to the total number of
significant digits, before or after the decimal point, ignoring leading
zeros.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
* 'k' : keep trailing zeros, keep decimal point (no trimming)
* '.' : trim all trailing zeros, leave decimal point
* '0' : trim all but the zero before the decimal point. Insert the
zero if it is missing.
* '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
Pad the left side of the string with whitespace until at least that
many characters are to the left of the decimal point.
pad_right : non-negative integer, optional
Pad the right side of the string with whitespace until at least that
many characters are to the right of the decimal point.
min_digits : non-negative integer or None, optional
Minimum number of digits to print. Only has an effect if `unique=True`
in which case additional digits past those necessary to uniquely
identify the value may be printed, rounding the last additional digit.
.. versionadded:: 1.21.0
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
Examples
--------
>>> np.format_float_positional(np.float32(np.pi))
'3.1415927'
>>> np.format_float_positional(np.float16(np.pi))
'3.14'
>>> np.format_float_positional(np.float16(0.3))
'0.3'
>>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
'0.3000488281'
"""
precision = _none_or_positive_arg(precision, 'precision')
pad_left = _none_or_positive_arg(pad_left, 'pad_left')
pad_right = _none_or_positive_arg(pad_right, 'pad_right')
min_digits = _none_or_positive_arg(min_digits, 'min_digits')
if not fractional and precision == 0:
raise ValueError("precision must be greater than 0 if "
"fractional=False")
if min_digits > 0 and precision > 0 and min_digits > precision:
raise ValueError("min_digits must be less than or equal to precision")
return dragon4_positional(x, precision=precision, unique=unique,
fractional=fractional, trim=trim,
sign=sign, pad_left=pad_left,
pad_right=pad_right, min_digits=min_digits)
class IntegerFormat:
def __init__(self, data):
if data.size > 0:
max_str_len = max(len(str(np.max(data))),
len(str(np.min(data))))
else:
max_str_len = 0
self.format = '%{}d'.format(max_str_len)
def __call__(self, x):
return self.format % x
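# Illustrative sketch (not part of the module): IntegerFormat
# right-justifies to the widest value in the data:
#
#   >>> fmt = IntegerFormat(np.array([1, -100, 25]))
#   >>> [fmt(x) for x in (1, -100, 25)]
#   ['   1', '-100', '  25']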
class BoolFormat:
def __init__(self, data, **kwargs):
# add an extra space so " True" and "False" have the same length and
# array elements align nicely when printed, except in 0d arrays
self.truestr = ' True' if data.shape != () else 'True'
def __call__(self, x):
return self.truestr if x else "False"
class ComplexFloatingFormat:
""" Formatter for subtypes of np.complexfloating """
def __init__(self, x, precision, floatmode, suppress_small,
sign=False, *, legacy=None):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
floatmode_real = floatmode_imag = floatmode
if legacy <= 113:
floatmode_real = 'maxprec_equal'
floatmode_imag = 'maxprec'
self.real_format = FloatingFormat(
x.real, precision, floatmode_real, suppress_small,
sign=sign, legacy=legacy
)
self.imag_format = FloatingFormat(
x.imag, precision, floatmode_imag, suppress_small,
sign='+', legacy=legacy
)
def __call__(self, x):
r = self.real_format(x.real)
i = self.imag_format(x.imag)
# add the 'j' before the terminal whitespace in i
sp = len(i.rstrip())
i = i[:sp] + 'j' + i[sp:]
return r + i
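# Illustrative sketch (not part of the module): the imaginary part always
# carries an explicit sign, and the 'j' is inserted before any trailing
# padding:
#
#   >>> cf = ComplexFloatingFormat(np.array([1+2j, 3-4j]), precision=8,
#   ...                            floatmode='maxprec', suppress_small=False,
#   ...                            legacy=sys.maxsize)
#   >>> cf(1+2j)
#   '1.+2.j'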
class _TimelikeFormat:
def __init__(self, data):
non_nat = data[~isnat(data)]
if len(non_nat) > 0:
# Max str length of non-NaT elements
max_str_len = max(len(self._format_non_nat(np.max(non_nat))),
len(self._format_non_nat(np.min(non_nat))))
else:
max_str_len = 0
if len(non_nat) < data.size:
# data contains a NaT
max_str_len = max(max_str_len, 5)
self._format = '%{}s'.format(max_str_len)
self._nat = "'NaT'".rjust(max_str_len)
def _format_non_nat(self, x):
# override in subclass
raise NotImplementedError
def __call__(self, x):
if isnat(x):
return self._nat
else:
return self._format % self._format_non_nat(x)
class DatetimeFormat(_TimelikeFormat):
def __init__(self, x, unit=None, timezone=None, casting='same_kind',
legacy=False):
# Get the unit from the dtype
if unit is None:
if x.dtype.kind == 'M':
unit = datetime_data(x.dtype)[0]
else:
unit = 's'
if timezone is None:
timezone = 'naive'
self.timezone = timezone
self.unit = unit
self.casting = casting
self.legacy = legacy
# must be called after the above are configured
super().__init__(x)
def __call__(self, x):
if self.legacy <= 113:
return self._format_non_nat(x)
return super().__call__(x)
def _format_non_nat(self, x):
return "'%s'" % datetime_as_string(x,
unit=self.unit,
timezone=self.timezone,
casting=self.casting)
class TimedeltaFormat(_TimelikeFormat):
def _format_non_nat(self, x):
return str(x.astype('i8'))
class SubArrayFormat:
def __init__(self, format_function):
self.format_function = format_function
def __call__(self, arr):
if arr.ndim <= 1:
return "[" + ", ".join(self.format_function(a) for a in arr) + "]"
return "[" + ", ".join(self.__call__(a) for a in arr) + "]"
class StructuredVoidFormat:
"""
Formatter for structured np.void objects.
This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
as alias scalars lose their field information, and the implementation
relies upon np.void.__getitem__.
"""
def __init__(self, format_functions):
self.format_functions = format_functions
@classmethod
def from_data(cls, data, **options):
"""
This is a second way to initialize StructuredVoidFormat, using the raw data
as input. Added to avoid changing the signature of __init__.
"""
format_functions = []
for field_name in data.dtype.names:
format_function = _get_format_function(data[field_name], **options)
if data.dtype[field_name].shape != ():
format_function = SubArrayFormat(format_function)
format_functions.append(format_function)
return cls(format_functions)
def __call__(self, x):
str_fields = [
format_function(field)
for field, format_function in zip(x, self.format_functions)
]
if len(str_fields) == 1:
return "({},)".format(str_fields[0])
else:
return "({})".format(", ".join(str_fields))
def _void_scalar_repr(x):
"""
Implements the repr for structured-void scalars. It is called from the
scalartypes.c.src code, and is placed here because it uses the elementwise
formatters defined above.
"""
return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
_typelessdata = [int_, float_, complex_, bool_]
if issubclass(intc, int):
_typelessdata.append(intc)
if issubclass(longlong, int):
_typelessdata.append(longlong)
def dtype_is_implied(dtype):
"""
Determine if the given dtype is implied by the representation of its values.
Parameters
----------
dtype : dtype
Data type
Returns
-------
implied : bool
True if the dtype is implied by the representation of its values.
Examples
--------
>>> np.core.arrayprint.dtype_is_implied(int)
True
>>> np.array([1, 2, 3], int)
array([1, 2, 3])
>>> np.core.arrayprint.dtype_is_implied(np.int8)
False
>>> np.array([1, 2, 3], np.int8)
array([1, 2, 3], dtype=int8)
"""
dtype = np.dtype(dtype)
if _format_options['legacy'] <= 113 and dtype.type == bool_:
return False
# not just void types can be structured, and names are not part of the repr
if dtype.names is not None:
return False
return dtype.type in _typelessdata
def dtype_short_repr(dtype):
"""
Convert a dtype to a short form which evaluates to the same dtype.
The intent is roughly that the following holds
>>> from numpy import *
>>> dt = np.int64([1, 2]).dtype
>>> assert eval(dtype_short_repr(dt)) == dt
"""
if type(dtype).__repr__ != np.dtype.__repr__:
# TODO: Custom repr for user DTypes, logic should likely move.
return repr(dtype)
if dtype.names is not None:
# structured dtypes give a list or tuple repr
return str(dtype)
elif issubclass(dtype.type, flexible):
# handle these separately so they don't give garbage like str256
return "'%s'" % str(dtype)
typename = dtype.name
# quote typenames which can't be represented as python variable names
if typename and not (typename[0].isalpha() and typename.isalnum()):
typename = repr(typename)
return typename
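# Illustrative sketch (not part of the module): the short form quotes
# names that are not valid Python identifiers:
#
#   >>> dtype_short_repr(np.dtype(np.int64))
#   'int64'
#   >>> dtype_short_repr(np.dtype('M8[s]'))
#   "'datetime64[s]'"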
def _array_repr_implementation(
arr, max_line_width=None, precision=None, suppress_small=None,
array2string=array2string):
"""Internal version of array_repr() that allows overriding array2string."""
if max_line_width is None:
max_line_width = _format_options['linewidth']
if type(arr) is not ndarray:
class_name = type(arr).__name__
else:
class_name = "array"
skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
prefix = class_name + "("
suffix = ")" if skipdtype else ","
if (_format_options['legacy'] <= 113 and
arr.shape == () and not arr.dtype.names):
lst = repr(arr.item())
elif arr.size > 0 or arr.shape == (0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
', ', prefix, suffix=suffix)
else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
arr_str = prefix + lst + suffix
if skipdtype:
return arr_str
dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
# compute whether we should put dtype on a new line: Do so if adding the
# dtype would extend the last line past max_line_width.
# Note: This line gives the correct result even when rfind returns -1.
last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
spacer = " "
if _format_options['legacy'] <= 113:
if issubclass(arr.dtype.type, flexible):
spacer = '\n' + ' '*len(class_name + "(")
elif last_line_len + len(dtype_str) + 1 > max_line_width:
spacer = '\n' + ' '*len(class_name + "(")
return arr_str + spacer + dtype_str
def _array_repr_dispatcher(
arr, max_line_width=None, precision=None, suppress_small=None):
return (arr,)
@array_function_dispatch(_array_repr_dispatcher, module='numpy')
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int, optional
Floating point precision.
Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
Defaults to ``numpy.get_printoptions()['suppress']``.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([0.000001, 0. , 2. , 3. ])'
"""
return _array_repr_implementation(
arr, max_line_width, precision, suppress_small)
@_recursive_guard()
def _guarded_repr_or_str(v):
if isinstance(v, bytes):
return repr(v)
return str(v)
def _array_str_implementation(
a, max_line_width=None, precision=None, suppress_small=None,
array2string=array2string):
"""Internal version of array_str() that allows overriding array2string."""
if (_format_options['legacy'] <= 113 and
a.shape == () and not a.dtype.names):
return str(a.item())
# the str of 0d arrays is a special case: It should appear like a scalar,
# so floats are not truncated by `precision`, and strings are not wrapped
# in quotes. So we return the str of the scalar value.
if a.shape == ():
# obtain a scalar and call str on it, avoiding problems for subclasses
# for which indexing with () returns a 0d instead of a scalar by using
# ndarray's getindex. Also guard against recursive 0d object arrays.
return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))
return array2string(a, max_line_width, precision, suppress_small, ' ', "")
def _array_str_dispatcher(
a, max_line_width=None, precision=None, suppress_small=None):
return (a,)
@array_function_dispatch(_array_str_dispatcher, module='numpy')
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function is
similar to `array_repr`, the difference being that `array_repr` also
returns information on the kind of array and its data type.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int, optional
Floating point precision.
Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
Defaults to ``numpy.get_printoptions()['suppress']``.
See Also
--------
array2string, array_repr, set_printoptions
Examples
--------
>>> np.array_str(np.arange(3))
'[0 1 2]'
"""
return _array_str_implementation(
a, max_line_width, precision, suppress_small)
# needed if __array_function__ is disabled
_array2string_impl = getattr(array2string, '__wrapped__', array2string)
_default_array_str = functools.partial(_array_str_implementation,
array2string=_array2string_impl)
_default_array_repr = functools.partial(_array_repr_implementation,
array2string=_array2string_impl)
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
Parameters
----------
f : function or None
Function to be used to pretty print arrays. The function should expect
a single array argument and return a string of the representation of
the array. If None, the function is reset to the default NumPy function
to print arrays.
repr : bool, optional
If True (default), the function for pretty printing (``__repr__``)
is set, if False the function that returns the default string
representation (``__str__``) is set.
See Also
--------
set_printoptions, get_printoptions
Examples
--------
>>> def pprint(arr):
... return 'HA! - What are you going to do now?'
...
>>> np.set_string_function(pprint)
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
>>> _ = a
>>> # [0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
>>> np.set_string_function(None)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
`repr` affects either pretty printing or normal string representation.
Note that ``__repr__`` is still affected by setting ``__str__``
because the width of each array element in the returned string becomes
equal to the length of the result of ``__str__()``.
>>> x = np.arange(4)
>>> np.set_string_function(lambda x:'random', repr=False)
>>> x.__str__()
'random'
>>> x.__repr__()
'array([0, 1, 2, 3])'
"""
if f is None:
if repr:
return multiarray.set_string_function(_default_array_repr, 1)
else:
return multiarray.set_string_function(_default_array_str, 0)
else:
return multiarray.set_string_function(f, repr)
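# Illustrative sketch, not part of NumPy: `_demo_set_string_function` is a
# hypothetical helper showing the reset path above -- passing None restores
# the default repr implementation.
def _demo_set_string_function():
    import numpy as np
    np.set_string_function(lambda a: 'custom', repr=True)
    assert repr(np.arange(2)) == 'custom'
    np.set_string_function(None, repr=True)   # restore the default
    assert repr(np.arange(2)) == 'array([0, 1])'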
| 62,839 | Python | 35.834701 | 83 | 0.593899 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/numeric.py | import functools
import itertools
import operator
import sys
import warnings
import numbers
import numpy as np
from . import multiarray
from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
WRAP, arange, array, asarray, asanyarray, ascontiguousarray,
asfortranarray, broadcast, can_cast, compare_chararrays,
concatenate, copyto, dot, dtype, empty,
empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter,
fromstring, inner, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
zeros, normalize_axis_index)
from . import overrides
from . import umath
from . import shape_base
from .overrides import set_array_function_like_doc, set_module
from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._exceptions import TooHardError, AxisError
from ._ufunc_config import errstate
bitwise_not = invert
ufunc = type(sin)
newaxis = None
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray',
'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer', 'from_dlpack', 'where',
'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
'identity', 'allclose', 'compare_chararrays', 'putmask',
'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
@set_module('numpy')
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_zeros_like_dispatcher)
def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
full_like : Return a new array with shape of input filled with value.
zeros : Return a new array setting values to zero.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.zeros_like(y)
array([0., 0., 0.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
# needed instead of a 0 to get same result as zeros for string dtypes
z = zeros(1, dtype=res.dtype)
multiarray.copyto(res, z, casting='unsafe')
return res
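# Illustrative sketch, not part of NumPy: `_demo_zeros_like_strings` is a
# hypothetical helper showing why `zeros_like` copies from a one-element
# `zeros` array rather than the scalar 0 -- for string dtypes the "zero"
# element is the empty string.
def _demo_zeros_like_strings():
    import numpy as np
    z = np.zeros_like(np.array(['ab', 'cd']))
    # same dtype as the input, filled with that dtype's zero (empty string)
    assert z.dtype.kind == 'U' and all(s == '' for s in z)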
def _ones_dispatcher(shape, dtype=None, order=None, *, like=None):
    return (like,)
@set_array_function_like_doc
@set_module('numpy')
def ones(shape, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: C
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and order.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty : Return a new uninitialized array.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.ones(5)
array([1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[1.],
[1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[1., 1.],
[1., 1.]])
"""
if like is not None:
return _ones_with_like(shape, dtype=dtype, order=order, like=like)
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
_ones_with_like = array_function_dispatch(
_ones_dispatcher
)(ones)
def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_ones_like_dispatcher)
def ones_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.ones_like(x)
array([[1, 1, 1],
[1, 1, 1]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.ones_like(y)
array([1., 1., 1.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, 1, casting='unsafe')
return res
def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
    return (like,)
@set_array_function_like_doc
@set_module('numpy')
def full(shape, fill_value, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or array_like
Fill value.
dtype : data-type, optional
The desired data-type for the array The default, None, means
``np.array(fill_value).dtype``.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), np.inf)
array([[inf, inf],
[inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
>>> np.full((2, 2), [1, 2])
array([[1, 2],
[1, 2]])
"""
if like is not None:
return _full_with_like(shape, fill_value, dtype=dtype, order=order, like=like)
if dtype is None:
fill_value = asarray(fill_value)
dtype = fill_value.dtype
a = empty(shape, dtype, order)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
_full_with_like = array_function_dispatch(
_full_dispatcher
)(full)
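# Illustrative sketch, not part of NumPy: `_demo_full_dtype_inference` is a
# hypothetical helper showing that with dtype=None the result dtype is taken
# from `fill_value`, as implemented above via asarray(fill_value).dtype.
def _demo_full_dtype_inference():
    import numpy as np
    assert np.full(2, 1.0).dtype == np.dtype(float)   # inferred from a float
    assert np.full(2, True).dtype == np.dtype(bool)   # inferred from a bool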
def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_full_like_dispatcher)
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : array_like
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0])
>>> np.full_like(x, 0.1, dtype=np.double)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> np.full_like(x, np.nan, dtype=np.double)
array([nan, nan, nan, nan, nan, nan])
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> y = np.zeros([2, 2, 3], dtype=int)
>>> np.full_like(y, [0, 0, 255])
array([[[ 0, 0, 255],
[ 0, 0, 255]],
[[ 0, 0, 255],
[ 0, 0, 255]]])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, fill_value, casting='unsafe')
return res
def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):
return (a,)
@array_function_dispatch(_count_nonzero_dispatcher)
def count_nonzero(a, axis=None, *, keepdims=False):
"""
Counts the number of non-zero values in the array ``a``.
The word "non-zero" is in reference to the Python 2.x
built-in method ``__nonzero__()`` (renamed ``__bool__()``
in Python 3.x) of Python objects that tests an object's
"truthfulness". For example, any number is considered
truthful if it is nonzero, whereas any string is considered
truthful if it is not the empty string. Thus, this function
(recursively) counts how many elements in ``a`` (and in
sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
method evaluated to ``True``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
axis : int or tuple, optional
Axis or tuple of axes along which to count non-zeros.
Default is None, meaning that non-zeros will be counted
along a flattened version of ``a``.
.. versionadded:: 1.12.0
keepdims : bool, optional
If this is set to True, the axes that are counted are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
.. versionadded:: 1.19.0
Returns
-------
count : int or array of int
Number of non-zero values in the array along a given axis.
Otherwise, the total number of non-zero values in the array
is returned.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> a = np.array([[0, 1, 7, 0],
... [3, 0, 2, 19]])
>>> np.count_nonzero(a)
5
>>> np.count_nonzero(a, axis=0)
array([1, 1, 2, 1])
>>> np.count_nonzero(a, axis=1)
array([2, 3])
>>> np.count_nonzero(a, axis=1, keepdims=True)
array([[2],
[3]])
"""
if axis is None and not keepdims:
return multiarray.count_nonzero(a)
a = asanyarray(a)
# TODO: this works around .astype(bool) not working properly (gh-9847)
if np.issubdtype(a.dtype, np.character):
a_bool = a != a.dtype.type()
else:
a_bool = a.astype(np.bool_, copy=False)
return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)
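# Illustrative sketch, not part of NumPy: `_demo_count_nonzero_strings` is a
# hypothetical helper exercising the character-dtype branch above, where
# elements are compared against the dtype's zero (the empty string) instead
# of going through .astype(bool).
def _demo_count_nonzero_strings():
    import numpy as np
    assert np.count_nonzero(np.array(['', 'x', '', 'y'])) == 2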
@set_module('numpy')
def isfortran(a):
"""
Check if the array is Fortran contiguous but *not* C contiguous.
This function is obsolete and, because of changes due to relaxed stride
    checking, its return value for the same array may differ between NumPy
    versions >= 1.10.0 and earlier versions. If you only want to check if an
array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
Parameters
----------
a : ndarray
Input array.
Returns
-------
isfortran : bool
Returns True if the array is Fortran contiguous but *not* C contiguous.
Examples
--------
    np.array allows one to specify whether the array is written in C-contiguous
order (last index varies the fastest), or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
>>> np.isfortran(np.array([1, 2], order='F'))
False
"""
return a.flags.fnc
def _argwhere_dispatcher(a):
return (a,)
@array_function_dispatch(_argwhere_dispatcher)
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : (N, a.ndim) ndarray
Indices of elements that are non-zero. Indices are grouped by element.
This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
non-zero items.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
but produces a result of the correct shape for a 0D array.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``nonzero(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
# nonzero does not behave well on 0d, so promote to 1d
if np.ndim(a) == 0:
a = shape_base.atleast_1d(a)
# then remove the added dimension
return argwhere(a)[:,:0]
return transpose(nonzero(a))
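# Illustrative sketch, not part of NumPy: `_demo_argwhere_equivalence` is a
# hypothetical helper checking the documented identity between argwhere and
# transpose(nonzero(...)) for arrays of nonzero dimension.
def _demo_argwhere_equivalence():
    import numpy as np
    x = np.arange(6).reshape(2, 3)
    assert (np.argwhere(x > 1) == np.transpose(np.nonzero(x > 1))).all()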
def _flatnonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_flatnonzero_dispatcher)
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to ``np.nonzero(np.ravel(a))[0]``.
Parameters
----------
a : array_like
Input data.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of ``a.ravel()``
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return np.nonzero(np.ravel(a))[0]
def _correlate_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_correlate_dispatcher)
def correlate(a, v, mode='valid'):
r"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal
processing texts:
.. math:: c_k = \sum_n a_{n+k} \cdot \overline{v_n}
with a and v sequences being zero-padded where necessary and
:math:`\overline x` denoting complex conjugation.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
old_behavior : bool
`old_behavior` was removed in NumPy 1.10. If you need the old
behavior, use `multiarray.correlate`.
Returns
-------
out : ndarray
Discrete cross-correlation of `a` and `v`.
See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
multiarray.correlate : Old, no conjugate, version of correlate.
scipy.signal.correlate : uses FFT which has superior performance on large arrays.
Notes
-----
The definition of correlation above is not unique and sometimes correlation
may be defined differently. Another common definition is:
.. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}}
which is related to :math:`c_k` by :math:`c'_k = c_{-k}`.
    `numpy.correlate` may perform slowly for large arrays (e.g. n = 1e5) because it
    does not use the FFT to compute the convolution; in that case,
    `scipy.signal.correlate` might be preferable.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([0.5, 2. , 3.5, 3. , 0. ])
Using complex sequences:
>>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
Note that you get the time reversed, complex conjugated result
(:math:`\overline{c_{-k}}`) when the two input sequences a and v change
places:
>>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
"""
return multiarray.correlate2(a, v, mode)
def _convolve_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_convolve_dispatcher)
def convolve(a, v, mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
If `v` is longer than `a`, the arrays are swapped before computation.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode 'same' returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode 'valid' returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
polymul : Polynomial multiplication. Same output as convolve, but also
accepts poly1d objects as input.
Notes
-----
The discrete convolution operation is defined as
.. math:: (a * v)_n = \\sum_{m = -\\infty}^{\\infty} a_m v_{n - m}
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution",
https://en.wikipedia.org/wiki/Convolution
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([2.5])
"""
a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
if len(a) == 0:
raise ValueError('a cannot be empty')
if len(v) == 0:
raise ValueError('v cannot be empty')
return multiarray.correlate(a, v[::-1], mode)
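# Illustrative sketch, not part of NumPy: `_demo_convolve_via_correlate` is a
# hypothetical helper showing the relation the implementation above relies
# on -- for real inputs, convolution is correlation with the second sequence
# reversed.
def _demo_convolve_via_correlate():
    import numpy as np
    a, v = np.array([1., 2., 3.]), np.array([0., 1., 0.5])
    assert np.allclose(np.convolve(a, v), np.correlate(a, v[::-1], 'full'))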
def _outer_dispatcher(a, b, out=None):
return (a, b, out)
@array_function_dispatch(_outer_dispatcher)
def outer(a, b, out=None):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) array_like
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) array_like
Second input vector. Input is flattened if
not already 1-dimensional.
out : (M, N) ndarray, optional
A location where the result is stored
.. versionadded:: 1.9.0
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to dimensions other than 1D and other
operations. ``np.multiply.outer(a.ravel(), b.ravel())``
is the equivalent.
tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``
is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([['a', 'aa', 'aaa'],
['b', 'bb', 'bbb'],
['c', 'cc', 'ccc']], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
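# Illustrative sketch, not part of NumPy: `_demo_outer_broadcast` is a
# hypothetical helper showing that outer() is the broadcasting product of a
# column vector and a row vector, mirroring the implementation above.
def _demo_outer_broadcast():
    import numpy as np
    a, b = np.array([1., 2., 3.]), np.array([4., 5.])
    assert np.allclose(np.outer(a, b), a[:, np.newaxis] * b[np.newaxis, :])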
def _tensordot_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensordot_dispatcher)
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes.
Given two tensors, `a` and `b`, and an array_like object containing
two array_like objects, ``(a_axes, b_axes)``, sum the products of
`a`'s and `b`'s elements (components) over the axes specified by
``a_axes`` and ``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
of `a` and the first ``N`` dimensions of `b` are summed over.
Parameters
----------
a, b : array_like
Tensors to "dot".
axes : int or (2,) array_like
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) array_like
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements array_like must be of the same length.
Returns
-------
output : ndarray
The tensor dot product of the input.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
    (N-1)th axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
The shape of the result consists of the non-contracted axes of the
first tensor, followed by the non-contracted axes of the second.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]])
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([['a', 'b'],
['c', 'd']], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2 for double-contraction
array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, 1)
array([[['acc', 'bdd'],
['aaacccc', 'bbbdddd']],
[['aaaaacccccc', 'bbbbbdddddd'],
['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)
>>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
array([[[[['a', 'b'],
['c', 'd']],
...
>>> np.tensordot(a, A, (0, 1))
array([[['abbbbb', 'cddddd'],
['aabbbbbb', 'ccdddddd']],
[['aaabbbbbbb', 'cccddddddd'],
['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[['abb', 'cdd'],
['aaabbbb', 'cccdddd']],
[['aaaaabbbbbb', 'cccccdddddd'],
['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)
"""
try:
iter(axes)
except Exception:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = a.ndim
bs = b.shape
ndb = b.ndim
equal = True
if na != nb:
equal = False
else:
for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError("shape-mismatch for sum")
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
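# Illustrative sketch, not part of NumPy: `_demo_tensordot_as_dot` is a
# hypothetical helper showing the transpose/reshape/dot strategy used above:
# a single-axis contraction is a 2-D matrix product on reshaped operands.
def _demo_tensordot_as_dot():
    import numpy as np
    a = np.arange(24.).reshape(2, 3, 4)
    b = np.arange(20.).reshape(4, 5)
    expected = (a.reshape(6, 4) @ b).reshape(2, 3, 5)  # flatten, dot, restore
    assert np.allclose(np.tensordot(a, b, axes=1), expected)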
def _roll_dispatcher(a, shift, axis=None):
return (a,)
@array_function_dispatch(_roll_dispatcher)
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Notes
-----
.. versionadded:: 1.12.0
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> np.roll(x, -2)
array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x2 = np.reshape(x, (2, 5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, -1)
array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 0]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, -1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
>>> np.roll(x2, -1, axis=1)
array([[1, 2, 3, 4, 0],
[6, 7, 8, 9, 5]])
>>> np.roll(x2, (1, 1), axis=(1, 0))
array([[9, 5, 6, 7, 8],
[4, 0, 1, 2, 3]])
>>> np.roll(x2, (2, 1), axis=(1, 0))
array([[8, 9, 5, 6, 7],
[3, 4, 0, 1, 2]])
"""
a = asanyarray(a)
if axis is None:
return roll(a.ravel(), shift, 0).reshape(a.shape)
else:
axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
broadcasted = broadcast(shift, axis)
if broadcasted.ndim > 1:
raise ValueError(
"'shift' and 'axis' should be scalars or 1D sequences")
shifts = {ax: 0 for ax in range(a.ndim)}
for sh, ax in broadcasted:
shifts[ax] += sh
rolls = [((slice(None), slice(None)),)] * a.ndim
for ax, offset in shifts.items():
offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.
if offset:
# (original, result), (original, result)
rolls[ax] = ((slice(None, -offset), slice(offset, None)),
(slice(-offset, None), slice(None, offset)))
result = empty_like(a)
for indices in itertools.product(*rolls):
arr_index, res_index = zip(*indices)
result[res_index] = a[arr_index]
return result
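# Illustrative sketch, not part of NumPy: `_demo_roll_decomposition` is a
# hypothetical helper showing the two (original, result) slice pairs that the
# implementation above builds for a single-axis shift.
def _demo_roll_decomposition():
    import numpy as np
    a = np.arange(5)
    out = np.empty_like(a)
    out[2:] = a[:-2]   # body shifted right by two
    out[:2] = a[-2:]   # tail wrapped around to the front
    assert (out == np.roll(a, 2)).all()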
def _rollaxis_dispatcher(a, axis, start=None):
return (a,)
@array_function_dispatch(_rollaxis_dispatcher)
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
This function continues to be supported for backward compatibility, but you
should prefer `moveaxis`. The `moveaxis` function was added in NumPy
1.11.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to be rolled. The positions of the other axes do not
change relative to one another.
start : int, optional
When ``start <= axis``, the axis is rolled back until it lies in
this position. When ``start > axis``, the axis is rolled until it
lies before this position. The default, 0, results in a "complete"
roll. The following table describes how negative values of ``start``
are interpreted:
.. table::
:align: left
+-------------------+----------------------+
| ``start`` | Normalized ``start`` |
+===================+======================+
| ``-(arr.ndim+1)`` | raise ``AxisError`` |
+-------------------+----------------------+
| ``-arr.ndim`` | 0 |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``-1`` | ``arr.ndim-1`` |
+-------------------+----------------------+
| ``0`` | ``0`` |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``arr.ndim`` | ``arr.ndim`` |
+-------------------+----------------------+
| ``arr.ndim + 1`` | raise ``AxisError`` |
+-------------------+----------------------+
.. |vdots| unicode:: U+22EE .. Vertical Ellipsis
Returns
-------
res : ndarray
For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
NumPy versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
See Also
--------
moveaxis : Move array axes to new positions.
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
axis = normalize_axis_index(axis, n)
if start < 0:
start += n
msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
if not (0 <= start < n + 1):
raise AxisError(msg % ('start', -n, 'start', n + 1, start))
if axis < start:
# it's been removed
start -= 1
if axis == start:
return a[...]
axes = list(range(0, n))
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
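# Illustrative sketch, not part of NumPy: `_demo_rollaxis_vs_moveaxis` is a
# hypothetical helper comparing rollaxis with the preferred moveaxis on a
# case where the two agree.
def _demo_rollaxis_vs_moveaxis():
    import numpy as np
    a = np.ones((3, 4, 5, 6))
    assert np.rollaxis(a, 3, 1).shape == np.moveaxis(a, 3, 1).shape == (3, 6, 4, 5)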
def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
"""
Normalizes an axis argument into a tuple of non-negative integer axes.
This handles shorthands such as ``1`` and converts them to ``(1,)``,
as well as performing the handling of negative indices covered by
`normalize_axis_index`.
By default, this forbids axes from being specified multiple times.
Used internally by multi-axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int, iterable of int
The un-normalized index or indices of the axis.
ndim : int
The number of dimensions of the array that `axis` should be normalized
against.
argname : str, optional
A prefix to put before the error message, typically the name of the
argument.
allow_duplicate : bool, optional
If False, the default, disallow an axis from being specified twice.
Returns
-------
normalized_axes : tuple of int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If any axis provided is out of range
ValueError
If an axis is repeated
See also
--------
normalize_axis_index : normalizing a single scalar axis
"""
# Optimization to speed-up the most common cases.
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
# Going via an iterator directly is slower than via list comprehension.
axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
if not allow_duplicate and len(set(axis)) != len(axis):
if argname:
raise ValueError('repeated axis in `{}` argument'.format(argname))
else:
raise ValueError('repeated axis')
return axis
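# Illustrative sketch, not part of NumPy: `_demo_normalize_axis_tuple` is a
# hypothetical helper (calling the function defined just above) showing
# scalar promotion and negative-axis handling.
def _demo_normalize_axis_tuple():
    assert normalize_axis_tuple(-1, 3) == (2,)         # scalar -> 1-tuple
    assert normalize_axis_tuple((0, -2), 3) == (0, 1)  # negatives normalized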
def _moveaxis_dispatcher(a, source, destination):
return (a,)
@array_function_dispatch(_moveaxis_dispatcher)
def moveaxis(a, source, destination):
"""
Move axes of an array to new positions.
Other axes remain in their original order.
.. versionadded:: 1.11.0
Parameters
----------
a : np.ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
destination : int or sequence of int
Destination positions for each of the original axes. These must also be
unique.
Returns
-------
result : np.ndarray
Array with moved axes. This array is a view of the input array.
See Also
--------
transpose : Permute the dimensions of an array.
swapaxes : Interchange two axes of an array.
Examples
--------
>>> x = np.zeros((3, 4, 5))
>>> np.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> np.moveaxis(x, -1, 0).shape
(5, 3, 4)
These all achieve the same result:
>>> np.transpose(x).shape
(5, 4, 3)
>>> np.swapaxes(x, 0, -1).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
(5, 4, 3)
"""
try:
# allow duck-array types if they define transpose
transpose = a.transpose
except AttributeError:
a = asarray(a)
transpose = a.transpose
source = normalize_axis_tuple(source, a.ndim, 'source')
destination = normalize_axis_tuple(destination, a.ndim, 'destination')
if len(source) != len(destination):
raise ValueError('`source` and `destination` arguments must have '
'the same number of elements')
order = [n for n in range(a.ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
result = transpose(order)
return result
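# Illustrative sketch, not part of NumPy: `_demo_moveaxis_order` is a
# hypothetical helper showing the permutation moveaxis builds: moving axis 0
# to the end of a 3-D array is the transpose (1, 2, 0).
def _demo_moveaxis_order():
    import numpy as np
    x = np.zeros((3, 4, 5))
    assert np.moveaxis(x, 0, -1).shape == x.transpose(1, 2, 0).shape == (4, 5, 3)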
def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
return (a, b)
@array_function_dispatch(_cross_dispatcher)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Notes
-----
.. versionadded:: 1.9.0
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
array(-3)
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the *right-hand rule*.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
a = asarray(a)
b = asarray(b)
# Check axisa and axisb are within bounds
axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError(msg)
# Create the output array
shape = broadcast(a[..., 0], b[..., 0]).shape
if a.shape[-1] == 3 or b.shape[-1] == 3:
shape += (3,)
# Check axisc is within bounds
axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
dtype = promote_types(a.dtype, b.dtype)
cp = empty(shape, dtype)
# create local aliases for readability
a0 = a[..., 0]
a1 = a[..., 1]
if a.shape[-1] == 3:
a2 = a[..., 2]
b0 = b[..., 0]
b1 = b[..., 1]
if b.shape[-1] == 3:
b2 = b[..., 2]
if cp.ndim != 0 and cp.shape[-1] == 3:
cp0 = cp[..., 0]
cp1 = cp[..., 1]
cp2 = cp[..., 2]
if a.shape[-1] == 2:
if b.shape[-1] == 2:
# a0 * b1 - a1 * b0
multiply(a0, b1, out=cp)
cp -= a1 * b0
return cp
else:
assert b.shape[-1] == 3
# cp0 = a1 * b2 - 0 (a2 = 0)
# cp1 = 0 - a0 * b2 (a2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
multiply(a0, b2, out=cp1)
negative(cp1, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
else:
assert a.shape[-1] == 3
if b.shape[-1] == 3:
# cp0 = a1 * b2 - a2 * b1
# cp1 = a2 * b0 - a0 * b2
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
tmp = array(a2 * b1)
cp0 -= tmp
multiply(a2, b0, out=cp1)
multiply(a0, b2, out=tmp)
cp1 -= tmp
multiply(a0, b1, out=cp2)
multiply(a1, b0, out=tmp)
cp2 -= tmp
else:
assert b.shape[-1] == 2
# cp0 = 0 - a2 * b1 (b2 = 0)
# cp1 = a2 * b0 - 0 (b2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a2, b1, out=cp0)
negative(cp0, out=cp0)
multiply(a2, b0, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
return moveaxis(cp, -1, axisc)
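# Illustrative sketch, not part of NumPy: `_demo_cross_2d` is a hypothetical
# helper showing the 2-D handling above: 2-vectors act like 3-vectors with
# z = 0, and when both inputs are 2-D only the z component is returned.
def _demo_cross_2d():
    import numpy as np
    assert np.cross([1, 2], [4, 5]) == -3             # z component only
    assert np.allclose(np.cross([1, 2, 0], [4, 5, 0]), [0, 0, -3])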
little_endian = (sys.byteorder == 'little')
@set_module('numpy')
def indices(dimensions, dtype=int, sparse=False):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0, 1, ...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
sparse : boolean, optional
Return a sparse representation of the grid instead of a dense
representation. Default is False.
.. versionadded:: 1.17
Returns
-------
grid : one ndarray or tuple of ndarrays
If sparse is False:
Returns one array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
If sparse is True:
Returns a tuple of arrays, with
``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
dimensions[i] in the ith place
See Also
--------
mgrid, ogrid, meshgrid
Notes
-----
The output shape in the dense case is obtained by prepending the number
of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N, r0, ..., rN-1)``.
    Each subarray ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k, i0, i1, ..., iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
If sparse is set to true, the grid will be returned in a sparse
representation.
>>> i, j = np.indices((2, 3), sparse=True)
>>> i.shape
(2, 1)
>>> j.shape
(1, 3)
>>> i # row indices
array([[0],
[1]])
>>> j # column indices
array([[0, 1, 2]])
"""
dimensions = tuple(dimensions)
N = len(dimensions)
shape = (1,)*N
if sparse:
res = tuple()
else:
res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
idx = arange(dim, dtype=dtype).reshape(
shape[:i] + (dim,) + shape[i+1:]
)
if sparse:
res = res + (idx,)
else:
res[i] = idx
return res
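# Illustrative sketch, not part of NumPy: `_demo_indices_sparse` is a
# hypothetical helper showing that the sparse grids broadcast back to the
# dense result built in the loop above.
def _demo_indices_sparse():
    import numpy as np
    dense = np.indices((2, 3))
    i, j = np.indices((2, 3), sparse=True)
    assert (np.broadcast_to(i, (2, 3)) == dense[0]).all()
    assert (np.broadcast_to(j, (2, 3)) == dense[1]).all()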
def _fromfunction_dispatcher(function, shape, *, dtype=None, like=None, **kwargs):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, where N is the rank of
`shape`. Each parameter represents the coordinates of the array
varying along a specific axis. For example, if `shape`
were ``(2, 2)``, then the parameters would be
``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
fromfunction : any
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
`fromfunction` would not match the `shape` parameter.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float)
array([[0., 0.],
[1., 1.]])
>>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float)
array([[0., 1.],
[0., 1.]])
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]])
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
if like is not None:
return _fromfunction_with_like(function, shape, dtype=dtype, like=like, **kwargs)
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
_fromfunction_with_like = array_function_dispatch(
_fromfunction_dispatcher
)(fromfunction)
def _frombuffer(buf, dtype, shape, order):
    # Rebuild an ndarray from a raw memory buffer plus its metadata; used
    # when unpickling arrays.
    return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
@set_module('numpy')
def isscalar(element):
"""
Returns True if the type of `element` is a scalar type.
Parameters
----------
element : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `element` is a scalar type, False if it is not.
See Also
--------
ndim : Get the number of dimensions of an array
Notes
-----
If you need a stricter way to identify a *numerical* scalar, use
``isinstance(x, numbers.Number)``, as that returns ``False`` for most
non-numerical elements such as strings.
In most cases ``np.ndim(x) == 0`` should be used instead of this function,
    as that will also return ``True`` for 0d arrays. This is how numpy overloads
functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
argument to `histogram`. Some key differences:
+--------------------------------------+---------------+-------------------+
| x |``isscalar(x)``|``np.ndim(x) == 0``|
+======================================+===============+===================+
| PEP 3141 numeric objects (including | ``True`` | ``True`` |
| builtins) | | |
+--------------------------------------+---------------+-------------------+
| builtin string and buffer objects | ``True`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other builtin objects, like | ``False`` | ``True`` |
| `pathlib.Path`, `Exception`, | | |
| the result of `re.compile` | | |
+--------------------------------------+---------------+-------------------+
| third-party objects like | ``False`` | ``True`` |
| `matplotlib.figure.Figure` | | |
+--------------------------------------+---------------+-------------------+
| zero-dimensional numpy arrays | ``False`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other numpy arrays | ``False`` | ``False`` |
+--------------------------------------+---------------+-------------------+
| `list`, `tuple`, and other sequence | ``False`` | ``False`` |
| objects | | |
+--------------------------------------+---------------+-------------------+
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar(np.array(3.1))
False
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
>>> np.isscalar('numpy')
True
NumPy supports PEP 3141 numbers:
>>> from fractions import Fraction
>>> np.isscalar(Fraction(5, 17))
True
>>> from numbers import Number
>>> np.isscalar(Number())
True
"""
return (isinstance(element, generic)
or type(element) in ScalarType
or isinstance(element, numbers.Number))
@set_module('numpy')
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in the
designated form.
If the `width` value is insufficient, it will be ignored, and `num` will
be returned in binary (`num` > 0) or two's complement (`num` < 0) form
with its width equal to the minimum number of bits needed to represent
the number in the designated form. This behavior is deprecated and will
later raise an error.
.. deprecated:: 1.12.0
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
"""
def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
"will raise an error in the future.", DeprecationWarning,
stacklevel=3)
# Ensure that num is a Python integer to avoid overflow or unwanted
# casts to floating point.
num = operator.index(num)
if num == 0:
return '0' * (width or 1)
elif num > 0:
binary = bin(num)[2:]
binwidth = len(binary)
outwidth = (binwidth if width is None
else max(binwidth, width))
warn_if_insufficient(width, binwidth)
return binary.zfill(outwidth)
else:
if width is None:
return '-' + bin(-num)[2:]
else:
poswidth = len(bin(-num)[2:])
# See gh-8679: remove extra digit
# for numbers at boundaries.
if 2**(poswidth - 1) == -num:
poswidth -= 1
twocomp = 2**(poswidth + 1) + num
binary = bin(twocomp)[2:]
binwidth = len(binary)
outwidth = max(binwidth, width)
warn_if_insufficient(width, binwidth)
return '1' * (outwidth - binwidth) + binary
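# Illustrative sketch, not part of NumPy: `_demo_twos_complement` is a
# hypothetical helper checking the two's-complement identity used above:
# the width-w representation of a negative num is bin(2**w + num).
def _demo_twos_complement():
    import numpy as np
    assert np.binary_repr(-3, width=5) == format(2**5 - 3, 'b')  # '11101'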
@set_module('numpy')
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : int
The value to convert. Positive and negative values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if base > len(digits):
raise ValueError("Bases greater than 36 not handled in base_repr.")
elif base < 2:
raise ValueError("Bases less than 2 not handled in base_repr.")
num = abs(number)
res = []
while num:
res.append(digits[num % base])
num //= base
if padding:
res.append('0' * padding)
if number < 0:
res.append('-')
return ''.join(reversed(res or '0'))
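# Illustrative sketch, not part of NumPy: `_demo_base_repr_roundtrip` is a
# hypothetical helper showing that Python's int() parser inverts base_repr
# for the supported bases.
def _demo_base_repr_roundtrip():
    import numpy as np
    assert int(np.base_repr(255, base=16), 16) == 255   # 'FF' -> 255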
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0], val) for name in dt.names]
return tuple(res)
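# Small usage sketch (added for exposition; not part of the original source):
# for a structured dtype, `_maketup` mirrors the field nesting with `val`
# placed in every scalar slot.
# >>> _maketup([('a', 'i4'), ('b', 'f8')], 0)
# (0, 0)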
def _identity_dispatcher(n, dtype=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def identity(n, dtype=None, *, like=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
if like is not None:
return _identity_with_like(n, dtype=dtype, like=like)
from numpy import eye
return eye(n, dtype=dtype, like=like)
_identity_with_like = array_function_dispatch(
_identity_dispatcher
)(identity)
def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_allclose_dispatcher)
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
NaNs are treated as equal if they are in the same place and if
``equal_nan=True``. Infs are treated as equal if they are in the same
place and of the same sign in both arrays.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
.. versionadded:: 1.10.0
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise.
See Also
--------
isclose, all, any, equal
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
``allclose(a, b)`` might be different from ``allclose(b, a)`` in
some rare cases.
The comparison of `a` and `b` uses standard broadcasting, which
means that `a` and `b` need not have the same shape in order for
``allclose(a, b)`` to evaluate to True. The same is true for
`equal` but not `array_equal`.
`allclose` is not defined for non-numeric data types.
`bool` is considered a numeric data-type for this purpose.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
True
"""
res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
return bool(res)
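# Asymmetry sketch (added for exposition; not part of the original source):
# because the tolerance term scales with ``abs(b)`` only, swapping the
# arguments can flip the result when `rtol` dominates.
# >>> np.allclose(1.0, 1.1, rtol=0.1, atol=0.0)
# True
# >>> np.allclose(1.1, 1.0, rtol=0.1, atol=0.0)
# False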
def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_isclose_dispatcher)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
.. warning:: The default `atol` is not appropriate for comparing numbers
that are much smaller than one (see Notes).
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
math.isclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Unlike the built-in `math.isclose`, the above equation is not symmetric
in `a` and `b` -- it assumes `b` is the reference value -- so that
`isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
the default value of atol is not zero, and is used to determine what
small values should be considered close to zero. The default value is
appropriate for expected values of order unity: if the expected values
are significantly smaller than one, it can result in false positives.
`atol` should be carefully selected for the use case at hand. A zero value
for `atol` will result in `False` if either `a` or `b` is zero.
`isclose` is not defined for non-numeric data types.
`bool` is considered a numeric data-type for this purpose.
Examples
--------
>>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False])
>>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True])
>>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True])
>>> np.isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False])
>>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True])
>>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
array([ True, False])
>>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
array([False, False])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
array([ True, True])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
array([False, True])
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
return less_equal(abs(x-y), atol + rtol * abs(y))
x = asanyarray(a)
y = asanyarray(b)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
# NOTE: We explicitly allow timedelta, which used to work. This could
# possibly be deprecated. See also gh-18286.
# timedelta works if `atol` is an integer or also a timedelta,
# although the default tolerances are unlikely to be useful.
if y.dtype.kind != "m":
dt = multiarray.result_type(y, 1.)
y = asanyarray(y, dtype=dt)
xfin = isfinite(x)
yfin = isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * ones_like(cond)
y = y * ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = isnan(x) & isnan(y)
# Needed to treat masked arrays correctly; ``= True`` would not work.
cond[both_nan] = both_nan[both_nan]
return cond[()] # Flatten 0d arrays to scalars
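# Pitfall sketch (added for exposition; not part of the original source):
# with the default ``atol=1e-8``, values far below one compare as "close"
# even when they differ by an order of magnitude; pass ``atol=0.0`` for a
# purely relative comparison.
# >>> np.isclose(1e-9, 2e-9)
# True
# >>> np.isclose(1e-9, 2e-9, atol=0.0)
# False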
def _array_equal_dispatcher(a1, a2, equal_nan=None):
return (a1, a2)
@array_function_dispatch(_array_equal_dispatcher)
def array_equal(a1, a2, equal_nan=False):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
equal_nan : bool
Whether to compare NaN's as equal. If the dtype of a1 and a2 is
complex, values will be considered equal if either the real or the
imaginary component of a given value is ``nan``.
.. versionadded:: 1.19.0
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
>>> a = np.array([1, np.nan])
>>> np.array_equal(a, a)
False
>>> np.array_equal(a, a, equal_nan=True)
True
When ``equal_nan`` is True, complex values with nan components are
considered equal if either the real *or* the imaginary components are nan.
>>> a = np.array([1 + 1j])
>>> b = a.copy()
>>> a.real = np.nan
>>> b.imag = np.nan
>>> np.array_equal(a, b, equal_nan=True)
True
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
if a1.shape != a2.shape:
return False
if not equal_nan:
return bool(asarray(a1 == a2).all())
# Handling NaN values if equal_nan is True
a1nan, a2nan = isnan(a1), isnan(a2)
# NaN's occur at different locations
if not (a1nan == a2nan).all():
return False
# Shapes of a1, a2 and masks are guaranteed to be consistent by this point
return bool(asarray(a1[~a1nan] == a2[~a1nan]).all())
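# Contrast sketch (added for exposition; not part of the original source):
# unlike ``array_equiv`` below, ``array_equal`` never broadcasts, so a shape
# mismatch is always unequal.
# >>> np.array_equal([1, 2], [[1, 2], [1, 2]])
# False
# >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
# True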
def _array_equiv_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_array_equiv_dispatcher)
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
True
>>> np.array_equiv([1, 2], [1, 3])
False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
try:
multiarray.broadcast(a1, a2)
except Exception:
return False
return bool(asarray(a1 == a2).all())
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
def extend_all(module):
existing = set(__all__)
mall = getattr(module, '__all__')
for a in mall:
if a not in existing:
__all__.append(a)
from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
from . import arrayprint
from .arrayprint import *
from . import _asarray
from ._asarray import *
from . import _ufunc_config
from ._ufunc_config import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
extend_all(arrayprint)
extend_all(_asarray)
extend_all(_ufunc_config)
| 77,444 | Python | 29.311155 | 92 | 0.55617 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/getlimits.py | """Machine limits for Float32 and Float64 and (long double) if available...
"""
__all__ = ['finfo', 'iinfo']
import warnings
from ._machar import MachAr
from .overrides import set_module
from . import numeric
from . import numerictypes as ntypes
from .numeric import array, inf, NaN
from .umath import log10, exp2, nextafter, isnan
def _fr0(a):
"""fix rank-0 --> rank-1"""
if a.ndim == 0:
a = a.copy()
a.shape = (1,)
return a
def _fr1(a):
"""fix rank > 0 --> rank-0"""
if a.size == 1:
a = a.copy()
a.shape = ()
return a
class MachArLike:
""" Object to simulate MachAr instance """
def __init__(self, ftype, *, eps, epsneg, huge, tiny,
ibeta, smallest_subnormal=None, **kwargs):
self.params = _MACHAR_PARAMS[ftype]
self.ftype = ftype
self.title = self.params['title']
# Parameter types same as for discovered MachAr object.
if not smallest_subnormal:
self._smallest_subnormal = nextafter(
self.ftype(0), self.ftype(1), dtype=self.ftype)
else:
self._smallest_subnormal = smallest_subnormal
self.epsilon = self.eps = self._float_to_float(eps)
self.epsneg = self._float_to_float(epsneg)
self.xmax = self.huge = self._float_to_float(huge)
self.xmin = self._float_to_float(tiny)
self.smallest_normal = self.tiny = self._float_to_float(tiny)
self.ibeta = self.params['itype'](ibeta)
self.__dict__.update(kwargs)
self.precision = int(-log10(self.eps))
self.resolution = self._float_to_float(
self._float_conv(10) ** (-self.precision))
self._str_eps = self._float_to_str(self.eps)
self._str_epsneg = self._float_to_str(self.epsneg)
self._str_xmin = self._float_to_str(self.xmin)
self._str_xmax = self._float_to_str(self.xmax)
self._str_resolution = self._float_to_str(self.resolution)
self._str_smallest_normal = self._float_to_str(self.xmin)
@property
def smallest_subnormal(self):
"""Return the value for the smallest subnormal.
Returns
-------
smallest_subnormal : float
value for the smallest subnormal.
Warns
-----
UserWarning
If the calculated value for the smallest subnormal is zero.
"""
# Check that the calculated value is not zero, in case it raises a
# warning.
value = self._smallest_subnormal
if self.ftype(0) == value:
warnings.warn(
'The value of the smallest subnormal for {} type '
'is zero.'.format(self.ftype), UserWarning, stacklevel=2)
return self._float_to_float(value)
@property
def _str_smallest_subnormal(self):
"""Return the string representation of the smallest subnormal."""
return self._float_to_str(self.smallest_subnormal)
def _float_to_float(self, value):
"""Converts float to float.
Parameters
----------
value : float
value to be converted.
"""
return _fr1(self._float_conv(value))
def _float_conv(self, value):
"""Converts float to conv.
Parameters
----------
value : float
value to be converted.
"""
return array([value], self.ftype)
def _float_to_str(self, value):
"""Converts float to str.
Parameters
----------
value : float
value to be converted.
"""
return self.params['fmt'] % array(_fr0(value)[0], self.ftype)
_convert_to_float = {
ntypes.csingle: ntypes.single,
ntypes.complex_: ntypes.float_,
ntypes.clongfloat: ntypes.longfloat
}
# Parameters for creating MachAr / MachAr-like objects
_title_fmt = 'numpy {} precision floating point number'
_MACHAR_PARAMS = {
ntypes.double: dict(
itype = ntypes.int64,
fmt = '%24.16e',
title = _title_fmt.format('double')),
ntypes.single: dict(
itype = ntypes.int32,
fmt = '%15.7e',
title = _title_fmt.format('single')),
ntypes.longdouble: dict(
itype = ntypes.longlong,
fmt = '%s',
title = _title_fmt.format('long double')),
ntypes.half: dict(
itype = ntypes.int16,
fmt = '%12.5e',
title = _title_fmt.format('half'))}
# Key to identify the floating point type. Key is result of
# ftype('-0.1').newbyteorder('<').tobytes()
# See:
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
_KNOWN_TYPES = {}
def _register_type(machar, bytepat):
_KNOWN_TYPES[bytepat] = machar
_float_ma = {}
def _register_known_types():
# Known parameters for float16
# See docstring of MachAr class for description of parameters.
f16 = ntypes.float16
float16_ma = MachArLike(f16,
machep=-10,
negep=-11,
minexp=-14,
maxexp=16,
it=10,
iexp=5,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(f16(-10)),
epsneg=exp2(f16(-11)),
huge=f16(65504),
tiny=f16(2 ** -14))
_register_type(float16_ma, b'f\xae')
_float_ma[16] = float16_ma
# Known parameters for float32
f32 = ntypes.float32
float32_ma = MachArLike(f32,
machep=-23,
negep=-24,
minexp=-126,
maxexp=128,
it=23,
iexp=8,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(f32(-23)),
epsneg=exp2(f32(-24)),
huge=f32((1 - 2 ** -24) * 2**128),
tiny=exp2(f32(-126)))
_register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
_float_ma[32] = float32_ma
# Known parameters for float64
f64 = ntypes.float64
epsneg_f64 = 2.0 ** -53.0
tiny_f64 = 2.0 ** -1022.0
float64_ma = MachArLike(f64,
machep=-52,
negep=-53,
minexp=-1022,
maxexp=1024,
it=52,
iexp=11,
ibeta=2,
irnd=5,
ngrd=0,
eps=2.0 ** -52.0,
epsneg=epsneg_f64,
huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
tiny=tiny_f64)
_register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
_float_ma[64] = float64_ma
# Known parameters for IEEE 754 128-bit binary float
ld = ntypes.longdouble
epsneg_f128 = exp2(ld(-113))
tiny_f128 = exp2(ld(-16382))
# Ignore runtime error when this is not f128
with numeric.errstate(all='ignore'):
huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
float128_ma = MachArLike(ld,
machep=-112,
negep=-113,
minexp=-16382,
maxexp=16384,
it=112,
iexp=15,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(ld(-112)),
epsneg=epsneg_f128,
huge=huge_f128,
tiny=tiny_f128)
# IEEE 754 128-bit binary float
_register_type(float128_ma,
b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
_float_ma[128] = float128_ma
# Known parameters for float80 (Intel 80-bit extended precision)
epsneg_f80 = exp2(ld(-64))
tiny_f80 = exp2(ld(-16382))
# Ignore runtime error when this is not f80
with numeric.errstate(all='ignore'):
huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
float80_ma = MachArLike(ld,
machep=-63,
negep=-64,
minexp=-16382,
maxexp=16384,
it=63,
iexp=15,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(ld(-63)),
epsneg=epsneg_f80,
huge=huge_f80,
tiny=tiny_f80)
# float80, first 10 bytes containing actual storage
_register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
_float_ma[80] = float80_ma
# Guessed / known parameters for double double; see:
# https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
# These numbers have the same exponent range as float64, but extended number of
# digits in the significand.
huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
# As the smallest_normal in double double is so hard to calculate we set
# it to NaN.
smallest_normal_dd = NaN
# Leave the same value for the smallest subnormal as double
smallest_subnormal_dd = ld(nextafter(0., 1.))
float_dd_ma = MachArLike(ld,
machep=-105,
negep=-106,
minexp=-1022,
maxexp=1024,
it=105,
iexp=11,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(ld(-105)),
epsneg=exp2(ld(-106)),
huge=huge_dd,
tiny=smallest_normal_dd,
smallest_subnormal=smallest_subnormal_dd)
# double double; low, high order (e.g. PPC 64)
_register_type(float_dd_ma,
b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
# double double; high, low order (e.g. PPC 64 le)
_register_type(float_dd_ma,
b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
_float_ma['dd'] = float_dd_ma
def _get_machar(ftype):
""" Get MachAr instance or MachAr-like instance
Get parameters for floating point type, by first trying signatures of
various known floating point types, then, if none match, attempting to
identify parameters by analysis.
Parameters
----------
ftype : class
Numpy floating point type class (e.g. ``np.float64``)
Returns
-------
ma_like : instance of :class:`MachAr` or :class:`MachArLike`
Object giving floating point parameters for `ftype`.
Warns
-----
UserWarning
If the binary signature of the float type is not in the dictionary of
known float types.
"""
params = _MACHAR_PARAMS.get(ftype)
if params is None:
raise ValueError(repr(ftype))
# Detect known / suspected types
key = ftype('-0.1').newbyteorder('<').tobytes()
ma_like = None
if ftype == ntypes.longdouble:
# Could be 80 bit == 10 byte extended precision, where last bytes can
# be random garbage.
# Comparing first 10 bytes to pattern first to avoid branching on the
# random garbage.
ma_like = _KNOWN_TYPES.get(key[:10])
if ma_like is None:
ma_like = _KNOWN_TYPES.get(key)
if ma_like is not None:
return ma_like
# Fall back to parameter discovery
warnings.warn(
f'Signature {key} for {ftype} does not match any known type: '
'falling back to type probe function.\n'
'This warning indicates broken support for the dtype!',
UserWarning, stacklevel=2)
return _discovered_machar(ftype)
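# Detection sketch (added for exposition; not part of the original source):
# the lookup key is just the little-endian byte pattern of -0.1, e.g. for
# IEEE double it matches the pattern registered in _register_known_types.
# >>> import numpy as np
# >>> np.float64('-0.1').newbyteorder('<').tobytes()
# b'\x9a\x99\x99\x99\x99\x99\xb9\xbf'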
def _discovered_machar(ftype):
""" Create MachAr instance with found information on float types
"""
params = _MACHAR_PARAMS[ftype]
return MachAr(lambda v: array([v], ftype),
lambda v:_fr0(v.astype(params['itype']))[0],
lambda v:array(_fr0(v)[0], ftype),
lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
params['title'])
@set_module('numpy')
class finfo:
"""
finfo(dtype)
Machine limits for floating point types.
Attributes
----------
bits : int
The number of bits occupied by the type.
eps : float
The difference between 1.0 and the next smallest representable float
larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
standard, ``eps = 2**-52``, approximately 2.22e-16.
epsneg : float
The difference between 1.0 and the next smallest representable float
less than 1.0. For example, for 64-bit binary floats in the IEEE-754
standard, ``epsneg = 2**-53``, approximately 1.11e-16.
iexp : int
The number of bits in the exponent portion of the floating point
representation.
machar : MachAr
The object which calculated these parameters and holds more
detailed information.
.. deprecated:: 1.22
machep : int
The exponent that yields `eps`.
max : floating point number of the appropriate type
The largest representable number.
maxexp : int
The smallest positive power of the base (2) that causes overflow.
min : floating point number of the appropriate type
The smallest representable number, typically ``-max``.
minexp : int
The most negative power of the base (2) consistent with there
being no leading 0's in the mantissa.
negep : int
The exponent that yields `epsneg`.
nexp : int
The number of bits in the exponent including its sign and bias.
nmant : int
The number of bits in the mantissa.
precision : int
The approximate number of decimal digits to which this kind of
float is precise.
resolution : floating point number of the appropriate type
The approximate decimal resolution of this type, i.e.,
``10**-precision``.
tiny : float
An alias for `smallest_normal`, kept for backwards compatibility.
smallest_normal : float
The smallest positive floating point number with 1 as leading bit in
the mantissa following IEEE-754 (see Notes).
smallest_subnormal : float
The smallest positive floating point number with 0 as leading bit in
the mantissa following IEEE-754.
Parameters
----------
dtype : float, dtype, or instance
Kind of floating point data-type about which to get information.
See Also
--------
MachAr : The implementation of the tests that produce this information.
iinfo : The equivalent for integer data types.
spacing : The distance between a value and the nearest adjacent number
nextafter : The next floating point value after x1 towards x2
Notes
-----
For developers of NumPy: do not instantiate this at the module level.
The initial calculation of these parameters is expensive and negatively
impacts import times. These objects are cached, so calling ``finfo()``
repeatedly inside your functions is not a problem.
Note that ``smallest_normal`` is not actually the smallest positive
representable value in a NumPy floating point type. As in the IEEE-754
standard [1]_, NumPy floating point types make use of subnormal numbers to
fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
may have significantly reduced precision [2]_.
References
----------
.. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935
.. [2] Wikipedia, "Denormal Numbers",
https://en.wikipedia.org/wiki/Denormal_number
"""
_finfo_cache = {}
def __new__(cls, dtype):
try:
dtype = numeric.dtype(dtype)
except TypeError:
# In case a float instance was given
dtype = numeric.dtype(type(dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
dtypes = [dtype]
newdtype = numeric.obj2sctype(dtype)
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
if not issubclass(dtype, numeric.inexact):
raise ValueError("data type %r not inexact" % (dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
if not issubclass(dtype, numeric.floating):
newdtype = _convert_to_float[dtype]
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
obj = object.__new__(cls)._init(dtype)
for dt in dtypes:
cls._finfo_cache[dt] = obj
return obj
def _init(self, dtype):
self.dtype = numeric.dtype(dtype)
machar = _get_machar(dtype)
for word in ['precision', 'iexp',
'maxexp', 'minexp', 'negep',
'machep']:
setattr(self, word, getattr(machar, word))
for word in ['resolution', 'epsneg', 'smallest_subnormal']:
setattr(self, word, getattr(machar, word).flat[0])
self.bits = self.dtype.itemsize * 8
self.max = machar.huge.flat[0]
self.min = -self.max
self.eps = machar.eps.flat[0]
self.nexp = machar.iexp
self.nmant = machar.it
self._machar = machar
self._str_tiny = machar._str_xmin.strip()
self._str_max = machar._str_xmax.strip()
self._str_epsneg = machar._str_epsneg.strip()
self._str_eps = machar._str_eps.strip()
self._str_resolution = machar._str_resolution.strip()
self._str_smallest_normal = machar._str_smallest_normal.strip()
self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()
return self
def __str__(self):
fmt = (
'Machine parameters for %(dtype)s\n'
'---------------------------------------------------------------\n'
'precision = %(precision)3s resolution = %(_str_resolution)s\n'
'machep = %(machep)6s eps = %(_str_eps)s\n'
'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
'maxexp = %(maxexp)6s max = %(_str_max)s\n'
'nexp = %(nexp)6s min = -max\n'
'smallest_normal = %(_str_smallest_normal)s '
'smallest_subnormal = %(_str_smallest_subnormal)s\n'
'---------------------------------------------------------------\n'
)
return fmt % self.__dict__
def __repr__(self):
c = self.__class__.__name__
d = self.__dict__.copy()
d['klass'] = c
return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
@property
def smallest_normal(self):
"""Return the value for the smallest normal.
Returns
-------
smallest_normal : float
Value for the smallest normal.
Warns
-----
UserWarning
If the calculated value for the smallest normal is requested for
double-double.
"""
# This check is necessary because the value for smallest_normal is
# platform dependent for longdouble types.
if isnan(self._machar.smallest_normal.flat[0]):
warnings.warn(
'The value of smallest normal is undefined for double double',
UserWarning, stacklevel=2)
return self._machar.smallest_normal.flat[0]
@property
def tiny(self):
"""Return the value for tiny, alias of smallest_normal.
Returns
-------
tiny : float
Value for the smallest normal, alias of smallest_normal.
Warns
-----
UserWarning
If the calculated value for the smallest normal is requested for
double-double.
"""
return self.smallest_normal
@property
def machar(self):
"""The object which calculated these parameters and holds more
detailed information.
.. deprecated:: 1.22
"""
# Deprecated 2021-10-27, NumPy 1.22
warnings.warn(
"`finfo.machar` is deprecated (NumPy 1.22)",
DeprecationWarning, stacklevel=2,
)
return self._machar
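# Usage sketch (added for exposition; not part of the original source):
# >>> np.finfo(np.float64).eps      # 2**-52
# 2.220446049250313e-16
# >>> np.finfo(np.float32).max      # (1 - 2**-24) * 2**128
# 3.4028235e+38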
@set_module('numpy')
class iinfo:
"""
iinfo(type)
Machine limits for integer types.
Attributes
----------
bits : int
The number of bits occupied by the type.
min : int
The smallest integer expressible by the type.
max : int
The largest integer expressible by the type.
Parameters
----------
int_type : integer type, dtype, or instance
The kind of integer data type to get information about.
See Also
--------
finfo : The equivalent for floating point data types.
Examples
--------
With types:
>>> ii16 = np.iinfo(np.int16)
>>> ii16.min
-32768
>>> ii16.max
32767
>>> ii32 = np.iinfo(np.int32)
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
With instances:
>>> ii32 = np.iinfo(np.int32(10))
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
"""
_min_vals = {}
_max_vals = {}
def __init__(self, int_type):
try:
self.dtype = numeric.dtype(int_type)
except TypeError:
self.dtype = numeric.dtype(type(int_type))
self.kind = self.dtype.kind
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
if self.kind not in 'iu':
raise ValueError("Invalid integer data type %r." % (self.kind,))
@property
def min(self):
"""Minimum value of given dtype."""
if self.kind == 'u':
return 0
else:
try:
val = iinfo._min_vals[self.key]
except KeyError:
val = int(-(1 << (self.bits-1)))
iinfo._min_vals[self.key] = val
return val
@property
def max(self):
"""Maximum value of given dtype."""
try:
val = iinfo._max_vals[self.key]
except KeyError:
if self.kind == 'u':
val = int((1 << self.bits) - 1)
else:
val = int((1 << (self.bits-1)) - 1)
iinfo._max_vals[self.key] = val
return val
def __str__(self):
"""String representation."""
fmt = (
'Machine parameters for %(dtype)s\n'
'---------------------------------------------------------------\n'
'min = %(min)s\n'
'max = %(max)s\n'
'---------------------------------------------------------------\n'
)
return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
def __repr__(self):
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
self.min, self.max, self.dtype)
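# Consistency sketch (added for exposition; not part of the original source):
# the cached properties above reduce to plain bit arithmetic.
# >>> (-(1 << 15), (1 << 15) - 1)   # signed 16-bit bounds
# (-32768, 32767)
# >>> (np.iinfo(np.int16).min, np.iinfo(np.int16).max)
# (-32768, 32767)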
| 24,124 | Python | 33.513591 | 102 | 0.530716 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_asarray.py | """
Functions in the ``as*array`` family that promote array-likes into arrays.
`require` fits this category despite its name not matching this pattern.
"""
from .overrides import (
array_function_dispatch,
set_array_function_like_doc,
set_module,
)
from .multiarray import array, asanyarray
__all__ = ["require"]
def _require_dispatcher(a, dtype=None, requirements=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def require(a, dtype=None, requirements=None, *, like=None):
"""
Return an ndarray of the provided type that satisfies requirements.
This function is useful to be sure that an array with the correct flags
is returned for passing to compiled code (perhaps through ctypes).
Parameters
----------
a : array_like
The object to be converted to a type-and-requirement-satisfying array.
dtype : data-type
The required data-type. If None preserve the current dtype. If your
application requires the data to be in native byteorder, include
a byteorder specification as a part of the dtype specification.
requirements : str or list of str
The requirements list can be any of the following
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
* 'ALIGNED' ('A') - ensure a data-type aligned array
* 'WRITEABLE' ('W') - ensure a writable array
* 'OWNDATA' ('O') - ensure an array that owns its own data
* 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array with specified requirements and type if given.
See Also
--------
asarray : Convert input to an ndarray.
asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfortranarray : Convert input to an ndarray with column-major
memory order.
ndarray.flags : Information about the memory layout of the array.
Notes
-----
The returned array will be guaranteed to have the listed requirements
by making a copy if needed.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : False
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
>>> y.flags
C_CONTIGUOUS : False
F_CONTIGUOUS : True
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
"""
if like is not None:
return _require_with_like(
a,
dtype=dtype,
requirements=requirements,
like=like,
)
possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
'A': 'A', 'ALIGNED': 'A',
'W': 'W', 'WRITEABLE': 'W',
'O': 'O', 'OWNDATA': 'O',
'E': 'E', 'ENSUREARRAY': 'E'}
if not requirements:
return asanyarray(a, dtype=dtype)
else:
requirements = {possible_flags[x.upper()] for x in requirements}
if 'E' in requirements:
requirements.remove('E')
subok = False
else:
subok = True
order = 'A'
if requirements >= {'C', 'F'}:
raise ValueError('Cannot specify both "C" and "F" order')
elif 'F' in requirements:
order = 'F'
requirements.remove('F')
elif 'C' in requirements:
order = 'C'
requirements.remove('C')
arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
for prop in requirements:
if not arr.flags[prop]:
arr = arr.copy(order)
break
return arr
_require_with_like = array_function_dispatch(
_require_dispatcher
)(require)
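# Subclass sketch (added for exposition; not part of the original source):
# the 'E' requirement drops ndarray subclasses, returning a base array,
# while other flags pass subclasses through untouched.
# >>> m = np.ma.masked_array([1, 2, 3])
# >>> type(np.require(m, requirements=['E']))
# <class 'numpy.ndarray'>
# >>> type(np.require(m, requirements=['W']))
# <class 'numpy.ma.core.MaskedArray'>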
| 4,121 | Python | 28.654676 | 78 | 0.595001 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/__init__.py | """
Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
Please note that this module is private. All functions and objects
are available in the main ``numpy`` namespace - use that instead.
"""
from numpy.version import version as __version__
import os
import warnings
# disables OpenBLAS affinity setting of the main thread that limits
# python threads or processes to one core
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
if envkey not in os.environ:
os.environ[envkey] = '1'
env_added.append(envkey)
try:
from . import multiarray
except ImportError as exc:
import sys
msg = """
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.
We have compiled some common reasons and troubleshooting tips at:
https://numpy.org/devdocs/user/troubleshooting-importerror.html
Please note and check the following:
* The Python version is: Python%d.%d from "%s"
* The NumPy version is: "%s"
and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.
Original error was: %s
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
__version__, exc)
raise ImportError(msg)
finally:
for envkey in env_added:
del os.environ[envkey]
del envkey
del env_added
del os
from . import umath
# Check that multiarray,umath are pure python modules wrapping
# _multiarray_umath and not either of the old c-extension modules
if not (hasattr(multiarray, '_multiarray_umath') and
hasattr(umath, '_multiarray_umath')):
import sys
path = sys.modules['numpy'].__path__
msg = ("Something is wrong with the numpy installation. "
"While importing we detected an older version of "
"numpy in {}. One method of fixing this is to repeatedly uninstall "
"numpy until none is found, then reinstall this version.")
raise ImportError(msg.format(path))
from . import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
from .numeric import *
from . import fromnumeric
from .fromnumeric import *
from . import defchararray as char
from . import records as rec
from .records import record, recarray, format_parser
from .memmap import *
from .defchararray import chararray
from . import function_base
from .function_base import *
from . import _machar
from ._machar import *
from . import getlimits
from .getlimits import *
from . import shape_base
from .shape_base import *
from . import einsumfunc
from .einsumfunc import *
del nt
from .fromnumeric import amax as max, amin as min, round_ as round
from .numeric import absolute as abs
# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
from . import _add_newdocs_scalars
# add these for module-freeze analysis (like PyInstaller)
from . import _dtype_ctypes
from . import _internal
from . import _dtype
from . import _methods
__all__ = ['char', 'rec', 'memmap']
__all__ += numeric.__all__
__all__ += ['record', 'recarray', 'format_parser']
__all__ += ['chararray']
__all__ += function_base.__all__
__all__ += getlimits.__all__
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__
# We used to use `np.core._ufunc_reconstruct` to unpickle. This is unnecessary,
# but old pickles saved before 1.20 will be using it, and there is no reason
# to break loading them.
def _ufunc_reconstruct(module, name):
# The `fromlist` kwarg is required to ensure that `mod` points to the
# inner-most module rather than the parent package when module name is
# nested. This makes it possible to pickle non-toplevel ufuncs such as
# scipy.special.expit for instance.
mod = __import__(module, fromlist=[name])
return getattr(mod, name)
def _ufunc_reduce(func):
# Report the `__name__`. pickle will try to find the module. Note that
# pickle supports for this `__name__` to be a `__qualname__`. It may
# make sense to add a `__qualname__` to ufuncs, to allow this more
# explicitly (Numba has ufuncs as attributes).
# See also: https://github.com/dask/distributed/issues/3450
return func.__name__
def _DType_reconstruct(scalar_type):
# This is a work-around to pickle type(np.dtype(np.float64)), etc.
# and it should eventually be replaced with a better solution, e.g. when
# DTypes become HeapTypes.
return type(dtype(scalar_type))
def _DType_reduce(DType):
# To pickle a DType without having to add top-level names, pickle the
# scalar type for now (and assume that reconstruction will be possible).
if DType is dtype:
return "dtype" # must pickle `np.dtype` as a singleton.
scalar_type = DType.type # pickle the scalar type for reconstruction
return _DType_reconstruct, (scalar_type,)
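# Round-trip sketch (added for exposition; not part of the original source):
# ufuncs pickle by name and resolve back to the same singleton, and DType
# classes round-trip through their scalar type.
# >>> import pickle, numpy as np
# >>> pickle.loads(pickle.dumps(np.add)) is np.add
# True
# >>> DT = type(np.dtype(np.float64))
# >>> pickle.loads(pickle.dumps(DT)) is DT
# True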
def __getattr__(name):
# Deprecated 2021-10-20, NumPy 1.22
if name == "machar":
warnings.warn(
"The `np.core.machar` module is deprecated (NumPy 1.22)",
DeprecationWarning, stacklevel=2,
)
return _machar
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
import copyreg
copyreg.pickle(ufunc, _ufunc_reduce)
copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)
# Unclutter namespace (must keep _*_reconstruct for unpickling)
del copyreg
del _ufunc_reduce
del _DType_reduce
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| 5,660 | Python | 30.983051 | 79 | 0.708304 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_exceptions.py | """
Various richly-typed exceptions, that also help us deal with string formatting
in python where it's easier.
By putting the formatting in `__str__`, we also avoid paying the cost for
users who silence the exceptions.
"""
from numpy.core.overrides import set_module
def _unpack_tuple(tup):
if len(tup) == 1:
return tup[0]
else:
return tup
def _display_as_base(cls):
"""
A decorator that makes an exception class look like its base.
We use this to hide subclasses that are implementation details - the user
should catch the base type, which is what the traceback will show them.
Classes decorated with this decorator are subject to removal without a
deprecation warning.
"""
assert issubclass(cls, Exception)
cls.__name__ = cls.__base__.__name__
return cls
class UFuncTypeError(TypeError):
""" Base class for all ufunc exceptions """
def __init__(self, ufunc):
self.ufunc = ufunc
@_display_as_base
class _UFuncBinaryResolutionError(UFuncTypeError):
""" Thrown when a binary resolution fails """
def __init__(self, ufunc, dtypes):
super().__init__(ufunc)
self.dtypes = tuple(dtypes)
assert len(self.dtypes) == 2
def __str__(self):
return (
"ufunc {!r} cannot use operands with types {!r} and {!r}"
).format(
self.ufunc.__name__, *self.dtypes
)
@_display_as_base
class _UFuncNoLoopError(UFuncTypeError):
""" Thrown when a ufunc loop cannot be found """
def __init__(self, ufunc, dtypes):
super().__init__(ufunc)
self.dtypes = tuple(dtypes)
def __str__(self):
return (
"ufunc {!r} did not contain a loop with signature matching types "
"{!r} -> {!r}"
).format(
self.ufunc.__name__,
_unpack_tuple(self.dtypes[:self.ufunc.nin]),
_unpack_tuple(self.dtypes[self.ufunc.nin:])
)
@_display_as_base
class _UFuncCastingError(UFuncTypeError):
def __init__(self, ufunc, casting, from_, to):
super().__init__(ufunc)
self.casting = casting
self.from_ = from_
self.to = to
@_display_as_base
class _UFuncInputCastingError(_UFuncCastingError):
""" Thrown when a ufunc input cannot be casted """
def __init__(self, ufunc, casting, from_, to, i):
super().__init__(ufunc, casting, from_, to)
self.in_i = i
def __str__(self):
# only show the number if more than one input exists
i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
return (
"Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
"rule {!r}"
).format(
self.ufunc.__name__, i_str, self.from_, self.to, self.casting
)
@_display_as_base
class _UFuncOutputCastingError(_UFuncCastingError):
""" Thrown when a ufunc output cannot be casted """
def __init__(self, ufunc, casting, from_, to, i):
super().__init__(ufunc, casting, from_, to)
self.out_i = i
def __str__(self):
# only show the number if more than one output exists
i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
return (
"Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
"rule {!r}"
).format(
self.ufunc.__name__, i_str, self.from_, self.to, self.casting
)
# Exception used in shares_memory()
@set_module('numpy')
class TooHardError(RuntimeError):
pass
@set_module('numpy')
class AxisError(ValueError, IndexError):
"""Axis supplied was invalid.
This is raised whenever an ``axis`` parameter is specified that is larger
than the number of array dimensions.
For compatibility with code written against older numpy versions, which
raised a mixture of `ValueError` and `IndexError` for this situation, this
exception subclasses both to ensure that ``except ValueError`` and
``except IndexError`` statements continue to catch `AxisError`.
.. versionadded:: 1.13
Parameters
----------
axis : int or str
The out of bounds axis or a custom exception message.
If an axis is provided, then `ndim` should be specified as well.
ndim : int, optional
The number of array dimensions.
msg_prefix : str, optional
A prefix for the exception message.
Attributes
----------
axis : int, optional
The out of bounds axis or ``None`` if a custom exception
message was provided. This should be the axis as passed by
the user, before any normalization to resolve negative indices.
.. versionadded:: 1.22
ndim : int, optional
The number of array dimensions or ``None`` if a custom exception
message was provided.
.. versionadded:: 1.22
Examples
--------
>>> array_1d = np.arange(10)
>>> np.cumsum(array_1d, axis=1)
Traceback (most recent call last):
...
numpy.AxisError: axis 1 is out of bounds for array of dimension 1
Negative axes are preserved:
>>> np.cumsum(array_1d, axis=-2)
Traceback (most recent call last):
...
numpy.AxisError: axis -2 is out of bounds for array of dimension 1
The class constructor generally takes the axis and arrays'
dimensionality as arguments:
>>> print(np.AxisError(2, 1, msg_prefix='error'))
error: axis 2 is out of bounds for array of dimension 1
Alternatively, a custom exception message can be passed:
>>> print(np.AxisError('Custom error message'))
Custom error message
"""
__slots__ = ("axis", "ndim", "_msg")
def __init__(self, axis, ndim=None, msg_prefix=None):
if ndim is msg_prefix is None:
# single-argument form: directly set the error message
self._msg = axis
self.axis = None
self.ndim = None
else:
self._msg = msg_prefix
self.axis = axis
self.ndim = ndim
def __str__(self):
axis = self.axis
ndim = self.ndim
if axis is ndim is None:
return self._msg
else:
msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
if self._msg is not None:
msg = f"{self._msg}: {msg}"
return msg
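# Compatibility sketch (added for exposition; not part of the original
# source): the dual inheritance means handlers written against either
# historical exception type keep working.
# >>> try:
# ...     np.arange(10).sum(axis=1)
# ... except ValueError:        # an `except IndexError` clause also catches it
# ...     print("caught")
# caught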
@_display_as_base
class _ArrayMemoryError(MemoryError):
""" Thrown when an array cannot be allocated"""
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
@property
def _total_size(self):
num_bytes = self.dtype.itemsize
for dim in self.shape:
num_bytes *= dim
return num_bytes
@staticmethod
def _size_to_string(num_bytes):
""" Convert a number of bytes into a binary size string """
# https://en.wikipedia.org/wiki/Binary_prefix
LOG2_STEP = 10
STEP = 1024
units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
unit_val = 1 << (unit_i * LOG2_STEP)
n_units = num_bytes / unit_val
del unit_val
# ensure we pick a unit that is correct after rounding
if round(n_units) == STEP:
unit_i += 1
n_units /= STEP
# deal with sizes so large that we don't have units for them
if unit_i >= len(units):
new_unit_i = len(units) - 1
n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
unit_i = new_unit_i
unit_name = units[unit_i]
# format with a sensible number of digits
if unit_i == 0:
# no decimal point on bytes
return '{:.0f} {}'.format(n_units, unit_name)
elif round(n_units) < 1000:
# 3 significant figures, if none are dropped to the left of the .
return '{:#.3g} {}'.format(n_units, unit_name)
else:
# just give all the digits otherwise
return '{:#.0f} {}'.format(n_units, unit_name)
def __str__(self):
size_str = self._size_to_string(self._total_size)
return (
"Unable to allocate {} for an array with shape {} and data type {}"
.format(size_str, self.shape, self.dtype)
)
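# Formatting sketch (added for exposition; not part of the original source):
# >>> _ArrayMemoryError._size_to_string(1023)
# '1023 bytes'
# >>> _ArrayMemoryError._size_to_string(1024 * 1024)
# '1.00 MiB'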
| 8,344 | Python | 29.680147 | 79 | 0.585451 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/einsumfunc.py | """
Implementation of optimized einsum.
"""
import itertools
import operator
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asanyarray, tensordot
from numpy.core.overrides import array_function_dispatch
__all__ = ['einsum', 'einsum_path']
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
einsum_symbols_set = set(einsum_symbols)
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
"""
Computes the number of FLOPS in the contraction.
Parameters
----------
idx_contraction : iterable
The indices involved in the contraction
inner : bool
Does this contraction require an inner product?
num_terms : int
The number of terms in a contraction
size_dictionary : dict
The size of each of the indices in idx_contraction
Returns
-------
flop_count : int
The total number of FLOPS required for the contraction.
Examples
--------
>>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
30
>>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
60
"""
overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
op_factor = max(1, num_terms - 1)
if inner:
op_factor += 1
return overall_size * op_factor
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
def _find_contraction(positions, input_sets, output_set):
"""
Finds the contraction for a given set of input and output sets.
Parameters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted, the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = (idx_contract - new_result)
remaining.append(new_result)
return (new_result, remaining, idx_removed, idx_contract)
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
"""
Computes all possible pair contractions, sieves the results based
on ``memory_limit`` and returns the lowest cost path. This algorithm
scales factorial with respect to the elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
full_results = [(0, [], input_sets)]
for iteration in range(len(input_sets) - 1):
iter_results = []
# Compute all unique pairs
for curr in full_results:
cost, positions, remaining = curr
for con in itertools.combinations(range(len(input_sets) - iteration), 2):
# Find the contraction
cont = _find_contraction(con, remaining, output_set)
new_result, new_input_sets, idx_removed, idx_contract = cont
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(new_result, idx_dict)
if new_size > memory_limit:
continue
# Build (total_cost, positions, indices_remaining)
total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
new_pos = positions + [con]
iter_results.append((total_cost, new_pos, new_input_sets))
# Update combinatorial list, if we did not find anything return best
# path + remaining contractions
if iter_results:
full_results = iter_results
else:
path = min(full_results, key=lambda x: x[0])[1]
path += [tuple(range(len(input_sets) - iteration))]
return path
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
return [tuple(range(len(input_sets)))]
path = min(full_results, key=lambda x: x[0])[1]
return path
def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
"""Compute the cost (removed size + flops) and resultant indices for
performing the contraction specified by ``positions``.
Parameters
----------
positions : tuple of int
The locations of the proposed tensors to contract.
input_sets : list of sets
The indices found on each tensors.
output_set : set
The output indices of the expression.
idx_dict : dict
Mapping of each index to its size.
memory_limit : int
The total allowed size for an intermediary tensor.
path_cost : int
The contraction cost so far.
naive_cost : int
The cost of the unoptimized expression.
Returns
-------
cost : (int, int)
A tuple containing the size of any indices removed, and the flop cost.
positions : tuple of int
The locations of the proposed tensors to contract.
new_input_sets : list of sets
The resulting new list of indices if this proposed contraction is performed.
"""
# Find the contraction
contract = _find_contraction(positions, input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(idx_result, idx_dict)
if new_size > memory_limit:
return None
# Build sort tuple
old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
removed_size = sum(old_sizes) - new_size
# NB: removed_size used to be just the size of any removed indices i.e.:
# helpers.compute_size_by_dict(idx_removed, idx_dict)
cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
sort = (-removed_size, cost)
# Sieve based on total cost as well
if (path_cost + cost) > naive_cost:
return None
# Add contraction to possible choices
return [sort, positions, new_input_sets]
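# Minimal call sketch (added for exposition; not part of the original
# source), reusing the dot-product case from ``_find_contraction``:
# >>> isets = [set('ab'), set('bc')]
# >>> _parse_possible_contraction((0, 1), isets, set('ac'),
# ... {'a': 2, 'b': 3, 'c': 4}, 5000, 0, 10**8)
# [(-10, 48), (0, 1), [{'a', 'c'}]] # may vary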
def _update_other_results(results, best):
"""Update the positions and provisional input_sets of ``results`` based on
performing the contraction result ``best``. Remove any involving the tensors
contracted.
Parameters
----------
results : list
List of contraction results produced by ``_parse_possible_contraction``.
best : list
The best contraction of ``results`` i.e. the one that will be performed.
Returns
-------
mod_results : list
The list of modified results, updated with outcome of ``best`` contraction.
"""
best_con = best[1]
bx, by = best_con
mod_results = []
for cost, (x, y), con_sets in results:
# Ignore results involving tensors just contracted
if x in best_con or y in best_con:
continue
# Update the input_sets
del con_sets[by - int(by > x) - int(by > y)]
del con_sets[bx - int(bx > x) - int(bx > y)]
con_sets.insert(-1, best[2][-1])
# Update the position indices
mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
mod_results.append((cost, mod_con, con_sets))
return mod_results
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
"""
Finds the path by contracting the best pair until the input list is
exhausted. The best pair is found by minimizing the tuple
``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
matrix multiplication or inner product operations, then Hadamard like
operations, and finally outer operations. Outer products are limited by
``memory_limit``. This algorithm scales cubically with respect to the
number of elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The greedy contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
# Handle trivial cases that leaked through
if len(input_sets) == 1:
return [(0,)]
elif len(input_sets) == 2:
return [(0, 1)]
# Build up a naive cost
contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
# Initially iterate over all pairs
comb_iter = itertools.combinations(range(len(input_sets)), 2)
known_contractions = []
path_cost = 0
path = []
for iteration in range(len(input_sets) - 1):
# Iterate over all pairs on first step, only previously found pairs on subsequent steps
for positions in comb_iter:
# Always initially ignore outer products
if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
continue
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
naive_cost)
if result is not None:
known_contractions.append(result)
# If we do not have an inner contraction, rescan pairs including outer products
if len(known_contractions) == 0:
# Then check the outer products
for positions in itertools.combinations(range(len(input_sets)), 2):
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
path_cost, naive_cost)
if result is not None:
known_contractions.append(result)
# If we still did not find any remaining contractions, default back to einsum-like behavior
if len(known_contractions) == 0:
path.append(tuple(range(len(input_sets))))
break
# Sort based on first index
best = min(known_contractions, key=lambda x: x[0])
# Now propagate as many unused contractions as possible to next iteration
known_contractions = _update_other_results(known_contractions, best)
# Next iteration only compute contractions with the new tensor
# All other contractions have been accounted for
input_sets = best[2]
new_tensor_pos = len(input_sets) - 1
comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
# Update path and total cost
path.append(best[1])
path_cost += best[0][1]
return path
def _can_dot(inputs, result, idx_removed):
"""
Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
Parameters
----------
inputs : list of str
Specifies the subscripts for summation.
result : str
Resulting summation.
idx_removed : set
Indices that are removed in the summation
Returns
-------
type : bool
Returns true if BLAS should and can be used, else False
Notes
-----
    If the operation is BLAS level 1 or 2 and the data are not already
    aligned, we default back to einsum, as the memory movement required
    to copy is more costly than the operation itself.
Examples
--------
# Standard GEMM operation
>>> _can_dot(['ij', 'jk'], 'ik', set('j'))
True
# Can use the standard BLAS, but requires odd data movement
>>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
False
# DDOT where the memory is not aligned
>>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
False
"""
# All `dot` calls remove indices
if len(idx_removed) == 0:
return False
# BLAS can only handle two operands
if len(inputs) != 2:
return False
input_left, input_right = inputs
for c in set(input_left + input_right):
# can't deal with repeated indices on same input or more than 2 total
nl, nr = input_left.count(c), input_right.count(c)
if (nl > 1) or (nr > 1) or (nl + nr > 2):
return False
# can't do implicit summation or dimension collapse e.g.
# "ab,bc->c" (implicitly sum over 'a')
# "ab,ca->ca" (take diagonal of 'a')
if nl + nr - 1 == int(c in result):
return False
# Build a few temporaries
set_left = set(input_left)
set_right = set(input_right)
keep_left = set_left - idx_removed
keep_right = set_right - idx_removed
rs = len(idx_removed)
# At this point we are a DOT, GEMV, or GEMM operation
# Handle inner products
# DDOT with aligned data
if input_left == input_right:
return True
# DDOT without aligned data (better to use einsum)
if set_left == set_right:
return False
# Handle the 4 possible (aligned) GEMV or GEMM cases
# GEMM or GEMV no transpose
if input_left[-rs:] == input_right[:rs]:
return True
# GEMM or GEMV transpose both
if input_left[:rs] == input_right[-rs:]:
return True
# GEMM or GEMV transpose right
if input_left[-rs:] == input_right[-rs:]:
return True
# GEMM or GEMV transpose left
if input_left[:rs] == input_right[:rs]:
return True
# Einsum is faster than GEMV if we have to copy data
if not keep_left or not keep_right:
return False
# We are a matrix-matrix product, but we need to copy data
return True
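def _can_dot_demo():
    # Hypothetical helper, not part of NumPy: exercises the aligned GEMM
    # cases checked above. BLAS applies when the contracted index sits at
    # matching ends of the two operands, possibly via a transpose.
    assert _can_dot(['ij', 'jk'], 'ik', set('j'))    # no transpose
    assert _can_dot(['ji', 'jk'], 'ik', set('j'))    # transpose left
    assert _can_dot(['ij', 'kj'], 'ik', set('j'))    # transpose right
    assert not _can_dot(['ij', 'jk'], 'ijk', set())  # nothing is contracted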
def _parse_einsum_input(operands):
"""
    A reproduction of einsum's C-side parsing in Python.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> np.random.seed(123)
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> _parse_einsum_input(('...a,...a->...', a, b))
('za,xza', 'xz', [a, b]) # may vary
>>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('za,xza', 'xz', [a, b]) # may vary
"""
if len(operands) == 0:
raise ValueError("No input operands")
if isinstance(operands[0], str):
subscripts = operands[0].replace(" ", "")
operands = [asanyarray(v) for v in operands[1:]]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = [asanyarray(v) for v in operand_list]
subscripts = ""
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += "..."
else:
try:
s = operator.index(s)
except TypeError as e:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis") from e
subscripts += einsum_symbols[s]
if num != last:
subscripts += ","
if output_list is not None:
subscripts += "->"
for s in output_list:
if s is Ellipsis:
subscripts += "..."
else:
try:
s = operator.index(s)
except TypeError as e:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis") from e
subscripts += einsum_symbols[s]
# Check for proper "->"
if ("-" in subscripts) or (">" in subscripts):
invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
if invalid or (subscripts.count("->") != 1):
raise ValueError("Subscripts can only contain one '->'.")
# Parse ellipses
if "." in subscripts:
used = subscripts.replace(".", "").replace(",", "").replace("->", "")
unused = list(einsum_symbols_set - set(used))
ellipse_inds = "".join(unused)
longest = 0
if "->" in subscripts:
input_tmp, output_sub = subscripts.split("->")
split_subscripts = input_tmp.split(",")
out_sub = True
else:
split_subscripts = subscripts.split(',')
out_sub = False
for num, sub in enumerate(split_subscripts):
if "." in sub:
if (sub.count(".") != 3) or (sub.count("...") != 1):
raise ValueError("Invalid Ellipses.")
# Take into account numerical values
if operands[num].shape == ():
ellipse_count = 0
else:
ellipse_count = max(operands[num].ndim, 1)
ellipse_count -= (len(sub) - 3)
if ellipse_count > longest:
longest = ellipse_count
if ellipse_count < 0:
raise ValueError("Ellipses lengths do not match.")
elif ellipse_count == 0:
split_subscripts[num] = sub.replace('...', '')
else:
rep_inds = ellipse_inds[-ellipse_count:]
split_subscripts[num] = sub.replace('...', rep_inds)
subscripts = ",".join(split_subscripts)
if longest == 0:
out_ellipse = ""
else:
out_ellipse = ellipse_inds[-longest:]
if out_sub:
subscripts += "->" + output_sub.replace("...", out_ellipse)
else:
# Special care for outputless ellipses
output_subscript = ""
tmp_subscripts = subscripts.replace(",", "")
for s in sorted(set(tmp_subscripts)):
if s not in (einsum_symbols):
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
normal_inds = ''.join(sorted(set(output_subscript) -
set(out_ellipse)))
subscripts += "->" + out_ellipse + normal_inds
# Build output string if does not exist
if "->" in subscripts:
input_subscripts, output_subscript = subscripts.split("->")
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(",", "")
output_subscript = ""
for s in sorted(set(tmp_subscripts)):
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError("Output character %s did not appear in the input"
% char)
# Make sure number operands is equivalent to the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError("Number of einsum subscripts must be equal to the "
"number of operands.")
return (input_subscripts, output_subscript, operands)
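def _parse_einsum_input_demo():
    # Hypothetical helper, not part of NumPy: shows that the interleaved
    # operand/sublist calling convention parses into an ordinary subscript
    # string. Assumes integer labels map onto ``einsum_symbols`` in order
    # (0 -> 'a', 1 -> 'b', 2 -> 'c', ...).
    import numpy as np
    a = np.ones((2, 3))
    b = np.ones((3, 4))
    in_subs, out_sub, ops = _parse_einsum_input(
        (a, [0, 1], b, [1, 2], [0, 2]))
    assert (in_subs, out_sub) == ('ab,bc', 'ac')
    return ops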
def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
# NOTE: technically, we should only dispatch on array-like arguments, not
# subscripts (given as strings). But separating operands into
# arrays/subscripts is a little tricky/slow (given einsum's two supported
# signatures), so as a practical shortcut we dispatch on everything.
# Strings will be ignored for dispatching since they don't define
# __array_function__.
return operands
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, optimize='greedy', einsum_call=False):
"""
einsum_path(subscripts, *operands, optimize='greedy')
Evaluates the lowest cost contraction order for an einsum expression by
considering the creation of intermediate arrays.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
*operands : list of array_like
These are the arrays for the operation.
optimize : {bool, list, tuple, 'greedy', 'optimal'}
Choose the type of path. If a tuple is provided, the second argument is
assumed to be the maximum intermediate size created. If only a single
argument is provided the largest input or output array size is used
as a maximum intermediate size.
* if a list is given that starts with ``einsum_path``, uses this as the
contraction path
* if False no optimization is taken
* if True defaults to the 'greedy' algorithm
* 'optimal' An algorithm that combinatorially explores all possible
      ways of contracting the listed tensors and chooses the least costly
path. Scales exponentially with the number of terms in the
contraction.
* 'greedy' An algorithm that chooses the best pair contraction
at each step. Effectively, this algorithm searches the largest inner,
Hadamard, and then outer products at each step. Scales cubically with
the number of terms in the contraction. Equivalent to the 'optimal'
path for most contractions.
Default is 'greedy'.
Returns
-------
path : list of tuples
A list representation of the einsum path.
string_repr : str
A printable representation of the einsum path.
Notes
-----
The resulting path indicates which terms of the input contraction should be
contracted first, the result of this contraction is then appended to the
end of the contraction list. This list can then be iterated over until all
intermediate contractions are complete.
See Also
--------
einsum, linalg.multi_dot
Examples
--------
We can begin with a chain dot example. In this case, it is optimal to
contract the ``b`` and ``c`` tensors first as represented by the first
element of the path ``(1, 2)``. The resulting tensor is added to the end
of the contraction and the remaining contraction ``(0, 1)`` is then
completed.
>>> np.random.seed(123)
>>> a = np.random.rand(2, 2)
>>> b = np.random.rand(2, 5)
>>> c = np.random.rand(5, 2)
>>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
>>> print(path_info[0])
['einsum_path', (1, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ij,jk,kl->il # may vary
Naive scaling: 4
Optimized scaling: 3
Naive FLOP count: 1.600e+02
Optimized FLOP count: 5.600e+01
Theoretical speedup: 2.857
Largest intermediate: 4.000e+00 elements
-------------------------------------------------------------------------
scaling current remaining
-------------------------------------------------------------------------
3 kl,jk->jl ij,jl->il
3 jl,ij->il il->il
A more complex index transformation example.
>>> I = np.random.rand(10, 10, 10, 10)
>>> C = np.random.rand(10, 10)
>>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
... optimize='greedy')
>>> print(path_info[0])
['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary
Naive scaling: 8
Optimized scaling: 5
Naive FLOP count: 8.000e+08
Optimized FLOP count: 8.000e+05
Theoretical speedup: 1000.000
Largest intermediate: 1.000e+04 elements
--------------------------------------------------------------------------
scaling current remaining
--------------------------------------------------------------------------
5 abcd,ea->bcde fb,gc,hd,bcde->efgh
5 bcde,fb->cdef gc,hd,cdef->efgh
5 cdef,gc->defg hd,defg->efgh
5 defg,hd->efgh efgh->efgh
"""
# Figure out what the path really is
path_type = optimize
if path_type is True:
path_type = 'greedy'
if path_type is None:
path_type = False
explicit_einsum_path = False
memory_limit = None
# No optimization or a named path algorithm
if (path_type is False) or isinstance(path_type, str):
pass
# Given an explicit path
elif len(path_type) and (path_type[0] == 'einsum_path'):
explicit_einsum_path = True
# Path tuple with memory limit
elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
isinstance(path_type[1], (int, float))):
memory_limit = int(path_type[1])
path_type = path_type[0]
else:
raise TypeError("Did not understand the path: %s" % str(path_type))
# Hidden option, only einsum should call this
einsum_call_arg = einsum_call
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
# Build a few useful list and sets
input_list = input_subscripts.split(',')
input_sets = [set(x) for x in input_list]
output_set = set(output_subscript)
indices = set(input_subscripts.replace(',', ''))
# Get length of each unique dimension and ensure all dimensions are correct
dimension_dict = {}
broadcast_indices = [[] for x in range(len(input_list))]
for tnum, term in enumerate(input_list):
sh = operands[tnum].shape
if len(sh) != len(term):
raise ValueError("Einstein sum subscript %s does not contain the "
"correct number of indices for operand %d."
                             % (input_list[tnum], tnum))
for cnum, char in enumerate(term):
dim = sh[cnum]
# Build out broadcast indices
if dim == 1:
broadcast_indices[tnum].append(char)
if char in dimension_dict.keys():
# For broadcasting cases we always want the largest dim size
if dimension_dict[char] == 1:
dimension_dict[char] = dim
elif dim not in (1, dimension_dict[char]):
raise ValueError("Size of label '%s' for operand %d (%d) "
"does not match previous terms (%d)."
% (char, tnum, dimension_dict[char], dim))
else:
dimension_dict[char] = dim
# Convert broadcast inds to sets
broadcast_indices = [set(x) for x in broadcast_indices]
# Compute size of each input array plus the output array
size_list = [_compute_size_by_dict(term, dimension_dict)
for term in input_list + [output_subscript]]
max_size = max(size_list)
if memory_limit is None:
memory_arg = max_size
else:
memory_arg = memory_limit
# Compute naive cost
# This isn't quite right, need to look into exactly how einsum does this
inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
# Compute the path
if explicit_einsum_path:
path = path_type[1:]
elif (
(path_type is False)
or (len(input_list) in [1, 2])
or (indices == output_set)
):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
else:
        raise KeyError("Path name %s not found" % path_type)
cost_list, scale_list, size_list, contraction_list = [], [], [], []
# Build contraction tuple (positions, gemm, einsum_str, remaining)
for cnum, contract_inds in enumerate(path):
# Make sure we remove inds from right to left
contract_inds = tuple(sorted(list(contract_inds), reverse=True))
contract = _find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
bcast = set()
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
bcast |= broadcast_indices.pop(x)
new_bcast_inds = bcast - idx_removed
# If we're broadcasting, nix blas
if not len(idx_removed & bcast):
do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
else:
do_blas = False
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
else:
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
broadcast_indices.append(new_bcast_inds)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
if len(input_list) != 1:
        # Explicit "einsum_path" is usually trusted, but we detect this kind of
        # mistake in order to avoid returning an intermediate value.
        raise RuntimeError(
            "Invalid einsum_path is specified: {} more operands have to be "
            "contracted.".format(len(input_list) - 1))
if einsum_call_arg:
return (operands, contraction_list)
# Return the path along with a nice string representation
overall_contraction = input_subscripts + "->" + output_subscript
header = ("scaling", "current", "remaining")
speedup = naive_cost / opt_cost
max_i = max(size_list)
path_print = " Complete contraction: %s\n" % overall_contraction
path_print += " Naive scaling: %d\n" % len(indices)
path_print += " Optimized scaling: %d\n" % max(scale_list)
path_print += " Naive FLOP count: %.3e\n" % naive_cost
path_print += " Optimized FLOP count: %.3e\n" % opt_cost
path_print += " Theoretical speedup: %3.3f\n" % speedup
path_print += " Largest intermediate: %.3e elements\n" % max_i
path_print += "-" * 74 + "\n"
path_print += "%6s %24s %40s\n" % header
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
path = ['einsum_path'] + path
return (path, path_print)
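def _explicit_path_demo():
    # Hypothetical helper, not part of NumPy: an explicit ``['einsum_path',
    # ...]`` list is trusted and bypasses the path search entirely, as
    # handled by the ``explicit_einsum_path`` branch above.
    import numpy as np
    a = b = c = np.ones((2, 2))
    path, _ = einsum_path('ij,jk,kl->il', a, b, c,
                          optimize=['einsum_path', (1, 2), (0, 1)])
    assert path == ['einsum_path', (1, 2), (0, 1)]
    return path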
def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
# Arguably we dispatch on more arguments than we really should; see note in
# _einsum_path_dispatcher for why.
yield from operands
yield out
# Rewrite einsum to handle different cases
@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, out=None, optimize=False, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
einops :
similar verbose interface is provided by
`einops <https://github.com/arogozhnikov/einops>`_ package to cover
additional operations: transpose, reshape/flatten, repeat/tile,
squeeze/unsqueeze and reductions.
opt_einsum :
`opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
optimizes contraction order for einsum-like expressions
in backend-agnostic manner.
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
    If the output shape is not provided in this format, `einsum` is
    computed in implicit mode; otherwise it is performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
.. versionadded:: 1.12.0
Added the ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
    Typically a 'greedy' algorithm is applied, which empirical tests have shown
    returns the optimal path in the majority of cases. In some cases 'optimal'
    will return the best possible path through a more expensive, exhaustive search.
For iterative calculations it may be advisable to calculate the optimal path
once and reuse that path by supplying it as an argument. An example is given
below.
See :py:func:`numpy.einsum_path` for more details.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
Chained array operations. For more complicated contractions, speed ups
might be achieved by repeatedly computing a 'greedy' path or pre-computing the
'optimal' path and repeatedly applying it, using an
`einsum_path` insertion (since version 1.12.0). Performance improvements can be
particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
    Basic `einsum`: ~1520ms (benchmarked on a 3.1GHz Intel i5).
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
Greedy `einsum` (faster optimal path approximation): ~160ms
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
Optimal `einsum` (best usage pattern in some use cases): ~110ms
>>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
>>> for iteration in range(500):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
"""
# Special handling if out is specified
specified_out = out is not None
# If no optimization, run pure einsum
if optimize is False:
if specified_out:
kwargs['out'] = out
return c_einsum(*operands, **kwargs)
# Check the kwargs to avoid a more cryptic error later, without having to
# repeat default values here
valid_einsum_kwargs = ['dtype', 'order', 'casting']
unknown_kwargs = [k for (k, v) in kwargs.items() if
k not in valid_einsum_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
# Build the contraction list and operand
operands, contraction_list = einsum_path(*operands, optimize=optimize,
einsum_call=True)
    # Handle the order kwarg for the output array; c_einsum allows mixed case
output_order = kwargs.pop('order', 'K')
if output_order.upper() == 'A':
if all(arr.flags.f_contiguous for arr in operands):
output_order = 'F'
else:
output_order = 'C'
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
tmp_operands = [operands.pop(x) for x in inds]
# Do we need to deal with the output?
handle_out = specified_out and ((num + 1) == len(contraction_list))
# Call tensordot if still possible
if blas:
# Checks have already been handled
input_str, results_index = einsum_str.split('->')
input_left, input_right = input_str.split(',')
tensor_result = input_left + input_right
for s in idx_rm:
tensor_result = tensor_result.replace(s, "")
# Find indices to contract over
left_pos, right_pos = [], []
for s in sorted(idx_rm):
left_pos.append(input_left.find(s))
right_pos.append(input_right.find(s))
# Contract!
new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
# Build a new view if needed
if (tensor_result != results_index) or handle_out:
if handle_out:
kwargs["out"] = out
new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)
# Call einsum
else:
# If out was specified
if handle_out:
kwargs["out"] = out
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
# Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
return out
else:
return asanyarray(operands[0], order=output_order)
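def _einsum_path_reuse_demo():
    # Hypothetical usage sketch, not part of NumPy: precompute a contraction
    # path once with einsum_path and feed it back through ``optimize=`` on
    # repeated einsum calls, as recommended in the docstring above.
    import numpy as np
    a = np.ones((4, 4))
    path = np.einsum_path('ij,jk,kl->il', a, a, a, optimize='optimal')[0]
    res = np.einsum('ij,jk,kl->il', a, a, a, optimize=path)
    assert res.shape == (4, 4)
    return res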
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_dtype_ctypes.py
"""
Conversion from ctypes to dtype.
In an ideal world, we could achieve this through the PEP3118 buffer protocol,
something like::
def dtype_from_ctypes_type(t):
# needed to ensure that the shape of `t` is within memoryview.format
class DummyStruct(ctypes.Structure):
_fields_ = [('a', t)]
# empty to avoid memory allocation
ctype_0 = (DummyStruct * 0)()
mv = memoryview(ctype_0)
# convert the struct, and slice back out the field
return _dtype_from_pep3118(mv.format)['a']
Unfortunately, this fails because:
* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
* PEP3118 cannot represent unions, but both numpy and ctypes can
* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
"""
# We delay-import ctypes for distributions that do not include it.
# While this module is not used unless the user passes in ctypes
# members, it is eagerly imported from numpy/core/__init__.py.
import numpy as np
def _from_ctypes_array(t):
return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
def _from_ctypes_structure(t):
for item in t._fields_:
if len(item) > 2:
raise TypeError(
"ctypes bitfields have no dtype equivalent")
if hasattr(t, "_pack_"):
import ctypes
formats = []
offsets = []
names = []
current_offset = 0
for fname, ftyp in t._fields_:
names.append(fname)
formats.append(dtype_from_ctypes_type(ftyp))
            # Each type has a default alignment; this is platform dependent
            # for some types.
effective_pack = min(t._pack_, ctypes.alignment(ftyp))
current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
offsets.append(current_offset)
current_offset += ctypes.sizeof(ftyp)
return np.dtype(dict(
formats=formats,
offsets=offsets,
names=names,
itemsize=ctypes.sizeof(t)))
else:
fields = []
for fname, ftyp in t._fields_:
fields.append((fname, dtype_from_ctypes_type(ftyp)))
# by default, ctypes structs are aligned
return np.dtype(fields, align=True)
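def _packed_struct_demo():
    # Hypothetical helper, not part of NumPy: a packed ctypes Structure maps
    # to a dtype with the manually computed offsets above, so the uint32
    # field lands at offset 1 rather than the aligned offset 4.
    import ctypes
    class Packed(ctypes.Structure):
        _pack_ = 1
        _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
    dt = dtype_from_ctypes_type(Packed)
    assert dt.itemsize == 5
    assert dt.fields['b'][1] == 1  # fields map to (dtype, offset) pairs
    return dt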
def _from_ctypes_scalar(t):
"""
Return the dtype type with endianness included if it's the case
"""
if getattr(t, '__ctype_be__', None) is t:
return np.dtype('>' + t._type_)
elif getattr(t, '__ctype_le__', None) is t:
return np.dtype('<' + t._type_)
else:
return np.dtype(t._type_)
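def _scalar_endianness_demo():
    # Hypothetical helper, not part of NumPy: an explicitly big-endian ctypes
    # scalar carries its byte order into the dtype via the identity checks
    # above. Relies on ctypes providing ``__ctype_be__`` swapped variants.
    import ctypes
    dt = dtype_from_ctypes_type(ctypes.c_uint32.__ctype_be__)
    assert dt == np.dtype('>u4')
    return dt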
def _from_ctypes_union(t):
import ctypes
formats = []
offsets = []
names = []
for fname, ftyp in t._fields_:
names.append(fname)
formats.append(dtype_from_ctypes_type(ftyp))
offsets.append(0) # Union fields are offset to 0
return np.dtype(dict(
formats=formats,
offsets=offsets,
names=names,
itemsize=ctypes.sizeof(t)))
def dtype_from_ctypes_type(t):
"""
Construct a dtype object from a ctypes type
"""
import _ctypes
if issubclass(t, _ctypes.Array):
return _from_ctypes_array(t)
elif issubclass(t, _ctypes._Pointer):
raise TypeError("ctypes pointers have no dtype equivalent")
elif issubclass(t, _ctypes.Structure):
return _from_ctypes_structure(t)
elif issubclass(t, _ctypes.Union):
return _from_ctypes_union(t)
elif isinstance(getattr(t, '_type_', None), str):
return _from_ctypes_scalar(t)
else:
raise NotImplementedError(
"Unknown ctypes type {}".format(t.__name__))
# File: omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_add_newdocs.py
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from numpy.core.function_base import add_newdoc
from numpy.core.overrides import array_function_like_doc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<class 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> next(fl)
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> next(fl)
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0)
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* ``buffered`` enables buffering when required.
* ``c_index`` causes a C-order index to be tracked.
* ``f_index`` causes a Fortran-order index to be tracked.
* ``multi_index`` causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* ``common_dtype`` causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* ``copy_if_overlap`` causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* ``delay_bufalloc`` delays allocation of the buffers until
a reset() call is made. Allows ``allocate`` operands to
be initialized before their values are copied into the buffers.
* ``external_loop`` causes the ``values`` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* ``grow_inner`` allows the ``value`` array sizes to be made
larger than the buffer size when both ``buffered`` and
``external_loop`` is used.
* ``ranged`` allows the iterator to be restricted to a sub-range
of the iterindex values.
* ``refs_ok`` enables iteration of reference types, such as
object arrays.
* ``reduce_ok`` enables iteration of ``readwrite`` operands
which are broadcasted, also known as reduction operands.
* ``zerosize_ok`` allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
``readonly``, ``readwrite``, or ``writeonly`` must be specified.
* ``readonly`` indicates the operand will only be read from.
* ``readwrite`` indicates the operand will be read from and written to.
* ``writeonly`` indicates the operand will only be written to.
* ``no_broadcast`` prevents the operand from being broadcasted.
* ``contig`` forces the operand data to be contiguous.
* ``aligned`` forces the operand data to be aligned.
* ``nbo`` forces the operand data to be in native byte order.
* ``copy`` allows a temporary read-only copy if required.
* ``updateifcopy`` allows a temporary read-write copy if required.
* ``allocate`` causes the array to be allocated if it is None
in the ``op`` parameter.
* ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
* ``arraymask`` indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* ``writemasked`` indicates that only elements where the chosen
``arraymask`` operand is True will be written to.
* ``overlap_assume_elementwise`` can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when ``copy_if_overlap`` is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of ``allocate`` operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
        If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as `newaxis`.
itershape : tuple of ints, optional
The desired shape of the iterator. This allows ``allocate`` operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the ``delay_bufalloc`` flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the ``c_index`` or
the ``f_index`` flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the ``multi_index`` flag,
and the property `multi_index` can be used to retrieve it.
index
When the ``c_index`` or ``f_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_index`` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern. Valid only before the iterator
is closed.
multi_index
When the ``multi_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
        and ``has_multi_index`` is False.
ndim : int
The dimensions of the iterator.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over. Valid only before the iterator is
closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of ``operands`` at current iteration. Normally, this is a
tuple of array scalars, but if the flag ``external_loop`` is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the coordinates or index of an iterator, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol:
>>> def iter_add_py(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... for (a, b, c) in it:
... addop(a, b, out=c)
... return it.operands[2]
Here is the same function, but following the C-style pattern:
>>> def iter_add(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... while not it.finished:
... addop(it[0], it[1], out=it[2])
... it.iternext()
... return it.operands[2]
Here is an example outer product function:
>>> def outer_it(x, y, out=None):
... mulop = np.multiply
... it = np.nditer([x, y, out], ['external_loop'],
... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
... [-1] * x.ndim + list(range(y.ndim)),
... None])
... with it:
... for (a, b, c) in it:
... mulop(a, b, out=c)
... return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc:
>>> def luf(lamdaexpr, *args, **kwargs):
... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
... nargs = len(args)
... op = (kwargs.get('out',None),) + args
... it = np.nditer(op, ['buffered','external_loop'],
... [['writeonly','allocate','no_broadcast']] +
... [['readonly','nbo','aligned']]*nargs,
... order=kwargs.get('order','K'),
... casting=kwargs.get('casting','safe'),
... buffersize=kwargs.get('buffersize',0))
... while not it.finished:
... it[0] = lamdaexpr(*it[1:])
... it.iternext()
... return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
If operand flags ``"writeonly"`` or ``"readwrite"`` are used the
operands may be views into the original data with the
`WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
context manager or the `nditer.close` method must be called before
using the result. The temporary data will be written back to the
original data when the `__exit__` function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with np.nditer(a, [],
... [['writeonly', 'updateifcopy']],
... casting='unsafe',
... op_dtypes=[np.dtype('f4')]) as i:
... x = i.operands[0]
... x[:] = [-1, -2, -3]
... # a still unchanged here
>>> a, x
(array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
It is important to note that once the iterator is exited, dangling
references (like `x` in the example) may or may not share data with
the original data `a`. If writeback semantics were active, i.e. if
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator
will sever the connection between `x` and `a`, writing to `x` will
no longer write to `a`. If writeback semantics are not active, then
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
Context management and the `close` method appeared in version 1.15.0.
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> next(it)
(array(0), array(1))
>>> it2 = it.copy()
>>> next(it2)
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('operands',
"""
operands[`Slice`]
The array(s) to be iterated over. Valid only before the iterator is closed.
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
without returning the result. Used in the C-style pattern do-while
pattern. For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i, /)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
add_newdoc('numpy.core', 'nested_iters',
"""
nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \
order="K", casting="safe", buffersize=0)
Create nditers for use in nested loops
Create a tuple of `nditer` objects which iterate in nested loops over
different axes of the op argument. The first iterator is used in the
outermost loop, the last in the innermost loop. Advancing one will change
the subsequent iterators to point at its new element.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
axes : list of list of int
Each item is used as an "op_axes" argument to an nditer
flags, op_flags, op_dtypes, order, casting, buffersize (optional)
See `nditer` parameters of the same name
Returns
-------
iters : tuple of nditer
An nditer for each item in `axes`, outermost first
See Also
--------
nditer
Examples
--------
Basic usage. Note how y is the "flattened" version of
[a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified
the first iter's axes as [1]
>>> a = np.arange(12).reshape(2, 3, 2)
>>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
>>> for x in i:
... print(i.multi_index)
... for y in j:
... print('', j.multi_index, y)
(0,)
(0, 0) 0
(0, 1) 1
(1, 0) 6
(1, 1) 7
(1,)
(0, 0) 2
(0, 1) 3
(1, 0) 8
(1, 1) 9
(2,)
(0, 0) 4
(0, 1) 5
(1, 0) 10
(1, 1) 11
""")
add_newdoc('numpy.core', 'nditer', ('close',
"""
close()
Resolve all writeback semantics in writeable operands.
.. versionadded:: 1.15.0
See Also
--------
:ref:`nditer-context-manager`
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
broadcast_shapes
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[5., 6., 7.],
[6., 7., 8.],
[7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> next(row), next(col)
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
like=None)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
If object is a scalar, a 0-dimensional array containing object is
returned.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for 'A', see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be prepended to the shape as
needed to meet this requirement.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
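# Editor's illustrative sketch (not part of the NumPy sources): how the
# `order` argument of np.array interacts with copying for a Fortran-ordered
# input, matching the table in the docstring above.
import numpy as np
_f = np.asfortranarray(np.arange(6).reshape(2, 3))   # F-contiguous input
_k = np.array(_f, order='K')    # 'K' preserves the Fortran layout on copy
_c = np.array(_f, order='C')    # 'C' forces a row-major copy
assert _k.flags['F_CONTIGUOUS'] and _c.flags['C_CONTIGUOUS']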
add_newdoc('numpy.core.multiarray', 'asarray',
"""
asarray(a, dtype=None, order=None, *, like=None)
Convert the input to an array.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F', 'A', 'K'}, optional
Memory layout. 'A' and 'K' depend on the order of input array a.
'C' row-major (C-style),
'F' column-major (Fortran-style) memory representation.
'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
'K' (keep) preserve input order
Defaults to 'K'.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray with matching dtype and order. If `a` is a
subclass of ndarray, a base class ndarray is returned.
See Also
--------
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asarray(a)
array([1, 2])
Existing arrays are not copied:
>>> a = np.array([1, 2])
>>> np.asarray(a) is a
True
If `dtype` is set, array is copied only if dtype does not match:
>>> a = np.array([1, 2], dtype=np.float32)
>>> np.asarray(a, dtype=np.float32) is a
True
>>> np.asarray(a, dtype=np.float64) is a
False
Contrary to `asanyarray`, ndarray subclasses are not passed through:
>>> issubclass(np.recarray, np.ndarray)
True
>>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
>>> np.asarray(a) is a
False
>>> np.asanyarray(a) is a
True
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'asanyarray',
"""
asanyarray(a, dtype=None, order=None, *, like=None)
Convert the input to an ndarray, but pass ndarray subclasses through.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes scalars, lists, lists of tuples, tuples, tuples of tuples,
tuples of lists, and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F', 'A', 'K'}, optional
Memory layout. 'A' and 'K' depend on the order of input array a.
'C' row-major (C-style),
'F' column-major (Fortran-style) memory representation.
'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
'K' (keep) preserve input order
Defaults to 'C'.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray or an ndarray subclass
Array interpretation of `a`. If `a` is an ndarray or a subclass
of ndarray, it is returned as-is and no copy is performed.
See Also
--------
asarray : Similar function which always returns ndarrays.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and
Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asanyarray(a)
array([1, 2])
Instances of `ndarray` subclasses are passed through as-is:
>>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
>>> np.asanyarray(a) is a
True
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'ascontiguousarray',
"""
ascontiguousarray(a, dtype=None, *, like=None)
Return a contiguous array (ndim >= 1) in memory (C order).
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
Data-type of returned array.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Contiguous array of same shape and content as `a`, with type `dtype`
if specified.
See Also
--------
asfortranarray : Convert input to an ndarray with column-major
memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> np.ascontiguousarray(x, dtype=np.float32)
array([[0., 1., 2.],
[3., 4., 5.]], dtype=float32)
>>> x.flags['C_CONTIGUOUS']
True
Note: This function returns an array with at least one-dimension (1-d)
so it will not preserve 0-d arrays.
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
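# Editor's illustrative sketch (not part of the NumPy sources): the note
# above about 0-d inputs -- ascontiguousarray always returns an array with
# at least one dimension.
import numpy as np
_z = np.array(5)                        # a 0-d array
_c0 = np.ascontiguousarray(_z)
assert _z.ndim == 0 and _c0.ndim == 1   # the 0-d input is promoted to 1-d
assert _c0[0] == 5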
add_newdoc('numpy.core.multiarray', 'asfortranarray',
"""
asfortranarray(a, dtype=None, *, like=None)
Return an array (ndim >= 1) laid out in Fortran order in memory.
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
By default, the data-type is inferred from the input data.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
The input `a` in Fortran, or column-major, order.
See Also
--------
ascontiguousarray : Convert input to a contiguous (C order) array.
asanyarray : Convert input to an ndarray with either row or
column-major memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> y = np.asfortranarray(x)
>>> x.flags['F_CONTIGUOUS']
False
>>> y.flags['F_CONTIGUOUS']
True
Note: This function returns an array with at least one-dimension (1-d)
so it will not preserve 0-d arrays.
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
        Desired output data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #uninitialized
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #uninitialized
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
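# Editor's illustrative sketch (not part of the NumPy sources): because
# np.empty leaves its contents arbitrary, the caller must assign every
# element before reading them back, as the Notes above caution.
import numpy as np
_buf = np.empty((2, 3))   # contents are arbitrary at this point
_buf[...] = 0.5           # initialize all elements explicitly
assert (_buf == 0.5).all()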
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, *, sep, like=None)
A new 1-D array initialized from text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format. Most builtin numeric types are
supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
The string separating numbers in the data; extra whitespace between
elements is also ignored.
.. deprecated:: 1.14
Passing ``sep=''``, the default, is deprecated since it will
trigger the deprecated binary mode of this function. This mode
interprets `string` as binary bytes, rather than ASCII text with
decimal numbers, an operation which is better spelt
``frombuffer(string, dtype, count)``. If `string` contains unicode
text, the binary mode of `fromstring` will first encode it into
bytes using either utf-8 (python 3) or the default encoding
            (Python 2), neither of which produces sane results.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
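# Editor's illustrative sketch (not part of the NumPy sources): the
# frombuffer spelling recommended above in place of fromstring's deprecated
# binary mode.
import numpy as np
_raw = b'\x01\x00\x02\x00'               # two little-endian uint16 values
_a16 = np.frombuffer(_raw, dtype='<u2')  # preferred over fromstring(sep='')
assert list(_a16) == [1, 2]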
add_newdoc('numpy.core.multiarray', 'compare_chararrays',
"""
compare_chararrays(a1, a2, cmp, rstrip)
Performs element-wise comparison of two string arrays using the
    comparison operator specified by `cmp`.
Parameters
----------
a1, a2 : array_like
Arrays to be compared.
cmp : {"<", "<=", "==", ">=", ">", "!="}
Type of comparison.
    rstrip : bool
        If True, trailing spaces are removed from the strings before
        the comparison.
Returns
-------
out : ndarray
        The boolean output array, with the same shape as `a1` and `a2`.
Raises
------
ValueError
        If `cmp` is not valid.
    TypeError
        If at least one of `a1` or `a2` is a non-string array.
Examples
--------
>>> a = np.array(["a", "b", "cde"])
>>> b = np.array(["a", "a", "dec"])
>>> np.compare_chararrays(a, b, ">", True)
array([False, True, False])
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iter, dtype, count=-1, *, like=None)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iter : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
.. versionchanged:: 1.23
Object and subarray dtypes are now supported (note that the final
result is not 1-D for a subarray dtype).
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
A carefully constructed subarray dtype will lead to higher dimensional
results:
>>> iterable = ((x+1, x+2) for x in range(5))
>>> np.fromiter(iterable, dtype=np.dtype((int, 2)))
array([[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6]])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
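# Editor's illustrative sketch (not part of the NumPy sources): passing
# `count` lets fromiter pre-allocate the output instead of resizing it on
# demand, as the Notes above recommend.
import numpy as np
_gen = (i * i for i in range(1000))
_sq = np.fromiter(_gen, dtype=np.int64, count=1000)  # one allocation
assert _sq[3] == 9 and _sq.shape == (1000,)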
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str or Path
Open file object or filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
Most builtin numeric types are supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
        whitespace character.
offset : int
The offset (in bytes) from the file's current position. Defaults to 0.
Only permitted for binary files.
.. versionadded:: 1.17.0
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
    See Also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import tempfile
>>> fname = tempfile.mkstemp()[1]
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
This function creates a view into the original object. This should be safe
in general, but it may make sense to copy the result when the original
object is mutable or untrusted.
Examples
--------
>>> s = b'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
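# Editor's illustrative sketch (not part of the NumPy sources): the Notes
# above say frombuffer returns a view into the original object; for an
# immutable ``bytes`` buffer that view is read-only, so copy before mutating.
import numpy as np
_s = b'hello world'
_v = np.frombuffer(_s, dtype='S1')
assert not _v.flags['WRITEABLE']   # read-only view of immutable bytes
_w = _v.copy()                     # take a writeable copy when needed
_w[0] = b'H'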
add_newdoc('numpy.core.multiarray', 'from_dlpack',
"""
from_dlpack(x, /)
Create a NumPy array from an object implementing the ``__dlpack__``
protocol. Generally, the returned NumPy array is a read-only view
of the input object. See [1]_ and [2]_ for more details.
Parameters
----------
x : object
A Python object that implements the ``__dlpack__`` and
``__dlpack_device__`` methods.
Returns
-------
out : ndarray
References
----------
.. [1] Array API documentation,
https://data-apis.org/array-api/latest/design_topics/data_interchange.html#syntax-for-data-interchange-with-dlpack
.. [2] Python specification for DLPack,
https://dmlc.github.io/dlpack/latest/python_spec.html
Examples
--------
>>> import torch
>>> x = torch.arange(10)
>>> # create a view of the torch tensor "x" in NumPy
>>> y = np.from_dlpack(x)
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None, *, like=None)
Return evenly spaced values within a given interval.
``arange`` can be called with a varying number of positional arguments:
* ``arange(stop)``: Values are generated within the half-open interval
``[0, stop)`` (in other words, the interval including `start` but
excluding `stop`).
* ``arange(start, stop)``: Values are generated within the half-open
interval ``[start, stop)``.
* ``arange(start, stop, step)`` Values are generated within the half-open
interval ``[start, stop)``, with spacing between values given by
``step``.
For integer arguments the function is roughly equivalent to the Python
built-in :py:class:`range`, but returns an ndarray rather than a ``range``
instance.
When using a non-integer step, such as 0.1, it is often better to use
`numpy.linspace`.
See the Warning sections below for more information.
Parameters
----------
start : integer or real, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : integer or real
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : integer or real, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
        step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
Warnings
--------
The length of the output might not be numerically stable.
Another stability issue is due to the internal implementation of
`numpy.arange`.
The actual step value used to populate the array is
``dtype(start + step) - dtype(start)`` and not `step`. Precision loss
can occur here, due to casting or due to using floating points when
`start` is much larger than `step`. This can lead to unexpected
behaviour. For example::
>>> np.arange(0, 5, 0.5, dtype=int)
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
>>> np.arange(-3, 3, 0.5, dtype=int)
array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
In such cases, the use of `numpy.linspace` should be preferred.
The built-in :py:class:`range` generates :std:doc:`Python built-in integers
that have arbitrary size <c-api/long>`, while `numpy.arange` produces
`numpy.int32` or `numpy.int64` numbers. This may result in incorrect
results for large integer values::
>>> power = 40
>>> modulo = 10000
>>> x1 = [(n ** power) % modulo for n in range(8)]
>>> x2 = [(n ** power) % modulo for n in np.arange(8)]
>>> print(x1)
[0, 1, 7776, 8801, 6176, 625, 6576, 4001] # correct
>>> print(x2)
[0, 1, 7776, 7185, 0, 5969, 4816, 3361] # incorrect
See Also
--------
numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
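# Editor's illustrative sketch (not part of the NumPy sources): the
# float-step pitfall described above, next to the linspace alternative the
# Warnings section recommends.
import numpy as np
_a = np.arange(0, 1, 0.1)        # length depends on float round-off
_l = np.linspace(0, 1, 11)       # explicit count, stable endpoints
assert len(_a) == 10 and len(_l) == 11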
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
.. deprecated:: 1.16
For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
For ndarray subclasses, define the ``__array_ufunc__`` method and
override the relevant ufunc.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. warning::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
    The returned data type is always considered "canonical"; this mainly
means that the promoted dtype will always be in native byte order.
This function is symmetric, but rarely associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
Please see `numpy.result_type` for additional information about promotion.
.. versionadded:: 1.6.0
    Starting in NumPy 1.9, the promote_types function returns a valid string
    length when given an integer or float dtype as one argument and a string
    dtype as the other. Previously it always returned the input string
    dtype, even if it was not long enough to store the maximum integer/float
    value converted to a string.
.. versionchanged:: 1.23.0
NumPy now supports promotion for more structured dtypes. It will now
remove unnecessary padding from a structure dtype and promote included
fields individually.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
An example of a non-associative case:
>>> p = np.promote_types
>>> p('S', p('i1', 'u1'))
dtype('S6')
>>> p(p('S', 'i1'), 'u1')
dtype('S4')
""")
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe')
*This documentation shadows that of the native python implementation of the `einsum` function,
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
        Controls if intermediate optimization should occur. No optimization
        occurs if False; if True, it defaults to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples; TODO).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
w.r.t. its `dtype.type <numpy.dtype.type>`.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[0.0e+000, 0.0e+000], # random
[ nan, 2.5e-323]])
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack__',
"""a.__dlpack__(*, stream=None)
DLPack Protocol: Part of the Array API."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack_device__',
"""a.__dlpack_device__()
DLPack Protocol: Part of the Array API."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
.. autoattribute:: numpy.core._internal._ctypes.data
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.shape
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.strides
:noindex:
.. automethod:: numpy.core._internal._ctypes.data_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.shape_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.strides_as
:noindex:
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
    the object will still have the ``_as_parameter_`` attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
>>> x
array([[0, 1],
[2, 3]], dtype=int32)
>>> x.ctypes.data
31962608 # may vary
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
<__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
c_uint(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
c_ulong(4294967296)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
.. warning::
Setting ``arr.dtype`` is discouraged and may be deprecated in the
future. Setting will replace the ``dtype`` without modifying the
memory (see also `ndarray.view` and `ndarray.astype`).
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
ndarray.astype : Cast the values contained in the array to a new data-type.
ndarray.view : Create a view of the same data but a different data-type.
numpy.dtype
Examples
--------
    >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
    >>> x
    array([[0, 1],
           [2, 3]], dtype=int32)
    >>> x.dtype
    dtype('int32')
    >>> type(x.dtype)
    <class 'numpy.dtype[int32]'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
WRITEBACKIFCOPY (X)
This array is a copy of some other array. The C-API function
        PyArray_ResolveWritebackIfCopy must be called before deallocating,
        so that the base array is updated with the contents of this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be
changed by the user, via direct assignment to the attribute or dictionary
entry, or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- WRITEBACKIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
    Fortran-style contiguous arrays.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<class 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
The shape property is usually used to get the current shape of an array,
but may also be used to reshape the array in-place by assigning a tuple of
array dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the size of
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
.. warning::
Setting ``arr.shape`` is discouraged and may be deprecated in the
future. Using `ndarray.reshape` is the preferred approach.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
>>> np.zeros((4,2))[::2].shape = (-1,)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: Incompatible shape for in-place modification. Use
`.reshape()` to make a copy with the desired shape.
See Also
--------
numpy.shape : Equivalent getter function.
numpy.reshape : Function similar to setting ``shape``.
ndarray.reshape : Method similar to setting ``shape``.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equal to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Notes
-----
`a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
calculations that may overflow a fixed size integer type.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
.. warning::
Setting ``arr.strides`` is discouraged and may be deprecated in the
future. `numpy.lib.stride_tricks.as_strided` should be preferred
to create a new view of the same data in a safer way.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
The transposed array.
Same as ``self.transpose()``.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
See Also
--------
transpose
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__([dtype], /) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""a.__array_finalize__(obj, /)
Present so subclasses can call super. Does nothing.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(array[, context], /)
Returns a view of `array` with the same type as self.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(array[, context], /)
Returns a view of `array` with the same type as self.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__()
Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
Equivalent to ``a.copy(order='K')``.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__class_getitem__',
"""a.__class_getitem__(item, /)
Return a parametrized wrapper around the `~numpy.ndarray` type.
.. versionadded:: 1.22
Returns
-------
alias : types.GenericAlias
A parametrized `~numpy.ndarray` type.
Examples
--------
>>> from typing import Any
>>> import numpy as np
>>> np.ndarray[Any, np.dtype[Any]]
numpy.ndarray[typing.Any, numpy.dtype[typing.Any]]
Notes
-----
This method is only available for python 3.9 and later.
See Also
--------
:pep:`585` : Type hinting generics in standard collections.
numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
w.r.t. its `dtype.type <numpy.dtype.type>`.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__(memo, /) -> Deep copy of array.
Used if :func:`copy.deepcopy` is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(state, /)
For unpickling.
The `state` argument must be a sequence that contains the following
elements:
Parameters
----------
version : int
        Optional pickle version. If omitted, defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False, *, where=True)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False, *, where=True)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None, *, keepdims=False)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None, *, keepdims=False)
Return indices of the minimum values along the given axis.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind=None, order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
.. versionchanged:: 1.17.0
Casting between a simple data type and a structured one is possible only
for "unsafe" casting. Casting to multiple fields is allowed, but
casting from multiple fields is not.
.. versionchanged:: 1.9.0
Casting from numeric to string types in 'safe' casting mode requires
that the string dtype length is long enough to store the max
integer/float value converted.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
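Casting that would violate the requested rule raises a ``TypeError``
(a minimal illustration):
>>> x.astype(np.int64, casting='safe')
Traceback (most recent call last):
...
TypeError: Cannot cast array data from dtype('float64') to dtype('int64') according to the rule 'safe'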
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Arrays of byte-strings are not swapped. The real and imaginary
parts of a complex number are swapped individually.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> list(map(hex, A))
['0x1', '0x100', '0x2233']
>>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> list(map(hex, A))
['0x100', '0x1', '0x3322']
Arrays of byte-strings are not swapped
>>> A = np.array([b'ceg', b'fac'])
>>> A.byteswap()
array([b'ceg', b'fac'], dtype='|S3')
``A.newbyteorder().byteswap()`` produces an array with the same values
but different representation in memory
>>> A = np.array([1, 2, 3])
>>> A.view(np.uint8)
array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0], dtype=uint8)
>>> A.newbyteorder().byteswap(inplace=True)
array([1, 2, 3])
>>> A.view(np.uint8)
array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
0, 3], dtype=uint8)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
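Examples
--------
A minimal illustration; see `numpy.choose` for the full behavior:
>>> a = np.array([0, 1, 0, 1])
>>> a.choose([[10, 20, 30, 40], [50, 60, 70, 80]])
array([10, 60, 30, 80])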
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None, **kwargs)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
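Examples
--------
A minimal illustration; see `numpy.clip` for the full set of options:
>>> a = np.arange(5)
>>> a.clip(1, 3)
array([1, 1, 2, 3, 3])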
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
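Examples
--------
A minimal illustration; see `numpy.compress` for the full behavior:
>>> a = np.array([1, 2, 3, 4])
>>> a.compress(a > 2)
array([3, 4])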
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar but have different default values for their order=
arguments, and this function always passes sub-classes through.)
See also
--------
numpy.copy : Similar function with different default behavior
numpy.copyto
Notes
-----
This function is the preferred method for creating an array copy. The
function :func:`numpy.copy` is similar, but it defaults to using order 'K',
and will not pass sub-classes through by default.
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
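Examples
--------
A minimal illustration; see `numpy.cumsum` for the full behavior:
>>> a = np.array([1, 2, 3])
>>> a.cumsum()
array([1, 3, 6])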
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
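Examples
--------
A minimal illustration; see `numpy.diagonal` for the full behavior:
>>> np.arange(4).reshape(2, 2).diagonal()
array([0, 3])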
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot'))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str or Path
A string naming the dump file.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads will convert the string back to an array.
Parameters
----------
None
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[1.+1.j, 0.+0.j],
[0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[1., 0.],
[0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[1., 0.],
[0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.item(3)
1
>>> x.item(7)
0
>>> x.item((0, 1))
2
>>> x.item((2, 2))
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and the last argument is defined
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[2, 2, 6],
[1, 0, 6],
[1, 0, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S', /)
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'little'} - little endian
* {'>', 'big'} - big endian
* {'=', 'native'} - native order, equivalent to `sys.byteorder`
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
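Examples
--------
A minimal illustration with a 2-byte integer dtype:
>>> A = np.array([1, 256], dtype=np.int16)
>>> B = A.newbyteorder()
>>> B.tobytes() == A.tobytes()  # same underlying bytes...
True
>>> int(B[0])  # ...reinterpreted in the opposite byte order
256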
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
Return the product of the array elements over the given axis.
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None, keepdims=False)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
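Examples
--------
A minimal illustration; see `numpy.put` for the full behavior:
>>> a = np.zeros(5, dtype=int)
>>> a.put([0, 2], [-44, -55])
>>> a
array([-44,   0, -55,   0,   0])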
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
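Examples
--------
A minimal illustration; see `numpy.repeat` for the full behavior:
>>> np.array([1, 2, 3]).repeat(2)
array([1, 1, 2, 2, 3, 3])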
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
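Examples
--------
A minimal illustration of the two equivalent call styles:
>>> a = np.arange(6)
>>> a.reshape(2, 3).shape == a.reshape((2, 3)).shape
True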
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that references or is referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
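Examples
--------
A minimal illustration; see `numpy.searchsorted` for the full behavior:
>>> a = np.array([1, 2, 3, 4, 5])
>>> a.searchsorted(3)
2
>>> a.searchsorted(3, side='right')
3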
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]], dtype=int32)
>>> x
array([[1.0e+000, 1.5e-323, 1.5e-323],
[1.5e-323, 1.0e+000, 1.5e-323],
[1.5e-323, 1.5e-323, 1.0e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY,
respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The WRITEBACKIFCOPY flag can never be set
to True. The flag WRITEABLE can only be set to True if the array owns its
own memory, or the ultimate owner of the memory exposes a writeable buffer
interface, or is a string. (The exception for string is made so that
unpickling can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 7 Boolean flags
in use, only three of which can be changed by the user:
WRITEBACKIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
called, the base array will be updated with the contents of this array.
All flags can be accessed using the single (upper case) letter as well
as the full name.
Examples
--------
>>> y = np.array([[3, 1, 7],
... [2, 0, 0],
... [8, 5, 9]])
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
WRITEBACKIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set WRITEBACKIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind=None, order=None)
Sort an array in-place. Refer to `numpy.sort` for full documentation.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with datatype. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
numpy.argsort : Indirect sort.
numpy.lexsort : Indirect stable sort on multiple keys.
numpy.searchsorted : Find elements in sorted array.
numpy.partition: Partial sort.
Notes
-----
See `numpy.sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([(b'c', 1), (b'a', 2)],
dtype=[('x', 'S1'), ('y', '<i8')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in the kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
.. deprecated:: 1.22.0
Passing booleans as index is deprecated.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove axes of length one from `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
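Examples
--------
A minimal illustration; see `numpy.squeeze` for the full behavior:
>>> a = np.zeros((1, 3, 1))
>>> a.squeeze().shape
(3,)
>>> a.squeeze(axis=0).shape
(3, 1)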
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
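Examples
--------
A minimal illustration; see `numpy.swapaxes` for the full behavior:
>>> a = np.zeros((2, 3, 4))
>>> a.swapaxes(0, 2).shape
(4, 3, 2)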
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
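Examples
--------
A minimal illustration; see `numpy.take` for the full behavior:
>>> a = np.array([4, 3, 5, 7])
>>> a.take([0, 2])
array([4, 5])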
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str or Path
An open file object, or a string containing a filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with file objects supporting compression (e.g., GzipFile)
or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
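Examples
--------
A minimal binary round trip through a scratch file (the path here is
illustrative only):
>>> import os, tempfile
>>> fname = os.path.join(tempfile.mkdtemp(), 'data.bin')
>>> np.array([1, 2, 3], dtype=np.int32).tofile(fname)
>>> np.fromfile(fname, dtype=np.int32).tolist()
[1, 2, 3]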
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible builtin Python type, via
the `~numpy.ndarray.item` function.
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
not be a list at all, but a simple Python scalar.
Parameters
----------
none
Returns
-------
y : object, or list of object, or list of list of object, or ...
The possibly nested list of array elements.
Notes
-----
The array may be recreated via ``a = np.array(a.tolist())``, although this
may sometimes lose precision.
Examples
--------
For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
except that ``tolist`` changes numpy scalars to Python scalars:
>>> a = np.uint32([1, 2])
>>> a_list = list(a)
>>> a_list
[1, 2]
>>> type(a_list[0])
<class 'numpy.uint32'>
>>> a_tolist = a.tolist()
>>> a_tolist
[1, 2]
>>> type(a_tolist[0])
<class 'int'>
Additionally, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
The base case for this recursion is a 0D array:
>>> a = np.array(1)
>>> list(a)
Traceback (most recent call last):
...
TypeError: iteration over a 0-d array
>>> a.tolist()
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """
a.tobytes(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object is produced in C-order by default.
This behavior is controlled by the ``order`` parameter.
.. versionadded:: 1.9.0
Parameters
----------
order : {'C', 'F', 'A'}, optional
Controls the memory layout of the bytes object. 'C' means C-order,
'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
Fortran contiguous, 'C' otherwise. Default is 'C'.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
>>> x.tobytes()
b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', r"""
a.tostring(order='C')
A compatibility alias for `tobytes`, with exactly the same behavior.
Despite its name, it returns `bytes` not `str`\ s.
.. deprecated:: 1.19.0
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array this has no effect, as a transposed vector is simply the
same vector. To convert a 1-D array into a 2D column vector, an additional
dimension must be added. `np.atleast_2d(a).T` achieves this, as does
`a[:, np.newaxis]`.
For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
transpose : Equivalent function
ndarray.T : Array property returning the array transposed.
ndarray.reshape : Give a new shape to an array without changing its data.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view([dtype][, type])
New view of array with the same data.
.. note::
Passing None for ``dtype`` is different from omitting the parameter,
since the former invokes ``dtype(None)`` which is an alias for
``dtype('float_')``.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
Omitting it results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, omission
of the parameter results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a regular
array to a structured array), then the last axis of ``a`` must be
contiguous. This axis will be resized in the result.
.. versionchanged:: 1.23.0
Only the last axis needs to be contiguous. Previously, the entire array
had to be C-contiguous.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> x
array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16)
>>> y = x[:, ::2]
>>> y
array([[1, 3],
[4, 6]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
...
ValueError: To change to a dtype of a different size, the last axis must be contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 3)],
[(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')])
However, views that change dtype are totally fine for arrays with a
contiguous last axis, even if the rest of the axes are not C-contiguous:
>>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4)
>>> x.transpose(1, 0, 2).view(np.int16)
array([[[ 256, 770],
[3340, 3854]],
<BLANKLINE>
[[1284, 1798],
[4368, 4882]],
<BLANKLINE>
[[2312, 2826],
[5396, 5910]]], dtype=int16)
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, /, nin, nout, *[, identity])
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
identity : object, optional
The value to use for the `~numpy.ufunc.identity` attribute of the resulting
object. If specified, this is equivalent to setting the underlying
C ``identity`` field to ``PyUFunc_IdentityValue``.
If omitted, the identity is set to ``PyUFunc_None``. Note that this is
_not_ equivalent to setting the identity to ``None``, which implies the
operation is reorderable.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'], dtype='<U5')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[8192, 521, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj, /)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, raise a RuntimeError. If this
routine does not know how to add a docstring to the object, raise a
TypeError.
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
_add_newdoc_ufunc(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', 'get_handler_name',
"""
get_handler_name(a: ndarray) -> str,None
Return the name of the memory handler used by `a`. If not provided, return
the name of the memory handler that will be used to allocate data for the
next `ndarray` in this context. May return None if `a` does not own its
memory, in which case you can traverse ``a.base`` for a memory handler.
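Examples
--------
A minimal sketch; the name shown assumes NumPy's default allocator is
in use:
>>> a = np.arange(2)
>>> np.core.multiarray.get_handler_name(a)
'default_allocator'
>>> np.core.multiarray.get_handler_name(a[:1]) is None  # views own no memory
True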
""")
add_newdoc('numpy.core.multiarray', 'get_handler_version',
"""
get_handler_version(a: ndarray) -> int,None
Return the version of the memory handler used by `a`. If not provided,
return the version of the memory handler that will be used to allocate data
for the next `ndarray` in this context. May return None if `a` does not own
its memory, in which case you can traverse ``a.base`` for a memory handler.
""")
add_newdoc('numpy.core.multiarray', '_get_madvise_hugepage',
"""
_get_madvise_hugepage() -> bool
Get use of ``madvise (2)`` MADV_HUGEPAGE support when
allocating the array data. Returns the currently set value.
See `global_state` for more information.
""")
add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage',
"""
_set_madvise_hugepage(enabled: bool) -> bool
Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when
allocating the array data. Returns the previously set value.
See `global_state` for more information.
""")
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
"""
format_float_OSprintf_g(val, precision)
Print a floating point scalar using the system's printf function,
equivalent to:
printf("%.*g", precision, val);
for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
method is designed to help cross-validate the format_float_* methods.
Parameters
----------
val : python float or numpy floating scalar
Value to format.
precision : non-negative integer, optional
Precision given to printf.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
format_float_positional
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use `info`. For
example, ``np.info(np.sin)``. Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
**Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)``
Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
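For example (a minimal illustration of these rules):
>>> np.add(np.ones((2, 1)), np.ones(3)).shape
(2, 3)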
Parameters
----------
*x : array_like
Input arrays.
out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
number of outputs; use None for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
provided, it will be returned. If not, `r` will be allocated and
may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
add_newdoc('numpy.core', 'ufunc', ('signature',
"""
Definition of the core elements a generalized ufunc operates on.
The signature determines how the dimensions of each input/output array
are split into core and loop dimensions:
1. Each dimension in the signature is matched to a dimension of the
corresponding passed-in array, starting from the end of the shape tuple.
2. Core dimensions assigned to the same label in the signature must have
exactly matching sizes, no broadcasting is performed.
3. The core dimensions are removed from all inputs and the remaining
dimensions are broadcast together, defining the loop dimensions.
Notes
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
For ufuncs that operate on scalars, the signature is None, which is
equivalent to '()' for every argument.
Examples
--------
>>> np.core.umath_tests.matrix_multiply.signature
'(m,n),(n,p)->(m,p)'
>>> np.linalg._umath_linalg.det.signature
'(m,m)->()'
>>> np.add.signature is None
True # equivalent to '(),()->()'
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
Reduces `array`'s dimension by one, by applying ufunc along one axis.
Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
array : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is None, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `array`.
.. versionadded:: 1.7.0
initial : scalar, optional
The value with which to start the reduction.
If the ufunc has no identity or the dtype is object, this defaults
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
.. versionadded:: 1.15.0
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `array`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one has to pass in also ``initial``.
.. versionadded:: 1.17.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
You can use the ``initial`` keyword argument to initialize the reduction
with a different value, and ``where`` to select specific elements to include:
>>> np.add.reduce([10], initial=5)
15
>>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
>>> a = np.array([10., np.nan, 10])
>>> np.add.reduce(a, where=~np.isnan(a))
20.0
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
>>> np.minimum.reduce([], initial=np.inf)
inf
>>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
array([ 1., 10.])
>>> np.minimum.reduce([])
Traceback (most recent call last):
...
ValueError: zero-size array to reduction operation minimum which has no identity
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for the keyword argument.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[1., 0.],
[0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[1., 0.],
[1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[1., 0.],
[1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[1., 1.],
[0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(array, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = array.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``array[indices[i]]``.
* if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
Parameters
----------
array : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for the keyword argument.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
``ufunc.reduceat(array, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(array) - 1)``,
``indices[1::2] = range(1, len(array))``.
Don't be fooled by this attribute's name: `reduceat(array)` is not
necessarily smaller than `array`.
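A small sketch of this equivalence, using ``np.add`` on a tiny array:
>>> a = np.array([1, 2, 3, 4])
>>> indices = np.zeros(2 * len(a) - 1, dtype=int)
>>> indices[1::2] = np.arange(1, len(a))
>>> np.add.reduceat(a, indices)[::2]
array([ 1,  3,  6, 10])
>>> np.add.accumulate(a)
array([ 1,  3,  6, 10])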
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[12., 15., 18., 21.],
[12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
r"""
outer(A, B, /, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty((len(A), len(B)))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
See `ufunc` for a comprehensive overview of all available arguments.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer : A less powerful version of ``np.multiply.outer``
that `ravel`\ s all inputs to 1D. This exists
primarily for compatibility with old code.
tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
``np.multiply.outer(a, b)`` behave same for all
dimensions of a and b.
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
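As a sketch of the relation to `tensordot` noted above:
>>> a, b = np.arange(2), np.arange(3)
>>> np.array_equal(np.multiply.outer(a, b),
...                np.tensordot(a, b, axes=((), ())))
True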
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None, /)
Performs an unbuffered in-place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
``a[indices] += b``, except that results are accumulated for elements that
are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
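A sketch contrasting buffered fancy indexing with the unbuffered ``at``:
>>> a = np.zeros(2, dtype=int)
>>> a[[0, 0]] += 1  # buffered: the repeated index is applied once
>>> a
array([1, 0])
>>> b = np.zeros(2, dtype=int)
>>> np.add.at(b, [0, 0], 1)  # unbuffered: applied twice
>>> b
array([2, 0])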
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(dtype, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
dtype
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if the `dtype` argument is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
dtype([('f1', '<u8'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', 'S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
dtype([('hello', '<i8', (3,)), ('world', 'V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', 'S1'), ('age', 'u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', 'S25'), ('age', 'u1')])
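The effect of ``align=True`` shows up in the itemsize; a small sketch:
>>> np.dtype({'names': ['a', 'b'], 'formats': ['i1', 'i4']}).itemsize
5
>>> np.dtype({'names': ['a', 'b'], 'formats': ['i1', 'i4']}, align=True).itemsize
8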
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
Examples
--------
>>> x = np.dtype('i4')
>>> x.alignment
4
>>> x = np.dtype(float)
>>> x.alignment
8
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types.
Examples
--------
>>> x = np.dtype(float)
>>> x.char
'd'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
`__array_interface__` description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
Warning: This attribute exists specifically for `__array_interface__`,
and passing it directly to `np.dtype` will not accurately reconstruct
some dtypes (e.g., scalar and subarray dtypes).
Examples
--------
>>> x = np.dtype(float)
>>> x.descr
[('', '<f8')]
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.descr
[('name', '<U16'), ('grades', '<f8', (2,))]
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
Offset is limited to C int, which is signed and usually 32 bits.
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
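A sketch of passing the ``(dtype, offset)`` pair to ``ndarray.getfield``;
the names ``dt2`` and ``a`` here are illustrative:
>>> dt2 = np.dtype([('x', np.int32), ('y', np.int32)])
>>> a = np.zeros(2, dtype=dt2)
>>> field_dtype, offset = dt2.fields['y'][:2]
>>> (a.getfield(field_dtype, offset) == a['y']).all()
True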
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
The following example demonstrates that operations on this particular
dtype requires Python C-API.
Examples
--------
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.flags
16
>>> np.core.multiarray.NEEDS_PYAPI
16
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
Examples
--------
>>> arr = np.array([[1, 2], [3, 4]])
>>> arr.dtype
dtype('int64')
>>> arr.itemsize
8
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.itemsize
80
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
Examples
--------
>>> dt = np.dtype('i4')
>>> dt.kind
'i'
>>> dt = np.dtype('f8')
>>> dt.kind
'f'
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.kind
'V'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('metadata',
"""
Either ``None`` or a readonly dictionary of metadata (mappingproxy).
The metadata field can be set using any dictionary at data-type
creation. NumPy currently has no uniform approach to propagating
metadata; although some array operations preserve it, there is no
guarantee that others will.
.. warning::
Although used in certain projects, this feature was long undocumented
and is not well supported. Some aspects of metadata propagation
are expected to change in the future.
Examples
--------
>>> dt = np.dtype(float, metadata={"key": "value"})
>>> dt.metadata["key"]
'value'
>>> arr = np.array([1, 2, 3], dtype=dt)
>>> arr.dtype.metadata
mappingproxy({'key': 'value'})
Adding arrays with identical datatypes currently preserves the metadata:
>>> (arr + arr).dtype.metadata
mappingproxy({'key': 'value'})
But if the arrays have different dtype metadata, the metadata may be
dropped:
>>> dt2 = np.dtype(float, metadata={"key2": "value2"})
>>> arr2 = np.array([3, 2, 1], dtype=dt2)
>>> (arr + arr2).dtype.metadata is None
True # The metadata field is cleared so None is returned
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
Examples
--------
>>> x = np.dtype(float)
>>> x.name
'float64'
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.name
'void640'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
Examples
--------
>>> dt = np.dtype(str)
>>> dt.num
19
>>> dt = np.dtype(float)
>>> dt.num
12
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
Examples
--------
>>> dt = np.dtype(('i4', 4))
>>> dt.shape
(4,)
>>> dt = np.dtype(('i4', (2, 3)))
>>> dt.shape
(2, 3)
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
"""
Number of dimensions of the sub-array if this data type describes a
sub-array, and ``0`` otherwise.
.. versionadded:: 1.13.0
Examples
--------
>>> x = np.dtype(float)
>>> x.ndim
0
>>> x = np.dtype((float, 8))
>>> x.ndim
1
>>> x = np.dtype(('i4', (3, 4)))
>>> x.ndim
2
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
See Also
--------
dtype.base
Examples
--------
>>> x = np.dtype('8f')
>>> x.subdtype
(dtype('float32'), (8,))
>>> x = np.dtype('i2')
>>> x.subdtype
>>>
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('base',
"""
Returns dtype for the base element of the subarrays,
regardless of their dimension or shape.
See Also
--------
dtype.subdtype
Examples
--------
>>> x = np.dtype('8f')
>>> x.base
dtype('float32')
>>> x = np.dtype('i2')
>>> x.base
dtype('int16')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S', /)
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'little'} - little endian
* {'>', 'big'} - big endian
* {'=', 'native'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('__class_getitem__',
"""
__class_getitem__(item, /)
Return a parametrized wrapper around the `~numpy.dtype` type.
.. versionadded:: 1.22
Returns
-------
alias : types.GenericAlias
A parametrized `~numpy.dtype` type.
Examples
--------
>>> import numpy as np
>>> np.dtype[np.int64]
numpy.dtype[numpy.int64]
Notes
-----
This method is only available for python 3.9 and later.
See Also
--------
:pep:`585` : Type hinting generics in standard collections.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('__ge__',
"""
__ge__(value, /)
Return ``self >= value``.
Equivalent to ``np.can_cast(value, self, casting="safe")``.
See Also
--------
can_cast : Returns True if cast between data types can occur according to
the casting rule.
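Examples
--------
A short sketch; safe casting from ``int32`` to ``float64`` is allowed,
but not the reverse:
>>> np.dtype('float64') >= np.dtype('int32')
True
>>> np.dtype('int32') >= np.dtype('float64')
False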
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('__le__',
"""
__le__(value, /)
Return ``self <= value``.
Equivalent to ``np.can_cast(self, value, casting="safe")``.
See Also
--------
can_cast : Returns True if cast between data types can occur according to
the casting rule.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('__gt__',
"""
__gt__(value, /)
Return ``self > value``.
Equivalent to
``self != value and np.can_cast(value, self, casting="safe")``.
See Also
--------
can_cast : Returns True if cast between data types can occur according to
the casting rule.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('__lt__',
"""
__lt__(value, /)
Return ``self < value``.
Equivalent to
``self != value and np.can_cast(self, value, casting="safe")``.
See Also
--------
can_cast : Returns True if cast between data types can occur according to
the casting rule.
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will always
be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False])
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
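A sketch of using the calendar with `is_busday`; 2011-07-05 was a
Tuesday and is not in the holiday list:
>>> np.is_busday('2011-07-01', busdaycal=bdd)
False
>>> np.is_busday('2011-07-05', busdaycal=bdd)
True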
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
Normalizes an axis index, `axis`, such that it is a valid positive index
into the shape of an array with `ndim` dimensions. Raises an AxisError with an
appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative
ndim : int
The number of dimensions of the array that `axis` should be normalized
against
msg_prefix : str
A prefix to put before the message, typically the name of the argument
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, i.e. when ``-ndim <= axis < ndim`` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
Traceback (most recent call last):
...
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
Get information about the step size of a date or time type.
The returned tuple can be passed as the second argument of `numpy.datetime64` and
`numpy.timedelta64`.
Parameters
----------
dtype : dtype
The dtype object, which must be a `datetime64` or `timedelta64` type.
Returns
-------
unit : str
The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
is based.
count : int
The number of base units in a step.
Examples
--------
>>> dt_25s = np.dtype('timedelta64[25s]')
>>> np.datetime_data(dt_25s)
('s', 25)
>>> np.array(10, dt_25s).astype('timedelta64[s]')
array(250, dtype='timedelta64[s]')
The result can be used to construct a datetime that uses the same units
as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
numpy.datetime64('2010-01-01T00:00:00','25s')
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only" or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
def refer_to_array_attribute(attr, method=True):
docstring = """
Scalar {} identical to the corresponding array attribute.
Please see `ndarray.{}`.
"""
return attr, docstring.format("method" if method else "attribute", attr)
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('T', method=False))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('base', method=False))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('all'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('any'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('argmax'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('argmin'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('argsort'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('astype'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('byteswap'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('choose'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('clip'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('compress'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('conjugate'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('copy'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('cumprod'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('cumsum'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('diagonal'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('dump'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('dumps'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('fill'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('flatten'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('getfield'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('item'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('itemset'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('max'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('mean'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('min'))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S', /)
Return a new scalar with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'little'} - little endian
* {'>', 'big'} - big endian
* {'=', 'native'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order.
Returns
-------
new_scalar : scalar
New scalar with the given change to the byte order.
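Examples
--------
A small sketch on a scalar; note that the underlying bytes are
reinterpreted under the new byte order, not converted:
>>> x = np.int16(1)
>>> x.newbyteorder()
256
>>> x.newbyteorder().newbyteorder() == x
True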
"""))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('nonzero'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('prod'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('ptp'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('put'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('ravel'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('repeat'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('reshape'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('resize'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('round'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('searchsorted'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('setfield'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('setflags'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('sort'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('squeeze'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('std'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('sum'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('swapaxes'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('take'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('tofile'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('tolist'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('tostring'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('trace'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('transpose'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('var'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('view'))
add_newdoc('numpy.core.numerictypes', 'number', ('__class_getitem__',
"""
__class_getitem__(item, /)
Return a parametrized wrapper around the `~numpy.number` type.
.. versionadded:: 1.22
Returns
-------
alias : types.GenericAlias
A parametrized `~numpy.number` type.
Examples
--------
>>> from typing import Any
>>> import numpy as np
>>> np.signedinteger[Any]
numpy.signedinteger[typing.Any]
Notes
-----
This method is only available for python 3.9 and later.
See Also
--------
:pep:`585` : Type hinting generics in standard collections.
"""))
##############################################################################
#
# Documentation for scalar type abstract base classes in type hierarchy
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'number',
"""
Abstract base class of all numeric scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'integer',
"""
Abstract base class of all integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'signedinteger',
"""
Abstract base class of all signed integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
"""
Abstract base class of all unsigned integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'inexact',
"""
Abstract base class of all numeric scalar types with a (potentially)
inexact representation of the values in its range, such as
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'floating',
"""
Abstract base class of all floating-point scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'complexfloating',
"""
Abstract base class of all complex number scalar types that are made up of
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'flexible',
"""
Abstract base class of all scalar types without predefined length.
The actual size of these types depends on the specific `np.dtype`
instantiation.
""")
add_newdoc('numpy.core.numerictypes', 'character',
"""
Abstract base class of all character string scalar types.
""")
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/function_base.py
import functools
import warnings
import operator
import types
from . import numeric as _nx
from .numeric import result_type, NaN, asanyarray, ndim
from numpy.core.multiarray import add_docstring
from numpy.core import overrides
__all__ = ['logspace', 'linspace', 'geomspace']
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
dtype=None, axis=None):
return (start, stop)
@array_function_dispatch(_linspace_dispatcher)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
axis=0):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
.. versionchanged:: 1.20.0
Values are rounded towards ``-inf`` instead of ``0`` when an
integer ``dtype`` is specified. The old behavior can
still be obtained with ``np.linspace(start, stop, num).astype(int)``
Parameters
----------
start : array_like
The starting value of the sequence.
stop : array_like
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, the data type
is inferred from `start` and `stop`. The inferred dtype will never be
an integer; `float` is chosen even if the arguments would produce an
array of integers.
.. versionadded:: 1.9.0
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float, optional
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
scale (a geometric progression).
logspace : Similar to `geomspace`, but with the end points specified as
logarithms.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
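With an integer `dtype`, samples are floored (see the versionchanged
note above); a quick sketch of the two behaviors:
>>> np.linspace(-1, 1, 5, dtype=int)
array([-1, -1,  0,  0,  1])
>>> np.linspace(-1, 1, 5).astype(int)
array([-1,  0,  0,  0,  1])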
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = operator.index(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
# and make sure one can use variables that have an __array_interface__, gh-6634
start = asanyarray(start) * 1.0
stop = asanyarray(stop) * 1.0
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
delta = stop - start
y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
# In-place multiplication y *= delta/div is faster, but prevents the multiplicand
# from overriding what class is produced, and thus prevents, e.g., the use of Quantities,
# see gh-7142. Hence, we multiply in place only for standard scalar types.
_mult_inplace = _nx.isscalar(delta)
if div > 0:
step = delta / div
if _nx.any(step == 0):
# Special handling for denormal numbers, gh-5437
y /= div
if _mult_inplace:
y *= delta
else:
y = y * delta
else:
if _mult_inplace:
y *= step
else:
y = y * step
else:
# sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
# have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
y += start
if endpoint and num > 1:
y[-1] = stop
if axis != 0:
y = _nx.moveaxis(y, 0, axis)
if _nx.issubdtype(dtype, _nx.integer):
_nx.floor(y, out=y)
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
dtype=None, axis=None):
return (start, stop)
@array_function_dispatch(_logspace_dispatcher)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
axis=0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
``base ** start`` is the starting value of the sequence.
stop : array_like
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : array_like, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, the data type
is inferred from `start` and `stop`. The inferred type will never be
an integer; `float` is chosen even if the arguments would produce an
array of integers.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
geomspace : Similar to logspace, but with endpoints specified directly.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
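A runnable sketch of that equivalence:
>>> np.allclose(np.logspace(0, 2, num=5),
...             10 ** np.linspace(0, 2, num=5))
True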
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype, copy=False)
def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
axis=None):
return (start, stop)
@array_function_dispatch(_geomspace_dispatcher)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
"""
Return numbers spaced evenly on a log scale (a geometric progression).
This is similar to `logspace`, but with endpoints specified directly.
Each output sample is a constant multiple of the previous.
.. versionchanged:: 1.16.0
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : array_like
The starting value of the sequence.
stop : array_like
The final value of the sequence, unless `endpoint` is False.
In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
dtype : dtype
The type of the output array. If `dtype` is not given, the data type
is inferred from `start` and `stop`. The inferred dtype will never be
an integer; `float` is chosen even if the arguments would produce an
array of integers.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Use -1 to get an axis at the end.
.. versionadded:: 1.16.0
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
logspace : Similar to geomspace, but with endpoints specified using log
and base.
linspace : Similar to geomspace, but with arithmetic instead of geometric
progression.
arange : Similar to linspace, with the step size specified instead of the
number of samples.
Notes
-----
If the inputs or dtype are complex, the output will follow a logarithmic
spiral in the complex plane. (There are an infinite number of spirals
passing through two points; the output will follow the shortest such path.)
Examples
--------
>>> np.geomspace(1, 1000, num=4)
array([ 1., 10., 100., 1000.])
>>> np.geomspace(1, 1000, num=3, endpoint=False)
array([ 1., 10., 100.])
>>> np.geomspace(1, 1000, num=4, endpoint=False)
array([ 1. , 5.62341325, 31.6227766 , 177.827941 ])
>>> np.geomspace(1, 256, num=9)
array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.])
Note that the above may not produce exact integers:
>>> np.geomspace(1, 256, num=9, dtype=int)
array([ 1, 2, 4, 7, 16, 32, 63, 127, 256])
>>> np.around(np.geomspace(1, 256, num=9)).astype(int)
array([ 1, 2, 4, 8, 16, 32, 64, 128, 256])
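The constant ratio between consecutive samples can be checked directly:
>>> samples = np.geomspace(1, 8, num=4)
>>> samples
array([1., 2., 4., 8.])
>>> np.allclose(samples[1:] / samples[:-1], 2.0)
True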
Negative, decreasing, and complex inputs are allowed:
>>> np.geomspace(1000, 1, num=4)
array([1000., 100., 10., 1.])
>>> np.geomspace(-1000, -1, num=4)
array([-1000., -100., -10., -1.])
>>> np.geomspace(1j, 1000j, num=4) # Straight line
array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
>>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j,
1.00000000e+00+0.00000000e+00j])
Graphical illustration of `endpoint` parameter:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> y = np.zeros(N)
>>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.axis([0.5, 2000, 0, 3])
[0.5, 2000, 0, 3]
>>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
>>> plt.show()
"""
start = asanyarray(start)
stop = asanyarray(stop)
if _nx.any(start == 0) or _nx.any(stop == 0):
raise ValueError('Geometric sequence cannot include zero')
dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
if dtype is None:
dtype = dt
else:
# complex to dtype('complex128'), for instance
dtype = _nx.dtype(dtype)
# Promote both arguments to the same dtype in case, for instance, one is
# complex and another is negative and log would produce NaN otherwise.
# Copy since we may change things in-place further down.
start = start.astype(dt, copy=True)
stop = stop.astype(dt, copy=True)
out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
# Avoid negligible real or imaginary parts in output by rotating to
# positive real, calculating, then undoing rotation
if _nx.issubdtype(dt, _nx.complexfloating):
all_imag = (start.real == 0.) & (stop.real == 0.)
if _nx.any(all_imag):
start[all_imag] = start[all_imag].imag
stop[all_imag] = stop[all_imag].imag
out_sign[all_imag] = 1j
both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
if _nx.any(both_negative):
_nx.negative(start, out=start, where=both_negative)
_nx.negative(stop, out=stop, where=both_negative)
_nx.negative(out_sign, out=out_sign, where=both_negative)
log_start = _nx.log10(start)
log_stop = _nx.log10(stop)
result = logspace(log_start, log_stop, num=num,
endpoint=endpoint, base=10.0, dtype=dtype)
# Make sure the endpoints match the start and stop arguments. This is
# necessary because np.exp(np.log(x)) is not necessarily equal to x.
if num > 0:
result[0] = start
if num > 1 and endpoint:
result[-1] = stop
result = out_sign * result
if axis != 0:
result = _nx.moveaxis(result, 0, axis)
return result.astype(dtype, copy=False)
def _needs_add_docstring(obj):
"""
Returns true if the only way to set the docstring of `obj` from python is
via add_docstring.
This function errs on the side of being overly conservative.
"""
# Py_TPFLAGS_HEAPTYPE is set on types created at runtime; only such types
# have a writable __doc__, so everything else needs add_docstring.
Py_TPFLAGS_HEAPTYPE = 1 << 9
if isinstance(obj, (types.FunctionType, types.MethodType, property)):
return False
if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
return False
return True
def _add_docstring(obj, doc, warn_on_python):
if warn_on_python and not _needs_add_docstring(obj):
warnings.warn(
"add_newdoc was used on a pure-python object {}. "
"Prefer to attach it directly to the source."
.format(obj),
UserWarning,
stacklevel=3)
try:
add_docstring(obj, doc)
except Exception:
pass
def add_newdoc(place, obj, doc, warn_on_python=True):
"""
Add documentation to an existing object, typically one defined in C
The purpose is to allow easier editing of the docstrings without requiring
a re-compile. This exists primarily for internal use within numpy itself.
Parameters
----------
place : str
The absolute name of the module to import from
obj : str
The name of the object to add documentation to, typically a class or
function name
doc : {str, Tuple[str, str], List[Tuple[str, str]]}
If a string, the documentation to apply to `obj`
If a tuple, then the first element is interpreted as an attribute of
`obj` and the second as the docstring to apply - ``(method, docstring)``
If a list, then each element of the list should be a tuple of length
two - ``[(method1, docstring1), (method2, docstring2), ...]``
warn_on_python : bool
If True, the default, emit `UserWarning` if this is used to attach
documentation to a pure-python object.
Notes
-----
This routine never raises an error if the docstring can't be written, but
will raise an error if the object being documented does not exist.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
Since this function grabs the ``char *`` from a c-level str object and puts
it into the ``tp_doc`` slot of the type of `obj`, it violates a number of
C-API best-practices, by:
- modifying a `PyTypeObject` after calling `PyType_Ready`
- calling `Py_INCREF` on the str and losing the reference, so the str
will never be released
If possible it should be avoided.
"""
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
_add_docstring(new, doc.strip(), warn_on_python)
elif isinstance(doc, tuple):
attr, docstring = doc
_add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
elif isinstance(doc, list):
for attr, docstring in doc:
_add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/memmap.py
from contextlib import nullcontext
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import os_fspath, is_pathlib_path
from numpy.core.overrides import set_module
__all__ = ['memmap']
dtypedescr = dtype
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]
mode_equivalents = {
"readonly":"r",
"copyonwrite":"c",
"readwrite":"r+",
"write":"w+"
}
@set_module('numpy')
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. NumPy's
memmap's are array-like objects. This differs from Python's ``mmap``
module, which uses file-like objects.
This subclass of ndarray has some unpleasant interactions with
some operations, because it doesn't quite fit properly as a subclass.
An alternative to using this subclass is to create the ``mmap``
object yourself, then create an ndarray with ndarray.__new__ directly,
passing the object created in its 'buffer=' parameter.
This class may at some point be turned into a factory function
which returns a view into an mmap buffer.
Flush the memmap instance to write the changes to the file. Currently there
is no API to close the underlying ``mmap``. It is tricky to ensure the
resource is actually closed, since it may be shared between different
memmap instances.
Parameters
----------
filename : str, file-like object, or pathlib.Path instance
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
Default is `uint8`.
mode : {'r+', 'r', 'w+', 'c'}, optional
The file is opened in this mode:
+------+-------------------------------------------------------------+
| 'r' | Open existing file for reading only. |
+------+-------------------------------------------------------------+
| 'r+' | Open existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
| | read-only. |
+------+-------------------------------------------------------------+
Default is 'r+'.
offset : int, optional
In the file, array data starts at this offset. Since `offset` is
measured in bytes, it should normally be a multiple of the byte-size
of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
        file are valid; the file will be extended to accommodate the
additional data. By default, ``memmap`` will start at the beginning of
the file, even if ``filename`` is a file pointer ``fp`` and
``fp.tell() != 0``.
shape : tuple, optional
The desired shape of the array. If ``mode == 'r'`` and the number
of remaining bytes after `offset` is not a multiple of the byte-size
of `dtype`, you must specify `shape`. By default, the returned array
will be 1-D with the number of elements determined by file size
and data-type.
order : {'C', 'F'}, optional
Specify the order of the ndarray memory layout:
:term:`row-major`, C-style or :term:`column-major`,
Fortran-style. This only has an effect if the shape is
greater than 1-D. The default order is 'C'.
Attributes
----------
filename : str or pathlib.Path instance
Path to the mapped file.
offset : int
Offset position in the file.
mode : str
File mode.
Methods
-------
flush
Flush any changes in memory to file on disk.
When you delete a memmap object, flush is called first to write
changes to disk.
See also
--------
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
The memmap object can be used anywhere an ndarray is accepted.
Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
``True``.
Memory-mapped files cannot be larger than 2GB on 32-bit systems.
When a memmap causes a file to be created or extended beyond its
current size in the filesystem, the contents of the new part are
unspecified. On systems with POSIX filesystem semantics, the extended
part will be filled with zero bytes.
Examples
--------
>>> data = np.arange(12, dtype='float32')
>>> data.resize((3,4))
This example uses a temporary file so that doctest doesn't write
files to your directory. You would use a 'normal' filename.
>>> from tempfile import mkdtemp
>>> import os.path as path
>>> filename = path.join(mkdtemp(), 'newfile.dat')
Create a memmap with dtype and shape that matches our data:
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
memmap([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]], dtype=float32)
Write data to memmap array:
>>> fp[:] = data[:]
>>> fp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fp.filename == path.abspath(filename)
True
    Flush memory changes to disk so they can be read back:
>>> fp.flush()
Load the memmap and verify data was stored:
>>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> newfp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Read-only memmap:
>>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> fpr.flags.writeable
False
Copy-on-write memmap:
>>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
>>> fpc.flags.writeable
True
It's possible to assign to copy-on-write array, but values are only
written into the memory copy of the array, and not written to disk:
>>> fpc
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fpc[0,:] = 0
>>> fpc
memmap([[ 0., 0., 0., 0.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
File on disk is unchanged:
>>> fpr
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Offset into a memmap:
>>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
>>> fpo
memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
"""
__array_priority__ = -100.0
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
shape=None, order='C'):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError as e:
if mode not in valid_filemodes:
raise ValueError(
"mode must be one of {!r} (got {!r})"
.format(valid_filemodes + list(mode_equivalents.keys()), mode)
) from None
if mode == 'w+' and shape is None:
raise ValueError("shape must be given")
if hasattr(filename, 'read'):
f_ctx = nullcontext(filename)
else:
f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
with f_ctx as fid:
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
_dbytes = descr.itemsize
if shape is None:
bytes = flen - offset
if bytes % _dbytes:
raise ValueError("Size of available data is not a "
"multiple of the data-type size.")
size = bytes // _dbytes
shape = (size,)
else:
if not isinstance(shape, tuple):
shape = (shape,)
size = np.intp(1) # avoid default choice of np.int_, which might overflow
for k in shape:
size *= k
bytes = int(offset + size*_dbytes)
if mode in ('w+', 'r+') and flen < bytes:
fid.seek(bytes - 1, 0)
fid.write(b'\0')
fid.flush()
if mode == 'c':
acc = mmap.ACCESS_COPY
elif mode == 'r':
acc = mmap.ACCESS_READ
else:
acc = mmap.ACCESS_WRITE
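            # mmap requires its `offset` argument to be a multiple of
            # ALLOCATIONGRANULARITY, so map from the nearest aligned
            # position below `offset` and place the array at the residual
            # offset inside that mapping.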
start = offset - offset % mmap.ALLOCATIONGRANULARITY
bytes -= start
array_offset = offset - start
mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
offset=array_offset, order=order)
self._mmap = mm
self.offset = offset
self.mode = mode
if is_pathlib_path(filename):
# special case - if we were constructed with a pathlib.path,
# then filename is a path object, not a string
self.filename = filename.resolve()
elif hasattr(fid, "name") and isinstance(fid.name, str):
# py3 returns int for TemporaryFile().name
self.filename = os.path.abspath(fid.name)
# same as memmap copies (e.g. memmap + 1)
else:
self.filename = None
return self
def __array_finalize__(self, obj):
if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
self._mmap = obj._mmap
self.filename = obj.filename
self.offset = obj.offset
self.mode = obj.mode
else:
self._mmap = None
self.filename = None
self.offset = None
self.mode = None
def flush(self):
"""
Write any changes in the array to the file on disk.
For further information, see `memmap`.
Parameters
----------
None
See Also
--------
memmap
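        Examples
        --------
        A minimal sketch; the temporary path below is illustrative only:
        >>> import tempfile, os.path as path
        >>> fname = path.join(tempfile.mkdtemp(), 'flush-demo.dat')
        >>> fp = np.memmap(fname, dtype='float32', mode='w+', shape=(4,))
        >>> fp[:] = 1.0
        >>> fp.flush()  # changes are now persisted to fname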
"""
if self.base is not None and hasattr(self.base, 'flush'):
self.base.flush()
def __array_wrap__(self, arr, context=None):
arr = super().__array_wrap__(arr, context)
# Return a memmap if a memmap was given as the output of the
# ufunc. Leave the arr class unchanged if self is not a memmap
# to keep original memmap subclasses behavior
if self is arr or type(self) is not memmap:
return arr
# Return scalar instead of 0d memmap, e.g. for np.sum with
# axis=None
if arr.shape == ():
return arr[()]
# Return ndarray otherwise
return arr.view(np.ndarray)
def __getitem__(self, index):
res = super().__getitem__(index)
if type(res) is memmap and res._mmap is None:
return res.view(type=ndarray)
return res
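# A minimal, guarded sketch of the alternative mentioned in the class
# docstring above: create the ``mmap`` yourself and wrap it in a plain
# ndarray. The temporary file is illustrative only.
if __name__ == '__main__':
    import mmap as _mmap
    import tempfile
    with tempfile.NamedTemporaryFile() as f:
        f.write(b'\0' * (4 * 12))  # room for a (3, 4) float32 array
        f.flush()
        mm = _mmap.mmap(f.fileno(), 4 * 12, access=_mmap.ACCESS_WRITE)
        a = np.ndarray((3, 4), dtype=np.float32, buffer=mm)  # not a memmap
        a[:] = 1.0
        mm.flush()
        print(a)
        del a  # drop the buffer reference before closing the mapping
        mm.close()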
| 11,688 | Python | 33.58284 | 90 | 0.528405 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/setup.py | import os
import sys
import sysconfig
import pickle
import copy
import warnings
import textwrap
import glob
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from sysconfig import get_config_var
from numpy.compat import npy_load_module
from setup_common import * # noqa: F403
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
if not NPY_RELAXED_STRIDES_CHECKING:
raise SystemError(
"Support for NPY_RELAXED_STRIDES_CHECKING=0 has been remove as of "
"NumPy 1.23. This error will eventually be removed entirely.")
# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
# bogus value for affected strides in order to help smoke out bad stride usage
# when relaxed stride checking is enabled.
NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
# Set NPY_DISABLE_SVML=1 in the environment to disable the vendored SVML
# library. This option only has significance on a Linux x86_64 host and is most
# useful to avoid improperly requiring SVML when cross compiling.
NPY_DISABLE_SVML = (os.environ.get('NPY_DISABLE_SVML', "0") == "1")
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils forces
# config.h generation inside an Extension class, and as such sharing
# configuration information between extensions is not easy.
# Using a pickled-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly:
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
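# e.g. ``configuration()`` below creates a single ``cocache = CallOnceOnly()``
# and both config-header generators call ``cocache.check_types(...)``; only
# the first call runs the (slow) compiler checks, later calls replay the
# pickled result.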
def can_link_svml():
"""SVML library is supported only on x86_64 architecture and currently
only on linux
"""
if NPY_DISABLE_SVML:
return False
platform = sysconfig.get_platform()
return ("x86_64" in platform
and "linux" in platform
and sys.maxsize > 2**31)
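# e.g. sysconfig.get_platform() returns 'linux-x86_64' on a 64-bit Linux
# build, which satisfies both substring checks above.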
def check_svml_submodule(svmlpath):
if not os.path.exists(svmlpath + "/README.md"):
raise RuntimeError("Missing `SVML` submodule! Run `git submodule "
"update --init` to fix this.")
return True
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Perhaps a fancier check is in order here.
# so that threads are only enabled if there
    # are actually multiple CPUs? -- but
    # threaded code can be nice even on a single
    # CPU so that long-running code doesn't
# block.
return 'NPY_NOSMP' in os.environ
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, ext, moredefs, mathlibs):
def check_func(
func_name,
decl=False,
headers=["feature_detection_math.h"],
):
return config.check_func(
func_name,
libraries=mathlibs,
decl=decl,
call=True,
call_args=FUNC_CALL_ARGS[func_name],
headers=headers,
)
def check_funcs_once(funcs_name, headers=["feature_detection_math.h"]):
call = dict([(f, True) for f in funcs_name])
call_args = dict([(f, FUNC_CALL_ARGS[f]) for f in funcs_name])
st = config.check_funcs_once(
funcs_name,
libraries=mathlibs,
decl=False,
call=call,
call_args=call_args,
headers=headers,
)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name, headers=["feature_detection_math.h"]):
# Use check_funcs_once first, and if it does not work, test func per
# func. Return success only if all the functions are available
if not check_funcs_once(funcs_name, headers=headers):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f, headers=headers):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
raise SystemError("One of the required function to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
# XXX: hack to circumvent cpp pollution from python: python put its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
    # autoconf, hoping their own tests are correct
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
if f in OPTIONAL_STDFUNCS:
OPTIONAL_STDFUNCS.remove(f)
else:
OPTIONAL_FILE_FUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
check_funcs(OPTIONAL_FILE_FUNCS, headers=["feature_detection_stdio.h"])
check_funcs(OPTIONAL_MISC_FUNCS, headers=["feature_detection_misc.h"])
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
h = h.replace(".", "_").replace(os.path.sep, "_")
moredefs.append((fname2def(h), 1))
# Try with both "locale.h" and "xlocale.h"
locale_headers = [
"stdlib.h",
"xlocale.h",
"feature_detection_locale.h",
]
if not check_funcs(OPTIONAL_LOCALE_FUNCS, headers=locale_headers):
# It didn't work with xlocale.h, maybe it will work with locale.h?
locale_headers[1] = "locale.h"
check_funcs(OPTIONAL_LOCALE_FUNCS, headers=locale_headers)
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args, m = tup[0], tup[1], fname2def(tup[0])
elif len(tup) == 3:
f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])
else:
f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((m, 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_gcc_function_attribute(dec, fn):
moredefs.append((fname2def(fn), 1))
if fn == 'attribute_target_avx512f':
# GH-14787: Work around GCC<8.4 bug when compiling with AVX512
# support on Windows-based platforms
if (sys.platform in ('win32', 'cygwin') and
config.check_compiler_gcc() and
not config.check_gcc_version_at_least(8, 4)):
ext.extra_compile_args.extend(
['-ffixed-xmm%s' % n for n in range(16, 32)])
for dec, fn, code, header in OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS:
if config.check_gcc_function_attribute_with_intrinsics(dec, fn, code,
header):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_gcc_variable_attribute(fn):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)
return priv, pub
except Exception:
        # os.uname is not available on all platforms; a blanket except is ugly but safe
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
# XXX: hack to circumvent cpp pollution from python: python put its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
    # autoconf, hoping their own tests are correct
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
    # Normally, isnan and isinf are macros (C99), but some platforms only
    # have the function, or both function and macro versions. Check for the
    # macro only, and define replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers=["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {'short': [2], 'int': [4], 'long': [8, 4],
'float': [4], 'double': [8], 'long double': [16, 12, 8],
'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],
'off_t': [8, 4]}
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
python = 'python'
if '__pypy__' in sys.builtin_module_names:
python = 'pypy'
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "
"install {0}-dev|{0}-devel.".format(python))
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
res = config_cmd.check_header("sys/endian.h")
if res:
private_defines.append(('HAVE_SYS_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers=["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def,
expected=[2 * x for x in expected[type]])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"
", please contact the maintainers")
return private_defines, public_defines
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ["m"], ["cpml"]]
mathlib = os.environ.get("MATHLIB")
if mathlib:
mathlibs_choices.insert(0, mathlib.split(","))
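        # e.g. MATHLIB="m" or MATHLIB="cpml,m" puts those libraries at the
        # front of the candidate list tried below.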
for libs in mathlibs_choices:
if config_cmd.check_func(
"log",
libraries=libs,
call_args="0",
decl="double log(double);",
call=True
):
mathlibs = libs
break
else:
raise RuntimeError(
"math library missing; rerun setup.py after setting the "
"MATHLIB env variable"
)
return mathlibs
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
hide = '__attribute__((visibility("hidden")))'
if config.check_gcc_function_attribute(hide, 'hideme'):
return hide
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import (Configuration, dot_join,
exec_mod_from_location)
from numpy.distutils.system_info import (get_info, blas_opt_info,
lapack_opt_info)
from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
from numpy.version import release as is_released
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released:
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = exec_mod_from_location('_'.join(n.split('.')),
generate_umath_py)
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, ext, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform == 'win32' or os.name == 'nt':
win32_checks(moredefs)
# C99 restrict keyword
moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))
# Inline check
inline = config_cmd.check_inline()
if can_link_svml():
moredefs.append(('NPY_CAN_LINK_SVML', 1))
# Use bogus stride debug aid to flush out bugs where users use
# strides of dimensions with length 1 to index a full contiguous
# array.
if NPY_RELAXED_STRIDES_DEBUG:
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
else:
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 0))
# Get long double representation
rep = check_long_double_representation(config_cmd)
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
if check_for_right_shift_internal_compiler_error(config_cmd):
moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift')
moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift')
moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift')
moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift')
# Generate the config.h file from moredefs
with open(target, 'w') as target_f:
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write(textwrap.dedent("""
#ifndef NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
"""))
log.info('File: %s' % target)
with open(target) as target_f:
log.info(target_f.read())
log.info('EOF')
else:
mathlibs = []
with open(target) as target_f:
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
# Ugly: this can be called within a library and not an extension,
            # in which case there is no libraries attribute (and none is
# needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put common include directory in build_dir on search path
# allows using code generation in headers
config.add_include_dirs(join(build_dir, "src", "common"))
config.add_include_dirs(join(build_dir, "src", "npymath"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if NPY_RELAXED_STRIDES_DEBUG:
moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
# Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
with open(target, 'w') as target_f:
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write(textwrap.dedent("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
"""))
# Dump the numpyconfig.h header to stdout
log.info('File: %s' % target)
with open(target) as target_f:
log.info(target_f.read())
log.info('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "common"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_dir('include/numpy')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_include_dirs(join('src', '_simd'))
config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in process
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
if sys.platform[:3] == "aix":
config.add_define_macros([("_LARGE_FILES", None)])
else:
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
join(codegen_dir, 'genapi.py'),
]
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
# update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
for lang, test_code, note in (
('c', 'int main(void) { return 0;}', ''),
('c++', (
'int main(void)'
'{ auto x = 0.0; return static_cast<int>(x); }'
), (
'note: A compiler with support for C++11 language '
'features is required.'
)
),
):
is_cpp = lang == 'c++'
if is_cpp:
                # this is a workaround to get rid of invalid C++ flags
                # without making big changes to config.
                # C is tested first, so the compiler should be available here
bk_c = config_cmd.compiler
config_cmd.compiler = bk_c.cxx_compiler()
                # Check that the Linux compiler actually supports the default flags
if hasattr(config_cmd.compiler, 'compiler'):
config_cmd.compiler.compiler.extend(NPY_CXX_FLAGS)
config_cmd.compiler.compiler_so.extend(NPY_CXX_FLAGS)
st = config_cmd.try_link(test_code, lang=lang)
if not st:
# rerun the failing command in verbose mode
config_cmd.compiler.verbose = True
config_cmd.try_link(test_code, lang=lang)
raise RuntimeError(
f"Broken toolchain: cannot link a simple {lang.upper()} "
f"program. {note}"
)
if is_cpp:
config_cmd.compiler = bk_c
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),
join('src', 'npymath', 'npy_math.c'),
# join('src', 'npymath', 'ieee754.cpp'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')
]
def opts_if_msvc(build_cmd):
""" Add flags if we are using MSVC compiler
We can't see `build_cmd` in our scope, because we have not initialized
the distutils build command, so use this deferred calculation to run
when we are building the library.
"""
if build_cmd.compiler.compiler_type != 'msvc':
return []
# Explicitly disable whole-program optimization.
flags = ['/GL-']
# Disable voltbl section for vc142 to allow link using mingw-w64; see:
# https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171
if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']):
flags.append('-d2VolatileMetadata-')
return flags
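    # e.g. with MSVC this yields ['/GL-'] plus '-d2VolatileMetadata-' when
    # the compiler accepts it; any other compiler type gets [].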
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib',
build_info={
'include_dirs' : [], # empty list required for creating npy_math_internal.h
'extra_compiler_args': [opts_if_msvc],
})
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('_multiarray_tests',
sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),
join('src', 'common', 'mem_overlap.c'),
join('src', 'common', 'npy_argparse.c'),
join('src', 'common', 'npy_hashtable.c')],
depends=[join('src', 'common', 'mem_overlap.h'),
join('src', 'common', 'npy_argparse.h'),
join('src', 'common', 'npy_hashtable.h'),
join('src', 'common', 'npy_extint128.h')],
libraries=['npymath'])
#######################################################################
# _multiarray_umath module - common part #
#######################################################################
common_deps = [
join('src', 'common', 'dlpack', 'dlpack.h'),
join('src', 'common', 'array_assign.h'),
join('src', 'common', 'binop_override.h'),
join('src', 'common', 'cblasfuncs.h'),
join('src', 'common', 'lowlevel_strided_loops.h'),
join('src', 'common', 'mem_overlap.h'),
join('src', 'common', 'npy_argparse.h'),
join('src', 'common', 'npy_cblas.h'),
join('src', 'common', 'npy_config.h'),
join('src', 'common', 'npy_ctypes.h'),
join('src', 'common', 'npy_dlpack.h'),
join('src', 'common', 'npy_extint128.h'),
join('src', 'common', 'npy_import.h'),
join('src', 'common', 'npy_hashtable.h'),
join('src', 'common', 'npy_longdouble.h'),
join('src', 'common', 'npy_svml.h'),
join('src', 'common', 'templ_common.h.src'),
join('src', 'common', 'ucsnarrow.h'),
join('src', 'common', 'ufunc_override.h'),
join('src', 'common', 'umathmodule.h'),
join('src', 'common', 'numpyos.h'),
join('src', 'common', 'npy_cpu_dispatch.h'),
join('src', 'common', 'simd', 'simd.h'),
]
common_src = [
join('src', 'common', 'array_assign.c'),
join('src', 'common', 'mem_overlap.c'),
join('src', 'common', 'npy_argparse.c'),
join('src', 'common', 'npy_hashtable.c'),
join('src', 'common', 'npy_longdouble.c'),
join('src', 'common', 'templ_common.h.src'),
join('src', 'common', 'ucsnarrow.c'),
join('src', 'common', 'ufunc_override.c'),
join('src', 'common', 'numpyos.c'),
join('src', 'common', 'npy_cpu_features.c'),
]
if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
blas_info = get_info('blas_ilp64_opt', 2)
else:
blas_info = get_info('blas_opt', 0)
have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', [])
if have_blas:
extra_info = blas_info
# These files are also in MANIFEST.in so that they are always in
# the source distribution independently of HAVE_CBLAS.
common_src.extend([join('src', 'common', 'cblasfuncs.c'),
join('src', 'common', 'python_xerbla.c'),
])
else:
extra_info = {}
#######################################################################
# _multiarray_umath module - multiarray part #
#######################################################################
multiarray_deps = [
join('src', 'multiarray', 'abstractdtypes.h'),
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h.src'),
join('src', 'multiarray', 'arrayfunction_override.h'),
join('src', 'multiarray', 'array_coercion.h'),
join('src', 'multiarray', 'array_method.h'),
join('src', 'multiarray', 'npy_buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'common_dtype.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'dtypemeta.h'),
join('src', 'multiarray', 'dtype_transfer.h'),
join('src', 'multiarray', 'dragon4.h'),
join('src', 'multiarray', 'einsum_debug.h'),
join('src', 'multiarray', 'einsum_sumprod.h'),
join('src', 'multiarray', 'experimental_public_dtype_api.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'legacy_dtype_implementation.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'strfuncs.h'),
join('src', 'multiarray', 'typeinfo.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
join('src', 'multiarray', 'textreading', 'readtext.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
        # add library sources as distutils does not consider library
        # dependencies
] + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'abstractdtypes.c'),
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.h.src'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'argfunc.dispatch.c.src'),
join('src', 'multiarray', 'array_coercion.c'),
join('src', 'multiarray', 'array_method.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'arrayfunction_override.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'compiled_base.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'common_dtype.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dlpack.c'),
join('src', 'multiarray', 'dtypemeta.c'),
join('src', 'multiarray', 'dragon4.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'einsum_sumprod.c.src'),
join('src', 'multiarray', 'experimental_public_dtype_api.c'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'legacy_dtype_implementation.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'strfuncs.c'),
join('src', 'multiarray', 'temp_elide.c'),
join('src', 'multiarray', 'typeinfo.c'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'vdot.c'),
join('src', 'common', 'npy_sort.h.src'),
join('src', 'npysort', 'x86-qsort.dispatch.cpp'),
join('src', 'npysort', 'quicksort.cpp'),
join('src', 'npysort', 'mergesort.cpp'),
join('src', 'npysort', 'timsort.cpp'),
join('src', 'npysort', 'heapsort.cpp'),
join('src', 'npysort', 'radixsort.cpp'),
join('src', 'common', 'npy_partition.h'),
join('src', 'npysort', 'selection.cpp'),
join('src', 'common', 'npy_binsearch.h'),
join('src', 'npysort', 'binsearch.cpp'),
join('src', 'multiarray', 'textreading', 'conversions.c'),
join('src', 'multiarray', 'textreading', 'field_types.c'),
join('src', 'multiarray', 'textreading', 'growth.c'),
join('src', 'multiarray', 'textreading', 'readtext.c'),
join('src', 'multiarray', 'textreading', 'rows.c'),
join('src', 'multiarray', 'textreading', 'stream_pyobject.c'),
join('src', 'multiarray', 'textreading', 'str_to_int.c'),
join('src', 'multiarray', 'textreading', 'tokenize.cpp'),
]
#######################################################################
# _multiarray_umath module - umath part #
#######################################################################
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
with open(target, 'w') as f:
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
return []
def generate_umath_doc_header(ext, build_dir):
from numpy.distutils.misc_util import exec_mod_from_location
target = join(build_dir, header_dir, '_umath_doc_generated.h')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
generate_umath_doc_py = join(codegen_dir, 'generate_umath_doc.py')
if newer(generate_umath_doc_py, target):
n = dot_join(config.name, 'generate_umath_doc')
generate_umath_doc = exec_mod_from_location(
'_'.join(n.split('.')), generate_umath_doc_py)
generate_umath_doc.write_code(target)
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops_utils.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'loops_unary_fp.dispatch.c.src'),
join('src', 'umath', 'loops_arithm_fp.dispatch.c.src'),
join('src', 'umath', 'loops_arithmetic.dispatch.c.src'),
join('src', 'umath', 'loops_minmax.dispatch.c.src'),
join('src', 'umath', 'loops_trigonometric.dispatch.c.src'),
join('src', 'umath', 'loops_umath_fp.dispatch.c.src'),
join('src', 'umath', 'loops_exponent_log.dispatch.c.src'),
join('src', 'umath', 'loops_hyperbolic.dispatch.c.src'),
join('src', 'umath', 'loops_modulo.dispatch.c.src'),
join('src', 'umath', 'matmul.h.src'),
join('src', 'umath', 'matmul.c.src'),
join('src', 'umath', 'clip.h'),
join('src', 'umath', 'clip.cpp'),
join('src', 'umath', 'dispatching.c'),
join('src', 'umath', 'legacy_array_method.c'),
join('src', 'umath', 'wrapping_array_method.c'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'extobj.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c'),
join('src', 'umath', 'override.c'),
# For testing. Eventually, should use public API and be separate:
join('src', 'umath', '_scaled_float_dtype.c'),
]
umath_deps = [
generate_umath_py,
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'common', 'templ_common.h.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'override.h'),
join(codegen_dir, 'generate_ufunc_api.py'),
join(codegen_dir, 'ufunc_docstrings.py'),
]
svml_path = join('numpy', 'core', 'src', 'umath', 'svml')
svml_objs = []
    # We have converted the following routines to universal intrinsics so
    # that all platforms get the performance benefits, not just AVX512 on
    # Linux -- with no performance/accuracy regression; quite the opposite:
    # better performance and more maintainable code.
svml_filter = (
'svml_z0_tanh_d_la.s', 'svml_z0_tanh_s_la.s'
)
if can_link_svml() and check_svml_submodule(svml_path):
svml_objs = glob.glob(svml_path + '/**/*.s', recursive=True)
svml_objs = [o for o in svml_objs if not o.endswith(svml_filter)]
# The ordering of names returned by glob is undefined, so we sort
# to make builds reproducible.
svml_objs.sort()
config.add_extension('_multiarray_umath',
# Forcing C language even though we have C++ sources.
                         # It forces the C linker and avoids linking the C++ runtime.
language = 'c',
sources=multiarray_src + umath_src +
common_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py'),
generate_umath_c,
generate_umath_doc_header,
generate_ufunc_api,
],
depends=deps + multiarray_deps + umath_deps +
common_deps,
libraries=['npymath'],
extra_objects=svml_objs,
extra_info=extra_info,
extra_cxx_compile_args=NPY_CXX_FLAGS)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('_umath_tests', sources=[
join('src', 'umath', '_umath_tests.c.src'),
join('src', 'umath', '_umath_tests.dispatch.c'),
join('src', 'common', 'npy_cpu_features.c'),
])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('_rational_tests',
sources=[join('src', 'umath', '_rational_tests.c')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('_struct_ufunc_tests',
sources=[join('src', 'umath', '_struct_ufunc_tests.c')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('_operand_flag_tests',
sources=[join('src', 'umath', '_operand_flag_tests.c')])
#######################################################################
# SIMD module #
#######################################################################
config.add_extension('_simd', sources=[
join('src', 'common', 'npy_cpu_features.c'),
join('src', '_simd', '_simd.c'),
join('src', '_simd', '_simd_inc.h.src'),
join('src', '_simd', '_simd_data.inc.src'),
join('src', '_simd', '_simd.dispatch.c.src'),
], depends=[
join('src', 'common', 'npy_cpu_dispatch.h'),
join('src', 'common', 'simd', 'simd.h'),
join('src', '_simd', '_simd.h'),
join('src', '_simd', '_simd_inc.h.src'),
join('src', '_simd', '_simd_data.inc.src'),
join('src', '_simd', '_simd_arg.inc'),
join('src', '_simd', '_simd_convert.inc'),
join('src', '_simd', '_simd_easyintrin.inc'),
join('src', '_simd', '_simd_vector.inc'),
])
config.add_subpackage('tests')
config.add_data_dir('tests/data')
config.add_data_dir('tests/examples')
config.add_data_files('*.pyi')
config.make_svn_version_py()
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| 52,651 | Python | 42.51405 | 113 | 0.526809 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_add_newdocs_scalars.py | """
This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
our sphinx ``conf.py`` during doc builds, where we want to avoid showing
platform-dependent information.
"""
from numpy.core import dtype
from numpy.core import numerictypes as _numerictypes
from numpy.core.function_base import add_newdoc
import platform
##############################################################################
#
# Documentation for concrete scalar classes
#
##############################################################################
def numeric_type_aliases(aliases):
def type_aliases_gen():
for alias, doc in aliases:
try:
alias_type = getattr(_numerictypes, alias)
except AttributeError:
# The set of aliases that actually exist varies between platforms
pass
else:
yield (alias_type, alias, doc)
return list(type_aliases_gen())
possible_aliases = numeric_type_aliases([
('int8', '8-bit signed integer (``-128`` to ``127``)'),
('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
('float96', '96-bit extended-precision floating-point number type'),
('float128', '128-bit extended-precision floating-point number type'),
('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
])
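# For example, on a typical x86_64 Linux build ``numpy.float128`` exists (an
# 80-bit extended double padded to 128 bits) while ``numpy.float96`` does
# not; the reverse tends to hold on 32-bit x86. ``numeric_type_aliases``
# silently skips whichever names are absent on the current platform.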
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
# note: `:field: value` is rST syntax which renders as field lists.
o = getattr(_numerictypes, obj)
character_code = dtype(o).char
canonical_name_doc = "" if obj == o.__name__ else ":Canonical name: `numpy.{}`\n ".format(obj)
alias_doc = ''.join(":Alias: `numpy.{}`\n ".format(alias) for alias in fixed_aliases)
alias_doc += ''.join(":Alias on this platform ({} {}): `numpy.{}`: {}.\n ".format(platform.system(), platform.machine(), alias, doc)
for (alias_type, alias, doc) in possible_aliases if alias_type is o)
docstring = """
{doc}
:Character code: ``'{character_code}'``
{canonical_name_doc}{alias_doc}
""".format(doc=doc.strip(), character_code=character_code,
canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
add_newdoc('numpy.core.numerictypes', obj, docstring)
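# A hypothetical rendering of the docstring assembled above, here for
# ``add_newdoc_for_scalar_type('intc', [], ...)`` (aliases vary by platform):
#
#     Signed integer type, compatible with C ``int``.
#
#     :Character code: ``'i'``
#     :Alias on this platform (Linux x86_64): `numpy.int32`: 32-bit signed
#     integer (``-2_147_483_648`` to ``2_147_483_647``).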
add_newdoc_for_scalar_type('bool_', ['bool8'],
"""
Boolean type (True or False), stored as a byte.
.. warning::
The :class:`bool_` type is not a subclass of the :class:`int_` type
(the :class:`bool_` is not even a number type). This is different
than Python's default implementation of :class:`bool` as a
sub-class of :class:`int`.
""")
add_newdoc_for_scalar_type('byte', [],
"""
Signed integer type, compatible with C ``char``.
""")
add_newdoc_for_scalar_type('short', [],
"""
Signed integer type, compatible with C ``short``.
""")
add_newdoc_for_scalar_type('intc', [],
"""
Signed integer type, compatible with C ``int``.
""")
add_newdoc_for_scalar_type('int_', [],
"""
Signed integer type, compatible with Python `int` and C ``long``.
""")
add_newdoc_for_scalar_type('longlong', [],
"""
Signed integer type, compatible with C ``long long``.
""")
add_newdoc_for_scalar_type('ubyte', [],
"""
Unsigned integer type, compatible with C ``unsigned char``.
""")
add_newdoc_for_scalar_type('ushort', [],
"""
Unsigned integer type, compatible with C ``unsigned short``.
""")
add_newdoc_for_scalar_type('uintc', [],
"""
Unsigned integer type, compatible with C ``unsigned int``.
""")
add_newdoc_for_scalar_type('uint', [],
"""
Unsigned integer type, compatible with C ``unsigned long``.
""")
add_newdoc_for_scalar_type('ulonglong', [],
"""
    Unsigned integer type, compatible with C ``unsigned long long``.
""")
add_newdoc_for_scalar_type('half', [],
"""
Half-precision floating-point number type.
""")
add_newdoc_for_scalar_type('single', [],
"""
Single-precision floating-point number type, compatible with C ``float``.
""")
add_newdoc_for_scalar_type('double', ['float_'],
"""
Double-precision floating-point number type, compatible with Python `float`
and C ``double``.
""")
add_newdoc_for_scalar_type('longdouble', ['longfloat'],
"""
Extended-precision floating-point number type, compatible with C
``long double`` but not necessarily with IEEE 754 quadruple-precision.
""")
add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
"""
Complex number type composed of two single-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
"""
Complex number type composed of two double-precision floating-point
numbers, compatible with Python `complex`.
""")
add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
"""
Complex number type composed of two extended-precision floating-point
numbers.
""")
add_newdoc_for_scalar_type('object_', [],
"""
Any Python object.
""")
add_newdoc_for_scalar_type('str_', ['unicode_'],
r"""
A unicode string.
When used in arrays, this type strips trailing null codepoints.
Unlike the builtin `str`, this supports the :ref:`python:bufferobjects`, exposing its
contents as UCS4:
>>> m = memoryview(np.str_("abc"))
>>> m.format
'3w'
>>> m.tobytes()
b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
""")
add_newdoc_for_scalar_type('bytes_', ['string_'],
r"""
A byte string.
When used in arrays, this type strips trailing null bytes.
""")
add_newdoc_for_scalar_type('void', [],
r"""
Either an opaque sequence of bytes, or a structure.
>>> np.void(b'abcd')
void(b'\x61\x62\x63\x64')
Structured `void` scalars can only be constructed via extraction from :ref:`structured_arrays`:
>>> arr = np.array((1, 2), dtype=[('x', np.int8), ('y', np.int8)])
>>> arr[()]
(1, 2) # looks like a tuple, but is `np.void`
""")
add_newdoc_for_scalar_type('datetime64', [],
"""
If created from a 64-bit integer, it represents an offset from
``1970-01-01T00:00:00``.
    If created from a string, the string can be in ISO 8601 date
or datetime format.
>>> np.datetime64(10, 'Y')
numpy.datetime64('1980')
>>> np.datetime64('1980', 'Y')
numpy.datetime64('1980')
>>> np.datetime64(10, 'D')
numpy.datetime64('1970-01-11')
See :ref:`arrays.datetime` for more information.
""")
add_newdoc_for_scalar_type('timedelta64', [],
"""
A timedelta stored as a 64-bit integer.
See :ref:`arrays.datetime` for more information.
""")
add_newdoc('numpy.core.numerictypes', "integer", ('is_integer',
"""
integer.is_integer() -> bool
Return ``True`` if the number is finite with integral value.
.. versionadded:: 1.22
Examples
--------
>>> np.int64(-2).is_integer()
True
>>> np.uint32(5).is_integer()
True
"""))
# TODO: work out how to put this on the base class, np.floating
for float_name in ('half', 'single', 'double', 'longdouble'):
add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
"""
{ftype}.as_integer_ratio() -> (int, int)
Return a pair of integers, whose ratio is exactly equal to the original
floating point number, and with a positive denominator.
Raise `OverflowError` on infinities and a `ValueError` on NaNs.
>>> np.{ftype}(10.0).as_integer_ratio()
(10, 1)
>>> np.{ftype}(0.0).as_integer_ratio()
(0, 1)
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))
add_newdoc('numpy.core.numerictypes', float_name, ('is_integer',
f"""
{float_name}.is_integer() -> bool
Return ``True`` if the floating point number is finite with integral
value, and ``False`` otherwise.
.. versionadded:: 1.22
Examples
--------
>>> np.{float_name}(-2.0).is_integer()
True
>>> np.{float_name}(3.2).is_integer()
False
"""))
for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
                 'int64', 'uint64'):
# Add negative examples for signed cases by checking typecode
add_newdoc('numpy.core.numerictypes', int_name, ('bit_count',
f"""
{int_name}.bit_count() -> int
Computes the number of 1-bits in the absolute value of the input.
Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
Examples
--------
>>> np.{int_name}(127).bit_count()
7""" +
(f"""
>>> np.{int_name}(-127).bit_count()
7
""" if dtype(int_name).char.islower() else "")))
| 10,232 | Python | 31.798077 | 139 | 0.600762 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/defchararray.py | """
This module contains a set of functions for vectorized string
operations and methods.
.. note::
The `chararray` class exists for backwards compatibility with
Numarray, it is not recommended for new development. Starting from numpy
1.4, if one needs arrays of strings, it is recommended to use arrays of
`dtype` `object_`, `string_` or `unicode_`, and use the free functions
in the `numpy.char` module for fast vectorized string operations.
Some methods will only be available if the corresponding string method is
available in your version of Python.
The preferred alias for `defchararray` is `numpy.char`.
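For example, a minimal sketch of the recommended usage:

>>> import numpy as np
>>> np.char.upper(np.array(['hello', 'world']))
array(['HELLO', 'WORLD'], dtype='<U5')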
"""
import functools
from .numerictypes import (
string_, unicode_, integer, int_, object_, bool_, character)
from .numeric import ndarray, compare_chararrays
from .numeric import array as narray
from numpy.core.multiarray import _vec_string
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.compat import asbytes
import numpy
__all__ = [
'equal', 'not_equal', 'greater_equal', 'less_equal',
'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
'array', 'asarray'
]
_globalvar = 0
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.char')
def _use_unicode(*args):
"""
Helper function for determining the output type of some string
operations.
For an operation on two ndarrays, if at least one is unicode, the
result should be unicode.
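    For example (illustrative):

    >>> _use_unicode(b'a', 'b') is unicode_   # one unicode input wins
    True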
"""
for x in args:
if (isinstance(x, str) or
issubclass(numpy.asarray(x).dtype.type, unicode_)):
return unicode_
return string_
def _to_string_or_unicode_array(result):
"""
Helper function to cast a result back into a string or unicode array
if an object array must be used as an intermediary.
"""
return numpy.asarray(result.tolist())
def _clean_args(*args):
"""
Helper function for delegating arguments to Python string
functions.
Many of the Python string operations that have optional arguments
do not use 'None' to indicate a default value. In these cases,
we need to remove all None arguments, and those following them.
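    For example (illustrative):

    >>> _clean_args('x', None, 1)   # everything from the first None is dropped
    ['x']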
"""
newargs = []
for chk in args:
if chk is None:
break
newargs.append(chk)
return newargs
def _get_num_chars(a):
"""
Helper function that returns the number of characters per field in
a string or unicode array. This is to abstract out the fact that
for a unicode array this is itemsize / 4.
"""
if issubclass(a.dtype.type, unicode_):
return a.itemsize // 4
return a.itemsize
def _binary_op_dispatcher(x1, x2):
return (x1, x2)
@array_function_dispatch(_binary_op_dispatcher)
def equal(x1, x2):
"""
Return (x1 == x2) element-wise.
Unlike `numpy.equal`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray
Output array of bools.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
"""
return compare_chararrays(x1, x2, '==', True)
@array_function_dispatch(_binary_op_dispatcher)
def not_equal(x1, x2):
"""
Return (x1 != x2) element-wise.
Unlike `numpy.not_equal`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray
Output array of bools.
See Also
--------
equal, greater_equal, less_equal, greater, less
"""
return compare_chararrays(x1, x2, '!=', True)
@array_function_dispatch(_binary_op_dispatcher)
def greater_equal(x1, x2):
"""
Return (x1 >= x2) element-wise.
Unlike `numpy.greater_equal`, this comparison is performed by
first stripping whitespace characters from the end of the string.
This behavior is provided for backward-compatibility with
numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray
Output array of bools.
See Also
--------
equal, not_equal, less_equal, greater, less
"""
return compare_chararrays(x1, x2, '>=', True)
@array_function_dispatch(_binary_op_dispatcher)
def less_equal(x1, x2):
"""
Return (x1 <= x2) element-wise.
Unlike `numpy.less_equal`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray
Output array of bools.
See Also
--------
equal, not_equal, greater_equal, greater, less
"""
return compare_chararrays(x1, x2, '<=', True)
@array_function_dispatch(_binary_op_dispatcher)
def greater(x1, x2):
"""
Return (x1 > x2) element-wise.
Unlike `numpy.greater`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray
Output array of bools.
See Also
--------
equal, not_equal, greater_equal, less_equal, less
"""
return compare_chararrays(x1, x2, '>', True)
@array_function_dispatch(_binary_op_dispatcher)
def less(x1, x2):
"""
Return (x1 < x2) element-wise.
Unlike `numpy.greater`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray
Output array of bools.
See Also
--------
equal, not_equal, greater_equal, less_equal, greater
"""
return compare_chararrays(x1, x2, '<', True)
def _unary_op_dispatcher(a):
return (a,)
@array_function_dispatch(_unary_op_dispatcher)
def str_len(a):
"""
Return len(a) element-wise.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of integers
See Also
--------
builtins.len
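    Examples
    --------
    A short illustration:

    >>> a = np.array(['Grace Hopper Conference', 'Open Source Day'])
    >>> np.char.str_len(a)
    array([23, 15])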
"""
# Note: __len__, etc. currently return ints, which are not C-integers.
# Generally intp would be expected for lengths, although int is sufficient
# due to the dtype itemsize limitation.
return _vec_string(a, int_, '__len__')
@array_function_dispatch(_binary_op_dispatcher)
def add(x1, x2):
"""
Return element-wise string concatenation for two arrays of str or unicode.
Arrays `x1` and `x2` must have the same shape.
Parameters
----------
x1 : array_like of str or unicode
Input array.
x2 : array_like of str or unicode
Input array.
Returns
-------
add : ndarray
Output array of `string_` or `unicode_`, depending on input types
of the same shape as `x1` and `x2`.
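    Examples
    --------
    A short illustration (the output itemsize is the sum of the input
    itemsizes):

    >>> np.char.add(['num', 'doc'], ['py', 'umentation'])
    array(['numpy', 'documentation'], dtype='<U13')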
"""
arr1 = numpy.asarray(x1)
arr2 = numpy.asarray(x2)
out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
dtype = _use_unicode(arr1, arr2)
return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))
def _multiply_dispatcher(a, i):
return (a,)
@array_function_dispatch(_multiply_dispatcher)
def multiply(a, i):
"""
Return (a * i), that is string multiple concatenation,
element-wise.
Values in `i` of less than 0 are treated as 0 (which yields an
empty string).
Parameters
----------
a : array_like of str or unicode
i : array_like of ints
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
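    Examples
    --------
    A short illustration:

    >>> a = np.array(['a', 'b', 'c'])
    >>> np.char.multiply(a, 3)
    array(['aaa', 'bbb', 'ccc'], dtype='<U3')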
"""
a_arr = numpy.asarray(a)
i_arr = numpy.asarray(i)
if not issubclass(i_arr.dtype.type, integer):
raise ValueError("Can only multiply by integers")
out_size = _get_num_chars(a_arr) * max(int(i_arr.max()), 0)
return _vec_string(
a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
def _mod_dispatcher(a, values):
return (a, values)
@array_function_dispatch(_mod_dispatcher)
def mod(a, values):
"""
Return (a % i), that is pre-Python 2.6 string formatting
(interpolation), element-wise for a pair of array_likes of str
or unicode.
Parameters
----------
a : array_like of str or unicode
values : array_like of values
These values will be element-wise interpolated into the string.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
See Also
--------
str.__mod__
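    Examples
    --------
    A sketch (assumes `values` broadcasts element-wise against `a`, as
    `_vec_string` arguments do):

    >>> a = np.array(['%d degrees', '%.1f degrees'])
    >>> np.char.mod(a, np.array([10, 20]))
    array(['10 degrees', '20.0 degrees'], dtype='<U12')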
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, '__mod__', (values,)))
@array_function_dispatch(_unary_op_dispatcher)
def capitalize(a):
"""
Return a copy of `a` with only the first character of each element
capitalized.
Calls `str.capitalize` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Input array of strings to capitalize.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input
types
See Also
--------
str.capitalize
Examples
--------
>>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
array(['a1b2', '1b2a', 'b2a1', '2a1b'],
dtype='|S4')
>>> np.char.capitalize(c)
array(['A1b2', '1b2a', 'B2a1', '2a1b'],
dtype='|S4')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'capitalize')
def _center_dispatcher(a, width, fillchar=None):
return (a,)
@array_function_dispatch(_center_dispatcher)
def center(a, width, fillchar=' '):
"""
Return a copy of `a` with its elements centered in a string of
length `width`.
Calls `str.center` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The padding character to use (default is space).
Returns
-------
out : ndarray
Output array of str or unicode, depending on input
types
See Also
--------
str.center
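    Examples
    --------
    A short illustration:

    >>> c = np.array(['a1b2', '1b2a', 'b2a1', '2a1b']); c
    array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4')
    >>> np.char.center(c, width=9)
    array(['   a1b2  ', '   1b2a  ', '   b2a1  ', '   2a1b  '], dtype='<U9')
    >>> np.char.center(c, width=9, fillchar='*')
    array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='<U9')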
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = int(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
def _count_dispatcher(a, sub, start=None, end=None):
return (a,)
@array_function_dispatch(_count_dispatcher)
def count(a, sub, start=0, end=None):
"""
Returns an array with the number of non-overlapping occurrences of
substring `sub` in the range [`start`, `end`].
Calls `str.count` element-wise.
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
The substring to search for.
start, end : int, optional
Optional arguments `start` and `end` are interpreted as slice
notation to specify the range in which to count.
Returns
-------
out : ndarray
Output array of ints.
See Also
--------
str.count
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> np.char.count(c, 'A')
array([3, 1, 1])
>>> np.char.count(c, 'aA')
array([3, 1, 0])
>>> np.char.count(c, 'A', start=1, end=4)
array([2, 1, 1])
>>> np.char.count(c, 'A', start=1, end=3)
array([1, 0, 0])
"""
return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))
def _code_dispatcher(a, encoding=None, errors=None):
return (a,)
@array_function_dispatch(_code_dispatcher)
def decode(a, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
The set of available codecs comes from the Python standard library,
and may be extended at runtime. For more information, see the
:mod:`codecs` module.
Parameters
----------
a : array_like of str or unicode
encoding : str, optional
The name of an encoding
errors : str, optional
Specifies how to handle encoding errors
Returns
-------
out : ndarray
See Also
--------
str.decode
Notes
-----
The type of the result will depend on the encoding specified.
Examples
--------
    >>> c = np.array([b'\\x81\\xc1\\x81\\xc1\\x81\\xc1', b'@@\\x81\\xc1@@',
    ...               b'\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'])
    >>> np.char.decode(c, encoding='cp037')
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
@array_function_dispatch(_code_dispatcher)
def encode(a, encoding=None, errors=None):
"""
Calls `str.encode` element-wise.
The set of available codecs comes from the Python standard library,
and may be extended at runtime. For more information, see the codecs
module.
Parameters
----------
a : array_like of str or unicode
encoding : str, optional
The name of an encoding
errors : str, optional
Specifies how to handle encoding errors
Returns
-------
out : ndarray
See Also
--------
str.encode
Notes
-----
The type of the result will depend on the encoding specified.
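    Examples
    --------
    A minimal sketch:

    >>> np.char.encode(np.array(['hello']), encoding='ascii')
    array([b'hello'], dtype='|S5')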
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
def _endswith_dispatcher(a, suffix, start=None, end=None):
return (a,)
@array_function_dispatch(_endswith_dispatcher)
def endswith(a, suffix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `a` ends with `suffix`, otherwise `False`.
Calls `str.endswith` element-wise.
Parameters
----------
a : array_like of str or unicode
suffix : str
start, end : int, optional
With optional `start`, test beginning at that position. With
optional `end`, stop comparing at that position.
Returns
-------
out : ndarray
Outputs an array of bools.
See Also
--------
str.endswith
Examples
--------
>>> s = np.array(['foo', 'bar'])
>>> s[0] = 'foo'
>>> s[1] = 'bar'
>>> s
array(['foo', 'bar'], dtype='<U3')
>>> np.char.endswith(s, 'ar')
array([False, True])
>>> np.char.endswith(s, 'a', start=1, end=2)
array([False, True])
"""
return _vec_string(
a, bool_, 'endswith', [suffix, start] + _clean_args(end))
def _expandtabs_dispatcher(a, tabsize=None):
return (a,)
@array_function_dispatch(_expandtabs_dispatcher)
def expandtabs(a, tabsize=8):
"""
Return a copy of each string element where all tab characters are
replaced by one or more spaces.
Calls `str.expandtabs` element-wise.
Return a copy of each string element where all tab characters are
replaced by one or more spaces, depending on the current column
and the given `tabsize`. The column number is reset to zero after
each newline occurring in the string. This doesn't understand other
non-printing characters or escape sequences.
Parameters
----------
a : array_like of str or unicode
Input array
tabsize : int, optional
Replace tabs with `tabsize` number of spaces. If not given defaults
to 8 spaces.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.expandtabs
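    Examples
    --------
    A short illustration (with ``tabsize=4``, each tab advances to the
    next multiple of four columns):

    >>> a = np.array(['\\t\\tHello\\tworld'])
    >>> np.char.expandtabs(a, tabsize=4)
    array(['        Hello   world'], dtype='<U21')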
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'expandtabs', (tabsize,)))
@array_function_dispatch(_count_dispatcher)
def find(a, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
substring `sub` is found.
Calls `str.find` element-wise.
For each element, return the lowest index in the string where
substring `sub` is found, such that `sub` is contained in the
range [`start`, `end`].
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
start, end : int, optional
Optional arguments `start` and `end` are interpreted as in
slice notation.
Returns
-------
out : ndarray or int
Output array of ints. Returns -1 if `sub` is not found.
See Also
--------
str.find
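    Examples
    --------
    A short illustration:

    >>> a = np.array(['NumPy is a Python library'])
    >>> np.char.find(a, 'Python')
    array([11])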
"""
return _vec_string(
a, int_, 'find', [sub, start] + _clean_args(end))
@array_function_dispatch(_count_dispatcher)
def index(a, sub, start=0, end=None):
"""
Like `find`, but raises `ValueError` when the substring is not found.
Calls `str.index` element-wise.
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
start, end : int, optional
Returns
-------
out : ndarray
        Output array of ints. Unlike `find`, raises `ValueError` rather
        than returning -1 when `sub` is not found.
See Also
--------
find, str.find
"""
return _vec_string(
a, int_, 'index', [sub, start] + _clean_args(end))
@array_function_dispatch(_unary_op_dispatcher)
def isalnum(a):
"""
Returns true for each element if all characters in the string are
alphanumeric and there is at least one character, false otherwise.
Calls `str.isalnum` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.isalnum
"""
return _vec_string(a, bool_, 'isalnum')
@array_function_dispatch(_unary_op_dispatcher)
def isalpha(a):
"""
Returns true for each element if all characters in the string are
alphabetic and there is at least one character, false otherwise.
Calls `str.isalpha` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See Also
--------
str.isalpha
"""
return _vec_string(a, bool_, 'isalpha')
@array_function_dispatch(_unary_op_dispatcher)
def isdigit(a):
"""
Returns true for each element if all characters in the string are
digits and there is at least one character, false otherwise.
Calls `str.isdigit` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See Also
--------
str.isdigit
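    Examples
    --------
    A short illustration:

    >>> a = np.array(['a', 'b', '0'])
    >>> np.char.isdigit(a)
    array([False, False,  True])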
"""
return _vec_string(a, bool_, 'isdigit')
@array_function_dispatch(_unary_op_dispatcher)
def islower(a):
"""
Returns true for each element if all cased characters in the
string are lowercase and there is at least one cased character,
false otherwise.
Calls `str.islower` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See Also
--------
str.islower
"""
return _vec_string(a, bool_, 'islower')
@array_function_dispatch(_unary_op_dispatcher)
def isspace(a):
"""
Returns true for each element if there are only whitespace
characters in the string and there is at least one character,
false otherwise.
Calls `str.isspace` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See Also
--------
str.isspace
"""
return _vec_string(a, bool_, 'isspace')
@array_function_dispatch(_unary_op_dispatcher)
def istitle(a):
"""
Returns true for each element if the element is a titlecased
string and there is at least one character, false otherwise.
Call `str.istitle` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See Also
--------
str.istitle
"""
return _vec_string(a, bool_, 'istitle')
@array_function_dispatch(_unary_op_dispatcher)
def isupper(a):
"""
Returns true for each element if all cased characters in the
string are uppercase and there is at least one character, false
otherwise.
Call `str.isupper` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See Also
--------
str.isupper
"""
return _vec_string(a, bool_, 'isupper')
def _join_dispatcher(sep, seq):
return (sep, seq)
@array_function_dispatch(_join_dispatcher)
def join(sep, seq):
"""
Return a string which is the concatenation of the strings in the
sequence `seq`.
Calls `str.join` element-wise.
Parameters
----------
sep : array_like of str or unicode
seq : array_like of str or unicode
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
See Also
--------
str.join
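    Examples
    --------
    A short illustration (`sep` and `seq` are paired element-wise):

    >>> np.char.join('-', 'osd')
    array('o-s-d', dtype='<U5')
    >>> np.char.join(['-', '.'], ['ghc', 'osd'])
    array(['g-h-c', 'o.s.d'], dtype='<U5')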
"""
return _to_string_or_unicode_array(
_vec_string(sep, object_, 'join', (seq,)))
def _just_dispatcher(a, width, fillchar=None):
return (a,)
@array_function_dispatch(_just_dispatcher)
def ljust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` left-justified in a
string of length `width`.
Calls `str.ljust` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The character to use for padding
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.ljust
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = int(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
@array_function_dispatch(_unary_op_dispatcher)
def lower(a):
"""
Return an array with the elements converted to lowercase.
Call `str.lower` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See Also
--------
str.lower
Examples
--------
>>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
>>> np.char.lower(c)
array(['a1b c', '1bca', 'bca1'], dtype='<U5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'lower')
def _strip_dispatcher(a, chars=None):
return (a,)
@array_function_dispatch(_strip_dispatcher)
def lstrip(a, chars=None):
"""
For each element in `a`, return a copy with the leading characters
removed.
Calls `str.lstrip` element-wise.
Parameters
----------
a : array-like, {str, unicode}
Input array.
chars : {str, unicode}, optional
The `chars` argument is a string specifying the set of
characters to be removed. If omitted or None, the `chars`
argument defaults to removing whitespace. The `chars` argument
is not a prefix; rather, all combinations of its values are
stripped.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See Also
--------
str.lstrip
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
    Note that 'a' is not stripped from c[1], because that element begins
    with whitespace rather than 'a'.
>>> np.char.lstrip(c, 'a')
array(['AaAaA', ' aA ', 'bBABba'], dtype='<U7')
>>> np.char.lstrip(c, 'A') # leaves c unchanged
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
... # XXX: is this a regression? This used to return True
... # np.char.lstrip(c,'') does not modify c at all.
False
>>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
True
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
def _partition_dispatcher(a, sep):
return (a,)
@array_function_dispatch(_partition_dispatcher)
def partition(a, sep):
"""
Partition each element in `a` around `sep`.
Calls `str.partition` element-wise.
For each element in `a`, split the element as the first
occurrence of `sep`, and return 3 strings containing the part
before the separator, the separator itself, and the part after
the separator. If the separator is not found, return 3 strings
containing the string itself, followed by two empty strings.
Parameters
----------
a : array_like, {str, unicode}
Input array
sep : {str, unicode}
Separator to split each string element in `a`.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type.
The output array will have an extra dimension with 3
elements per input element.
See Also
--------
str.partition
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'partition', (sep,)))
def _replace_dispatcher(a, old, new, count=None):
return (a,)
@array_function_dispatch(_replace_dispatcher)
def replace(a, old, new, count=None):
"""
For each element in `a`, return a copy of the string with all
occurrences of substring `old` replaced by `new`.
Calls `str.replace` element-wise.
Parameters
----------
a : array-like of str or unicode
old, new : str or unicode
count : int, optional
If the optional argument `count` is given, only the first
`count` occurrences are replaced.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.replace
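    Examples
    --------
    A short illustration:

    >>> a = np.array(['That is a mango', 'Monkeys eat mangos'])
    >>> np.char.replace(a, 'mango', 'banana')
    array(['That is a banana', 'Monkeys eat bananas'], dtype='<U19')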
"""
return _to_string_or_unicode_array(
_vec_string(
a, object_, 'replace', [old, new] + _clean_args(count)))
@array_function_dispatch(_count_dispatcher)
def rfind(a, sub, start=0, end=None):
"""
For each element in `a`, return the highest index in the string
where substring `sub` is found, such that `sub` is contained
within [`start`, `end`].
Calls `str.rfind` element-wise.
Parameters
----------
a : array-like of str or unicode
sub : str or unicode
start, end : int, optional
Optional arguments `start` and `end` are interpreted as in
slice notation.
Returns
-------
out : ndarray
Output array of ints. Return -1 on failure.
See Also
--------
str.rfind
"""
return _vec_string(
a, int_, 'rfind', [sub, start] + _clean_args(end))
@array_function_dispatch(_count_dispatcher)
def rindex(a, sub, start=0, end=None):
"""
Like `rfind`, but raises `ValueError` when the substring `sub` is
not found.
Calls `str.rindex` element-wise.
Parameters
----------
a : array-like of str or unicode
sub : str or unicode
start, end : int, optional
Returns
-------
out : ndarray
Output array of ints.
See Also
--------
rfind, str.rindex
"""
return _vec_string(
a, int_, 'rindex', [sub, start] + _clean_args(end))
@array_function_dispatch(_just_dispatcher)
def rjust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` right-justified in a
string of length `width`.
Calls `str.rjust` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The character to use for padding
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.rjust
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = int(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
@array_function_dispatch(_partition_dispatcher)
def rpartition(a, sep):
"""
Partition (split) each element around the right-most separator.
Calls `str.rpartition` element-wise.
For each element in `a`, split the element as the last
occurrence of `sep`, and return 3 strings containing the part
before the separator, the separator itself, and the part after
the separator. If the separator is not found, return 3 strings
containing the string itself, followed by two empty strings.
Parameters
----------
a : array_like of str or unicode
Input array
sep : str or unicode
Right-most separator to split each element in array.
Returns
-------
out : ndarray
Output array of string or unicode, depending on input
type. The output array will have an extra dimension with
3 elements per input element.
See Also
--------
str.rpartition
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'rpartition', (sep,)))
def _split_dispatcher(a, sep=None, maxsplit=None):
return (a,)
@array_function_dispatch(_split_dispatcher)
def rsplit(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
string, using `sep` as the delimiter string.
Calls `str.rsplit` element-wise.
Except for splitting from the right, `rsplit`
behaves like `split`.
Parameters
----------
a : array_like of str or unicode
sep : str or unicode, optional
If `sep` is not specified or None, any whitespace string
is a separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done,
the rightmost ones.
Returns
-------
out : ndarray
Array of list objects
See Also
--------
str.rsplit, split
"""
# This will return an array of lists of different sizes, so we
# leave it as an object array
return _vec_string(
a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
def _strip_dispatcher(a, chars=None):
return (a,)
@array_function_dispatch(_strip_dispatcher)
def rstrip(a, chars=None):
"""
For each element in `a`, return a copy with the trailing
characters removed.
Calls `str.rstrip` element-wise.
Parameters
----------
a : array-like of str or unicode
chars : str or unicode, optional
The `chars` argument is a string specifying the set of
characters to be removed. If omitted or None, the `chars`
argument defaults to removing whitespace. The `chars` argument
is not a suffix; rather, all combinations of its values are
stripped.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.rstrip
Examples
--------
>>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
array(['aAaAaA', 'abBABba'],
dtype='|S7')
>>> np.char.rstrip(c, b'a')
array(['aAaAaA', 'abBABb'],
dtype='|S7')
>>> np.char.rstrip(c, b'A')
array(['aAaAa', 'abBABba'],
dtype='|S7')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
@array_function_dispatch(_split_dispatcher)
def split(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
string, using `sep` as the delimiter string.
Calls `str.split` element-wise.
Parameters
----------
a : array_like of str or unicode
sep : str or unicode, optional
If `sep` is not specified or None, any whitespace string is a
separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done.
Returns
-------
out : ndarray
Array of list objects
See Also
--------
str.split, rsplit
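    Examples
    --------
    A short illustration (each element becomes a Python list, so the
    result is an object array):

    >>> x = np.array("Numpy is nice!")
    >>> np.char.split(x, " ")
    array(list(['Numpy', 'is', 'nice!']), dtype=object)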
"""
# This will return an array of lists of different sizes, so we
# leave it as an object array
return _vec_string(
a, object_, 'split', [sep] + _clean_args(maxsplit))
def _splitlines_dispatcher(a, keepends=None):
return (a,)
@array_function_dispatch(_splitlines_dispatcher)
def splitlines(a, keepends=None):
"""
For each element in `a`, return a list of the lines in the
element, breaking at line boundaries.
Calls `str.splitlines` element-wise.
Parameters
----------
a : array_like of str or unicode
keepends : bool, optional
Line breaks are not included in the resulting list unless
keepends is given and true.
Returns
-------
out : ndarray
Array of list objects
See Also
--------
str.splitlines
"""
return _vec_string(
a, object_, 'splitlines', _clean_args(keepends))
def _startswith_dispatcher(a, prefix, start=None, end=None):
return (a,)
@array_function_dispatch(_startswith_dispatcher)
def startswith(a, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `a` starts with `prefix`, otherwise `False`.
Calls `str.startswith` element-wise.
Parameters
----------
a : array_like of str or unicode
prefix : str
start, end : int, optional
With optional `start`, test beginning at that position. With
optional `end`, stop comparing at that position.
Returns
-------
out : ndarray
Array of booleans
See Also
--------
str.startswith
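    Examples
    --------
    A short illustration:

    >>> s = np.array(['foo', 'bar'])
    >>> np.char.startswith(s, 'fo')
    array([ True, False])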
"""
return _vec_string(
a, bool_, 'startswith', [prefix, start] + _clean_args(end))
@array_function_dispatch(_strip_dispatcher)
def strip(a, chars=None):
"""
For each element in `a`, return a copy with the leading and
trailing characters removed.
Calls `str.strip` element-wise.
Parameters
----------
a : array-like of str or unicode
chars : str or unicode, optional
The `chars` argument is a string specifying the set of
characters to be removed. If omitted or None, the `chars`
argument defaults to removing whitespace. The `chars` argument
is not a prefix or suffix; rather, all combinations of its
values are stripped.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.strip
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> np.char.strip(c)
array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
>>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
array(['AaAaA', ' aA ', 'bBABb'], dtype='<U7')
>>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
array(['aAaAa', ' aA ', 'abBABba'], dtype='<U7')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
@array_function_dispatch(_unary_op_dispatcher)
def swapcase(a):
"""
Return element-wise a copy of the string with
uppercase characters converted to lowercase and vice versa.
Calls `str.swapcase` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See Also
--------
str.swapcase
Examples
--------
>>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
dtype='|S5')
>>> np.char.swapcase(c)
array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'swapcase')
@array_function_dispatch(_unary_op_dispatcher)
def title(a):
"""
Return element-wise title cased version of string or unicode.
Title case words start with uppercase characters, all remaining cased
characters are lowercase.
Calls `str.title` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.title
Examples
--------
>>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
dtype='|S5')
>>> np.char.title(c)
array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'title')
def _translate_dispatcher(a, table, deletechars=None):
return (a,)
@array_function_dispatch(_translate_dispatcher)
def translate(a, table, deletechars=None):
"""
For each element in `a`, return a copy of the string where all
characters occurring in the optional argument `deletechars` are
removed, and the remaining characters have been mapped through the
given translation table.
Calls `str.translate` element-wise.
Parameters
----------
a : array-like of str or unicode
table : str of length 256
deletechars : str
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See Also
--------
str.translate
"""
a_arr = numpy.asarray(a)
if issubclass(a_arr.dtype.type, unicode_):
return _vec_string(
a_arr, a_arr.dtype, 'translate', (table,))
else:
return _vec_string(
a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
@array_function_dispatch(_unary_op_dispatcher)
def upper(a):
"""
Return an array with the elements converted to uppercase.
Calls `str.upper` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See Also
--------
str.upper
Examples
--------
>>> c = np.array(['a1b c', '1bca', 'bca1']); c
array(['a1b c', '1bca', 'bca1'], dtype='<U5')
>>> np.char.upper(c)
array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'upper')
def _zfill_dispatcher(a, width):
return (a,)
@array_function_dispatch(_zfill_dispatcher)
def zfill(a, width):
"""
Return the numeric string left-filled with zeros
Calls `str.zfill` element-wise.
Parameters
----------
a : array_like, {str, unicode}
Input array.
width : int
Width of string to left-fill elements in `a`.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See Also
--------
str.zfill
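    Examples
    --------
    A short illustration:

    >>> np.char.zfill('1', 3)
    array('001', dtype='<U3')
    >>> np.char.zfill(['-1', '1', '1000'], 5)
    array(['-0001', '00001', '01000'], dtype='<U5')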
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = int(numpy.max(width_arr.flat))
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
@array_function_dispatch(_unary_op_dispatcher)
def isnumeric(a):
"""
For each element, return True if there are only numeric
characters in the element.
Calls `unicode.isnumeric` element-wise.
Numeric characters include digit characters, and all characters
that have the Unicode numeric value property, e.g. ``U+2155,
VULGAR FRACTION ONE FIFTH``.
Parameters
----------
a : array_like, unicode
Input array.
Returns
-------
out : ndarray, bool
Array of booleans of same shape as `a`.
See Also
--------
unicode.isnumeric
"""
if _use_unicode(a) != unicode_:
raise TypeError("isnumeric is only available for Unicode strings and arrays")
return _vec_string(a, bool_, 'isnumeric')
@array_function_dispatch(_unary_op_dispatcher)
def isdecimal(a):
"""
For each element, return True if there are only decimal
characters in the element.
Calls `unicode.isdecimal` element-wise.
Decimal characters include digit characters, and all characters
that can be used to form decimal-radix numbers,
e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
Parameters
----------
a : array_like, unicode
Input array.
Returns
-------
out : ndarray, bool
Array of booleans identical in shape to `a`.
See Also
--------
unicode.isdecimal
"""
if _use_unicode(a) != unicode_:
raise TypeError("isnumeric is only available for Unicode strings and arrays")
return _vec_string(a, bool_, 'isdecimal')
@set_module('numpy')
class chararray(ndarray):
"""
chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
strides=None, order=None)
Provides a convenient view on arrays of string and unicode values.
.. note::
The `chararray` class exists for backwards compatibility with
Numarray, it is not recommended for new development. Starting from numpy
1.4, if one needs arrays of strings, it is recommended to use arrays of
`dtype` `object_`, `string_` or `unicode_`, and use the free functions
in the `numpy.char` module for fast vectorized string operations.
Versus a regular NumPy array of type `str` or `unicode`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
(e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
chararrays should be created using `numpy.char.array` or
`numpy.char.asarray`, rather than this constructor directly.
This constructor creates the array, using `buffer` (with `offset`
and `strides`) if it is not ``None``. If `buffer` is ``None``, then
constructs a new array with `strides` in "C order", unless both
``len(shape) >= 2`` and ``order='F'``, in which case `strides`
is in "Fortran order".
Methods
-------
astype
argsort
copy
count
decode
dump
dumps
encode
endswith
expandtabs
fill
find
flatten
getfield
index
isalnum
isalpha
isdecimal
isdigit
islower
isnumeric
isspace
istitle
isupper
item
join
ljust
lower
lstrip
nonzero
put
ravel
repeat
replace
reshape
resize
rfind
rindex
rjust
rsplit
rstrip
searchsorted
setfield
setflags
sort
split
splitlines
squeeze
startswith
strip
swapaxes
swapcase
take
title
tofile
tolist
tostring
translate
transpose
upper
view
zfill
Parameters
----------
shape : tuple
Shape of the array.
itemsize : int, optional
Length of each array element, in number of characters. Default is 1.
unicode : bool, optional
Are the array elements of type unicode (True) or string (False).
Default is False.
buffer : object exposing the buffer interface or str, optional
Memory address of the start of the array data. Default is None,
in which case a new array is created.
offset : int, optional
        Offset, in bytes, from the start of `buffer` at which the array
        data begins. Default is 0; must be >= 0.
strides : array_like of ints, optional
Strides for the array (see `ndarray.strides` for full description).
Default is None.
order : {'C', 'F'}, optional
The order in which the array data is stored in memory: 'C' ->
"row major" order (the default), 'F' -> "column major"
(Fortran) order.
Examples
--------
>>> charar = np.chararray((3, 3))
>>> charar[:] = 'a'
>>> charar
chararray([[b'a', b'a', b'a'],
[b'a', b'a', b'a'],
[b'a', b'a', b'a']], dtype='|S1')
>>> charar = np.chararray(charar.shape, itemsize=5)
>>> charar[:] = 'abc'
>>> charar
chararray([[b'abc', b'abc', b'abc'],
[b'abc', b'abc', b'abc'],
[b'abc', b'abc', b'abc']], dtype='|S5')
"""
def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
offset=0, strides=None, order='C'):
global _globalvar
if unicode:
dtype = unicode_
else:
dtype = string_
# force itemsize to be a Python int, since using NumPy integer
# types results in itemsize.itemsize being used as the size of
# strings in the new array.
itemsize = int(itemsize)
if isinstance(buffer, str):
# unicode objects do not have the buffer interface
filler = buffer
buffer = None
else:
filler = None
_globalvar = 1
if buffer is None:
self = ndarray.__new__(subtype, shape, (dtype, itemsize),
order=order)
else:
self = ndarray.__new__(subtype, shape, (dtype, itemsize),
buffer=buffer,
offset=offset, strides=strides,
order=order)
if filler is not None:
self[...] = filler
_globalvar = 0
return self
def __array_finalize__(self, obj):
        # The 'b' dtype char is a special case because it is used when
        # reconstructing (e.g. unpickling) chararrays.
if not _globalvar and self.dtype.char not in 'SUbc':
raise ValueError("Can only create a chararray from string data.")
def __getitem__(self, obj):
val = ndarray.__getitem__(self, obj)
if isinstance(val, character):
temp = val.rstrip()
if len(temp) == 0:
val = ''
else:
val = temp
return val
# IMPLEMENTATION NOTE: Most of the methods of this class are
# direct delegations to the free functions in this module.
# However, those that return an array of strings should instead
# return a chararray, so some extra wrapping is required.
def __eq__(self, other):
"""
Return (self == other) element-wise.
See Also
--------
equal
"""
return equal(self, other)
def __ne__(self, other):
"""
Return (self != other) element-wise.
See Also
--------
not_equal
"""
return not_equal(self, other)
def __ge__(self, other):
"""
Return (self >= other) element-wise.
See Also
--------
greater_equal
"""
return greater_equal(self, other)
def __le__(self, other):
"""
Return (self <= other) element-wise.
See Also
--------
less_equal
"""
return less_equal(self, other)
def __gt__(self, other):
"""
Return (self > other) element-wise.
See Also
--------
greater
"""
return greater(self, other)
def __lt__(self, other):
"""
Return (self < other) element-wise.
See Also
--------
less
"""
return less(self, other)
def __add__(self, other):
"""
Return (self + other), that is string concatenation,
element-wise for a pair of array_likes of str or unicode.
See Also
--------
add
"""
return asarray(add(self, other))
def __radd__(self, other):
"""
Return (other + self), that is string concatenation,
element-wise for a pair of array_likes of `string_` or `unicode_`.
See Also
--------
add
"""
return asarray(add(numpy.asarray(other), self))
def __mul__(self, i):
"""
Return (self * i), that is string multiple concatenation,
element-wise.
See Also
--------
multiply
"""
return asarray(multiply(self, i))
def __rmul__(self, i):
"""
Return (self * i), that is string multiple concatenation,
element-wise.
See Also
--------
multiply
"""
return asarray(multiply(self, i))
def __mod__(self, i):
"""
Return (self % i), that is pre-Python 2.6 string formatting
(interpolation), element-wise for a pair of array_likes of `string_`
or `unicode_`.
See Also
--------
mod
"""
return asarray(mod(self, i))
def __rmod__(self, other):
return NotImplemented
def argsort(self, axis=-1, kind=None, order=None):
"""
Return the indices that sort the array lexicographically.
For full documentation see `numpy.argsort`, for which this method is
in fact merely a "thin wrapper."
Examples
--------
>>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
>>> c = c.view(np.chararray); c
chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
dtype='|S5')
>>> c[c.argsort()]
chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
dtype='|S5')
"""
return self.__array__().argsort(axis, kind, order)
argsort.__doc__ = ndarray.argsort.__doc__
def capitalize(self):
"""
Return a copy of `self` with only the first character of each element
capitalized.
See Also
--------
char.capitalize
"""
return asarray(capitalize(self))
def center(self, width, fillchar=' '):
"""
Return a copy of `self` with its elements centered in a
string of length `width`.
See Also
--------
center
"""
return asarray(center(self, width, fillchar))
def count(self, sub, start=0, end=None):
"""
Returns an array with the number of non-overlapping occurrences of
substring `sub` in the range [`start`, `end`].
See Also
--------
char.count
"""
return count(self, sub, start, end)
def decode(self, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
See Also
--------
char.decode
"""
return decode(self, encoding, errors)
def encode(self, encoding=None, errors=None):
"""
Calls `str.encode` element-wise.
See Also
--------
char.encode
"""
return encode(self, encoding, errors)
def endswith(self, suffix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `self` ends with `suffix`, otherwise `False`.
See Also
--------
char.endswith
"""
return endswith(self, suffix, start, end)
def expandtabs(self, tabsize=8):
"""
Return a copy of each string element where all tab characters are
replaced by one or more spaces.
See Also
--------
char.expandtabs
"""
return asarray(expandtabs(self, tabsize))
def find(self, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
substring `sub` is found.
See Also
--------
char.find
"""
return find(self, sub, start, end)
def index(self, sub, start=0, end=None):
"""
Like `find`, but raises `ValueError` when the substring is not found.
See Also
--------
char.index
"""
return index(self, sub, start, end)
def isalnum(self):
"""
Returns true for each element if all characters in the string
are alphanumeric and there is at least one character, false
otherwise.
See Also
--------
char.isalnum
"""
return isalnum(self)
def isalpha(self):
"""
Returns true for each element if all characters in the string
are alphabetic and there is at least one character, false
otherwise.
See Also
--------
char.isalpha
"""
return isalpha(self)
def isdigit(self):
"""
Returns true for each element if all characters in the string are
digits and there is at least one character, false otherwise.
See Also
--------
char.isdigit
"""
return isdigit(self)
def islower(self):
"""
Returns true for each element if all cased characters in the
string are lowercase and there is at least one cased character,
false otherwise.
See Also
--------
char.islower
"""
return islower(self)
def isspace(self):
"""
Returns true for each element if there are only whitespace
characters in the string and there is at least one character,
false otherwise.
See Also
--------
char.isspace
"""
return isspace(self)
def istitle(self):
"""
Returns true for each element if the element is a titlecased
string and there is at least one character, false otherwise.
See Also
--------
char.istitle
"""
return istitle(self)
def isupper(self):
"""
Returns true for each element if all cased characters in the
string are uppercase and there is at least one character, false
otherwise.
See Also
--------
char.isupper
"""
return isupper(self)
def join(self, seq):
"""
Return a string which is the concatenation of the strings in the
sequence `seq`.
See Also
--------
char.join
"""
return join(self, seq)
def ljust(self, width, fillchar=' '):
"""
Return an array with the elements of `self` left-justified in a
string of length `width`.
See Also
--------
char.ljust
"""
return asarray(ljust(self, width, fillchar))
def lower(self):
"""
Return an array with the elements of `self` converted to
lowercase.
See Also
--------
char.lower
"""
return asarray(lower(self))
def lstrip(self, chars=None):
"""
For each element in `self`, return a copy with the leading characters
removed.
See Also
--------
char.lstrip
"""
return asarray(lstrip(self, chars))
def partition(self, sep):
"""
Partition each element in `self` around `sep`.
See Also
--------
partition
"""
return asarray(partition(self, sep))
def replace(self, old, new, count=None):
"""
For each element in `self`, return a copy of the string with all
occurrences of substring `old` replaced by `new`.
See Also
--------
char.replace
"""
return asarray(replace(self, old, new, count))
def rfind(self, sub, start=0, end=None):
"""
For each element in `self`, return the highest index in the string
where substring `sub` is found, such that `sub` is contained
within [`start`, `end`].
See Also
--------
char.rfind
"""
return rfind(self, sub, start, end)
def rindex(self, sub, start=0, end=None):
"""
Like `rfind`, but raises `ValueError` when the substring `sub` is
not found.
See Also
--------
char.rindex
"""
return rindex(self, sub, start, end)
def rjust(self, width, fillchar=' '):
"""
Return an array with the elements of `self`
right-justified in a string of length `width`.
See Also
--------
char.rjust
"""
return asarray(rjust(self, width, fillchar))
def rpartition(self, sep):
"""
Partition each element in `self` around `sep`.
See Also
--------
rpartition
"""
return asarray(rpartition(self, sep))
def rsplit(self, sep=None, maxsplit=None):
"""
For each element in `self`, return a list of the words in
the string, using `sep` as the delimiter string.
See Also
--------
char.rsplit
"""
return rsplit(self, sep, maxsplit)
def rstrip(self, chars=None):
"""
For each element in `self`, return a copy with the trailing
characters removed.
See Also
--------
char.rstrip
"""
return asarray(rstrip(self, chars))
def split(self, sep=None, maxsplit=None):
"""
For each element in `self`, return a list of the words in the
string, using `sep` as the delimiter string.
See Also
--------
char.split
"""
return split(self, sep, maxsplit)
def splitlines(self, keepends=None):
"""
For each element in `self`, return a list of the lines in the
element, breaking at line boundaries.
See Also
--------
char.splitlines
"""
return splitlines(self, keepends)
def startswith(self, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `self` starts with `prefix`, otherwise `False`.
See Also
--------
char.startswith
"""
return startswith(self, prefix, start, end)
def strip(self, chars=None):
"""
For each element in `self`, return a copy with the leading and
trailing characters removed.
See Also
--------
char.strip
"""
return asarray(strip(self, chars))
def swapcase(self):
"""
For each element in `self`, return a copy of the string with
uppercase characters converted to lowercase and vice versa.
See Also
--------
char.swapcase
"""
return asarray(swapcase(self))
def title(self):
"""
For each element in `self`, return a titlecased version of the
string: words start with uppercase characters, all remaining cased
characters are lowercase.
See Also
--------
char.title
"""
return asarray(title(self))
def translate(self, table, deletechars=None):
"""
For each element in `self`, return a copy of the string where
all characters occurring in the optional argument
`deletechars` are removed, and the remaining characters have
been mapped through the given translation table.
See Also
--------
char.translate
"""
return asarray(translate(self, table, deletechars))
def upper(self):
"""
Return an array with the elements of `self` converted to
uppercase.
See Also
--------
char.upper
"""
return asarray(upper(self))
def zfill(self, width):
"""
Return the numeric string left-filled with zeros in a string of
length `width`.
See Also
--------
char.zfill
"""
return asarray(zfill(self, width))
def isnumeric(self):
"""
For each element in `self`, return True if there are only
numeric characters in the element.
See Also
--------
char.isnumeric
"""
return isnumeric(self)
def isdecimal(self):
"""
For each element in `self`, return True if there are only
decimal characters in the element.
See Also
--------
char.isdecimal
"""
return isdecimal(self)
@set_module("numpy.char")
def array(obj, itemsize=None, copy=True, unicode=None, order=None):
"""
Create a `chararray`.
.. note::
This class is provided for numarray backward-compatibility.
New code (not concerned with numarray compatibility) should use
arrays of type `string_` or `unicode_` and use the free functions
in :mod:`numpy.char <numpy.core.defchararray>` for fast
vectorized string operations instead.
Versus a regular NumPy array of type `str` or `unicode`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
(e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
Parameters
----------
obj : array of str or unicode-like
itemsize : int, optional
`itemsize` is the number of characters per scalar in the
resulting array. If `itemsize` is None, and `obj` is an
object array or a Python list, the `itemsize` will be
automatically determined. If `itemsize` is provided and `obj`
is of type str or unicode, then the `obj` string will be
chunked into `itemsize` pieces.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy
will only be made if __array__ returns a copy, if obj is a
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`itemsize`, unicode, `order`, etc.).
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
None and `obj` is one of the following:
- a `chararray`,
- an ndarray of type `str` or `unicode`
- a Python str or unicode object,
then the unicode setting of the output array will be
automatically determined.
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is 'A', then the returned array may
be in any order (either C-, Fortran-contiguous, or even
discontiguous).
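    Examples
    --------
    A minimal sketch:

    >>> np.char.array(['hello', 'world'])
    chararray(['hello', 'world'], dtype='<U5')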
"""
if isinstance(obj, (bytes, str)):
if unicode is None:
if isinstance(obj, str):
unicode = True
else:
unicode = False
if itemsize is None:
itemsize = len(obj)
shape = len(obj) // itemsize
return chararray(shape, itemsize=itemsize, unicode=unicode,
buffer=obj, order=order)
if isinstance(obj, (list, tuple)):
obj = numpy.asarray(obj)
if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
# If we just have a vanilla chararray, create a chararray
# view around it.
if not isinstance(obj, chararray):
obj = obj.view(chararray)
if itemsize is None:
itemsize = obj.itemsize
# itemsize is in 8-bit chars, so for Unicode, we need
# to divide by the size of a single Unicode character,
# which for NumPy is always 4
if issubclass(obj.dtype.type, unicode_):
itemsize //= 4
if unicode is None:
if issubclass(obj.dtype.type, unicode_):
unicode = True
else:
unicode = False
if unicode:
dtype = unicode_
else:
dtype = string_
if order is not None:
obj = numpy.asarray(obj, order=order)
if (copy or
(itemsize != obj.itemsize) or
(not unicode and isinstance(obj, unicode_)) or
(unicode and isinstance(obj, string_))):
obj = obj.astype((dtype, int(itemsize)))
return obj
if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
if itemsize is None:
# Since no itemsize was specified, convert the input array to
# a list so the ndarray constructor will automatically
# determine the itemsize for us.
obj = obj.tolist()
# Fall through to the default case
if unicode:
dtype = unicode_
else:
dtype = string_
if itemsize is None:
val = narray(obj, dtype=dtype, order=order, subok=True)
else:
val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
return val.view(chararray)
@set_module("numpy.char")
def asarray(obj, itemsize=None, unicode=None, order=None):
"""
Convert the input to a `chararray`, copying the data only if
necessary.
Versus a regular NumPy array of type `str` or `unicode`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
    (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)
Parameters
----------
obj : array of str or unicode-like
itemsize : int, optional
`itemsize` is the number of characters per scalar in the
resulting array. If `itemsize` is None, and `obj` is an
object array or a Python list, the `itemsize` will be
automatically determined. If `itemsize` is provided and `obj`
is of type str or unicode, then the `obj` string will be
chunked into `itemsize` pieces.
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
None and `obj` is one of the following:
- a `chararray`,
        - an ndarray of type `str` or `unicode`
- a Python str or unicode object,
then the unicode setting of the output array will be
automatically determined.
order : {'C', 'F'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest).
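    Examples
    --------
    A brief illustrative session (the repr shown assumes a little-endian
    build and is indicative only):
    >>> np.char.asarray(['hello', 'world'])
    chararray(['hello', 'world'], dtype='<U5')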
"""
return array(obj, itemsize, copy=False,
unicode=unicode, order=order)
| 69,776 | Python | 23.947086 | 86 | 0.585774 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/cversions.py | """Simple script to compute the api hash of the current API.
The API hash is defined by numpy_api_order and ufunc_api_order.
"""
from os.path import dirname
from code_generators.genapi import fullapi_hash
from code_generators.numpy_api import full_api
if __name__ == '__main__':
curdir = dirname(__file__)
print(fullapi_hash(full_api))
| 347 | Python | 23.857141 | 62 | 0.714697 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_dtype.py | """
A place for code to be called from the implementation of np.dtype
String handling is much easier to do correctly in python.
"""
import numpy as np
_kind_to_stem = {
'u': 'uint',
'i': 'int',
'c': 'complex',
'f': 'float',
'b': 'bool',
'V': 'void',
'O': 'object',
'M': 'datetime',
'm': 'timedelta',
'S': 'bytes',
'U': 'str',
}
def _kind_name(dtype):
try:
return _kind_to_stem[dtype.kind]
except KeyError as e:
raise RuntimeError(
"internal dtype error, unknown kind {!r}"
.format(dtype.kind)
) from None
def __str__(dtype):
if dtype.fields is not None:
return _struct_str(dtype, include_align=True)
elif dtype.subdtype:
return _subarray_str(dtype)
elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
return dtype.str
else:
return dtype.name
def __repr__(dtype):
arg_str = _construction_repr(dtype, include_align=False)
if dtype.isalignedstruct:
arg_str = arg_str + ", align=True"
return "dtype({})".format(arg_str)
def _unpack_field(dtype, offset, title=None):
"""
Helper function to normalize the items in dtype.fields.
Call as:
dtype, offset, title = _unpack_field(*dtype.fields[name])
"""
return dtype, offset, title
def _isunsized(dtype):
# PyDataType_ISUNSIZED
return dtype.itemsize == 0
def _construction_repr(dtype, include_align=False, short=False):
"""
Creates a string repr of the dtype, excluding the 'dtype()' part
surrounding the object. This object may be a string, a list, or
a dict depending on the nature of the dtype. This
is the object passed as the first parameter to the dtype
constructor, and if no additional constructor parameters are
given, will reproduce the exact memory layout.
Parameters
----------
short : bool
If true, this creates a shorter repr using 'kind' and 'itemsize', instead
of the longer type name.
include_align : bool
If true, this includes the 'align=True' parameter
inside the struct dtype construction dict when needed. Use this flag
if you want a proper repr string without the 'dtype()' part around it.
If false, this does not preserve the
'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
struct arrays like the regular repr does, because the 'align'
        flag is not part of the first dtype constructor parameter. This
mode is intended for a full 'repr', where the 'align=True' is
provided as the second parameter.
"""
if dtype.fields is not None:
return _struct_str(dtype, include_align=include_align)
elif dtype.subdtype:
return _subarray_str(dtype)
else:
return _scalar_str(dtype, short=short)
def _scalar_str(dtype, short):
byteorder = _byte_order_str(dtype)
if dtype.type == np.bool_:
if short:
return "'?'"
else:
return "'bool'"
elif dtype.type == np.object_:
# The object reference may be different sizes on different
# platforms, so it should never include the itemsize here.
return "'O'"
elif dtype.type == np.string_:
if _isunsized(dtype):
return "'S'"
else:
return "'S%d'" % dtype.itemsize
elif dtype.type == np.unicode_:
if _isunsized(dtype):
return "'%sU'" % byteorder
else:
            return "'%sU%d'" % (byteorder, dtype.itemsize // 4)
# unlike the other types, subclasses of void are preserved - but
# historically the repr does not actually reveal the subclass
elif issubclass(dtype.type, np.void):
if _isunsized(dtype):
return "'V'"
else:
return "'V%d'" % dtype.itemsize
elif dtype.type == np.datetime64:
return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
elif dtype.type == np.timedelta64:
return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
elif np.issubdtype(dtype, np.number):
# Short repr with endianness, like '<f8'
if short or dtype.byteorder not in ('=', '|'):
return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
# Longer repr, like 'float64'
else:
return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)
elif dtype.isbuiltin == 2:
return dtype.type.__name__
else:
raise RuntimeError(
"Internal error: NumPy dtype unrecognized type number")
def _byte_order_str(dtype):
""" Normalize byteorder to '<' or '>' """
# hack to obtain the native and swapped byte order characters
swapped = np.dtype(int).newbyteorder('S')
native = swapped.newbyteorder('S')
byteorder = dtype.byteorder
if byteorder == '=':
return native.byteorder
if byteorder == 'S':
# TODO: this path can never be reached
return swapped.byteorder
elif byteorder == '|':
return ''
else:
return byteorder
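# Hypothetical illustration (not in the original source): on a
# little-endian build, _byte_order_str(np.dtype('=i4')) returns '<',
# while _byte_order_str(np.dtype('>i4')) returns '>'.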
def _datetime_metadata_str(dtype):
# TODO: this duplicates the C metastr_to_unicode functionality
unit, count = np.datetime_data(dtype)
if unit == 'generic':
return ''
elif count == 1:
return '[{}]'.format(unit)
else:
return '[{}{}]'.format(count, unit)
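# Worked examples (illustrative, not part of the original source):
#   _datetime_metadata_str(np.dtype('datetime64'))      -> ''
#   _datetime_metadata_str(np.dtype('datetime64[s]'))   -> '[s]'
#   _datetime_metadata_str(np.dtype('datetime64[25s]')) -> '[25s]'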
def _struct_dict_str(dtype, includealignedflag):
# unpack the fields dictionary into ls
names = dtype.names
fld_dtypes = []
offsets = []
titles = []
for name in names:
fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
fld_dtypes.append(fld_dtype)
offsets.append(offset)
titles.append(title)
# Build up a string to make the dictionary
if np.core.arrayprint._get_legacy_print_mode() <= 121:
colon = ":"
fieldsep = ","
else:
colon = ": "
fieldsep = ", "
# First, the names
ret = "{'names'%s[" % colon
ret += fieldsep.join(repr(name) for name in names)
# Second, the formats
ret += "], 'formats'%s[" % colon
ret += fieldsep.join(
_construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
# Third, the offsets
ret += "], 'offsets'%s[" % colon
ret += fieldsep.join("%d" % offset for offset in offsets)
# Fourth, the titles
if any(title is not None for title in titles):
ret += "], 'titles'%s[" % colon
ret += fieldsep.join(repr(title) for title in titles)
# Fifth, the itemsize
ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)
if (includealignedflag and dtype.isalignedstruct):
# Finally, the aligned flag
ret += ", 'aligned'%sTrue}" % colon
else:
ret += "}"
return ret
def _aligned_offset(offset, alignment):
# round up offset:
return - (-offset // alignment) * alignment
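# Worked example: _aligned_offset(5, 4) == 8, since -(-5 // 4) == 2 and
# 2 * 4 == 8, i.e. the offset is rounded up to the next multiple of the
# alignment. (Illustration only; not part of the original source.)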
def _is_packed(dtype):
"""
Checks whether the structured data type in 'dtype'
has a simple layout, where all the fields are in order,
and follow each other with no alignment padding.
When this returns true, the dtype can be reconstructed
from a list of the field names and dtypes with no additional
dtype parameters.
Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
"""
align = dtype.isalignedstruct
max_alignment = 1
total_offset = 0
for name in dtype.names:
fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
if align:
total_offset = _aligned_offset(total_offset, fld_dtype.alignment)
max_alignment = max(max_alignment, fld_dtype.alignment)
if fld_offset != total_offset:
return False
total_offset += fld_dtype.itemsize
if align:
total_offset = _aligned_offset(total_offset, max_alignment)
if total_offset != dtype.itemsize:
return False
return True
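# Hypothetical sanity checks (not part of the original source):
#   _is_packed(np.dtype([('a', 'u1'), ('b', 'i4')]))  -> True
#   _is_packed(np.dtype({'names': ['a'], 'formats': ['u1'],
#                        'offsets': [2], 'itemsize': 3}))  -> False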
def _struct_list_str(dtype):
items = []
for name in dtype.names:
fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
item = "("
if title is not None:
item += "({!r}, {!r}), ".format(title, name)
else:
item += "{!r}, ".format(name)
# Special case subarray handling here
if fld_dtype.subdtype is not None:
base, shape = fld_dtype.subdtype
item += "{}, {}".format(
_construction_repr(base, short=True),
shape
)
else:
item += _construction_repr(fld_dtype, short=True)
item += ")"
items.append(item)
return "[" + ", ".join(items) + "]"
def _struct_str(dtype, include_align):
# The list str representation can't include the 'align=' flag,
# so if it is requested and the struct has the aligned flag set,
# we must use the dict str instead.
if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
sub = _struct_list_str(dtype)
else:
sub = _struct_dict_str(dtype, include_align)
# If the data type isn't the default, void, show it
if dtype.type != np.void:
return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
else:
return sub
def _subarray_str(dtype):
base, shape = dtype.subdtype
return "({}, {})".format(
_construction_repr(base, short=True),
shape
)
def _name_includes_bit_suffix(dtype):
if dtype.type == np.object_:
# pointer size varies by system, best to omit it
return False
elif dtype.type == np.bool_:
# implied
return False
elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
# unspecified
return False
else:
return True
def _name_get(dtype):
# provides dtype.name.__get__, documented as returning a "bit name"
if dtype.isbuiltin == 2:
# user dtypes don't promise to do anything special
return dtype.type.__name__
if issubclass(dtype.type, np.void):
# historically, void subclasses preserve their name, eg `record64`
name = dtype.type.__name__
else:
name = _kind_name(dtype)
# append bit counts
if _name_includes_bit_suffix(dtype):
name += "{}".format(dtype.itemsize * 8)
# append metadata to datetimes
if dtype.type in (np.datetime64, np.timedelta64):
name += _datetime_metadata_str(dtype)
return name
| 10,495 | Python | 27.677596 | 81 | 0.600953 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_machar.py | """
Machine arithmetic - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
from numpy.core._ufunc_config import errstate
from numpy.core.overrides import set_module
# Need to speed this up...especially for longfloat
# Deprecated 2021-10-20, NumPy 1.22
@set_module('numpy')
class MachAr:
"""
Diagnosing machine parameters.
Attributes
----------
ibeta : int
Radix in which numbers are represented.
it : int
Number of base-`ibeta` digits in the floating point mantissa M.
machep : int
Exponent of the smallest (most negative) power of `ibeta` that,
        added to 1.0, gives something different from 1.0.
eps : float
Floating-point number ``beta**machep`` (floating point precision)
negep : int
Exponent of the smallest power of `ibeta` that, subtracted
from 1.0, gives something different from 1.0.
epsneg : float
Floating-point number ``beta**negep``.
iexp : int
Number of bits in the exponent (including its sign and bias).
minexp : int
Smallest (most negative) power of `ibeta` consistent with there
being no leading zeros in the mantissa.
xmin : float
Floating-point number ``beta**minexp`` (the smallest [in
magnitude] positive floating point number with full precision).
maxexp : int
Smallest (positive) power of `ibeta` that causes overflow.
xmax : float
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
usable floating value).
irnd : int
        In ``range(6)``, information on what kind of rounding is done
        during addition, and on how underflow is handled.
ngrd : int
Number of 'guard digits' used when truncating the product
of two mantissas to fit the representation.
epsilon : float
Same as `eps`.
tiny : float
An alias for `smallest_normal`, kept for backwards compatibility.
huge : float
Same as `xmax`.
    precision : float
        ``int(-log10(eps))``
    resolution : float
        ``10**(-precision)``
smallest_normal : float
The smallest positive floating point number with 1 as leading bit in
the mantissa following IEEE-754. Same as `xmin`.
smallest_subnormal : float
The smallest positive floating point number with 0 as leading bit in
the mantissa following IEEE-754.
Parameters
----------
float_conv : function, optional
Function that converts an integer or integer array to a float
or float array. Default is `float`.
int_conv : function, optional
Function that converts a float or float array to an integer or
integer array. Default is `int`.
float_to_float : function, optional
Function that converts a float array to float. Default is `float`.
Note that this does not seem to do anything useful in the current
implementation.
float_to_str : function, optional
Function that converts a single float to a string. Default is
``lambda v:'%24.16e' %v``.
title : str, optional
Title that is printed in the string representation of `MachAr`.
See Also
--------
finfo : Machine limits for floating point types.
iinfo : Machine limits for integer types.
References
----------
.. [1] Press, Teukolsky, Vetterling and Flannery,
"Numerical Recipes in C++," 2nd ed,
Cambridge University Press, 2002, p. 31.
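    Examples
    --------
    An indicative session on an IEEE-754 double platform (exact values
    are platform dependent, so the output is not checked):
    >>> machar = np.MachAr()   # doctest: +SKIP
    >>> machar.ibeta, machar.it   # doctest: +SKIP
    (2, 52)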
"""
def __init__(self, float_conv=float,int_conv=int,
float_to_float=float,
float_to_str=lambda v:'%24.16e' % v,
title='Python floating point number'):
"""
float_conv - convert integer to float (array)
int_conv - convert float (array) to integer
float_to_float - convert float array to float
float_to_str - convert array float to str
title - description of used floating point numbers
"""
# We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running arch.
with errstate(under='ignore'):
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
max_iterN = 10000
msg = "Did not converge after %d tries with %s"
one = float_conv(1)
two = one + one
zero = one - one
# Do we really need to do this? Aren't they 2 and 2.0?
# Determine ibeta and beta
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
b = one
for _ in range(max_iterN):
b = b + b
temp = a + b
itemp = int_conv(temp-a)
if any(itemp != 0):
break
else:
raise RuntimeError(msg % (_, one.dtype))
ibeta = itemp
beta = float_conv(ibeta)
# Determine it and irnd
it = -1
b = one
for _ in range(max_iterN):
it = it + 1
b = b * beta
temp = b + one
temp1 = temp - b
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
betah = beta / two
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
temp = a + betah
irnd = 0
if any(temp-a != zero):
irnd = 1
tempa = a + beta
temp = tempa + betah
if irnd == 0 and any(temp-tempa != zero):
irnd = 2
# Determine negep and epsneg
negep = it + 3
betain = one / beta
a = one
for i in range(negep):
a = a * betain
b = a
for _ in range(max_iterN):
temp = one - a
if any(temp-one != zero):
break
a = a * beta
negep = negep - 1
# Prevent infinite loop on PPC with gcc 4.0:
if negep < 0:
raise RuntimeError("could not determine machine tolerance "
"for 'negep', locals() -> %s" % (locals()))
else:
raise RuntimeError(msg % (_, one.dtype))
negep = -negep
epsneg = a
# Determine machep and eps
machep = - it - 3
a = b
for _ in range(max_iterN):
temp = one + a
if any(temp-one != zero):
break
a = a * beta
machep = machep + 1
else:
raise RuntimeError(msg % (_, one.dtype))
eps = a
# Determine ngrd
ngrd = 0
temp = one + eps
if irnd == 0 and any(temp*one - one != zero):
ngrd = 1
# Determine iexp
i = 0
k = 1
z = betain
t = one + eps
nxres = 0
for _ in range(max_iterN):
y = z
z = y*y
a = z*one # Check here for underflow
temp = z*t
if any(a+a == zero) or any(abs(z) >= y):
break
temp1 = temp * betain
if any(temp1*beta == z):
break
i = i + 1
k = k + k
else:
raise RuntimeError(msg % (_, one.dtype))
if ibeta != 10:
iexp = i + 1
mx = k + k
else:
iexp = 2
iz = ibeta
while k >= iz:
iz = iz * ibeta
iexp = iexp + 1
mx = iz + iz - 1
# Determine minexp and xmin
for _ in range(max_iterN):
xmin = y
y = y * betain
a = y * one
temp = y * t
if any((a + a) != zero) and any(abs(y) < xmin):
k = k + 1
temp1 = temp * betain
if any(temp1*beta == y) and any(temp != y):
nxres = 3
xmin = y
break
else:
break
else:
raise RuntimeError(msg % (_, one.dtype))
minexp = -k
# Determine maxexp, xmax
if mx <= k + k - 3 and ibeta != 10:
mx = mx + mx
iexp = iexp + 1
maxexp = mx + minexp
irnd = irnd + nxres
if irnd >= 2:
maxexp = maxexp - 2
i = maxexp + minexp
if ibeta == 2 and not i:
maxexp = maxexp - 1
if i > 20:
maxexp = maxexp - 1
if any(a != y):
maxexp = maxexp - 2
xmax = one - epsneg
if any(xmax*one != xmax):
xmax = one - beta*epsneg
xmax = xmax / (xmin*beta*beta*beta)
i = maxexp + minexp + 3
for j in range(i):
if ibeta == 2:
xmax = xmax + xmax
else:
xmax = xmax * beta
smallest_subnormal = abs(xmin / beta ** (it))
self.ibeta = ibeta
self.it = it
self.negep = negep
self.epsneg = float_to_float(epsneg)
self._str_epsneg = float_to_str(epsneg)
self.machep = machep
self.eps = float_to_float(eps)
self._str_eps = float_to_str(eps)
self.ngrd = ngrd
self.iexp = iexp
self.minexp = minexp
self.xmin = float_to_float(xmin)
self._str_xmin = float_to_str(xmin)
self.maxexp = maxexp
self.xmax = float_to_float(xmax)
self._str_xmax = float_to_str(xmax)
self.irnd = irnd
self.title = title
# Commonly used parameters
self.epsilon = self.eps
self.tiny = self.xmin
self.huge = self.xmax
self.smallest_normal = self.xmin
self._str_smallest_normal = float_to_str(self.xmin)
self.smallest_subnormal = float_to_float(smallest_subnormal)
self._str_smallest_subnormal = float_to_str(smallest_subnormal)
import math
self.precision = int(-math.log10(float_to_float(self.eps)))
ten = two + two + two + two + two
resolution = ten ** (-self.precision)
self.resolution = float_to_float(resolution)
self._str_resolution = float_to_str(resolution)
def __str__(self):
fmt = (
'Machine parameters for %(title)s\n'
'---------------------------------------------------------------------\n'
'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
'smallest_normal=%(smallest_normal)s '
'smallest_subnormal=%(smallest_subnormal)s\n'
'---------------------------------------------------------------------\n'
)
return fmt % self.__dict__
if __name__ == '__main__':
print(MachAr())
| 11,618 | Python | 31.455307 | 88 | 0.509985 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/shape_base.py | __all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
import functools
import itertools
import operator
import warnings
from . import numeric as _nx
from . import overrides
from .multiarray import array, asanyarray, normalize_axis_index
from . import fromnumeric as _from_nx
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _atleast_1d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : array_like
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with ``a.ndim >= 1``.
Copies are made only if necessary.
See Also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
>>> np.atleast_1d(1, [3, 4])
[array([1]), array([3, 4])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1)
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_2d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
Returns
-------
res, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
See Also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[0., 1., 2.]])
>>> np.atleast_2d(x).base is x
True
>>> np.atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_3d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
"""
View inputs as arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted to
arrays. Arrays that already have three or more dimensions are
preserved.
Returns
-------
res1, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
avoided where possible, and views with three or more dimensions are
returned. For example, a 1-D array of shape ``(N,)`` becomes a view
of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
view of shape ``(M, N, 1)``.
See Also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
True
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
... print(arr, arr.shape) # doctest: +SKIP
...
[[[1]
[2]]] (1, 2, 1)
[[[1]
[2]]] (1, 2, 1)
[[[1 2]]] (1, 1, 2)
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :, _nx.newaxis]
elif ary.ndim == 2:
result = ary[:, :, _nx.newaxis]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
warnings.warn('arrays to stack must be passed as a "sequence" type '
'such as list or tuple. Support for non-sequence '
'iterables such as generators is deprecated as of '
'NumPy 1.16 and will raise an error in the future.',
FutureWarning, stacklevel=stacklevel)
return ()
return arrays
def _vhstack_dispatcher(tup):
return _arrays_for_stack_dispatcher(tup)
@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
block : Assemble an nd-array from nested lists of blocks.
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third axis).
column_stack : Stack 1-D arrays as columns into a 2-D array.
vsplit : Split an array into multiple sub-arrays vertically (row-wise).
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.vstack((a,b))
array([[1, 2, 3],
[4, 5, 6]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[4], [5], [6]])
>>> np.vstack((a,b))
array([[1],
[2],
[3],
[4],
[5],
[6]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_2d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
return _nx.concatenate(arrs, 0)
@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis, except for 1-D
arrays where it concatenates along the first axis. Rebuilds arrays divided
by `hsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis,
except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
block : Assemble an nd-array from nested lists of blocks.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
column_stack : Stack 1-D arrays as columns into a 2-D array.
hsplit : Split an array into multiple sub-arrays horizontally (column-wise).
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((4,5,6))
>>> np.hstack((a,b))
array([1, 2, 3, 4, 5, 6])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[4],[5],[6]])
>>> np.hstack((a,b))
array([[1, 4],
[2, 5],
[3, 6]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_1d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
def _stack_dispatcher(arrays, axis=None, out=None):
arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
The ``axis`` parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the first
dimension and if ``axis=-1`` it will be the last dimension.
.. versionadded:: 1.10.0
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what stack would have returned if no
out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
block : Assemble an nd-array from nested lists of blocks.
split : Split array into a list of multiple sub-arrays of equal size.
Examples
--------
>>> arrays = [np.random.randn(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.stack((a, b))
array([[1, 2, 3],
[4, 5, 6]])
>>> np.stack((a, b), axis=-1)
array([[1, 4],
[2, 5],
[3, 6]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(arrays, stacklevel=2)
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = {arr.shape for arr in arrays}
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
axis = normalize_axis_index(axis, result_ndim)
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
return _nx.concatenate(expanded_arrays, axis=axis, out=out)
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
def _block_format_index(index):
"""
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
"""
idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
return 'arrays' + idx_str
def _block_check_depths_match(arrays, parent_index=[]):
"""
Recursive function checking that the depths of nested lists in `arrays`
all match. Mismatch raises a ValueError as described in the block
docstring below.
The entire index (rather than just the depth) needs to be calculated
for each innermost list, in case an error needs to be raised, so that
the index of the offending list can be printed as part of the error.
Parameters
----------
arrays : nested list of arrays
The arrays to check
parent_index : list of int
The full index of `arrays` within the nested lists passed to
`_block_check_depths_match` at the top of the recursion.
Returns
-------
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
refer to it, and the last index along the empty axis will be None.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
final_size: int
        The number of elements in the final array. This is used to
        motivate the choice of algorithm, based on benchmarking wisdom.
"""
if type(arrays) is tuple:
# not strictly necessary, but saves us from:
# - more than one way to do things - no point treating tuples like
# lists
# - horribly confusing behaviour that results when tuples are
# treated like ndarray
raise TypeError(
'{} is a tuple. '
'Only lists can be used to arrange blocks, and np.block does '
'not allow implicit conversion from tuple to ndarray.'.format(
_block_format_index(parent_index)
)
)
elif type(arrays) is list and len(arrays) > 0:
idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
for i, arr in enumerate(arrays))
first_index, max_arr_ndim, final_size = next(idxs_ndims)
for index, ndim, size in idxs_ndims:
final_size += size
if ndim > max_arr_ndim:
max_arr_ndim = ndim
if len(index) != len(first_index):
raise ValueError(
"List depths are mismatched. First element was at depth "
"{}, but there is an element at depth {} ({})".format(
len(first_index),
len(index),
_block_format_index(index)
)
)
# propagate our flag that indicates an empty list at the bottom
if index[-1] is None:
first_index = index
return first_index, max_arr_ndim, final_size
elif type(arrays) is list and len(arrays) == 0:
# We've 'bottomed out' on an empty list
return parent_index + [None], 0, 0
else:
# We've 'bottomed out' - arrays is either a scalar or an array
size = _size(arrays)
return parent_index, _ndim(arrays), size
def _atleast_nd(a, ndim):
# Ensures `a` has at least `ndim` dimensions by prepending
# ones to `a.shape` as necessary
return array(a, ndmin=ndim, copy=False, subok=True)
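# Illustrative behaviour (not part of the original source):
#   _atleast_nd(np.ones((2, 3)), 4).shape == (1, 1, 2, 3)
# `ndmin` pads the shape with leading ones, returning a view where
# possible because copy=False.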
def _accumulate(values):
return list(itertools.accumulate(values))
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
These help in nested concatenation.
Returns
-------
shape: tuple of int
This tuple satisfies::
            shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
shape == concatenate(arrs, axis).shape
slice_prefixes: tuple of (slice(start, end), )
For a list of arrays being concatenated, this returns the slice
in the larger array at axis that needs to be sliced into.
For example, the following holds::
ret = concatenate([a, b, c], axis)
            _, (sl_a, sl_b, sl_c) = _concatenate_shapes(
                [a.shape, b.shape, c.shape], axis)
ret[(slice(None),) * axis + sl_a] == a
ret[(slice(None),) * axis + sl_b] == b
ret[(slice(None),) * axis + sl_c] == c
These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to the rest of the slice
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
to existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
shape_at_axis = [shape[axis] for shape in shapes]
# Take a shape, any shape
first_shape = shapes[0]
first_shape_pre = first_shape[:axis]
first_shape_post = first_shape[axis+1:]
if any(shape[:axis] != first_shape_pre or
shape[axis+1:] != first_shape_post for shape in shapes):
raise ValueError(
'Mismatched array shapes in block along axis {}.'.format(axis))
shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
offsets_at_axis = _accumulate(shape_at_axis)
slice_prefixes = [(slice(start, end),)
for start, end in zip([0] + offsets_at_axis,
offsets_at_axis)]
return shape, slice_prefixes
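# Worked example (illustrative, not part of the original source):
# concatenating shapes (2, 3) and (2, 4) along axis=1 gives
#   shape == (2, 7)
#   slice_prefixes == [(slice(0, 3),), (slice(3, 7),)]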
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
"""
Returns the shape of the final array, along with a list
of slices and a list of arrays that can be used for assignment inside the
new array
Parameters
----------
arrays : nested list of arrays
The arrays to check
max_depth : list of int
The number of nested lists
result_ndim : int
        The number of dimensions in the final array.
Returns
-------
shape : tuple of int
The shape that the final array will take on.
slices: list of tuple of slices
The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
final index.
arrays: list of ndarray
The data to assign to each slice of the full array
"""
if depth < max_depth:
shapes, slices, arrays = zip(
*[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
for arr in arrays])
axis = result_ndim - max_depth + depth
shape, slice_prefixes = _concatenate_shapes(shapes, axis)
# Prepend the slice prefix and flatten the slices
slices = [slice_prefix + the_slice
for slice_prefix, inner_slices in zip(slice_prefixes, slices)
for the_slice in inner_slices]
# Flatten the array list
arrays = functools.reduce(operator.add, arrays)
return shape, slices, arrays
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
# Return the slice and the array inside a list to be consistent with
# the recursive case.
arr = _atleast_nd(arrays, result_ndim)
return arr.shape, [()], [arr]
def _block(arrays, max_depth, result_ndim, depth=0):
"""
Internal implementation of block based on repeated concatenation.
`arrays` is the argument passed to
block. `max_depth` is the depth of nested lists within `arrays` and
`result_ndim` is the greatest of the dimensions of the arrays in
`arrays` and the depth of the lists in `arrays` (see block docstring
for details).
"""
if depth < max_depth:
arrs = [_block(arr, max_depth, result_ndim, depth+1)
for arr in arrays]
return _concatenate(arrs, axis=-(max_depth-depth))
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
return _atleast_nd(arrays, result_ndim)
def _block_dispatcher(arrays):
# Use type(...) is list to match the behavior of np.block(), which special
# cases list specifically rather than allowing for generic iterables or
# tuple. Also, we know that list.__array_function__ will never exist.
if type(arrays) is list:
for subarrays in arrays:
yield from _block_dispatcher(subarrays)
else:
yield arrays
@array_function_dispatch(_block_dispatcher)
def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
Blocks in the innermost lists are concatenated (see `concatenate`) along
the last dimension (-1), then these are concatenated along the
second-last dimension (-2), and so on until the outermost list is reached.
Blocks can be of any dimension, but will not be broadcasted using the normal
rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
the same for all blocks. This is primarily useful for working with scalars,
and means that code like ``np.block([v, 1])`` is valid, where
``v.ndim == 1``.
When the nested list is two levels deep, this allows block matrices to be
constructed from their components.
.. versionadded:: 1.13.0
Parameters
----------
arrays : nested list of array_like or scalars (but not tuples)
If passed a single ndarray or scalar (a nested list of depth 0), this
is returned unmodified (and not copied).
Elements shapes must match along the appropriate axes (without
broadcasting), but leading 1s will be prepended to the shape as
necessary to make the dimensions match.
Returns
-------
block_array : ndarray
The array assembled from the given blocks.
The dimensionality of the output is equal to the greatest of:
* the dimensionality of all the inputs
* the depth to which the input list is nested
Raises
------
ValueError
* If list depths are mismatched - for instance, ``[[a, b], c]`` is
illegal, and should be spelt ``[[a, b], [c]]``
* If lists are empty - for instance, ``[[a, b], []]``
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third axis).
column_stack : Stack 1-D arrays as columns into a 2-D array.
vsplit : Split an array into multiple sub-arrays vertically (row-wise).
Notes
-----
When called with only scalars, ``np.block`` is equivalent to an ndarray
call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
``np.array([[1, 2], [3, 4]])``.
This function does not enforce that the blocks lie on a fixed grid.
``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::
AAAbb
AAAbb
cccDD
But is also allowed to produce, for some ``a, b, c, d``::
AAAbb
AAAbb
cDDDD
Since concatenation happens along the last axis first, `block` is _not_
capable of producing the following directly::
AAAbb
cccbb
cccDD
Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.
Examples
--------
The most common use of this function is to build a block matrix
>>> A = np.eye(2) * 2
>>> B = np.eye(3) * 3
>>> np.block([
... [A, np.zeros((2, 3))],
... [np.ones((3, 2)), B ]
... ])
array([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[1., 1., 3., 0., 0.],
[1., 1., 0., 3., 0.],
[1., 1., 0., 0., 3.]])
With a list of depth 1, `block` can be used as `hstack`
>>> np.block([1, 2, 3]) # hstack([1, 2, 3])
array([1, 2, 3])
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.block([a, b, 10]) # hstack([a, b, 10])
array([ 1, 2, 3, 4, 5, 6, 10])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([A, B]) # hstack([A, B])
array([[1, 1, 2, 2],
[1, 1, 2, 2]])
With a list of depth 2, `block` can be used in place of `vstack`:
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.block([[a], [b]]) # vstack([a, b])
array([[1, 2, 3],
[4, 5, 6]])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([[A], [B]]) # vstack([A, B])
array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
It can also be used in places of `atleast_1d` and `atleast_2d`
>>> a = np.array(0)
>>> b = np.array([1])
>>> np.block([a]) # atleast_1d(a)
array([0])
>>> np.block([b]) # atleast_1d(b)
array([1])
>>> np.block([[a]]) # atleast_2d(a)
array([[0]])
>>> np.block([[b]]) # atleast_2d(b)
array([[1]])
"""
arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
# It was found through benchmarking that making an array of final size
    # around 256x256 was faster by straight concatenation on an
    # i7-7700HQ processor with dual-channel 2400 MHz RAM.
    # The dtype used did not seem to matter much.
#
# A 2D array using repeated concatenation requires 2 copies of the array.
#
# The fastest algorithm will depend on the ratio of CPU power to memory
# speed.
# One can monitor the results of the benchmark
# https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
# to tune this parameter until a C version of the `_block_info_recursion`
# algorithm is implemented which would likely be faster than the python
# version.
if list_ndim * final_size > (2 * 512 * 512):
return _block_slicing(arrays, list_ndim, result_ndim)
else:
return _block_concatenate(arrays, list_ndim, result_ndim)
# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
"""
Returns
(`arrays`, list_ndim, result_ndim, final_size)
"""
bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
list_ndim = len(bottom_index)
if bottom_index and bottom_index[-1] is None:
raise ValueError(
'List at {} cannot be empty'.format(
_block_format_index(bottom_index)
)
)
result_ndim = max(arr_ndim, list_ndim)
return arrays, list_ndim, result_ndim, final_size
def _block_slicing(arrays, list_ndim, result_ndim):
shape, slices, arrays = _block_info_recursion(
arrays, list_ndim, result_ndim)
dtype = _nx.result_type(*[arr.dtype for arr in arrays])
# Test preferring F only in the case that all input arrays are F
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = _nx.empty(shape=shape, dtype=dtype, order=order)
# Note: In a c implementation, the function
# PyArray_CreateMultiSortedStridePerm could be used for more advanced
# guessing of the desired order.
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
def _block_concatenate(arrays, list_ndim, result_ndim):
result = _block(arrays, list_ndim, result_ndim)
if list_ndim == 0:
# Catch an edge case where _block returns a view because
# `arrays` is a single numpy array and not a list of numpy arrays.
# This might copy scalars or lists twice, but this isn't a likely
# usecase for those interested in performance
result = result.copy()
return result
| 28,985 | Python | 31.242492 | 81 | 0.590926 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_type_aliases.py | """
Due to compatibility, numpy has a very large number of different naming
conventions for the scalar types (those subclassing from `numpy.generic`).
This file produces a convoluted set of dictionaries mapping names to types,
and sometimes other mappings too.
.. data:: allTypes
A dictionary of names to types that will be exposed as attributes through
``np.core.numerictypes.*``
.. data:: sctypeDict
Similar to `allTypes`, but maps a broader set of aliases to their types.
.. data:: sctypes
A dictionary keyed by a "type group" string, providing a list of types
under that group.
"""
from numpy.compat import unicode
from numpy.core._string_helpers import english_lower
from numpy.core.multiarray import typeinfo, dtype
from numpy.core._dtype import _kind_name
sctypeDict = {} # Contains all leaf-node scalar types with aliases
allTypes = {} # Collect the types we will add to the module
# separate the actual type info from the abstract base classes
_abstract_types = {}
_concrete_typeinfo = {}
for k, v in typeinfo.items():
# make all the keys lowercase too
k = english_lower(k)
if isinstance(v, type):
_abstract_types[k] = v
else:
_concrete_typeinfo[k] = v
_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
def _bits_of(obj):
try:
info = next(v for v in _concrete_typeinfo.values() if v.type is obj)
except StopIteration:
if obj in _abstract_types.values():
msg = "Cannot count the bits of an abstract type"
raise ValueError(msg) from None
# some third-party type - make a best-guess
return dtype(obj).itemsize * 8
else:
return info.bits
def bitname(obj):
"""Return a bit-width name for a given type object"""
bits = _bits_of(obj)
dt = dtype(obj)
char = dt.kind
base = _kind_name(dt)
if base == 'object':
bits = 0
if bits != 0:
char = "%s%d" % (char, bits // 8)
return base, bits, char
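# Worked example (illustrative, not part of the original source):
#   bitname(dtype('f8').type)  ->  ('float', 64, 'f8')
# kind 'f' plus the byte count 8 forms the char, and the kind stem
# 'float' plus the bit count 64 forms names like 'float64'.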
def _add_types():
for name, info in _concrete_typeinfo.items():
# define C-name and insert typenum and typechar references also
allTypes[name] = info.type
sctypeDict[name] = info.type
sctypeDict[info.char] = info.type
sctypeDict[info.num] = info.type
for name, cls in _abstract_types.items():
allTypes[name] = cls
_add_types()
# This is the priority order used to assign the bit-sized NPY_INTxx names, which
# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
# consistent.
# If two C types have the same size, then the earliest one in this list is used
# as the sized name.
_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte']
_uint_ctypes = list('u' + t for t in _int_ctypes)
def _add_aliases():
for name, info in _concrete_typeinfo.items():
# these are handled by _add_integer_aliases
if name in _int_ctypes or name in _uint_ctypes:
continue
# insert bit-width version for this class (if relevant)
base, bit, char = bitname(info.type)
myname = "%s%d" % (base, bit)
# ensure that (c)longdouble does not overwrite the aliases assigned to
# (c)double
if name in ('longdouble', 'clongdouble') and myname in allTypes:
continue
allTypes[myname] = info.type
# add mapping for both the bit name and the numarray name
sctypeDict[myname] = info.type
# add forward, reverse, and string mapping to numarray
sctypeDict[char] = info.type
_add_aliases()
def _add_integer_aliases():
seen_bits = set()
for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
i_info = _concrete_typeinfo[i_ctype]
u_info = _concrete_typeinfo[u_ctype]
bits = i_info.bits # same for both
for info, charname, intname in [
(i_info,'i%d' % (bits//8,), 'int%d' % bits),
(u_info,'u%d' % (bits//8,), 'uint%d' % bits)]:
if bits not in seen_bits:
# sometimes two different types have the same number of bits
# if so, the one iterated over first takes precedence
allTypes[intname] = info.type
sctypeDict[intname] = info.type
sctypeDict[charname] = info.type
seen_bits.add(bits)
_add_integer_aliases()
# We use these later
void = allTypes['void']
#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
type_pairs = [('complex_', 'cdouble'),
('int0', 'intp'),
('uint0', 'uintp'),
('single', 'float'),
('csingle', 'cfloat'),
('singlecomplex', 'cfloat'),
('float_', 'double'),
('intc', 'int'),
('uintc', 'uint'),
('int_', 'long'),
('uint', 'ulong'),
('cfloat', 'cdouble'),
('longfloat', 'longdouble'),
('clongfloat', 'clongdouble'),
('longcomplex', 'clongdouble'),
('bool_', 'bool'),
('bytes_', 'string'),
('string_', 'string'),
('str_', 'unicode'),
('unicode_', 'unicode'),
('object_', 'object')]
for alias, t in type_pairs:
allTypes[alias] = allTypes[t]
sctypeDict[alias] = sctypeDict[t]
# Remove aliases overriding python types and modules
to_remove = ['object', 'int', 'float',
'complex', 'bool', 'string', 'datetime', 'timedelta',
'bytes', 'str']
for t in to_remove:
try:
del allTypes[t]
del sctypeDict[t]
except KeyError:
pass
# Additional aliases in sctypeDict that should not be exposed as attributes
attrs_to_remove = ['ulong']
for t in attrs_to_remove:
try:
del allTypes[t]
except KeyError:
pass
_set_up_aliases()
sctypes = {'int': [],
'uint':[],
'float':[],
'complex':[],
'others':[bool, object, bytes, unicode, void]}
def _add_array_type(typename, bits):
try:
t = allTypes['%s%d' % (typename, bits)]
except KeyError:
pass
else:
sctypes[typename].append(t)
def _set_array_types():
ibytes = [1, 2, 4, 8, 16, 32, 64]
fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
for bytes in ibytes:
bits = 8*bytes
_add_array_type('int', bits)
_add_array_type('uint', bits)
for bytes in fbytes:
bits = 8*bytes
_add_array_type('float', bits)
_add_array_type('complex', 2*bits)
_gi = dtype('p')
if _gi.type not in sctypes['int']:
indx = 0
sz = _gi.itemsize
_lst = sctypes['int']
while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
indx += 1
sctypes['int'].insert(indx, _gi.type)
sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
# Add additional strings to the sctypeDict
_toadd = ['int', 'float', 'complex', 'bool', 'object',
'str', 'bytes', ('a', 'bytes_')]
for name in _toadd:
if isinstance(name, tuple):
sctypeDict[name[0]] = allTypes[name[1]]
else:
sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
| 7,490 | Python | 29.57551 | 80 | 0.566088 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/numerictypes.py | """
numerictypes: Define the numeric type objects
This module is designed so "from numerictypes import \\*" is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
sctypeDict
Type objects (not all will be available, depends on platform):
see variable sctypes for which ones you have
Bit-width names
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
datetime64 timedelta64
c-based names
bool_
object_
void, str_, unicode_
byte, ubyte,
short, ushort
intc, uintc,
intp, uintp,
int_, uint,
longlong, ulonglong,
single, csingle,
float_, complex_,
longfloat, clongfloat,
As part of the type-hierarchy: xx -- is bit-width
generic
+-> bool_ (kind=b)
+-> number
| +-> integer
| | +-> signedinteger (intxx) (kind=i)
| | | byte
| | | short
| | | intc
| | | intp int0
| | | int_
| | | longlong
| | \\-> unsignedinteger (uintxx) (kind=u)
| | ubyte
| | ushort
| | uintc
| | uintp uint0
| | uint_
| | ulonglong
| +-> inexact
| +-> floating (floatxx) (kind=f)
| | half
| | single
| | float_ (double)
| | longfloat
| \\-> complexfloating (complexxx) (kind=c)
| csingle (singlecomplex)
| complex_ (cfloat, cdouble)
| clongfloat (longcomplex)
+-> flexible
| +-> character
| | str_ (string_, bytes_) (kind=S) [Python 2]
| | unicode_ (kind=U) [Python 2]
| |
| | bytes_ (string_) (kind=S) [Python 3]
| | str_ (unicode_) (kind=U) [Python 3]
| |
| \\-> void (kind=V)
\\-> object_ (not used much) (kind=O)
"""
import numbers
from numpy.core.multiarray import (
ndarray, array, dtype, datetime_data, datetime_as_string,
busday_offset, busday_count, is_busday, busdaycalendar
)
from numpy.core.overrides import set_module
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
'issubdtype', 'datetime_data', 'datetime_as_string',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
# we don't need all these imports, but we need to keep them for compatibility
# for users using np.core.numerictypes.UPPER_TABLE
from ._string_helpers import (
english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE
)
from ._type_aliases import (
sctypeDict,
allTypes,
bitname,
sctypes,
_concrete_types,
_concrete_typeinfo,
_bits_of,
)
from ._dtype import _kind_name
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
from builtins import bool, int, float, complex, object, str, bytes
from numpy.compat import long, unicode
# We use this later
generic = allTypes['generic']
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'int128',
'uint128', 'float16',
'float32', 'float64', 'float80', 'float96', 'float128',
'float256',
'complex32', 'complex64', 'complex128', 'complex160',
'complex192', 'complex256', 'complex512', 'object']
@set_module('numpy')
def maximum_sctype(t):
"""
Return the scalar type of highest precision of the same kind as the input.
Parameters
----------
t : dtype or dtype specifier
The input data type. This can be a `dtype` object or an object that
is convertible to a `dtype`.
Returns
-------
out : dtype
The highest precision data type of the same kind (`dtype.kind`) as `t`.
See Also
--------
obj2sctype, mintypecode, sctype2char
dtype
Examples
--------
>>> np.maximum_sctype(int)
<class 'numpy.int64'>
>>> np.maximum_sctype(np.uint8)
<class 'numpy.uint64'>
>>> np.maximum_sctype(complex)
<class 'numpy.complex256'> # may vary
>>> np.maximum_sctype(str)
<class 'numpy.str_'>
>>> np.maximum_sctype('i2')
<class 'numpy.int64'>
>>> np.maximum_sctype('f4')
<class 'numpy.float128'> # may vary
"""
g = obj2sctype(t)
if g is None:
return t
t = g
base = _kind_name(dtype(t))
if base in sctypes:
return sctypes[base][-1]
else:
return t
@set_module('numpy')
def issctype(rep):
"""
Determines whether the given object represents a scalar data-type.
Parameters
----------
rep : any
If `rep` is an instance of a scalar dtype, True is returned. If not,
False is returned.
Returns
-------
out : bool
Boolean result of check whether `rep` is a scalar dtype.
See Also
--------
issubsctype, issubdtype, obj2sctype, sctype2char
Examples
--------
>>> np.issctype(np.int32)
True
>>> np.issctype(list)
False
>>> np.issctype(1.1)
False
Strings are also a scalar type:
>>> np.issctype(np.dtype('str'))
True
"""
if not isinstance(rep, (type, dtype)):
return False
try:
res = obj2sctype(rep)
if res and res != object_:
return True
return False
except Exception:
return False
@set_module('numpy')
def obj2sctype(rep, default=None):
"""
Return the scalar dtype or NumPy equivalent of Python type of an object.
Parameters
----------
rep : any
The object of which the type is returned.
default : any, optional
If given, this is returned for objects whose types can not be
determined. If not given, None is returned for those objects.
Returns
-------
dtype : dtype or Python type
The data type of `rep`.
See Also
--------
sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
Examples
--------
>>> np.obj2sctype(np.int32)
<class 'numpy.int32'>
>>> np.obj2sctype(np.array([1., 2.]))
<class 'numpy.float64'>
>>> np.obj2sctype(np.array([1.j]))
<class 'numpy.complex128'>
>>> np.obj2sctype(dict)
<class 'numpy.object_'>
>>> np.obj2sctype('string')
>>> np.obj2sctype(1, default=list)
<class 'list'>
"""
# prevent abstract classes being upcast
if isinstance(rep, type) and issubclass(rep, generic):
return rep
# extract dtype from arrays
if isinstance(rep, ndarray):
return rep.dtype.type
# fall back on dtype to convert
try:
res = dtype(rep)
except Exception:
return default
else:
return res.type
@set_module('numpy')
def issubclass_(arg1, arg2):
"""
Determine if a class is a subclass of a second class.
`issubclass_` is equivalent to the Python built-in ``issubclass``,
except that it returns False instead of raising a TypeError if one
of the arguments is not a class.
Parameters
----------
arg1 : class
Input class. True is returned if `arg1` is a subclass of `arg2`.
arg2 : class or tuple of classes.
Input class. If a tuple of classes, True is returned if `arg1` is a
subclass of any of the tuple elements.
Returns
-------
out : bool
Whether `arg1` is a subclass of `arg2` or not.
See Also
--------
issubsctype, issubdtype, issctype
Examples
--------
>>> np.issubclass_(np.int32, int)
False
>>> np.issubclass_(np.int32, float)
False
>>> np.issubclass_(np.float64, float)
True
"""
try:
return issubclass(arg1, arg2)
except TypeError:
return False
@set_module('numpy')
def issubsctype(arg1, arg2):
"""
Determine if the first argument is a subclass of the second argument.
Parameters
----------
arg1, arg2 : dtype or dtype specifier
Data-types.
Returns
-------
out : bool
The result.
See Also
--------
issctype, issubdtype, obj2sctype
Examples
--------
>>> np.issubsctype('S8', str)
False
>>> np.issubsctype(np.array([1]), int)
True
>>> np.issubsctype(np.array([1]), float)
False
"""
return issubclass(obj2sctype(arg1), obj2sctype(arg2))
@set_module('numpy')
def issubdtype(arg1, arg2):
r"""
    Returns True if the first argument is a typecode lower/equal in the type hierarchy.
This is like the builtin :func:`issubclass`, but for `dtype`\ s.
Parameters
----------
arg1, arg2 : dtype_like
`dtype` or object coercible to one
Returns
-------
out : bool
See Also
--------
:ref:`arrays.scalars` : Overview of the numpy type hierarchy.
issubsctype, issubclass_
Examples
--------
`issubdtype` can be used to check the type of arrays:
>>> ints = np.array([1, 2, 3], dtype=np.int32)
>>> np.issubdtype(ints.dtype, np.integer)
True
>>> np.issubdtype(ints.dtype, np.floating)
False
>>> floats = np.array([1, 2, 3], dtype=np.float32)
>>> np.issubdtype(floats.dtype, np.integer)
False
>>> np.issubdtype(floats.dtype, np.floating)
True
Similar types of different sizes are not subdtypes of each other:
>>> np.issubdtype(np.float64, np.float32)
False
>>> np.issubdtype(np.float32, np.float64)
False
but both are subtypes of `floating`:
>>> np.issubdtype(np.float64, np.floating)
True
>>> np.issubdtype(np.float32, np.floating)
True
For convenience, dtype-like objects are allowed too:
>>> np.issubdtype('S1', np.string_)
True
>>> np.issubdtype('i4', np.signedinteger)
True
"""
if not issubclass_(arg1, generic):
arg1 = dtype(arg1).type
if not issubclass_(arg2, generic):
arg2 = dtype(arg2).type
return issubclass(arg1, arg2)
# This dictionary allows look up based on any alias for an array data-type
class _typedict(dict):
"""
Base object for a dictionary for look-up with any alias for an array dtype.
    Instances of `_typedict` can not be used as dictionaries directly;
    they first have to be populated.
"""
def __getitem__(self, obj):
return dict.__getitem__(self, obj2sctype(obj))
nbytes = _typedict()
_alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
for name, info in _concrete_typeinfo.items():
obj = info.type
nbytes[obj] = info.bits // 8
_alignment[obj] = info.alignment
if len(info) > 5:
_maxvals[obj] = info.max
_minvals[obj] = info.min
else:
_maxvals[obj] = None
_minvals[obj] = None
_construct_lookups()
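# Illustrative sketch (hedged, not part of this module): because
# _typedict.__getitem__ funnels every key through obj2sctype, any alias of
# a concrete scalar type addresses the same entry once _construct_lookups()
# has run. On a typical platform where 'i4' is int32 one would expect:
#
#     >>> nbytes[int32] == nbytes['i4'] == nbytes[dtype('int32')] == 4
#     True   # assumed output; exact sizes are platform-dependent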
@set_module('numpy')
def sctype2char(sctype):
"""
Return the string representation of a scalar dtype.
Parameters
----------
sctype : scalar dtype or object
If a scalar dtype, the corresponding string character is
returned. If an object, `sctype2char` tries to infer its scalar type
and then return the corresponding string character.
Returns
-------
typechar : str
The string character corresponding to the scalar type.
Raises
------
ValueError
If `sctype` is an object for which the type can not be inferred.
See Also
--------
obj2sctype, issctype, issubsctype, mintypecode
Examples
--------
>>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]:
... print(np.sctype2char(sctype))
l # may vary
d
D
S
O
>>> x = np.array([1., 2-1.j])
>>> np.sctype2char(x)
'D'
>>> np.sctype2char(list)
'O'
"""
sctype = obj2sctype(sctype)
if sctype is None:
raise ValueError("unrecognized type")
if sctype not in _concrete_types:
# for compatibility
raise KeyError(sctype)
return dtype(sctype).char
# Create dictionary of casting functions that wrap sequences
# indexed by type or type character
cast = _typedict()
for key in _concrete_types:
cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
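# Illustrative sketch (hedged): each `cast` entry is a casting callable
# keyed by any alias of a concrete type; the `k=key` default binds the loop
# variable early so every lambda casts to its own type rather than the last
# key bound. For example, on a typical platform:
#
#     >>> cast[int32]([1.5, 2.5])
#     array([1, 2], dtype=int32)   # assumed output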
def _scalar_type_key(typ):
"""A ``key`` function for `sorted`."""
dt = dtype(typ)
return (dt.kind.lower(), dt.itemsize)
ScalarType = [int, float, complex, bool, bytes, str, memoryview]
ScalarType += sorted(_concrete_types, key=_scalar_type_key)
ScalarType = tuple(ScalarType)
# Now add the types we've determined to this module
for key in allTypes:
globals()[key] = allTypes[key]
__all__.append(key)
del key
typecodes = {'Character':'c',
'Integer':'bhilqp',
'UnsignedInteger':'BHILQP',
'Float':'efdg',
'Complex':'FDG',
'AllInteger':'bBhHiIlLqQpP',
'AllFloat':'efdgFDG',
'Datetime': 'Mm',
'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
# backwards compatibility --- deprecated name
# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py)
typeDict = sctypeDict
# b -> boolean
# u -> unsigned integer
# i -> signed integer
# f -> floating point
# c -> complex
# M -> datetime
# m -> timedelta
# S -> string
# U -> Unicode string
# V -> record
# O -> Python object
_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
__len_test_types = len(__test_types)
# Keep incrementing until a common type that both can be coerced to
# is found. Otherwise, return None.
def _find_common_coerce(a, b):
if a > b:
return a
try:
thisind = __test_types.index(a.char)
except ValueError:
return None
return _can_coerce_all([a, b], start=thisind)
# Find a data-type that all data-types in a list can be coerced to
def _can_coerce_all(dtypelist, start=0):
N = len(dtypelist)
if N == 0:
return None
if N == 1:
return dtypelist[0]
thisind = start
while thisind < __len_test_types:
newdtype = dtype(__test_types[thisind])
numcoerce = len([x for x in dtypelist if newdtype >= x])
if numcoerce == N:
return newdtype
thisind += 1
return None
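# Illustrative trace (hedged sketch): _can_coerce_all walks __test_types
# ('?', the integer codes, then 'efdgFDG', then 'O') and returns the first
# dtype that every input compares <= to under the safe-casting dtype
# ordering. For [int16, float32] the walk first succeeds at 'f', so the
# common type would be float32.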
def _register_types():
numbers.Integral.register(integer)
numbers.Complex.register(inexact)
numbers.Real.register(floating)
numbers.Number.register(number)
_register_types()
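# Illustrative sketch (hedged): after _register_types() runs, NumPy scalar
# types satisfy the abstract numeric ABCs via virtual registration rather
# than subclassing, e.g. one would expect:
#
#     >>> import numbers
#     >>> isinstance(int64(1), numbers.Integral)
#     True   # assumed output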
@set_module('numpy')
def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules.
Parameters
----------
array_types : sequence
A list of dtypes or dtype convertible objects representing arrays.
scalar_types : sequence
A list of dtypes or dtype convertible objects representing scalars.
Returns
-------
datatype : dtype
The common data type, which is the maximum of `array_types` ignoring
`scalar_types`, unless the maximum of `scalar_types` is of a
different kind (`dtype.kind`). If the kind is not understood, then
None is returned.
See Also
--------
dtype, common_type, can_cast, mintypecode
Examples
--------
>>> np.find_common_type([], [np.int64, np.float32, complex])
dtype('complex128')
>>> np.find_common_type([np.int64, np.float32], [])
dtype('float64')
The standard casting rules ensure that a scalar cannot up-cast an
array unless the scalar is of a fundamentally different kind of data
    (i.e. under a different hierarchy in the data type hierarchy) than
the array:
>>> np.find_common_type([np.float32], [np.int64, np.float64])
dtype('float32')
Complex is of a different type, so it up-casts the float in the
`array_types` argument:
>>> np.find_common_type([np.float32], [complex])
dtype('complex128')
Type specifier strings are convertible to dtypes and can therefore
be used instead of dtypes:
>>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
dtype('complex128')
"""
array_types = [dtype(x) for x in array_types]
scalar_types = [dtype(x) for x in scalar_types]
maxa = _can_coerce_all(array_types)
maxsc = _can_coerce_all(scalar_types)
if maxa is None:
return maxsc
if maxsc is None:
return maxa
try:
index_a = _kind_list.index(maxa.kind)
index_sc = _kind_list.index(maxsc.kind)
except ValueError:
return None
if index_sc > index_a:
return _find_common_coerce(maxsc, maxa)
else:
return maxa
| 17,271 | Python | 24.740686 | 81 | 0.584564 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_methods.py | """
Array methods which are called by both the C-code for the method
and the Python code for the NumPy-namespace function
"""
import warnings
from contextlib import nullcontext
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.multiarray import asanyarray
from numpy.core import numerictypes as nt
from numpy.core import _exceptions
from numpy._globals import _NoValue
from numpy.compat import pickle, os_fspath
# save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce
# Map complex types to a (2,)-float view for fast-path computation in _var()
_complex_to_float = {
nt.dtype(nt.csingle) : nt.dtype(nt.single),
nt.dtype(nt.cdouble) : nt.dtype(nt.double),
}
# Special case for platforms like Windows where longdouble aliases double:
# only map clongdouble when longdouble is a distinct type, so that double
# takes precedence otherwise
if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
_complex_to_float.update({
nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble),
})
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_maximum(a, axis, None, out, keepdims, initial, where)
def _amin(a, axis=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_minimum(a, axis, None, out, keepdims, initial, where)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_sum(a, axis, dtype, out, keepdims, initial, where)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_prod(a, axis, dtype, out, keepdims, initial, where)
def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Parsing keyword arguments is currently fairly slow, so avoid it for now
if where is True:
return umr_any(a, axis, dtype, out, keepdims)
return umr_any(a, axis, dtype, out, keepdims, where=where)
def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Parsing keyword arguments is currently fairly slow, so avoid it for now
if where is True:
return umr_all(a, axis, dtype, out, keepdims)
return umr_all(a, axis, dtype, out, keepdims, where=where)
def _count_reduce_items(arr, axis, keepdims=False, where=True):
# fast-path for the default case
if where is True:
# no boolean mask given, calculate items according to axis
if axis is None:
axis = tuple(range(arr.ndim))
elif not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
items = nt.intp(items)
else:
# TODO: Optimize case when `where` is broadcast along a non-reduction
# axis and full sum is more excessive than needed.
# guarded to protect circular imports
from numpy.lib.stride_tricks import broadcast_to
# count True values in (potentially broadcasted) boolean mask
items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
keepdims)
return items
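# Illustrative sketch (hedged, not part of this module): on the default
# where=True path the item count is just the product of the reduced axis
# lengths, e.g.
#
#     >>> a = mu.zeros((3, 4, 5))
#     >>> _count_reduce_items(a, (0, 2))
#     15   # assumed output; returned as an intp scalar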
# Numpy 1.17.0, 2019-02-24
# Various clip behavior deprecations, marked with _clip_dep as a prefix.
def _clip_dep_is_scalar_nan(a):
# guarded to protect circular imports
from numpy.core.fromnumeric import ndim
if ndim(a) != 0:
return False
try:
return um.isnan(a)
except TypeError:
return False
def _clip_dep_is_byte_swapped(a):
if isinstance(a, mu.ndarray):
return not a.dtype.isnative
return False
def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs):
# normal path
if casting is not None:
return ufunc(*args, out=out, casting=casting, **kwargs)
# try to deal with broken casting rules
try:
return ufunc(*args, out=out, **kwargs)
except _exceptions._UFuncOutputCastingError as e:
# Numpy 1.17.0, 2019-02-24
warnings.warn(
"Converting the output of clip from {!r} to {!r} is deprecated. "
"Pass `casting=\"unsafe\"` explicitly to silence this warning, or "
"correct the type of the variables.".format(e.from_, e.to),
DeprecationWarning,
stacklevel=2
)
return ufunc(*args, out=out, casting="unsafe", **kwargs)
def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs):
if min is None and max is None:
raise ValueError("One of max or min must be given")
# Numpy 1.17.0, 2019-02-24
    # This deprecation probably incurs a substantial slowdown for small
    # arrays; it will be good to get rid of it.
if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out):
using_deprecated_nan = False
if _clip_dep_is_scalar_nan(min):
min = -float('inf')
using_deprecated_nan = True
if _clip_dep_is_scalar_nan(max):
max = float('inf')
using_deprecated_nan = True
if using_deprecated_nan:
warnings.warn(
"Passing `np.nan` to mean no clipping in np.clip has always "
"been unreliable, and is now deprecated. "
"In future, this will always return nan, like it already does "
"when min or max are arrays that contain nan. "
"To skip a bound, pass either None or an np.inf of an "
"appropriate sign.",
DeprecationWarning,
stacklevel=2
)
if min is None:
return _clip_dep_invoke_with_casting(
um.minimum, a, max, out=out, casting=casting, **kwargs)
elif max is None:
return _clip_dep_invoke_with_casting(
um.maximum, a, min, out=out, casting=casting, **kwargs)
else:
return _clip_dep_invoke_with_casting(
um.clip, a, min, max, out=out, casting=casting, **kwargs)
def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
arr = asanyarray(a)
is_float16_result = False
rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None:
if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
elif issubclass(arr.dtype.type, nt.float16):
dtype = mu.dtype('f4')
is_float16_result = True
ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
if is_float16_result and out is None:
ret = arr.dtype.type(ret)
elif hasattr(ret, 'dtype'):
if is_float16_result:
ret = arr.dtype.type(ret / rcount)
else:
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
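# Illustrative note (hedged): the float16 branch in _mean accumulates in
# float32 and casts back only at the end, so one would expect e.g.
#
#     >>> x = mu.arange(10).astype('f2')
#     >>> _mean(x).dtype
#     dtype('float16')   # assumed: result keeps the input dtype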
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
where=True):
arr = asanyarray(a)
rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
# Make this warning show up on top.
if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
stacklevel=2)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
# Compute the mean.
# Note that if dtype is not of inexact type then arraymean will
# not be either.
arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
# The shape of rcount has to match arrmean to not change the shape of out
# in broadcasting. Otherwise, it cannot be stored back to arrmean.
if rcount.ndim == 0:
# fast-path for default case when where is True
div = rcount
else:
# matching rcount to arrmean when where is specified as array
div = rcount.reshape(arrmean.shape)
if isinstance(arrmean, mu.ndarray):
arrmean = um.true_divide(arrmean, div, out=arrmean, casting='unsafe',
subok=False)
elif hasattr(arrmean, "dtype"):
arrmean = arrmean.dtype.type(arrmean / rcount)
else:
arrmean = arrmean / rcount
# Compute sum of squared deviations from mean
# Note that x may not be inexact and that we need it to be an array,
# not a scalar.
x = asanyarray(arr - arrmean)
if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
x = um.multiply(x, x, out=x)
# Fast-paths for built-in complex types
elif x.dtype in _complex_to_float:
xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
um.multiply(xv, xv, out=xv)
x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
# Most general case; includes handling object arrays containing imaginary
# numbers and complex types with non-native byteorder
else:
x = um.multiply(x, um.conjugate(x), out=x).real
ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)
# Compute degrees of freedom and make sure it is not negative.
rcount = um.maximum(rcount - ddof, 0)
# divide by degrees of freedom
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
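# Illustrative note (hedged sketch): the complex fast path in _var views
# each complex lane as two floats, relying on the identity
# |z|**2 == z.real**2 + z.imag**2; the general branch computes the same
# quantity as (x * conj(x)).real, just without the in-place view trick.
#
#     >>> z = 3 - 4j
#     >>> abs(z)**2 == z.real**2 + z.imag**2
#     True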
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
where=True):
ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims, where=where)
if isinstance(ret, mu.ndarray):
ret = um.sqrt(ret, out=ret)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(um.sqrt(ret))
else:
ret = um.sqrt(ret)
return ret
def _ptp(a, axis=None, out=None, keepdims=False):
return um.subtract(
umr_maximum(a, axis, None, out, keepdims),
umr_minimum(a, axis, None, None, keepdims),
out
)
def _dump(self, file, protocol=2):
if hasattr(file, 'write'):
ctx = nullcontext(file)
else:
ctx = open(os_fspath(file), "wb")
with ctx as f:
pickle.dump(self, f, protocol=protocol)
def _dumps(self, protocol=2):
return pickle.dumps(self, protocol=protocol)
| 10,887 | Python | 36.034013 | 82 | 0.629834 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/_string_helpers.py | """
String-handling utilities to avoid locale-dependence.
Used primarily to generate type name aliases.
"""
# "import string" is costly to import!
# Construct the translation tables directly
# "A" = chr(65), "a" = chr(97)
_all_chars = [chr(_m) for _m in range(256)]
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
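# Illustrative sketch (hedged): str.translate indexes a 256-character table
# by each character's ordinal, so these tables swap only the ASCII letter
# ranges and leave every other byte untouched, e.g.
#
#     >>> 'Complex128'.translate(LOWER_TABLE)
#     'complex128'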
def english_lower(s):
""" Apply English case rules to convert ASCII strings to all lower case.
This is an internal utility function to replace calls to str.lower() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
lowered : str
Examples
--------
>>> from numpy.core.numerictypes import english_lower
>>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
>>> english_lower('')
''
"""
lowered = s.translate(LOWER_TABLE)
return lowered
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
This is an internal utility function to replace calls to str.upper() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
uppered : str
Examples
--------
>>> from numpy.core.numerictypes import english_upper
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
"""
uppered = s.translate(UPPER_TABLE)
return uppered
def english_capitalize(s):
""" Apply English case rules to convert the first character of an ASCII
string to upper case.
This is an internal utility function to replace calls to str.capitalize()
such that we can avoid changing behavior with changing locales.
Parameters
----------
s : str
Returns
-------
capitalized : str
Examples
--------
>>> from numpy.core.numerictypes import english_capitalize
>>> english_capitalize('int8')
'Int8'
>>> english_capitalize('Int8')
'Int8'
>>> english_capitalize('')
''
"""
if s:
return english_upper(s[0]) + s[1:]
else:
return s
| 2,855 | Python | 27.277227 | 88 | 0.654291 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_simd.py | # NOTE: Please avoid the use of numpy.testing since NPYV intrinsics
# may be involved in their functionality.
import pytest, math, re
import itertools
from numpy.core._simd import targets
from numpy.core._multiarray_umath import __cpu_baseline__
class _Test_Utility:
# submodule of the desired SIMD extension, e.g. targets["AVX512F"]
npyv = None
# the current data type suffix e.g. 's8'
sfx = None
# target name can be 'baseline' or one or more of CPU features
target_name = None
def __getattr__(self, attr):
"""
        To call NPYV intrinsics without the 'npyv' attribute, and to
        auto-suffix intrinsics according to the class attribute 'sfx'
"""
return getattr(self.npyv, attr + "_" + self.sfx)
def _data(self, start=None, count=None, reverse=False):
"""
        Create a list of consecutive numbers according to the number of vector lanes.
"""
if start is None:
start = 1
if count is None:
count = self.nlanes
rng = range(start, start + count)
if reverse:
rng = reversed(rng)
if self._is_fp():
return [x / 1.0 for x in rng]
return list(rng)
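    # Illustrative sketch (hedged): with 4 lanes and an integer suffix,
    # _data() would yield [1, 2, 3, 4] and _data(reverse=True) would yield
    # [4, 3, 2, 1]; with a float suffix the same values come back as floats.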
def _is_unsigned(self):
return self.sfx[0] == 'u'
def _is_signed(self):
return self.sfx[0] == 's'
def _is_fp(self):
return self.sfx[0] == 'f'
def _scalar_size(self):
return int(self.sfx[1:])
def _int_clip(self, seq):
if self._is_fp():
return seq
max_int = self._int_max()
min_int = self._int_min()
return [min(max(v, min_int), max_int) for v in seq]
def _int_max(self):
if self._is_fp():
return None
max_u = self._to_unsigned(self.setall(-1))[0]
if self._is_signed():
return max_u // 2
return max_u
def _int_min(self):
if self._is_fp():
return None
if self._is_unsigned():
return 0
return -(self._int_max() + 1)
def _true_mask(self):
max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1)
return max_unsig[0]
def _to_unsigned(self, vector):
if isinstance(vector, (list, tuple)):
return getattr(self.npyv, "load_u" + self.sfx[1:])(vector)
else:
sfx = vector.__name__.replace("npyv_", "")
if sfx[0] == "b":
cvt_intrin = "cvt_u{0}_b{0}"
else:
cvt_intrin = "reinterpret_u{0}_{1}"
return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector)
def _pinfinity(self):
v = self.npyv.setall_u32(0x7f800000)
return self.npyv.reinterpret_f32_u32(v)[0]
def _ninfinity(self):
v = self.npyv.setall_u32(0xff800000)
return self.npyv.reinterpret_f32_u32(v)[0]
def _nan(self):
v = self.npyv.setall_u32(0x7fc00000)
return self.npyv.reinterpret_f32_u32(v)[0]
def _cpu_features(self):
target = self.target_name
if target == "baseline":
target = __cpu_baseline__
else:
target = target.split('__') # multi-target separator
return ' '.join(target)
class _SIMD_BOOL(_Test_Utility):
"""
To test all boolean vector types at once
"""
def _data(self, start=None, count=None, reverse=False):
nlanes = getattr(self.npyv, "nlanes_u" + self.sfx[1:])
true_mask = self._true_mask()
rng = range(nlanes)
if reverse:
rng = reversed(rng)
return [true_mask if x % 2 else 0 for x in rng]
def _load_b(self, data):
len_str = self.sfx[1:]
load = getattr(self.npyv, "load_u" + len_str)
cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}")
return cvt(load(data))
def test_operators_logical(self):
"""
Logical operations for boolean types.
Test intrinsics:
npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX
"""
data_a = self._data()
data_b = self._data(reverse=True)
vdata_a = self._load_b(data_a)
vdata_b = self._load_b(data_b)
data_and = [a & b for a, b in zip(data_a, data_b)]
vand = getattr(self, "and")(vdata_a, vdata_b)
assert vand == data_and
data_or = [a | b for a, b in zip(data_a, data_b)]
vor = getattr(self, "or")(vdata_a, vdata_b)
assert vor == data_or
data_xor = [a ^ b for a, b in zip(data_a, data_b)]
vxor = getattr(self, "xor")(vdata_a, vdata_b)
assert vxor == data_xor
vnot = getattr(self, "not")(vdata_a)
assert vnot == data_b
def test_tobits(self):
data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)])
for data in (self._data(), self._data(reverse=True)):
vdata = self._load_b(data)
data_bits = data2bits(data)
tobits = bin(self.tobits(vdata))
assert tobits == bin(data_bits)
class _SIMD_INT(_Test_Utility):
"""
To test all integer vector types at once
"""
def test_operators_shift(self):
if self.sfx in ("u8", "s8"):
return
data_a = self._data(self._int_max() - self.nlanes)
data_b = self._data(self._int_min(), reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
for count in range(self._scalar_size()):
# load to cast
data_shl_a = self.load([a << count for a in data_a])
# left shift
shl = self.shl(vdata_a, count)
assert shl == data_shl_a
# load to cast
data_shr_a = self.load([a >> count for a in data_a])
# right shift
shr = self.shr(vdata_a, count)
assert shr == data_shr_a
        # shifting by zero, by the full scalar width, or by an out-of-range
        # immediate constant is not applicable and illogical
for count in range(1, self._scalar_size()):
# load to cast
data_shl_a = self.load([a << count for a in data_a])
# left shift by an immediate constant
shli = self.shli(vdata_a, count)
assert shli == data_shl_a
# load to cast
data_shr_a = self.load([a >> count for a in data_a])
# right shift by an immediate constant
shri = self.shri(vdata_a, count)
assert shri == data_shr_a
def test_arithmetic_subadd_saturated(self):
if self.sfx in ("u32", "s32", "u64", "s64"):
return
data_a = self._data(self._int_max() - self.nlanes)
data_b = self._data(self._int_min(), reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)])
adds = self.adds(vdata_a, vdata_b)
assert adds == data_adds
data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)])
subs = self.subs(vdata_a, vdata_b)
assert subs == data_subs
def test_math_max_min(self):
data_a = self._data()
data_b = self._data(self.nlanes)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
data_max = [max(a, b) for a, b in zip(data_a, data_b)]
simd_max = self.max(vdata_a, vdata_b)
assert simd_max == data_max
data_min = [min(a, b) for a, b in zip(data_a, data_b)]
simd_min = self.min(vdata_a, vdata_b)
assert simd_min == data_min
class _SIMD_FP32(_Test_Utility):
"""
To only test single precision
"""
def test_conversions(self):
"""
        Round to the nearest even integer, assuming the CPU control
        register is set to round-to-even.
Test intrinsics:
npyv_round_s32_##SFX
"""
features = self._cpu_features()
if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features):
# very costly to emulate nearest even on Armv7
            # instead we round halves away from zero, e.g. 0.5 -> 1, -0.5 -> -1
_round = lambda v: int(v + (0.5 if v >= 0 else -0.5))
else:
_round = round
vdata_a = self.load(self._data())
vdata_a = self.sub(vdata_a, self.setall(0.5))
data_round = [_round(x) for x in vdata_a]
vround = self.round_s32(vdata_a)
assert vround == data_round
class _SIMD_FP64(_Test_Utility):
"""
To only test double precision
"""
def test_conversions(self):
"""
        Round to the nearest even integer, assuming the CPU control
        register is set to round-to-even.
Test intrinsics:
npyv_round_s32_##SFX
"""
vdata_a = self.load(self._data())
vdata_a = self.sub(vdata_a, self.setall(0.5))
vdata_b = self.mul(vdata_a, self.setall(-1.5))
data_round = [round(x) for x in list(vdata_a) + list(vdata_b)]
vround = self.round_s32(vdata_a, vdata_b)
assert vround == data_round
class _SIMD_FP(_Test_Utility):
"""
To test all float vector types at once
"""
def test_arithmetic_fused(self):
vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3
vdata_cx2 = self.add(vdata_c, vdata_c)
# multiply and add, a*b + c
data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)])
fma = self.muladd(vdata_a, vdata_b, vdata_c)
assert fma == data_fma
# multiply and subtract, a*b - c
fms = self.mulsub(vdata_a, vdata_b, vdata_c)
data_fms = self.sub(data_fma, vdata_cx2)
assert fms == data_fms
# negate multiply and add, -(a*b) + c
nfma = self.nmuladd(vdata_a, vdata_b, vdata_c)
data_nfma = self.sub(vdata_cx2, data_fma)
assert nfma == data_nfma
# negate multiply and subtract, -(a*b) - c
nfms = self.nmulsub(vdata_a, vdata_b, vdata_c)
data_nfms = self.mul(data_fma, self.setall(-1))
assert nfms == data_nfms
def test_abs(self):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
data = self._data()
vdata = self.load(self._data())
abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan))
for case, desired in abs_cases:
data_abs = [desired]*self.nlanes
vabs = self.abs(self.setall(case))
assert vabs == pytest.approx(data_abs, nan_ok=True)
vabs = self.abs(self.mul(vdata, self.setall(-1)))
assert vabs == data
def test_sqrt(self):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
data = self._data()
vdata = self.load(self._data())
sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf))
for case, desired in sqrt_cases:
data_sqrt = [desired]*self.nlanes
sqrt = self.sqrt(self.setall(case))
assert sqrt == pytest.approx(data_sqrt, nan_ok=True)
data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision
sqrt = self.sqrt(vdata)
assert sqrt == data_sqrt
def test_square(self):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
data = self._data()
vdata = self.load(self._data())
# square
square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf))
for case, desired in square_cases:
data_square = [desired]*self.nlanes
square = self.square(self.setall(case))
assert square == pytest.approx(data_square, nan_ok=True)
data_square = [x*x for x in data]
square = self.square(vdata)
assert square == data_square
@pytest.mark.parametrize("intrin, func", [("ceil", math.ceil),
("trunc", math.trunc), ("floor", math.floor), ("rint", round)])
def test_rounding(self, intrin, func):
"""
Test intrinsics:
npyv_rint_##SFX
npyv_ceil_##SFX
npyv_trunc_##SFX
            npyv_floor_##SFX
"""
intrin_name = intrin
intrin = getattr(self, intrin)
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
# special cases
round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf))
for case, desired in round_cases:
data_round = [desired]*self.nlanes
_round = intrin(self.setall(case))
assert _round == pytest.approx(data_round, nan_ok=True)
for x in range(0, 2**20, 256**2):
for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15):
data = self.load([(x+a)*w for a in range(self.nlanes)])
data_round = [func(x) for x in data]
_round = intrin(data)
assert _round == data_round
# signed zero
if intrin_name == "floor":
data_szero = (-0.0,)
else:
data_szero = (-0.0, -0.25, -0.30, -0.45, -0.5)
for w in data_szero:
_round = self._to_unsigned(intrin(self.setall(w)))
data_round = self._to_unsigned(self.setall(-0.0))
assert _round == data_round
def test_max(self):
"""
Test intrinsics:
npyv_max_##SFX
npyv_maxp_##SFX
"""
data_a = self._data()
data_b = self._data(self.nlanes)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
data_max = [max(a, b) for a, b in zip(data_a, data_b)]
_max = self.max(vdata_a, vdata_b)
assert _max == data_max
maxp = self.maxp(vdata_a, vdata_b)
assert maxp == data_max
# test IEEE standards
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
max_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10),
(pinf, pinf, pinf), (pinf, 10, pinf), (10, pinf, pinf),
(ninf, ninf, ninf), (ninf, 10, 10), (10, ninf, 10),
(10, 0, 10), (10, -10, 10))
for case_operand1, case_operand2, desired in max_cases:
data_max = [desired]*self.nlanes
vdata_a = self.setall(case_operand1)
vdata_b = self.setall(case_operand2)
maxp = self.maxp(vdata_a, vdata_b)
assert maxp == pytest.approx(data_max, nan_ok=True)
if nan in (case_operand1, case_operand2, desired):
continue
_max = self.max(vdata_a, vdata_b)
assert _max == data_max
def test_min(self):
"""
Test intrinsics:
npyv_min_##SFX
npyv_minp_##SFX
"""
data_a = self._data()
data_b = self._data(self.nlanes)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
data_min = [min(a, b) for a, b in zip(data_a, data_b)]
_min = self.min(vdata_a, vdata_b)
assert _min == data_min
minp = self.minp(vdata_a, vdata_b)
assert minp == data_min
# test IEEE standards
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
min_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10),
(pinf, pinf, pinf), (pinf, 10, 10), (10, pinf, 10),
(ninf, ninf, ninf), (ninf, 10, ninf), (10, ninf, ninf),
(10, 0, 0), (10, -10, -10))
for case_operand1, case_operand2, desired in min_cases:
data_min = [desired]*self.nlanes
vdata_a = self.setall(case_operand1)
vdata_b = self.setall(case_operand2)
minp = self.minp(vdata_a, vdata_b)
assert minp == pytest.approx(data_min, nan_ok=True)
if nan in (case_operand1, case_operand2, desired):
continue
_min = self.min(vdata_a, vdata_b)
assert _min == data_min
def test_reciprocal(self):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
data = self._data()
vdata = self.load(self._data())
recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf))
for case, desired in recip_cases:
data_recip = [desired]*self.nlanes
recip = self.recip(self.setall(case))
assert recip == pytest.approx(data_recip, nan_ok=True)
data_recip = self.load([1/x for x in data]) # load to truncate precision
recip = self.recip(vdata)
assert recip == data_recip
def test_special_cases(self):
"""
Compare Not NaN. Test intrinsics:
npyv_notnan_##SFX
"""
nnan = self.notnan(self.setall(self._nan()))
assert nnan == [0]*self.nlanes
class _SIMD_ALL(_Test_Utility):
"""
To test all vector types at once
"""
def test_memory_load(self):
data = self._data()
# unaligned load
load_data = self.load(data)
assert load_data == data
# aligned load
loada_data = self.loada(data)
assert loada_data == data
# stream load
loads_data = self.loads(data)
assert loads_data == data
# load lower part
loadl = self.loadl(data)
loadl_half = list(loadl)[:self.nlanes//2]
data_half = data[:self.nlanes//2]
assert loadl_half == data_half
assert loadl != data # detect overflow
def test_memory_store(self):
data = self._data()
vdata = self.load(data)
# unaligned store
store = [0] * self.nlanes
self.store(store, vdata)
assert store == data
# aligned store
store_a = [0] * self.nlanes
self.storea(store_a, vdata)
assert store_a == data
# stream store
store_s = [0] * self.nlanes
self.stores(store_s, vdata)
assert store_s == data
# store lower part
store_l = [0] * self.nlanes
self.storel(store_l, vdata)
assert store_l[:self.nlanes//2] == data[:self.nlanes//2]
assert store_l != vdata # detect overflow
# store higher part
store_h = [0] * self.nlanes
self.storeh(store_h, vdata)
assert store_h[:self.nlanes//2] == data[self.nlanes//2:]
assert store_h != vdata # detect overflow
def test_memory_partial_load(self):
if self.sfx in ("u8", "s8", "u16", "s16"):
return
data = self._data()
lanes = list(range(1, self.nlanes + 1))
lanes += [self.nlanes**2, self.nlanes**4] # test out of range
for n in lanes:
load_till = self.load_till(data, n, 15)
data_till = data[:n] + [15] * (self.nlanes-n)
assert load_till == data_till
load_tillz = self.load_tillz(data, n)
data_tillz = data[:n] + [0] * (self.nlanes-n)
assert load_tillz == data_tillz
def test_memory_partial_store(self):
if self.sfx in ("u8", "s8", "u16", "s16"):
return
data = self._data()
data_rev = self._data(reverse=True)
vdata = self.load(data)
lanes = list(range(1, self.nlanes + 1))
lanes += [self.nlanes**2, self.nlanes**4]
for n in lanes:
data_till = data_rev.copy()
data_till[:n] = data[:n]
store_till = self._data(reverse=True)
self.store_till(store_till, n, vdata)
assert store_till == data_till
def test_memory_noncont_load(self):
if self.sfx in ("u8", "s8", "u16", "s16"):
return
for stride in range(1, 64):
data = self._data(count=stride*self.nlanes)
data_stride = data[::stride]
loadn = self.loadn(data, stride)
assert loadn == data_stride
for stride in range(-64, 0):
data = self._data(stride, -stride*self.nlanes)
data_stride = self.load(data[::stride]) # cast unsigned
loadn = self.loadn(data, stride)
assert loadn == data_stride
def test_memory_noncont_partial_load(self):
if self.sfx in ("u8", "s8", "u16", "s16"):
return
lanes = list(range(1, self.nlanes + 1))
lanes += [self.nlanes**2, self.nlanes**4]
for stride in range(1, 64):
data = self._data(count=stride*self.nlanes)
data_stride = data[::stride]
for n in lanes:
data_stride_till = data_stride[:n] + [15] * (self.nlanes-n)
loadn_till = self.loadn_till(data, stride, n, 15)
assert loadn_till == data_stride_till
data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n)
loadn_tillz = self.loadn_tillz(data, stride, n)
assert loadn_tillz == data_stride_tillz
for stride in range(-64, 0):
data = self._data(stride, -stride*self.nlanes)
data_stride = list(self.load(data[::stride])) # cast unsigned
for n in lanes:
data_stride_till = data_stride[:n] + [15] * (self.nlanes-n)
loadn_till = self.loadn_till(data, stride, n, 15)
assert loadn_till == data_stride_till
data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n)
loadn_tillz = self.loadn_tillz(data, stride, n)
assert loadn_tillz == data_stride_tillz
def test_memory_noncont_store(self):
if self.sfx in ("u8", "s8", "u16", "s16"):
return
vdata = self.load(self._data())
for stride in range(1, 64):
data = [15] * stride * self.nlanes
data[::stride] = vdata
storen = [15] * stride * self.nlanes
storen += [127]*64
self.storen(storen, stride, vdata)
assert storen[:-64] == data
assert storen[-64:] == [127]*64 # detect overflow
for stride in range(-64, 0):
data = [15] * -stride * self.nlanes
data[::stride] = vdata
storen = [127]*64
storen += [15] * -stride * self.nlanes
self.storen(storen, stride, vdata)
assert storen[64:] == data
assert storen[:64] == [127]*64 # detect overflow
def test_memory_noncont_partial_store(self):
if self.sfx in ("u8", "s8", "u16", "s16"):
return
data = self._data()
vdata = self.load(data)
lanes = list(range(1, self.nlanes + 1))
lanes += [self.nlanes**2, self.nlanes**4]
for stride in range(1, 64):
for n in lanes:
data_till = [15] * stride * self.nlanes
data_till[::stride] = data[:n] + [15] * (self.nlanes-n)
storen_till = [15] * stride * self.nlanes
storen_till += [127]*64
self.storen_till(storen_till, stride, n, vdata)
assert storen_till[:-64] == data_till
assert storen_till[-64:] == [127]*64 # detect overflow
for stride in range(-64, 0):
for n in lanes:
data_till = [15] * -stride * self.nlanes
data_till[::stride] = data[:n] + [15] * (self.nlanes-n)
storen_till = [127]*64
storen_till += [15] * -stride * self.nlanes
self.storen_till(storen_till, stride, n, vdata)
assert storen_till[64:] == data_till
assert storen_till[:64] == [127]*64 # detect overflow
@pytest.mark.parametrize("intrin, table_size, elsize", [
("self.lut32", 32, 32),
("self.lut16", 16, 64)
])
def test_lut(self, intrin, table_size, elsize):
"""
Test lookup table intrinsics:
npyv_lut32_##sfx
npyv_lut16_##sfx
"""
if elsize != self._scalar_size():
return
intrin = eval(intrin)
idx_itrin = getattr(self.npyv, f"setall_u{elsize}")
table = range(0, table_size)
for i in table:
broadi = self.setall(i)
idx = idx_itrin(i)
lut = intrin(table, idx)
assert lut == broadi
def test_misc(self):
broadcast_zero = self.zero()
assert broadcast_zero == [0] * self.nlanes
for i in range(1, 10):
broadcasti = self.setall(i)
assert broadcasti == [i] * self.nlanes
data_a, data_b = self._data(), self._data(reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
        # The py level of npyv_set_* doesn't support ignoring extra specified
        # lanes or filling non-specified lanes with zero.
vset = self.set(*data_a)
assert vset == data_a
        # The py level of npyv_setf_* doesn't support ignoring extra specified
        # lanes or filling non-specified lanes with the specified scalar.
vsetf = self.setf(10, *data_a)
assert vsetf == data_a
        # We're testing the sanity of _simd's type-vector; the reinterpret*
        # intrinsics themselves are tested by the compiler during the build
        # of the _simd module
sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64", "f32"]
if self.npyv.simd_f64:
sfxes.append("f64")
for sfx in sfxes:
vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__
assert vec_name == "npyv_" + sfx
# select & mask operations
select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b)
assert select_a == data_a
select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b)
assert select_b == data_b
# cleanup intrinsic is only used with AVX for
# zeroing registers to avoid the AVX-SSE transition penalty,
# so nothing to test here
self.npyv.cleanup()
def test_reorder(self):
data_a, data_b = self._data(), self._data(reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
# lower half part
data_a_lo = data_a[:self.nlanes//2]
data_b_lo = data_b[:self.nlanes//2]
# higher half part
data_a_hi = data_a[self.nlanes//2:]
data_b_hi = data_b[self.nlanes//2:]
# combine two lower parts
combinel = self.combinel(vdata_a, vdata_b)
assert combinel == data_a_lo + data_b_lo
# combine two higher parts
combineh = self.combineh(vdata_a, vdata_b)
assert combineh == data_a_hi + data_b_hi
# combine x2
combine = self.combine(vdata_a, vdata_b)
assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi)
# zip(interleave)
data_zipl = [v for p in zip(data_a_lo, data_b_lo) for v in p]
data_ziph = [v for p in zip(data_a_hi, data_b_hi) for v in p]
vzip = self.zip(vdata_a, vdata_b)
assert vzip == (data_zipl, data_ziph)
def test_reorder_rev64(self):
# Reverse elements of each 64-bit lane
ssize = self._scalar_size()
if ssize == 64:
return
data_rev64 = [
y for x in range(0, self.nlanes, 64//ssize)
for y in reversed(range(x, x + 64//ssize))
]
rev64 = self.rev64(self.load(range(self.nlanes)))
assert rev64 == data_rev64
def test_operators_comparison(self):
if self._is_fp():
data_a = self._data()
else:
data_a = self._data(self._int_max() - self.nlanes)
data_b = self._data(self._int_min(), reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
mask_true = self._true_mask()
def to_bool(vector):
return [lane == mask_true for lane in vector]
# equal
data_eq = [a == b for a, b in zip(data_a, data_b)]
cmpeq = to_bool(self.cmpeq(vdata_a, vdata_b))
assert cmpeq == data_eq
# not equal
data_neq = [a != b for a, b in zip(data_a, data_b)]
cmpneq = to_bool(self.cmpneq(vdata_a, vdata_b))
assert cmpneq == data_neq
# greater than
data_gt = [a > b for a, b in zip(data_a, data_b)]
cmpgt = to_bool(self.cmpgt(vdata_a, vdata_b))
assert cmpgt == data_gt
# greater than and equal
data_ge = [a >= b for a, b in zip(data_a, data_b)]
cmpge = to_bool(self.cmpge(vdata_a, vdata_b))
assert cmpge == data_ge
# less than
data_lt = [a < b for a, b in zip(data_a, data_b)]
cmplt = to_bool(self.cmplt(vdata_a, vdata_b))
assert cmplt == data_lt
# less than and equal
data_le = [a <= b for a, b in zip(data_a, data_b)]
cmple = to_bool(self.cmple(vdata_a, vdata_b))
assert cmple == data_le
def test_operators_logical(self):
if self._is_fp():
data_a = self._data()
else:
data_a = self._data(self._int_max() - self.nlanes)
data_b = self._data(self._int_min(), reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
if self._is_fp():
data_cast_a = self._to_unsigned(vdata_a)
data_cast_b = self._to_unsigned(vdata_b)
cast, cast_data = self._to_unsigned, self._to_unsigned
else:
data_cast_a, data_cast_b = data_a, data_b
cast, cast_data = lambda a: a, self.load
data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)])
vxor = cast(self.xor(vdata_a, vdata_b))
assert vxor == data_xor
data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)])
vor = cast(getattr(self, "or")(vdata_a, vdata_b))
assert vor == data_or
data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)])
vand = cast(getattr(self, "and")(vdata_a, vdata_b))
assert vand == data_and
data_not = cast_data([~a for a in data_cast_a])
vnot = cast(getattr(self, "not")(vdata_a))
assert vnot == data_not
def test_conversion_boolean(self):
bsfx = "b" + self.sfx[1:]
to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx))
from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx))
false_vb = to_boolean(self.setall(0))
true_vb = self.cmpeq(self.setall(0), self.setall(0))
assert false_vb != true_vb
false_vsfx = from_boolean(false_vb)
true_vsfx = from_boolean(true_vb)
assert false_vsfx != true_vsfx
def test_conversion_expand(self):
"""
Test expand intrinsics:
npyv_expand_u16_u8
npyv_expand_u32_u16
"""
if self.sfx not in ("u8", "u16"):
return
totype = self.sfx[0]+str(int(self.sfx[1:])*2)
expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}")
        # close enough to the edge to detect any deviation
data = self._data(self._int_max() - self.nlanes)
vdata = self.load(data)
edata = expand(vdata)
# lower half part
data_lo = data[:self.nlanes//2]
# higher half part
data_hi = data[self.nlanes//2:]
assert edata == (data_lo, data_hi)
def test_arithmetic_subadd(self):
if self._is_fp():
data_a = self._data()
else:
data_a = self._data(self._int_max() - self.nlanes)
data_b = self._data(self._int_min(), reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
# non-saturated
data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast
add = self.add(vdata_a, vdata_b)
assert add == data_add
data_sub = self.load([a - b for a, b in zip(data_a, data_b)])
sub = self.sub(vdata_a, vdata_b)
assert sub == data_sub
def test_arithmetic_mul(self):
if self.sfx in ("u64", "s64"):
return
if self._is_fp():
data_a = self._data()
else:
data_a = self._data(self._int_max() - self.nlanes)
data_b = self._data(self._int_min(), reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
data_mul = self.load([a * b for a, b in zip(data_a, data_b)])
mul = self.mul(vdata_a, vdata_b)
assert mul == data_mul
def test_arithmetic_div(self):
if not self._is_fp():
return
data_a, data_b = self._data(), self._data(reverse=True)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
# load to truncate f64 to precision of f32
data_div = self.load([a / b for a, b in zip(data_a, data_b)])
div = self.div(vdata_a, vdata_b)
assert div == data_div
def test_arithmetic_intdiv(self):
"""
Test integer division intrinsics:
npyv_divisor_##sfx
npyv_divc_##sfx
"""
if self._is_fp():
return
int_min = self._int_min()
def trunc_div(a, d):
"""
            Divide towards zero; works with large integers > 2**53
            and wraps around on overflow, similar to what C does.
if d == -1 and a == int_min:
return a
sign_a, sign_d = a < 0, d < 0
if a == 0 or sign_a == sign_d:
return a // d
return (a + sign_d - sign_a) // d + 1
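        # Illustrative check (hedged): unlike Python's floor division,
        # trunc_div rounds toward zero, matching C semantics, e.g.
        # trunc_div(-7, 2) -> -3 while -7 // 2 -> -4.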
data = [1, -int_min] # to test overflow
data += range(0, 2**8, 2**5)
data += range(0, 2**8, 2**5-1)
bsize = self._scalar_size()
if bsize > 8:
data += range(2**8, 2**16, 2**13)
data += range(2**8, 2**16, 2**13-1)
if bsize > 16:
data += range(2**16, 2**32, 2**29)
data += range(2**16, 2**32, 2**29-1)
if bsize > 32:
data += range(2**32, 2**64, 2**61)
data += range(2**32, 2**64, 2**61-1)
# negate
data += [-x for x in data]
for dividend, divisor in itertools.product(data, data):
divisor = self.setall(divisor)[0] # cast
if divisor == 0:
continue
dividend = self.load(self._data(dividend))
data_divc = [trunc_div(a, divisor) for a in dividend]
divisor_parms = self.divisor(divisor)
divc = self.divc(dividend, divisor_parms)
assert divc == data_divc
def test_arithmetic_reduce_sum(self):
"""
Test reduce sum intrinsics:
npyv_sum_##sfx
"""
if self.sfx not in ("u32", "u64", "f32", "f64"):
return
# reduce sum
data = self._data()
vdata = self.load(data)
data_sum = sum(data)
vsum = self.sum(vdata)
assert vsum == data_sum
def test_arithmetic_reduce_sumup(self):
"""
Test extend reduce sum intrinsics:
npyv_sumup_##sfx
"""
if self.sfx not in ("u8", "u16"):
return
rdata = (0, self.nlanes, self._int_min(), self._int_max()-self.nlanes)
for r in rdata:
data = self._data(r)
vdata = self.load(data)
data_sum = sum(data)
vsum = self.sumup(vdata)
assert vsum == data_sum
def test_mask_conditional(self):
"""
Conditional addition and subtraction for all supported data types.
Test intrinsics:
npyv_ifadd_##SFX, npyv_ifsub_##SFX
"""
vdata_a = self.load(self._data())
vdata_b = self.load(self._data(reverse=True))
true_mask = self.cmpeq(self.zero(), self.zero())
false_mask = self.cmpneq(self.zero(), self.zero())
data_sub = self.sub(vdata_b, vdata_a)
ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b)
assert ifsub == data_sub
ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b)
assert ifsub == vdata_b
data_add = self.add(vdata_b, vdata_a)
ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b)
assert ifadd == data_add
ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b)
assert ifadd == vdata_b
bool_sfx = ("b8", "b16", "b32", "b64")
int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64")
fp_sfx = ("f32", "f64")
all_sfx = int_sfx + fp_sfx
tests_registry = {
bool_sfx: _SIMD_BOOL,
int_sfx : _SIMD_INT,
fp_sfx : _SIMD_FP,
("f32",): _SIMD_FP32,
("f64",): _SIMD_FP64,
all_sfx : _SIMD_ALL
}
for target_name, npyv in targets.items():
simd_width = npyv.simd if npyv else ''
pretty_name = target_name.split('__') # multi-target separator
if len(pretty_name) > 1:
# multi-target
pretty_name = f"({' '.join(pretty_name)})"
else:
pretty_name = pretty_name[0]
skip = ""
skip_sfx = dict()
if not npyv:
skip = f"target '{pretty_name}' isn't supported by current machine"
elif not npyv.simd:
skip = f"target '{pretty_name}' isn't supported by NPYV"
elif not npyv.simd_f64:
skip_sfx["f64"] = f"target '{pretty_name}' doesn't support double-precision"
for sfxes, cls in tests_registry.items():
for sfx in sfxes:
skip_m = skip_sfx.get(sfx, skip)
inhr = (cls,)
attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name)
tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr)
if skip_m:
pytest.mark.skip(reason=skip_m)(tcls)
globals()[tcls.__name__] = tcls
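# Illustrative note (hedged): the loop above materializes one pytest class
# per (target, suffix) pair, so a build with an AVX2 target would expose
# names such as Test_SIMD_INT_256_AVX2_u8 in this module's globals, each
# marked as skipped when the host CPU or NPYV lacks the required support.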
| 37,478 | Python | 36.107921 | 94 | 0.531565 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_errstate.py | import pytest
import sysconfig
import numpy as np
from numpy.testing import assert_, assert_raises
# The floating point emulation on ARM EABI systems lacking a hardware FPU is
# known to be buggy. This is an attempt to identify these hosts. It may not
# catch all possible cases, but it catches the known cases of gh-413 and
# gh-15562.
hosttype = sysconfig.get_config_var('HOST_GNU_TYPE')
arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi')
class TestErrstate:
@pytest.mark.skipif(arm_softfloat,
reason='platform/cpu issue with FPU (gh-413,-15562)')
def test_invalid(self):
with np.errstate(all='raise', under='ignore'):
a = -np.arange(3)
# This should work
with np.errstate(invalid='ignore'):
np.sqrt(a)
# While this should fail!
with assert_raises(FloatingPointError):
np.sqrt(a)
@pytest.mark.skipif(arm_softfloat,
reason='platform/cpu issue with FPU (gh-15562)')
def test_divide(self):
with np.errstate(all='raise', under='ignore'):
a = -np.arange(3)
# This should work
with np.errstate(divide='ignore'):
a // 0
# While this should fail!
with assert_raises(FloatingPointError):
a // 0
# As should this, see gh-15562
with assert_raises(FloatingPointError):
a // a
def test_errcall(self):
def foo(*args):
print(args)
olderrcall = np.geterrcall()
with np.errstate(call=foo):
assert_(np.geterrcall() is foo, 'call is not foo')
with np.errstate(call=None):
assert_(np.geterrcall() is None, 'call is not None')
assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')
def test_errstate_decorator(self):
@np.errstate(all='ignore')
def foo():
a = -np.arange(3)
a // 0
foo()
| 2,066 | Python | 33.449999 | 77 | 0.57454 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_records.py | import collections.abc
import textwrap
from io import BytesIO
from os import path
from pathlib import Path
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
assert_raises, temppath,
)
from numpy.compat import pickle
class TestFromrecords:
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
assert_equal(r[0].item(), (456, 'dbe', 1.2))
assert_equal(r['col1'].dtype.kind, 'i')
assert_equal(r['col2'].dtype.kind, 'U')
assert_equal(r['col2'].dtype.itemsize, 12)
assert_equal(r['col3'].dtype.kind, 'f')
def test_fromrecords_0len(self):
""" Verify fromrecords works with a 0-length input """
dtype = [('a', float), ('b', float)]
r = np.rec.fromrecords([], dtype=dtype)
assert_equal(r.shape, (0,))
def test_fromrecords_2d(self):
data = [
[(1, 2), (3, 4), (5, 6)],
[(6, 5), (4, 3), (2, 1)]
]
expected_a = [[1, 3, 5], [6, 4, 2]]
expected_b = [[2, 4, 6], [5, 3, 1]]
# try with dtype
r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
assert_equal(r1['a'], expected_a)
assert_equal(r1['b'], expected_b)
# try with names
r2 = np.rec.fromrecords(data, names=['a', 'b'])
assert_equal(r2['a'], expected_a)
assert_equal(r2['b'], expected_b)
assert_equal(r1, r2)
def test_method_array(self):
r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big')
assert_equal(r[1].item(), (25444, b'efg', 1633837924))
def test_method_array2(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1].item(), (2, 22.0, b'b'))
def test_recarray_slices(self):
r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
(6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
def test_recarray_fromarrays(self):
x1 = np.array([1, 2, 3, 4])
x2 = np.array(['a', 'dd', 'xyz', '12'])
x3 = np.array([1.1, 2, 3, 4])
r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
assert_equal(r[1].item(), (2, 'dd', 2.0))
x1[1] = 34
assert_equal(r.a, np.array([1, 2, 3, 4]))
def test_recarray_fromfile(self):
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, 'recarray_from_file.fits')
fd = open(filename, 'rb')
fd.seek(2880 * 2)
r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.seek(2880 * 2)
r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
fd.seek(2880 * 2)
bytes_array = BytesIO()
bytes_array.write(fd.read())
bytes_array.seek(0)
r3 = np.rec.fromfile(bytes_array, formats='f8,i4,a5', shape=3, byteorder='big')
fd.close()
assert_equal(r1, r2)
assert_equal(r2, r3)
def test_recarray_from_obj(self):
count = 10
a = np.zeros(count, dtype='O')
b = np.zeros(count, dtype='f8')
c = np.zeros(count, dtype='f8')
for i in range(len(a)):
a[i] = list(range(1, 10))
mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
for i in range(len(a)):
assert_((mine.date[i] == list(range(1, 10))))
assert_((mine.data1[i] == 0.0))
assert_((mine.data2[i] == 0.0))
def test_recarray_repr(self):
a = np.array([(1, 0.1), (2, 0.2)],
dtype=[('foo', '<i4'), ('bar', '<f8')])
a = np.rec.array(a)
assert_equal(
repr(a),
textwrap.dedent("""\
rec.array([(1, 0.1), (2, 0.2)],
dtype=[('foo', '<i4'), ('bar', '<f8')])""")
)
# make sure non-structured dtypes also show up as rec.array
a = np.array(np.ones(4, dtype='f8'))
assert_(repr(np.rec.array(a)).startswith('rec.array'))
# check that the 'np.record' part of the dtype isn't shown
a = np.rec.array(np.ones(3, dtype='i4,i4'))
assert_equal(repr(a).find('numpy.record'), -1)
a = np.rec.array(np.ones(3, dtype='i4'))
assert_(repr(a).find('dtype=int32') != -1)
def test_0d_recarray_repr(self):
arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]')
assert_equal(repr(arr_0d), textwrap.dedent("""\
rec.array((1, 2., '2003'),
dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])"""))
record = arr_0d[()]
assert_equal(repr(record), "(1, 2., '2003')")
# 1.13 converted to python scalars before the repr
try:
np.set_printoptions(legacy='1.13')
assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))')
finally:
np.set_printoptions(legacy=False)
def test_recarray_from_repr(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
recordarr = np.rec.array(a)
recarr = a.view(np.recarray)
recordview = a.view(np.dtype((np.record, a.dtype)))
recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np})
recarr_r = eval("numpy." + repr(recarr), {'numpy': np})
recordview_r = eval("numpy." + repr(recordview), {'numpy': np})
assert_equal(type(recordarr_r), np.recarray)
assert_equal(recordarr_r.dtype.type, np.record)
assert_equal(recordarr, recordarr_r)
assert_equal(type(recarr_r), np.recarray)
assert_equal(recarr_r.dtype.type, np.record)
assert_equal(recarr, recarr_r)
assert_equal(type(recordview_r), np.ndarray)
assert_equal(recordview.dtype.type, np.record)
assert_equal(recordview, recordview_r)
def test_recarray_views(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
b = np.array([1,2,3,4,5], dtype=np.int64)
#check that np.rec.array gives right dtypes
assert_equal(np.rec.array(a).dtype.type, np.record)
assert_equal(type(np.rec.array(a)), np.recarray)
assert_equal(np.rec.array(b).dtype.type, np.int64)
assert_equal(type(np.rec.array(b)), np.recarray)
#check that viewing as recarray does the same
assert_equal(a.view(np.recarray).dtype.type, np.record)
assert_equal(type(a.view(np.recarray)), np.recarray)
assert_equal(b.view(np.recarray).dtype.type, np.int64)
assert_equal(type(b.view(np.recarray)), np.recarray)
#check that view to non-structured dtype preserves type=np.recarray
r = np.rec.array(np.ones(4, dtype="f4,i4"))
rv = r.view('f8').view('f4,i4')
assert_equal(type(rv), np.recarray)
assert_equal(rv.dtype.type, np.record)
#check that getitem also preserves np.recarray and np.record
r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
('c', 'i4,i4')]))
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
#and that it preserves subclasses (gh-6949)
class C(np.recarray):
pass
c = r.view(C)
assert_equal(type(c['c']), C)
        # check that accessing nested structures keeps the record type, but
# not for subarrays, non-void structures, non-structured voids
test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)),
('d', ('i8', 'i4,i4'))]
r = np.rec.array([((1,1), b'11111111', [1,1], 1),
((1,1), b'11111111', [1,1], 1)], dtype=test_dtype)
assert_equal(r.a.dtype.type, np.record)
assert_equal(r.b.dtype.type, np.void)
assert_equal(r.c.dtype.type, np.float32)
assert_equal(r.d.dtype.type, np.int64)
# check the same, but for views
r = np.rec.array(np.ones(4, dtype='i4,i4'))
assert_equal(r.view('f4,f4').dtype.type, np.record)
assert_equal(r.view(('i4',2)).dtype.type, np.int32)
assert_equal(r.view('V8').dtype.type, np.void)
assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64)
#check that we can undo the view
arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
for arr in arrs:
rec = np.rec.array(arr)
# recommended way to view as an ndarray:
arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
assert_equal(arr2.dtype.type, arr.dtype.type)
assert_equal(type(arr2), type(arr))
def test_recarray_from_names(self):
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
pa = np.rec.fromrecords([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)],
names='c1, c2, c3, c4')
assert_(ra.dtype == pa.dtype)
assert_(ra.shape == pa.shape)
for k in range(len(ra)):
assert_(ra[k].item() == pa[k].item())
def test_recarray_conflict_fields(self):
ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2),
(3, 'wrs', 1.3)],
names='field, shape, mean')
ra.mean = [1.1, 2.2, 3.3]
assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
assert_(type(ra.mean) is type(ra.var))
ra.shape = (1, 3)
assert_(ra.shape == (1, 3))
ra.shape = ['A', 'B', 'C']
assert_array_equal(ra['shape'], [['A', 'B', 'C']])
ra.field = 5
assert_array_equal(ra['field'], [[5, 5, 5]])
assert_(isinstance(ra.field, collections.abc.Callable))
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
dtype=[('a', int), ('b', object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
#
ndtype = np.dtype([('a', int), ('b', object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
def test_recarray_stringtypes(self):
# Issue #3993
a = np.array([('abc ', 1), ('abc', 2)],
dtype=[('foo', 'S4'), ('bar', int)])
a = a.view(np.recarray)
assert_equal(a.foo[0] == a.foo[1], False)
def test_recarray_returntypes(self):
qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
a = np.rec.array([('abc ', (1,1), 1, ('abcde', 'fgehi')),
('abc', (2,3), 1, ('abcde', 'jklmn'))],
dtype=[('foo', 'S4'),
('bar', [('A', int), ('B', int)]),
('baz', int), ('qux', qux_fields)])
assert_equal(type(a.foo), np.ndarray)
assert_equal(type(a['foo']), np.ndarray)
assert_equal(type(a.bar), np.recarray)
assert_equal(type(a['bar']), np.recarray)
assert_equal(a.bar.dtype.type, np.record)
assert_equal(type(a['qux']), np.recarray)
assert_equal(a.qux.dtype.type, np.record)
assert_equal(dict(a.qux.dtype.fields), qux_fields)
assert_equal(type(a.baz), np.ndarray)
assert_equal(type(a['baz']), np.ndarray)
assert_equal(type(a[0].bar), np.record)
assert_equal(type(a[0]['bar']), np.record)
assert_equal(a[0].bar.A, 1)
assert_equal(a[0].bar['A'], 1)
assert_equal(a[0]['bar'].A, 1)
assert_equal(a[0]['bar']['A'], 1)
assert_equal(a[0].qux.D, b'fgehi')
assert_equal(a[0].qux['D'], b'fgehi')
assert_equal(a[0]['qux'].D, b'fgehi')
assert_equal(a[0]['qux']['D'], b'fgehi')
def test_zero_width_strings(self):
# Test for #6430, based on the test case from #1901
cols = [['test'] * 3, [''] * 3]
rec = np.rec.fromarrays(cols)
assert_equal(rec['f0'], ['test', 'test', 'test'])
assert_equal(rec['f1'], ['', '', ''])
dt = np.dtype([('f0', '|S4'), ('f1', '|S')])
rec = np.rec.fromarrays(cols, dtype=dt)
assert_equal(rec.itemsize, 4)
assert_equal(rec['f0'], [b'test', b'test', b'test'])
assert_equal(rec['f1'], [b'', b'', b''])
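# Added illustration (a minimal sketch, not collected by pytest): the
# "recommended way to view as an ndarray" used in test_recarray_views above.
# The helper name is hypothetical and not part of the numpy API.
def _sketch_recarray_to_ndarray():
    rec = np.rec.array(np.ones(4, dtype='f4,i4'))
    # For structured dtypes, passing dtype.fields strips the np.record
    # wrapper; plain dtypes fall back to dtype itself.
    arr = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
    assert type(arr) is np.ndarray
    assert arr.dtype.type is not np.record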
class TestPathUsage:
# Test that pathlib.Path can be used
def test_tofile_fromfile(self):
with temppath(suffix='.bin') as path:
path = Path(path)
np.random.seed(123)
a = np.random.rand(10).astype('f8,i4,a5')
a[5] = (0.5,10,'abcde')
with path.open("wb") as fd:
a.tofile(fd)
x = np.core.records.fromfile(path,
formats='f8,i4,a5',
shape=10)
assert_array_equal(x, a)
class TestRecord:
def setup_method(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
("col2", "<i4"),
("col3", "<i4")])
def test_assignment1(self):
a = self.data
assert_equal(a.col1[0], 1)
a[0].col1 = 0
assert_equal(a.col1[0], 0)
def test_assignment2(self):
a = self.data
assert_equal(a.col1[0], 1)
a.col1[0] = 0
assert_equal(a.col1[0], 0)
def test_invalid_assignment(self):
a = self.data
def assign_invalid_column(x):
x[0].col5 = 1
assert_raises(AttributeError, assign_invalid_column, a)
def test_nonwriteable_setfield(self):
# gh-8171
r = np.rec.array([(0,), (1,)], dtype=[('f', 'i4')])
r.flags.writeable = False
with assert_raises(ValueError):
r.f = [2, 3]
with assert_raises(ValueError):
r.setfield([2,3], *r.dtype.fields['f'])
def test_out_of_order_fields(self):
# names in the same order, padding added to descr
x = self.data[['col1', 'col2']]
assert_equal(x.dtype.names, ('col1', 'col2'))
assert_equal(x.dtype.descr,
[('col1', '<i4'), ('col2', '<i4'), ('', '|V4')])
# names change order to match indexing, as of 1.14 - descr can't
# represent that
y = self.data[['col2', 'col1']]
assert_equal(y.dtype.names, ('col2', 'col1'))
assert_raises(ValueError, lambda: y.dtype.descr)
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
protocol=proto)))
def test_pickle_2(self):
a = self.data
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
protocol=proto)))
def test_pickle_3(self):
# Issue #7140
a = self.data
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
pa = pickle.loads(pickle.dumps(a[0], protocol=proto))
assert_(pa.flags.c_contiguous)
assert_(pa.flags.f_contiguous)
assert_(pa.flags.writeable)
assert_(pa.flags.aligned)
def test_pickle_void(self):
# issue gh-13593
dt = np.dtype([('obj', 'O'), ('int', 'i')])
a = np.empty(1, dtype=dt)
data = (bytearray(b'eman'),)
a['obj'] = data
a['int'] = 42
ctor, args = a[0].__reduce__()
# check the constructor is what we expect before interpreting the arguments
assert ctor is np.core.multiarray.scalar
dtype, obj = args
# make sure we did not pickle the address
assert not isinstance(obj, bytes)
assert_raises(RuntimeError, ctor, dtype, 13)
# Test roundtrip:
dump = pickle.dumps(a[0])
unpickled = pickle.loads(dump)
assert a[0] == unpickled
# Also check the similar (impossible) "object scalar" path:
with pytest.warns(DeprecationWarning):
assert ctor(np.dtype("O"), data) is data
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
r = np.zeros((1,3), dtype=dt).view(np.recarray)
r.foo = np.array([1, 2, 3]) # TypeError?
# https://github.com/numpy/numpy/issues/3256
ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
ra[['x','y']] # TypeError?
def test_record_scalar_setitem(self):
# https://github.com/numpy/numpy/issues/3561
rec = np.recarray(1, dtype=[('x', float, 5)])
rec[0].x = 1
assert_equal(rec[0].x, np.ones(5))
def test_missing_field(self):
# https://github.com/numpy/numpy/issues/4806
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
assert_raises(KeyError, lambda: arr[['nofield']])
def test_fromarrays_nested_structured_arrays(self):
arrays = [
np.arange(10),
np.ones(10, dtype=[('a', '<u2'), ('b', '<f4')]),
]
arr = np.rec.fromarrays(arrays) # ValueError?
@pytest.mark.parametrize('nfields', [0, 1, 2])
def test_assign_dtype_attribute(self, nfields):
dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
data = np.zeros(3, dt).view(np.recarray)
# the original and resulting dtypes differ on whether they are records
assert data.dtype.type == np.record
assert dt.type != np.record
# ensure that the dtype remains a record even when assigned
data.dtype = dt
assert data.dtype.type == np.record
@pytest.mark.parametrize('nfields', [0, 1, 2])
def test_nested_fields_are_records(self, nfields):
""" Test that nested structured types are treated as records too """
dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
dt_outer = np.dtype([('inner', dt)])
data = np.zeros(3, dt_outer).view(np.recarray)
assert isinstance(data, np.recarray)
assert isinstance(data['inner'], np.recarray)
data0 = data[0]
assert isinstance(data0, np.record)
assert isinstance(data0['inner'], np.record)
def test_nested_dtype_padding(self):
""" test that trailing padding is preserved """
# construct a dtype with padding at the end
dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)])
dt_padded_end = dt[['a', 'b']]
assert dt_padded_end.itemsize == dt.itemsize
dt_outer = np.dtype([('inner', dt_padded_end)])
data = np.zeros(3, dt_outer).view(np.recarray)
assert_equal(data['inner'].dtype, dt_padded_end)
data0 = data[0]
assert_equal(data0['inner'].dtype, dt_padded_end)
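# Added illustration (a minimal sketch, not collected by pytest): selecting a
# subset of fields with dt[['a', 'b']], as in test_nested_dtype_padding above,
# keeps the parent itemsize so the trailing bytes become padding.
def _sketch_field_subset_padding():
    dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)])
    sub = dt[['a', 'b']]  # drops 'c' but keeps its byte as padding
    assert sub.itemsize == dt.itemsize == 3
    assert sub.names == ('a', 'b')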
def test_find_duplicate():
l1 = [1, 2, 3, 4, 5, 6]
assert_(np.rec.find_duplicate(l1) == [])
l2 = [1, 2, 1, 4, 5, 6]
assert_(np.rec.find_duplicate(l2) == [1])
l3 = [1, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [1, 2])
l3 = [2, 2, 1, 4, 1, 6, 2, 3]
assert_(np.rec.find_duplicate(l3) == [2, 1])
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalarmath.py
import contextlib
import sys
import warnings
import itertools
import operator
import platform
from numpy.compat import _pep440
import pytest
from hypothesis import given, settings
from hypothesis.strategies import sampled_from
from hypothesis.extra import numpy as hynp
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
assert_warns,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
floating_types = np.floating.__subclasses__()
complex_floating_types = np.complexfloating.__subclasses__()
objecty_things = [object(), None]
reasonable_operators_for_scalars = [
operator.lt, operator.le, operator.eq, operator.ne, operator.ge,
operator.gt, operator.add, operator.floordiv, operator.mod,
operator.mul, operator.pow, operator.sub, operator.truediv,
]
# This compares scalarmath against ufuncs.
class TestTypes:
def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
a_array = np.array([3], dtype=atype)
for l, btype in enumerate(types):
b_scalar = btype(1)
b_array = np.array([1], dtype=btype)
c_scalar = a_scalar + b_scalar
c_array = a_array + b_array
# It was comparing the type numbers, but the new ufunc
# function-finding mechanism finds the lowest function
# to which both inputs can be cast - which produces 'l'
# when you do 'q' + 'b'. The old function finding mechanism
# skipped ahead based on the first argument, but that
# does not produce properly symmetric results...
assert_equal(c_scalar.dtype, c_array.dtype,
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
def test_type_create(self):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
assert_equal(a, b)
def test_leak(self):
# test leak of scalar objects
        # a leak would show up in valgrind as ~2.6 MB of still-reachable memory
for i in range(200000):
np.add(1, 1)
@pytest.mark.slow
@settings(max_examples=10000, deadline=2000)
@given(sampled_from(reasonable_operators_for_scalars),
hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()))
def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
"""
This is a thorough test attempting to cover important promotion paths
and ensuring that arrays and scalars stay as aligned as possible.
However, if it creates troubles, it should maybe just be removed.
"""
scalar1 = arr1[()]
scalar2 = arr2[()]
assert isinstance(scalar1, np.generic)
assert isinstance(scalar2, np.generic)
if arr1.dtype.kind == "c" or arr2.dtype.kind == "c":
comp_ops = {operator.ge, operator.gt, operator.le, operator.lt}
if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)):
pytest.xfail("complex comp ufuncs use sort-order, scalars do not.")
# ignore fpe's since they may just mismatch for integers anyway.
with warnings.catch_warnings(), np.errstate(all="ignore"):
# Comparisons DeprecationWarnings replacing errors (2022-03):
warnings.simplefilter("error", DeprecationWarning)
try:
res = op(arr1, arr2)
except Exception as e:
with pytest.raises(type(e)):
op(scalar1, scalar2)
else:
scalar_res = op(scalar1, scalar2)
assert_array_equal(scalar_res, res)
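# Added illustration (a minimal sketch, not collected by pytest) of the
# equivalence checked above: a ufunc applied to 0-d arrays and to the scalars
# extracted from them should produce the same value and dtype.
def _sketch_scalar_array_promotion():
    scalar_res = np.float32(1.5) + np.int64(2)
    array_res = np.array(1.5, dtype=np.float32) + np.array(2, dtype=np.int64)
    assert scalar_res == array_res
    assert scalar_res.dtype == array_res.dtype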
class TestBaseMath:
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
type='binary',
max_size=sz):
exp1 = np.ones_like(inp1)
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
assert_almost_equal(out, exp1, err_msg=msg)
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
# skip true divide for ints
if dt != np.int32:
assert_almost_equal(np.reciprocal(inp2),
np.divide(1, inp2), err_msg=msg)
inp1[...] = np.ones_like(inp1)
np.add(inp1, 2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
inp2[...] = np.ones_like(inp2)
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
        # i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_almost_equal(d + d, d * 2)
np.add(d, d, out=o)
np.add(np.ones_like(d), d, out=o)
np.add(d, np.ones_like(d), out=o)
np.add(np.ones_like(d), d)
np.add(d, np.ones_like(d))
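# Added illustration (a minimal sketch, not collected by pytest) of the
# misalignment trick used in test_lower_align above: slicing an int8 buffer
# by 4 bytes yields float64 data that is not 8-byte aligned on most platforms.
def _sketch_unaligned_view():
    d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
    assert d.shape == (22,)  # (23 * 8 - 8) bytes / 8 bytes per float64
    assert d.dtype == np.float64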
class TestPower:
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
b = a ** 4
assert_(b == 81, "error with %r: got %r" % (t, b))
def test_large_types(self):
for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
a = t(51)
b = a ** 4
msg = "error with %r: got %r" % (t, b)
if np.issubdtype(t, np.integer):
assert_(b == 6765201, msg)
else:
assert_almost_equal(b, 6765201, err_msg=msg)
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
# has common type np.float64. The other combinations should all
# raise a ValueError for integer ** negative integer.
exp = [np.array(-1, dt)[()] for dt in 'bhilq']
# 1 ** -1 possible special case
base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, 1.)
# -1 ** -1 possible special case
base = [np.array(-1, dt)[()] for dt in 'bhilq']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, -1.)
# 2 ** -1 perhaps generic
base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
assert_(res.dtype.type is np.float64)
assert_almost_equal(res, .5)
def test_mixed_types(self):
typelist = [np.int8, np.int16, np.float16,
np.float32, np.float64, np.int8,
np.int16, np.int32, np.int64]
for t1 in typelist:
for t2 in typelist:
a = t1(3)
b = t2(2)
result = a**b
msg = ("error with %r and %r:"
"got %r, expected %r") % (t1, t2, result, 9)
if np.issubdtype(np.dtype(result), np.integer):
assert_(result == 9, msg)
else:
assert_almost_equal(result, 9, err_msg=msg)
def test_modular_power(self):
# modular power is not implemented, so ensure it errors
a = 5
b = 4
c = 10
expected = pow(a, b, c) # noqa: F841
for t in (np.int32, np.float32, np.complex64):
# note that 3-operand power only dispatches on the first argument
assert_raises(TypeError, operator.pow, t(a), b, c)
assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
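# Added illustration (a minimal sketch, not collected by pytest) of the rule
# described in test_integers_to_negative_integer_power above: integer **
# negative integer raises, except that uint64 with a signed exponent promotes
# to float64 under the legacy promotion rules exercised by this file.
def _sketch_negative_integer_power():
    raised = False
    try:
        np.int64(2) ** np.int64(-1)
    except ValueError:
        raised = True  # negative powers of signed integers are rejected
    assert raised
    res = np.uint64(2) ** np.int64(-1)  # uint64/int64 promote to float64
    assert res.dtype.type is np.float64
    assert res == 0.5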
def floordiv_and_mod(x, y):
return (x // y, x % y)
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
class TestModulus:
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*71, dtype=dt1)[()]
b = np.array(sg2*19, dtype=dt2)[()]
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_exact(self):
# test that float results are exact for small integers. This also
# holds for the same integers scaled by powers of two.
nlst = list(range(-127, 0))
plst = list(range(1, 128))
dividend = nlst + [0] + plst
divisor = nlst + plst
arg = list(itertools.product(dividend, divisor))
tgt = list(divmod(*t) for t in arg)
a, b = np.array(arg, dtype=int).T
# convert exact integer results from Python to float so that
# signed zero can be used, it is checked.
tgtdiv, tgtrem = np.array(tgt, dtype=float).T
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
for op in [floordiv_and_mod, divmod]:
for dt in np.typecodes['Float']:
msg = 'op: %s, dtype: %s' % (op.__name__, dt)
fa = a.astype(dt)
fb = b.astype(dt)
# use list comprehension so a_ and b_ are scalars
div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
assert_equal(div, tgtdiv, err_msg=msg)
assert_equal(rem, tgtrem, err_msg=msg)
def test_float_modulus_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*78*6e-8, dtype=dt1)[()]
b = np.array(sg2*6e-8, dtype=dt2)[()]
div, rem = op(a, b)
# Equal assertion should hold when fmod is used
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = operator.mod(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = operator.mod(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
sup.filter(RuntimeWarning, "invalid value encountered in divmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = operator.mod(fone, fzer)
assert_(np.isnan(rem), 'dt: %s' % dt)
# MSVC 2008 returns NaN here, so disable the check.
#rem = operator.mod(fone, finf)
#assert_(rem == fone, 'dt: %s' % dt)
rem = operator.mod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s' % dt)
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
for op in [floordiv_and_mod, divmod]:
div, mod = op(fone, fzer)
                    assert_(np.isinf(div))
                    assert_(np.isnan(mod))
def test_inplace_floordiv_handling(self):
# issue gh-12927
# this only applies to in-place floordiv //=, because the output type
# promotes to float which does not fit
a = np.array([1, 2], np.int64)
b = np.array([1, 2], np.uint64)
with pytest.raises(TypeError,
match=r"Cannot cast ufunc 'floor_divide' output from"):
a //= b
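# Added illustration (a minimal sketch, not collected by pytest) of gh-12927
# above: int64 // uint64 promotes to float64, which cannot be written back
# into the int64 operand in-place.
def _sketch_inplace_floordiv_cast():
    a = np.array([1, 2], np.int64)
    b = np.array([1, 2], np.uint64)
    assert (a // b).dtype == np.float64  # out-of-place result is float
    raised = False
    try:
        a //= b  # no room for a float64 result in the int64 output
    except TypeError:
        raised = True
    assert raised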
class TestComplexDivision:
def test_zero_division(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
a = t(0.0)
b = t(1.0)
assert_(np.isinf(b/a))
b = t(complex(np.inf, np.inf))
assert_(np.isinf(b/a))
b = t(complex(np.inf, np.nan))
assert_(np.isinf(b/a))
b = t(complex(np.nan, np.inf))
assert_(np.isinf(b/a))
b = t(complex(np.nan, np.nan))
assert_(np.isnan(b/a))
b = t(0.)
assert_(np.isnan(b/a))
def test_signed_zeros(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
data = (
(( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)),
(( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
(( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)),
(( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)),
(( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)),
(( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0))
)
for cases in data:
n = cases[0]
d = cases[1]
ex = cases[2]
result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
# check real and imag parts separately to avoid comparison
# in array context, which does not account for signed zeros
assert_equal(result.real, ex[0])
assert_equal(result.imag, ex[1])
def test_branches(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
data = list()
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by else condition as neither are == 0
data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0)))
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by if condition as both are == 0
# is performed in test_zero_division(), so this is skipped
# trigger else if branch: real(fabs(denom)) < imag(fabs(denom))
data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0)))
for cases in data:
n = cases[0]
d = cases[1]
ex = cases[2]
result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
# check real and imag parts separately to avoid comparison
# in array context, which does not account for signed zeros
assert_equal(result.real, ex[0])
assert_equal(result.imag, ex[1])
class TestConversion:
def test_int_from_long(self):
l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
for T in [None, np.float64, np.int64]:
a = np.array(l, dtype=T)
assert_equal([int(_m) for _m in a], li)
a = np.array(l[:3], dtype=np.uint64)
assert_equal([int(_m) for _m in a], li[:3])
def test_iinfo_long_values(self):
for code in 'bBhH':
res = np.array(np.iinfo(code).max + 1, dtype=code)
tgt = np.iinfo(code).min
assert_(res == tgt)
for code in np.typecodes['AllInteger']:
res = np.array(np.iinfo(code).max, dtype=code)
tgt = np.iinfo(code).max
assert_(res == tgt)
for code in np.typecodes['AllInteger']:
res = np.dtype(code).type(np.iinfo(code).max)
tgt = np.iinfo(code).max
assert_(res == tgt)
def test_int_raise_behaviour(self):
def overflow_error_func(dtype):
dtype(np.iinfo(dtype).max + 1)
for code in [np.int_, np.uint, np.longlong, np.ulonglong]:
assert_raises(OverflowError, overflow_error_func, code)
def test_int_from_infinite_longdouble(self):
# gh-627
x = np.longdouble(np.inf)
assert_raises(OverflowError, int, x)
with suppress_warnings() as sup:
sup.record(np.ComplexWarning)
x = np.clongdouble(np.inf)
assert_raises(OverflowError, int, x)
assert_equal(len(sup.log), 1)
@pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)")
def test_int_from_infinite_longdouble___int__(self):
x = np.longdouble(np.inf)
assert_raises(OverflowError, x.__int__)
with suppress_warnings() as sup:
sup.record(np.ComplexWarning)
x = np.clongdouble(np.inf)
assert_raises(OverflowError, x.__int__)
assert_equal(len(sup.log), 1)
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double")
@pytest.mark.skipif(platform.machine().startswith("ppc"),
reason="IBM double double")
def test_int_from_huge_longdouble(self):
# Produce a longdouble that would overflow a double,
# use exponent that avoids bug in Darwin pow function.
exp = np.finfo(np.double).maxexp - 1
huge_ld = 2 * 1234 * np.longdouble(2) ** exp
huge_i = 2 * 1234 * 2 ** exp
assert_(huge_ld != np.inf)
assert_equal(int(huge_ld), huge_i)
def test_int_from_longdouble(self):
x = np.longdouble(1.5)
assert_equal(int(x), 1)
x = np.longdouble(-10.5)
assert_equal(int(x), -10)
def test_numpy_scalar_relational_operators(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
"type %s and %s failed" % (dt1, dt2))
def test_scalar_comparison_to_none(self):
# Scalars should just return False and not give a warnings.
# The comparisons are flagged by pep8, ignore that.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', FutureWarning)
assert_(not np.float32(1) == None)
assert_(not np.str_('test') == None)
# This is dubious (see below):
assert_(not np.datetime64('NaT') == None)
assert_(np.float32(1) != None)
assert_(np.str_('test') != None)
# This is dubious (see below):
assert_(np.datetime64('NaT') != None)
assert_(len(w) == 0)
# For documentation purposes, this is why the datetime is dubious.
# At the time of deprecation this was no behaviour change, but
# it has to be considered when the deprecations are done.
assert_(np.equal(np.datetime64('NaT'), None))
#class TestRepr:
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
# val_repr = repr(val)
# val2 = eval(val_repr)
# assert_equal( val, val2 )
class TestRepr:
def _test_type_repr(self, t):
finfo = np.finfo(t)
last_fraction_bit_idx = finfo.nexp + finfo.nmant
last_exponent_bit_idx = finfo.nexp
storage_bytes = np.dtype(t).itemsize*8
# could add some more types to the list below
for which in ['small denorm', 'small norm']:
# Values from https://en.wikipedia.org/wiki/IEEE_754
constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
if which == 'small denorm':
byte = last_fraction_bit_idx // 8
bytebit = 7-(last_fraction_bit_idx % 8)
constr[byte] = 1 << bytebit
elif which == 'small norm':
byte = last_exponent_bit_idx // 8
bytebit = 7-(last_exponent_bit_idx % 8)
constr[byte] = 1 << bytebit
else:
raise ValueError('hmm')
val = constr.view(t)[0]
val_repr = repr(val)
val2 = t(eval(val_repr))
if not (val2 == 0 and val < 1e-100):
assert_equal(val, val2)
def test_float_repr(self):
# long double test cannot work, because eval goes through a python
# float
for t in [np.float32, np.float64]:
self._test_type_repr(t)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf:
def test_equal_nbytes(self):
for type in types:
x = type(0)
assert_(sys.getsizeof(x) > x.nbytes)
def test_error(self):
d = np.float32()
assert_raises(TypeError, d.__sizeof__, "a")
class TestMultiply:
def test_seq_repeat(self):
# Test that basic sequences get repeated when multiplied with
# numpy integers. And errors are raised when multiplied with others.
# Some of this behaviour may be controversial and could be open for
# change.
accepted_types = set(np.typecodes["AllInteger"])
deprecated_types = {'?'}
forbidden_types = (
set(np.typecodes["All"]) - accepted_types - deprecated_types)
forbidden_types -= {'V'} # can't default-construct void scalars
for seq_type in (list, tuple):
seq = seq_type([1, 2, 3])
for numpy_type in accepted_types:
i = np.dtype(numpy_type).type(2)
assert_equal(seq * i, seq * int(i))
assert_equal(i * seq, int(i) * seq)
for numpy_type in deprecated_types:
i = np.dtype(numpy_type).type()
assert_equal(
assert_warns(DeprecationWarning, operator.mul, seq, i),
seq * int(i))
assert_equal(
assert_warns(DeprecationWarning, operator.mul, i, seq),
int(i) * seq)
for numpy_type in forbidden_types:
i = np.dtype(numpy_type).type()
assert_raises(TypeError, operator.mul, seq, i)
assert_raises(TypeError, operator.mul, i, seq)
def test_no_seq_repeat_basic_array_like(self):
# Test that an array-like which does not know how to be multiplied
# does not attempt sequence repeat (raise TypeError).
# See also gh-7428.
class ArrayLike:
def __init__(self, arr):
self.arr = arr
def __array__(self):
return self.arr
# Test for simple ArrayLike above and memoryviews (original report)
for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))):
assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.))
assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.))
assert_array_equal(arr_like * np.int_(3), np.full(3, 3))
assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
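# Added illustration (a minimal sketch, not collected by pytest): numpy
# integers support sequence repetition through __index__, matching builtin
# ints as tested in TestMultiply above.
def _sketch_seq_repeat():
    assert [1, 2] * np.int64(3) == [1, 2] * 3
    assert np.uint8(2) * (1, 2) == (1, 2, 1, 2)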
class TestNegative:
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.neg, a)
def test_result(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
for dt in types:
a = np.ones((), dtype=dt)[()]
assert_equal(operator.neg(a) + a, 0)
class TestSubtract:
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.sub, a, a)
def test_result(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
for dt in types:
a = np.ones((), dtype=dt)[()]
assert_equal(operator.sub(a, a), 0)
class TestAbs:
def _test_abs_func(self, absfunc, test_dtype):
x = test_dtype(-1.5)
assert_equal(absfunc(x), 1.5)
x = test_dtype(0.0)
res = absfunc(x)
# assert_equal() checks zero signedness
assert_equal(res, 0.0)
x = test_dtype(-0.0)
res = absfunc(x)
assert_equal(res, 0.0)
x = test_dtype(np.finfo(test_dtype).max)
assert_equal(absfunc(x), x.real)
with suppress_warnings() as sup:
sup.filter(UserWarning)
x = test_dtype(np.finfo(test_dtype).tiny)
assert_equal(absfunc(x), x.real)
x = test_dtype(np.finfo(test_dtype).min)
assert_equal(absfunc(x), -x.real)
@pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
def test_builtin_abs(self, dtype):
if (
sys.platform == "cygwin" and dtype == np.clongdouble and
(
_pep440.parse(platform.release().split("-")[0])
< _pep440.Version("3.3.0")
)
):
pytest.xfail(
reason="absl is computed in double precision on cygwin < 3.3"
)
self._test_abs_func(abs, dtype)
@pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
def test_numpy_abs(self, dtype):
if (
sys.platform == "cygwin" and dtype == np.clongdouble and
(
_pep440.parse(platform.release().split("-")[0])
< _pep440.Version("3.3.0")
)
):
pytest.xfail(
reason="absl is computed in double precision on cygwin < 3.3"
)
self._test_abs_func(np.abs, dtype)
class TestBitShifts:
@pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
@pytest.mark.parametrize('op',
[operator.rshift, operator.lshift], ids=['>>', '<<'])
def test_shift_all_bits(self, type_code, op):
""" Shifts where the shift amount is the width of the type or wider """
# gh-2449
dt = np.dtype(type_code)
nbits = dt.itemsize * 8
for val in [5, -5]:
for shift in [nbits, nbits + 4]:
val_scl = dt.type(val)
shift_scl = dt.type(shift)
res_scl = op(val_scl, shift_scl)
if val_scl < 0 and op is operator.rshift:
# sign bit is preserved
assert_equal(res_scl, -1)
else:
assert_equal(res_scl, 0)
# Result on scalars should be the same as on arrays
val_arr = np.array([val]*32, dtype=dt)
shift_arr = np.array([shift]*32, dtype=dt)
res_arr = op(val_arr, shift_arr)
assert_equal(res_arr, res_scl)
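# Added illustration (a minimal sketch, not collected by pytest) of the
# gh-2449 behaviour above: shifts by the full type width are well defined in
# numpy (unlike in C) - right shifts keep the sign bit, everything else is 0.
def _sketch_full_width_shift():
    assert np.int32(-5) >> np.int32(32) == -1  # sign bit preserved
    assert np.int32(5) >> np.int32(32) == 0
    assert np.int32(5) << np.int32(32) == 0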
class TestHash:
@pytest.mark.parametrize("type_code", np.typecodes['AllInteger'])
def test_integer_hashes(self, type_code):
scalar = np.dtype(type_code).type
for i in range(128):
assert hash(i) == hash(scalar(i))
@pytest.mark.parametrize("type_code", np.typecodes['AllFloat'])
def test_float_and_complex_hashes(self, type_code):
scalar = np.dtype(type_code).type
for val in [np.pi, np.inf, 3, 6.]:
numpy_val = scalar(val)
# Cast back to Python, in case the NumPy scalar has less precision
if numpy_val.dtype.kind == 'c':
val = complex(numpy_val)
else:
val = float(numpy_val)
assert val == numpy_val
assert hash(val) == hash(numpy_val)
if hash(float(np.nan)) != hash(float(np.nan)):
                # If Python distinguishes different NaNs we do so too (gh-18833)
assert hash(scalar(np.nan)) != hash(scalar(np.nan))
@pytest.mark.parametrize("type_code", np.typecodes['Complex'])
def test_complex_hashes(self, type_code):
# Test some complex valued hashes specifically:
scalar = np.dtype(type_code).type
for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]:
numpy_val = scalar(val)
assert hash(complex(numpy_val)) == hash(numpy_val)
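# Added illustration (a minimal sketch, not collected by pytest): because
# numpy scalars hash like the equivalent Python numbers (tested above), they
# are interchangeable as dict keys.
def _sketch_scalar_hash():
    assert hash(np.int64(7)) == hash(7)
    assert hash(np.float64(0.5)) == hash(0.5)
    assert {np.int64(7): "a"}[7] == "a"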
@contextlib.contextmanager
def recursionlimit(n):
o = sys.getrecursionlimit()
try:
sys.setrecursionlimit(n)
yield
finally:
sys.setrecursionlimit(o)
@given(sampled_from(objecty_things),
sampled_from(reasonable_operators_for_scalars),
sampled_from(types))
def test_operator_object_left(o, op, type_):
try:
with recursionlimit(200):
op(o, type_(1))
except TypeError:
pass
@given(sampled_from(objecty_things),
sampled_from(reasonable_operators_for_scalars),
sampled_from(types))
def test_operator_object_right(o, op, type_):
try:
with recursionlimit(200):
op(type_(1), o)
except TypeError:
pass
@given(sampled_from(reasonable_operators_for_scalars),
sampled_from(types),
sampled_from(types))
def test_operator_scalars(op, type1, type2):
try:
op(type1(1), type2(1))
except TypeError:
pass
@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
def test_longdouble_inf_loop(op):
try:
op(np.longdouble(3), None)
except TypeError:
pass
try:
op(None, np.longdouble(3))
except TypeError:
pass
@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
def test_clongdouble_inf_loop(op):
if op in {operator.mod} and False:
pytest.xfail("The modulo operator is known to be broken")
try:
op(np.clongdouble(3), None)
except TypeError:
pass
try:
op(None, np.longdouble(3))
except TypeError:
pass
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
@pytest.mark.parametrize("operation", [
lambda min, max: max + max,
lambda min, max: min - max,
lambda min, max: max * max], ids=["+", "-", "*"])
def test_scalar_integer_operation_overflow(dtype, operation):
st = np.dtype(dtype).type
min = st(np.iinfo(dtype).min)
max = st(np.iinfo(dtype).max)
with pytest.warns(RuntimeWarning, match="overflow encountered"):
operation(min, max)
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
@pytest.mark.parametrize("operation", [
lambda min, neg_1: abs(min),
lambda min, neg_1: min * neg_1,
lambda min, neg_1: min // neg_1], ids=["abs", "*", "//"])
def test_scalar_signed_integer_overflow(dtype, operation):
# The minimum signed integer can "overflow" for some additional operations
st = np.dtype(dtype).type
min = st(np.iinfo(dtype).min)
neg_1 = st(-1)
with pytest.warns(RuntimeWarning, match="overflow encountered"):
operation(min, neg_1)
@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"])
@pytest.mark.xfail # TODO: the check is quite simply missing!
def test_scalar_unsigned_integer_overflow(dtype):
val = np.dtype(dtype).type(8)
with pytest.warns(RuntimeWarning, match="overflow encountered"):
-val
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
@pytest.mark.parametrize("operation", [
lambda val, zero: val // zero,
lambda val, zero: val % zero, ], ids=["//", "%"])
def test_scalar_integer_operation_divbyzero(dtype, operation):
st = np.dtype(dtype).type
val = st(100)
zero = st(0)
with pytest.warns(RuntimeWarning, match="divide by zero"):
operation(val, zero)
ops_with_names = [
("__lt__", "__gt__", operator.lt, True),
("__le__", "__ge__", operator.le, True),
("__eq__", "__eq__", operator.eq, True),
# Note __op__ and __rop__ may be identical here:
("__ne__", "__ne__", operator.ne, True),
("__gt__", "__lt__", operator.gt, True),
("__ge__", "__le__", operator.ge, True),
("__floordiv__", "__rfloordiv__", operator.floordiv, False),
("__truediv__", "__rtruediv__", operator.truediv, False),
("__add__", "__radd__", operator.add, False),
("__mod__", "__rmod__", operator.mod, False),
("__mul__", "__rmul__", operator.mul, False),
("__pow__", "__rpow__", operator.pow, False),
("__sub__", "__rsub__", operator.sub, False),
]
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble])
def test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
"""
This test covers scalar subclass deferral. Note that this is exceedingly
complicated, especially since it tends to fall back to the array paths and
these additionally add the "array priority" mechanism.
The behaviour was modified subtly in 1.22 (to make it closer to how Python
scalars work). Due to its complexity and the fact that subclassing NumPy
    scalars is probably a bad idea to begin with, there is probably room
for adjustments here.
"""
class myf_simple1(sctype):
pass
class myf_simple2(sctype):
pass
def op_func(self, other):
return __op__
def rop_func(self, other):
return __rop__
myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func})
# inheritance has to override, or this is correctly lost:
res = op(myf_simple1(1), myf_simple2(2))
assert type(res) == sctype or type(res) == np.bool_
assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2) # inherited
# Two independent subclasses do not really define an order. This could
    # be attempted, but we do not since Python's `int` doesn't either:
assert op(myf_op(1), myf_simple1(2)) == __op__
assert op(myf_simple1(1), myf_op(2)) == op(1, 2) # inherited
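# Added illustration (a minimal sketch, not collected by pytest) of the
# sibling-subclass case from test_subclass_deferral above: with no order
# defined between two independent subclasses, the inherited operator runs
# and the result degrades to the base scalar type.
def _sketch_sibling_subclass_op():
    class A(np.float64):
        pass
    class B(np.float64):
        pass
    res = A(1.0) + B(2.0)
    assert res == 3.0
    assert type(res) is np.float64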
def test_longdouble_complex():
# Simple test to check longdouble and complex combinations, since these
# need to go through promotion, which longdouble needs to be careful about.
x = np.longdouble(1)
assert x + 1j == 1+1j
assert 1j + x == 1+1j
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
def op_func(self, other):
return __op__
def rop_func(self, other):
return __rop__
# Check that deferring is indicated using `__array_ufunc__`:
myt = type("myt", (subtype,),
{__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})
    # As usual, we should never presume we can modify the float.
assert op(myt(1), np.float64(2)) == __op__
assert op(np.float64(1), myt(2)) == __rop__
if op in {operator.mod, operator.floordiv} and subtype == complex:
        return  # modulo is not supported for complex. Do not test.
if __rop__ == __op__:
return
# When no deferring is indicated, subclasses are handled normally.
myt = type("myt", (subtype,), {__rop__: rop_func})
# Check for float32, as a float subclass float64 may behave differently
res = op(myt(1), np.float16(2))
expected = op(subtype(1), np.float16(2))
assert res == expected
assert type(res) == type(expected)
res = op(np.float32(2), myt(1))
expected = op(np.float32(2), subtype(1))
assert res == expected
assert type(res) == type(expected)
# Same check for longdouble:
res = op(myt(1), np.longdouble(2))
expected = op(subtype(1), np.longdouble(2))
assert res == expected
assert type(res) == type(expected)
    res = op(np.longdouble(2), myt(1))
expected = op(np.longdouble(2), subtype(1))
    assert res == expected
    assert type(res) == type(expected)
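# Added illustration (a minimal sketch, not collected by pytest) of the
# deferral mechanism used in test_pyscalar_subclasses above: setting
# __array_ufunc__ = None makes numpy return NotImplemented, so Python falls
# back to the other operand's reflected method.
def _sketch_array_ufunc_deferral():
    class Deferring(float):
        __array_ufunc__ = None
        def __radd__(self, other):
            return "deferred"
    assert np.float64(1.0) + Deferring(2.0) == "deferred"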
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalarinherit.py
""" Test printing of scalar types.
"""
import pytest
import numpy as np
from numpy.testing import assert_, assert_raises
class A:
pass
class B(A, np.float64):
pass
class C(B):
pass
class D(C, B):
pass
class B0(np.float64, A):
pass
class C0(B0):
pass
class HasNew:
def __new__(cls, *args, **kwargs):
return cls, args, kwargs
class B1(np.float64, HasNew):
pass
class TestInherit:
def test_init(self):
x = B(1.0)
assert_(str(x) == '1.0')
y = C(2.0)
assert_(str(y) == '2.0')
z = D(3.0)
assert_(str(z) == '3.0')
def test_init2(self):
x = B0(1.0)
assert_(str(x) == '1.0')
y = C0(2.0)
assert_(str(y) == '2.0')
def test_gh_15395(self):
# HasNew is the second base, so `np.float64` should have priority
x = B1(1.0)
assert_(str(x) == '1.0')
# previously caused RecursionError!?
with pytest.raises(TypeError):
B1(1.0, 2.0)
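# Added illustration (a minimal sketch, not collected by pytest) of the
# gh-15395 point above: with np.float64 first in the MRO, its __new__ wins
# and construction behaves like a plain float.
def _sketch_mro_new_priority():
    class WithNew:
        def __new__(cls, *args, **kwargs):
            return "not a float"
    class F(np.float64, WithNew):
        pass
    assert str(F(1.0)) == '1.0'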
class TestCharacter:
def test_char_radd(self):
# GH issue 9620, reached gentype_add and raise TypeError
np_s = np.string_('abc')
np_u = np.unicode_('abc')
s = b'def'
u = u'def'
assert_(np_s.__radd__(np_s) is NotImplemented)
assert_(np_s.__radd__(np_u) is NotImplemented)
assert_(np_s.__radd__(s) is NotImplemented)
assert_(np_s.__radd__(u) is NotImplemented)
assert_(np_u.__radd__(np_s) is NotImplemented)
assert_(np_u.__radd__(np_u) is NotImplemented)
assert_(np_u.__radd__(s) is NotImplemented)
assert_(np_u.__radd__(u) is NotImplemented)
assert_(s + np_s == b'defabc')
assert_(u + np_u == u'defabc')
class MyStr(str, np.generic):
# would segfault
pass
with assert_raises(TypeError):
# Previously worked, but gave completely wrong result
ret = s + MyStr('abc')
class MyBytes(bytes, np.generic):
# would segfault
pass
ret = s + MyBytes(b'abc')
assert(type(ret) is type(s))
assert ret == b"defabc"
def test_char_repeat(self):
np_s = np.string_('abc')
np_u = np.unicode_('abc')
res_s = b'abc' * 5
res_u = u'abc' * 5
assert_(np_s * 5 == res_s)
assert_(np_u * 5 == res_u)
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalar_methods.py
"""
Test the scalar constructors, which also do type-coercion
"""
import sys
import fractions
import platform
import types
from typing import Any, Type
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_raises
class TestAsIntegerRatio:
# derived in part from the cpython test "test_floatasratio"
@pytest.mark.parametrize("ftype", [
np.half, np.single, np.double, np.longdouble])
@pytest.mark.parametrize("f, ratio", [
(0.875, (7, 8)),
(-0.875, (-7, 8)),
(0.0, (0, 1)),
(11.5, (23, 2)),
])
def test_small(self, ftype, f, ratio):
assert_equal(ftype(f).as_integer_ratio(), ratio)
@pytest.mark.parametrize("ftype", [
np.half, np.single, np.double, np.longdouble])
def test_simple_fractions(self, ftype):
R = fractions.Fraction
assert_equal(R(0, 1),
R(*ftype(0.0).as_integer_ratio()))
assert_equal(R(5, 2),
R(*ftype(2.5).as_integer_ratio()))
assert_equal(R(1, 2),
R(*ftype(0.5).as_integer_ratio()))
assert_equal(R(-2100, 1),
R(*ftype(-2100.0).as_integer_ratio()))
@pytest.mark.parametrize("ftype", [
np.half, np.single, np.double, np.longdouble])
def test_errors(self, ftype):
assert_raises(OverflowError, ftype('inf').as_integer_ratio)
assert_raises(OverflowError, ftype('-inf').as_integer_ratio)
assert_raises(ValueError, ftype('nan').as_integer_ratio)
def test_against_known_values(self):
R = fractions.Fraction
assert_equal(R(1075, 512),
R(*np.half(2.1).as_integer_ratio()))
assert_equal(R(-1075, 512),
R(*np.half(-2.1).as_integer_ratio()))
assert_equal(R(4404019, 2097152),
R(*np.single(2.1).as_integer_ratio()))
assert_equal(R(-4404019, 2097152),
R(*np.single(-2.1).as_integer_ratio()))
assert_equal(R(4728779608739021, 2251799813685248),
R(*np.double(2.1).as_integer_ratio()))
assert_equal(R(-4728779608739021, 2251799813685248),
R(*np.double(-2.1).as_integer_ratio()))
# longdouble is platform dependent
@pytest.mark.parametrize("ftype, frac_vals, exp_vals", [
# dtype test cases generated using hypothesis
# first five generated cases per dtype
(np.half, [0.0, 0.01154830649280303, 0.31082276347447274,
0.527350517124794, 0.8308562335072596],
[0, 1, 0, -8, 12]),
(np.single, [0.0, 0.09248576989263226, 0.8160498218131407,
0.17389442853722373, 0.7956044195067877],
[0, 12, 10, 17, -26]),
(np.double, [0.0, 0.031066908499895136, 0.5214135908877832,
0.45780736035689296, 0.5906586745934036],
[0, -801, 51, 194, -653]),
pytest.param(
np.longdouble,
[0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495,
0.9620175814461964],
[0, -7400, 14266, -7822, -8721],
marks=[
pytest.mark.skipif(
np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double"),
pytest.mark.skipif(
platform.machine().startswith("ppc"),
reason="IBM double double"),
]
)
])
def test_roundtrip(self, ftype, frac_vals, exp_vals):
for frac, exp in zip(frac_vals, exp_vals):
f = np.ldexp(ftype(frac), exp)
assert f.dtype == ftype
n, d = f.as_integer_ratio()
try:
# workaround for gh-9968
nf = np.longdouble(str(n))
df = np.longdouble(str(d))
except (OverflowError, RuntimeWarning):
# the values may not fit in any float type
pytest.skip("longdouble too small on this platform")
assert_equal(nf / df, f, "{}/{}".format(n, d))
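# Added illustration (a minimal sketch, not collected by pytest): numpy
# floating scalars expose as_integer_ratio() like Python floats, returning an
# exact numerator/denominator pair in lowest terms.
def _sketch_as_integer_ratio():
    assert np.float32(2.5).as_integer_ratio() == (5, 2)
    assert np.float64(0.1).as_integer_ratio() == (0.1).as_integer_ratio()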
class TestIsInteger:
@pytest.mark.parametrize("str_value", ["inf", "nan"])
@pytest.mark.parametrize("code", np.typecodes["Float"])
def test_special(self, code: str, str_value: str) -> None:
cls = np.dtype(code).type
value = cls(str_value)
assert not value.is_integer()
@pytest.mark.parametrize(
"code", np.typecodes["Float"] + np.typecodes["AllInteger"]
)
def test_true(self, code: str) -> None:
float_array = np.arange(-5, 5).astype(code)
for value in float_array:
assert value.is_integer()
@pytest.mark.parametrize("code", np.typecodes["Float"])
def test_false(self, code: str) -> None:
float_array = np.arange(-5, 5).astype(code)
float_array *= 1.1
for value in float_array:
if value == 0:
continue
assert not value.is_integer()
@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
class TestClassGetItem:
@pytest.mark.parametrize("cls", [
np.number,
np.integer,
np.inexact,
np.unsignedinteger,
np.signedinteger,
np.floating,
])
def test_abc(self, cls: Type[np.number]) -> None:
alias = cls[Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is cls
def test_abc_complexfloating(self) -> None:
alias = np.complexfloating[Any, Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is np.complexfloating
@pytest.mark.parametrize("arg_len", range(4))
def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len in (1, 2):
assert np.complexfloating[arg_tup]
else:
match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
with pytest.raises(TypeError, match=match):
np.complexfloating[arg_tup]
@pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character])
def test_abc_non_numeric(self, cls: Type[np.generic]) -> None:
with pytest.raises(TypeError):
cls[Any]
@pytest.mark.parametrize("code", np.typecodes["All"])
def test_concrete(self, code: str) -> None:
cls = np.dtype(code).type
with pytest.raises(TypeError):
cls[Any]
@pytest.mark.parametrize("arg_len", range(4))
def test_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len == 1:
assert np.number[arg_tup]
else:
with pytest.raises(TypeError):
np.number[arg_tup]
def test_subscript_scalar(self) -> None:
assert np.number[Any]
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
@pytest.mark.parametrize("cls", [np.number, np.complexfloating, np.int64])
def test_class_getitem_38(cls: Type[np.number]) -> None:
match = "Type subscription requires python >= 3.9"
with pytest.raises(TypeError, match=match):
cls[Any]
class TestBitCount:
# derived in part from the cpython test "test_bit_count"
@pytest.mark.parametrize("itype", np.sctypes['int']+np.sctypes['uint'])
def test_small(self, itype):
for a in range(max(np.iinfo(itype).min, 0), 128):
msg = f"Smoke test for {itype}({a}).bit_count()"
assert itype(a).bit_count() == bin(a).count("1"), msg
def test_bit_count(self):
for exp in [10, 17, 63]:
a = 2**exp
assert np.uint64(a).bit_count() == 1
assert np.uint64(a - 1).bit_count() == exp
assert np.uint64(a ^ 63).bit_count() == 7
assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8
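# Added illustration (a minimal sketch, not collected by pytest): bit_count()
# on numpy integer scalars matches Python's popcount idioms.
def _sketch_bit_count():
    assert np.uint8(0b1011).bit_count() == 3
    assert np.int64(255).bit_count() == bin(255).count("1")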
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_argparse.py
"""
Tests for the private NumPy argument parsing functionality.
They mainly exists to ensure good test coverage without having to try the
weirder cases on actual numpy functions but test them in one place.
The test function is defined in C to be equivalent to (errors may not always
match exactly, and could be adjusted):
def func(arg1, /, arg2, *, arg3):
i = integer(arg1) # reproducing the 'i' parsing in Python.
return None
"""
import pytest
import numpy as np
from numpy.core._multiarray_tests import argparse_example_function as func
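# A minimal pure-Python sketch (added; not part of the original suite) of the
# signature described in the docstring above. The name _python_equivalent is
# hypothetical; the real test function is implemented in C, and both its error
# messages and the order in which errors surface may differ.
def _python_equivalent(arg1, /, arg2, *, arg3):
    import operator
    operator.index(arg1)  # reproduce the 'i' (integer-only) parsing of arg1
    return None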
def test_invalid_integers():
with pytest.raises(TypeError,
match="integer argument expected, got float"):
func(1.)
with pytest.raises(OverflowError):
func(2**100)
def test_missing_arguments():
with pytest.raises(TypeError,
match="missing required positional argument 0"):
func()
with pytest.raises(TypeError,
match="missing required positional argument 0"):
func(arg2=1, arg3=4)
with pytest.raises(TypeError,
match=r"missing required argument \'arg2\' \(pos 1\)"):
func(1, arg3=5)
def test_too_many_positional():
# the second argument is positional but can be passed as keyword.
with pytest.raises(TypeError,
match="takes from 2 to 3 positional arguments but 4 were given"):
func(1, 2, 3, 4)
def test_multiple_values():
with pytest.raises(TypeError,
match=r"given by name \('arg2'\) and position \(position 1\)"):
func(1, 2, arg2=3)
def test_string_fallbacks():
# We can (currently?) use numpy strings to test the "slow" fallbacks
# that should normally not be taken due to string interning.
arg2 = np.unicode_("arg2")
missing_arg = np.unicode_("missing_arg")
func(1, **{arg2: 3})
with pytest.raises(TypeError,
match="got an unexpected keyword argument 'missing_arg'"):
func(2, **{missing_arg: 3})
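# Added illustration (a minimal sketch, not collected by pytest) of why
# np.str_ keys exercise the slow path above: CPython interns identifier-like
# literals, while a np.str_ with the same characters is an equal but distinct
# object, so the fast pointer-comparison shortcut cannot apply.
def _sketch_interned_vs_numpy_str():
    import sys
    fast = sys.intern("arg2")
    slow = np.unicode_("arg2")
    assert fast == slow
    assert fast is not slow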
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_numeric.py
import sys
import warnings
import itertools
import platform
import pytest
import math
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, assert_array_max_ulp, HAS_REFCOUNT
)
from numpy.core._rational_tests import rational
from hypothesis import given, strategies as st
from hypothesis.extra import numpy as hynp
class TestResize:
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_repeats(self):
A = np.array([1, 2, 3])
Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
def test_negative_resize(self):
A = np.arange(0, 10, dtype=np.float32)
new_shape = (-10, -1)
with pytest.raises(ValueError, match=r"negative"):
np.resize(A, new_shape=new_shape)
def test_subclass(self):
class MyArray(np.ndarray):
__array_priority__ = 1.
my_arr = np.array([1]).view(MyArray)
assert type(np.resize(my_arr, 5)) is MyArray
assert type(np.resize(my_arr, 0)) is MyArray
my_arr = np.array([]).view(MyArray)
assert type(np.resize(my_arr, 5)) is MyArray
class TestNonarrayArgs:
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
s = np.float64(1.)
assert_(isinstance(s.round(), np.float64))
assert_equal(s.round(), 1.)
@pytest.mark.parametrize('dtype', [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
])
def test_dunder_round(self, dtype):
s = dtype(1)
assert_(isinstance(round(s), int))
assert_(isinstance(round(s, None), int))
assert_(isinstance(round(s, ndigits=None), int))
assert_equal(round(s), 1)
assert_equal(round(s, None), 1)
assert_equal(round(s, ndigits=None), 1)
@pytest.mark.parametrize('val, ndigits', [
pytest.param(2**31 - 1, -1,
marks=pytest.mark.xfail(reason="Out of range of int32")
),
(2**31 - 1, 1-math.ceil(math.log10(2**31 - 1))),
(2**31 - 1, -math.ceil(math.log10(2**31 - 1)))
])
def test_dunder_round_edgecases(self, val, ndigits):
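        # The non-xfail ndigits values round 2**31 - 1 at or past its most
        # significant digit; the np.int32 result should match Python's
        # round() on the plain int.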
assert_equal(round(val, ndigits), round(np.int32(val), ndigits))
def test_dunder_round_accuracy(self):
f = np.float64(5.1 * 10**73)
assert_(isinstance(round(f, -73), np.float64))
assert_array_max_ulp(round(f, -73), 5.0 * 10**73)
assert_(isinstance(round(f, ndigits=-73), np.float64))
assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73)
i = np.int64(501)
assert_(isinstance(round(i, -2), np.int64))
assert_array_max_ulp(round(i, -2), 500)
assert_(isinstance(round(i, ndigits=-2), np.int64))
assert_array_max_ulp(round(i, ndigits=-2), 500)
@pytest.mark.xfail(raises=AssertionError, reason="gh-15896")
def test_round_py_consistency(self):
f = 5.1 * 10**73
assert_equal(round(np.float64(f), -73), round(f, -73))
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
B = np.array([None, 0])
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
class TestIsscalar:
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar('numpy'))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
class TestBoolScalar:
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
assert_((t and s) is s)
assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
assert_((t | t) is t)
assert_((f | t) is t)
assert_((t | f) is t)
assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
assert_((t & t) is t)
assert_((f & t) is f)
assert_((t & f) is f)
assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
assert_((t ^ t) is f)
assert_((f ^ t) is t)
assert_((t ^ f) is t)
assert_((f ^ f) is f)
class TestBoolArray:
def setup_method(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
self.f = np.array([False] * 41, dtype=bool)[1::]
self.o = np.array([False] * 42, dtype=bool)[2::]
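        # nm is mostly False with two True entries; im is mostly True with
        # two False entries, so any()/all() cannot bail out on the first
        # block.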
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
self.nm[-2] = True
self.im[3] = False
self.im[-2] = False
def test_all_any(self):
assert_(self.t.all())
assert_(self.t.any())
assert_(not self.f.all())
assert_(not self.f.any())
assert_(self.nm.any())
assert_(self.im.any())
assert_(not self.nm.all())
assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
assert_(np.any(d))
e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
d = np.array([False] * 100043, dtype=bool)
d[i] = True
assert_(np.any(d), msg="%r" % i)
e = np.array([True] * 100043, dtype=bool)
e[i] = False
assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
assert_array_equal(np.abs(~self.t), self.f)
assert_array_equal(np.abs(~self.f), self.t)
assert_array_equal(np.abs(self.f), self.f)
assert_array_equal(~np.abs(self.f), self.t)
assert_array_equal(~np.abs(self.t), self.f)
assert_array_equal(np.abs(~self.nm), self.im)
np.logical_not(self.t, out=self.o)
assert_array_equal(self.o, self.f)
np.abs(self.t, out=self.o)
assert_array_equal(self.o, self.t)
def test_logical_and_or_xor(self):
assert_array_equal(self.t | self.t, self.t)
assert_array_equal(self.f | self.f, self.f)
assert_array_equal(self.t | self.f, self.t)
assert_array_equal(self.f | self.t, self.t)
np.logical_or(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t & self.t, self.t)
assert_array_equal(self.f & self.f, self.f)
assert_array_equal(self.t & self.f, self.f)
assert_array_equal(self.f & self.t, self.f)
np.logical_and(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t ^ self.t, self.f)
assert_array_equal(self.f ^ self.f, self.f)
assert_array_equal(self.t ^ self.f, self.t)
assert_array_equal(self.f ^ self.t, self.t)
np.logical_xor(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.f)
assert_array_equal(self.nm & self.t, self.nm)
assert_array_equal(self.im & self.f, False)
assert_array_equal(self.nm & True, self.nm)
assert_array_equal(self.im & False, self.f)
assert_array_equal(self.nm | self.t, self.t)
assert_array_equal(self.im | self.f, self.im)
assert_array_equal(self.nm | True, self.t)
assert_array_equal(self.im | False, self.im)
assert_array_equal(self.nm ^ self.t, self.im)
assert_array_equal(self.im ^ self.f, self.im)
assert_array_equal(self.nm ^ True, self.im)
assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp:
def setup_method(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
self.ed = np.ones(self.d.size, dtype=bool)
        # generate values for all permutations of 256-bit SIMD vectors
s = 0
for i in range(32):
self.f[s:s+8] = [i & 2**x for x in range(8)]
self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
s += 8
s = 0
for i in range(16):
self.d[s:s+4] = [i & 2**x for x in range(4)]
self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
s += 4
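        # nf/nd: NaN wherever the expected masks ef/ed are True, so
        # isnan() must reproduce the masks exactly.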
self.nf = self.f.copy()
self.nd = self.d.copy()
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
self.inff = self.f.copy()
self.infd = self.d.copy()
self.inff[::3][self.ef[::3]] = np.inf
self.infd[::3][self.ed[::3]] = np.inf
self.inff[1::3][self.ef[1::3]] = -np.inf
self.infd[1::3][self.ed[1::3]] = -np.inf
self.inff[2::3][self.ef[2::3]] = np.nan
self.infd[2::3][self.ed[2::3]] = np.nan
self.efnonan = self.ef.copy()
self.efnonan[2::3] = False
self.ednonan = self.ed.copy()
self.ednonan[2::3] = False
self.signf = self.f.copy()
self.signd = self.d.copy()
self.signf[self.ef] *= -1.
self.signd[self.ed] *= -1.
self.signf[1::6][self.ef[1::6]] = -np.inf
self.signd[1::6][self.ed[1::6]] = -np.inf
self.signf[3::6][self.ef[3::6]] = -np.nan
self.signd[3::6][self.ed[3::6]] = -np.nan
self.signf[4::6][self.ef[4::6]] = -0.
self.signd[4::6][self.ed[4::6]] = -0.
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr:
def test_default(self):
err = np.geterr()
assert_equal(err,
dict(divide='warn',
invalid='warn',
over='warn',
under='ignore')
)
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
assert_(err == old)
new = np.seterr()
assert_(new['divide'] == 'print')
np.seterr(over='raise')
assert_(np.geterr()['over'] == 'raise')
assert_(new['divide'] == 'print')
np.seterr(**old)
assert_(np.geterr() == old)
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
def test_errobj(self):
olderrobj = np.geterrobj()
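        # geterrobj/seterrobj use a [bufsize, errmask, callback] triple;
        # the errmask packs the per-category error modes into bit fields.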
self.called = 0
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
extobj_err = args
assert_(len(extobj_err) == 2)
assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
def test_errobj_noerrmask(self):
# errmask = 0 has a special code path for the default
olderrobj = np.geterrobj()
try:
# set errobj to something non default
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
umath.ERR_DEFAULT + 1, None])
# call a ufunc
np.isnan(np.array([6]))
            # same with the default, many times, to flush any pre-existing
            # cached state in the code
for i in range(10000):
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
None])
np.isnan(np.array([6]))
finally:
np.seterrobj(olderrobj)
class TestFloatExceptions:
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
# Check that fpe exception is raised.
#
# Given a floating operation `flop` and two scalar values, check that
# the operation raises the floating point exception specified by
# `fpeerr`. Tests all variants with 0-d array scalars as well.
self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
# Test for all real and complex float types
@pytest.mark.parametrize("typecode", np.typecodes["AllFloat"])
def test_floating_exceptions(self, typecode):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi._machar.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi._machar.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
            # The value of tiny for double double is NaN, so we skip the
            # underflow assertions in that case
if not np.isnan(ft_tiny):
self.assert_raises_fpe(underflow,
lambda a, b: a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a, b: a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a, b: a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a, b: a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a, b: a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a, b: a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
self.assert_raises_fpe(divbyzero,
lambda a, b: a/b, ftype(1), ftype(0))
self.assert_raises_fpe(
invalid, lambda a, b: a/b, ftype(np.inf), ftype(np.inf)
)
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(0), ftype(0))
self.assert_raises_fpe(
invalid, lambda a, b: a-b, ftype(np.inf), ftype(np.inf)
)
self.assert_raises_fpe(
invalid, lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)
)
self.assert_raises_fpe(invalid,
lambda a, b: a*b, ftype(0), ftype(np.inf))
def test_warnings(self):
# test warning code path
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
assert_equal(len(w), 1)
assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
assert_equal(len(w), 2)
assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
assert_equal(len(w), 3)
assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
assert_equal(len(w), 4)
assert_("underflow" in str(w[-1].message))
class TestTypes:
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(i64, u32), np.dtype(np.int64))
assert_equal(promote_func(u64, i32), np.dtype(np.float64))
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, u32), np.dtype(np.float64))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
assert_equal(promote_func(np.int32(-1), np.array([u64])),
np.dtype(np.float64))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
assert_equal(promote_func(fld, np.array([c64])),
np.dtype(np.complex64))
assert_equal(promote_func(c64, np.array([f64])),
np.dtype(np.complex128))
assert_equal(promote_func(np.complex64(3j), np.array([f64])),
np.dtype(np.complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128),
np.dtype(np.complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
# Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
# a float32, shouldn't promote to float64
#
# a = np.array([1.0, 1.5], dtype=np.float32)
# t = np.array([True, False])
# b = t*a
# assert_equal(b, [1.0, 0.0])
# assert_equal(b.dtype, np.dtype('f4'))
# b = (1-t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
#
# Probably ~t (bitwise negation) is more proper to use here,
# but this is arguably less intuitive to understand at a glance, and
# would fail if 't' is actually an integer array instead of boolean:
#
# b = (~t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
def test_can_cast_and_promote_usertypes(self):
        # The rational type defines safe casting for signed integers and
        # booleans. Rational itself *does* cast safely to double.
# (rational does not actually cast to all signed integers, e.g.
# int64 can be both long and longlong and it registers only the first)
valid_types = ["int8", "int16", "int32", "int64", "bool"]
invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V"
rational_dt = np.dtype(rational)
for numpy_dtype in valid_types:
numpy_dtype = np.dtype(numpy_dtype)
assert np.can_cast(numpy_dtype, rational_dt)
assert np.promote_types(numpy_dtype, rational_dt) is rational_dt
for numpy_dtype in invalid_types:
numpy_dtype = np.dtype(numpy_dtype)
assert not np.can_cast(numpy_dtype, rational_dt)
with pytest.raises(TypeError):
np.promote_types(numpy_dtype, rational_dt)
double_dt = np.dtype("double")
assert np.can_cast(rational_dt, double_dt)
assert np.promote_types(double_dt, rational_dt) is double_dt
@pytest.mark.parametrize("swap", ["", "swap"])
@pytest.mark.parametrize("string_dtype", ["U", "S"])
def test_promote_types_strings(self, swap, string_dtype):
if swap == "swap":
promote_types = lambda a, b: np.promote_types(b, a)
else:
promote_types = np.promote_types
S = string_dtype
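        # The expected sizes below are the longest possible decimal text
        # for each type, e.g. 'b' (int8) needs 4 chars for "-128" and
        # 'u1' needs 3 for "255".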
# Promote numeric with unsized string:
assert_equal(promote_types('bool', S), np.dtype(S+'5'))
assert_equal(promote_types('b', S), np.dtype(S+'4'))
assert_equal(promote_types('u1', S), np.dtype(S+'3'))
assert_equal(promote_types('u2', S), np.dtype(S+'5'))
assert_equal(promote_types('u4', S), np.dtype(S+'10'))
assert_equal(promote_types('u8', S), np.dtype(S+'20'))
assert_equal(promote_types('i1', S), np.dtype(S+'4'))
assert_equal(promote_types('i2', S), np.dtype(S+'6'))
assert_equal(promote_types('i4', S), np.dtype(S+'11'))
assert_equal(promote_types('i8', S), np.dtype(S+'21'))
# Promote numeric with sized string:
assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
# Promote with object:
assert_equal(promote_types('O', S+'30'), np.dtype('O'))
@pytest.mark.parametrize(["dtype1", "dtype2"],
[[np.dtype("V6"), np.dtype("V10")], # mismatch shape
# Mismatching names:
[np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])],
])
def test_invalid_void_promotion(self, dtype1, dtype2):
with pytest.raises(TypeError):
np.promote_types(dtype1, dtype2)
@pytest.mark.parametrize(["dtype1", "dtype2"],
[[np.dtype("V10"), np.dtype("V10")],
[np.dtype([("name1", "i8")]),
np.dtype([("name1", np.dtype("i8").newbyteorder())])],
[np.dtype("i8,i8"), np.dtype("i8,>i8")],
[np.dtype("i8,i8"), np.dtype("i4,i4")],
])
def test_valid_void_promotion(self, dtype1, dtype2):
assert np.promote_types(dtype1, dtype2) == dtype1
@pytest.mark.parametrize("dtype",
list(np.typecodes["All"]) +
["i,i", "10i", "S3", "S100", "U3", "U100", rational])
def test_promote_identical_types_metadata(self, dtype):
        # The same dtype passed in twice to np.promote_types always
        # preserves its metadata
metadata = {1: 1}
dtype = np.dtype(dtype, metadata=metadata)
res = np.promote_types(dtype, dtype)
assert res.metadata == dtype.metadata
# byte-swapping preserves and makes the dtype native:
dtype = dtype.newbyteorder()
if dtype.isnative:
# The type does not have byte swapping
return
res = np.promote_types(dtype, dtype)
        # Metadata is (currently) generally lost on byte-swapping (except
        # for unicode).
if dtype.char != "U":
assert res.metadata is None
else:
assert res.metadata == metadata
assert res.isnative
@pytest.mark.slow
@pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning')
@pytest.mark.parametrize(["dtype1", "dtype2"],
itertools.product(
list(np.typecodes["All"]) +
["i,i", "S3", "S100", "U3", "U100", rational],
repeat=2))
def test_promote_types_metadata(self, dtype1, dtype2):
"""Metadata handling in promotion does not appear formalized
right now in NumPy. This test should thus be considered to
document behaviour, rather than test the correct definition of it.
        This test is very ugly; it was useful for rewriting part of the
        promotion, but probably should eventually be replaced/deleted
        (i.e. when metadata handling in promotion is better defined).
"""
metadata1 = {1: 1}
metadata2 = {2: 2}
dtype1 = np.dtype(dtype1, metadata=metadata1)
dtype2 = np.dtype(dtype2, metadata=metadata2)
try:
res = np.promote_types(dtype1, dtype2)
except TypeError:
# Promotion failed, this test only checks metadata
return
if res.char not in "USV" or res.names is not None or res.shape != ():
# All except string dtypes (and unstructured void) lose metadata
# on promotion (unless both dtypes are identical).
# At some point structured ones did not, but were restrictive.
assert res.metadata is None
elif res == dtype1:
            # If one input is the result, it is usually returned unchanged:
assert res is dtype1
elif res == dtype2:
# dtype1 may have been cast to the same type/kind as dtype2.
# If the resulting dtype is identical we currently pick the cast
# version of dtype1, which lost the metadata:
if np.promote_types(dtype1, dtype2.kind) == dtype2:
                assert res.metadata is None
else:
                assert res.metadata == metadata2
else:
assert res.metadata is None
# Try again for byteswapped version
dtype1 = dtype1.newbyteorder()
assert dtype1.metadata == metadata1
res_bs = np.promote_types(dtype1, dtype2)
assert res_bs == res
assert res_bs.metadata == res.metadata
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, complex))
assert_(not np.can_cast(complex, float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
assert_(np.can_cast('i4', 'S11'))
assert_(np.can_cast('i8', 'i8', 'no'))
assert_(not np.can_cast('<i8', '>i8', 'no'))
assert_(np.can_cast('<i8', '>i8', 'equiv'))
assert_(not np.can_cast('<i4', '>i8', 'equiv'))
assert_(np.can_cast('<i4', '>i8', 'safe'))
assert_(not np.can_cast('<i8', '>i4', 'safe'))
assert_(np.can_cast('<i8', '>i4', 'same_kind'))
assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
assert_(np.can_cast('<i8', '>u4', 'unsafe'))
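        # integer -> string casts are "safe" only when the string can hold
        # the widest possible value including the sign, hence the exact
        # widths below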
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'S4'))
assert_(not np.can_cast('b', 'S3'))
assert_(np.can_cast('u1', 'S3'))
assert_(not np.can_cast('u1', 'S2'))
assert_(np.can_cast('u2', 'S5'))
assert_(not np.can_cast('u2', 'S4'))
assert_(np.can_cast('u4', 'S10'))
assert_(not np.can_cast('u4', 'S9'))
assert_(np.can_cast('u8', 'S20'))
assert_(not np.can_cast('u8', 'S19'))
assert_(np.can_cast('i1', 'S4'))
assert_(not np.can_cast('i1', 'S3'))
assert_(np.can_cast('i2', 'S6'))
assert_(not np.can_cast('i2', 'S5'))
assert_(np.can_cast('i4', 'S11'))
assert_(not np.can_cast('i4', 'S10'))
assert_(np.can_cast('i8', 'S21'))
assert_(not np.can_cast('i8', 'S20'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'U4'))
assert_(not np.can_cast('b', 'U3'))
assert_(np.can_cast('u1', 'U3'))
assert_(not np.can_cast('u1', 'U2'))
assert_(np.can_cast('u2', 'U5'))
assert_(not np.can_cast('u2', 'U4'))
assert_(np.can_cast('u4', 'U10'))
assert_(not np.can_cast('u4', 'U9'))
assert_(np.can_cast('u8', 'U20'))
assert_(not np.can_cast('u8', 'U19'))
assert_(np.can_cast('i1', 'U4'))
assert_(not np.can_cast('i1', 'U3'))
assert_(np.can_cast('i2', 'U6'))
assert_(not np.can_cast('i2', 'U5'))
assert_(np.can_cast('i4', 'U11'))
assert_(not np.can_cast('i4', 'U10'))
assert_(np.can_cast('i8', 'U21'))
assert_(not np.can_cast('i8', 'U20'))
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
# Also test keyword arguments
assert_(np.can_cast(from_=np.int32, to=np.int64))
def test_can_cast_simple_to_structured(self):
# Non-structured can only be cast to structured in 'unsafe' mode.
assert_(not np.can_cast('i4', 'i4,i4'))
assert_(not np.can_cast('i4', 'i4,i2'))
assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
# Even if there is just a single field which is OK.
assert_(not np.can_cast('i2', [('f1', 'i4')]))
assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
# It should be the same for recursive structured or subarrays.
assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))
assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))
assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))
assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))
def test_can_cast_structured_to_simple(self):
# Need unsafe casting for structured to simple.
assert_(not np.can_cast([('f1', 'i4')], 'i4'))
assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))
assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))
# Since it is unclear what is being cast, multiple fields to
# single should not work even for unsafe casting.
assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))
# But a single field inside a single field is OK.
assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))
assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))
# And a subarray is fine too - it will just take the first element
# (arguably not very consistently; might also take the first field).
assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))
assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))
# But a structured subarray with multiple fields should fail.
assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',
casting='unsafe'))
def test_can_cast_values(self):
# gh-5917
for dt in np.sctypes['int'] + np.sctypes['uint']:
ii = np.iinfo(dt)
assert_(np.can_cast(ii.min, dt))
assert_(np.can_cast(ii.max, dt))
assert_(not np.can_cast(ii.min - 1, dt))
assert_(not np.can_cast(ii.max + 1, dt))
for dt in np.sctypes['float']:
fi = np.finfo(dt)
assert_(np.can_cast(fi.min, dt))
assert_(np.can_cast(fi.max, dt))
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
pass
class TestFromiter:
def makegen(self):
return (x**2 for x in range(24))
def test_types(self):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
assert_(ai32.dtype == np.dtype(np.int32))
assert_(ai64.dtype == np.dtype(np.int64))
assert_(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(len(a) == len(expected))
assert_(len(a20) == 20)
assert_raises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(np.alltrue(a == expected, axis=0))
assert_(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
# Raise an exception at the desired index in the iterator.
for e in range(n):
if e == eindex:
raise NIterError('error at index %s' % eindex)
yield e
@pytest.mark.parametrize("dtype", [int, object])
@pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)])
def test_2592(self, count, error_index, dtype):
# Test iteration exceptions are correctly raised. The data/generator
# has `count` elements but errors at `error_index`
iterable = self.load_data(count, error_index)
with pytest.raises(NIterError):
np.fromiter(iterable, dtype=dtype, count=count)
@pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"])
def test_empty_not_structured(self, dtype):
# Note, "S0" could be allowed at some point, so long "S" (without
# any length) is rejected.
with pytest.raises(ValueError, match="Must specify length"):
np.fromiter([], dtype=dtype)
@pytest.mark.parametrize(["dtype", "data"],
[("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
             # subarray dtypes (important because their dimensions end up
             # in the result array's dimensions):
("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
(np.dtype(("O", (2, 3))),
[((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])])
@pytest.mark.parametrize("length_hint", [0, 1])
def test_growth_and_complicated_dtypes(self, dtype, data, length_hint):
dtype = np.dtype(dtype)
data = data * 100 # make sure we realloc a bit
class MyIter:
# Class/example from gh-15789
def __length_hint__(self):
                # only required to be an estimate; this is legal
return length_hint # 0 or 1
def __iter__(self):
return iter(data)
res = np.fromiter(MyIter(), dtype=dtype)
expected = np.array(data, dtype=dtype)
assert_array_equal(res, expected)
def test_empty_result(self):
class MyIter:
def __length_hint__(self):
return 10
def __iter__(self):
return iter([]) # actual iterator is empty.
res = np.fromiter(MyIter(), dtype="d")
assert res.shape == (0,)
assert res.dtype == "d"
def test_too_few_items(self):
msg = "iterator too short: Expected 10 but iterator had only 3 items."
with pytest.raises(ValueError, match=msg):
np.fromiter([1, 2, 3], count=10, dtype=int)
def test_failed_itemsetting(self):
with pytest.raises(TypeError):
np.fromiter([1, None, 3], dtype=int)
# The following manages to hit somewhat trickier code paths:
iterable = ((2, 3, 4) for i in range(5))
with pytest.raises(ValueError):
np.fromiter(iterable, dtype=np.dtype((int, 2)))
class TestNonzero:
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(np.array([])), 0)
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
assert_equal(np.nonzero(np.array([])), ([],))
assert_equal(np.count_nonzero(np.array([0])), 0)
assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0)
assert_equal(np.nonzero(np.array([0])), ([],))
assert_equal(np.count_nonzero(np.array([1])), 1)
assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1)
assert_equal(np.nonzero(np.array([1])), ([0],))
def test_nonzero_zerod(self):
assert_equal(np.count_nonzero(np.array(0)), 0)
assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
with assert_warns(DeprecationWarning):
assert_equal(np.nonzero(np.array(0)), ([],))
assert_equal(np.count_nonzero(np.array(1)), 1)
assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
with assert_warns(DeprecationWarning):
assert_equal(np.nonzero(np.array(1)), ([0],))
def test_nonzero_onedim(self):
x = np.array([1, 0, 2, -1, 0, 0, 8])
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
# x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
# dtype=[('a', 'i4'), ('b', 'i2')])
x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)],
dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')])
assert_equal(np.count_nonzero(x['a']), 3)
assert_equal(np.count_nonzero(x['b']), 4)
assert_equal(np.count_nonzero(x['c']), 3)
assert_equal(np.count_nonzero(x['d']), 4)
assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))
def test_nonzero_twodim(self):
x = np.array([[0, 1, 0], [2, 0, 3]])
assert_equal(np.count_nonzero(x.astype('i1')), 3)
assert_equal(np.count_nonzero(x.astype('i2')), 3)
assert_equal(np.count_nonzero(x.astype('i4')), 3)
assert_equal(np.count_nonzero(x.astype('i8')), 3)
assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
x = np.eye(3)
assert_equal(np.count_nonzero(x.astype('i1')), 3)
assert_equal(np.count_nonzero(x.astype('i2')), 3)
assert_equal(np.count_nonzero(x.astype('i4')), 3)
assert_equal(np.count_nonzero(x.astype('i8')), 3)
assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
x = np.array([[(0, 1), (0, 0), (1, 11)],
[(1, 1), (1, 0), (0, 0)],
[(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
assert_equal(np.count_nonzero(x['a']), 4)
assert_equal(np.count_nonzero(x['b']), 5)
assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
assert_(not x['a'].T.flags.aligned)
assert_equal(np.count_nonzero(x['a'].T), 4)
assert_equal(np.count_nonzero(x['b'].T), 5)
assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
def test_sparse(self):
        # test the special boolean code path for sparse (mostly zero) inputs
for i in range(20):
c = np.zeros(200, dtype=bool)
c[i::20] = True
assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
c = np.zeros(400, dtype=bool)
c[10 + i:20 + i] = True
c[20 + i*2] = True
assert_equal(np.nonzero(c)[0],
np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))
def test_return_type(self):
class C(np.ndarray):
pass
for view in (C, np.ndarray):
for nd in range(1, 4):
shape = tuple(range(2, 2+nd))
x = np.arange(np.prod(shape)).reshape(shape).view(view)
for nzx in (np.nonzero(x), x.nonzero()):
for nzx_i in nzx:
assert_(type(nzx_i) is np.ndarray)
assert_(nzx_i.flags.writeable)
def test_count_nonzero_axis(self):
# Basic check of functionality
m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
expected = np.array([1, 1, 1, 1, 1])
assert_equal(np.count_nonzero(m, axis=0), expected)
expected = np.array([2, 3])
assert_equal(np.count_nonzero(m, axis=1), expected)
assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))
assert_raises(TypeError, np.count_nonzero, m, axis='foo')
assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
assert_raises(TypeError, np.count_nonzero,
m, axis=np.array([[1], [2]]))
def test_count_nonzero_axis_all_dtypes(self):
# More thorough test that the axis argument is respected
# for all dtypes and responds correctly when presented with
# either integer or tuple arguments for axis
msg = "Mismatch for dtype: %s"
def assert_equal_w_dt(a, b, err_msg):
assert_equal(a.dtype, b.dtype, err_msg=err_msg)
assert_equal(a, b, err_msg=err_msg)
for dt in np.typecodes['All']:
err_msg = msg % (np.dtype(dt).name,)
if dt != 'V':
if dt != 'M':
m = np.zeros((3, 3), dtype=dt)
n = np.ones(1, dtype=dt)
m[0, 0] = n[0]
m[1, 0] = n[0]
else: # np.zeros doesn't work for np.datetime64
m = np.array(['1970-01-01'] * 9)
m = m.reshape((3, 3))
m[0, 0] = '1970-01-12'
m[1, 0] = '1970-01-12'
m = m.astype(dt)
expected = np.array([2, 0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=0),
expected, err_msg=err_msg)
expected = np.array([1, 1, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=1),
expected, err_msg=err_msg)
expected = np.array(2)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m, axis=None),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m),
expected, err_msg=err_msg)
if dt == 'V':
# There are no 'nonzero' objects for np.void, so the testing
# setup is slightly different for this dtype
m = np.array([np.void(1)] * 6).reshape((2, 3))
expected = np.array([0, 0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=0),
expected, err_msg=err_msg)
expected = np.array([0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=1),
expected, err_msg=err_msg)
expected = np.array(0)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m, axis=None),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m),
expected, err_msg=err_msg)
def test_count_nonzero_axis_consistent(self):
# Check that the axis behaviour for valid axes in
# non-special cases is consistent (and therefore
# correct) by checking it against an integer array
        # that is then cast to the generic object dtype
from itertools import combinations, permutations
axis = (0, 1, 2, 3)
size = (5, 5, 5, 5)
msg = "Mismatch for axis: %s"
rng = np.random.RandomState(1234)
m = rng.randint(-100, 100, size=size)
n = m.astype(object)
for length in range(len(axis)):
for combo in combinations(axis, length):
for perm in permutations(combo):
assert_equal(
np.count_nonzero(m, axis=perm),
np.count_nonzero(n, axis=perm),
err_msg=msg % (perm,))
def test_countnonzero_axis_empty(self):
a = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
def test_countnonzero_keepdims(self):
a = np.array([[0, 0, 1, 0],
[0, 3, 5, 0],
[7, 9, 2, 0]])
assert_equal(np.count_nonzero(a, axis=0, keepdims=True),
[[1, 2, 3, 0]])
assert_equal(np.count_nonzero(a, axis=1, keepdims=True),
[[1], [2], [3]])
assert_equal(np.count_nonzero(a, keepdims=True),
[[6]])
def test_array_method(self):
# Tests that the array method
# call to nonzero works
m = np.array([[1, 0, 0], [4, 0, 6]])
tgt = [[0, 1, 1], [0, 0, 2]]
assert_equal(m.nonzero(), tgt)
def test_nonzero_invalid_object(self):
# gh-9295
a = np.array([np.array([1, 2]), 3], dtype=object)
assert_raises(ValueError, np.nonzero, a)
class BoolErrors:
def __bool__(self):
raise ValueError("Not allowed")
assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
def test_nonzero_sideeffect_safety(self):
# gh-13631
class FalseThenTrue:
_val = False
def __bool__(self):
try:
return self._val
finally:
self._val = True
class TrueThenFalse:
_val = True
def __bool__(self):
try:
return self._val
finally:
self._val = False
# result grows on the second pass
a = np.array([True, FalseThenTrue()])
assert_raises(RuntimeError, np.nonzero, a)
a = np.array([[True], [FalseThenTrue()]])
assert_raises(RuntimeError, np.nonzero, a)
# result shrinks on the second pass
a = np.array([False, TrueThenFalse()])
assert_raises(RuntimeError, np.nonzero, a)
a = np.array([[False], [TrueThenFalse()]])
assert_raises(RuntimeError, np.nonzero, a)
def test_nonzero_sideffects_structured_void(self):
# Checks that structured void does not mutate alignment flag of
# original array.
arr = np.zeros(5, dtype="i1,i8,i8") # `ones` may short-circuit
assert arr.flags.aligned # structs are considered "aligned"
assert not arr["f2"].flags.aligned
# make sure that nonzero/count_nonzero do not flip the flag:
np.nonzero(arr)
assert arr.flags.aligned
np.count_nonzero(arr)
assert arr.flags.aligned
def test_nonzero_exception_safe(self):
# gh-13930
class ThrowsAfter:
def __init__(self, iters):
self.iters_left = iters
def __bool__(self):
if self.iters_left == 0:
raise ValueError("called `iters` times")
self.iters_left -= 1
return True
"""
Test that a ValueError is raised instead of a SystemError
If the __bool__ function is called after the error state is set,
Python (cpython) will raise a SystemError.
"""
# assert that an exception in first pass is handled correctly
a = np.array([ThrowsAfter(5)]*10)
assert_raises(ValueError, np.nonzero, a)
# raise exception in second pass for 1-dimensional loop
a = np.array([ThrowsAfter(15)]*10)
assert_raises(ValueError, np.nonzero, a)
# raise exception in second pass for n-dimensional loop
a = np.array([[ThrowsAfter(15)]]*10)
assert_raises(ValueError, np.nonzero, a)
def test_structured_threadsafety(self):
        # Nonzero (and some other functions) should be threadsafe for
        # structured datatypes, see gh-15387. The race is timing-dependent,
        # so a failure may not reproduce on every run.
from concurrent.futures import ThreadPoolExecutor
# Create a deeply nested dtype to make a failure more likely:
dt = np.dtype([("", "f8")])
dt = np.dtype([("", dt)])
dt = np.dtype([("", dt)] * 2)
# The array should be large enough to likely run into threading issues
arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0]
def func(arr):
arr.nonzero()
tpe = ThreadPoolExecutor(max_workers=8)
futures = [tpe.submit(func, arr) for _ in range(10)]
for f in futures:
f.result()
assert arr.dtype is dt
class TestIndex:
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
g1 = randint(0, 5, size=15)
g2 = randint(0, 8, size=15)
V[g1, g2] = -V[g1, g2]
assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all())
def test_boolean_edgecase(self):
a = np.array([], dtype='int32')
b = np.array([], dtype='bool')
c = a[b]
assert_equal(c, [])
assert_equal(c.dtype, np.dtype('int32'))
class TestBinaryRepr:
def test_zero(self):
assert_equal(np.binary_repr(0), '0')
def test_positive(self):
assert_equal(np.binary_repr(10), '1010')
assert_equal(np.binary_repr(12522),
'11000011101010')
assert_equal(np.binary_repr(10736848),
'101000111101010011010000')
def test_negative(self):
assert_equal(np.binary_repr(-1), '-1')
assert_equal(np.binary_repr(-10), '-1010')
assert_equal(np.binary_repr(-12522),
'-11000011101010')
assert_equal(np.binary_repr(-10736848),
'-101000111101010011010000')
def test_sufficient_width(self):
assert_equal(np.binary_repr(0, width=5), '00000')
assert_equal(np.binary_repr(10, width=7), '0001010')
assert_equal(np.binary_repr(-5, width=7), '1111011')
def test_neg_width_boundaries(self):
# see gh-8670
# Ensure that the example in the issue does not
# break before proceeding to a more thorough test.
assert_equal(np.binary_repr(-128, width=8), '10000000')
for width in range(1, 11):
num = -2**(width - 1)
exp = '1' + (width - 1) * '0'
assert_equal(np.binary_repr(num, width=width), exp)
def test_large_neg_int64(self):
# See gh-14289.
assert_equal(np.binary_repr(np.int64(-2**62), width=64),
'11' + '0'*62)
class TestBaseRepr:
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
def test_positive(self):
assert_equal(np.base_repr(12, 10), '12')
assert_equal(np.base_repr(12, 10, 4), '000012')
assert_equal(np.base_repr(12, 4), '30')
assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')
def test_negative(self):
assert_equal(np.base_repr(-12, 10), '-12')
assert_equal(np.base_repr(-12, 10, 4), '-000012')
assert_equal(np.base_repr(-12, 4), '-30')
def test_base_range(self):
with assert_raises(ValueError):
np.base_repr(1, 1)
with assert_raises(ValueError):
np.base_repr(1, 37)
class TestArrayComparisons:
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
assert_(res)
assert_(type(res) is bool)
res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
np.array([('a', 1)], dtype='S1,u4'))
assert_(res)
assert_(type(res) is bool)
def test_array_equal_equal_nan(self):
# Test array_equal with equal_nan kwarg
a1 = np.array([1, 2, np.nan])
a2 = np.array([1, np.nan, 2])
a3 = np.array([1, 2, np.inf])
# equal_nan=False by default
assert_(not np.array_equal(a1, a1))
assert_(np.array_equal(a1, a1, equal_nan=True))
assert_(not np.array_equal(a1, a2, equal_nan=True))
# nan's not conflated with inf's
assert_(not np.array_equal(a1, a3, equal_nan=True))
# 0-D arrays
a = np.array(np.nan)
assert_(not np.array_equal(a, a))
assert_(np.array_equal(a, a, equal_nan=True))
# Non-float dtype - equal_nan should have no effect
a = np.array([1, 2, 3], dtype=int)
assert_(np.array_equal(a, a))
assert_(np.array_equal(a, a, equal_nan=True))
# Multi-dimensional array
a = np.array([[0, 1], [np.nan, 1]])
assert_(not np.array_equal(a, a))
assert_(np.array_equal(a, a, equal_nan=True))
# Complex values
a, b = [np.array([1 + 1j])]*2
a.real, b.imag = np.nan, np.nan
assert_(not np.array_equal(a, b, equal_nan=False))
assert_(np.array_equal(a, b, equal_nan=True))
def test_none_compares_elementwise(self):
a = np.array([None, 1, None], dtype=object)
assert_equal(a == None, [True, False, True])
assert_equal(a != None, [False, True, False])
a = np.ones(3)
assert_equal(a == None, [False, False, False])
assert_equal(a != None, [True, True, True])
def test_array_equiv(self):
res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([1]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([2]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
assert_(not res)
assert_(type(res) is bool)
@pytest.mark.parametrize("dtype", ["V0", "V3", "V10"])
def test_compare_unstructured_voids(self, dtype):
zeros = np.zeros(3, dtype=dtype)
assert_array_equal(zeros, zeros)
assert not (zeros != zeros).any()
if dtype == "V0":
# Can't test != of actually different data
return
nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype)
assert not (zeros == nonzeros).any()
assert (zeros != nonzeros).all()
def assert_array_strict_equal(x, y):
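    # Stricter than assert_array_equal: values must match and, where the
    # platform provides it, the memory layout flags and byte order too.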
assert_array_equal(x, y)
    # Check flags; 32-bit arches typically don't provide 16-byte alignment
if ((x.dtype.alignment <= 8 or
np.intp().dtype.itemsize != 4) and
sys.platform != 'win32'):
assert_(x.flags == y.flags)
else:
assert_(x.flags.owndata == y.flags.owndata)
assert_(x.flags.writeable == y.flags.writeable)
assert_(x.flags.c_contiguous == y.flags.c_contiguous)
assert_(x.flags.f_contiguous == y.flags.f_contiguous)
assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)
# check endianness
assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip:
def setup_method(self):
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None, casting=None):
if out is None:
if casting is None:
return a.clip(m, M)
else:
return a.clip(m, M, casting=casting)
else:
if casting is None:
return a.clip(m, M, out)
else:
return a.clip(m, M, out, casting=casting)
def clip(self, a, m, M, out=None):
# use slow-clip
selector = np.less(a, m) + 2*np.greater(a, M)
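        # selector: 0 keeps a, 1 picks m, 2 picks M (assumes m <= M
        # elementwise, so both conditions are never true at once)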
return selector.choose((a, m, M), out=out)
# Handy functions
def _generate_data(self, n, m):
return randn(n, m)
def _generate_data_complex(self, n, m):
return randn(n, m) + 1.j * rand(n, m)
def _generate_flt_data(self, n, m):
return (randn(n, m)).astype(np.float32)
def _neg_byteorder(self, a):
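        # Return a copy of `a` converted to the non-native byte order.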
a = np.asarray(a)
if sys.byteorder == 'little':
a = a.astype(a.dtype.newbyteorder('>'))
else:
a = a.astype(a.dtype.newbyteorder('<'))
return a
def _generate_non_native_data(self, n, m):
data = randn(n, m)
data = self._neg_byteorder(data)
assert_(not data.dtype.isnative)
return data
def _generate_int_data(self, n, m):
return (10 * rand(n, m)).astype(np.int64)
def _generate_int32_data(self, n, m):
return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
@pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO')
def test_ones_pathological(self, dtype):
# for preservation of behavior described in
# gh-12519; amin > amax behavior may still change
# in the future
arr = np.ones(10, dtype=dtype)
expected = np.zeros(10, dtype=dtype)
actual = np.clip(arr, 1, 0)
if dtype == 'O':
assert actual.tolist() == expected.tolist()
else:
assert_equal(actual, expected)
def test_simple_double(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_int(self):
# Test native int input with scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_array_double(self):
# Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
m = np.zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
# Test non native double input with scalar min/max.
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
# Test native double input with non native double scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
# Test native complex input with native double scalar min/max.
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_clip_complex(self):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
        # i.e., either min=None or max=None
a = np.ones(10, dtype=complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
aM = self.fastclip(a, None, M)
assert_array_strict_equal(am, a)
assert_array_strict_equal(aM, a)
def test_clip_non_contig(self):
# Test clip for non contiguous native input and native scalar min/max.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_strict_equal(ac, act)
def test_simple_out(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@pytest.mark.parametrize("casting", [None, "unsafe"])
def test_simple_int32_inout(self, casting):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
if casting is None:
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac, casting=casting)
else:
# explicitly passing "unsafe" will silence warning
self.fastclip(a, m, M, ac, casting=casting)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
# Test native int32 input with int32 scalar min/max and int64 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
# Test native int32 input with double array min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
# Test native double input with scalar min/max and int out.
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
# Test native double input with array min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
# Test native double input with scalar min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
# Test non contiguous double input with double scalar min/max in-place.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
# Test native int32 input with int32 scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(np.int32)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
# Test native int32 input with float64 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
ac = self.fastclip(a, np.float64(m), np.float64(M))
act = self.clip(a, np.float64(m), np.float64(M))
assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
# Test native int32 input with float32 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float32(-2)
M = np.float32(4)
        ac = self.fastclip(a, m, M)
        act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
        # Test native int64 input with double array min/max.
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m * np.zeros(a.shape), M)
act = self.clip(a, m * np.zeros(a.shape), M)
assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
# Test native with NON native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
M = 1.
act = self.clip(a, m_s, M)
ac = self.fastclip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
# Test NON native with native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
act = a_s.clip(m, M)
ac = self.fastclip(a_s, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
# Test NON native with native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
ac = self.fastclip(a_s, m, M)
act = a_s.clip(m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
# Test native with NON native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
m_s = self._neg_byteorder(m)
assert_(not m_s.dtype.isnative)
ac = self.fastclip(a, m_s, M)
act = self.clip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
        # Test native int64 input with float32 min/max and a float32
        # output argument.
        a = self._generate_int_data(self.nr, self.nc)
        # Use separate output buffers so the comparison is not vacuous.
        b = np.zeros(a.shape, dtype=np.float32)
        bt = b.copy()
        m = np.float32(-0.5)
        M = np.float32(1)
        act = self.clip(a, m, M, out=bt)
        ac = self.fastclip(a, m, M, out=b)
        assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
# Test non native with native scalar, min/max, out non native
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder('>'))
bt = b.copy()
m = -0.5
M = 1.
self.fastclip(a, m, M, out=b)
self.clip(a, m, M, out=bt)
assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
        # Test native int64 input with int32 min/max and float32 out
        a = self._generate_int_data(self.nr, self.nc)
        # Use separate output buffers so the comparison is not vacuous.
        b = np.zeros(a.shape, dtype=np.float32)
        bt = b.copy()
        m = np.int32(0)
        M = np.int32(1)
        act = self.clip(a, m, M, out=bt)
        ac = self.fastclip(a, m, M, out=b)
        assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
# Test native int32 input with double min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple_int32(self):
# Test native int32 input with int32 scalar min/max and int64 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_int32(self):
# Test native int32 input with double array min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_outint32(self):
# Test native double input with scalar min/max and int out
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
with assert_warns(DeprecationWarning):
# NumPy 1.17.0, 2018-02-24 - casting is unsafe
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_transposed(self):
# Test that the out argument works when transposed
a = np.arange(16).reshape(4, 4)
out = np.empty_like(a).T
a.clip(4, 10, out=out)
expected = self.clip(a, 4, 10)
assert_array_equal(out, expected)
def test_clip_with_out_memory_overlap(self):
# Test that the out argument works when it has memory overlap
a = np.arange(16).reshape(4, 4)
ac = a.copy()
a[:-1].clip(4, 10, out=a[1:])
expected = self.clip(ac[:-1], 4, 10)
assert_array_equal(a[1:], expected)
def test_clip_inplace_array(self):
# Test native double input with array min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_inplace_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_func_takes_out(self):
# Ensure that the clip() function takes an out=argument.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
a2 = np.clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
assert_(a2 is a)
def test_clip_nan(self):
d = np.arange(7.)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan, max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=-2, max=np.nan), d)
with assert_warns(DeprecationWarning):
assert_equal(d.clip(min=np.nan, max=10), d)
def test_object_clip(self):
a = np.arange(10, dtype=object)
actual = np.clip(a, 1, 5)
expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5])
assert actual.tolist() == expected.tolist()
def test_clip_all_none(self):
a = np.arange(10, dtype=object)
with assert_raises_regex(ValueError, 'max or min'):
np.clip(a, None, None)
def test_clip_invalid_casting(self):
a = np.arange(10, dtype=object)
with assert_raises_regex(ValueError,
'casting must be one of'):
self.fastclip(a, 1, 8, casting="garbage")
@pytest.mark.parametrize("amin, amax", [
# two scalars
(1, 0),
# mix scalar and array
(1, np.zeros(10)),
# two arrays
(np.ones(10), np.zeros(10)),
])
def test_clip_value_min_max_flip(self, amin, amax):
a = np.arange(10, dtype=np.int64)
# requirement from ufunc_docstrings.py
expected = np.minimum(np.maximum(a, amin), amax)
actual = np.clip(a, amin, amax)
assert_equal(actual, expected)
@pytest.mark.parametrize("arr, amin, amax, exp", [
# for a bug in npy_ObjectClip, based on a
# case produced by hypothesis
(np.zeros(10, dtype=np.int64),
0,
-2**64+1,
np.full(10, -2**64+1, dtype=object)),
# for bugs in NPY_TIMEDELTA_MAX, based on a case
# produced by hypothesis
(np.zeros(10, dtype='m8') - 1,
0,
0,
np.zeros(10, dtype='m8')),
])
def test_clip_problem_cases(self, arr, amin, amax, exp):
actual = np.clip(arr, amin, amax)
assert_equal(actual, exp)
@pytest.mark.xfail(reason="no scalar nan propagation yet",
raises=AssertionError,
strict=True)
@pytest.mark.parametrize("arr, amin, amax", [
# problematic scalar nan case from hypothesis
(np.zeros(10, dtype=np.int64),
np.array(np.nan),
np.zeros(10, dtype=np.int32)),
])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_clip_scalar_nan_propagation(self, arr, amin, amax):
# enforcement of scalar nan propagation for comparisons
# called through clip()
expected = np.minimum(np.maximum(arr, amin), amax)
actual = np.clip(arr, amin, amax)
assert_equal(actual, expected)
@pytest.mark.xfail(reason="propagation doesn't match spec")
@pytest.mark.parametrize("arr, amin, amax", [
(np.array([1] * 10, dtype='m8'),
np.timedelta64('NaT'),
np.zeros(10, dtype=np.int32)),
])
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_NaT_propagation(self, arr, amin, amax):
# NOTE: the expected function spec doesn't
# propagate NaT, but clip() now does
expected = np.minimum(np.maximum(arr, amin), amax)
actual = np.clip(arr, amin, amax)
assert_equal(actual, expected)
@given(
data=st.data(),
arr=hynp.arrays(
dtype=hynp.integer_dtypes() | hynp.floating_dtypes(),
shape=hynp.array_shapes()
)
)
def test_clip_property(self, data, arr):
"""A property-based test using Hypothesis.
This aims for maximum generality: it could in principle generate *any*
valid inputs to np.clip, and in practice generates much more varied
inputs than human testers come up with.
Because many of the inputs have tricky dependencies - compatible dtypes
        and mutually-broadcastable shapes - we use the `st.data()` strategy
        to draw values *inside* the test function, from strategies we
        construct based on previous values. An alternative would be to
        define a custom strategy with `@st.composite`, but until the code
        would otherwise be duplicated, keeping it inline is fine.
That accounts for most of the function; the actual test is just three
lines to calculate and compare actual vs expected results!
"""
numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes()
# Generate shapes for the bounds which can be broadcast with each other
# and with the base shape. Below, we might decide to use scalar bounds,
# but it's clearer to generate these shapes unconditionally in advance.
in_shapes, result_shape = data.draw(
hynp.mutually_broadcastable_shapes(
num_shapes=2, base_shape=arr.shape
)
)
# Scalar `nan` is deprecated due to the differing behaviour it shows.
s = numeric_dtypes.flatmap(
lambda x: hynp.from_dtype(x, allow_nan=False))
amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
shape=in_shapes[0], elements={"allow_nan": False}))
amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
shape=in_shapes[1], elements={"allow_nan": False}))
# Then calculate our result and expected result and check that they're
# equal! See gh-12519 and gh-19457 for discussion deciding on this
# property and the result_type argument.
result = np.clip(arr, amin, amax)
t = np.result_type(arr, amin, amax)
expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t)
assert result.dtype == t
assert_array_equal(result, expected)
class TestAllclose:
rtol = 1e-5
atol = 1e-8
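    # np.allclose(x, y) is True when every element satisfies
    # abs(x - y) <= atol + rtol * abs(y); note the asymmetry -- the
    # relative tolerance is scaled by the *second* argument.  The data
    # sets below are built directly from this inequality, e.g.
    # ([atol], [0]) is close because atol + rtol*0 == atol.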
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
def tst_allclose(self, x, y):
assert_(np.allclose(x, y), "%s and %s not close" % (x, y))
def tst_not_allclose(self, x, y):
assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))
def test_ip_allclose(self):
# Parametric test factory.
arr = np.array([100, 1000])
aran = np.arange(125).reshape((5, 5, 5))
atol = self.atol
rtol = self.rtol
data = [([1, 0], [1, 0]),
([atol], [0]),
([1], [1+rtol+atol]),
(arr, arr + arr*rtol),
(arr, arr + arr*rtol + atol*2),
(aran, aran + aran*rtol),
(np.inf, np.inf),
(np.inf, [np.inf])]
for (x, y) in data:
self.tst_allclose(x, y)
def test_ip_not_allclose(self):
# Parametric test factory.
aran = np.arange(125).reshape((5, 5, 5))
atol = self.atol
rtol = self.rtol
data = [([np.inf, 0], [1, np.inf]),
([np.inf, 0], [1, 0]),
([np.inf, np.inf], [1, np.inf]),
([np.inf, np.inf], [1, 0]),
([-np.inf, 0], [np.inf, 0]),
([np.nan, 0], [np.nan, 0]),
([atol*2], [0]),
([1], [1+rtol+atol*2]),
(aran, aran + aran*atol + atol*2),
(np.array([np.inf, 1]), np.array([0, np.inf]))]
for (x, y) in data:
self.tst_not_allclose(x, y)
def test_no_parameter_modification(self):
x = np.array([np.inf, 1])
y = np.array([0, np.inf])
np.allclose(x, y)
assert_array_equal(x, np.array([np.inf, 1]))
assert_array_equal(y, np.array([0, np.inf]))
def test_min_int(self):
# Could make problems because of abs(min_int) == min_int
min_int = np.iinfo(np.int_).min
a = np.array([min_int], dtype=np.int_)
assert_(np.allclose(a, a))
def test_equalnan(self):
x = np.array([1.0, np.nan])
assert_(np.allclose(x, x, equal_nan=True))
def test_return_class_is_ndarray(self):
# Issue gh-6475
# Check that allclose does not preserve subtypes
class Foo(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
a = Foo([1])
assert_(type(np.allclose(a, a)) is bool)
class TestIsclose:
rtol = 1e-5
atol = 1e-8
def _setup(self):
atol = self.atol
rtol = self.rtol
arr = np.array([100, 1000])
aran = np.arange(125).reshape((5, 5, 5))
self.all_close_tests = [
([1, 0], [1, 0]),
([atol], [0]),
([1], [1 + rtol + atol]),
(arr, arr + arr*rtol),
(arr, arr + arr*rtol + atol),
(aran, aran + aran*rtol),
(np.inf, np.inf),
(np.inf, [np.inf]),
([np.inf, -np.inf], [np.inf, -np.inf]),
]
self.none_close_tests = [
([np.inf, 0], [1, np.inf]),
([np.inf, -np.inf], [1, 0]),
([np.inf, np.inf], [1, -np.inf]),
([np.inf, np.inf], [1, 0]),
([np.nan, 0], [np.nan, -np.inf]),
([atol*2], [0]),
([1], [1 + rtol + atol*2]),
(aran, aran + rtol*1.1*aran + atol*1.1),
(np.array([np.inf, 1]), np.array([0, np.inf])),
]
self.some_close_tests = [
([np.inf, 0], [np.inf, atol*2]),
([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
(np.arange(3), [0, 1, 2.1]),
(np.nan, [np.nan, np.nan, np.nan]),
([0], [atol, np.inf, -np.inf, np.nan]),
(0, [atol, np.inf, -np.inf, np.nan]),
]
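        # Expected np.isclose() output for each pair above, in the same
        # order; e.g. the first pair ([np.inf, 0], [np.inf, atol*2])
        # matches on the infinities but not on 0 vs 2*atol.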
self.some_close_results = [
[True, False],
[True, False, False],
[True, True, False],
[False, False, False],
[True, False, False, False],
[True, False, False, False],
]
def test_ip_isclose(self):
self._setup()
tests = self.some_close_tests
results = self.some_close_results
for (x, y), result in zip(tests, results):
assert_array_equal(np.isclose(x, y), result)
def tst_all_isclose(self, x, y):
assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))
def tst_none_isclose(self, x, y):
msg = "%s and %s shouldn't be close"
assert_(not np.any(np.isclose(x, y)), msg % (x, y))
def tst_isclose_allclose(self, x, y):
msg = "isclose.all() and allclose aren't same for %s and %s"
msg2 = "isclose and allclose aren't same for %s and %s"
if np.isscalar(x) and np.isscalar(y):
assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))
else:
assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))
def test_ip_all_isclose(self):
self._setup()
for (x, y) in self.all_close_tests:
self.tst_all_isclose(x, y)
def test_ip_none_isclose(self):
self._setup()
for (x, y) in self.none_close_tests:
self.tst_none_isclose(x, y)
def test_ip_isclose_allclose(self):
self._setup()
tests = (self.all_close_tests + self.none_close_tests +
self.some_close_tests)
for (x, y) in tests:
self.tst_isclose_allclose(x, y)
def test_equal_nan(self):
assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
arr = np.array([1.0, np.nan])
assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])
def test_masked_arrays(self):
# Make sure to test the output type when arguments are interchanged.
x = np.ma.masked_where([True, True, False], np.arange(3))
assert_(type(x) is type(np.isclose(2, x)))
assert_(type(x) is type(np.isclose(x, 2)))
x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
assert_(type(x) is type(np.isclose(np.inf, x)))
assert_(type(x) is type(np.isclose(x, np.inf)))
x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
y = np.isclose(np.nan, x, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
y = np.isclose(x, np.nan, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
y = np.isclose(x, x, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
def test_scalar_return(self):
assert_(np.isscalar(np.isclose(1, 1)))
def test_no_parameter_modification(self):
x = np.array([np.inf, 1])
y = np.array([0, np.inf])
np.isclose(x, y)
assert_array_equal(x, np.array([np.inf, 1]))
assert_array_equal(y, np.array([0, np.inf]))
def test_non_finite_scalar(self):
# GH7014, when two scalars are compared the output should also be a
# scalar
assert_(np.isclose(np.inf, -np.inf) is np.False_)
assert_(np.isclose(0, np.inf) is np.False_)
assert_(type(np.isclose(0, np.inf)) is np.bool_)
def test_timedelta(self):
# Allclose currently works for timedelta64 as long as `atol` is
# an integer or also a timedelta64
a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
assert np.isclose(a, a, atol=0, equal_nan=True).all()
assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all()
assert np.allclose(a, a, atol=0, equal_nan=True)
assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True)
class TestStdVar:
def setup_method(self):
self.A = np.array([1, -1, 1, -1])
self.real_var = 1
def test_basic(self):
assert_almost_equal(np.var(self.A), self.real_var)
assert_almost_equal(np.std(self.A)**2, self.real_var)
def test_scalars(self):
assert_equal(np.var(1), 0)
assert_equal(np.std(1), 0)
def test_ddof1(self):
assert_almost_equal(np.var(self.A, ddof=1),
self.real_var * len(self.A) / (len(self.A) - 1))
assert_almost_equal(np.std(self.A, ddof=1)**2,
self.real_var*len(self.A) / (len(self.A) - 1))
def test_ddof2(self):
assert_almost_equal(np.var(self.A, ddof=2),
self.real_var * len(self.A) / (len(self.A) - 2))
assert_almost_equal(np.std(self.A, ddof=2)**2,
self.real_var * len(self.A) / (len(self.A) - 2))
def test_out_scalar(self):
d = np.arange(10)
out = np.array(0.)
r = np.std(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
r = np.var(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
r = np.mean(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
class TestStdVarComplex:
def test_basic(self):
A = np.array([1, 1.j, -1, -1.j])
real_var = 1
assert_almost_equal(np.var(A), real_var)
assert_almost_equal(np.std(A)**2, real_var)
def test_scalars(self):
assert_equal(np.var(1j), 0)
assert_equal(np.std(1j), 0)
class TestCreationFuncs:
# Test ones, zeros, empty and full.
def setup_method(self):
dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
# void, bytes, str
variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
self.dtypes = sorted(dtypes - variable_sized |
{np.dtype(tp.str.replace("0", str(i)))
for tp in variable_sized for i in range(1, 10)},
key=lambda dtype: dtype.str)
self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
self.ndims = 10
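    # Note on the dtype set above: the flexible dtypes (void, bytes, str)
    # have itemsize 0 in their generic form, so they are replaced by
    # explicitly sized variants ('S1'..'S9', 'U1'..'U9', 'V1'..'V9') to
    # make them usable for array creation.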
def check_function(self, func, fill_value=None):
par = ((0, 1, 2),
range(self.ndims),
self.orders,
self.dtypes)
fill_kwarg = {}
if fill_value is not None:
fill_kwarg = {'fill_value': fill_value}
for size, ndims, order, dtype in itertools.product(*par):
shape = ndims * [size]
# do not fill void type
if fill_kwarg and dtype.str.startswith('|V'):
continue
arr = func(shape, order=order, dtype=dtype,
**fill_kwarg)
assert_equal(arr.dtype, dtype)
assert_(getattr(arr.flags, self.orders[order]))
if fill_value is not None:
if dtype.str.startswith('|S'):
val = str(fill_value)
else:
val = fill_value
assert_equal(arr, dtype.type(val))
def test_zeros(self):
self.check_function(np.zeros)
def test_ones(self):
self.check_function(np.ones)
def test_empty(self):
self.check_function(np.empty)
def test_full(self):
self.check_function(np.full, 0)
self.check_function(np.full, 1)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_for_reference_leak(self):
# Make sure we have an object for reference
dim = 1
beg = sys.getrefcount(dim)
np.zeros([dim]*10)
assert_(sys.getrefcount(dim) == beg)
np.ones([dim]*10)
assert_(sys.getrefcount(dim) == beg)
np.empty([dim]*10)
assert_(sys.getrefcount(dim) == beg)
np.full([dim]*10, 0)
assert_(sys.getrefcount(dim) == beg)
class TestLikeFuncs:
'''Test ones_like, zeros_like, empty_like and full_like'''
def setup_method(self):
self.data = [
# Array scalars
(np.array(3.), None),
(np.array(3), 'f8'),
# 1D arrays
(np.arange(6, dtype='f4'), None),
(np.arange(6), 'c16'),
# 2D C-layout arrays
(np.arange(6).reshape(2, 3), None),
(np.arange(6).reshape(3, 2), 'i1'),
# 2D F-layout arrays
(np.arange(6).reshape((2, 3), order='F'), None),
(np.arange(6).reshape((3, 2), order='F'), 'i1'),
# 3D C-layout arrays
(np.arange(24).reshape(2, 3, 4), None),
(np.arange(24).reshape(4, 3, 2), 'f4'),
# 3D F-layout arrays
(np.arange(24).reshape((2, 3, 4), order='F'), None),
(np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
# 3D non-C/F-layout arrays
(np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
(np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
]
        self.shapes = [(), (5,), (5, 6), (5, 6, 7)]
def compare_array_value(self, dz, value, fill_value):
if value is not None:
if fill_value:
try:
z = dz.dtype.type(value)
except OverflowError:
pass
else:
assert_(np.all(dz == z))
else:
assert_(np.all(dz == value))
def check_like_function(self, like_function, value, fill_value=False):
if fill_value:
fill_kwarg = {'fill_value': value}
else:
fill_kwarg = {}
for d, dtype in self.data:
# default (K) order, dtype
dz = like_function(d, dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_equal(np.array(dz.strides)*d.dtype.itemsize,
np.array(d.strides)*dz.dtype.itemsize)
assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# C order, default dtype
dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# F order, default dtype
dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# A order
dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
if d.flags.f_contiguous:
assert_(dz.flags.f_contiguous)
else:
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# Test the 'shape' parameter
for s in self.shapes:
for o in 'CFA':
sz = like_function(d, dtype=dtype, shape=s, order=o,
**fill_kwarg)
assert_equal(sz.shape, s)
if dtype is None:
assert_equal(sz.dtype, d.dtype)
else:
assert_equal(sz.dtype, np.dtype(dtype))
if o == 'C' or (o == 'A' and d.flags.c_contiguous):
assert_(sz.flags.c_contiguous)
elif o == 'F' or (o == 'A' and d.flags.f_contiguous):
assert_(sz.flags.f_contiguous)
self.compare_array_value(sz, value, fill_value)
if (d.ndim != len(s)):
assert_equal(np.argsort(like_function(d, dtype=dtype,
shape=s, order='K',
**fill_kwarg).strides),
np.argsort(np.empty(s, dtype=dtype,
order='C').strides))
else:
assert_equal(np.argsort(like_function(d, dtype=dtype,
shape=s, order='K',
**fill_kwarg).strides),
np.argsort(d.strides))
# Test the 'subok' parameter
class MyNDArray(np.ndarray):
pass
a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = like_function(a, **fill_kwarg)
assert_(type(b) is MyNDArray)
b = like_function(a, subok=False, **fill_kwarg)
assert_(type(b) is not MyNDArray)
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
def test_zeros_like(self):
self.check_like_function(np.zeros_like, 0)
def test_empty_like(self):
self.check_like_function(np.empty_like, None)
def test_filled_like(self):
self.check_like_function(np.full_like, 0, True)
self.check_like_function(np.full_like, 1, True)
self.check_like_function(np.full_like, 1000, True)
self.check_like_function(np.full_like, 123.456, True)
self.check_like_function(np.full_like, np.inf, True)
@pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like,
np.zeros_like, np.ones_like])
@pytest.mark.parametrize('dtype', [str, bytes])
def test_dtype_str_bytes(self, likefunc, dtype):
# Regression test for gh-19860
a = np.arange(16).reshape(2, 8)
b = a[:, ::2] # Ensure b is not contiguous.
kwargs = {'fill_value': ''} if likefunc == np.full_like else {}
result = likefunc(b, dtype=dtype, **kwargs)
if dtype == str:
assert result.strides == (16, 4)
else:
# dtype is bytes
assert result.strides == (4, 1)
class TestCorrelate:
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
self.y = np.array([-1, -2, -3], dtype=dt)
self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
self.zs = np.array([-3., -14., -30., -48., -66., -84.,
-102., -54., -19.], dtype=dt)
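        # The expected arrays follow from the definition of 'full'
        # correlation: np.correlate(x, y, 'full') equals
        # np.convolve(x, y[::-1].conj(), 'full'), so for x = [1..5] and
        # y = [-1, -2, -3]: z1[0] = 1*(-3) = -3,
        # z1[1] = 1*(-2) + 2*(-3) = -8, and so on.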
def test_float(self):
self._setup(float)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.x, self.y[:-1], 'full')
assert_array_almost_equal(z, self.z1_4)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
z = np.correlate(self.x[::-1], self.y, 'full')
assert_array_almost_equal(z, self.z1r)
z = np.correlate(self.y, self.x[::-1], 'full')
assert_array_almost_equal(z, self.z2r)
z = np.correlate(self.xs, self.y, 'full')
assert_array_almost_equal(z, self.zs)
def test_object(self):
self._setup(Decimal)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
def test_no_overwrite(self):
d = np.ones(100)
k = np.ones(3)
np.correlate(d, k)
assert_array_equal(d, np.ones(100))
assert_array_equal(k, np.ones(3))
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=complex)
y = np.array([-1, -2j, 3+1j], dtype=complex)
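        # Build the expected result from the identity
        # correlate(y, x) == correlate(x, y)[::-1].conj(): r_z below is
        # the x-with-y correlation, reversed and conjugated.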
r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
def test_zero_size(self):
with pytest.raises(ValueError):
np.correlate(np.array([]), np.ones(1000), mode='full')
with pytest.raises(ValueError):
np.correlate(np.ones(1000), np.array([]), mode='full')
def test_mode(self):
d = np.ones(100)
k = np.ones(3)
default_mode = np.correlate(d, k, mode='valid')
with assert_warns(DeprecationWarning):
valid_mode = np.correlate(d, k, mode='v')
assert_array_equal(valid_mode, default_mode)
# integer mode
with assert_raises(ValueError):
np.correlate(d, k, mode=-1)
assert_array_equal(np.correlate(d, k, mode=0), valid_mode)
# illegal arguments
with assert_raises(TypeError):
np.correlate(d, k, mode=None)
class TestConvolve:
def test_object(self):
d = [1.] * 100
k = [1.] * 3
assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))
def test_no_overwrite(self):
d = np.ones(100)
k = np.ones(3)
np.convolve(d, k)
assert_array_equal(d, np.ones(100))
assert_array_equal(k, np.ones(3))
def test_mode(self):
d = np.ones(100)
k = np.ones(3)
default_mode = np.convolve(d, k, mode='full')
with assert_warns(DeprecationWarning):
full_mode = np.convolve(d, k, mode='f')
assert_array_equal(full_mode, default_mode)
# integer mode
with assert_raises(ValueError):
np.convolve(d, k, mode=-1)
assert_array_equal(np.convolve(d, k, mode=2), full_mode)
# illegal arguments
with assert_raises(TypeError):
np.convolve(d, k, mode=None)
class TestArgwhere:
@pytest.mark.parametrize('nd', [0, 1, 2])
def test_nd(self, nd):
# get an nd array with multiple elements in every dimension
x = np.empty((2,)*nd, bool)
# none
x[...] = False
assert_equal(np.argwhere(x).shape, (0, nd))
# only one
x[...] = False
x.flat[0] = True
assert_equal(np.argwhere(x).shape, (1, nd))
# all but one
x[...] = True
x.flat[0] = False
assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
# all
x[...] = True
assert_equal(np.argwhere(x).shape, (x.size, nd))
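    # np.argwhere always returns an array of shape (num_true, ndim):
    # one row of coordinates per nonzero element, which is what the
    # shape checks above rely on.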
def test_2D(self):
x = np.arange(6).reshape((2, 3))
assert_array_equal(np.argwhere(x > 1),
[[0, 2],
[1, 0],
[1, 1],
[1, 2]])
def test_list(self):
assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
class TestStringFunction:
def test_set_string_function(self):
a = np.array([1])
np.set_string_function(lambda x: "FOO", repr=True)
assert_equal(repr(a), "FOO")
np.set_string_function(None, repr=True)
assert_equal(repr(a), "array([1])")
np.set_string_function(lambda x: "FOO", repr=False)
assert_equal(str(a), "FOO")
np.set_string_function(None, repr=False)
assert_equal(str(a), "[1]")
class TestRoll:
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
def test_roll2d(self):
x2 = np.reshape(np.arange(10), (2, 5))
x2r = np.roll(x2, 1)
assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
x2r = np.roll(x2, 1, axis=0)
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, 1, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
# Roll multiple axes at once.
x2r = np.roll(x2, 1, axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (-1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (0, 1), axis=(0, 1))
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, (0, -1), axis=(0, 1))
assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
x2r = np.roll(x2, (1, 1), axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (-1, -1), axis=(0, 1))
assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
# Roll the same axis multiple times.
x2r = np.roll(x2, 1, axis=(0, 0))
assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
x2r = np.roll(x2, 1, axis=(1, 1))
assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
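        # (a repeated axis accumulates its shifts: axis=(0, 0) with shift
        # 1 rolls axis 0 by 2, a no-op for two rows, while axis=(1, 1)
        # rolls the length-5 axis 1 by 2)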
# Roll more than one turn in either direction.
x2r = np.roll(x2, 6, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, -4, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
def test_roll_empty(self):
x = np.array([])
assert_equal(np.roll(x, 1), np.array([]))
class TestRollaxis:
# expected shape indexed by (axis, start) for array of
# shape (1, 2, 3, 4)
tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
(0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
(0, 4): (2, 3, 4, 1),
(1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
(1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
(1, 4): (1, 3, 4, 2),
(2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
(2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
(2, 4): (1, 2, 4, 3),
(3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
(3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
(3, 4): (1, 2, 3, 4)}
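    # np.rollaxis(a, axis, start) removes `axis` and re-inserts it so
    # that it ends up at position `start` (counted before removal).
    # For shape (1, 2, 3, 4): rollaxis(a, 0, 4) -> (2, 3, 4, 1) and
    # rollaxis(a, 2, 0) -> (3, 1, 2, 4), matching the table above.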
def test_exceptions(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
def test_results(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
aind = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
for (i, j) in self.tgtshape:
# positive axis, positive start
res = np.rollaxis(a, axis=i, start=j)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))
assert_(not res.flags['OWNDATA'])
# negative axis, positive start
ip = i + 1
res = np.rollaxis(a, axis=-ip, start=j)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(4 - ip, j)])
assert_(not res.flags['OWNDATA'])
# positive axis, negative start
jp = j + 1 if j < 4 else j
res = np.rollaxis(a, axis=i, start=-jp)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(i, 4 - jp)])
assert_(not res.flags['OWNDATA'])
# negative axis, negative start
ip = i + 1
jp = j + 1 if j < 4 else j
res = np.rollaxis(a, axis=-ip, start=-jp)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
assert_(not res.flags['OWNDATA'])
class TestMoveaxis:
def test_move_to_end(self):
x = np.random.randn(5, 6, 7)
for source, expected in [(0, (6, 7, 5)),
(1, (5, 7, 6)),
(2, (5, 6, 7)),
(-1, (5, 6, 7))]:
actual = np.moveaxis(x, source, -1).shape
            assert_equal(actual, expected)
def test_move_new_position(self):
x = np.random.randn(1, 2, 3, 4)
for source, destination, expected in [
(0, 1, (2, 1, 3, 4)),
(1, 2, (1, 3, 2, 4)),
(1, -1, (1, 3, 4, 2)),
]:
actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, expected)
def test_preserve_order(self):
x = np.zeros((1, 2, 3, 4))
for source, destination in [
(0, 0),
(3, -1),
(-1, 3),
([0, -1], [0, -1]),
([2, 0], [2, 0]),
(range(4), range(4)),
]:
actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, (1, 2, 3, 4))
def test_move_multiples(self):
x = np.zeros((0, 1, 2, 3))
for source, destination, expected in [
([0, 1], [2, 3], (2, 3, 0, 1)),
([2, 3], [0, 1], (2, 3, 0, 1)),
([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
([3, 0], [1, 0], (0, 3, 1, 2)),
([0, 3], [0, 1], (0, 3, 1, 2)),
]:
actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, expected)
def test_errors(self):
x = np.random.randn(1, 2, 3)
assert_raises_regex(np.AxisError, 'source.*out of bounds',
np.moveaxis, x, 3, 0)
assert_raises_regex(np.AxisError, 'source.*out of bounds',
np.moveaxis, x, -4, 0)
assert_raises_regex(np.AxisError, 'destination.*out of bounds',
np.moveaxis, x, 0, 5)
assert_raises_regex(ValueError, 'repeated axis in `source`',
np.moveaxis, x, [0, 0], [0, 1])
assert_raises_regex(ValueError, 'repeated axis in `destination`',
np.moveaxis, x, [0, 1], [1, 1])
assert_raises_regex(ValueError, 'must have the same number',
np.moveaxis, x, 0, [0, 1])
assert_raises_regex(ValueError, 'must have the same number',
np.moveaxis, x, [0, 1], [0])
def test_array_likes(self):
x = np.ma.zeros((1, 2, 3))
result = np.moveaxis(x, 0, 0)
        assert_equal(x.shape, result.shape)
assert_(isinstance(result, np.ma.MaskedArray))
x = [1, 2, 3]
result = np.moveaxis(x, 0, 0)
        assert_equal(x, list(result))
assert_(isinstance(result, np.ndarray))
class TestCross:
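    # For 2-element vectors np.cross returns the scalar z-component
    # u[0]*v[1] - u[1]*v[0] (e.g. cross([1, 2], [3, 4]) = 1*4 - 2*3 = -2);
    # a 2-element input paired with a 3-element one is treated as having
    # a zero z-component.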
def test_2x2(self):
u = [1, 2]
v = [3, 4]
z = -2
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_2x3(self):
u = [1, 2]
v = [3, 4, 5]
z = np.array([10, -5, -2])
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_3x3(self):
u = [1, 2, 3]
v = [4, 5, 6]
z = np.array([-3, 6, -3])
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_broadcasting(self):
# Ticket #2624 (Trac #2032)
u = np.tile([1, 2], (11, 1))
v = np.tile([3, 4], (11, 1))
z = -2
assert_equal(np.cross(u, v), z)
assert_equal(np.cross(v, u), -z)
assert_equal(np.cross(u, u), 0)
u = np.tile([1, 2], (11, 1)).T
v = np.tile([3, 4, 5], (11, 1))
z = np.tile([10, -5, -2], (11, 1))
assert_equal(np.cross(u, v, axisa=0), z)
assert_equal(np.cross(v, u.T), -z)
assert_equal(np.cross(v, v), 0)
u = np.tile([1, 2, 3], (11, 1)).T
v = np.tile([3, 4], (11, 1)).T
z = np.tile([-12, 9, -2], (11, 1))
assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
assert_equal(np.cross(v.T, u.T), -z)
assert_equal(np.cross(u.T, u.T), 0)
u = np.tile([1, 2, 3], (5, 1))
v = np.tile([4, 5, 6], (5, 1)).T
z = np.tile([-3, 6, -3], (5, 1))
assert_equal(np.cross(u, v, axisb=0), z)
assert_equal(np.cross(v.T, u), -z)
assert_equal(np.cross(u, u), 0)
def test_broadcasting_shapes(self):
u = np.ones((2, 1, 3))
v = np.ones((5, 3))
assert_equal(np.cross(u, v).shape, (2, 5, 3))
u = np.ones((10, 3, 5))
v = np.ones((2, 5))
assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)
assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)
u = np.ones((10, 3, 5, 7))
v = np.ones((5, 7, 2))
assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)
assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)
# gh-5885
u = np.ones((3, 4, 2))
for axisc in range(-2, 2):
assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
def test_outer_out_param():
arr1 = np.ones((5,))
arr2 = np.ones((2,))
arr3 = np.linspace(-2, 2, 5)
    out1 = np.ndarray(shape=(5, 5))
out2 = np.ndarray(shape=(2, 5))
res1 = np.outer(arr1, arr3, out1)
assert_equal(res1, out1)
assert_equal(np.outer(arr2, arr3, out2), out2)
class TestIndices:
def test_simple(self):
[x, y] = np.indices((4, 3))
assert_array_equal(x, np.array([[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[3, 3, 3]]))
assert_array_equal(y, np.array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]]))
def test_single_input(self):
[x] = np.indices((4,))
assert_array_equal(x, np.array([0, 1, 2, 3]))
[x] = np.indices((4,), sparse=True)
assert_array_equal(x, np.array([0, 1, 2, 3]))
def test_scalar_input(self):
assert_array_equal([], np.indices(()))
assert_array_equal([], np.indices((), sparse=True))
assert_array_equal([[]], np.indices((0,)))
assert_array_equal([[]], np.indices((0,), sparse=True))
def test_sparse(self):
        [x, y] = np.indices((4, 3), sparse=True)
assert_array_equal(x, np.array([[0], [1], [2], [3]]))
assert_array_equal(y, np.array([[0, 1, 2]]))
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
def test_return_type(self, dtype, dims):
inds = np.indices(dims, dtype=dtype)
assert_(inds.dtype == dtype)
for arr in np.indices(dims, dtype=dtype, sparse=True):
assert_(arr.dtype == dtype)
class TestRequire:
flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
'F', 'F_CONTIGUOUS', 'FORTRAN',
'A', 'ALIGNED',
'W', 'WRITEABLE',
'O', 'OWNDATA']
def generate_all_false(self, dtype):
arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
arr.setflags(write=False)
a = arr['a']
assert_(not a.flags['C'])
assert_(not a.flags['F'])
assert_(not a.flags['O'])
assert_(not a.flags['W'])
assert_(not a.flags['A'])
return a
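    # The trick above: indexing a field out of a non-writeable structured
    # array yields a strided view that is neither C- nor F-contiguous,
    # does not own its data, is not writeable, and (because of the 1-byte
    # 'junk' field) is unaligned -- i.e. every flag np.require can fix
    # starts out False, as the asserts verify.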
def set_and_check_flag(self, flag, dtype, arr):
if dtype is None:
dtype = arr.dtype
b = np.require(arr, dtype, [flag])
assert_(b.flags[flag])
assert_(b.dtype == dtype)
# a further call to np.require ought to return the same array
# unless OWNDATA is specified.
c = np.require(b, None, [flag])
if flag[0] != 'O':
assert_(c is b)
else:
assert_(c.flags[flag])
def test_require_each(self):
id = ['f8', 'i4']
fd = [None, 'f8', 'c16']
for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
a = self.generate_all_false(idtype)
self.set_and_check_flag(flag, fdtype, a)
def test_unknown_requirement(self):
a = self.generate_all_false('f8')
assert_raises(KeyError, np.require, a, None, 'Q')
def test_non_array_input(self):
a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
assert_(a.flags['O'])
assert_(a.flags['C'])
assert_(a.flags['A'])
assert_(a.dtype == 'i4')
assert_equal(a, [1, 2, 3, 4])
def test_C_and_F_simul(self):
a = self.generate_all_false('f8')
assert_raises(ValueError, np.require, a, None, ['C', 'F'])
def test_ensure_array(self):
class ArraySubclass(np.ndarray):
pass
a = ArraySubclass((2, 2))
b = np.require(a, None, ['E'])
assert_(type(b) is np.ndarray)
def test_preserve_subtype(self):
class ArraySubclass(np.ndarray):
pass
for flag in self.flag_names:
a = ArraySubclass((2, 2))
self.set_and_check_flag(flag, None, a)
class TestBroadcast:
def test_broadcast_in_args(self):
# gh-5881
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
np.empty((5, 1, 7))]
mits = [np.broadcast(*arrs),
np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])),
np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])),
np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
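        # Each entry combines the same four arrays, directly or via
        # nested np.broadcast objects; all must broadcast to (5, 6, 7).
        # (`nd` is an older alias of `ndim` on broadcast objects.)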
for mit in mits:
assert_equal(mit.shape, (5, 6, 7))
assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 4)
for a, ia in zip(arrs, mit.iters):
assert_(a is ia.base)
def test_broadcast_single_arg(self):
# gh-6899
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
assert_equal(mit.shape, (5, 6, 7))
assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 1)
assert_(arrs[0] is mit.iters[0].base)
def test_number_of_arguments(self):
arr = np.empty((5,))
for j in range(35):
arrs = [arr] * j
if j > 32:
assert_raises(ValueError, np.broadcast, *arrs)
else:
mit = np.broadcast(*arrs)
assert_equal(mit.numiter, j)
def test_broadcast_error_kwargs(self):
#gh-13455
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
mit2 = np.broadcast(*arrs, **{})
assert_equal(mit.shape, mit2.shape)
assert_equal(mit.ndim, mit2.ndim)
assert_equal(mit.nd, mit2.nd)
assert_equal(mit.numiter, mit2.numiter)
assert_(mit.iters[0].base is mit2.iters[0].base)
assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
def test_shape_mismatch_error_message(self):
with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and "
r"arg 2 with shape \(2,\)"):
np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7])
class TestKeepdims:
class sub_array(np.ndarray):
def sum(self, axis=None, dtype=None, out=None):
return np.ndarray.sum(self, axis, dtype, out, keepdims=True)
def test_raise(self):
sub_class = self.sub_array
x = np.arange(30).view(sub_class)
assert_raises(TypeError, np.sum, x, keepdims=True)
class TestTensordot:
def test_zero_dimension(self):
# Test resolution to issue #5663
a = np.ndarray((3,0))
b = np.ndarray((0,4))
td = np.tensordot(a, b, (1, 0))
assert_array_equal(td, np.dot(a, b))
assert_array_equal(td, np.einsum('ij,jk', a, b))
def test_zero_dimensional(self):
# gh-12130
arr_0d = np.array(1)
ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined
assert_array_equal(ret, arr_0d)
| 136,761 | Python | 37.042281 | 95 | 0.522291 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_einsum.py | import itertools
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
)
# Setup for optimize einsum
chars = 'abcdefghij'
sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
global_size_dict = dict(zip(chars, sizes))
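# Maps subscript letters to operand sizes for the optimize tests; zip()
# stops at the shorter sequence, so only the ten letters in `chars` get
# an entry and the extra trailing size is ignored.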
class TestEinsum:
def test_einsum_errors(self):
for do_opt in [True, False]:
# Need enough arguments
assert_raises(ValueError, np.einsum, optimize=do_opt)
assert_raises(ValueError, np.einsum, "", optimize=do_opt)
# subscripts must be a string
assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt)
# out parameter must be an array
assert_raises(TypeError, np.einsum, "", 0, out='test',
optimize=do_opt)
# order parameter must be a valid order
assert_raises(ValueError, np.einsum, "", 0, order='W',
optimize=do_opt)
# casting parameter must be a valid casting
assert_raises(ValueError, np.einsum, "", 0, casting='blah',
optimize=do_opt)
# dtype parameter must be a valid dtype
assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
optimize=do_opt)
# other keyword arguments are rejected
assert_raises(TypeError, np.einsum, "", 0, bad_arg=0,
optimize=do_opt)
# issue 4528 revealed a segfault with this call
assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt)
# number of operands must match count in subscripts string
assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt)
assert_raises(ValueError, np.einsum, ",", 0, [0], [0],
optimize=do_opt)
assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt)
# can't have more subscripts than dimensions in the operand
assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt)
assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt)
assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt)
assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt)
# invalid ellipsis
assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
# invalid subscript character
assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
# output subscripts must appear in input
assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
# output subscripts may only be specified once
assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
optimize=do_opt)
            # dimensions must match when being collapsed
assert_raises(ValueError, np.einsum, "ii",
np.arange(6).reshape(2, 3), optimize=do_opt)
assert_raises(ValueError, np.einsum, "ii->i",
np.arange(6).reshape(2, 3), optimize=do_opt)
# broadcasting to new dimensions must be enabled explicitly
assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
optimize=do_opt)
assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
out=np.arange(4).reshape(2, 2), optimize=do_opt)
with assert_raises_regex(ValueError, "'b'"):
# gh-11221 - 'c' erroneously appeared in the error message
a = np.ones((3, 3, 4, 5, 6))
b = np.ones((3, 4, 5))
np.einsum('aabcb,abc', a, b)
# Check order kwarg, asanyarray allows 1d to pass through
assert_raises(ValueError, np.einsum, "i->i", np.arange(6).reshape(-1, 1),
optimize=do_opt, order='d')
def test_einsum_views(self):
# pass-through
for do_opt in [True, False]:
a = np.arange(6)
a.shape = (2, 3)
b = np.einsum("...", a, optimize=do_opt)
assert_(b.base is a)
b = np.einsum(a, [Ellipsis], optimize=do_opt)
assert_(b.base is a)
b = np.einsum("ij", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a)
b = np.einsum(a, [0, 1], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a)
# output is writeable whenever input is writeable
b = np.einsum("...", a, optimize=do_opt)
assert_(b.flags['WRITEABLE'])
a.flags['WRITEABLE'] = False
b = np.einsum("...", a, optimize=do_opt)
assert_(not b.flags['WRITEABLE'])
# transpose
a = np.arange(6)
a.shape = (2, 3)
b = np.einsum("ji", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a.T)
b = np.einsum(a, [1, 0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a.T)
# diagonal
a = np.arange(9)
a.shape = (3, 3)
b = np.einsum("ii->i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[i, i] for i in range(3)])
b = np.einsum(a, [0, 0], [0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[i, i] for i in range(3)])
# diagonal with various ways of broadcasting an additional dimension
a = np.arange(27)
a.shape = (3, 3, 3)
b = np.einsum("...ii->...i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
b = np.einsum("ii...->...i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(2, 0, 1)])
b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(2, 0, 1)])
b = np.einsum("...ii->i...", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum("jii->ij", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum("ii...->i...", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
b = np.einsum("i...i->i...", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
b = np.einsum("i...i->...i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(1, 0, 2)])
b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(1, 0, 2)])
# triple diagonal
a = np.arange(27)
a.shape = (3, 3, 3)
b = np.einsum("iii->i", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[i, i, i] for i in range(3)])
b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, [a[i, i, i] for i in range(3)])
# swap axes
a = np.arange(24)
a.shape = (2, 3, 4)
b = np.einsum("ijk->jik", a, optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a.swapaxes(0, 1))
b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt)
assert_(b.base is a)
assert_equal(b, a.swapaxes(0, 1))
def check_einsum_sums(self, dtype, do_opt=False):
        # Check various sums.  Runs many sizes to exercise the unrolled loops.
# sum(a, axis=-1)
for n in range(1, 17):
a = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i->", a, optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [0], [], optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("...i->...", a, optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt),
np.sum(a, axis=-1).astype(dtype))
# sum(a, axis=0)
for n in range(1, 17):
a = np.arange(2*n, dtype=dtype).reshape(2, n)
assert_equal(np.einsum("i...->...", a, optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("i...->...", a, optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
np.sum(a, axis=0).astype(dtype))
# trace(a)
for n in range(1, 17):
a = np.arange(n*n, dtype=dtype).reshape(n, n)
assert_equal(np.einsum("ii", a, optimize=do_opt),
np.trace(a).astype(dtype))
assert_equal(np.einsum(a, [0, 0], optimize=do_opt),
np.trace(a).astype(dtype))
# gh-15961: should accept numpy int64 type in subscript list
np_array = np.asarray([0, 0])
assert_equal(np.einsum(a, np_array, optimize=do_opt),
np.trace(a).astype(dtype))
assert_equal(np.einsum(a, list(np_array), optimize=do_opt),
np.trace(a).astype(dtype))
# multiply(a, b)
assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
for n in range(1, 17):
a = np.arange(3 * n, dtype=dtype).reshape(3, n)
b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
np.multiply(a, b))
assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
np.multiply(a, b))
# inner(a,b)
for n in range(1, 17):
a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
np.inner(a, b))
for n in range(1, 11):
a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
np.inner(a.T, b.T).T)
assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
np.inner(a.T, b.T).T)
# outer(a,b)
for n in range(1, 17):
a = np.arange(3, dtype=dtype)+1
b = np.arange(n, dtype=dtype)+1
assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
np.outer(a, b))
assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
np.outer(a, b))
# Suppress the complex warnings for the 'as f8' tests
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning)
# matvec(a,b) / a.dot(b) where a is matrix, b is vector
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
np.dot(a, b))
assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
np.dot(a, b))
c = np.arange(4, dtype=dtype)
np.einsum("ij,j", a, b, out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1], out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
np.dot(b.T, a.T))
assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
np.dot(b.T, a.T))
c = np.arange(4, dtype=dtype)
np.einsum("ji,j", a.T, b.T, out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a.T, [1, 0], b.T, [1], out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
# matmat(a,b) / a.dot(b) where a is matrix, b is matrix
for n in range(1, 17):
if n < 8 or dtype != 'f2':
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
np.dot(a, b))
assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
np.dot(a, b))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
c = np.arange(24, dtype=dtype).reshape(4, 6)
np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
optimize=do_opt)
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1, 2], out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
# matrix triple product (note this is not currently an efficient
# way to multiply 3 matrices)
a = np.arange(12, dtype=dtype).reshape(3, 4)
b = np.arange(20, dtype=dtype).reshape(4, 5)
c = np.arange(30, dtype=dtype).reshape(5, 6)
if dtype != 'f2':
assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
a.dot(b).dot(c))
assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
optimize=do_opt), a.dot(b).dot(c))
d = np.arange(18, dtype=dtype).reshape(3, 6)
np.einsum("ij,jk,kl", a, b, c, out=d,
dtype='f8', casting='unsafe', optimize=do_opt)
tgt = a.astype('f8').dot(b.astype('f8'))
tgt = tgt.dot(c.astype('f8')).astype(dtype)
assert_equal(d, tgt)
d[...] = 0
np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
dtype='f8', casting='unsafe', optimize=do_opt)
tgt = a.astype('f8').dot(b.astype('f8'))
tgt = tgt.dot(c.astype('f8')).astype(dtype)
assert_equal(d, tgt)
# tensordot(a, b)
if np.dtype(dtype) != np.dtype('f2'):
a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
assert_equal(np.einsum("ijk, jil -> kl", a, b),
np.tensordot(a, b, axes=([1, 0], [0, 1])))
assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
np.tensordot(a, b, axes=([1, 0], [0, 1])))
c = np.arange(10, dtype=dtype).reshape(5, 2)
np.einsum("ijk,jil->kl", a, b, out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1, 0], [0, 1])).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
dtype='f8', casting='unsafe', optimize=do_opt)
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1, 0], [0, 1])).astype(dtype))
# logical_and(logical_and(a!=0, b!=0), c!=0)
a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype)
b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype)
c = np.array([True, True, False, True, True, False, True, True])
assert_equal(np.einsum("i,i,i->i", a, b, c,
dtype='?', casting='unsafe', optimize=do_opt),
np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
a = np.arange(9, dtype=dtype)
assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
# Various stride0, contiguous, and SSE aligned variants
for n in range(1, 25):
a = np.arange(n, dtype=dtype)
if np.dtype(dtype).itemsize > 1:
assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
np.multiply(a, a))
assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))
assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
np.multiply(a[1:], a[:-1]))
assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
np.dot(a[1:], a[:-1]))
assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
2*np.sum(a[1:]))
assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
2*np.sum(a[1:]))
# An object array, summed as the data type
a = np.arange(9, dtype=object)
b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
# A case which was failing (ticket #1885)
p = np.arange(2) + 1
q = np.arange(4).reshape(2, 2) + 3
r = np.arange(4).reshape(2, 2) + 7
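        # 253 == sum over z, m of p[z]*q[m, z]*r[z, m]
        #     == 1*(3*7 + 5*8) + 2*(4*9 + 6*10)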
assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
# singleton dimensions broadcast (gh-10343)
p = np.ones((10,2))
q = np.ones((1,2))
assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
np.einsum('ij,ij->j', p, q, optimize=False))
assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
[10.] * 2)
# a blas-compatible contraction broadcasting case which was failing
# for optimize=True (ticket #10930)
x = np.array([2., 3.])
y = np.array([4.])
assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)
        # an all-ones array was bypassing the bug (ticket #10930)
p = np.ones((1, 5)) / 2
q = np.ones((5, 5)) / 2
for optimize in (True, False):
assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
optimize=optimize),
np.einsum("...ij,...jk->...ik", p, q,
optimize=optimize))
assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
optimize=optimize),
np.full((1, 5), 1.25))
# Cases which were failing (gh-10899)
x = np.eye(2, dtype=dtype)
y = np.ones(2, dtype=dtype)
assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
[2.]) # contig_contig_outstride0_two
assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
[2.]) # stride0_contig_outstride0_two
assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
[2.]) # contig_stride0_outstride0_two
def test_einsum_sums_int8(self):
self.check_einsum_sums('i1')
def test_einsum_sums_uint8(self):
self.check_einsum_sums('u1')
def test_einsum_sums_int16(self):
self.check_einsum_sums('i2')
def test_einsum_sums_uint16(self):
self.check_einsum_sums('u2')
def test_einsum_sums_int32(self):
self.check_einsum_sums('i4')
self.check_einsum_sums('i4', True)
def test_einsum_sums_uint32(self):
self.check_einsum_sums('u4')
self.check_einsum_sums('u4', True)
def test_einsum_sums_int64(self):
self.check_einsum_sums('i8')
def test_einsum_sums_uint64(self):
self.check_einsum_sums('u8')
def test_einsum_sums_float16(self):
self.check_einsum_sums('f2')
def test_einsum_sums_float32(self):
self.check_einsum_sums('f4')
def test_einsum_sums_float64(self):
self.check_einsum_sums('f8')
self.check_einsum_sums('f8', True)
def test_einsum_sums_longdouble(self):
self.check_einsum_sums(np.longdouble)
def test_einsum_sums_cfloat64(self):
self.check_einsum_sums('c8')
self.check_einsum_sums('c8', True)
def test_einsum_sums_cfloat128(self):
self.check_einsum_sums('c16')
def test_einsum_sums_clongdouble(self):
self.check_einsum_sums(np.clongdouble)
def test_einsum_misc(self):
# This call used to crash because of a bug in
# PyArray_AssignZero
a = np.ones((1, 2))
b = np.ones((2, 2, 1))
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])
# Regression test for issue #10369 (test unicode inputs with Python 2)
assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]])
assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20)
assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
optimize=u'greedy'), 20)
# The iterator had an issue with buffering this reduction
a = np.ones((5, 12, 4, 2, 3), np.int64)
b = np.ones((5, 12, 11), np.int64)
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
np.einsum('ijklm,ijn->', a, b))
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True),
np.einsum('ijklm,ijn->', a, b, optimize=True))
        # Issue #2027: there was a problem in the contiguous 3-argument
        # inner loop implementation
a = np.arange(1, 3)
b = np.arange(1, 5).reshape(2, 2)
c = np.arange(1, 9).reshape(4, 2)
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True),
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
# Ensure explicitly setting out=None does not cause an error
# see issue gh-15776 and issue gh-15256
assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]])
def test_subscript_range(self):
        # Issue #7741: make sure that all letters of the Latin alphabet
        # (both uppercase & lowercase) can be used when creating a subscript
        # from arrays
a = np.ones((2, 3))
b = np.ones((3, 4))
np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
def test_einsum_broadcast(self):
        # Issue #2455: change in handling of ellipsis
        # - removes the 'middle broadcast' error
        # - only uses the 'RIGHT' iteration in prepare_op_axes
        # - adds auto broadcast on the left where it belongs
        # - broadcast on the right has to be explicit
        # We need to test the optimized parsing as well
A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
B = np.arange(3)
ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
for opt in [True, False]:
assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error
A = np.arange(12).reshape((4, 3))
B = np.arange(6).reshape((3, 2))
ref = np.einsum('ik,kj->ij', A, B, optimize=False)
for opt in [True, False]:
assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error
assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error
dims = [2, 3, 4, 5]
a = np.arange(np.prod(dims)).reshape(dims)
v = np.arange(dims[2])
ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
for opt in [True, False]:
assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error
assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
J, K, M = 160, 160, 120
A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
for opt in [True, False]:
assert_equal(np.einsum('...lmn,lmno->...o', A, B,
optimize=opt), ref) # used to raise error
def test_einsum_fixedstridebug(self):
# Issue #4485 obscure einsum bug
# This case revealed a bug in nditer where it reported a stride
# as 'fixed' (0) when it was in fact not fixed during processing
# (0 or 4). The reason for the bug was that the check for a fixed
# stride was using the information from the 2D inner loop reuse
# to restrict the iteration dimensions it had to validate to be
# the same, but that 2D inner loop reuse logic is only triggered
# during the buffer copying step, and hence it was invalid to
# rely on those values. The fix is to check all the dimensions
# of the stride in question, which in the test case reveals that
# the stride is not fixed.
#
# NOTE: This test is triggered by the fact that the default buffersize,
        # used by einsum, is 8192, and 3*2731 = 8193 is larger than that
# and results in a mismatch between the buffering and the
# striding for operand A.
A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
es = np.einsum('cl, cpx->lpx', A, B)
tp = np.tensordot(A, B, axes=(0, 0))
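        # tensordot contracts axis 0 of both operands (the shared 'c'
        # index), so it must equal the einsum result above.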
assert_equal(es, tp)
# The following is the original test case from the bug report,
# made repeatable by changing random arrays to aranges.
A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
es = np.einsum('cl, cpxy->lpxy', A, B)
tp = np.tensordot(A, B, axes=(0, 0))
assert_equal(es, tp)
def test_einsum_fixed_collapsingbug(self):
# Issue #5147.
        # The bug only occurred when the output argument of einsum was used.
x = np.random.normal(0, 1, (5, 5, 5, 5))
y1 = np.zeros((5, 5))
np.einsum('aabb->ab', x, out=y1)
idx = np.arange(5)
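        # Advanced-indexing equivalent of 'aabb->ab': broadcasting
        # idx[:, None] against idx selects x[a, a, b, b] for every (a, b).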
y2 = x[idx[:, None], idx[:, None], idx, idx]
assert_equal(y1, y2)
def test_einsum_failed_on_p9_and_s390x(self):
# Issues gh-14692 and gh-12689
# Bug with signed vs unsigned char errored on power9 and s390x Linux
tensor = np.random.random_sample((10, 10, 10, 10))
x = np.einsum('ijij->', tensor)
y = tensor.trace(axis1=0, axis2=2).trace()
assert_allclose(x, y)
def test_einsum_all_contig_non_contig_output(self):
# Issue gh-5907, tests that the all contiguous special case
# actually checks the contiguity of the output
x = np.ones((5, 5))
out = np.ones(10)[::2]
correct_base = np.ones(10)
correct_base[::2] = 5
# Always worked (inner iteration is done with 0-stride):
np.einsum('mi,mi,mi->m', x, x, x, out=out)
assert_array_equal(out.base, correct_base)
# Example 1:
out = np.ones(10)[::2]
np.einsum('im,im,im->m', x, x, x, out=out)
assert_array_equal(out.base, correct_base)
# Example 2, buffering causes x to be contiguous but
# special cases do not catch the operation before:
out = np.ones((2, 2, 2))[..., 0]
correct_base = np.ones((2, 2, 2))
correct_base[..., 0] = 2
x = np.ones((2, 2), np.float32)
np.einsum('ij,jk->ik', x, x, out=out)
assert_array_equal(out.base, correct_base)
@pytest.mark.parametrize("dtype",
np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
def test_different_paths(self, dtype):
# Test originally added to cover broken float16 path: gh-20305
# Likely most are covered elsewhere, at least partially.
dtype = np.dtype(dtype)
        # Simple test, designed to exercise most specialized code paths,
# note the +0.5 for floats. This makes sure we use a float value
# where the results must be exact.
arr = (np.arange(7) + 0.5).astype(dtype)
scalar = np.array(2, dtype=dtype)
# contig -> scalar:
res = np.einsum('i->', arr)
assert res == arr.sum()
# contig, contig -> contig:
res = np.einsum('i,i->i', arr, arr)
assert_array_equal(res, arr * arr)
# noncontig, noncontig -> contig:
res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2])
assert_array_equal(res, arr * arr)
# contig + contig -> scalar
assert np.einsum('i,i->', arr, arr) == (arr * arr).sum()
# contig + scalar -> contig (with out)
out = np.ones(7, dtype=dtype)
res = np.einsum('i,->i', arr, dtype.type(2), out=out)
assert_array_equal(res, arr * dtype.type(2))
# scalar + contig -> contig (with out)
res = np.einsum(',i->i', scalar, arr)
assert_array_equal(res, arr * dtype.type(2))
# scalar + contig -> scalar
res = np.einsum(',i->', scalar, arr)
# Use einsum to compare to not have difference due to sum round-offs:
assert res == np.einsum('i->', scalar * arr)
# contig + scalar -> scalar
res = np.einsum('i,->', arr, scalar)
# Use einsum to compare to not have difference due to sum round-offs:
assert res == np.einsum('i->', scalar * arr)
# contig + contig + contig -> scalar
arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype)
res = np.einsum('i,i,i->', arr, arr, arr)
assert_array_equal(res, (arr * arr * arr).sum())
# four arrays:
res = np.einsum('i,i,i,i->', arr, arr, arr, arr)
assert_array_equal(res, (arr * arr * arr * arr).sum())
def test_small_boolean_arrays(self):
# See gh-5946.
# Use array of True embedded in False.
a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
a[...] = True
out = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
tgt = np.ones((2, 1, 1), dtype=np.bool_)
res = np.einsum('...ij,...jk->...ik', a, a, out=out)
assert_equal(res, tgt)
def test_out_is_res(self):
a = np.arange(9).reshape(3, 3)
res = np.einsum('...ij,...jk->...ik', a, a, out=a)
assert res is a
def optimize_compare(self, subscripts, operands=None):
# Tests all paths of the optimization function against
# conventional einsum
if operands is None:
args = [subscripts]
terms = subscripts.split('->')[0].split(',')
for term in terms:
dims = [global_size_dict[x] for x in term]
args.append(np.random.rand(*dims))
else:
args = [subscripts] + operands
noopt = np.einsum(*args, optimize=False)
opt = np.einsum(*args, optimize='greedy')
assert_almost_equal(opt, noopt)
opt = np.einsum(*args, optimize='optimal')
assert_almost_equal(opt, noopt)
def test_hadamard_like_products(self):
# Hadamard outer products
self.optimize_compare('a,ab,abc->abc')
self.optimize_compare('a,b,ab->ab')
def test_index_transformations(self):
# Simple index transformation cases
self.optimize_compare('ea,fb,gc,hd,abcd->efgh')
self.optimize_compare('ea,fb,abcd,gc,hd->efgh')
self.optimize_compare('abcd,ea,fb,gc,hd->efgh')
def test_complex(self):
# Long test cases
self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac')
self.optimize_compare('abhe,hidj,jgba,hiab,gab')
self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac')
self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad')
self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb')
self.optimize_compare('bdhe,acad,hiab,agac,hibd')
def test_collapse(self):
# Inner products
self.optimize_compare('ab,ab,c->')
self.optimize_compare('ab,ab,c->c')
self.optimize_compare('ab,ab,cd,cd->')
self.optimize_compare('ab,ab,cd,cd->ac')
self.optimize_compare('ab,ab,cd,cd->cd')
self.optimize_compare('ab,ab,cd,cd,ef,ef->')
def test_expand(self):
# Outer products
self.optimize_compare('ab,cd,ef->abcdef')
self.optimize_compare('ab,cd,ef->acdf')
self.optimize_compare('ab,cd,de->abcde')
self.optimize_compare('ab,cd,de->be')
self.optimize_compare('ab,bcd,cd->abcd')
self.optimize_compare('ab,bcd,cd->abd')
def test_edge_cases(self):
# Difficult edge cases for optimization
self.optimize_compare('eb,cb,fb->cef')
self.optimize_compare('dd,fb,be,cdb->cef')
self.optimize_compare('bca,cdb,dbf,afc->')
self.optimize_compare('dcc,fce,ea,dbf->ab')
self.optimize_compare('fdf,cdd,ccd,afe->ae')
self.optimize_compare('abcd,ad')
self.optimize_compare('ed,fcd,ff,bcf->be')
self.optimize_compare('baa,dcf,af,cde->be')
self.optimize_compare('bd,db,eac->ace')
self.optimize_compare('fff,fae,bef,def->abd')
self.optimize_compare('efc,dbc,acf,fd->abe')
self.optimize_compare('ba,ac,da->bcd')
def test_inner_product(self):
# Inner products
self.optimize_compare('ab,ab')
self.optimize_compare('ab,ba')
self.optimize_compare('abc,abc')
self.optimize_compare('abc,bac')
self.optimize_compare('abc,cba')
def test_random_cases(self):
# Randomly built test cases
self.optimize_compare('aab,fa,df,ecc->bde')
self.optimize_compare('ecb,fef,bad,ed->ac')
self.optimize_compare('bcf,bbb,fbf,fc->')
self.optimize_compare('bb,ff,be->e')
self.optimize_compare('bcb,bb,fc,fff->')
self.optimize_compare('fbb,dfd,fc,fc->')
self.optimize_compare('afd,ba,cc,dc->bf')
self.optimize_compare('adb,bc,fa,cfc->d')
self.optimize_compare('bbd,bda,fc,db->acf')
self.optimize_compare('dba,ead,cad->bce')
self.optimize_compare('aef,fbc,dca->bde')
def test_combined_views_mapping(self):
# gh-10792
a = np.arange(9).reshape(1, 1, 3, 1, 3)
b = np.einsum('bbcdc->d', a)
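        # With a = arange(9).reshape(1, 1, 3, 1, 3), 'bbcdc->d' takes the
        # 'b' and 'c' diagonals and sums over them:
        # a[0,0,0,0,0] + a[0,0,1,0,1] + a[0,0,2,0,2] == 0 + 4 + 8 == 12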
assert_equal(b, [12])
def test_broadcasting_dot_cases(self):
# Ensures broadcasting cases are not mistaken for GEMM
a = np.random.rand(1, 5, 4)
b = np.random.rand(4, 6)
c = np.random.rand(5, 6)
d = np.random.rand(10)
self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
e = np.random.rand(1, 1, 5, 4)
f = np.random.rand(7, 7)
self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
# Edge case found in gh-11308
g = np.arange(64).reshape(2, 4, 8)
self.optimize_compare('obk,ijk->ioj', operands=[g, g])
def test_output_order(self):
        # Ensure output order is respected for optimize cases; the
        # contraction below should yield a reshaped tensor view
# gh-16415
a = np.ones((2, 3, 5), order='F')
b = np.ones((4, 3), order='F')
for opt in [True, False]:
tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt)
assert_(tmp.flags.f_contiguous)
tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt)
assert_(tmp.flags.f_contiguous)
tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt)
assert_(tmp.flags.c_contiguous)
tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt)
assert_(tmp.flags.c_contiguous is False)
assert_(tmp.flags.f_contiguous is False)
tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt)
assert_(tmp.flags.c_contiguous is False)
assert_(tmp.flags.f_contiguous is False)
c = np.ones((4, 3), order='C')
for opt in [True, False]:
tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt)
assert_(tmp.flags.c_contiguous)
d = np.ones((2, 3, 5), order='C')
for opt in [True, False]:
tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt)
assert_(tmp.flags.c_contiguous)
class TestEinsumPath:
def build_operands(self, string, size_dict=global_size_dict):
        # Builds random operands for the given subscript string
operands = [string]
terms = string.split('->')[0].split(',')
for term in terms:
dims = [size_dict[x] for x in term]
operands.append(np.random.rand(*dims))
return operands
def assert_path_equal(self, comp, benchmark):
        # Checks if the lists of tuples are equivalent
ret = (len(comp) == len(benchmark))
assert_(ret)
for pos in range(len(comp) - 1):
ret &= isinstance(comp[pos + 1], tuple)
ret &= (comp[pos + 1] == benchmark[pos + 1])
assert_(ret)
    def test_memory_constraints(self):
        # Ensure memory constraints are satisfied
outer_test = self.build_operands('a,b,c->abc')
path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
long_test = self.build_operands('acdf,jbje,gihb,hfac')
path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
def test_long_paths(self):
# Long complex cases
# Long test 1
long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
path, path_str = np.einsum_path(*long_test1, optimize='greedy')
self.assert_path_equal(path, ['einsum_path',
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
path, path_str = np.einsum_path(*long_test1, optimize='optimal')
self.assert_path_equal(path, ['einsum_path',
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
# Long test 2
long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
path, path_str = np.einsum_path(*long_test2, optimize='greedy')
self.assert_path_equal(path, ['einsum_path',
(3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
path, path_str = np.einsum_path(*long_test2, optimize='optimal')
self.assert_path_equal(path, ['einsum_path',
(0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
def test_edge_paths(self):
# Difficult edge cases
# Edge test1
edge_test1 = self.build_operands('eb,cb,fb->cef')
path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
# Edge test2
edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
# Edge test3
edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
# Edge test4
edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
# Edge test5
        edge_test5 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
                                         size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
        path, path_str = np.einsum_path(*edge_test5, optimize='greedy')
        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
        path, path_str = np.einsum_path(*edge_test5, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
def test_path_type_input(self):
# Test explicit path handling
path_test = self.build_operands('dcc,fce,ea,dbf->ab')
path, path_str = np.einsum_path(*path_test, optimize=False)
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
path, path_str = np.einsum_path(*path_test, optimize=True)
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
self.assert_path_equal(path, exp_path)
# Double check einsum works on the input path
noopt = np.einsum(*path_test, optimize=False)
opt = np.einsum(*path_test, optimize=exp_path)
assert_almost_equal(noopt, opt)
def test_path_type_input_internal_trace(self):
#gh-20962
path_test = self.build_operands('cab,cdd->ab')
exp_path = ['einsum_path', (1,), (0, 1)]
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
self.assert_path_equal(path, exp_path)
# Double check einsum works on the input path
noopt = np.einsum(*path_test, optimize=False)
opt = np.einsum(*path_test, optimize=exp_path)
assert_almost_equal(noopt, opt)
def test_path_type_input_invalid(self):
path_test = self.build_operands('ab,bc,cd,de->ae')
exp_path = ['einsum_path', (2, 3), (0, 1)]
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
assert_raises(
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
path_test = self.build_operands('a,a,a->a')
exp_path = ['einsum_path', (1,), (0, 1)]
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
assert_raises(
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
def test_spaces(self):
#gh-10794
arr = np.array([[1]])
for sp in itertools.product(['', ' '], repeat=4):
# no error for any spacing
np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
def test_overlap():
a = np.arange(9, dtype=int).reshape(3, 3)
b = np.arange(9, dtype=int).reshape(3, 3)
d = np.dot(a, b)
# sanity check
c = np.einsum('ij,jk->ik', a, b)
assert_equal(c, d)
#gh-10080, out overlaps one of the operands
c = np.einsum('ij,jk->ik', a, b, out=b)
assert_equal(c, d)
| 50,113 | Python | 43.153304 | 108 | 0.513559 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_deprecations.py | """
Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.
"""
import datetime
import operator
import warnings
import pytest
import tempfile
import re
import sys
import numpy as np
from numpy.testing import (
assert_raises, assert_warns, assert_, assert_array_equal, SkipTest,
KnownFailureException, break_cycles,
)
from numpy.core._multiarray_tests import fromstring_null_term_c_api
try:
import pytz
_has_pytz = True
except ImportError:
_has_pytz = False
class _DeprecationTestCase:
    # Just as a warning: warnings uses re.match, so the start of this message
    # must match.
message = ''
warning_cls = DeprecationWarning
def setup_method(self):
self.warn_ctx = warnings.catch_warnings(record=True)
self.log = self.warn_ctx.__enter__()
# Do *not* ignore other DeprecationWarnings. Ignoring warnings
# can give very confusing results because of
# https://bugs.python.org/issue4180 and it is probably simplest to
# try to keep the tests cleanly giving only the right warning type.
        # (While checking with warnings set to "error", those are ignored anyway)
# We still have them show up, because otherwise they would be raised
warnings.filterwarnings("always", category=self.warning_cls)
warnings.filterwarnings("always", message=self.message,
category=self.warning_cls)
def teardown_method(self):
self.warn_ctx.__exit__()
def assert_deprecated(self, function, num=1, ignore_others=False,
function_fails=False,
exceptions=np._NoValue,
args=(), kwargs={}):
"""Test if DeprecationWarnings are given and raised.
This first checks if the function when called gives `num`
DeprecationWarnings, after that it tries to raise these
DeprecationWarnings and compares them with `exceptions`.
The exceptions can be different for cases where this code path
is simply not anticipated and the exception is replaced.
Parameters
----------
function : callable
The function to test
num : int
Number of DeprecationWarnings to expect. This should normally be 1.
ignore_others : bool
Whether warnings of the wrong type should be ignored (note that
the message is not checked)
function_fails : bool
If the function would normally fail, setting this will check for
warnings inside a try/except block.
exceptions : Exception or tuple of Exceptions
Exception to expect when turning the warnings into an error.
The default checks for DeprecationWarnings. If exceptions is
empty the function is expected to run successfully.
args : tuple
Arguments for `function`
kwargs : dict
Keyword arguments for `function`
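        Examples
        --------
        A minimal sketch, using a hypothetical ``old_api`` helper that
        warns once when called::
            def old_api():
                warnings.warn("old_api is deprecated", DeprecationWarning)
            self.assert_deprecated(old_api)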
"""
__tracebackhide__ = True # Hide traceback for py.test
# reset the log
self.log[:] = []
if exceptions is np._NoValue:
exceptions = (self.warning_cls,)
try:
function(*args, **kwargs)
except (Exception if function_fails else tuple()):
pass
# just in case, clear the registry
num_found = 0
for warning in self.log:
if warning.category is self.warning_cls:
num_found += 1
elif not ignore_others:
raise AssertionError(
"expected %s but got: %s" %
(self.warning_cls.__name__, warning.category))
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
lst = [str(w) for w in self.log]
raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
warnings.filterwarnings("error", message=self.message,
category=self.warning_cls)
try:
function(*args, **kwargs)
if exceptions != tuple():
raise AssertionError(
"No error raised during function call")
except exceptions:
if exceptions == tuple():
raise AssertionError(
"Error raised during function call")
def assert_not_deprecated(self, function, args=(), kwargs={}):
"""Test that warnings are not raised.
This is just a shorthand for:
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
"""
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
class _VisibleDeprecationTestCase(_DeprecationTestCase):
warning_cls = np.VisibleDeprecationWarning
class TestComparisonDeprecations(_DeprecationTestCase):
"""This tests the deprecation, for non-element-wise comparison logic.
This used to mean that when an error occurred during element-wise comparison
(i.e. broadcasting) NotImplemented was returned, but also in the comparison
itself, False was given instead of the error.
Also test FutureWarning for the None comparison.
"""
message = "elementwise.* comparison failed; .*"
def test_normal_types(self):
for op in (operator.eq, operator.ne):
# Broadcasting errors:
self.assert_deprecated(op, args=(np.zeros(3), []))
a = np.zeros(3, dtype='i,i')
# (warning is issued a couple of times here)
self.assert_deprecated(op, args=(a, a[:-1]), num=None)
# ragged array comparison returns True/False
a = np.array([1, np.array([1,2,3])], dtype=object)
b = np.array([1, np.array([1,2,3])], dtype=object)
self.assert_deprecated(op, args=(a, b), num=None)
def test_string(self):
# For two string arrays, strings always raised the broadcasting error:
a = np.array(['a', 'b'])
b = np.array(['a', 'b', 'c'])
assert_raises(ValueError, lambda x, y: x == y, a, b)
# The empty list is not cast to string, and this used to pass due
# to dtype mismatch; now (2018-06-21) it correctly leads to a
# FutureWarning.
assert_warns(FutureWarning, lambda: a == [])
def test_void_dtype_equality_failures(self):
class NotArray:
def __array__(self):
raise TypeError
# Needed so Python 3 does not raise DeprecationWarning twice.
def __ne__(self, other):
return NotImplemented
self.assert_deprecated(lambda: np.arange(2) == NotArray())
self.assert_deprecated(lambda: np.arange(2) != NotArray())
def test_array_richcompare_legacy_weirdness(self):
# It doesn't really work to use assert_deprecated here, b/c part of
# the point of assert_deprecated is to check that when warnings are
# set to "error" mode then the error is propagated -- which is good!
# But here we are testing a bunch of code that is deprecated *because*
# it has the habit of swallowing up errors and converting them into
# different warnings. So assert_warns will have to be sufficient.
assert_warns(FutureWarning, lambda: np.arange(2) == "a")
assert_warns(FutureWarning, lambda: np.arange(2) != "a")
# No warning for scalar comparisons
with warnings.catch_warnings():
warnings.filterwarnings("error")
assert_(not (np.array(0) == "a"))
assert_(np.array(0) != "a")
assert_(not (np.int16(0) == "a"))
assert_(np.int16(0) != "a")
for arg1 in [np.asarray(0), np.int16(0)]:
struct = np.zeros(2, dtype="i4,i4")
for arg2 in [struct, "a"]:
for f in [operator.lt, operator.le, operator.gt, operator.ge]:
with warnings.catch_warnings() as l:
warnings.filterwarnings("always")
assert_raises(TypeError, f, arg1, arg2)
assert_(not l)
class TestDatetime64Timezone(_DeprecationTestCase):
"""Parsing of datetime64 with timezones deprecated in 1.11.0, because
datetime64 is now timezone naive rather than UTC only.
It will be quite a while before we can remove this, because, at the very
least, a lot of existing code uses the 'Z' modifier to avoid conversion
from local time to UTC, even if otherwise it handles time in a timezone
naive fashion.
"""
def test_string(self):
self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
@pytest.mark.skipif(not _has_pytz,
reason="The pytz module is not available.")
def test_datetime(self):
tz = pytz.timezone('US/Eastern')
dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
self.assert_deprecated(np.datetime64, args=(dt,))
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
"""Assigning the 'data' attribute of an ndarray is unsafe as pointed
out in gh-7093. Eventually, such assignment should NOT be allowed, but
    in the interests of maintaining backwards compatibility, only a
    DeprecationWarning will be raised instead for the time being, to give
    developers time to refactor relevant code.
"""
def test_data_attr_assignment(self):
a = np.arange(10)
b = np.linspace(0, 1, 10)
self.message = ("Assigning the 'data' attribute is an "
"inherently unsafe operation and will "
"be removed in the future.")
self.assert_deprecated(a.__setattr__, args=('data', b.data))
class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
"""
If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
represent the number in base 2 (positive) or 2's complement (negative) form,
the function used to silently ignore the parameter and return a representation
using the minimal number of bits needed for the form in question. Such behavior
is now considered unsafe from a user perspective and will raise an error in the future.
"""
def test_insufficient_width_positive(self):
args = (10,)
kwargs = {'width': 2}
self.message = ("Insufficient bit width provided. This behavior "
"will raise an error in the future.")
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
def test_insufficient_width_negative(self):
args = (-5,)
kwargs = {'width': 2}
self.message = ("Insufficient bit width provided. This behavior "
"will raise an error in the future.")
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase):
# Deprecated 2021-01-05, NumPy 1.21
message = r".*`.dtype` attribute"
def test_deprecation_dtype_attribute_is_dtype(self):
class dt:
dtype = "f8"
class vdt(np.void):
dtype = "f,f"
self.assert_deprecated(lambda: np.dtype(dt))
self.assert_deprecated(lambda: np.dtype(dt()))
self.assert_deprecated(lambda: np.dtype(vdt))
self.assert_deprecated(lambda: np.dtype(vdt(1)))
class TestTestDeprecated:
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
test_case_instance.setup_method()
assert_raises(AssertionError,
test_case_instance.assert_deprecated,
lambda: None)
def foo():
warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
test_case_instance.assert_deprecated(foo)
test_case_instance.teardown_method()
class TestNonNumericConjugate(_DeprecationTestCase):
"""
Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
which conflicts with the error behavior of np.conjugate.
"""
def test_conjugate(self):
for a in np.array(5), np.array(5j):
self.assert_not_deprecated(a.conjugate)
for a in (np.array('s'), np.array('2016', 'M'),
np.array((1, 2), [('a', int), ('b', int)])):
self.assert_deprecated(a.conjugate)
class TestNPY_CHAR(_DeprecationTestCase):
# 2017-05-03, 1.13.0
def test_npy_char_deprecation(self):
from numpy.core._multiarray_tests import npy_char_deprecation
self.assert_deprecated(npy_char_deprecation)
assert_(npy_char_deprecation() == 'S1')
class TestPyArray_AS1D(_DeprecationTestCase):
def test_npy_pyarrayas1d_deprecation(self):
from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
class TestPyArray_AS2D(_DeprecationTestCase):
def test_npy_pyarrayas2d_deprecation(self):
from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
class TestDatetimeEvent(_DeprecationTestCase):
# 2017-08-11, 1.14.0
def test_3_tuple(self):
for cls in (np.datetime64, np.timedelta64):
# two valid uses - (unit, num) and (unit, num, den, None)
self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
# trying to use the event argument, removed in 1.7.0, is deprecated
# it used to be a uint8
self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
class TestTruthTestingEmptyArrays(_DeprecationTestCase):
# 2017-09-25, 1.14.0
message = '.*truth value of an empty array is ambiguous.*'
def test_1d(self):
self.assert_deprecated(bool, args=(np.array([]),))
def test_2d(self):
self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
class TestBincount(_DeprecationTestCase):
# 2017-06-01, 1.14.0
def test_bincount_minlength(self):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
def test_generator_sum(self):
self.assert_deprecated(np.sum, args=((i for i in range(5)),))
class TestPositiveOnNonNumerical(_DeprecationTestCase):
# 2018-06-28, 1.16.0
def test_positive_on_non_number(self):
self.assert_deprecated(operator.pos, args=(np.array('foo'),))
class TestFromstring(_DeprecationTestCase):
# 2017-10-19, 1.14
def test_fromstring(self):
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
class TestFromStringAndFileInvalidData(_DeprecationTestCase):
# 2019-06-08, 1.17.0
# Tests should be moved to real tests when deprecation is done.
message = "string or file could not be read to its end"
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_data_file(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with tempfile.TemporaryFile(mode="w") as f:
x.tofile(f, sep=',', format='%.2f')
f.write(invalid_str)
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=","))
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
# Should not raise:
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
f.seek(0)
res = np.fromfile(f, sep=",", count=4)
assert_array_equal(res, x)
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_string(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
x_str = "1.51,2,3.51,4{}".format(invalid_str)
self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
        # The C-level API can use strings that are not fixed-size but
        # 0-terminated, so test that as well:
bytestr = x_str.encode("ascii")
self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
with assert_warns(DeprecationWarning):
            # this is slightly strange, in that fromstring leaves data
            # potentially uninitialized (it would be good to error when all
            # data has been read but count is larger than the actual data).
res = np.fromstring(x_str, sep=",", count=5)
assert_array_equal(res[:-1], x)
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# Should not raise:
res = np.fromstring(x_str, sep=",", count=4)
assert_array_equal(res, x)
class Test_GetSet_NumericOps(_DeprecationTestCase):
# 2018-09-20, 1.16.0
def test_get_numeric_ops(self):
from numpy.core._multiarray_tests import getset_numericops
self.assert_deprecated(getset_numericops, num=2)
        # empty kwargs prevents any state from actually changing, which
        # would break other tests.
self.assert_deprecated(np.set_numeric_ops, kwargs={})
assert_raises(ValueError, np.set_numeric_ops, add='abc')
class TestShape1Fields(_DeprecationTestCase):
warning_cls = FutureWarning
# 2019-05-20, 1.17.0
def test_shape_1_fields(self):
self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
class TestNonZero(_DeprecationTestCase):
# 2019-05-26, 1.17.0
def test_zerod(self):
self.assert_deprecated(lambda: np.nonzero(np.array(0)))
self.assert_deprecated(lambda: np.nonzero(np.array(1)))
def test_deprecate_ragged_arrays():
# 2019-11-29 1.19.0
#
# NEP 34 deprecated automatic object dtype when creating ragged
# arrays. Also see the "ragged" tests in `test_multiarray`
#
# emits a VisibleDeprecationWarning
arg = [1, [2, 3]]
with assert_warns(np.VisibleDeprecationWarning):
np.array(arg)
class TestTooDeepDeprecation(_VisibleDeprecationTestCase):
# NumPy 1.20, 2020-05-08
# This is a bit similar to the above ragged array deprecation case.
message = re.escape("Creating an ndarray from nested sequences exceeding")
def test_deprecation(self):
nested = [1]
for i in range(np.MAXDIMS - 1):
nested = [nested]
self.assert_not_deprecated(np.array, args=(nested,))
self.assert_not_deprecated(np.array,
args=(nested,), kwargs=dict(dtype=object))
self.assert_deprecated(np.array, args=([nested],))
class TestToString(_DeprecationTestCase):
# 2020-03-06 1.19.0
message = re.escape("tostring() is deprecated. Use tobytes() instead.")
def test_tostring(self):
arr = np.array(list(b"test\xFF"), dtype=np.uint8)
self.assert_deprecated(arr.tostring)
def test_tostring_matches_tobytes(self):
arr = np.array(list(b"test\xFF"), dtype=np.uint8)
b = arr.tobytes()
with assert_warns(DeprecationWarning):
s = arr.tostring()
assert s == b
class TestDTypeCoercion(_DeprecationTestCase):
# 2020-02-06 1.19.0
message = "Converting .* to a dtype .*is deprecated"
deprecated_types = [
# The builtin scalar super types:
np.generic, np.flexible, np.number,
np.inexact, np.floating, np.complexfloating,
np.integer, np.unsignedinteger, np.signedinteger,
# character is a deprecated S1 special case:
np.character,
]
def test_dtype_coercion(self):
for scalar_type in self.deprecated_types:
self.assert_deprecated(np.dtype, args=(scalar_type,))
def test_array_construction(self):
for scalar_type in self.deprecated_types:
self.assert_deprecated(np.array, args=([], scalar_type,))
def test_not_deprecated(self):
# All specific types are not deprecated:
for group in np.sctypes.values():
for scalar_type in group:
self.assert_not_deprecated(np.dtype, args=(scalar_type,))
for scalar_type in [type, dict, list, tuple]:
# Typical python types are coerced to object currently:
self.assert_not_deprecated(np.dtype, args=(scalar_type,))
class BuiltInRoundComplexDType(_DeprecationTestCase):
# 2020-03-31 1.19.0
deprecated_types = [np.csingle, np.cdouble, np.clongdouble]
not_deprecated_types = [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
]
def test_deprecated(self):
for scalar_type in self.deprecated_types:
scalar = scalar_type(0)
self.assert_deprecated(round, args=(scalar,))
self.assert_deprecated(round, args=(scalar, 0))
self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
def test_not_deprecated(self):
for scalar_type in self.not_deprecated_types:
scalar = scalar_type(0)
self.assert_not_deprecated(round, args=(scalar,))
self.assert_not_deprecated(round, args=(scalar, 0))
self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase):
# 2020-05-27, NumPy 1.20.0
message = "Out of bound index found. This was previously ignored.*"
@pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])])
def test_empty_subspace(self, index):
        # Test for both a single and two/multiple advanced indices.
        # These will raise an IndexError in the future.
arr = np.ones((2, 2, 0))
self.assert_deprecated(arr.__getitem__, args=(index,))
self.assert_deprecated(arr.__setitem__, args=(index, 0.))
# for this array, the subspace is only empty after applying the slice
arr2 = np.ones((2, 2, 1))
index2 = (slice(0, 0),) + index
self.assert_deprecated(arr2.__getitem__, args=(index2,))
self.assert_deprecated(arr2.__setitem__, args=(index2, 0.))
def test_empty_index_broadcast_not_deprecated(self):
arr = np.ones((2, 2, 2))
index = ([[3], [2]], []) # broadcast to an empty result.
self.assert_not_deprecated(arr.__getitem__, args=(index,))
self.assert_not_deprecated(arr.__setitem__,
args=(index, np.empty((2, 0, 2))))
class TestNonExactMatchDeprecation(_DeprecationTestCase):
# 2020-04-22
def test_non_exact_match(self):
arr = np.array([[3, 6, 6], [4, 5, 1]])
# misspelt mode check
self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp'))
# using completely different word with first character as R
self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random'))
class TestDeprecatedGlobals(_DeprecationTestCase):
# 2020-06-06
def test_type_aliases(self):
# from builtins
self.assert_deprecated(lambda: np.bool(True))
self.assert_deprecated(lambda: np.int(1))
self.assert_deprecated(lambda: np.float(1))
self.assert_deprecated(lambda: np.complex(1))
self.assert_deprecated(lambda: np.object())
self.assert_deprecated(lambda: np.str('abc'))
# from np.compat
self.assert_deprecated(lambda: np.long(1))
self.assert_deprecated(lambda: np.unicode('abc'))
# from np.core.numerictypes
self.assert_deprecated(lambda: np.typeDict)
class TestMatrixInOuter(_DeprecationTestCase):
# 2020-05-13 NumPy 1.20.0
message = (r"add.outer\(\) was passed a numpy matrix as "
r"(first|second) argument.")
def test_deprecated(self):
arr = np.array([1, 2, 3])
m = np.array([1, 2, 3]).view(np.matrix)
self.assert_deprecated(np.add.outer, args=(m, m), num=2)
self.assert_deprecated(np.add.outer, args=(arr, m))
self.assert_deprecated(np.add.outer, args=(m, arr))
self.assert_not_deprecated(np.add.outer, args=(arr, arr))
class TestRaggedArray(_DeprecationTestCase):
# 2020-07-24, NumPy 1.20.0
message = "setting an array element with a sequence"
def test_deprecated(self):
arr = np.ones((1, 1))
        # Deprecated if the array is a leaf node:
self.assert_deprecated(lambda: np.array([arr, 0], dtype=np.float64))
self.assert_deprecated(lambda: np.array([0, arr], dtype=np.float64))
# And when it is an assignment into a lower dimensional subarray:
self.assert_deprecated(lambda: np.array([arr, [0]], dtype=np.float64))
self.assert_deprecated(lambda: np.array([[0], arr], dtype=np.float64))
class FlatteningConcatenateUnsafeCast(_DeprecationTestCase):
# NumPy 1.20, 2020-09-03
message = "concatenate with `axis=None` will use same-kind casting"
def test_deprecated(self):
self.assert_deprecated(np.concatenate,
args=(([0.], [1.]),),
kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64)))
def test_not_deprecated(self):
self.assert_not_deprecated(np.concatenate,
args=(([0.], [1.]),),
kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64),
'casting': "unsafe"})
with assert_raises(TypeError):
# Tests should notice if the deprecation warning is given first...
np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64),
casting="same_kind")
class TestDeprecateSubarrayDTypeDuringArrayCoercion(_DeprecationTestCase):
warning_cls = FutureWarning
message = "(creating|casting) an array (with|to) a subarray dtype"
def test_deprecated_array(self):
# Arrays are more complex, since they "broadcast" on success:
arr = np.array([1, 2])
self.assert_deprecated(lambda: arr.astype("(2)i,"))
with pytest.warns(FutureWarning):
res = arr.astype("(2)i,")
assert_array_equal(res, [[1, 2], [1, 2]])
self.assert_deprecated(lambda: np.array(arr, dtype="(2)i,"))
with pytest.warns(FutureWarning):
res = np.array(arr, dtype="(2)i,")
assert_array_equal(res, [[1, 2], [1, 2]])
with pytest.warns(FutureWarning):
res = np.array([[(1,), (2,)], arr], dtype="(2)i,")
assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 2], [1, 2]]])
def test_deprecated_and_error(self):
# These error paths do not give a warning, but will succeed in the
# future.
arr = np.arange(5 * 2).reshape(5, 2)
def check():
with pytest.raises(ValueError):
arr.astype("(2,2)f")
self.assert_deprecated(check)
def check():
with pytest.raises(ValueError):
np.array(arr, dtype="(2,2)f")
self.assert_deprecated(check)
class TestFutureWarningArrayLikeNotIterable(_DeprecationTestCase):
# Deprecated 2020-12-09, NumPy 1.20
warning_cls = FutureWarning
message = "The input object of type.*but not a sequence"
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_deprecated(self, protocol):
"""Test that these objects give a warning since they are not 0-D,
not coerced at the top level `np.array(obj)`, but nested, and do
*not* define the sequence protocol.
NOTE: Tests for the versions including __len__ and __getitem__ exist
in `test_array_coercion.py` and they can be modified or amended
        when this deprecation expires.
"""
blueprint = np.arange(10)
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
self.assert_deprecated(lambda: np.array([MyArr()], dtype=object))
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_0d_not_deprecated(self, protocol):
        # 0-D always worked (although it would use __float__ or similar for
        # the conversion, which may not happen anymore)
blueprint = np.array(1.)
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
myarr = MyArr()
self.assert_not_deprecated(lambda: np.array([myarr], dtype=object))
res = np.array([myarr], dtype=object)
expected = np.empty(1, dtype=object)
expected[0] = myarr
assert_array_equal(res, expected)
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_unnested_not_deprecated(self, protocol):
blueprint = np.arange(10)
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
myarr = MyArr()
self.assert_not_deprecated(lambda: np.array(myarr))
res = np.array(myarr)
assert_array_equal(res, blueprint)
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_strange_dtype_handling(self, protocol):
"""The old code would actually use the dtype from the array, but
then end up not using the array (for dimension discovery)
"""
blueprint = np.arange(10).astype("f4")
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol),
"__float__": lambda _: 0.5})
myarr = MyArr()
# Make sure we warn (and capture the FutureWarning)
with pytest.warns(FutureWarning, match=self.message):
res = np.array([[myarr]])
assert res.shape == (1, 1)
assert res.dtype == "f4"
assert res[0, 0] == 0.5
@pytest.mark.parametrize("protocol",
["__array__", "__array_interface__", "__array_struct__"])
def test_assignment_not_deprecated(self, protocol):
# If the result is dtype=object we do not unpack a nested array or
# array-like, if it is nested at exactly the right depth.
# NOTE: We actually do still call __array__, etc. but ignore the result
# in the end. For `dtype=object` we could optimize that away.
blueprint = np.arange(10).astype("f4")
MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol),
"__float__": lambda _: 0.5})
myarr = MyArr()
res = np.empty(3, dtype=object)
def set():
res[:] = [myarr, myarr, myarr]
self.assert_not_deprecated(set)
assert res[0] is myarr
assert res[1] is myarr
assert res[2] is myarr
class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase):
# Deprecated 2020-11-24, NumPy 1.20
"""
Technically, it should be impossible to create numpy object scalars,
but there was an unpickle path that would in theory allow it. That
path is invalid and must lead to the warning.
"""
message = "Unpickling a scalar with object dtype is deprecated."
def test_deprecated(self):
ctor = np.core.multiarray.scalar
self.assert_deprecated(lambda: ctor(np.dtype("O"), 1))
try:
with warnings.catch_warnings():
warnings.simplefilter("always")
import nose # noqa: F401
except ImportError:
HAVE_NOSE = False
else:
HAVE_NOSE = True
@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose")
class TestNoseDecoratorsDeprecated(_DeprecationTestCase):
class DidntSkipException(Exception):
pass
def test_slow(self):
def _test_slow():
@np.testing.dec.slow
def slow_func(x, y, z):
pass
assert_(slow_func.slow)
self.assert_deprecated(_test_slow)
def test_setastest(self):
def _test_setastest():
@np.testing.dec.setastest()
def f_default(a):
pass
@np.testing.dec.setastest(True)
def f_istest(a):
pass
@np.testing.dec.setastest(False)
def f_isnottest(a):
pass
assert_(f_default.__test__)
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
self.assert_deprecated(_test_setastest, num=3)
def test_skip_functions_hardcoded(self):
def _test_skip_functions_hardcoded():
@np.testing.dec.skipif(True)
def f1(x):
raise self.DidntSkipException
try:
f1('a')
except self.DidntSkipException:
raise Exception('Failed to skip')
except SkipTest().__class__:
pass
@np.testing.dec.skipif(False)
def f2(x):
raise self.DidntSkipException
try:
f2('a')
except self.DidntSkipException:
pass
except SkipTest().__class__:
raise Exception('Skipped when not expected to')
self.assert_deprecated(_test_skip_functions_hardcoded, num=2)
def test_skip_functions_callable(self):
def _test_skip_functions_callable():
def skip_tester():
return skip_flag == 'skip me!'
@np.testing.dec.skipif(skip_tester)
def f1(x):
raise self.DidntSkipException
try:
skip_flag = 'skip me!'
f1('a')
except self.DidntSkipException:
raise Exception('Failed to skip')
except SkipTest().__class__:
pass
@np.testing.dec.skipif(skip_tester)
def f2(x):
raise self.DidntSkipException
try:
skip_flag = 'five is right out!'
f2('a')
except self.DidntSkipException:
pass
except SkipTest().__class__:
raise Exception('Skipped when not expected to')
self.assert_deprecated(_test_skip_functions_callable, num=2)
def test_skip_generators_hardcoded(self):
def _test_skip_generators_hardcoded():
@np.testing.dec.knownfailureif(True, "This test is known to fail")
def g1(x):
yield from range(x)
try:
for j in g1(10):
pass
except KnownFailureException().__class__:
pass
else:
raise Exception('Failed to mark as known failure')
@np.testing.dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
yield from range(x)
raise self.DidntSkipException('FAIL')
try:
for j in g2(10):
pass
except KnownFailureException().__class__:
raise Exception('Marked incorrectly as known failure')
except self.DidntSkipException:
pass
self.assert_deprecated(_test_skip_generators_hardcoded, num=2)
def test_skip_generators_callable(self):
def _test_skip_generators_callable():
def skip_tester():
return skip_flag == 'skip me!'
@np.testing.dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
yield from range(x)
try:
skip_flag = 'skip me!'
for j in g1(10):
pass
except KnownFailureException().__class__:
pass
else:
raise Exception('Failed to mark as known failure')
@np.testing.dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
yield from range(x)
raise self.DidntSkipException('FAIL')
try:
skip_flag = 'do not skip'
for j in g2(10):
pass
except KnownFailureException().__class__:
raise Exception('Marked incorrectly as known failure')
except self.DidntSkipException:
pass
self.assert_deprecated(_test_skip_generators_callable, num=2)
def test_deprecated(self):
def _test_deprecated():
@np.testing.dec.deprecated(True)
def non_deprecated_func():
pass
@np.testing.dec.deprecated()
def deprecated_func():
import warnings
warnings.warn("TEST: deprecated func", DeprecationWarning, stacklevel=1)
@np.testing.dec.deprecated()
def deprecated_func2():
import warnings
warnings.warn("AHHHH", stacklevel=1)
raise ValueError
@np.testing.dec.deprecated()
def deprecated_func3():
import warnings
warnings.warn("AHHHH", stacklevel=1)
# marked as deprecated, but does not raise DeprecationWarning
assert_raises(AssertionError, non_deprecated_func)
# should be silent
deprecated_func()
with warnings.catch_warnings(record=True):
warnings.simplefilter("always") # do not propagate unrelated warnings
# fails if deprecated decorator just disables test. See #1453.
assert_raises(ValueError, deprecated_func2)
# warning is not a DeprecationWarning
assert_raises(AssertionError, deprecated_func3)
self.assert_deprecated(_test_deprecated, num=4)
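        # num=4 corresponds to the four applications of the (deprecated)
        # `np.testing.dec.deprecated` decorator above; each application is
        # expected to emit one DeprecationWarning of its own.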
def test_parametrize(self):
def _test_parametrize():
# dec.parametrize assumes that it is being run by nose. Because
# we are running under pytest, we need to explicitly check the
# results.
@np.testing.dec.parametrize('base, power, expected',
[(1, 1, 1),
(2, 1, 2),
(2, 2, 4)])
def check_parametrize(base, power, expected):
assert_(base**power == expected)
count = 0
for test in check_parametrize():
test[0](*test[1:])
count += 1
assert_(count == 3)
self.assert_deprecated(_test_parametrize)
class TestSingleElementSignature(_DeprecationTestCase):
# Deprecated 2021-04-01, NumPy 1.21
message = r"The use of a length 1"
def test_deprecated(self):
self.assert_deprecated(lambda: np.add(1, 2, signature="d"))
self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),)))
class TestComparisonBadDType(_DeprecationTestCase):
# Deprecated 2021-04-01, NumPy 1.21
message = r"using `dtype=` in comparisons is only useful for"
def test_deprecated(self):
self.assert_deprecated(lambda: np.equal(1, 1, dtype=np.int64))
# Not an error only for the transition
self.assert_deprecated(lambda: np.equal(1, 1, sig=(None, None, "l")))
def test_not_deprecated(self):
np.equal(True, False, dtype=bool)
np.equal(3, 5, dtype=bool, casting="unsafe")
np.equal([None], [4], dtype=object)
class TestComparisonBadObjectDType(_DeprecationTestCase):
# Deprecated 2021-04-01, NumPy 1.21 (different branch of the above one)
message = r"using `dtype=object` \(or equivalent signature\) will"
warning_cls = FutureWarning
def test_deprecated(self):
self.assert_deprecated(lambda: np.equal(1, 1, dtype=object))
self.assert_deprecated(
lambda: np.equal(1, 1, sig=(None, None, object)))
class TestCtypesGetter(_DeprecationTestCase):
# Deprecated 2021-05-18, Numpy 1.21.0
warning_cls = DeprecationWarning
ctypes = np.array([1]).ctypes
@pytest.mark.parametrize(
"name", ["get_data", "get_shape", "get_strides", "get_as_parameter"]
)
def test_deprecated(self, name: str) -> None:
func = getattr(self.ctypes, name)
self.assert_deprecated(lambda: func())
@pytest.mark.parametrize(
"name", ["data", "shape", "strides", "_as_parameter_"]
)
def test_not_deprecated(self, name: str) -> None:
self.assert_not_deprecated(lambda: getattr(self.ctypes, name))
class TestUFuncForcedDTypeWarning(_DeprecationTestCase):
message = "The `dtype` and `signature` arguments to ufuncs only select the"
def test_not_deprecated(self):
import pickle
        # does not warn (the test relies on bad pickling behaviour; simply
        # remove it if the `assert int64 is not int64_2` check starts failing).
int64 = np.dtype("int64")
int64_2 = pickle.loads(pickle.dumps(int64))
assert int64 is not int64_2
self.assert_not_deprecated(lambda: np.add(3, 4, dtype=int64_2))
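        # For context (illustrative): builtin dtype instances are normally
        # cached singletons, e.g. `np.dtype("int64") is np.dtype("int64")`
        # holds; the pickle round-trip above sidesteps that cache, which is
        # exactly the quirk this test leans on.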
def test_deprecation(self):
int64 = np.dtype("int64")
self.assert_deprecated(lambda: np.add(3, 5, dtype=int64.newbyteorder()))
self.assert_deprecated(lambda: np.add(3, 5, dtype="m8[ns]"))
def test_behaviour(self):
int64 = np.dtype("int64")
arr = np.arange(10, dtype="m8[s]")
with pytest.warns(DeprecationWarning, match=self.message):
np.add(3, 5, dtype=int64.newbyteorder())
with pytest.warns(DeprecationWarning, match=self.message):
np.add(3, 5, dtype="m8[ns]") # previously used the "ns"
with pytest.warns(DeprecationWarning, match=self.message):
np.add(arr, arr, dtype="m8[ns]") # never preserved the "ns"
with pytest.warns(DeprecationWarning, match=self.message):
np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns"
with pytest.warns(DeprecationWarning, match=self.message):
np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns"
PARTITION_DICT = {
"partition method": np.arange(10).partition,
"argpartition method": np.arange(10).argpartition,
"partition function": lambda kth: np.partition(np.arange(10), kth),
"argpartition function": lambda kth: np.argpartition(np.arange(10), kth),
}
@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT)
class TestPartitionBoolIndex(_DeprecationTestCase):
# Deprecated 2021-09-29, NumPy 1.22
warning_cls = DeprecationWarning
message = "Passing booleans as partition index is deprecated"
def test_deprecated(self, func):
self.assert_deprecated(lambda: func(True))
self.assert_deprecated(lambda: func([False, True]))
def test_not_deprecated(self, func):
self.assert_not_deprecated(lambda: func(1))
self.assert_not_deprecated(lambda: func([0, 1]))
class TestMachAr(_DeprecationTestCase):
# Deprecated 2021-10-19, NumPy 1.22
warning_cls = DeprecationWarning
def test_deprecated(self):
self.assert_deprecated(lambda: np.MachAr)
def test_deprecated_module(self):
self.assert_deprecated(lambda: getattr(np.core, "machar"))
def test_deprecated_attr(self):
finfo = np.finfo(float)
self.assert_deprecated(lambda: getattr(finfo, "machar"))
class TestQuantileInterpolationDeprecation(_DeprecationTestCase):
# Deprecated 2021-11-08, NumPy 1.22
@pytest.mark.parametrize("func",
[np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
def test_deprecated(self, func):
self.assert_deprecated(
lambda: func([0., 1.], 0., interpolation="linear"))
self.assert_deprecated(
lambda: func([0., 1.], 0., interpolation="nearest"))
@pytest.mark.parametrize("func",
[np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
def test_both_passed(self, func):
with warnings.catch_warnings():
# catch the DeprecationWarning so that it does not raise:
warnings.simplefilter("always", DeprecationWarning)
with pytest.raises(TypeError):
func([0., 1.], 0., interpolation="nearest", method="nearest")
class TestMemEventHook(_DeprecationTestCase):
# Deprecated 2021-11-18, NumPy 1.23
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
import numpy.core._multiarray_tests as ma_tests
with pytest.warns(DeprecationWarning,
match='PyDataMem_SetEventHook is deprecated'):
ma_tests.test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the small-memory cache limit in ctors.c
a = np.zeros(1000)
del a
break_cycles()
with pytest.warns(DeprecationWarning,
match='PyDataMem_SetEventHook is deprecated'):
ma_tests.test_pydatamem_seteventhook_end()
class TestArrayFinalizeNone(_DeprecationTestCase):
message = "Setting __array_finalize__ = None"
def test_use_none_is_deprecated(self):
# Deprecated way that ndarray itself showed nothing needs finalizing.
class NoFinalize(np.ndarray):
__array_finalize__ = None
self.assert_deprecated(lambda: np.array(1).view(NoFinalize))
class TestAxisNotMAXDIMS(_DeprecationTestCase):
# Deprecated 2022-01-08, NumPy 1.23
message = r"Using `axis=32` \(MAXDIMS\) is deprecated"
def test_deprecated(self):
a = np.zeros((1,)*32)
self.assert_deprecated(lambda: np.repeat(a, 1, axis=np.MAXDIMS))
class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase):
# Deprecated 2022-07-03, NumPy 1.23
# This test can be removed without replacement after the deprecation.
    # The tests:
    # * numpy/lib/tests/test_loadtxt.py::test_integer_signs
    # * lib/tests/test_loadtxt.py::test_implicit_cast_float_to_int_fails
    # have a warning filter that needs to be removed.
message = r"loadtxt\(\): Parsing an integer via a float is deprecated.*"
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
def test_deprecated_warning(self, dtype):
with pytest.warns(DeprecationWarning, match=self.message):
np.loadtxt(["10.5"], dtype=dtype)
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
def test_deprecated_raised(self, dtype):
# The DeprecationWarning is chained when raised, so test manually:
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
try:
np.loadtxt(["10.5"], dtype=dtype)
except ValueError as e:
assert isinstance(e.__cause__, DeprecationWarning)
| 47,590 | Python | 37.348912 | 91 | 0.610044 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_print.py | import sys
import pytest
import numpy as np
from numpy.testing import assert_, assert_equal
from numpy.core.tests._locales import CommaDecimalPointLocale
from io import StringIO
_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_float_types(tp):
""" Check formatting.
This is only for the str function, and only for simple types.
    The precision of np.float32 and np.longdouble isn't the same as the
    Python float precision.
"""
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(float(x)),
err_msg='Failed str formatting for type %s' % tp)
if tp(1e16).itemsize > 4:
assert_equal(str(tp(1e16)), str(float('1e16')),
err_msg='Failed str formatting for type %s' % tp)
else:
ref = '1e+16'
assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
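# Illustrative note (not part of the original suite): np.float32 carries only
# about 7 significant decimal digits, so 1e16 is not exactly representable and
# str() falls back to the short scientific form checked above, e.g.:
#
#     str(np.float32(1e16))           # -> '1e+16'
#     bool(np.float32(1e16) == 1e16)  # -> False; the nearest float32 differs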
@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_nan_inf_float(tp):
""" Check formatting of nan & inf.
This is only for the str function, and only for simple types.
    The precision of np.float32 and np.longdouble isn't the same as the
    Python float precision.
"""
for x in [np.inf, -np.inf, np.nan]:
assert_equal(str(tp(x)), _REF[x],
err_msg='Failed str formatting for type %s' % tp)
@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_types(tp):
"""Check formatting of complex types.
This is only for the str function, and only for simple types.
    The precision of np.float32 and np.longdouble isn't the same as the
    Python float precision.
"""
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(complex(x)),
err_msg='Failed str formatting for type %s' % tp)
assert_equal(str(tp(x*1j)), str(complex(x*1j)),
err_msg='Failed str formatting for type %s' % tp)
assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),
err_msg='Failed str formatting for type %s' % tp)
if tp(1e16).itemsize > 8:
assert_equal(str(tp(1e16)), str(complex(1e16)),
err_msg='Failed str formatting for type %s' % tp)
else:
ref = '(1e+16+0j)'
assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_inf_nan(dtype):
"""Check inf/nan formatting of complex types."""
TESTS = {
complex(np.inf, 0): "(inf+0j)",
complex(0, np.inf): "infj",
complex(-np.inf, 0): "(-inf+0j)",
complex(0, -np.inf): "-infj",
complex(np.inf, 1): "(inf+1j)",
complex(1, np.inf): "(1+infj)",
complex(-np.inf, 1): "(-inf+1j)",
complex(1, -np.inf): "(1-infj)",
complex(np.nan, 0): "(nan+0j)",
complex(0, np.nan): "nanj",
complex(-np.nan, 0): "(nan+0j)",
complex(0, -np.nan): "nanj",
complex(np.nan, 1): "(nan+1j)",
complex(1, np.nan): "(1+nanj)",
complex(-np.nan, 1): "(nan+1j)",
complex(1, -np.nan): "(1+nanj)",
}
for c, s in TESTS.items():
assert_equal(str(dtype(c)), s)
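# Note (illustrative): IEEE NaN carries a sign bit, but neither Python's nor
# NumPy's complex formatting displays it, which is why -np.nan renders as a
# plain "nan" in the table above.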
# print tests
def _test_redirected_print(x, tp, ref=None):
file = StringIO()
file_tp = StringIO()
stdout = sys.stdout
try:
sys.stdout = file_tp
print(tp(x))
sys.stdout = file
if ref:
print(ref)
else:
print(x)
finally:
sys.stdout = stdout
    assert_equal(file.getvalue(), file_tp.getvalue(),
                 err_msg='print failed for type %s' % tp)
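# A contextlib-based equivalent of the manual stdout swap above (a sketch,
# not what this suite actually uses):
#
#     from contextlib import redirect_stdout
#     buf = StringIO()
#     with redirect_stdout(buf):
#         print(tp(x))
#     # ... then compare buf.getvalue() against the reference output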
@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_float_type_print(tp):
"""Check formatting when using print """
for x in [0, 1, -1, 1e20]:
_test_redirected_print(float(x), tp)
for x in [np.inf, -np.inf, np.nan]:
_test_redirected_print(float(x), tp, _REF[x])
if tp(1e16).itemsize > 4:
_test_redirected_print(float(1e16), tp)
else:
ref = '1e+16'
_test_redirected_print(float(1e16), tp, ref)
@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_type_print(tp):
"""Check formatting when using print """
    # We do not create complex numbers with inf/nan directly because the
    # feature was missing in Python < 2.6
for x in [0, 1, -1, 1e20]:
_test_redirected_print(complex(x), tp)
if tp(1e16).itemsize > 8:
_test_redirected_print(complex(1e16), tp)
else:
ref = '(1e+16+0j)'
_test_redirected_print(complex(1e16), tp, ref)
_test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
_test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
_test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
def test_scalar_format():
"""Test the str.format method with NumPy scalar types"""
tests = [('{0}', True, np.bool_),
('{0}', False, np.bool_),
('{0:d}', 130, np.uint8),
('{0:d}', 50000, np.uint16),
('{0:d}', 3000000000, np.uint32),
('{0:d}', 15000000000000000000, np.uint64),
('{0:d}', -120, np.int8),
('{0:d}', -30000, np.int16),
('{0:d}', -2000000000, np.int32),
('{0:d}', -7000000000000000000, np.int64),
('{0:g}', 1.5, np.float16),
('{0:g}', 1.5, np.float32),
('{0:g}', 1.5, np.float64),
('{0:g}', 1.5, np.longdouble),
('{0:g}', 1.5+0.5j, np.complex64),
('{0:g}', 1.5+0.5j, np.complex128),
('{0:g}', 1.5+0.5j, np.clongdouble)]
for (fmat, val, valtype) in tests:
try:
assert_equal(fmat.format(val), fmat.format(valtype(val)),
"failed with val %s, type %s" % (val, valtype))
except ValueError as e:
assert_(False,
"format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
(fmat, repr(val), repr(valtype), str(e)))
#
# Locale tests: scalar type formatting should be independent of the locale
#
class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
def test_locale_single(self):
assert_equal(str(np.float32(1.2)), str(float(1.2)))
def test_locale_double(self):
assert_equal(str(np.double(1.2)), str(float(1.2)))
def test_locale_longdouble(self):
assert_equal(str(np.longdouble('1.2')), str(float(1.2)))
| 6,737 | Python | 32.522388 | 80 | 0.556628 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_api.py | import sys
import numpy as np
from numpy.core._rational_tests import rational
import pytest
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
HAS_REFCOUNT
)
def test_array_array():
tobj = type(object)
ones11 = np.ones((1, 1), np.float64)
tndarray = type(ones11)
# Test is_ndarray
assert_equal(np.array(ones11, dtype=np.float64), ones11)
if HAS_REFCOUNT:
old_refcount = sys.getrefcount(tndarray)
np.array(ones11)
assert_equal(old_refcount, sys.getrefcount(tndarray))
# test None
assert_equal(np.array(None, dtype=np.float64),
np.array(np.nan, dtype=np.float64))
if HAS_REFCOUNT:
old_refcount = sys.getrefcount(tobj)
np.array(None, dtype=np.float64)
assert_equal(old_refcount, sys.getrefcount(tobj))
# test scalar
assert_equal(np.array(1.0, dtype=np.float64),
np.ones((), dtype=np.float64))
if HAS_REFCOUNT:
old_refcount = sys.getrefcount(np.float64)
np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
assert_equal(old_refcount, sys.getrefcount(np.float64))
# test string
S2 = np.dtype((bytes, 2))
S3 = np.dtype((bytes, 3))
S5 = np.dtype((bytes, 5))
assert_equal(np.array(b"1.0", dtype=np.float64),
np.ones((), dtype=np.float64))
assert_equal(np.array(b"1.0").dtype, S3)
assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))
    # test unicode string
U2 = np.dtype((str, 2))
U3 = np.dtype((str, 3))
U5 = np.dtype((str, 5))
assert_equal(np.array("1.0", dtype=np.float64),
np.ones((), dtype=np.float64))
assert_equal(np.array("1.0").dtype, U3)
assert_equal(np.array("1.0", dtype=str).dtype, U3)
assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))
builtins = getattr(__builtins__, '__dict__', __builtins__)
assert_(hasattr(builtins, 'get'))
# test memoryview
dat = np.array(memoryview(b'1.0'), dtype=np.float64)
assert_equal(dat, [49.0, 46.0, 48.0])
assert_(dat.dtype.type is np.float64)
dat = np.array(memoryview(b'1.0'))
assert_equal(dat, [49, 46, 48])
assert_(dat.dtype.type is np.uint8)
# test array interface
a = np.array(100.0, dtype=np.float64)
o = type("o", (object,),
dict(__array_interface__=a.__array_interface__))
assert_equal(np.array(o, dtype=np.float64), a)
# test array_struct interface
a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('f0', int), ('f1', float), ('f2', str)])
o = type("o", (object,),
dict(__array_struct__=a.__array_struct__))
    ## wasn't what I expected... is np.array(o) supposed to equal a?
    ## instead we get an array([...], dtype=">V18")
assert_equal(bytes(np.array(o).data), bytes(a.data))
# test array
o = type("o", (object,),
dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
# test recursion
nested = 1.5
for i in range(np.MAXDIMS):
nested = [nested]
# no error
np.array(nested)
# Exceeds recursion limit
assert_raises(ValueError, np.array, [nested], dtype=np.float64)
# Try with lists...
assert_equal(np.array([None] * 10, dtype=np.float64),
np.full((10,), np.nan, dtype=np.float64))
assert_equal(np.array([[None]] * 10, dtype=np.float64),
np.full((10, 1), np.nan, dtype=np.float64))
assert_equal(np.array([[None] * 10], dtype=np.float64),
np.full((1, 10), np.nan, dtype=np.float64))
assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
np.full((10, 10), np.nan, dtype=np.float64))
assert_equal(np.array([1.0] * 10, dtype=np.float64),
np.ones((10,), dtype=np.float64))
assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
np.ones((10, 1), dtype=np.float64))
assert_equal(np.array([[1.0] * 10], dtype=np.float64),
np.ones((1, 10), dtype=np.float64))
assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
np.ones((10, 10), dtype=np.float64))
# Try with tuples
assert_equal(np.array((None,) * 10, dtype=np.float64),
np.full((10,), np.nan, dtype=np.float64))
assert_equal(np.array([(None,)] * 10, dtype=np.float64),
np.full((10, 1), np.nan, dtype=np.float64))
assert_equal(np.array([(None,) * 10], dtype=np.float64),
np.full((1, 10), np.nan, dtype=np.float64))
assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
np.full((10, 10), np.nan, dtype=np.float64))
assert_equal(np.array((1.0,) * 10, dtype=np.float64),
np.ones((10,), dtype=np.float64))
assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
np.ones((10, 1), dtype=np.float64))
assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
np.ones((1, 10), dtype=np.float64))
assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
np.ones((10, 10), dtype=np.float64))
@pytest.mark.parametrize("array", [True, False])
def test_array_impossible_casts(array):
# All builtin types can be forcibly cast, at least theoretically,
# but user dtypes cannot necessarily.
rt = rational(1, 2)
if array:
rt = np.array(rt)
with assert_raises(TypeError):
np.array(rt, dtype="M8")
def test_fastCopyAndTranspose():
# 0D array
a = np.array(2)
b = np.fastCopyAndTranspose(a)
assert_equal(b, a.T)
assert_(b.flags.owndata)
# 1D array
a = np.array([3, 2, 7, 0])
b = np.fastCopyAndTranspose(a)
assert_equal(b, a.T)
assert_(b.flags.owndata)
# 2D array
a = np.arange(6).reshape(2, 3)
b = np.fastCopyAndTranspose(a)
assert_equal(b, a.T)
assert_(b.flags.owndata)
def test_array_astype():
a = np.arange(6, dtype='f4').reshape(2, 3)
# Default behavior: allows unsafe casts, keeps memory layout,
# always copies.
b = a.astype('i4')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('i4'))
assert_equal(a.strides, b.strides)
b = a.T.astype('i4')
assert_equal(a.T, b)
assert_equal(b.dtype, np.dtype('i4'))
assert_equal(a.T.strides, b.strides)
b = a.astype('f4')
assert_equal(a, b)
assert_(not (a is b))
# copy=False parameter can sometimes skip a copy
b = a.astype('f4', copy=False)
assert_(a is b)
# order parameter allows overriding of the memory layout,
# forcing a copy if the layout is wrong
b = a.astype('f4', order='F', copy=False)
assert_equal(a, b)
assert_(not (a is b))
assert_(b.flags.f_contiguous)
b = a.astype('f4', order='C', copy=False)
assert_equal(a, b)
assert_(a is b)
assert_(b.flags.c_contiguous)
# casting parameter allows catching bad casts
b = a.astype('c8', casting='safe')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('c8'))
assert_raises(TypeError, a.astype, 'i4', casting='safe')
# subok=False passes through a non-subclassed array
b = a.astype('f4', subok=0, copy=False)
assert_(a is b)
class MyNDArray(np.ndarray):
pass
a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
# subok=True passes through a subclass
b = a.astype('f4', subok=True, copy=False)
assert_(a is b)
# subok=True is default, and creates a subtype on a cast
b = a.astype('i4', copy=False)
assert_equal(a, b)
assert_equal(type(b), MyNDArray)
# subok=False never returns a subclass
b = a.astype('f4', subok=False, copy=False)
assert_equal(a, b)
assert_(not (a is b))
assert_(type(b) is not MyNDArray)
    # Make sure converting from a string object to a fixed-length string
    # does not truncate.
a = np.array([b'a'*100], dtype='O')
b = a.astype('S')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('S100'))
a = np.array([u'a'*100], dtype='O')
b = a.astype('U')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('U100'))
# Same test as above but for strings shorter than 64 characters
a = np.array([b'a'*10], dtype='O')
b = a.astype('S')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('S10'))
a = np.array([u'a'*10], dtype='O')
b = a.astype('U')
assert_equal(a, b)
assert_equal(b.dtype, np.dtype('U10'))
a = np.array(123456789012345678901234567890, dtype='O').astype('S')
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
a = np.array(123456789012345678901234567890, dtype='O').astype('U')
assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
a = np.array([123456789012345678901234567890], dtype='O').astype('S')
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
a = np.array([123456789012345678901234567890], dtype='O').astype('U')
assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
a = np.array(123456789012345678901234567890, dtype='S')
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
a = np.array(123456789012345678901234567890, dtype='U')
assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
a = np.array(u'a\u0140', dtype='U')
b = np.ndarray(buffer=a, dtype='uint32', shape=2)
assert_(b.size == 2)
a = np.array([1000], dtype='i4')
assert_raises(TypeError, a.astype, 'S1', casting='safe')
a = np.array(1000, dtype='i4')
assert_raises(TypeError, a.astype, 'U1', casting='safe')
@pytest.mark.parametrize("dt", ["S", "U"])
def test_array_astype_to_string_discovery_empty(dt):
# See also gh-19085
arr = np.array([""], dtype=object)
    # Note, the itemsize is the `0 -> 1` logic, which should change.
    # The important part of the test is rather that it does not error.
assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize
# check the same thing for `np.can_cast` (since it accepts arrays)
assert np.can_cast(arr, dt, casting="unsafe")
assert not np.can_cast(arr, dt, casting="same_kind")
# as well as for the object as a descriptor:
assert np.can_cast("O", dt, casting="unsafe")
@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
def test_array_astype_to_void(dt):
dt = np.dtype(dt)
arr = np.array([], dtype=dt)
assert arr.astype("V").dtype.itemsize == dt.itemsize
def test_object_array_astype_to_void():
    # This is different from `test_array_astype_to_void` as object arrays
    # are inspected. The default void is "V8" (8 is the length of double).
arr = np.array([], dtype="O").astype("V")
assert arr.dtype == "V8"
@pytest.mark.parametrize("t",
np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
)
def test_array_astype_warning(t):
# test ComplexWarning when casting from complex to float or int
a = np.array(10, dtype=np.complex_)
assert_warns(np.ComplexWarning, a.astype, t)
@pytest.mark.parametrize(["dtype", "out_dtype"],
[(np.bytes_, np.bool_),
(np.unicode_, np.bool_),
(np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast(dtype, out_dtype):
"""
Currently, for `astype` strings are cast to booleans effectively by
    calling `bool(int(string))`. This is not consistent (see gh-9875) and
will eventually be deprecated.
"""
arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
expected = np.array([True, True, False, False], dtype=out_dtype)
assert_array_equal(arr.astype(out_dtype), expected)
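# For concreteness (illustrative): the docstring's `bool(int(string))` path
# maps "10" -> int("10") == 10 -> True and "0" -> int("0") == 0 -> False,
# which is exactly the `expected` pattern asserted above.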
@pytest.mark.parametrize(["dtype", "out_dtype"],
[(np.bytes_, np.bool_),
(np.unicode_, np.bool_),
(np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast_errors(dtype, out_dtype):
"""
    These currently error out, since the cast to integer fails, but they
    should not error out in the future.
"""
for invalid in ["False", "True", "", "\0", "non-empty"]:
arr = np.array([invalid], dtype=dtype)
with assert_raises(ValueError):
arr.astype(out_dtype)
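# Illustrative: a plain int("False") (or int("")) raises ValueError, which is
# why every entry of `invalid` above currently fails during the cast.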
@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_])
@pytest.mark.parametrize("scalar_type",
[np.complex64, np.complex128, np.clongdouble])
def test_string_to_complex_cast(str_type, scalar_type):
value = scalar_type(b"1+3j")
assert scalar_type(value) == 1+3j
assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
assert np.array(value).astype(scalar_type)[()] == 1+3j
arr = np.zeros(1, dtype=scalar_type)
arr[0] = value
assert arr[0] == 1+3j
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_none_to_nan_cast(dtype):
# Note that at the time of writing this test, the scalar constructors
# reject None
arr = np.zeros(1, dtype=dtype)
arr[0] = None
assert np.isnan(arr)[0]
assert np.isnan(np.array(None, dtype=dtype))[()]
assert np.isnan(np.array([None], dtype=dtype))[0]
assert np.isnan(np.array(None).astype(dtype))[()]
def test_copyto_fromscalar():
a = np.arange(6, dtype='f4').reshape(2, 3)
# Simple copy
np.copyto(a, 1.5)
assert_equal(a, 1.5)
np.copyto(a.T, 2.5)
assert_equal(a, 2.5)
# Where-masked copy
mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
np.copyto(a, 3.5, where=mask)
assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
np.copyto(a.T, 4.5, where=mask)
assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
def test_copyto():
a = np.arange(6, dtype='i4').reshape(2, 3)
# Simple copy
np.copyto(a, [[3, 1, 5], [6, 2, 1]])
assert_equal(a, [[3, 1, 5], [6, 2, 1]])
# Overlapping copy should work
np.copyto(a[:, :2], a[::-1, 1::-1])
assert_equal(a, [[2, 6, 5], [1, 3, 1]])
# Defaults to 'same_kind' casting
assert_raises(TypeError, np.copyto, a, 1.5)
# Force a copy with 'unsafe' casting, truncating 1.5 to 1
np.copyto(a, 1.5, casting='unsafe')
assert_equal(a, 1)
# Copying with a mask
np.copyto(a, 3, where=[True, False, True])
assert_equal(a, [[3, 1, 3], [3, 1, 3]])
# Casting rule still applies with a mask
assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
    # Lists of integer 0's and 1's are ok too
np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
assert_equal(a, [[3, 4, 4], [4, 1, 3]])
# Overlapping copy with mask should work
np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
assert_equal(a, [[3, 4, 4], [4, 3, 3]])
# 'dst' must be an array
assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
def test_copyto_permut():
# test explicit overflow case
pad = 500
l = [True] * pad + [True, True, True, True]
r = np.zeros(len(l)-pad)
d = np.ones(len(l)-pad)
mask = np.array(l)[pad:]
np.copyto(r, d, where=mask[::-1])
    # test all permutations of possible masks; 9 should be sufficient for
    # the current 4-byte unrolled code
power = 9
d = np.ones(power)
for i in range(2**power):
r = np.zeros(power)
l = [(i & x) != 0 for x in range(power)]
mask = np.array(l)
np.copyto(r, d, where=mask)
assert_array_equal(r == 1, l)
assert_equal(r.sum(), sum(l))
r = np.zeros(power)
np.copyto(r, d, where=mask[::-1])
assert_array_equal(r == 1, l[::-1])
assert_equal(r.sum(), sum(l))
r = np.zeros(power)
np.copyto(r[::2], d[::2], where=mask[::2])
assert_array_equal(r[::2] == 1, l[::2])
assert_equal(r[::2].sum(), sum(l[::2]))
r = np.zeros(power)
np.copyto(r[::2], d[::2], where=mask[::-2])
assert_array_equal(r[::2] == 1, l[::-2])
assert_equal(r[::2].sum(), sum(l[::-2]))
for c in [0xFF, 0x7F, 0x02, 0x10]:
r = np.zeros(power)
mask = np.array(l)
imask = np.array(l).view(np.uint8)
imask[mask != 0] = c
np.copyto(r, d, where=mask)
assert_array_equal(r == 1, l)
assert_equal(r.sum(), sum(l))
r = np.zeros(power)
np.copyto(r, d, where=True)
assert_equal(r.sum(), r.size)
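    # where=False must copy nothing, so the all-ones destination stays intact: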
r = np.ones(power)
d = np.zeros(power)
np.copyto(r, d, where=False)
assert_equal(r.sum(), r.size)
def test_copy_order():
a = np.arange(24).reshape(2, 1, 3, 4)
b = a.copy(order='F')
c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
def check_copy_result(x, y, ccontig, fcontig, strides=False):
assert_(not (x is y))
assert_equal(x, y)
        assert_equal(x.flags.c_contiguous, ccontig)
        assert_equal(x.flags.f_contiguous, fcontig)
# Validate the initial state of a, b, and c
assert_(a.flags.c_contiguous)
assert_(not a.flags.f_contiguous)
assert_(not b.flags.c_contiguous)
assert_(b.flags.f_contiguous)
assert_(not c.flags.c_contiguous)
assert_(not c.flags.f_contiguous)
# Copy with order='C'
res = a.copy(order='C')
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
res = b.copy(order='C')
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
res = c.copy(order='C')
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
res = np.copy(a, order='C')
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
res = np.copy(b, order='C')
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
res = np.copy(c, order='C')
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
# Copy with order='F'
res = a.copy(order='F')
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
res = b.copy(order='F')
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
res = c.copy(order='F')
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
res = np.copy(a, order='F')
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
res = np.copy(b, order='F')
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
res = np.copy(c, order='F')
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
# Copy with order='K'
res = a.copy(order='K')
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
res = b.copy(order='K')
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
res = c.copy(order='K')
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
res = np.copy(a, order='K')
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
res = np.copy(b, order='K')
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
res = np.copy(c, order='K')
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
def test_contiguous_flags():
a = np.ones((4, 4, 1))[::2,:,:]
a.strides = a.strides[:2] + (-123,)
b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
def check_contig(a, ccontig, fcontig):
assert_(a.flags.c_contiguous == ccontig)
assert_(a.flags.f_contiguous == fcontig)
# Check if new arrays are correct:
check_contig(a, False, False)
check_contig(b, False, False)
check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
check_contig(np.array([[[1], [2]]], order='F'), True, True)
check_contig(np.empty((2, 2)), True, False)
check_contig(np.empty((2, 2), order='F'), False, True)
# Check that np.array creates correct contiguous flags:
check_contig(np.array(a, copy=False), False, False)
check_contig(np.array(a, copy=False, order='C'), True, False)
check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
    # Check slicing updates of the flags:
check_contig(a[0], True, True)
check_contig(a[None, ::4, ..., None], True, True)
check_contig(b[0, 0, ...], False, True)
check_contig(b[:, :, 0:0, :, :], True, True)
# Test ravel and squeeze.
check_contig(a.ravel(), True, True)
check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
def test_broadcast_arrays():
# Test user defined dtypes
a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
result = np.broadcast_arrays(a, b)
assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
[((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
def test_full_from_list(shape, fill_value, expected_output):
output = np.full(shape, fill_value)
assert_equal(output, expected_output)
def test_astype_copyflag():
# test the various copyflag options
arr = np.arange(10, dtype=np.intp)
res_true = arr.astype(np.intp, copy=True)
assert not np.may_share_memory(arr, res_true)
res_always = arr.astype(np.intp, copy=np._CopyMode.ALWAYS)
assert not np.may_share_memory(arr, res_always)
res_false = arr.astype(np.intp, copy=False)
# `res_false is arr` currently, but check `may_share_memory`.
assert np.may_share_memory(arr, res_false)
res_if_needed = arr.astype(np.intp, copy=np._CopyMode.IF_NEEDED)
# `res_if_needed is arr` currently, but check `may_share_memory`.
assert np.may_share_memory(arr, res_if_needed)
res_never = arr.astype(np.intp, copy=np._CopyMode.NEVER)
assert np.may_share_memory(arr, res_never)
# Simple tests for when a copy is necessary:
res_false = arr.astype(np.float64, copy=False)
assert_array_equal(res_false, arr)
res_if_needed = arr.astype(np.float64,
copy=np._CopyMode.IF_NEEDED)
assert_array_equal(res_if_needed, arr)
assert_raises(ValueError, arr.astype, np.float64,
copy=np._CopyMode.NEVER)
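    # Summary (illustrative): np._CopyMode.ALWAYS always copies, IF_NEEDED
    # copies only when the dtype or layout forces it, and NEVER raises
    # ValueError whenever a copy would be unavoidable, as exercised above.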
| 22,474 | Python | 36.026359 | 90 | 0.597312 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_memmap.py | import sys
import os
import mmap
import pytest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryFile
from numpy import (
memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
from numpy import arange, allclose, asarray
from numpy.testing import (
assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY,
break_cycles
)
class TestMemmap:
def setup_method(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.shape = (3, 4)
self.dtype = 'float32'
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
def teardown_method(self):
self.tmpfp.close()
self.data = None
if IS_PYPY:
break_cycles()
break_cycles()
def test_roundtrip(self):
# Write data to file
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp # Test __del__ machinery, which handles cleanup
# Read data back from file
newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
assert_equal(newfp.flags.writeable, False)
def test_open_with_filename(self, tmp_path):
tmpname = tmp_path / 'mmap'
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp
def test_unnamed_file(self):
with TemporaryFile() as f:
fp = memmap(f, dtype=self.dtype, shape=self.shape)
del fp
def test_attributes(self):
offset = 1
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
assert_equal(offset, fp.offset)
assert_equal(mode, fp.mode)
del fp
def test_filename(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
abspath = Path(os.path.abspath(tmpname))
fp[:] = self.data[:]
assert_equal(abspath, fp.filename)
b = fp[:1]
assert_equal(abspath, b.filename)
del b
del fp
def test_path(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
shape=self.shape)
# os.path.realpath does not resolve symlinks on Windows
# see: https://bugs.python.org/issue9949
# use Path.resolve, just as memmap class does internally
abspath = str(Path(tmpname).resolve())
fp[:] = self.data[:]
assert_equal(abspath, str(fp.filename.resolve()))
b = fp[:1]
assert_equal(abspath, str(b.filename.resolve()))
del b
del fp
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
assert_equal(fp.filename, self.tmpfp.name)
@pytest.mark.skipif(sys.platform == 'gnu0',
reason="Known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
assert_equal(fp[0], self.data[0])
fp.flush()
def test_del(self):
# Make sure a view does not delete the underlying mmap
fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp_base[0] = 5
fp_view = fp_base[0:1]
assert_equal(fp_view[0], 5)
del fp_view
# Should still be able to access and assign values after
# deleting the view
assert_equal(fp_base[0], 5)
fp_base[0] = 6
assert_equal(fp_base[0], 6)
def test_arithmetic_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[(1, 2), (2, 3)]
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
assert_(fp[:2, :2]._mmap is fp._mmap)
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
assert_(new1.base is fp)
assert_(new2.base is fp)
new_array = asarray(fp)
assert_(new_array.base is fp)
def test_ufunc_return_ndarray(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
with suppress_warnings() as sup:
sup.filter(FutureWarning, "np.average currently does not preserve")
for unary_op in [sum, average, product]:
result = unary_op(fp)
assert_(isscalar(result))
assert_(result.__class__ is self.data[0, 0].__class__)
assert_(unary_op(fp, axis=0).__class__ is ndarray)
assert_(unary_op(fp, axis=1).__class__ is ndarray)
for binary_op in [add, subtract, multiply]:
assert_(binary_op(fp, self.data).__class__ is ndarray)
assert_(binary_op(self.data, fp).__class__ is ndarray)
assert_(binary_op(fp, fp).__class__ is ndarray)
fp += 1
        assert fp.__class__ is memmap
        add(fp, 1, out=fp)
        assert fp.__class__ is memmap
def test_getitem(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
assert_(fp[1:, :-1].__class__ is memmap)
# Fancy indexing returns a copy that is not memmapped
assert_(fp[[0, 1]].__class__ is ndarray)
def test_memmap_subclass(self):
class MemmapSubClass(memmap):
pass
fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
        # We keep previous behavior for subclasses of memmap, i.e. the
        # ufunc and __getitem__ output is never turned into an ndarray
assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
assert_(sum(fp).__class__ is MemmapSubClass)
assert_(fp[1:, :-1].__class__ is MemmapSubClass)
        assert fp[[0, 1]].__class__ is MemmapSubClass
def test_mmap_offset_greater_than_allocation_granularity(self):
size = 5 * mmap.ALLOCATIONGRANULARITY
offset = mmap.ALLOCATIONGRANULARITY + 1
fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
assert_(fp.offset == offset)
def test_no_shape(self):
self.tmpfp.write(b'a'*16)
mm = memmap(self.tmpfp, dtype='float64')
assert_equal(mm.shape, (2,))
def test_empty_array(self):
# gh-12653
with pytest.raises(ValueError, match='empty file'):
memmap(self.tmpfp, shape=(0,4), mode='w+')
self.tmpfp.write(b'\0')
# ok now the file is not empty
memmap(self.tmpfp, shape=(0,4), mode='w+')
| 7,483 | Python | 33.648148 | 79 | 0.567419 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_regression.py | import copy
import sys
import gc
import tempfile
import pytest
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
assert_, assert_equal, IS_PYPY, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_raises,
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON
)
from numpy.testing._private.utils import _no_tracing, requires_memory
from numpy.compat import asbytes, asunicode, pickle
class TestRegression:
def test_invalid_round(self):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
def test_mem_empty(self):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
def test_pickle_transposed(self):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
with BytesIO() as f:
pickle.dump(a, f, protocol=proto)
f.seek(0)
b = pickle.load(f)
assert_array_equal(a, b)
def test_dtype_names(self):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
def test_reduce(self):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
def test_zeros_order(self):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
def test_asarray_with_order(self):
        # Check that nothing is done when order='F' and the array is C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
def test_ravel_with_order(self):
        # Check that ravel works when order='F' and the array is C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
def test_sort_bigendian(self):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
def test_negative_nd_indexing(self):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
idx = np.array(origidx)
c[idx]
assert_array_equal(idx, origidx)
def test_char_dump(self):
# Ticket #50
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
with BytesIO() as f:
pickle.dump(ca, f, protocol=proto)
f.seek(0)
ca = np.load(f, allow_pickle=True)
def test_noncontiguous_fill(self):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
def rs():
b.shape = (10,)
assert_raises(AttributeError, rs)
def test_bool(self):
# Ticket #60
np.bool_(1) # Should succeed
def test_indexing1(self):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
def test_indexing2(self):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
def test_round(self):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
def test_scalar_compare(self):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
assert_(a[1] == 'auto')
assert_(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with suppress_warnings() as sup:
sup.filter(FutureWarning)
assert_(b != 'auto')
assert_(b[0] != 'auto')
def test_unicode_swapping(self):
# Ticket #79
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
def test_object_array_fill(self):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
def test_mem_dtype_align(self):
# Ticket #93
assert_raises(TypeError, np.dtype,
{'names':['a'], 'formats':['foo']}, align=1)
def test_endian_bool_indexing(self):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
xa = np.where((a > 2) & (a < 6))
xb = np.where((b > 2) & (b < 6))
ya = ((a > 2) & (a < 6))
yb = ((b > 2) & (b < 6))
assert_array_almost_equal(xa, ya.nonzero())
assert_array_almost_equal(xb, yb.nonzero())
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
def test_endian_where(self):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
net[2] = 0.605202
max_net = net.max()
test = np.where(net <= 0., max_net, net)
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
def test_endian_recarray(self):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
('data', '>u4', 2),
])
buf = np.recarray(1, dtype=dt)
buf[0]['head'] = 1
buf[0]['data'][:] = [1, 1]
h = buf[0]['head']
d = buf[0]['data'][0]
buf[0]['head'] = h
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
def test_mem_dot(self):
# Ticket #106
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
# Dummy array to detect bad memory access:
_z = np.ones(10)
_dummy = np.empty((0, 10))
z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
np.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
# Do the same for the built-in dot:
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
def test_arange_endian(self):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
assert_array_equal(ref, x)
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
def test_arange_inf_step(self):
ref = np.arange(0, 1, 10)
x = np.arange(0, 1, np.inf)
assert_array_equal(ref, x)
ref = np.arange(0, 1, -10)
x = np.arange(0, 1, -np.inf)
assert_array_equal(ref, x)
ref = np.arange(0, -1, -10)
x = np.arange(0, -1, -np.inf)
assert_array_equal(ref, x)
ref = np.arange(0, -1, 10)
x = np.arange(0, -1, np.inf)
assert_array_equal(ref, x)
def test_arange_underflow_stop_and_step(self):
finfo = np.finfo(np.float64)
ref = np.arange(0, finfo.eps, 2 * finfo.eps)
x = np.arange(0, finfo.eps, finfo.max)
assert_array_equal(ref, x)
ref = np.arange(0, finfo.eps, -2 * finfo.eps)
x = np.arange(0, finfo.eps, -finfo.max)
assert_array_equal(ref, x)
ref = np.arange(0, -finfo.eps, -2 * finfo.eps)
x = np.arange(0, -finfo.eps, -finfo.max)
assert_array_equal(ref, x)
ref = np.arange(0, -finfo.eps, 2 * finfo.eps)
x = np.arange(0, -finfo.eps, finfo.max)
assert_array_equal(ref, x)
def test_argmax(self):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
def test_mem_divmod(self):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
def test_hstack_invalid_dims(self):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
assert_raises(ValueError, np.hstack, (x, y))
def test_squeeze_type(self):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
def test_add_identity(self):
# Ticket #143
assert_equal(0, np.add.identity)
def test_numpy_float_python_long_addition(self):
# Check that numpy float and python longs can be added correctly.
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
def test_binary_repr_0(self):
# Ticket #151
assert_equal('0', np.binary_repr(0))
def test_rec_iterate(self):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
(2, 2.2, '2.0')], dtype=descr)
x[0].tolist()
[i for i in x[0]]
def test_unicode_string_comparison(self):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
def test_tobytes_FORTRANORDER_discontiguous(self):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes()))
def test_flat_assignment(self):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
assert_array_almost_equal(x, [[0], [1], [2]])
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
def test_broadcast_flat_assignment(self):
# Ticket #194
x = np.empty((3, 1))
def bfa():
x[:] = np.arange(3)
def bfb():
x[:] = np.arange(3, dtype=float)
assert_raises(ValueError, bfa)
assert_raises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
# and equivalent unsafe casted array assignment
a = np.arange(10)
b = np.ones(10, dtype=bool)
r = np.arange(10)
def assign(a, b, c):
a[b] = c
assert_raises(ValueError, assign, a, b, np.nan)
a[b] = np.array(np.nan) # but not this.
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
def test_unpickle_dtype_with_object(self):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
with BytesIO() as f:
pickle.dump(dt, f, protocol=proto)
f.seek(0)
dt_ = pickle.load(f)
assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
assert_raises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
def test_recarray_single_element(self):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
assert_array_equal(a, b)
assert_equal(a, r[0][0])
def test_zero_sized_array_indexing(self):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
assert_raises(IndexError, index_tmp)
def test_chararray_rstrip(self):
# Ticket #222
x = np.chararray((1,), 5)
x[0] = b'a '
x = x.rstrip()
assert_equal(x[0], b'a')
def test_object_array_shape(self):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
assert_equal(np.array([], dtype=object).shape, (0,))
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
def test_mem_around(self):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
def test_character_array_strip(self):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
def test_lexsort(self):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
class BuggySequence:
def __len__(self):
return 4
def __getitem__(self, key):
raise KeyError
assert_raises(KeyError, np.lexsort, BuggySequence())
def test_lexsort_zerolen_custom_strides(self):
# Ticket #14228
xs = np.array([], dtype='i8')
assert np.lexsort((xs,)).shape[0] == 0 # Works
xs.strides = (16,)
assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError
def test_lexsort_zerolen_custom_strides_2d(self):
xs = np.array([], dtype='i8')
xs.shape = (0, 2)
xs.strides = (16, 16)
assert np.lexsort((xs,), axis=0).shape[0] == 0
xs.shape = (2, 0)
xs.strides = (16, 16)
assert np.lexsort((xs,), axis=0).shape[0] == 2
def test_lexsort_invalid_axis(self):
assert_raises(np.AxisError, np.lexsort, (np.arange(1),), axis=2)
assert_raises(np.AxisError, np.lexsort, (np.array([]),), axis=1)
assert_raises(np.AxisError, np.lexsort, (np.array(1),), axis=10)
def test_lexsort_zerolen_element(self):
dt = np.dtype([]) # a void dtype with no fields
xs = np.empty(4, dt)
assert np.lexsort((xs,)).shape[0] == xs.shape[0]
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
test_data = [
# (original, py2_pickle)
(np.unicode_('\u6f2c'),
b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
b"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n."),
(np.array([9e123], dtype=np.float64),
b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
b"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
b"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
b"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb."),
(np.array([(9e123,)], dtype=[('name', float)]),
b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
b"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
b"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
b"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
b"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
b"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb."),
]
for original, data in test_data:
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
if isinstance(result, np.ndarray) and result.dtype.names is not None:
for name in result.dtype.names:
assert_(isinstance(name, str))
def test_pickle_dtype(self):
# Ticket #251
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
pickle.dumps(float, protocol=proto)
def test_swap_real(self):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
def test_object_array_from_list(self):
# Ticket #270 (gh-868)
assert_(np.array([1, None, 'A']).shape == (3,))
def test_multiple_assign(self):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
def test_empty_array_type(self):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
def test_void_copyswap(self):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
def test_method_args(self):
        # Make sure methods and functions have the same default axis
        # keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
('sometrue', 'any'),
('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
'round', 'min', 'max', 'argsort', 'sort']
funcs2 = ['compress', 'take', 'repeat']
for func in funcs1:
arr = np.random.rand(8, 7)
arr2 = arr.copy()
if isinstance(func, tuple):
func_meth = func[1]
func = func[0]
else:
func_meth = func
res1 = getattr(arr, func_meth)()
res2 = getattr(np, func)(arr2)
if res1 is None:
res1 = arr
if res1.dtype.kind in 'uib':
assert_((res1 == res2).all(), func)
else:
assert_(abs(res1-res2).max() < 1e-8, func)
for func in funcs2:
arr1 = np.random.rand(8, 7)
arr2 = np.random.rand(8, 7)
res1 = None
if func == 'compress':
arr1 = arr1.ravel()
res1 = getattr(arr2, func)(arr1)
else:
arr2 = (15*arr2).astype(int).ravel()
if res1 is None:
res1 = getattr(arr1, func)(arr2)
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
def test_mem_lexsort_strings(self):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
def test_fancy_index(self):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
def test_recarray_copy(self):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
rb = np.rec.array(ra, dtype=dt)
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
def test_rec_fromarray(self):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
def test_object_array_assign(self):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
def test_mem_float_imag(self):
# Ticket #330
np.float64(1.0).imag
def test_dtype_tuple(self):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self):
# Ticket #335
np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self):
# Ticket #341
assert_equal(np.array(['X'], 'c'), b'X')
def test_string_array_size(self):
# Ticket #342
assert_raises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self):
# Ticket #344
dt1 = np.dtype(('uint32', 2))
dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
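    # A minimal companion sketch (ours, no ticket): an integer subarray shape
    # and the equivalent 1-tuple normalize to the same dtype, which is why
    # the two reprs above must match.
    def test_dtype_subarray_shape_normalization_sketch(self):
        assert_equal(np.dtype(('uint32', 2)), np.dtype(('uint32', (2,))))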
def test_reshape_order(self):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self):
# GitHub Issue #2700, setting shape failed for 0-sized arrays
a = np.ones((0, 2))
a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
# With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
reason="Using relaxed stride debug")
def test_reshape_trailing_ones_strides(self):
# GitHub issue gh-2949, bad strides for trailing ones of new shape
a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
strides_c = (16, 8, 8, 8)
strides_f = (8, 24, 48, 48)
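        # The sliced base steps 2 * 4 = 8 bytes per element, so C order gives
        # (2*8, 8, 8, 8) and F order (8, 3*8, 6*8, 6*8); the trailing
        # length-1 axes just repeat the last meaningful stride.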
assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
def test_object_argmax(self):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)], "i4,i4"),
np.rec.array([(1, 2), (3, 4)]),
np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self):
# Changeset #3443
_assert_valid_refcount(np.vdot)
def test_startswith(self):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
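        # reduce/accumulate apply the op left to right, so subtraction gives
        # ((((0 - 1) - 2) - 3) - 4) == -10 and division
        # (2.0 / 0.5) / 0.25 == 16.0.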
assert_equal(np.subtract.reduce(tosubtract), -10)
assert_equal(np.divide.reduce(todivide), 16.0)
assert_array_equal(np.subtract.accumulate(tosubtract),
np.array([0, -1, -3, -6, -10]))
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
def test_convolve_empty(self):
# Convolve should raise an error for empty input array.
assert_raises(ValueError, np.convolve, [], [1])
assert_raises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self):
# Ticket #483
r = np.array([[b'abc']], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == b'abc')
def test_take_output(self):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
b = np.zeros_like(a)
np.take(x, [0, 2], axis=1, out=b)
assert_array_equal(a, b)
def test_take_object_fail(self):
# Issue gh-3001
d = 123.
a = np.array([d, 1], dtype=object)
if HAS_REFCOUNT:
ref_d = sys.getrefcount(d)
try:
a.take([0, 100])
except IndexError:
pass
if HAS_REFCOUNT:
assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
def test_frompyfunc_endian(self):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
big_endian = np.array([83.4, 83.5], dtype='>f8')
little_endian = np.array([83.4, 83.5], dtype='<f8')
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
def test_mem_string_arr(self):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
def test_arr_transpose(self):
# Ticket #516
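        # (2,)*16 expands to a 16-dimensional shape of twos (2**16 elements).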
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
def test_string_mergesort(self):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
def test_rand_seed(self):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
def test_mem_deallocation_leak(self):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
del a, b
def test_mem_on_invalid_dtype(self):
"Ticket #583"
assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
z = y[::-1].copy()
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
def rs():
x = np.ones([484, 286])
y = np.zeros([484, 286])
x |= y
assert_raises(TypeError, rs)
def test_unicode_scalar(self):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
new = pickle.loads(pickle.dumps(el, protocol=proto))
assert_equal(new, el)
def test_arange_non_native_dtype(self):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
assert_equal(np.arange(0, dtype=dt).dtype, dt)
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_flat_indexing_invalid_nr_elements(self):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
def test_binary_repr_0_width(self):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
def test_searchsorted_variable_length(self):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self):
# Check argsort for strings containing zeros.
x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self):
# Check sort for strings containing zeros.
x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_sign_bit(self):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[0. 0. 0.]')
def test_flat_index_byteswap(self):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
# With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
reason="Using relaxed stride debug")
def test_copy_detection_corner_case2(self):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
# NB. this is probably CPython-specific
cnt = sys.getrefcount
a = object()
b = object()
c = object()
cnt0_a = cnt(a)
cnt0_b = cnt(b)
cnt0_c = cnt(c)
# -- 0d -> 1-d broadcast slice assignment
arr = np.zeros(5, dtype=np.object_)
arr[:] = a
assert_equal(cnt(a), cnt0_a + 5)
arr[:] = b
assert_equal(cnt(a), cnt0_a)
assert_equal(cnt(b), cnt0_b + 5)
arr[:2] = c
assert_equal(cnt(b), cnt0_b + 3)
assert_equal(cnt(c), cnt0_c + 2)
del arr
# -- 1-d -> 2-d broadcast slice assignment
arr = np.zeros((5, 2), dtype=np.object_)
arr0 = np.zeros(2, dtype=np.object_)
arr0[0] = a
assert_(cnt(a) == cnt0_a + 1)
arr0[1] = b
assert_(cnt(b) == cnt0_b + 1)
arr[:, :] = arr0
assert_(cnt(a) == cnt0_a + 6)
assert_(cnt(b) == cnt0_b + 6)
arr[:, 0] = None
assert_(cnt(a) == cnt0_a + 1)
del arr, arr0
# -- 2-d copying + flattening
arr = np.zeros((5, 2), dtype=np.object_)
arr[:, 0] = a
arr[:, 1] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
arr2 = arr[:, 0].copy()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 5)
arr2 = arr.flatten()
assert_(cnt(a) == cnt0_a + 10)
assert_(cnt(b) == cnt0_b + 10)
del arr, arr2
# -- concatenate, repeat, take, choose
arr1 = np.zeros((5, 1), dtype=np.object_)
arr2 = np.zeros((5, 1), dtype=np.object_)
arr1[...] = a
arr2[...] = b
assert_(cnt(a) == cnt0_a + 5)
assert_(cnt(b) == cnt0_b + 5)
tmp = np.concatenate((arr1, arr2))
assert_(cnt(a) == cnt0_a + 5 + 5)
assert_(cnt(b) == cnt0_b + 5 + 5)
tmp = arr1.repeat(3, axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3*5)
tmp = arr1.take([1, 2, 3], axis=0)
assert_(cnt(a) == cnt0_a + 5 + 3)
x = np.array([[0], [1], [0], [1], [1]], int)
tmp = x.choose(arr1, arr2)
assert_(cnt(a) == cnt0_a + 5 + 2)
assert_(cnt(b) == cnt0_b + 5 + 3)
del tmp # Avoid pyflakes unused variable warning
def test_mem_custom_float_to_array(self):
# Ticket 702
class MyFloat:
def __float__(self):
return 1.0
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
def test_object_array_refcount_self_assign(self):
# Ticket #711
class VictimObject:
deleted = False
def __del__(self):
self.deleted = True
d = VictimObject()
arr = np.zeros(5, dtype=np.object_)
arr[:] = d
del d
arr[:] = arr # refcount of 'd' might hit zero here
assert_(not arr[0].deleted)
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self):
x = [1, 2, 3]
assert_raises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
def test_mem_0d_array_index(self):
# Ticket #714
np.zeros(10)[np.array(0)]
def test_nonnative_endian_fill(self):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
dtype = np.dtype('>i4')
else:
dtype = np.dtype('<i4')
x = np.empty([1], dtype=dtype)
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
y = pickle.loads(pickle.dumps(x, protocol=proto))
            # y is now typically not aligned on an 8-byte boundary
z = np.ones((1, y.shape[0]))
# This shouldn't cause a segmentation fault:
np.dot(z, y)
def test_astype_copy(self):
# Ticket #788, changeset r5155
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
with open(filename, 'rb') as f:
xp = pickle.load(f, encoding='latin1')
xpd = xp.astype(np.float64)
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
def test_compress_small_type(self):
# Ticket #789, changeset 5217.
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype=np.single)
try:
a.compress([True, False], axis=1, out=b)
raise AssertionError("compress with an out which cannot be "
"safely casted should not return "
"successfully")
except TypeError:
pass
def test_attributes(self):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
assert_(dat.info == 'jubba')
dat.resize((4, 2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2, 3, 4], [6, 3, 4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32, 0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2, 7).info == 'jubba')
assert_(dat.compress([0, 1, 1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0], 'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32, 0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2, 4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0, 1).info == 'jubba')
assert_(dat.take([2, 3, 5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
# These methods do not preserve subclasses
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
def test_recarray_tolist(self):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
        assert_(a[0].tolist() == b[0])
        assert_(a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
        # Make sure that .item() fails gracefully when it should
a = np.arange(5)
assert_raises(ValueError, a.item)
def test_char_array_creation(self):
a = np.array('123', dtype='c')
b = np.array([b'1', b'2', b'3'])
assert_equal(a, b)
def test_unaligned_unicode_access(self):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
x = np.array([(b'a', u'b')], dtype=t)
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
def test_sign_for_complex_nan(self):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self):
# Ticket #816
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.object_([None, {}, []])
assert_(a is None)
assert_(type(b) is int)
assert_(type(b2) is float)
assert_(type(c) is np.ndarray)
assert_(c.dtype == object)
assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
assert_raises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self):
"Ticket #882"
a = np.array(1)
assert_raises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
assert_raises(ValueError, lambda: np.array([1], ndmin=33))
def test_void_scalar_with_titles(self):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert_(arr[0][0] == 'john')
assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
        # Issue #1550
        # Create test string data, construct void scalar from data and assert
        # that void scalar contains original data.
test_string = np.array("test")
test_string_void_scalar = np.core.multiarray.scalar(
np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
        # Create record scalar, construct from data and assert that
        # reconstructed scalar is correct.
test_record = np.ones((), "i,i")
test_record_void_scalar = np.core.multiarray.scalar(
test_record.dtype, test_record.tobytes())
assert_(test_record_void_scalar == test_record)
# Test pickle and unpickle of void and record scalars
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(
pickle.dumps(test_string, protocol=proto)) == test_string)
assert_(pickle.loads(
pickle.dumps(test_record, protocol=proto)) == test_record)
@_no_tracing
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in range(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
if IS_PYPY:
x.resize((m, 0), refcheck=False)
else:
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
if IS_PYPY:
y.resize((0, n), refcheck=False)
else:
y.resize((0, n))
                    # `dot` should just return a zero (m, n) matrix
z = np.dot(x, y)
assert_(np.all(z == 0))
assert_(z.shape == (m, n))
def test_zeros(self):
# Regression test for #1061.
        # Set a size which cannot fit into a 64-bit signed integer
sz = 2 ** 64
with assert_raises_regex(ValueError,
'Maximum allowed dimension exceeded'):
np.empty(sz)
def test_huge_arange(self):
# Regression test for #1062.
        # Set a size which cannot fit into a 64-bit signed integer
sz = 2 ** 64
with assert_raises_regex(ValueError,
'Maximum allowed size exceeded'):
np.arange(sz)
def test_fromiter_bytes(self):
# Ticket #1058
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
# Ticket #1078: segfaults when creating an array with a sequence of
# 0d arrays.
a = np.array((np.ones(2), np.array(2)), dtype=object)
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)), dtype=object)
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
# Ticket #1081: weird array with strange input...
t = np.array([np.array([]), np.array(0, object)], dtype=object)
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
# Ticket #1080.
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
# Ticket #1106.
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(TypeError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
# Ticket #1217, don't segfault.
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
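        # Both keys are constant, so a stable lexsort must return the
        # identity permutation.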
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
# Ticket #1240.
a = np.array([[u'abc', u'\u03a3'],
[u'asdf', u'erw']],
dtype='U')
assert_raises(UnicodeEncodeError, np.array, a, 'S4')
def test_unicode_to_string_cast_error(self):
# gh-15790
a = np.array([u'\x80'] * 129, dtype='U3')
assert_raises(UnicodeEncodeError, np.array, a, 'S')
b = a.reshape(3, 43)[:-1, :-1]
assert_raises(UnicodeEncodeError, np.array, b, 'S')
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert_(a.itemsize == 16)
a = np.array([u'123', '1234'])
assert_(a.itemsize == 16)
a = np.array(['1234', u'123', '12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'12345'])
assert_(a.itemsize == 20)
a = np.array([u'123', '1234', u'1234'])
assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
# Ticket #1198 and #1267
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
# Ticket #1267
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
# Ticket #1267
# This didn't require a fix, but it's worth testing anyway, because
        # it may fail if .dot stops enforcing that the arrays are BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
# Ticket #1259 and gh-441
for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
z = np.array([2.2-1.1j], dtype)
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))
else:
# big-endian machine
assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))
# double check real and imaginary parts:
assert_equal(x.real, y.real.byteswap())
assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
# Ticket #1299
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(x[0, 1] == x[0, 0])
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_structured_arrays_with_objects2(self):
# Ticket #1299 second test
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0, stra), (1, strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert_(sys.getrefcount(strb) == numb)
assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
# Ticket #1254
dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
assert_raises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
with np.errstate(over="ignore"):
for t in (np.int8, np.int16, np.int32, np.int64, int):
test_type(t)
def test_buffer_hashlib(self):
from hashlib import sha256
x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
assert_equal(sha256(x).hexdigest(), '4636993d3e1da4e9d6b8f87b79e8f7c6d018580d52661950eabc3845c5897a4d')
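    # A minimal companion sketch (ours, no ticket): hashlib consumes the raw
    # buffer of the array, so hashing the array and hashing its bytes agree.
    def test_buffer_hashlib_tobytes_sketch(self):
        from hashlib import sha256
        x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
        assert_equal(sha256(x).hexdigest(), sha256(x.tobytes()).hexdigest())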
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
with assert_warns(DeprecationWarning):
np.fromstring(b'aa, aa, 1.0', sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.sctypeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timedelta64))]
a = np.array([], np.bool_) # not x[0] because it is unordered
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls, i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1, 2, 3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=float)
a.shape = (4, 4)
lut = np.ones((5 + 3, 4), float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
        # On Python 3, tofile/fromfile used to get the Python
        # file handle out of sync (#1610)
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tobytes())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, b"\x01\x02\x03")
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning)
assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
# Ticket #2218
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
assert_(np.any(x))
assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
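        # byteswap() flips the bytes in memory and newbyteorder() flips the
        # dtype's declared order, so together they preserve each logical
        # value while changing the underlying byte layout.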
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
# The second use of itemsize was throwing an exception because in
# ctors.c, discover_itemsize was calling PyObject_Length without
# checking the return code. This failed to get the length of the
# number 2, and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
# Similar to GitHub issue #387
a = np.zeros((1, 2)).squeeze()
b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.f_contiguous)
def test_squeeze_axis_handling(self):
# Issue #10779
# Ensure proper handling of objects
# that don't support axis specification
# when squeezing
class OldSqueeze(np.ndarray):
def __new__(cls,
input_array):
obj = np.asarray(input_array).view(cls)
return obj
# it is perfectly reasonable that prior
# to numpy version 1.7.0 a subclass of ndarray
# might have been created that did not expect
# squeeze to have an axis argument
# NOTE: this example is somewhat artificial;
# it is designed to simulate an old API
# expectation to guard against regression
def squeeze(self):
return super().squeeze()
        oldsqueeze = OldSqueeze(np.array([[1], [2], [3]]))
# if no axis argument is specified the old API
# expectation should give the correct result
assert_equal(np.squeeze(oldsqueeze),
                     np.array([1, 2, 3]))
# likewise, axis=None should work perfectly well
# with the old API expectation
assert_equal(np.squeeze(oldsqueeze, axis=None),
                     np.array([1, 2, 3]))
# however, specification of any particular axis
# should raise a TypeError in the context of the
# old API specification, even when using a valid
# axis specification like 1 for this array
with assert_raises(TypeError):
# this would silently succeed for array
# subclasses / objects that did not support
# squeeze axis argument handling before fixing
# Issue #10779
np.squeeze(oldsqueeze, axis=1)
# check for the same behavior when using an invalid
# axis specification -- in this case axis=0 does not
# have size 1, but the priority should be to raise
# a TypeError for the axis argument and NOT a
# ValueError for squeezing a non-empty dimension
with assert_raises(TypeError):
np.squeeze(oldsqueeze, axis=0)
# the new API knows how to handle the axis
# argument and will return a ValueError if
# attempting to squeeze an axis that is not
# of length 1
with assert_raises(ValueError):
            np.squeeze(np.array([[1], [2], [3]]), axis=0)
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(RecursionError, int, a)
assert_raises(RecursionError, float, a)
a[()] = None
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_object_array_circular_reference(self):
# Test the same for a circular reference.
a = np.array(0, dtype=object)
b = np.array(0, dtype=object)
a[()] = b
b[()] = a
assert_raises(RecursionError, int, a)
# NumPy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = None
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_nested(self):
# but is fine with a reference to a different array
a = np.array(0, dtype=object)
b = np.array(0, dtype=object)
a[()] = b
assert_equal(int(a), int(0))
assert_equal(float(a), float(0))
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
if HAS_REFCOUNT:
assert_(sys.getrefcount(a[()]) == 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield should not overwrite Object fields with non-Object data
x = np.array([1, 2, 3], dtype=object)
assert_raises(TypeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = b"hello1"
s2 = b"hello2"
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = b'black'
s2 = b'white'
s3 = b'other'
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
# Ticket #1756
s = b'0123456789abcdef'
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d" % i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"gh-2355"
r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
# Check that alignment flag is updated on stride setting
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except Exception:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data, protocol=proto))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
b"p13\ntp14\nb.")
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
        # Check that the scalar unpickling hack in Py3 that supports
        # encoding='latin1' works correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
b"tp8\nRp9\n."),
'different'),
]
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
# Decoding under non-latin1 encoding (e.g.) KOI8-R can
# produce bad results, but should not segfault.
if koi8r_validity == 'different':
# Unicode code points happen to lie within latin1,
                # but are different in koi8-r, resulting in silent
                # bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
elif koi8r_validity == 'invalid':
                # Unicode code points outside latin1, so decoding
                # raises an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
@pytest.mark.slow_pypy
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings (gh-2583)
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]:
b = np.array([val, tostr('xx')], dtype=dtype)
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val], dtype=dtype)
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype)
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype)
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
        # Ticket #2081. A Python build with two-byte unicode
        # could lead to truncation if the itemsize was not properly
        # adjusted for NumPy's four-byte unicode.
a = np.array(['abcd'])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
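        # With a stable argsort the first occurrence of each value wins, so
        # return_index must report indices 0, 5 and 11 here.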
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode_)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2,), dtype)
a[...] = [(1, 2), (3, 4)]
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],
[u'F', u'o', u'o', u'b', u'']]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
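        # A length-2 sequence cannot broadcast to the 3-element slice below,
        # while a length-1 sequence can.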
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
assert_(arr is not arr_cp)
assert_(isinstance(arr_cp, type(arr)))
def test_deepcopy_F_order_object_array(self):
# Ticket #6456.
a = {'a': 1}
b = {'b': 2}
arr = np.array([[a, b], [a, b]], order='F')
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_(arr is not arr_cp)
# Ensure that we have actually copied the item.
assert_(arr[0, 1] is not arr_cp[1, 1])
# Ensure we are allowed to have references to the same object.
assert_(arr[0, 1] is arr[1, 1])
# Check the references hold for the copied objects.
assert_(arr_cp[0, 1] is arr_cp[1, 1])
def test_deepcopy_empty_object_array(self):
# Ticket #8536.
# Deepcopy should succeed
a = np.array([], dtype=object)
b = copy.deepcopy(a)
assert_(a.shape == b.shape)
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
import operator as op
# dummy class where __array__ throws exception
class Foo:
__array_priority__ = 1002
def __array__(self, *args, **kwargs):
raise Exception()
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
assert_raises(TypeError, f, lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1, 2, 3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
test_string = np.string_('')
assert_equal(pickle.loads(
pickle.dumps(test_string, protocol=proto)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
def passer(*args):
pass
assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
def test_repeat_broadcasting(self):
# gh-5743
a = np.arange(60).reshape(3, 4, 5)
for axis in chain(range(-a.ndim, a.ndim), [None]):
assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
def test_frompyfunc_nout_0(self):
# gh-2014
def f(x):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]], dtype=object)
assert_equal(uf(a), ())
expected = np.array([[3, 2, 1], [5, 4], [9, 7, 8, 6]], dtype=object)
assert_array_equal(a, expected)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
('d', (str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
for i in range(100):
a == a
assert_(sys.getrefcount(a) < 10)
# The case in the bug report.
before = sys.getrefcount(a)
u, v = a[0], a[1]
u == v
del u, v
gc.collect()
after = sys.getrefcount(a)
assert_equal(before, after)
def test_empty_percentile(self):
# gh-6530 / gh-6553
assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
def test_void_compare_segfault(self):
# gh-6922. The following should not segfault
a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
a.sort()
def test_reshape_size_overflow(self):
# gh-7455
a = np.ones(20)[::2]
if np.dtype(np.intp).itemsize == 8:
# 64 bit. The following are the prime factors of 2**63 + 5,
# plus a leading 2, so when multiplied together as int64,
# the result overflows to a total size of 10.
new_shape = (2, 13, 419, 691, 823, 2977518503)
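            # i.e. 2 * (2**63 + 5) == 2**64 + 10, which wraps to 10 in
            # signed 64-bit arithmetic.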
else:
# 32 bit. The following are the prime factors of 2**31 + 5,
# plus a leading 2, so when multiplied together as int32,
# the result overflows to a total size of 10.
new_shape = (2, 7, 7, 43826197)
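            # i.e. 2 * (2**31 + 5) == 2**32 + 10, which wraps to 10 in
            # signed 32-bit arithmetic.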
assert_raises(ValueError, a.reshape, new_shape)
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
reason="PyPy bug in error formatting")
def test_invalid_structured_dtypes(self):
# gh-2865
# mapping python objects to other dtypes
assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')]))
assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')]))
assert_raises(ValueError, np.dtype,
('i8', [('name', [('name', 'O')])]))
assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O'))
assert_raises(ValueError, np.dtype, ('i8', 'O'))
# wrong number/type of tuple elements in dict
assert_raises(ValueError, np.dtype,
('i', {'name': ('i', 0, 'title', 'oops')}))
assert_raises(ValueError, np.dtype,
('i', {'name': ('i', 'wrongtype', 'title')}))
# disallowed as of 1.13
assert_raises(ValueError, np.dtype,
([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')]))
# allowed as a special case due to existing use, see gh-2798
a = np.ones(1, dtype=('O', [('name', 'O')]))
assert_equal(a[0], 1)
# In particular, the above union dtype (and union dtypes in general)
# should mainly behave like the main (object) dtype:
assert a[0] is a.item()
assert type(a[0]) is int
def test_correct_hash_dict(self):
# gh-8887 - __hash__ would be None despite tp_hash being set
all_types = set(np.sctypeDict.values()) - {np.void}
for t in all_types:
val = t()
try:
hash(val)
            except TypeError:
                assert_equal(t.__hash__, None)
            else:
                assert_(t.__hash__ is not None)
def test_scalar_copy(self):
scalar_types = set(np.sctypeDict.values())
values = {
np.void: b"a",
np.bytes_: b"a",
np.unicode_: "a",
np.datetime64: "2017-08-25",
}
for sctype in scalar_types:
item = sctype(values.get(sctype, 1))
item2 = copy.copy(item)
assert_equal(item, item2)
def test_void_item_memview(self):
va = np.zeros(10, 'V4')
x = va[:1].item()
va[0] = b'\xff\xff\xff\xff'
del va
assert_equal(x, b'\x00\x00\x00\x00')
def test_void_getitem(self):
# Test fix for gh-11668.
assert_(np.array([b'a'], 'V1').astype('O') == b'a')
assert_(np.array([b'ab'], 'V2').astype('O') == b'ab')
assert_(np.array([b'abc'], 'V3').astype('O') == b'abc')
assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd')
def test_structarray_title(self):
# The following used to segfault on pypy, due to NPY_TITLE_KEY
        # not working properly, resulting in a double-decref of the
# structured array field items:
# See: https://bitbucket.org/pypy/pypy/issues/2789
for j in range(5):
structure = np.array([1], dtype=[(('x', 'X'), np.object_)])
structure[0]['x'] = np.array([2])
gc.collect()
def test_dtype_scalar_squeeze(self):
# gh-11384
values = {
'S': b"a",
'M': "2018-06-20",
}
for ch in np.typecodes['All']:
if ch in 'O':
continue
sctype = np.dtype(ch).type
scvalue = sctype(values.get(ch, 3))
for axis in [None, ()]:
squeezed = scvalue.squeeze(axis=axis)
assert_equal(squeezed, scvalue)
assert_equal(type(squeezed), type(scvalue))
def test_field_access_by_title(self):
# gh-11507
s = 'Some long field name'
if HAS_REFCOUNT:
base = sys.getrefcount(s)
t = np.dtype([((s, 'f1'), np.float64)])
data = np.zeros(10, t)
for i in range(10):
str(data[['f1']])
if HAS_REFCOUNT:
assert_(base <= sys.getrefcount(s))
@pytest.mark.parametrize('val', [
# arrays and scalars
np.ones((10, 10), dtype='int32'),
np.uint64(10),
])
@pytest.mark.parametrize('protocol',
range(2, pickle.HIGHEST_PROTOCOL + 1)
)
def test_pickle_module(self, protocol, val):
# gh-12837
s = pickle.dumps(val, protocol)
assert b'_multiarray_umath' not in s
if protocol == 5 and len(val.shape) > 0:
# unpickling ndarray goes through _frombuffer for protocol 5
assert b'numpy.core.numeric' in s
else:
assert b'numpy.core.multiarray' in s
def test_object_casting_errors(self):
# gh-11993 update to ValueError (see gh-16909), since strings can in
# principle be converted to complex, but this string cannot.
arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
assert_raises(ValueError, arr.astype, 'c8')
    def test_ediff1d_casting(self):
# gh-12711
x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
        # The use of safe casting means that 1<<20 is cast unsafely; an
# error may be better, but currently there is no mechanism for it.
res = np.ediff1d(x, to_begin=(1<<20), to_end=(1<<20))
assert_equal(res, [0, 1, 2, 3, -7, 0])
def test_pickle_datetime64_array(self):
# gh-12745 (would fail with pickle5 installed)
d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
arr = np.array([d])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(arr, protocol=proto)
assert_equal(pickle.loads(dumped), arr)
def test_bad_array_interface(self):
class T:
__array_interface__ = {}
with assert_raises(ValueError):
np.array([T()])
def test_2d__array__shape(self):
class T:
def __array__(self):
return np.ndarray(shape=(0,0))
# Make sure __array__ is used instead of Sequence methods.
def __iter__(self):
return iter([])
def __getitem__(self, idx):
raise AssertionError("__getitem__ was called")
def __len__(self):
return 0
t = T()
# gh-13659, would raise in broadcasting [x=t for x in result]
arr = np.array([t])
assert arr.shape == (1, 0, 0)
@pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
def test_to_ctypes(self):
        # gh-14214
arr = np.zeros((2 ** 31 + 1,), 'b')
assert arr.size * arr.itemsize > 2 ** 31
c_arr = np.ctypeslib.as_ctypes(arr)
assert_equal(c_arr._length_, arr.size)
def test_complex_conversion_error(self):
# gh-17068
with pytest.raises(TypeError, match=r"Unable to convert dtype.*"):
complex(np.array("now", np.datetime64))
def test__array_interface__descr(self):
# gh-17068
dt = np.dtype(dict(names=['a', 'b'],
offsets=[0, 0],
formats=[np.int64, np.int64]))
descr = np.array((1, 1), dtype=dt).__array_interface__['descr']
assert descr == [('', '|V8')] # instead of [(b'', '|V8')]
@pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
@requires_memory(free_bytes=9e9)
def test_dot_big_stride(self):
# gh-17111
# blas stride = stride//itemsize > int32 max
int32_max = np.iinfo(np.int32).max
n = int32_max + 3
a = np.empty([n], dtype=np.float32)
b = a[::n-1]
b[...] = 1
assert b.strides[0] > int32_max * b.dtype.itemsize
assert np.dot(b, b) == 2.0
def test_frompyfunc_name(self):
# name conversion was failing for python 3 strings
# resulting in the default '?' name. Also test utf-8
        # encoding using a non-ascii name.
def cassé(x):
return x
f = np.frompyfunc(cassé, 1, 1)
assert str(f) == "<ufunc 'cassé (vectorized)'>"
@pytest.mark.parametrize("operation", [
'add', 'subtract', 'multiply', 'floor_divide',
'conjugate', 'fmod', 'square', 'reciprocal',
'power', 'absolute', 'negative', 'positive',
'greater', 'greater_equal', 'less',
'less_equal', 'equal', 'not_equal', 'logical_and',
'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or',
'bitwise_xor', 'invert', 'left_shift', 'right_shift',
'gcd', 'lcm'
]
)
@pytest.mark.parametrize("order", [
('b->', 'B->'),
('h->', 'H->'),
('i->', 'I->'),
('l->', 'L->'),
('q->', 'Q->'),
]
)
def test_ufunc_order(self, operation, order):
# gh-18075
# Ensure signed types before unsigned
def get_idx(string, str_lst):
for i, s in enumerate(str_lst):
if string in s:
return i
raise ValueError(f"{string} not in list")
types = getattr(np, operation).types
assert get_idx(order[0], types) < get_idx(order[1], types), (
f"Unexpected types order of ufunc in {operation}"
f"for {order}. Possible fix: Use signed before unsigned"
"in generate_umath.py")
| 91,101 | Python | 34.614543 | 111 | 0.527195 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_hashtable.py | import pytest
import random
from numpy.core._multiarray_tests import identityhash_tester
@pytest.mark.parametrize("key_length", [1, 3, 6])
@pytest.mark.parametrize("length", [1, 16, 2000])
def test_identity_hashtable(key_length, length):
    # use a 20 object pool for everything (duplicates will happen)
pool = [object() for i in range(20)]
keys_vals = []
for i in range(length):
keys = tuple(random.choices(pool, k=key_length))
keys_vals.append((keys, random.choice(pool)))
dictionary = dict(keys_vals)
# add a random item at the end:
keys_vals.append(random.choice(keys_vals))
# the expected one could be different with duplicates:
expected = dictionary[keys_vals[-1][0]]
res = identityhash_tester(key_length, keys_vals, replace=True)
assert res is expected
# check that ensuring one duplicate definitely raises:
keys_vals.insert(0, keys_vals[-2])
with pytest.raises(RuntimeError):
identityhash_tester(key_length, keys_vals)
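# A minimal illustrative sketch (not part of the test above): the C-level
# table keys on object *identity*, which a plain dict keyed on id() mimics,
# provided the keyed objects stay alive for the lifetime of the table.
def _identity_keying_sketch():
    a, b = object(), object()                  # distinct but interchangeable
    table = {id(a): 'a', id(b): 'b'}
    return len(table) == 2                     # identity keeps them apart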
| 1,011 | Python | 31.64516 | 66 | 0.691395 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_umath_complex.py | import sys
import platform
import pytest
import numpy as np
# import the c-extension module directly since _arg is not exported via umath
import numpy.core._multiarray_umath as ncu
from numpy.testing import (
assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp
)
# TODO: branch cuts (use Pauli code)
# TODO: conj 'symmetry'
# TODO: FPU exceptions
# At least on Windows the results of many complex functions are not conforming
# to the C99 standard. See ticket 1574.
# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
# FIXME: this will probably change when we require full C99 compatibility
with np.errstate(all='ignore'):
functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
or (np.log(complex(np.NZERO, 0)).imag != np.pi))
# TODO: replace with a check on whether platform-provided C99 funcs are used
xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
# TODO: This can become an xfail once the generator functions are removed.
platform_skip = pytest.mark.skipif(xfail_complex_tests,
reason="Inadequate C99 complex support")
class TestCexp:
def test_simple(self):
check = check_complex_value
f = np.exp
check(f, 1, 0, np.exp(1), 0, False)
check(f, 0, 1, np.cos(1), np.sin(1), False)
ref = np.exp(1) * complex(np.cos(1), np.sin(1))
check(f, 1, 1, ref.real, ref.imag, False)
@platform_skip
def test_special_values(self):
# C99: Section G 6.3.1
check = check_complex_value
f = np.exp
# cexp(+-0 + 0i) is 1 + 0i
check(f, np.PZERO, 0, 1, 0, False)
check(f, np.NZERO, 0, 1, 0, False)
# cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
# exception
check(f, 1, np.inf, np.nan, np.nan)
check(f, -1, np.inf, np.nan, np.nan)
check(f, 0, np.inf, np.nan, np.nan)
# cexp(inf + 0i) is inf + 0i
check(f, np.inf, 0, np.inf, 0)
# cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
check(f, -np.inf, 1, np.PZERO, np.PZERO)
check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO)
# cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
check(f, np.inf, 1, np.inf, np.inf)
check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf)
# cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
def _check_ninf_inf(dummy):
msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(-np.inf, np.inf)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
_check_ninf_inf(None)
# cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
def _check_inf_inf(dummy):
msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(np.inf, np.inf)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
_check_inf_inf(None)
# cexp(-inf + nan i) is +-0 +- 0i
def _check_ninf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(-np.inf, np.nan)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
_check_ninf_nan(None)
# cexp(inf + nan i) is +-inf + nan
def _check_inf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(np.inf, np.nan)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
_check_inf_nan(None)
# cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
# ex)
check(f, np.nan, 1, np.nan, np.nan)
check(f, np.nan, -1, np.nan, np.nan)
check(f, np.nan, np.inf, np.nan, np.nan)
check(f, np.nan, -np.inf, np.nan, np.nan)
# cexp(nan + nani) is nan + nani
check(f, np.nan, np.nan, np.nan, np.nan)
    # TODO: This can become an xfail once the generator functions are removed.
@pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms")
def test_special_values2(self):
# XXX: most implementations get it wrong here (including glibc <= 2.10)
# cexp(nan + 0i) is nan + 0i
check = check_complex_value
f = np.exp
check(f, np.nan, 0, np.nan, 0)
class TestClog:
def test_simple(self):
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
y = np.log(x)
assert_almost_equal(y, y_r)
@platform_skip
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_special_values(self):
xl = []
yl = []
# From C99 std (Sec 6.3.2)
# XXX: check exceptions raised
# --- raise for invalid fails.
# clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
x = np.array([np.NZERO], dtype=complex)
y = complex(-np.inf, np.pi)
assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
x = np.array([0], dtype=complex)
y = complex(-np.inf, 0)
assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(x + i*inf) returns +inf + i*pi/2, for finite x.
x = np.array([complex(1, np.inf)], dtype=complex)
y = complex(np.inf, 0.5 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
x = np.array([complex(-1, np.inf)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(x + iNaN) returns NaN + iNaN and optionally raises the
        # 'invalid' floating-point exception, for finite x.
with np.errstate(invalid='raise'):
x = np.array([complex(1., np.nan)], dtype=complex)
y = complex(np.nan, np.nan)
#assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
with np.errstate(invalid='raise'):
x = np.array([np.inf + 1j * np.nan], dtype=complex)
#assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(-inf + iy) returns +inf + i*pi, for finite positive-signed y.
x = np.array([-np.inf + 1j], dtype=complex)
y = complex(np.inf, np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(+inf + iy) returns +inf + i0, for finite positive-signed y.
x = np.array([np.inf + 1j], dtype=complex)
y = complex(np.inf, 0)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(-inf + i*inf) returns +inf + i*3*pi/4.
x = np.array([complex(-np.inf, np.inf)], dtype=complex)
y = complex(np.inf, 0.75 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
        # clog(+inf + i*inf) returns +inf + i*pi/4.
x = np.array([complex(np.inf, np.inf)], dtype=complex)
y = complex(np.inf, 0.25 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+/- inf + iNaN) returns +inf + iNaN.
x = np.array([complex(np.inf, np.nan)], dtype=complex)
y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
x = np.array([complex(-np.inf, np.nan)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iy) returns NaN + iNaN and optionally raises the
# 'invalid' floating-point exception, for finite y.
x = np.array([complex(np.nan, 1)], dtype=complex)
y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + i inf) returns +inf + iNaN.
x = np.array([complex(np.nan, np.inf)], dtype=complex)
y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iNaN) returns NaN + iNaN.
x = np.array([complex(np.nan, np.nan)], dtype=complex)
y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(conj(z)) = conj(clog(z)).
xa = np.array(xl, dtype=complex)
ya = np.array(yl, dtype=complex)
with np.errstate(divide='ignore'):
for i in range(len(xa)):
assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
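# A minimal illustrative sketch (not part of the suite): the conjugate
# symmetry verified at the end of TestClog, clog(conj(z)) == conj(clog(z)).
def _clog_conjugate_sketch():
    z = np.array([1 + 2j])
    return np.log(z.conj()), np.log(z).conj()   # equal up to rounding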
class TestCsqrt:
def test_simple(self):
# sqrt(1)
check_complex_value(np.sqrt, 1, 0, 1, 0)
# sqrt(1i)
rres = 0.5*np.sqrt(2)
ires = rres
check_complex_value(np.sqrt, 0, 1, rres, ires, False)
# sqrt(-1)
check_complex_value(np.sqrt, -1, 0, 0, 1)
def test_simple_conjugate(self):
ref = np.conj(np.sqrt(complex(1, 1)))
def f(z):
return np.sqrt(np.conj(z))
check_complex_value(f, 1, 1, ref.real, ref.imag, False)
#def test_branch_cut(self):
# _check_branch_cut(f, -1, 0, 1, -1)
@platform_skip
def test_special_values(self):
# C99: Sec G 6.4.2
check = check_complex_value
f = np.sqrt
# csqrt(+-0 + 0i) is 0 + 0i
check(f, np.PZERO, 0, 0, 0)
check(f, np.NZERO, 0, 0, 0)
# csqrt(x + infi) is inf + infi for any x (including NaN)
check(f, 1, np.inf, np.inf, np.inf)
check(f, -1, np.inf, np.inf, np.inf)
check(f, np.PZERO, np.inf, np.inf, np.inf)
check(f, np.NZERO, np.inf, np.inf, np.inf)
check(f, np.inf, np.inf, np.inf, np.inf)
check(f, -np.inf, np.inf, np.inf, np.inf)
check(f, -np.nan, np.inf, np.inf, np.inf)
# csqrt(x + nani) is nan + nani for any finite x
check(f, 1, np.nan, np.nan, np.nan)
check(f, -1, np.nan, np.nan, np.nan)
check(f, 0, np.nan, np.nan, np.nan)
# csqrt(-inf + yi) is +0 + infi for any finite y > 0
check(f, -np.inf, 1, np.PZERO, np.inf)
# csqrt(inf + yi) is +inf + 0i for any finite y > 0
check(f, np.inf, 1, np.inf, np.PZERO)
        # csqrt(-inf + nani) is nan +- infi (either sign of i*inf is valid)
def _check_ninf_nan(dummy):
msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
z = np.sqrt(np.array(complex(-np.inf, np.nan)))
            # FIXME: ugly workaround for isinf bug.
with np.errstate(invalid='ignore'):
if not (np.isnan(z.real) and np.isinf(z.imag)):
raise AssertionError(msgform % (z.real, z.imag))
_check_ninf_nan(None)
# csqrt(+inf + nani) is inf + nani
check(f, np.inf, np.nan, np.inf, np.nan)
        # csqrt(nan + yi) is nan + nani for any finite y (infinite y is
        # handled by the x + i*inf case above)
check(f, np.nan, 0, np.nan, np.nan)
check(f, np.nan, 1, np.nan, np.nan)
check(f, np.nan, np.nan, np.nan, np.nan)
# XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
# cuts first)
class TestCpow:
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
def test_simple(self):
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = x ** 2
y = np.power(x, 2)
assert_almost_equal(y, y_r)
def test_scalar(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
# Hardcode the expected `builtins.complex` values,
# as complex exponentiation is broken as of bpo-44698
p_r = [
1+0j,
0.20787957635076193+0j,
0.35812203996480685+0.6097119028618724j,
0.12659112128185032+0.48847676699581527j,
complex(np.inf, np.nan),
complex(np.nan, np.nan),
]
n_r = [x[i] ** y[i] for i in lx]
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
def test_array(self):
x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
lx = list(range(len(x)))
# Hardcode the expected `builtins.complex` values,
# as complex exponentiation is broken as of bpo-44698
p_r = [
1+0j,
0.20787957635076193+0j,
0.35812203996480685+0.6097119028618724j,
0.12659112128185032+0.48847676699581527j,
complex(np.inf, np.nan),
complex(np.nan, np.nan),
]
n_r = x ** y
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
class TestCabs:
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
def test_simple(self):
x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
y = np.abs(x)
assert_almost_equal(y, y_r)
def test_fabs(self):
# Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
x = np.array([1+0j], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(1, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(np.inf, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
x = np.array([complex(np.nan, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
def test_cabs_inf_nan(self):
x, y = [], []
# cabs(+-nan + nani) returns nan
x.append(np.nan)
y.append(np.nan)
check_real_value(np.abs, np.nan, np.nan, np.nan)
x.append(np.nan)
y.append(-np.nan)
check_real_value(np.abs, -np.nan, np.nan, np.nan)
        # According to the C99 standard, if exactly one of the real/imag
        # parts is inf and the other is nan, then cabs should return inf
x.append(np.inf)
y.append(np.nan)
check_real_value(np.abs, np.inf, np.nan, np.inf)
x.append(-np.inf)
y.append(np.nan)
check_real_value(np.abs, -np.inf, np.nan, np.inf)
# cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
def f(a):
return np.abs(np.conj(a))
def g(a, b):
return np.abs(complex(a, b))
xa = np.array(x, dtype=complex)
assert len(xa) == len(x) == len(y)
for xi, yi in zip(x, y):
ref = g(xi, yi)
check_real_value(f, xi, yi, ref)
class TestCarg:
def test_simple(self):
check_real_value(ncu._arg, 1, 0, 0, False)
check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
    # TODO: This can become an xfail once the generator functions are removed.
@pytest.mark.skip(
reason="Complex arithmetic with signed zero fails on most platforms")
def test_zero(self):
# carg(-0 +- 0i) returns +- pi
check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False)
check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False)
# carg(+0 +- 0i) returns +- 0
check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO)
# carg(x +- 0i) returns +- 0 for x > 0
check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False)
check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False)
# carg(x +- 0i) returns +- pi for x < 0
check_real_value(ncu._arg, -1, np.PZERO, np.pi, False)
check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False)
# carg(+- 0 + yi) returns pi/2 for y > 0
check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False)
check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False)
# carg(+- 0 + yi) returns -pi/2 for y < 0
        check_real_value(ncu._arg, np.PZERO, -1, -0.5 * np.pi, False)
check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False)
#def test_branch_cuts(self):
# _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
def test_special_values(self):
# carg(-np.inf +- yi) returns +-pi for finite y > 0
check_real_value(ncu._arg, -np.inf, 1, np.pi, False)
check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
# carg(np.inf +- yi) returns +-0 for finite y > 0
check_real_value(ncu._arg, np.inf, 1, np.PZERO, False)
check_real_value(ncu._arg, np.inf, -1, np.NZERO, False)
# carg(x +- np.infi) returns +-pi/2 for finite x
check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False)
check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
# carg(-np.inf +- np.infi) returns +-3pi/4
check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False)
check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
# carg(np.inf +- np.infi) returns +-pi/4
check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False)
check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
# carg(x + yi) returns np.nan if x or y is nan
check_real_value(ncu._arg, np.nan, 0, np.nan, False)
check_real_value(ncu._arg, 0, np.nan, np.nan, False)
check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
def check_real_value(f, x1, y1, x, exact=True):
z1 = np.array([complex(x1, y1)])
if exact:
assert_equal(f(z1), x)
else:
assert_almost_equal(f(z1), x)
def check_complex_value(f, x1, y1, x2, y2, exact=True):
z1 = np.array([complex(x1, y1)])
z2 = complex(x2, y2)
with np.errstate(invalid='ignore'):
if exact:
assert_equal(f(z1), z2)
else:
assert_almost_equal(f(z1), z2)
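# A minimal illustrative sketch (not part of the suite) of how the two
# helpers above are called: f is applied to x1 + y1*i and compared against
# x (real-valued helper) or x2 + y2*i (complex-valued helper).
def _check_helper_usage_sketch():
    check_real_value(np.abs, 3, 4, 5)            # |3 + 4j| == 5 exactly
    check_complex_value(np.conj, 1, 2, 1, -2)    # conj(1 + 2j) == 1 - 2j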
class TestSpecialComplexAVX:
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
@pytest.mark.parametrize("astype", [np.complex64, np.complex128])
def test_array(self, stride, astype):
arr = np.array([complex(np.nan , np.nan),
complex(np.nan , np.inf),
complex(np.inf , np.nan),
complex(np.inf , np.inf),
complex(0. , np.inf),
complex(np.inf , 0.),
complex(0. , 0.),
complex(0. , np.nan),
complex(np.nan , 0.)], dtype=astype)
abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype)
sq_true = np.array([complex(np.nan, np.nan),
complex(np.nan, np.nan),
complex(np.nan, np.nan),
complex(np.nan, np.inf),
complex(-np.inf, np.nan),
complex(np.inf, np.nan),
complex(0., 0.),
complex(np.nan, np.nan),
complex(np.nan, np.nan)], dtype=astype)
assert_equal(np.abs(arr[::stride]), abs_true[::stride])
with np.errstate(invalid='ignore'):
assert_equal(np.square(arr[::stride]), sq_true[::stride])
class TestComplexAbsoluteAVX:
@pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19])
@pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
@pytest.mark.parametrize("astype", [np.complex64, np.complex128])
# test to ensure masking and strides work as intended in the AVX implementation
def test_array(self, arraysize, stride, astype):
arr = np.ones(arraysize, dtype=astype)
abs_true = np.ones(arraysize, dtype=arr.real.dtype)
assert_equal(np.abs(arr[::stride]), abs_true[::stride])
# Testcase taken as is from https://github.com/numpy/numpy/issues/16660
class TestComplexAbsoluteMixedDTypes:
@pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
@pytest.mark.parametrize("astype", [np.complex64, np.complex128])
@pytest.mark.parametrize("func", ['abs', 'square', 'conjugate'])
def test_array(self, stride, astype, func):
dtype = [('template_id', '<i8'), ('bank_chisq','<f4'),
('bank_chisq_dof','<i8'), ('chisq', '<f4'), ('chisq_dof','<i8'),
('cont_chisq', '<f4'), ('psd_var_val', '<f4'), ('sg_chisq','<f4'),
('mycomplex', astype), ('time_index', '<i8')]
vec = np.array([
(0, 0., 0, -31.666483, 200, 0., 0., 1. , 3.0+4.0j , 613090),
(1, 0., 0, 260.91525 , 42, 0., 0., 1. , 5.0+12.0j , 787315),
(1, 0., 0, 52.15155 , 42, 0., 0., 1. , 8.0+15.0j , 806641),
(1, 0., 0, 52.430195, 42, 0., 0., 1. , 7.0+24.0j , 1363540),
(2, 0., 0, 304.43646 , 58, 0., 0., 1. , 20.0+21.0j , 787323),
(3, 0., 0, 299.42108 , 52, 0., 0., 1. , 12.0+35.0j , 787332),
(4, 0., 0, 39.4836 , 28, 0., 0., 9.182192, 9.0+40.0j , 787304),
(4, 0., 0, 76.83787 , 28, 0., 0., 1. , 28.0+45.0j, 1321869),
(5, 0., 0, 143.26366 , 24, 0., 0., 10.996129, 11.0+60.0j , 787299)], dtype=dtype)
myfunc = getattr(np, func)
a = vec['mycomplex']
g = myfunc(a[::stride])
b = vec['mycomplex'].copy()
h = myfunc(b[::stride])
assert_array_max_ulp(h.real, g.real, 1)
assert_array_max_ulp(h.imag, g.imag, 1)
| 23,243 | Python | 36.309791 | 119 | 0.531042 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalar_ctors.py | """
Test the scalar constructors, which also do type-coercion
"""
import pytest
import numpy as np
from numpy.testing import (
assert_equal, assert_almost_equal, assert_warns,
)
class TestFromString:
def test_floating(self):
# Ticket #640, floats from string
fsingle = np.single('1.234')
fdouble = np.double('1.234')
flongdouble = np.longdouble('1.234')
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
def test_floating_overflow(self):
""" Strings containing an unrepresentable float overflow """
fhalf = np.half('1e10000')
assert_equal(fhalf, np.inf)
fsingle = np.single('1e10000')
assert_equal(fsingle, np.inf)
fdouble = np.double('1e10000')
assert_equal(fdouble, np.inf)
flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
assert_equal(flongdouble, np.inf)
fhalf = np.half('-1e10000')
assert_equal(fhalf, -np.inf)
fsingle = np.single('-1e10000')
assert_equal(fsingle, -np.inf)
fdouble = np.double('-1e10000')
assert_equal(fdouble, -np.inf)
flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
assert_equal(flongdouble, -np.inf)
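# A minimal illustrative sketch (not part of the suite): out-of-range string
# literals saturate to infinity instead of raising, which is the behaviour
# test_floating_overflow pins down.
def _string_overflow_sketch():
    return np.double('1e400')        # float64 max is ~1.8e308, so this is inf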
class TestExtraArgs:
def test_superclass(self):
# try both positional and keyword arguments
s = np.str_(b'\\x61', encoding='unicode-escape')
assert s == 'a'
s = np.str_(b'\\x61', 'unicode-escape')
assert s == 'a'
# previously this would return '\\xx'
with pytest.raises(UnicodeDecodeError):
np.str_(b'\\xx', encoding='unicode-escape')
with pytest.raises(UnicodeDecodeError):
np.str_(b'\\xx', 'unicode-escape')
# superclass fails, but numpy succeeds
assert np.bytes_(-2) == b'-2'
def test_datetime(self):
dt = np.datetime64('2000-01', ('M', 2))
assert np.datetime_data(dt) == ('M', 2)
with pytest.raises(TypeError):
np.datetime64('2000', garbage=True)
def test_bool(self):
with pytest.raises(TypeError):
np.bool_(False, garbage=True)
def test_void(self):
with pytest.raises(TypeError):
np.void(b'test', garbage=True)
class TestFromInt:
def test_intp(self):
# Ticket #99
assert_equal(1024, np.intp(1024))
def test_uint64_from_negative(self):
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
int_types = [np.byte, np.short, np.intc, np.int_, np.longlong]
uint_types = [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong]
float_types = [np.half, np.single, np.double, np.longdouble]
cfloat_types = [np.csingle, np.cdouble, np.clongdouble]
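# A minimal illustrative sketch (not part of the suite): the property the
# class below checks -- wrapping a scalar in np.array preserves its exact
# type whenever no target dtype is requested.
def _scalar_wrap_sketch():
    x = np.float32(2)
    return np.array(x).dtype.type is np.float32   # True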
class TestArrayFromScalar:
""" gh-15467 """
def _do_test(self, t1, t2):
x = t1(2)
arr = np.array(x, dtype=t2)
# type should be preserved exactly
if t2 is None:
assert arr.dtype.type is t1
else:
assert arr.dtype.type is t2
@pytest.mark.parametrize('t1', int_types + uint_types)
@pytest.mark.parametrize('t2', int_types + uint_types + [None])
def test_integers(self, t1, t2):
return self._do_test(t1, t2)
@pytest.mark.parametrize('t1', float_types)
@pytest.mark.parametrize('t2', float_types + [None])
def test_reals(self, t1, t2):
return self._do_test(t1, t2)
@pytest.mark.parametrize('t1', cfloat_types)
@pytest.mark.parametrize('t2', cfloat_types + [None])
def test_complex(self, t1, t2):
return self._do_test(t1, t2)
| 3,688 | Python | 30.801724 | 77 | 0.610087 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_mem_overlap.py | import itertools
import pytest
import numpy as np
from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
from numpy.core import _umath_tests
from numpy.lib.stride_tricks import as_strided
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_array_equal
)
ndims = 2
size = 10
shape = tuple([size] * ndims)
MAY_SHARE_BOUNDS = 0
MAY_SHARE_EXACT = -1
def _indices_for_nelems(nelems):
"""Returns slices of length nelems, from start onwards, in direction sign."""
if nelems == 0:
return [size // 2] # int index
res = []
for step in (1, 2):
for sign in (-1, 1):
start = size // 2 - nelems * step * sign // 2
stop = start + nelems * step * sign
res.append(slice(start, stop, step * sign))
return res
def _indices_for_axis():
"""Returns (src, dst) pairs of indices."""
res = []
for nelems in (0, 2, 3):
ind = _indices_for_nelems(nelems)
res.extend(itertools.product(ind, ind)) # all assignments of size "nelems"
return res
def _indices(ndims):
"""Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
ind = _indices_for_axis()
return itertools.product(ind, repeat=ndims)
def _check_assignment(srcidx, dstidx):
"""Check assignment arr[dstidx] = arr[srcidx] works."""
    arr = np.arange(np.prod(shape)).reshape(shape)
cpy = arr.copy()
cpy[dstidx] = arr[srcidx]
arr[dstidx] = arr[srcidx]
assert_(np.all(arr == cpy),
'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
def test_overlapping_assignments():
# Test automatically generated assignments which overlap in memory.
inds = _indices(ndims)
for ind in inds:
srcidx = tuple([a[0] for a in ind])
dstidx = tuple([a[1] for a in ind])
_check_assignment(srcidx, dstidx)
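# A minimal illustrative sketch (not part of the suite): NumPy buffers
# assignments whose source and destination views overlap, so the in-place
# form matches the explicit-copy form checked above.
def _overlapping_assignment_sketch():
    arr = np.arange(10)
    arr[1:] = arr[:-1]               # overlapping views, still well-defined
    return arr                       # [0, 0, 1, 2, 3, 4, 5, 6, 7, 8]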
@pytest.mark.slow
def test_diophantine_fuzz():
# Fuzz test the diophantine solver
rng = np.random.RandomState(1234)
max_int = np.iinfo(np.intp).max
for ndim in range(10):
feasible_count = 0
infeasible_count = 0
min_count = 500//(ndim + 1)
while min(feasible_count, infeasible_count) < min_count:
# Ensure big and small integer problems
A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
U_max = rng.randint(0, 11, dtype=np.intp)**6
A_max = min(max_int, A_max)
U_max = min(max_int-1, U_max)
A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp))
for j in range(ndim))
U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp))
for j in range(ndim))
b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))
b = rng.randint(-1, b_ub+2, dtype=np.intp)
if ndim == 0 and feasible_count < min_count:
b = 0
X = solve_diophantine(A, U, b)
if X is None:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
assert_(X_simplified is None, (A, U, b, X_simplified))
# Check no solution exists (provided the problem is
# small enough so that brute force checking doesn't
# take too long)
ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U))
size = 1
for r in ranges:
size *= len(r)
if size < 100000:
assert_(not any(sum(w) == b for w in itertools.product(*ranges)))
infeasible_count += 1
else:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
assert_(X_simplified is not None, (A, U, b, X_simplified))
# Check validity
assert_(sum(a*x for a, x in zip(A, X)) == b)
assert_(all(0 <= x <= ub for x, ub in zip(X, U)))
feasible_count += 1
def test_diophantine_overflow():
# Smoke test integer overflow detection
max_intp = np.iinfo(np.intp).max
max_int64 = np.iinfo(np.int64).max
if max_int64 <= max_intp:
# Check that the algorithm works internally in 128-bit;
# solving this problem requires large intermediate numbers
A = (max_int64//2, max_int64//2 - 10)
U = (max_int64//2, max_int64//2 - 10)
b = 2*(max_int64//2) - 10
assert_equal(solve_diophantine(A, U, b), (1, 1))
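# A minimal illustrative sketch (not part of the suite): solve_diophantine
# searches for 0 <= x_j <= U_j with sum(A_j * x_j) == b, the core question
# behind exact overlap detection.
def _diophantine_sketch():
    # 2*x0 + 3*x1 == 7 with x0, x1 <= 3 has the unique solution (2, 1)
    return solve_diophantine((2, 3), (3, 3), 7)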
def check_may_share_memory_exact(a, b):
got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
assert_equal(np.may_share_memory(a, b),
np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS))
a.fill(0)
b.fill(0)
a.fill(1)
exact = b.any()
err_msg = ""
if got != exact:
err_msg = " " + "\n ".join([
"base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],),
"shape_a = %r" % (a.shape,),
"shape_b = %r" % (b.shape,),
"strides_a = %r" % (a.strides,),
"strides_b = %r" % (b.strides,),
"size_a = %r" % (a.size,),
"size_b = %r" % (b.size,)
])
assert_equal(got, exact, err_msg=err_msg)
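# A minimal illustrative sketch (not part of the suite) of the two answers
# the helper above compares: the cheap bounds check may report a false
# positive that the exact solver rules out.
def _bounds_vs_exact_sketch():
    x = np.arange(10)
    a, b = x[::2], x[1::2]                    # interleaved views of x
    bounds = np.may_share_memory(a, b)        # True: memory extents overlap
    exact = np.shares_memory(a, b)            # False: no element in common
    return bounds, exact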
def test_may_share_memory_manual():
# Manual test cases for may_share_memory
# Base arrays
xs0 = [
np.zeros([13, 21, 23, 22], dtype=np.int8),
np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:]
]
# Generate all negative stride combinations
xs = []
for x in xs0:
for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)):
xp = x[ss]
xs.append(xp)
for x in xs:
# The default is a simple extent check
assert_(np.may_share_memory(x[:,0,:], x[:,1,:]))
assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None))
# Exact checks
check_may_share_memory_exact(x[:,0,:], x[:,1,:])
check_may_share_memory_exact(x[:,::7], x[:,3::3])
try:
xp = x.ravel()
if xp.flags.owndata:
continue
xp = xp.view(np.int16)
except ValueError:
continue
# 0-size arrays cannot overlap
check_may_share_memory_exact(x.ravel()[6:6],
xp.reshape(13, 21, 23, 11)[:,::7])
# Test itemsize is dealt with
check_may_share_memory_exact(x[:,::7],
xp.reshape(13, 21, 23, 11))
check_may_share_memory_exact(x[:,::7],
xp.reshape(13, 21, 23, 11)[:,3::3])
check_may_share_memory_exact(x.ravel()[6:7],
xp.reshape(13, 21, 23, 11)[:,::7])
# Check unit size
x = np.zeros([1], dtype=np.int8)
check_may_share_memory_exact(x, x)
check_may_share_memory_exact(x, x.copy())
def iter_random_view_pairs(x, same_steps=True, equal_size=False):
rng = np.random.RandomState(1234)
if equal_size and same_steps:
        raise ValueError("same_steps and equal_size cannot both be True")
def random_slice(n, step):
start = rng.randint(0, n+1, dtype=np.intp)
stop = rng.randint(start, n+1, dtype=np.intp)
if rng.randint(0, 2, dtype=np.intp) == 0:
stop, start = start, stop
step *= -1
return slice(start, stop, step)
def random_slice_fixed_size(n, step, size):
start = rng.randint(0, n+1 - size*step)
stop = start + (size-1)*step + 1
if rng.randint(0, 2) == 0:
stop, start = start-1, stop-1
if stop < 0:
stop = None
step *= -1
return slice(start, stop, step)
# First a few regular views
yield x, x
for j in range(1, 7, 3):
yield x[j:], x[:-j]
yield x[...,j:], x[...,:-j]
# An array with zero stride internal overlap
strides = list(x.strides)
strides[0] = 0
xp = as_strided(x, shape=x.shape, strides=strides)
yield x, xp
yield xp, xp
# An array with non-zero stride internal overlap
strides = list(x.strides)
if strides[0] > 1:
strides[0] = 1
xp = as_strided(x, shape=x.shape, strides=strides)
yield x, xp
yield xp, xp
# Then discontiguous views
while True:
steps = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
t1 = np.arange(x.ndim)
rng.shuffle(t1)
if equal_size:
t2 = t1
else:
t2 = np.arange(x.ndim)
rng.shuffle(t2)
a = x[s1]
if equal_size:
if a.size == 0:
continue
steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))
if rng.randint(0, 5) == 0 else 1
for p, s, pa in zip(x.shape, s1, a.shape))
s2 = tuple(random_slice_fixed_size(p, s, pa)
for p, s, pa in zip(x.shape, steps2, a.shape))
elif same_steps:
steps2 = steps
else:
steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
if not equal_size:
s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))
a = a.transpose(t1)
b = x[s2].transpose(t2)
yield a, b
def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
# Check that overlap problems with common strides are solved with
# little work.
x = np.zeros([17,34,71,97], dtype=np.int16)
feasible = 0
infeasible = 0
pair_iter = iter_random_view_pairs(x, same_steps)
while min(feasible, infeasible) < min_count:
a, b = next(pair_iter)
bounds_overlap = np.may_share_memory(a, b)
may_share_answer = np.may_share_memory(a, b)
easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))
exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
if easy_answer != exact_answer:
# assert_equal is slow...
assert_equal(easy_answer, exact_answer)
if may_share_answer != bounds_overlap:
assert_equal(may_share_answer, bounds_overlap)
if bounds_overlap:
if exact_answer:
feasible += 1
else:
infeasible += 1
@pytest.mark.slow
def test_may_share_memory_easy_fuzz():
# Check that overlap problems with common strides are always
# solved with little work.
check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1,
same_steps=True,
min_count=2000)
@pytest.mark.slow
def test_may_share_memory_harder_fuzz():
# Overlap problems with not necessarily common strides take more
# work.
#
# The work bound below can't be reduced much. Harder problems can
# also exist but not be detected here, as the set of problems
# comes from RNG.
check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2,
same_steps=False,
min_count=2000)
def test_shares_memory_api():
x = np.zeros([4, 5, 6], dtype=np.int8)
assert_equal(np.shares_memory(x, x), True)
assert_equal(np.shares_memory(x, x.copy()), False)
a = x[:,::2,::3]
b = x[:,::3,::2]
assert_equal(np.shares_memory(a, b), True)
assert_equal(np.shares_memory(a, b, max_work=None), True)
assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)
def test_may_share_memory_bad_max_work():
x = np.zeros([1])
assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)
assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)
def test_internal_overlap_diophantine():
def check(A, U, exists=None):
X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)
if exists is None:
exists = (X is not None)
if X is not None:
assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U)))
assert_(all(0 <= x <= u for x, u in zip(X, U)))
assert_(any(x != u//2 for x, u in zip(X, U)))
if exists:
assert_(X is not None, repr(X))
else:
assert_(X is None, repr(X))
# Smoke tests
check((3, 2), (2*2, 3*2), exists=True)
check((3*2, 2), (15*2, (3-1)*2), exists=False)
def test_internal_overlap_slices():
# Slicing an array never generates internal overlap
x = np.zeros([17,34,71,97], dtype=np.int16)
rng = np.random.RandomState(1234)
def random_slice(n, step):
start = rng.randint(0, n+1, dtype=np.intp)
stop = rng.randint(start, n+1, dtype=np.intp)
if rng.randint(0, 2, dtype=np.intp) == 0:
stop, start = start, stop
step *= -1
return slice(start, stop, step)
cases = 0
min_count = 5000
while cases < min_count:
steps = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
t1 = np.arange(x.ndim)
rng.shuffle(t1)
s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
a = x[s1].transpose(t1)
assert_(not internal_overlap(a))
cases += 1
def check_internal_overlap(a, manual_expected=None):
got = internal_overlap(a)
# Brute-force check
m = set()
ranges = tuple(range(n) for n in a.shape)
for v in itertools.product(*ranges):
offset = sum(s*w for s, w in zip(a.strides, v))
if offset in m:
expected = True
break
else:
m.add(offset)
else:
expected = False
# Compare
if got != expected:
assert_equal(got, expected, err_msg=repr((a.strides, a.shape)))
if manual_expected is not None and expected != manual_expected:
assert_equal(expected, manual_expected)
return got
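# A minimal illustrative sketch (not part of the suite): a zero stride is
# the simplest way to build a self-overlapping array, since every row then
# aliases the same memory.
def _internal_overlap_sketch():
    x = np.arange(3, dtype=np.int8)
    aliased = as_strided(x, shape=(4, 3), strides=(0, 1))
    return internal_overlap(aliased)          # True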
def test_internal_overlap_manual():
# Stride tricks can construct arrays with internal overlap
    # We don't care about memory bounds; the array is never
    # read or written
x = np.arange(1).astype(np.int8)
# Check low-dimensional special cases
check_internal_overlap(x, False) # 1-dim
check_internal_overlap(x.reshape([]), False) # 0-dim
a = as_strided(x, strides=(3, 4), shape=(4, 4))
check_internal_overlap(a, False)
a = as_strided(x, strides=(3, 4), shape=(5, 4))
check_internal_overlap(a, True)
a = as_strided(x, strides=(0,), shape=(0,))
check_internal_overlap(a, False)
a = as_strided(x, strides=(0,), shape=(1,))
check_internal_overlap(a, False)
a = as_strided(x, strides=(0,), shape=(2,))
check_internal_overlap(a, True)
a = as_strided(x, strides=(0, -9993), shape=(87, 22))
check_internal_overlap(a, True)
a = as_strided(x, strides=(0, -9993), shape=(1, 22))
check_internal_overlap(a, False)
a = as_strided(x, strides=(0, -9993), shape=(0, 22))
check_internal_overlap(a, False)
def test_internal_overlap_fuzz():
# Fuzz check; the brute-force check is fairly slow
x = np.arange(1).astype(np.int8)
overlap = 0
no_overlap = 0
min_count = 100
rng = np.random.RandomState(1234)
while min(overlap, no_overlap) < min_count:
ndim = rng.randint(1, 4, dtype=np.intp)
strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
for j in range(ndim))
shape = tuple(rng.randint(1, 30, dtype=np.intp)
for j in range(ndim))
a = as_strided(x, strides=strides, shape=shape)
result = check_internal_overlap(a)
if result:
overlap += 1
else:
no_overlap += 1
def test_non_ndarray_inputs():
# Regression check for gh-5604
class MyArray:
def __init__(self, data):
self.data = data
@property
def __array_interface__(self):
return self.data.__array_interface__
class MyArray2:
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
for cls in [MyArray, MyArray2]:
x = np.arange(5)
assert_(np.may_share_memory(cls(x[::2]), x[1::2]))
assert_(not np.shares_memory(cls(x[::2]), x[1::2]))
assert_(np.shares_memory(cls(x[1::3]), x[::2]))
assert_(np.may_share_memory(cls(x[1::3]), x[::2]))
def view_element_first_byte(x):
"""Construct an array viewing the first byte of each element of `x`"""
from numpy.lib.stride_tricks import DummyArray
interface = dict(x.__array_interface__)
interface['typestr'] = '|b1'
interface['descr'] = [('', '|b1')]
return np.asarray(DummyArray(interface, x))
def assert_copy_equivalent(operation, args, out, **kwargs):
"""
Check that operation(*args, out=out) produces results
equivalent to out[...] = operation(*args, out=out.copy())
"""
kwargs['out'] = out
kwargs2 = dict(kwargs)
kwargs2['out'] = out.copy()
out_orig = out.copy()
out[...] = operation(*args, **kwargs2)
expected = out.copy()
out[...] = out_orig
got = operation(*args, **kwargs).copy()
if (got != expected).any():
assert_equal(got, expected)
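# A minimal illustrative sketch (not part of the suite) of the contract the
# helper above enforces: writing through an out= view that overlaps the
# input must produce the same result as writing into a fresh copy.
def _copy_equivalent_sketch():
    x = np.arange(6)
    assert_copy_equivalent(np.negative, [x], out=x[::-1])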
class TestUFunc:
"""
Test ufunc call memory overlap handling
"""
def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
count=5000):
shapes = [7, 13, 8, 21, 29, 32]
rng = np.random.RandomState(1234)
for ndim in range(1, 6):
x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)
it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
min_count = count // (ndim + 1)**2
overlapping = 0
while overlapping < min_count:
a, b = next(it)
a_orig = a.copy()
b_orig = b.copy()
if get_out_axis_size is None:
assert_copy_equivalent(operation, [a], out=b)
if np.shares_memory(a, b):
overlapping += 1
else:
for axis in itertools.chain(range(ndim), [None]):
a[...] = a_orig
b[...] = b_orig
# Determine size for reduction axis (None if scalar)
outsize, scalarize = get_out_axis_size(a, b, axis)
if outsize == 'skip':
continue
# Slice b to get an output array of the correct size
sl = [slice(None)] * ndim
if axis is None:
if outsize is None:
sl = [slice(0, 1)] + [0]*(ndim - 1)
else:
sl = [slice(0, outsize)] + [0]*(ndim - 1)
else:
if outsize is None:
k = b.shape[axis]//2
if ndim == 1:
sl[axis] = slice(k, k + 1)
else:
sl[axis] = k
else:
assert b.shape[axis] >= outsize
sl[axis] = slice(0, outsize)
b_out = b[tuple(sl)]
if scalarize:
b_out = b_out.reshape([])
if np.shares_memory(a, b_out):
overlapping += 1
# Check result
assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
@pytest.mark.slow
def test_unary_ufunc_call_fuzz(self):
self.check_unary_fuzz(np.invert, None, np.int16)
@pytest.mark.slow
def test_unary_ufunc_call_complex_fuzz(self):
# Complex typically has a smaller alignment than itemsize
self.check_unary_fuzz(np.negative, None, np.complex128, count=500)
def test_binary_ufunc_accumulate_fuzz(self):
def get_out_axis_size(a, b, axis):
if axis is None:
if a.ndim == 1:
return a.size, False
else:
return 'skip', False # accumulate doesn't support this
else:
return a.shape[axis], False
self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduce_fuzz(self):
def get_out_axis_size(a, b, axis):
return None, (axis is None or a.ndim == 1)
self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduceat_fuzz(self):
def get_out_axis_size(a, b, axis):
if axis is None:
if a.ndim == 1:
return a.size, False
else:
return 'skip', False # reduceat doesn't support this
else:
return a.shape[axis], False
def do_reduceat(a, out, axis):
if axis is None:
size = len(a)
step = size//len(out)
else:
size = a.shape[axis]
step = a.shape[axis] // out.shape[axis]
idx = np.arange(0, size, step)
return np.add.reduceat(a, idx, out=out, axis=axis)
self.check_unary_fuzz(do_reduceat, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduceat_manual(self):
def check(ufunc, a, ind, out):
c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
c2 = ufunc.reduceat(a, ind, out=out)
assert_array_equal(c1, c2)
# Exactly same input/output arrays
a = np.arange(10000, dtype=np.int16)
check(np.add, a, a[::-1].copy(), a)
# Overlap with index
a = np.arange(10000, dtype=np.int16)
check(np.add, a, a[::-1], a)
@pytest.mark.slow
def test_unary_gufunc_fuzz(self):
shapes = [7, 13, 8, 21, 29, 32]
gufunc = _umath_tests.euclidean_pdist
rng = np.random.RandomState(1234)
for ndim in range(2, 6):
x = rng.rand(*shapes[:ndim])
it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
min_count = 500 // (ndim + 1)**2
overlapping = 0
while overlapping < min_count:
a, b = next(it)
if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
continue
                # Ensure the shapes are such that euclidean_pdist is happy
if b.shape[-1] > b.shape[-2]:
b = b[...,0,:]
else:
b = b[...,:,0]
n = a.shape[-2]
p = n * (n - 1) // 2
if p <= b.shape[-1] and p > 0:
b = b[...,:p]
else:
n = max(2, int(np.sqrt(b.shape[-1]))//2)
p = n * (n - 1) // 2
a = a[...,:n,:]
b = b[...,:p]
# Call
if np.shares_memory(a, b):
overlapping += 1
with np.errstate(over='ignore', invalid='ignore'):
assert_copy_equivalent(gufunc, [a], out=b)
def test_ufunc_at_manual(self):
def check(ufunc, a, ind, b=None):
a0 = a.copy()
if b is None:
ufunc.at(a0, ind.copy())
c1 = a0.copy()
ufunc.at(a, ind)
c2 = a.copy()
else:
ufunc.at(a0, ind.copy(), b.copy())
c1 = a0.copy()
ufunc.at(a, ind, b)
c2 = a.copy()
assert_array_equal(c1, c2)
# Overlap with index
a = np.arange(10000, dtype=np.int16)
check(np.invert, a[::-1], a)
# Overlap with second data array
a = np.arange(100, dtype=np.int16)
ind = np.arange(0, 100, 2, dtype=np.int16)
check(np.add, a, ind, a[25:75])
def test_unary_ufunc_1d_manual(self):
# Exercise ufunc fast-paths (that avoid creation of an `np.nditer`)
def check(a, b):
a_orig = a.copy()
b_orig = b.copy()
b0 = b.copy()
c1 = ufunc(a, out=b0)
c2 = ufunc(a, out=b)
assert_array_equal(c1, c2)
# Trigger "fancy ufunc loop" code path
mask = view_element_first_byte(b).view(np.bool_)
a[...] = a_orig
b[...] = b_orig
c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()
a[...] = a_orig
b[...] = b_orig
c2 = ufunc(a, out=b, where=mask.copy()).copy()
# Also, mask overlapping with output
a[...] = a_orig
b[...] = b_orig
c3 = ufunc(a, out=b, where=mask).copy()
assert_array_equal(c1, c2)
assert_array_equal(c1, c3)
dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
np.float64, np.complex64, np.complex128]
dtypes = [np.dtype(x) for x in dtypes]
for dtype in dtypes:
if np.issubdtype(dtype, np.integer):
ufunc = np.invert
else:
ufunc = np.reciprocal
n = 1000
k = 10
indices = [
np.index_exp[:n],
np.index_exp[k:k+n],
np.index_exp[n-1::-1],
np.index_exp[k+n-1:k-1:-1],
np.index_exp[:2*n:2],
np.index_exp[k:k+2*n:2],
np.index_exp[2*n-1::-2],
np.index_exp[k+2*n-1:k-1:-2],
]
for xi, yi in itertools.product(indices, indices):
v = np.arange(1, 1 + n*2 + k, dtype=dtype)
x = v[xi]
y = v[yi]
with np.errstate(all='ignore'):
check(x, y)
# Scalar cases
check(x[:1], y)
check(x[-1:], y)
check(x[:1].reshape([]), y)
check(x[-1:].reshape([]), y)
def test_unary_ufunc_where_same(self):
# Check behavior at wheremask overlap
ufunc = np.invert
def check(a, out, mask):
c1 = ufunc(a, out=out.copy(), where=mask.copy())
c2 = ufunc(a, out=out, where=mask)
assert_array_equal(c1, c2)
# Check behavior with same input and output arrays
x = np.arange(100).astype(np.bool_)
check(x, x, x)
check(x, x.copy(), x)
check(x, x, x.copy())
@pytest.mark.slow
def test_binary_ufunc_1d_manual(self):
ufunc = np.add
def check(a, b, c):
c0 = c.copy()
c1 = ufunc(a, b, out=c0)
c2 = ufunc(a, b, out=c)
assert_array_equal(c1, c2)
for dtype in [np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64, np.complex64, np.complex128]:
# Check different data dependency orders
n = 1000
k = 10
indices = []
for p in [1, 2]:
indices.extend([
np.index_exp[:p*n:p],
np.index_exp[k:k+p*n:p],
np.index_exp[p*n-1::-p],
np.index_exp[k+p*n-1:k-1:-p],
])
for x, y, z in itertools.product(indices, indices, indices):
v = np.arange(6*n).astype(dtype)
x = v[x]
y = v[y]
z = v[z]
check(x, y, z)
# Scalar cases
check(x[:1], y, z)
check(x[-1:], y, z)
check(x[:1].reshape([]), y, z)
check(x[-1:].reshape([]), y, z)
check(x, y[:1], z)
check(x, y[-1:], z)
check(x, y[:1].reshape([]), z)
check(x, y[-1:].reshape([]), z)
def test_inplace_op_simple_manual(self):
rng = np.random.RandomState(1234)
x = rng.rand(200, 200) # bigger than bufsize
x += x.T
assert_array_equal(x - x.T, 0)
| 29,084 | Python | 30.207082 | 108 | 0.500825 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_nditer.py | import sys
import pytest
import textwrap
import subprocess
import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy import array, arange, nditer, all
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises,
HAS_REFCOUNT, suppress_warnings, break_cycles
)
def iter_multi_index(i):
ret = []
while not i.finished:
ret.append(i.multi_index)
i.iternext()
return ret
def iter_indices(i):
ret = []
while not i.finished:
ret.append(i.index)
i.iternext()
return ret
def iter_iterindices(i):
ret = []
while not i.finished:
ret.append(i.iterindex)
i.iternext()
return ret
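# A minimal illustrative sketch (not part of the suite) of the helpers
# above: collect the multi-indices an nditer visits over a small array.
def _iter_helper_usage_sketch():
    it = nditer(arange(4).reshape(2, 2), ['multi_index'], [['readonly']])
    return iter_multi_index(it)      # [(0, 0), (0, 1), (1, 0), (1, 1)]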
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_iter_refcount():
# Make sure the iterator doesn't leak
# Basic
a = arange(6)
dt = np.dtype('f4').newbyteorder()
rc_a = sys.getrefcount(a)
rc_dt = sys.getrefcount(dt)
with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='unsafe',
op_dtypes=[dt]) as it:
assert_(not it.iterationneedsapi)
assert_(sys.getrefcount(a) > rc_a)
assert_(sys.getrefcount(dt) > rc_dt)
# del 'it'
it = None
assert_equal(sys.getrefcount(a), rc_a)
assert_equal(sys.getrefcount(dt), rc_dt)
# With a copy
a = arange(6, dtype='f4')
dt = np.dtype('f4')
rc_a = sys.getrefcount(a)
rc_dt = sys.getrefcount(dt)
it = nditer(a, [],
[['readwrite']],
op_dtypes=[dt])
rc2_a = sys.getrefcount(a)
rc2_dt = sys.getrefcount(dt)
it2 = it.copy()
assert_(sys.getrefcount(a) > rc2_a)
assert_(sys.getrefcount(dt) > rc2_dt)
it = None
assert_equal(sys.getrefcount(a), rc2_a)
assert_equal(sys.getrefcount(dt), rc2_dt)
it2 = None
assert_equal(sys.getrefcount(a), rc_a)
assert_equal(sys.getrefcount(dt), rc_dt)
del it2 # avoid pyflakes unused variable warning
def test_iter_best_order():
# The iterator should always find the iteration order
# with increasing memory addresses
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, [], [['readonly']])
assert_equal([x for x in i], a)
# Fortran-order
i = nditer(aview.T, [], [['readonly']])
assert_equal([x for x in i], a)
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), [], [['readonly']])
assert_equal([x for x in i], a)
def test_iter_c_order():
# Test forcing C order
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, order='C')
assert_equal([x for x in i], aview.ravel(order='C'))
# Fortran-order
i = nditer(aview.T, order='C')
assert_equal([x for x in i], aview.T.ravel(order='C'))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), order='C')
assert_equal([x for x in i],
aview.swapaxes(0, 1).ravel(order='C'))
def test_iter_f_order():
# Test forcing F order
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, order='F')
assert_equal([x for x in i], aview.ravel(order='F'))
# Fortran-order
i = nditer(aview.T, order='F')
assert_equal([x for x in i], aview.T.ravel(order='F'))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), order='F')
assert_equal([x for x in i],
aview.swapaxes(0, 1).ravel(order='F'))
def test_iter_c_or_f_order():
# Test forcing any contiguous (C or F) order
# Test the ordering for 1-D to 5-D shapes
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
a = arange(np.prod(shape))
# Test each combination of positive and negative strides
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, order='A')
assert_equal([x for x in i], aview.ravel(order='A'))
# Fortran-order
i = nditer(aview.T, order='A')
assert_equal([x for x in i], aview.T.ravel(order='A'))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1), order='A')
assert_equal([x for x in i],
aview.swapaxes(0, 1).ravel(order='A'))
def test_nditer_multi_index_set():
# Test the multi_index set
a = np.arange(6).reshape(2, 3)
it = np.nditer(a, flags=['multi_index'])
    # Skip iteration over the first two elements of a[0]
it.multi_index = (0, 2,)
assert_equal([i for i in it], [2, 3, 4, 5])
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_nditer_multi_index_set_refcount():
    # Test that setting multi_index does not leak a reference to the
    # index variable
index = 0
i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index'])
start_count = sys.getrefcount(index)
i.multi_index = (index,)
end_count = sys.getrefcount(index)
assert_equal(start_count, end_count)
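# A minimal illustrative sketch (not part of the suite): assigning to
# multi_index seeks the iterator, so iteration resumes from that coordinate.
def _multi_index_seek_sketch():
    it = nditer(np.arange(6).reshape(2, 3), flags=['multi_index'])
    it.multi_index = (1, 1)
    return [int(v) for v in it]      # [4, 5]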
def test_iter_best_order_multi_index_1d():
# The multi-indices should be correct with any reordering
a = arange(4)
# 1D order
i = nditer(a, ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)])
# 1D reversed order
i = nditer(a[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)])
def test_iter_best_order_multi_index_2d():
# The multi-indices should be correct with any reordering
a = arange(6)
# 2D C-order
i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)])
# 2D Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)])
# 2D reversed C-order
i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)])
i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)])
i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)])
# 2D reversed Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)])
i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)])
i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)])
def test_iter_best_order_multi_index_3d():
# The multi-indices should be correct with any reordering
a = arange(12)
# 3D C-order
i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)])
# 3D Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0),
(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)])
# 3D reversed C-order
i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1),
(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)])
i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1),
(1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)])
i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0),
(1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)])
# 3D reversed Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0),
(1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0),
(0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
['multi_index'], [['readonly']])
assert_equal(iter_multi_index(i),
[(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1),
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)])
def test_iter_best_order_c_index_1d():
# The C index should be correct with any reordering
a = arange(4)
# 1D order
i = nditer(a, ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3])
# 1D reversed order
i = nditer(a[::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 2, 1, 0])
def test_iter_best_order_c_index_2d():
# The C index should be correct with any reordering
a = arange(6)
# 2D C-order
i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
# 2D Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F'),
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5])
# 2D reversed C-order
i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2])
i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3])
i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
# 2D reversed Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2])
i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3])
i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0])
def test_iter_best_order_c_index_3d():
# The C index should be correct with any reordering
a = arange(12)
# 3D C-order
i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
# 3D Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F'),
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
# 3D reversed C-order
i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
# 3D reversed Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
['c_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
def test_iter_best_order_f_index_1d():
# The Fortran index should be correct with any reordering
a = arange(4)
# 1D order
i = nditer(a, ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3])
# 1D reversed order
i = nditer(a[::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [3, 2, 1, 0])
def test_iter_best_order_f_index_2d():
# The Fortran index should be correct with any reordering
a = arange(6)
# 2D C-order
i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5])
# 2D Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F'),
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
# 2D reversed C-order
i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4])
i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1])
i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0])
# 2D reversed Fortran-order
i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4])
i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1])
i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
def test_iter_best_order_f_index_3d():
# The Fortran index should be correct with any reordering
a = arange(12)
# 3D C-order
i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
# 3D Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F'),
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
# 3D reversed C-order
i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
# 3D reversed Fortran-order
i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
['f_index'], [['readonly']])
assert_equal(iter_indices(i),
[6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
def test_iter_no_inner_full_coalesce():
    # Check external_loop iterators which coalesce into a single inner loop
for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
size = np.prod(shape)
a = arange(size)
# Test each combination of forward and backwards indexing
for dirs in range(2**len(shape)):
dirs_index = [slice(None)]*len(shape)
for bit in range(len(shape)):
if ((2**bit) & dirs):
dirs_index[bit] = slice(None, None, -1)
dirs_index = tuple(dirs_index)
aview = a.reshape(shape)[dirs_index]
# C-order
i = nditer(aview, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (size,))
# Fortran-order
i = nditer(aview.T, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (size,))
# Other order
if len(shape) > 2:
i = nditer(aview.swapaxes(0, 1),
['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (size,))
def test_iter_no_inner_dim_coalescing():
    # Check external_loop iterators whose dimensions may not coalesce completely
# Skipping the last element in a dimension prevents coalescing
# with the next-bigger dimension
a = arange(24).reshape(2, 3, 4)[:,:, :-1]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 2)
assert_equal(i[0].shape, (3,))
a = arange(24).reshape(2, 3, 4)[:, :-1,:]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 2)
assert_equal(i[0].shape, (8,))
a = arange(24).reshape(2, 3, 4)[:-1,:,:]
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (12,))
# Even with lots of 1-sized dimensions, should still coalesce
a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1)
i = nditer(a, ['external_loop'], [['readonly']])
assert_equal(i.ndim, 1)
assert_equal(i[0].shape, (24,))
def test_iter_dim_coalescing():
# Check that the correct number of dimensions are coalesced
# Tracking a multi-index disables coalescing
a = arange(24).reshape(2, 3, 4)
i = nditer(a, ['multi_index'], [['readonly']])
assert_equal(i.ndim, 3)
# A tracked index can allow coalescing if it's compatible with the array
a3d = arange(24).reshape(2, 3, 4)
i = nditer(a3d, ['c_index'], [['readonly']])
assert_equal(i.ndim, 1)
i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']])
assert_equal(i.ndim, 3)
i = nditer(a3d.T, ['c_index'], [['readonly']])
assert_equal(i.ndim, 3)
i = nditer(a3d.T, ['f_index'], [['readonly']])
assert_equal(i.ndim, 1)
i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']])
assert_equal(i.ndim, 3)
# When C or F order is forced, coalescing may still occur
a3d = arange(24).reshape(2, 3, 4)
i = nditer(a3d, order='C')
assert_equal(i.ndim, 1)
i = nditer(a3d.T, order='C')
assert_equal(i.ndim, 3)
i = nditer(a3d, order='F')
assert_equal(i.ndim, 3)
i = nditer(a3d.T, order='F')
assert_equal(i.ndim, 1)
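    # ('A' order follows each operand's own memory layout, so both the C-
    # and F-contiguous arrays coalesce to one dimension.)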
i = nditer(a3d, order='A')
assert_equal(i.ndim, 1)
i = nditer(a3d.T, order='A')
assert_equal(i.ndim, 1)
def test_iter_broadcasting():
# Standard NumPy broadcasting rules
# 1D with scalar
i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (6,))
# 2D with scalar
i = nditer([arange(6).reshape(2, 3), np.int32(2)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
# 2D with 1D
i = nditer([arange(6).reshape(2, 3), arange(3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
i = nditer([arange(2).reshape(2, 1), arange(3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
# 2D with 2D
i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 6)
assert_equal(i.shape, (2, 3))
# 3D with scalar
i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
# 3D with 1D
i = nditer([arange(3), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(3), arange(8).reshape(4, 2, 1)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
# 3D with 2D
i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
# 3D with 3D
i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3),
arange(4).reshape(4, 1, 1)],
['multi_index'], [['readonly']]*3)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)],
['multi_index'], [['readonly']]*2)
assert_equal(i.itersize, 24)
assert_equal(i.shape, (4, 2, 3))
def test_iter_itershape():
# Check that allocated outputs work with a specified shape
a = np.arange(6, dtype='i2').reshape(2, 3)
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_axes=[[0, 1, None], None],
itershape=(-1, -1, 4))
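    # (-1 entries in itershape leave those extents to be inferred from
    # the operands.)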
assert_equal(i.operands[1].shape, (2, 3, 4))
assert_equal(i.operands[1].strides, (24, 8, 2))
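    # (2-byte items in C layout: strides are (3*4*2, 4*2, 2))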
i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
op_axes=[[0, 1, None], None],
itershape=(-1, -1, 4))
assert_equal(i.operands[1].shape, (3, 2, 4))
assert_equal(i.operands[1].strides, (8, 24, 2))
i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
order='F',
op_axes=[[0, 1, None], None],
itershape=(-1, -1, 4))
assert_equal(i.operands[1].shape, (3, 2, 4))
assert_equal(i.operands[1].strides, (2, 6, 12))
# If we specify 1 in the itershape, it shouldn't allow broadcasting
# of that dimension to a bigger value
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_axes=[[0, 1, None], None],
itershape=(-1, 1, 4))
    # Regression test: with itershape given but no op_axes, the internal
    # op_axes must be NULLed correctly
i = np.nditer([np.ones(2), None, None], itershape=(2,))
def test_iter_broadcasting_errors():
# Check that errors are thrown for bad broadcasting shapes
# 1D with 1D
assert_raises(ValueError, nditer, [arange(2), arange(3)],
[], [['readonly']]*2)
# 2D with 1D
assert_raises(ValueError, nditer,
[arange(6).reshape(2, 3), arange(2)],
[], [['readonly']]*2)
# 2D with 2D
assert_raises(ValueError, nditer,
[arange(6).reshape(2, 3), arange(9).reshape(3, 3)],
[], [['readonly']]*2)
assert_raises(ValueError, nditer,
[arange(6).reshape(2, 3), arange(4).reshape(2, 2)],
[], [['readonly']]*2)
# 3D with 3D
assert_raises(ValueError, nditer,
[arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)],
[], [['readonly']]*2)
assert_raises(ValueError, nditer,
[arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)],
[], [['readonly']]*2)
# Verify that the error message mentions the right shapes
try:
nditer([arange(2).reshape(1, 2, 1),
arange(3).reshape(1, 3),
arange(6).reshape(2, 3)],
[],
[['readonly'], ['readonly'], ['writeonly', 'no_broadcast']])
raise AssertionError('Should have raised a broadcast error')
except ValueError as e:
msg = str(e)
# The message should contain the shape of the 3rd operand
assert_(msg.find('(2,3)') >= 0,
'Message "%s" doesn\'t contain operand shape (2,3)' % msg)
# The message should contain the broadcast shape
assert_(msg.find('(1,2,3)') >= 0,
'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg)
try:
nditer([arange(6).reshape(2, 3), arange(2)],
[],
[['readonly'], ['readonly']],
op_axes=[[0, 1], [0, np.newaxis]],
itershape=(4, 3))
raise AssertionError('Should have raised a broadcast error')
except ValueError as e:
msg = str(e)
# The message should contain "shape->remappedshape" for each operand
assert_(msg.find('(2,3)->(2,3)') >= 0,
'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg)
assert_(msg.find('(2,)->(2,newaxis)') >= 0,
                ('Message "%s" doesn\'t contain remapped operand shape ' +
'(2,)->(2,newaxis)') % msg)
# The message should contain the itershape parameter
assert_(msg.find('(4,3)') >= 0,
'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg)
try:
nditer([np.zeros((2, 1, 1)), np.zeros((2,))],
[],
[['writeonly', 'no_broadcast'], ['readonly']])
raise AssertionError('Should have raised a broadcast error')
except ValueError as e:
msg = str(e)
# The message should contain the shape of the bad operand
assert_(msg.find('(2,1,1)') >= 0,
'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg)
# The message should contain the broadcast shape
assert_(msg.find('(2,1,2)') >= 0,
'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg)
def test_iter_flags_errors():
# Check that bad combinations of flags produce errors
a = arange(6)
# Not enough operands
assert_raises(ValueError, nditer, [], [], [])
# Too many operands
assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100)
# Bad global flag
assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']])
# Bad op flag
assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']])
# Bad order parameter
assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G')
# Bad casting parameter
assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon')
# op_flags must match ops
assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2)
# Cannot track both a C and an F index
assert_raises(ValueError, nditer, a,
['c_index', 'f_index'], [['readonly']])
# Inner iteration and multi-indices/indices are incompatible
assert_raises(ValueError, nditer, a,
['external_loop', 'multi_index'], [['readonly']])
assert_raises(ValueError, nditer, a,
['external_loop', 'c_index'], [['readonly']])
assert_raises(ValueError, nditer, a,
['external_loop', 'f_index'], [['readonly']])
# Must specify exactly one of readwrite/readonly/writeonly per operand
assert_raises(ValueError, nditer, a, [], [[]])
assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']])
assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']])
assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']])
assert_raises(ValueError, nditer, a,
[], [['readonly', 'writeonly', 'readwrite']])
# Python scalars are always readonly
assert_raises(TypeError, nditer, 1.5, [], [['writeonly']])
assert_raises(TypeError, nditer, 1.5, [], [['readwrite']])
# Array scalars are always readonly
assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']])
assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']])
# Check readonly array
a.flags.writeable = False
assert_raises(ValueError, nditer, a, [], [['writeonly']])
assert_raises(ValueError, nditer, a, [], [['readwrite']])
a.flags.writeable = True
# Multi-indices available only with the multi_index flag
i = nditer(arange(6), [], [['readonly']])
assert_raises(ValueError, lambda i:i.multi_index, i)
# Index available only with an index flag
assert_raises(ValueError, lambda i:i.index, i)
    # Seeking (assigning to multi_index, index, iterindex, or iterrange)
    # is incompatible with buffering or the external loop
def assign_multi_index(i):
i.multi_index = (0,)
def assign_index(i):
i.index = 0
def assign_iterindex(i):
i.iterindex = 0
def assign_iterrange(i):
i.iterrange = (0, 1)
i = nditer(arange(6), ['external_loop'])
assert_raises(ValueError, assign_multi_index, i)
assert_raises(ValueError, assign_index, i)
assert_raises(ValueError, assign_iterindex, i)
assert_raises(ValueError, assign_iterrange, i)
i = nditer(arange(6), ['buffered'])
assert_raises(ValueError, assign_multi_index, i)
assert_raises(ValueError, assign_index, i)
assert_raises(ValueError, assign_iterrange, i)
# Can't iterate if size is zero
assert_raises(ValueError, nditer, np.array([]))
def test_iter_slice():
a, b, c = np.arange(3), np.arange(3), np.arange(3.)
i = nditer([a, b, c], [], ['readwrite'])
with i:
i[0:2] = (3, 3)
assert_equal(a, [3, 1, 2])
assert_equal(b, [3, 1, 2])
assert_equal(c, [0, 1, 2])
i[1] = 12
assert_equal(i[0:2], [3, 12])
def test_iter_assign_mapping():
a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
casting='same_kind', op_dtypes=[np.dtype('f4')])
with it:
it.operands[0][...] = 3
it.operands[0][...] = 14
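        # Only the final value is written back to a when the context exits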
assert_equal(a, 14)
it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
casting='same_kind', op_dtypes=[np.dtype('f4')])
with it:
x = it.operands[0][-1:1]
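        # ([-1:1] is an empty slice, so the write below is a no-op)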
x[...] = 14
it.operands[0][...] = -1234
assert_equal(a, -1234)
# check for no warnings on dealloc
x = None
it = None
def test_iter_nbo_align_contig():
# Check that byte order, alignment, and contig changes work
# Byte order change by requesting a specific dtype
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
i = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('f4')])
with i:
# context manager triggers WRITEBACKIFCOPY on i at exit
assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
assert_equal(i.operands[0], a)
i.operands[0][:] = 2
assert_equal(au, [2]*6)
del i # should not raise a warning
# Byte order change by requesting NBO
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
casting='equiv') as i:
        # context manager triggers WRITEBACKIFCOPY on i at exit
assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
assert_equal(i.operands[0], a)
i.operands[0][:] = 12345
i.operands[0][:] = 2
assert_equal(au, [2]*6)
# Unaligned input
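    # (Slicing one byte off the i1 buffer shifts the start address by one
    # byte, so the f4 view below is guaranteed to be unaligned.)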
a = np.zeros((6*4+1,), dtype='i1')[1:]
a.dtype = 'f4'
a[:] = np.arange(6, dtype='f4')
assert_(not a.flags.aligned)
# Without 'aligned', shouldn't copy
i = nditer(a, [], [['readonly']])
assert_(not i.operands[0].flags.aligned)
assert_equal(i.operands[0], a)
# With 'aligned', should make a copy
with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
assert_(i.operands[0].flags.aligned)
        # context manager triggers WRITEBACKIFCOPY on i at exit
assert_equal(i.operands[0], a)
i.operands[0][:] = 3
assert_equal(a, [3]*6)
# Discontiguous input
a = arange(12)
# If it is contiguous, shouldn't copy
i = nditer(a[:6], [], [['readonly']])
assert_(i.operands[0].flags.contiguous)
assert_equal(i.operands[0], a[:6])
# If it isn't contiguous, should buffer
i = nditer(a[::2], ['buffered', 'external_loop'],
[['readonly', 'contig']],
buffersize=10)
assert_(i[0].flags.contiguous)
assert_equal(i[0], a[::2])
def test_iter_array_cast():
# Check that arrays are cast as requested
# No cast 'f4' -> 'f4'
a = np.arange(6, dtype='f4').reshape(2, 3)
i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
with i:
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f4'))
# Byte-order cast '<f4' -> '>f4'
a = np.arange(6, dtype='<f4').reshape(2, 3)
with nditer(a, [], [['readwrite', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('>f4')]) as i:
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('>f4'))
# Safe case 'f4' -> 'f8'
a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
i = nditer(a, [], [['readonly', 'copy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f8'))
# The memory layout of the temporary should match a (a is (48,4,16))
# except negative strides get flipped to positive strides.
assert_equal(i.operands[0].strides, (96, 8, 32))
a = a[::-1,:, ::-1]
i = nditer(a, [], [['readonly', 'copy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f8'))
assert_equal(i.operands[0].strides, (96, 8, 32))
# Same-kind cast 'f8' -> 'f4' -> 'f8'
a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
with nditer(a, [],
[['readwrite', 'updateifcopy']],
casting='same_kind',
op_dtypes=[np.dtype('f4')]) as i:
assert_equal(i.operands[0], a)
assert_equal(i.operands[0].dtype, np.dtype('f4'))
assert_equal(i.operands[0].strides, (4, 16, 48))
# Check that WRITEBACKIFCOPY is activated at exit
i.operands[0][2, 1, 1] = -12.5
assert_(a[2, 1, 1] != -12.5)
assert_equal(a[2, 1, 1], -12.5)
a = np.arange(6, dtype='i4')[::-2]
with nditer(a, [],
[['writeonly', 'updateifcopy']],
casting='unsafe',
op_dtypes=[np.dtype('f4')]) as i:
assert_equal(i.operands[0].dtype, np.dtype('f4'))
# Even though the stride was negative in 'a', it
# becomes positive in the temporary
assert_equal(i.operands[0].strides, (4,))
i.operands[0][:] = [1, 2, 3]
assert_equal(a, [1, 2, 3])
def test_iter_array_cast_errors():
# Check that invalid casts are caught
# Need to enable copying for casts to occur
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly']], op_dtypes=[np.dtype('f8')])
# Also need to allow casting for casts to occur
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly', 'copy']], casting='no',
op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly', 'copy']], casting='equiv',
op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
[['writeonly', 'updateifcopy']],
casting='no',
op_dtypes=[np.dtype('f4')])
assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
[['writeonly', 'updateifcopy']],
casting='equiv',
op_dtypes=[np.dtype('f4')])
# '<f4' -> '>f4' should not work with casting='no'
assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
[['readonly', 'copy']], casting='no',
op_dtypes=[np.dtype('>f4')])
# 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readwrite', 'updateifcopy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
[['readwrite', 'updateifcopy']],
casting='safe',
op_dtypes=[np.dtype('f4')])
# 'f4' -> 'i4' is neither a safe nor a same-kind cast
assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
[['readonly', 'copy']],
casting='same_kind',
op_dtypes=[np.dtype('i4')])
assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
[['writeonly', 'updateifcopy']],
casting='same_kind',
op_dtypes=[np.dtype('f4')])
def test_iter_scalar_cast():
# Check that scalars are cast as requested
# No cast 'f4' -> 'f4'
i = nditer(np.float32(2.5), [], [['readonly']],
op_dtypes=[np.dtype('f4')])
assert_equal(i.dtypes[0], np.dtype('f4'))
assert_equal(i.value.dtype, np.dtype('f4'))
assert_equal(i.value, 2.5)
# Safe cast 'f4' -> 'f8'
i = nditer(np.float32(2.5), [],
[['readonly', 'copy']],
casting='safe',
op_dtypes=[np.dtype('f8')])
assert_equal(i.dtypes[0], np.dtype('f8'))
assert_equal(i.value.dtype, np.dtype('f8'))
assert_equal(i.value, 2.5)
# Same-kind cast 'f8' -> 'f4'
i = nditer(np.float64(2.5), [],
[['readonly', 'copy']],
casting='same_kind',
op_dtypes=[np.dtype('f4')])
assert_equal(i.dtypes[0], np.dtype('f4'))
assert_equal(i.value.dtype, np.dtype('f4'))
assert_equal(i.value, 2.5)
# Unsafe cast 'f8' -> 'i4'
i = nditer(np.float64(3.0), [],
[['readonly', 'copy']],
casting='unsafe',
op_dtypes=[np.dtype('i4')])
assert_equal(i.dtypes[0], np.dtype('i4'))
assert_equal(i.value.dtype, np.dtype('i4'))
assert_equal(i.value, 3)
# Readonly scalars may be cast even without setting COPY or BUFFERED
i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')])
assert_equal(i[0].dtype, np.dtype('f8'))
assert_equal(i[0], 3.)
def test_iter_scalar_cast_errors():
# Check that invalid casts are caught
# Need to allow copying/buffering for write casts of scalars to occur
assert_raises(TypeError, nditer, np.float32(2), [],
[['readwrite']], op_dtypes=[np.dtype('f8')])
assert_raises(TypeError, nditer, 2.5, [],
[['readwrite']], op_dtypes=[np.dtype('f4')])
# 'f8' -> 'f4' isn't a safe cast if the value would overflow
assert_raises(TypeError, nditer, np.float64(1e60), [],
[['readonly']],
casting='safe',
op_dtypes=[np.dtype('f4')])
# 'f4' -> 'i4' is neither a safe nor a same-kind cast
assert_raises(TypeError, nditer, np.float32(2), [],
[['readonly']],
casting='same_kind',
op_dtypes=[np.dtype('i4')])
def test_iter_object_arrays_basic():
# Check that object arrays work
obj = {'a':3,'b':'d'}
a = np.array([[1, 2, 3], None, obj, None], dtype='O')
if HAS_REFCOUNT:
rc = sys.getrefcount(obj)
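    # (rc is the baseline refcount of obj; later checks verify the iterator
    # releases all references it takes.)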
# Need to allow references for object arrays
assert_raises(TypeError, nditer, a)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(obj), rc)
i = nditer(a, ['refs_ok'], ['readonly'])
vals = [x_[()] for x_ in i]
assert_equal(np.array(vals, dtype='O'), a)
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(obj), rc)
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readonly'], order='C')
assert_(i.iterationneedsapi)
vals = [x_[()] for x_ in i]
assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F'))
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(obj), rc)
i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
['readwrite'], order='C')
with i:
for x in i:
x[...] = None
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_(sys.getrefcount(obj) == rc-1)
assert_equal(a, np.array([None]*4, dtype='O'))
def test_iter_object_arrays_conversions():
# Conversions to/from objects
a = np.arange(6, dtype='O')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
with i:
for x in i:
x[...] += 1
assert_equal(a, np.arange(6)+1)
a = np.arange(6, dtype='i4')
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
with i:
for x in i:
x[...] += 1
assert_equal(a, np.arange(6)+1)
# Non-contiguous object array
a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')])
a = a['a']
a[:] = np.arange(6)
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='i4')
with i:
for x in i:
x[...] += 1
assert_equal(a, np.arange(6)+1)
    # Non-contiguous value array
a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')])
a = a['a']
a[:] = np.arange(6) + 98172488
i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
casting='unsafe', op_dtypes='O')
with i:
ob = i[0][()]
if HAS_REFCOUNT:
rc = sys.getrefcount(ob)
for x in i:
x[...] += 1
if HAS_REFCOUNT:
assert_(sys.getrefcount(ob) == rc-1)
assert_equal(a, np.arange(6)+98172489)
def test_iter_common_dtype():
# Check that the iterator finds a common data type correctly
i = nditer([array([3], dtype='f4'), array([0], dtype='f8')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('f8'))
assert_equal(i.dtypes[1], np.dtype('f8'))
i = nditer([array([3], dtype='i4'), array([0], dtype='f4')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('f8'))
assert_equal(i.dtypes[1], np.dtype('f8'))
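    # 0-d operands use value-based promotion, so the f8 scalar below does
    # not force the common dtype up from f4.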
i = nditer([array([3], dtype='f4'), array(0, dtype='f8')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='same_kind')
assert_equal(i.dtypes[0], np.dtype('f4'))
assert_equal(i.dtypes[1], np.dtype('f4'))
i = nditer([array([3], dtype='u4'), array(0, dtype='i4')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('u4'))
assert_equal(i.dtypes[1], np.dtype('u4'))
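    # A negative i4 scalar cannot be represented as u4, so promotion
    # falls back to i8.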
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')],
['common_dtype'],
[['readonly', 'copy']]*2,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('i8'))
assert_equal(i.dtypes[1], np.dtype('i8'))
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'),
array([2j], dtype='c8'), array([9], dtype='f8')],
['common_dtype'],
[['readonly', 'copy']]*4,
casting='safe')
assert_equal(i.dtypes[0], np.dtype('c16'))
assert_equal(i.dtypes[1], np.dtype('c16'))
assert_equal(i.dtypes[2], np.dtype('c16'))
assert_equal(i.dtypes[3], np.dtype('c16'))
assert_equal(i.value, (3, -12, 2j, 9))
# When allocating outputs, other outputs aren't factored in
i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [],
[['readonly', 'copy'],
['writeonly', 'allocate'],
['writeonly']],
casting='safe')
assert_equal(i.dtypes[0], np.dtype('i4'))
assert_equal(i.dtypes[1], np.dtype('i4'))
assert_equal(i.dtypes[2], np.dtype('c16'))
# But, if common data types are requested, they are
i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')],
['common_dtype'],
[['readonly', 'copy'],
['writeonly', 'allocate'],
['writeonly']],
casting='safe')
assert_equal(i.dtypes[0], np.dtype('c16'))
assert_equal(i.dtypes[1], np.dtype('c16'))
assert_equal(i.dtypes[2], np.dtype('c16'))
def test_iter_copy_if_overlap():
# Ensure the iterator makes copies on read/write overlap, if requested
# Copy not needed, 1 op
for flag in ['readonly', 'writeonly', 'readwrite']:
a = arange(10)
i = nditer([a], ['copy_if_overlap'], [[flag]])
with i:
assert_(i.operands[0] is a)
# Copy needed, 2 ops, read-write overlap
x = arange(10)
a = x[1:]
b = x[:-1]
with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
assert_(not np.shares_memory(*i.operands))
# Copy not needed with elementwise, 2 ops, exactly same arrays
x = arange(10)
a = x
b = x
i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
['readwrite', 'overlap_assume_elementwise']])
with i:
assert_(i.operands[0] is a and i.operands[1] is b)
with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
# Copy not needed, 2 ops, no overlap
x = arange(10)
a = x[::2]
b = x[1::2]
i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
assert_(i.operands[0] is a and i.operands[1] is b)
# Copy needed, 2 ops, read-write overlap
x = arange(4, dtype=np.int8)
a = x[3:]
b = x.view(np.int32)[:1]
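    # (a is the last byte of x and b views the first four bytes as int32,
    # so the two overlap on the final byte.)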
with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
assert_(not np.shares_memory(*i.operands))
# Copy needed, 3 ops, read-write overlap
for flag in ['writeonly', 'readwrite']:
x = np.ones([10, 10])
a = x
b = x.T
c = x
with nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['readonly'], [flag]]) as i:
a2, b2, c2 = i.operands
assert_(not np.shares_memory(a2, c2))
assert_(not np.shares_memory(b2, c2))
# Copy not needed, 3 ops, read-only overlap
x = np.ones([10, 10])
a = x
b = x.T
c = x
i = nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['readonly'], ['readonly']])
a2, b2, c2 = i.operands
assert_(a is a2)
assert_(b is b2)
assert_(c is c2)
# Copy not needed, 3 ops, read-only overlap
x = np.ones([10, 10])
a = x
b = np.ones([10, 10])
c = x.T
i = nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['writeonly'], ['readonly']])
a2, b2, c2 = i.operands
assert_(a is a2)
assert_(b is b2)
assert_(c is c2)
# Copy not needed, 3 ops, write-only overlap
x = np.arange(7)
a = x[:3]
b = x[3:6]
c = x[4:7]
i = nditer([a, b, c], ['copy_if_overlap'],
[['readonly'], ['writeonly'], ['writeonly']])
a2, b2, c2 = i.operands
assert_(a is a2)
assert_(b is b2)
assert_(c is c2)
def test_iter_op_axes():
# Check that custom axes work
# Reverse the axes
a = arange(6).reshape(2, 3)
i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]])
assert_(all([x == y for (x, y) in i]))
a = arange(24).reshape(2, 3, 4)
i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None])
assert_(all([x == y for (x, y) in i]))
# Broadcast 1D to any dimension
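    # (A -1 or np.newaxis entry in op_axes inserts a broadcast dimension
    # for that operand.)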
a = arange(1, 31).reshape(2, 3, 5)
b = arange(1, 3)
i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]])
assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel())
b = arange(1, 4)
i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]])
assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel())
b = arange(1, 6)
i = nditer([a, b], [], [['readonly']]*2,
op_axes=[None, [np.newaxis, np.newaxis, 0]])
assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel())
# Inner product-style broadcasting
a = arange(24).reshape(2, 3, 4)
b = arange(40).reshape(5, 2, 4)
i = nditer([a, b], ['multi_index'], [['readonly']]*2,
op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]])
assert_equal(i.shape, (2, 3, 5, 2))
# Matrix product-style broadcasting
a = arange(12).reshape(3, 4)
b = arange(20).reshape(4, 5)
i = nditer([a, b], ['multi_index'], [['readonly']]*2,
op_axes=[[0, -1], [-1, 1]])
assert_equal(i.shape, (3, 5))
def test_iter_op_axes_errors():
    # Check that custom axes throw errors for bad inputs
# Wrong number of items in op_axes
a = arange(6).reshape(2, 3)
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0], [1], [0]])
# Out of bounds items in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[2, 1], [0, 1]])
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [2, -1]])
# Duplicate items in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 0], [0, 1]])
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [1, 1]])
    # Different-length axis lists in op_axes
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [0, 1, 0]])
# Non-broadcastable dimensions in the result
assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
op_axes=[[0, 1], [1, 0]])
def test_iter_copy():
# Check that copying the iterator works correctly
a = arange(24).reshape(2, 3, 4)
# Simple iterator
i = nditer(a)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterindex = 3
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
# Buffered iterator
i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterindex = 3
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterrange = (3, 9)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
i.iterrange = (2, 18)
next(i)
next(i)
j = i.copy()
assert_equal([x[()] for x in i], [x[()] for x in j])
# Casting iterator
with nditer(a, ['buffered'], order='F', casting='unsafe',
op_dtypes='f8', buffersize=5) as i:
j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
a = arange(24, dtype='<i4').reshape(2, 3, 4)
with nditer(a, ['buffered'], order='F', casting='unsafe',
op_dtypes='>f8', buffersize=5) as i:
j = i.copy()
assert_equal([x[()] for x in j], a.ravel(order='F'))
@pytest.mark.parametrize("dtype", np.typecodes["All"])
@pytest.mark.parametrize("loop_dtype", np.typecodes["All"])
@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
def test_iter_copy_casts(dtype, loop_dtype):
# Ensure the dtype is never flexible:
if loop_dtype.lower() == "m":
loop_dtype = loop_dtype + "[ms]"
elif np.dtype(loop_dtype).itemsize == 0:
loop_dtype = loop_dtype + "50"
# Make things a bit more interesting by requiring a byte-swap as well:
arr = np.ones(1000, dtype=np.dtype(dtype).newbyteorder())
try:
expected = arr.astype(loop_dtype)
except Exception:
# Some casts are not possible, do not worry about them
return
it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
op_dtypes=[loop_dtype], casting="unsafe")
if np.issubdtype(np.dtype(loop_dtype), np.number):
        # Casting to strings may be strange; for simple numeric dtypes,
        # sanity-check the expected values rather than relying on the
        # cast being correct:
assert_array_equal(expected, np.ones(1000, dtype=loop_dtype))
it_copy = it.copy()
res = next(it)
del it
res_copy = next(it_copy)
del it_copy
assert_array_equal(res, expected)
assert_array_equal(res_copy, expected)
def test_iter_copy_casts_structured():
# Test a complicated structured dtype for casting, as it requires
# both multiple steps and a more complex casting setup.
# Includes a structured -> unstructured (any to object), and many other
# casts, which cause this to require all steps in the casting machinery
# one level down as well as the iterator copy (which uses NpyAuxData clone)
in_dtype = np.dtype([("a", np.dtype("i,")),
("b", np.dtype(">i,<i,>d,S17,>d,(3)f,O,i1"))])
out_dtype = np.dtype([("a", np.dtype("O")),
("b", np.dtype(">i,>i,S17,>d,>U3,(3)d,i1,O"))])
arr = np.ones(1000, dtype=in_dtype)
it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
op_dtypes=[out_dtype], casting="unsafe")
it_copy = it.copy()
res1 = next(it)
del it
res2 = next(it_copy)
del it_copy
expected = arr["a"].astype(out_dtype["a"])
assert_array_equal(res1["a"], expected)
assert_array_equal(res2["a"], expected)
for field in in_dtype["b"].names:
# Note that the .base avoids the subarray field
expected = arr["b"][field].astype(out_dtype["b"][field].base)
assert_array_equal(res1["b"][field], expected)
assert_array_equal(res2["b"][field], expected)
def test_iter_allocate_output_simple():
# Check that the iterator will properly allocate outputs
# Simple case
a = arange(6)
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].dtype, np.dtype('f4'))
def test_iter_allocate_output_buffered_readwrite():
# Allocated output with buffering + delay_bufalloc
a = arange(6)
i = nditer([a, None], ['buffered', 'delay_bufalloc'],
[['readonly'], ['allocate', 'readwrite']])
with i:
i.operands[1][:] = 1
i.reset()
for x in i:
x[1][...] += x[0][...]
assert_equal(i.operands[1], a+1)
def test_iter_allocate_output_itorder():
# The allocated output should match the iteration order
# C-order input, best iteration order
a = arange(6, dtype='i4').reshape(2, 3)
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].strides, a.strides)
assert_equal(i.operands[1].dtype, np.dtype('f4'))
# F-order input, best iteration order
a = arange(24, dtype='i4').reshape(2, 3, 4).T
i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].strides, a.strides)
assert_equal(i.operands[1].dtype, np.dtype('f4'))
# Non-contiguous input, C iteration order
a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1)
i = nditer([a, None], [],
[['readonly'], ['writeonly', 'allocate']],
order='C',
op_dtypes=[None, np.dtype('f4')])
assert_equal(i.operands[1].shape, a.shape)
assert_equal(i.operands[1].strides, (32, 16, 4))
assert_equal(i.operands[1].dtype, np.dtype('f4'))
def test_iter_allocate_output_opaxes():
# Specifying op_axes should work
a = arange(24, dtype='i4').reshape(2, 3, 4)
i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']],
op_dtypes=[np.dtype('u4'), None],
op_axes=[[1, 2, 0], None])
assert_equal(i.operands[0].shape, (4, 2, 3))
assert_equal(i.operands[0].strides, (4, 48, 16))
assert_equal(i.operands[0].dtype, np.dtype('u4'))
def test_iter_allocate_output_types_promotion():
# Check type promotion of automatic outputs
i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f8'))
i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f8'))
i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('f4'))
i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('u4'))
i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [],
[['readonly']]*2+[['writeonly', 'allocate']])
assert_equal(i.dtypes[2], np.dtype('i8'))
def test_iter_allocate_output_types_byte_order():
# Verify the rules for byte order changes
# When there's just one input, the output type exactly matches
a = array([3], dtype='u4').newbyteorder()
i = nditer([a, None], [],
[['readonly'], ['writeonly', 'allocate']])
assert_equal(i.dtypes[0], i.dtypes[1])
# With two or more inputs, the output type is in native byte order
i = nditer([a, a, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_(i.dtypes[0] != i.dtypes[2])
assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2])
def test_iter_allocate_output_types_scalar():
# If the inputs are all scalars, the output should be a scalar
i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [],
[['writeonly', 'allocate']] + [['readonly']]*4)
assert_equal(i.operands[0].dtype, np.dtype('complex128'))
assert_equal(i.operands[0].ndim, 0)
def test_iter_allocate_output_subtype():
# Make sure that the subtype with priority wins
class MyNDArray(np.ndarray):
__array_priority__ = 15
# subclass vs ndarray
a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = np.arange(4).reshape(2, 2).T
i = nditer([a, b, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_equal(type(a), type(i.operands[2]))
assert_(type(b) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
# If subtypes are disabled, we should get back an ndarray.
i = nditer([a, b, None], [],
[['readonly'], ['readonly'],
['writeonly', 'allocate', 'no_subtype']])
assert_equal(type(b), type(i.operands[2]))
assert_(type(a) is not type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
def test_iter_allocate_output_errors():
# Check that the iterator will throw errors for bad output allocations
# Need an input if no output data type is specified
a = arange(6)
assert_raises(TypeError, nditer, [a, None], [],
[['writeonly'], ['writeonly', 'allocate']])
# Allocated output should be flagged for writing
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['allocate', 'readonly']])
# Allocated output can't have buffering without delayed bufalloc
assert_raises(ValueError, nditer, [a, None], ['buffered'],
['allocate', 'readwrite'])
# Must specify at least one input
assert_raises(ValueError, nditer, [None, None], [],
[['writeonly', 'allocate'],
['writeonly', 'allocate']],
op_dtypes=[np.dtype('f4'), np.dtype('f4')])
# If using op_axes, must specify all the axes
a = arange(24, dtype='i4').reshape(2, 3, 4)
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, np.newaxis, 1]])
# If using op_axes, the axes must be within bounds
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, 3, 1]])
# If using op_axes, there can't be duplicates
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['writeonly', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, 2, 1, 0]])
    # In a reduction, not every axis needs to be specified, but leaving a
    # hole in op_axes is an error.
a = arange(24, dtype='i4').reshape(2, 3, 4)
assert_raises(ValueError, nditer, [a, None], ["reduce_ok"],
[['readonly'], ['readwrite', 'allocate']],
op_dtypes=[None, np.dtype('f4')],
op_axes=[None, [0, np.newaxis, 2]])
def test_iter_remove_axis():
a = arange(24).reshape(2, 3, 4)
i = nditer(a, ['multi_index'])
i.remove_axis(1)
assert_equal([x for x in i], a[:, 0,:].ravel())
a = a[::-1,:,:]
i = nditer(a, ['multi_index'])
i.remove_axis(0)
assert_equal([x for x in i], a[0,:,:].ravel())
def test_iter_remove_multi_index_inner_loop():
# Check that removing multi-index support works
a = arange(24).reshape(2, 3, 4)
i = nditer(a, ['multi_index'])
assert_equal(i.ndim, 3)
assert_equal(i.shape, (2, 3, 4))
assert_equal(i.itviews[0].shape, (2, 3, 4))
# Removing the multi-index tracking causes all dimensions to coalesce
before = [x for x in i]
i.remove_multi_index()
after = [x for x in i]
assert_equal(before, after)
assert_equal(i.ndim, 1)
assert_raises(ValueError, lambda i:i.shape, i)
assert_equal(i.itviews[0].shape, (24,))
    # Enabling the external loop hands over the coalesced dimension whole,
    # so there's just one inner-loop iteration
i.reset()
assert_equal(i.itersize, 24)
assert_equal(i[0].shape, tuple())
i.enable_external_loop()
assert_equal(i.itersize, 24)
assert_equal(i[0].shape, (24,))
assert_equal(i.value, arange(24))
def test_iter_iterindex():
# Make sure iterindex works
buffersize = 5
a = arange(24).reshape(4, 3, 2)
for flags in ([], ['buffered']):
i = nditer(a, flags, buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 2
assert_equal(iter_iterindices(i), list(range(2, 24)))
i = nditer(a, flags, order='F', buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 5
assert_equal(iter_iterindices(i), list(range(5, 24)))
i = nditer(a[::-1], flags, order='F', buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 9
assert_equal(iter_iterindices(i), list(range(9, 24)))
i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 13
assert_equal(iter_iterindices(i), list(range(13, 24)))
i = nditer(a[::1, ::-1], flags, buffersize=buffersize)
assert_equal(iter_iterindices(i), list(range(24)))
i.iterindex = 23
assert_equal(iter_iterindices(i), list(range(23, 24)))
i.reset()
i.iterindex = 2
assert_equal(iter_iterindices(i), list(range(2, 24)))
def test_iter_iterrange():
# Make sure getting and resetting the iterrange works
buffersize = 5
a = arange(24, dtype='i4').reshape(4, 3, 2)
a_fort = a.ravel(order='F')
i = nditer(a, ['ranged'], ['readonly'], order='F',
buffersize=buffersize)
assert_equal(i.iterrange, (0, 24))
assert_equal([x[()] for x in i], a_fort)
for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
i.iterrange = r
assert_equal(i.iterrange, r)
assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F',
op_dtypes='f8', buffersize=buffersize)
assert_equal(i.iterrange, (0, 24))
assert_equal([x[()] for x in i], a_fort)
for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
i.iterrange = r
assert_equal(i.iterrange, r)
assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
def get_array(i):
val = np.array([], dtype='f8')
for x in i:
val = np.concatenate((val, x))
return val
i = nditer(a, ['ranged', 'buffered', 'external_loop'],
['readonly'], order='F',
op_dtypes='f8', buffersize=buffersize)
assert_equal(i.iterrange, (0, 24))
assert_equal(get_array(i), a_fort)
for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
i.iterrange = r
assert_equal(i.iterrange, r)
assert_equal(get_array(i), a_fort[r[0]:r[1]])
def test_iter_buffering():
# Test buffering with several buffer sizes and types
arrays = []
# F-order swapped array
arrays.append(np.arange(24,
dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap())
# Contiguous 1-dimensional array
arrays.append(np.arange(10, dtype='f4'))
# Unaligned array
a = np.zeros((4*16+1,), dtype='i1')[1:]
a.dtype = 'i4'
a[:] = np.arange(16, dtype='i4')
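    # (same off-by-one-byte misalignment trick as in test_iter_nbo_align_contig)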
arrays.append(a)
# 4-D F-order array
arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T)
for a in arrays:
for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024):
vals = []
i = nditer(a, ['buffered', 'external_loop'],
[['readonly', 'nbo', 'aligned']],
order='C',
casting='equiv',
buffersize=buffersize)
while not i.finished:
assert_(i[0].size <= buffersize)
vals.append(i[0].copy())
i.iternext()
assert_equal(np.concatenate(vals), a.ravel(order='C'))
def test_iter_write_buffering():
# Test that buffering of writes is working
# F-order swapped array
a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap()
i = nditer(a, ['buffered'],
[['readwrite', 'nbo', 'aligned']],
casting='equiv',
order='C',
buffersize=16)
x = 0
with i:
while not i.finished:
i[0] = x
x += 1
i.iternext()
assert_equal(a.ravel(order='C'), np.arange(24))
def test_iter_buffering_delayed_alloc():
# Test that delaying buffer allocation works
a = np.arange(6)
b = np.arange(1, dtype='f4')
i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'],
['readwrite'],
casting='unsafe',
op_dtypes='f4')
assert_(i.has_delayed_bufalloc)
assert_raises(ValueError, lambda i:i.multi_index, i)
assert_raises(ValueError, lambda i:i[0], i)
assert_raises(ValueError, lambda i:i[0:2], i)
def assign_iter(i):
i[0] = 0
assert_raises(ValueError, assign_iter, i)
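    # reset() performs the delayed buffer allocation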
i.reset()
assert_(not i.has_delayed_bufalloc)
assert_equal(i.multi_index, (0,))
with i:
assert_equal(i[0], 0)
i[1] = 1
assert_equal(i[0:2], [0, 1])
assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
def test_iter_buffered_cast_simple():
# Test that buffering can handle a simple cast
a = np.arange(10, dtype='f4')
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('f8')],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
def test_iter_buffered_cast_byteswapped():
# Test that buffering can handle a cast which requires swap->cast->swap
a = np.arange(10, dtype='f4').newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('f8').newbyteorder()],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f4'))
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning)
a = np.arange(10, dtype='f8').newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='unsafe',
op_dtypes=[np.dtype('c8').newbyteorder()],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='f8'))
def test_iter_buffered_cast_byteswapped_complex():
# Test that buffering can handle a cast which requires swap->cast->copy
a = np.arange(10, dtype='c8').newbyteorder().byteswap()
a += 2j
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype='c8')
a += 2j
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('c16').newbyteorder()],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
a += 2j
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('c16')],
buffersize=3)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
i = nditer(a, ['buffered', 'external_loop'],
[['readwrite', 'nbo', 'aligned']],
casting='same_kind',
op_dtypes=[np.dtype('f4')],
buffersize=7)
with i:
for v in i:
v[...] *= 2
assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
def test_iter_buffered_cast_structured_type():
# Tests buffering of structured types
# simple -> struct type (duplicates the value)
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
a = np.arange(3, dtype='f4') + 0.5
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt)
vals = [np.array(x) for x in i]
assert_equal(vals[0]['a'], 0.5)
assert_equal(vals[0]['b'], 0)
assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
assert_equal(vals[0]['d'], 0.5)
assert_equal(vals[1]['a'], 1.5)
assert_equal(vals[1]['b'], 1)
assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
assert_equal(vals[1]['d'], 1.5)
assert_equal(vals[0].dtype, np.dtype(sdt))
# object -> struct type
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
a = np.zeros((3,), dtype='O')
a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5)
a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5)
a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5)
if HAS_REFCOUNT:
rc = sys.getrefcount(a[0])
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt)
vals = [x.copy() for x in i]
assert_equal(vals[0]['a'], 0.5)
assert_equal(vals[0]['b'], 0)
assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
assert_equal(vals[0]['d'], 0.5)
assert_equal(vals[1]['a'], 1.5)
assert_equal(vals[1]['b'], 1)
assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
assert_equal(vals[1]['d'], 1.5)
assert_equal(vals[0].dtype, np.dtype(sdt))
vals, i, x = [None]*3
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(a[0]), rc)
# single-field struct type -> simple
sdt = [('a', 'f4')]
a = np.array([(5.5,), (8,)], dtype=sdt)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')
assert_equal([x_[()] for x_ in i], [5, 8])
# make sure multi-field struct type -> simple doesn't work
sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
assert_raises(TypeError, lambda: (
nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')))
# struct type -> struct type (field-wise copy)
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
assert_equal([np.array(x_) for x_ in i],
[np.array((1, 2, 3), dtype=sdt2),
np.array((4, 5, 6), dtype=sdt2)])
def test_iter_buffered_cast_structured_type_failure_with_cleanup():
# make sure struct type -> struct type with different
# number of fields fails
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('b', 'O'), ('a', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
for intent in ["readwrite", "readonly", "writeonly"]:
# This test was initially designed to test an error at a different
        # place, but will now raise earlier due to the cast not being possible:
# `assert np.can_cast(a.dtype, sdt2, casting="unsafe")` fails.
# Without a faulty DType, there is probably no reliable
# way to get the initial tested behaviour.
simple_arr = np.array([1, 2], dtype="i,i") # requires clean up
with pytest.raises(TypeError):
nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent],
casting='unsafe', op_dtypes=["f,f", sdt2])
def test_buffered_cast_error_paths():
with pytest.raises(ValueError):
        # The S1 string input cannot be cast to the requested `i` (int) buffer
np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"],
casting="unsafe", flags=["buffered"])
    # The `S1` buffer contents are cast back into the `i` output on writeback
it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
with pytest.raises(ValueError):
with it:
buf = next(it)
buf[...] = "a" # cannot be converted to int.
@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.")
def test_buffered_cast_error_paths_unraisable():
    # The following gives an unraisable error, which pytest sometimes
    # captures (depending on the Python and/or pytest version). With
    # Python >= 3.8 this can probably be changed to check for
    # pytest.PytestUnraisableExceptionWarning:
code = textwrap.dedent("""
import numpy as np
it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
buf = next(it)
buf[...] = "a"
del buf, it # Flushing only happens during deallocate right now.
""")
res = subprocess.check_output([sys.executable, "-c", code],
stderr=subprocess.STDOUT, text=True)
assert "ValueError" in res
def test_iter_buffered_cast_subarray():
# Tests buffering of subarrays
# one element -> many (copies it to all)
sdt1 = [('a', 'f4')]
sdt2 = [('a', 'f8', (3, 2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
for x, count in zip(i, list(range(6))):
assert_(np.all(x['a'] == count))
# one element -> many -> back (copies it to all)
sdt1 = [('a', 'O', (1, 1))]
sdt2 = [('a', 'O', (3, 2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
with i:
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_(np.all(x['a'] == count))
x['a'][0] += 2
count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
# many -> one element -> back (copies just element 0)
sdt1 = [('a', 'O', (3, 2, 2))]
sdt2 = [('a', 'O', (1,))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
casting='unsafe',
op_dtypes=sdt2)
with i:
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], count)
x['a'] += 2
count += 1
assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
# many -> one element -> back (copies just element 0)
sdt1 = [('a', 'f8', (3, 2, 2))]
sdt2 = [('a', 'O', (1,))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], count)
count += 1
# many -> one element (copies just element 0)
sdt1 = [('a', 'O', (3, 2, 2))]
sdt2 = [('a', 'f4', (1,))]
a = np.zeros((6,), dtype=sdt1)
a['a'][:, 0, 0, 0] = np.arange(6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], count)
count += 1
# many -> matching shape (straightforward copy)
sdt1 = [('a', 'O', (3, 2, 2))]
sdt2 = [('a', 'f4', (3, 2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], a[count]['a'])
count += 1
# vector -> smaller vector (truncates)
sdt1 = [('a', 'f8', (6,))]
sdt2 = [('a', 'f4', (2,))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*6).reshape(6, 6)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'], a[count]['a'][:2])
count += 1
# vector -> bigger vector (pads with zeros)
sdt1 = [('a', 'f8', (2,))]
sdt2 = [('a', 'f4', (6,))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2).reshape(6, 2)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][:2], a[count]['a'])
assert_equal(x['a'][2:], [0, 0, 0, 0])
count += 1
# vector -> matrix (broadcasts)
sdt1 = [('a', 'f8', (2,))]
sdt2 = [('a', 'f4', (2, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2).reshape(6, 2)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][0], a[count]['a'])
assert_equal(x['a'][1], a[count]['a'])
count += 1
# vector -> matrix (broadcasts and zero-pads)
sdt1 = [('a', 'f8', (2, 1))]
sdt2 = [('a', 'f4', (3, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2).reshape(6, 2, 1)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
assert_equal(x['a'][:2, 1], a[count]['a'][:, 0])
        assert_equal(x['a'][2, :], [0, 0])
count += 1
# matrix -> matrix (truncates and zero-pads)
sdt1 = [('a', 'f8', (2, 3))]
sdt2 = [('a', 'f4', (3, 2))]
a = np.zeros((6,), dtype=sdt1)
a['a'] = np.arange(6*2*3).reshape(6, 2, 3)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
count = 0
for x in i:
assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
assert_equal(x['a'][:2, 1], a[count]['a'][:, 1])
        assert_equal(x['a'][2, :], [0, 0])
count += 1
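# A minimal, standalone sketch (ours, not part of the upstream suite) of the
# subarray cast rules exercised above: extra destination elements are
# zero-filled and extra source elements are dropped. The helper name is
# hypothetical.
def _subarray_cast_pad_sketch():
    sdt_src = [('a', 'f8', (2,))]
    sdt_dst = [('a', 'f4', (6,))]
    a = np.zeros(1, dtype=sdt_src)
    a['a'] = [1., 2.]
    it = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
                casting='unsafe', op_dtypes=sdt_dst)
    # Expected: the two source values followed by four zeros.
    return next(it)['a']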
def test_iter_buffering_badwriteback():
# Writing back from a buffer cannot combine elements
    # a needs write buffering, but has a broadcast dimension
a = np.arange(6).reshape(2, 3, 1)
b = np.arange(12).reshape(2, 3, 2)
assert_raises(ValueError, nditer, [a, b],
['buffered', 'external_loop'],
[['readwrite'], ['writeonly']],
order='C')
# But if a is readonly, it's fine
nditer([a, b], ['buffered', 'external_loop'],
[['readonly'], ['writeonly']],
order='C')
# If a has just one element, it's fine too (constant 0 stride, a reduction)
a = np.arange(1).reshape(1, 1, 1)
nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'],
[['readwrite'], ['writeonly']],
order='C')
# check that it fails on other dimensions too
a = np.arange(6).reshape(1, 3, 2)
assert_raises(ValueError, nditer, [a, b],
['buffered', 'external_loop'],
[['readwrite'], ['writeonly']],
order='C')
a = np.arange(4).reshape(2, 1, 2)
assert_raises(ValueError, nditer, [a, b],
['buffered', 'external_loop'],
[['readwrite'], ['writeonly']],
order='C')
def test_iter_buffering_string():
# Safe casting disallows shrinking strings
a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_)
assert_equal(a.dtype, np.dtype('S4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='S2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
assert_equal(i[0], b'abc')
assert_equal(i[0].dtype, np.dtype('S6'))
a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_)
assert_equal(a.dtype, np.dtype('U4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='U2')
i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
assert_equal(i[0], u'abc')
assert_equal(i[0].dtype, np.dtype('U6'))
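# Hedged sketch (ours): with an explicit unsafe cast, shrinking a string dtype
# is allowed and simply truncates; the function name is hypothetical.
def _string_shrink_sketch():
    a = np.array(['abcd'], dtype='S4')
    it = nditer(a, ['buffered'], ['readonly'], casting='unsafe',
                op_dtypes='S2')
    return next(it)  # expected: b'ab'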
def test_iter_buffering_growinner():
# Test that the inner loop grows when no buffering is needed
a = np.arange(30)
i = nditer(a, ['buffered', 'growinner', 'external_loop'],
buffersize=5)
# Should end up with just one inner loop here
assert_equal(i[0].size, a.size)
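# Companion sketch (ours, assuming the documented 'growinner' semantics):
# without the flag, each external-loop chunk is capped at ``buffersize`` even
# when no copying is actually needed, so the same array comes back in chunks.
def _no_growinner_sketch():
    a = np.arange(30)
    it = nditer(a, ['buffered', 'external_loop'], buffersize=5)
    return [x.size for x in it]  # expected: [5, 5, 5, 5, 5, 5]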
@pytest.mark.slow
def test_iter_buffered_reduce_reuse():
# large enough array for all views, including negative strides.
a = np.arange(2*3**5)[3**5:3**5+1]
flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
op_flags = [('readonly',), ('readwrite', 'allocate')]
op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
# wrong dtype to force buffering
op_dtypes = [float, a.dtype]
def get_params():
for xs in range(-3**2, 3**2 + 1):
for ys in range(xs, 3**2 + 1):
for op_axes in op_axes_list:
                    # The last stride is the inner, reduced stride, so its
                    # exact value is not important for this test.
strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides)
for skip in [0, 1]:
yield arr, op_axes, skip
for arr, op_axes, skip in get_params():
nditer2 = np.nditer([arr.copy(), None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
op_dtypes=op_dtypes)
with nditer2:
nditer2.operands[-1][...] = 0
nditer2.reset()
nditer2.iterindex = skip
for (a2_in, b2_in) in nditer2:
b2_in += a2_in.astype(np.int_)
comp_res = nditer2.operands[-1]
for bufsize in range(0, 3**3):
nditer1 = np.nditer([arr, None],
op_axes=op_axes, flags=flags, op_flags=op_flags,
buffersize=bufsize, op_dtypes=op_dtypes)
with nditer1:
nditer1.operands[-1][...] = 0
nditer1.reset()
nditer1.iterindex = skip
for (a1_in, b1_in) in nditer1:
b1_in += a1_in.astype(np.int_)
res = nditer1.operands[-1]
assert_array_equal(res, comp_res)
def test_iter_no_broadcast():
# Test that the no_broadcast flag works
a = np.arange(24).reshape(2, 3, 4)
b = np.arange(6).reshape(2, 3, 1)
c = np.arange(12).reshape(3, 4)
nditer([a, b, c], [],
[['readonly', 'no_broadcast'],
['readonly'], ['readonly']])
assert_raises(ValueError, nditer, [a, b, c], [],
[['readonly'], ['readonly', 'no_broadcast'], ['readonly']])
assert_raises(ValueError, nditer, [a, b, c], [],
[['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
class TestIterNested:
def test_basic(self):
# Test nested iteration basic usage
a = arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[0], [1, 2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
def test_reorder(self):
        # Test nested iteration with reordered axes
a = arange(12).reshape(2, 3, 2)
# In 'K' order (default), it gets reordered
i, j = np.nested_iters(a, [[0], [2, 1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, it doesn't
i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
def test_flip_axes(self):
# Test nested iteration with negative axes
a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1]
# In 'K' order (default), the axes all get flipped
i, j = np.nested_iters(a, [[0], [1, 2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, flipping axes is disabled
i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
vals = [list(j) for _ in i]
assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
def test_broadcast(self):
# Test nested iteration with broadcasting
a = arange(2).reshape(2, 1)
b = arange(3).reshape(1, 3)
i, j = np.nested_iters([a, b], [[0], [1]])
vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
i, j = np.nested_iters([a, b], [[1], [0]])
vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
def test_dtype_copy(self):
# Test nested iteration with a copy to change dtype
# copy
a = arange(6, dtype='i4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readonly', 'copy'],
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
vals = None
# writebackifcopy - using context manager
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
casting='same_kind',
op_dtypes='f8')
with i, j:
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[0, 1, 2], [3, 4, 5]])
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
# writebackifcopy - using close()
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
casting='same_kind',
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[0, 1, 2], [3, 4, 5]])
i.close()
j.close()
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
def test_dtype_buffered(self):
# Test nested iteration with buffering to change dtype
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
flags=['buffered'],
op_flags=['readwrite'],
casting='same_kind',
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
def test_0d(self):
a = np.arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[], [1, 0, 2]])
vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0, 2], []])
vals = [list(j) for _ in i]
assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
vals = []
for x in i:
for y in j:
vals.append([z for z in k])
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
def test_iter_nested_iters_dtype_buffered(self):
# Test nested iteration with buffering to change dtype
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
flags=['buffered'],
op_flags=['readwrite'],
casting='same_kind',
op_dtypes='f8')
with i, j:
assert_equal(j[0].dtype, np.dtype('f8'))
for x in i:
for y in j:
y[...] += 1
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
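# Minimal sketch (ours) of the nested_iters pattern used throughout the class
# above: the outer iterator advances the axes in its first list, the inner one
# sweeps the remaining axes.
def _nested_iters_sketch():
    a = np.arange(6).reshape(2, 3)
    i, j = np.nested_iters(a, [[0], [1]])
    return [list(j) for _ in i]  # expected: [[0, 1, 2], [3, 4, 5]]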
def test_iter_reduction_error():
a = np.arange(6)
assert_raises(ValueError, nditer, [a, None], [],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0], [-1]])
a = np.arange(6).reshape(2, 3)
assert_raises(ValueError, nditer, [a, None], ['external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0, 1], [-1, -1]])
def test_iter_reduction():
# Test doing reductions with the iterator
a = np.arange(6)
i = nditer([a, None], ['reduce_ok'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0], [-1]])
    # Need to initialize the output operand to the additive identity
with i:
i.operands[1][...] = 0
# Do the reduction
for x, y in i:
y[...] += x
# Since no axes were specified, should have allocated a scalar
assert_equal(i.operands[1].ndim, 0)
assert_equal(i.operands[1], np.sum(a))
a = np.arange(6).reshape(2, 3)
i = nditer([a, None], ['reduce_ok', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[0, 1], [-1, -1]])
    # Need to initialize the output operand to the additive identity
with i:
i.operands[1][...] = 0
# Reduction shape/strides for the output
assert_equal(i[1].shape, (6,))
assert_equal(i[1].strides, (0,))
# Do the reduction
for x, y in i:
            # Use a for loop instead of ``y[...] += x``
            # (equivalent to ``y[...] = y[...].copy() + x``),
            # because y has the zero strides we rely on for the reduction
for j in range(len(y)):
y[j] += x[j]
# Since no axes were specified, should have allocated a scalar
assert_equal(i.operands[1].ndim, 0)
assert_equal(i.operands[1], np.sum(a))
# This is a tricky reduction case for the buffering double loop
# to handle
a = np.ones((2, 3, 5))
it1 = nditer([a, None], ['reduce_ok', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, [0, -1, 1]])
it2 = nditer([a, None], ['reduce_ok', 'external_loop',
'buffered', 'delay_bufalloc'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[None, [0, -1, 1]], buffersize=10)
with it1, it2:
it1.operands[1].fill(0)
it2.operands[1].fill(0)
it2.reset()
for x in it1:
x[1][...] += x[0]
for x in it2:
x[1][...] += x[0]
assert_equal(it1.operands[1], it2.operands[1])
assert_equal(it2.operands[1].sum(), a.size)
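# Condensed sketch (ours) of the reduction recipe above: map the reduced axis
# to -1 in ``op_axes``, zero the allocated output, then accumulate into it.
def _row_sum_sketch():
    a = np.arange(6).reshape(2, 3)
    it = nditer([a, None], ['reduce_ok'],
                [['readonly'], ['readwrite', 'allocate']],
                op_axes=[[0, 1], [0, -1]])
    with it:
        it.operands[1][...] = 0
        for x, y in it:
            y[...] += x
        return it.operands[1].copy()  # expected: [3, 12]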
def test_iter_buffering_reduction():
# Test doing buffered reductions with the iterator
a = np.arange(6)
b = np.array(0., dtype='f8').byteswap().newbyteorder()
i = nditer([a, b], ['reduce_ok', 'buffered'],
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0], [-1]])
with i:
assert_equal(i[1].dtype, np.dtype('f8'))
assert_(i[1].dtype != b.dtype)
# Do the reduction
for x, y in i:
y[...] += x
# Since no axes were specified, should have allocated a scalar
assert_equal(b, np.sum(a))
a = np.arange(6).reshape(2, 3)
b = np.array([0, 0], dtype='f8').byteswap().newbyteorder()
i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'],
[['readonly'], ['readwrite', 'nbo']],
op_axes=[[0, 1], [0, -1]])
# Reduction shape/strides for the output
with i:
assert_equal(i[1].shape, (3,))
assert_equal(i[1].strides, (0,))
# Do the reduction
for x, y in i:
            # Use a for loop instead of ``y[...] += x``
            # (equivalent to ``y[...] = y[...].copy() + x``),
            # because y has the zero strides we rely on for the reduction
for j in range(len(y)):
y[j] += x[j]
assert_equal(b, np.sum(a, axis=1))
# Iterator inner double loop was wrong on this one
p = np.arange(2) + 1
it = np.nditer([p, None],
['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'],
[['readonly'], ['readwrite', 'allocate']],
op_axes=[[-1, 0], [-1, -1]],
itershape=(2, 2))
with it:
it.operands[1].fill(0)
it.reset()
assert_equal(it[0], [1, 2, 1, 2])
# Iterator inner loop should take argument contiguity into account
    x = np.ones((7, 13, 8), np.int8)[4:6, 1:11:6, 1:5].transpose(1, 2, 0)
x[...] = np.arange(x.size).reshape(x.shape)
y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4)
y_base_copy = y_base.copy()
    y = y_base[::2, :, None]
it = np.nditer([y, x],
['buffered', 'external_loop', 'reduce_ok'],
[['readwrite'], ['readonly']])
with it:
for a, b in it:
a.fill(2)
assert_equal(y_base[1::2], y_base_copy[1::2])
assert_equal(y_base[::2], 2)
def test_iter_buffering_reduction_reuse_reduce_loops():
    # There was a bug triggering reuse of the reduce loop inappropriately,
    # which caused processing to happen in unnecessarily small chunks
    # and to overrun the buffer.
a = np.zeros((2, 7))
b = np.zeros((1, 7))
it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
op_flags=[['readonly'], ['readwrite']],
buffersize=5)
with it:
bufsizes = [x.shape[0] for x, y in it]
assert_equal(bufsizes, [5, 2, 5, 2])
assert_equal(sum(bufsizes), a.size)
def test_iter_writemasked_badinput():
a = np.zeros((2, 3))
b = np.zeros((3,))
m = np.array([[True, True, False], [False, True, False]])
m2 = np.array([True, True, False])
m3 = np.array([0, 1, 1], dtype='u1')
mbad1 = np.array([0, 1, 1], dtype='i1')
mbad2 = np.array([0, 1, 1], dtype='f4')
# Need an 'arraymask' if any operand is 'writemasked'
assert_raises(ValueError, nditer, [a, m], [],
[['readwrite', 'writemasked'], ['readonly']])
# A 'writemasked' operand must not be readonly
assert_raises(ValueError, nditer, [a, m], [],
[['readonly', 'writemasked'], ['readonly', 'arraymask']])
# 'writemasked' and 'arraymask' may not be used together
assert_raises(ValueError, nditer, [a, m], [],
[['readonly'], ['readwrite', 'arraymask', 'writemasked']])
# 'arraymask' may only be specified once
assert_raises(ValueError, nditer, [a, m, m2], [],
[['readwrite', 'writemasked'],
['readonly', 'arraymask'],
['readonly', 'arraymask']])
# An 'arraymask' with nothing 'writemasked' also doesn't make sense
assert_raises(ValueError, nditer, [a, m], [],
[['readwrite'], ['readonly', 'arraymask']])
# A writemasked reduction requires a similarly smaller mask
assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'],
[['readonly'],
['readwrite', 'writemasked'],
['readonly', 'arraymask']])
# But this should work with a smaller/equal mask to the reduction operand
np.nditer([a, b, m2], ['reduce_ok'],
[['readonly'],
['readwrite', 'writemasked'],
['readonly', 'arraymask']])
# The arraymask itself cannot be a reduction
assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'],
[['readonly'],
['readwrite', 'writemasked'],
['readwrite', 'arraymask']])
# A uint8 mask is ok too
np.nditer([a, m3], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['f4', None],
casting='same_kind')
# An int8 mask isn't ok
assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['f4', None],
casting='same_kind')
# A float32 mask isn't ok
assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['f4', None],
casting='same_kind')
def _is_buffered(iterator):
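    # A buffered nditer exposes no valid ``itviews``; the ValueError raised on
    # attribute access is what this helper uses as the buffering probe.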
try:
iterator.itviews
except ValueError:
return True
return False
@pytest.mark.parametrize("a",
[np.zeros((3,), dtype='f8'),
np.zeros((9876, 3*5), dtype='f8')[::2, :],
np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :],
# Also test with the last dimension strided (so it does not fit if
# there is repeated access)
np.zeros((9,), dtype='f8')[::3],
np.zeros((9876, 3*10), dtype='f8')[::2, ::5],
np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]])
def test_iter_writemasked(a):
# Note, the slicing above is to ensure that nditer cannot combine multiple
# axes into one. The repetition is just to make things a bit more
# interesting.
shape = a.shape
reps = shape[-1] // 3
msk = np.empty(shape, dtype=bool)
msk[...] = [True, True, False] * reps
# When buffering is unused, 'writemasked' effectively does nothing.
# It's up to the user of the iterator to obey the requested semantics.
it = np.nditer([a, msk], [],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
with it:
for x, m in it:
x[...] = 1
# Because we violated the semantics, all the values became 1
assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape))
# Even if buffering is enabled, we still may be accessing the array
# directly.
it = np.nditer([a, msk], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']])
# @seberg: I honestly don't currently understand why a "buffered" iterator
# would end up not using a buffer for the small array here at least when
# "writemasked" is used, that seems confusing... Check by testing for
# actual memory overlap!
is_buffered = True
with it:
for x, m in it:
x[...] = 2.5
if np.may_share_memory(x, a):
is_buffered = False
if not is_buffered:
# Because we violated the semantics, all the values became 2.5
assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape))
else:
# For large sizes, the iterator may be buffered:
assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape))
a[...] = 2.5
    # If buffering will definitely happen, for instance because of
# a cast, only the items selected by the mask will be copied back from
# the buffer.
it = np.nditer([a, msk], ['buffered'],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=['i8', None],
casting='unsafe')
with it:
for x, m in it:
x[...] = 3
# Even though we violated the semantics, only the selected values
# were copied back
assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape))
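# Distilled sketch (ours) of the final case above: when a cast forces
# buffering, only the masked items are copied back from the buffer.
def _writemasked_sketch():
    a = np.zeros(3)
    msk = np.array([True, False, True])
    it = np.nditer([a, msk], ['buffered'],
                   [['readwrite', 'writemasked'],
                    ['readonly', 'arraymask']],
                   op_dtypes=['i8', None], casting='unsafe')
    with it:
        for x, m in it:
            x[...] = 7
    return a  # expected: [7., 0., 7.]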
def test_iter_writemasked_decref():
# force casting (to make it interesting) by using a structured dtype.
arr = np.arange(10000).astype(">i,O")
original = arr.copy()
mask = np.random.randint(0, 2, size=10000).astype(bool)
it = np.nditer([arr, mask], ['buffered', "refs_ok"],
[['readwrite', 'writemasked'],
['readonly', 'arraymask']],
op_dtypes=["<i,O", "?"])
singleton = object()
if HAS_REFCOUNT:
count = sys.getrefcount(singleton)
for buf, mask_buf in it:
buf[...] = (3, singleton)
del buf, mask_buf, it # delete everything to ensure correct cleanup
if HAS_REFCOUNT:
# The buffer would have included additional items, they must be
# cleared correctly:
assert sys.getrefcount(singleton) - count == np.count_nonzero(mask)
assert_array_equal(arr[~mask], original[~mask])
assert (arr[mask] == np.array((3, singleton), arr.dtype)).all()
del arr
if HAS_REFCOUNT:
assert sys.getrefcount(singleton) == count
def test_iter_non_writable_attribute_deletion():
it = np.nditer(np.ones(2))
attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
"iterationneedsapi", "has_multi_index", "has_index", "dtypes",
"ndim", "nop", "itersize", "finished"]
for s in attr:
assert_raises(AttributeError, delattr, it, s)
def test_iter_writable_attribute_deletion():
it = np.nditer(np.ones(2))
attr = [ "multi_index", "index", "iterrange", "iterindex"]
for s in attr:
assert_raises(AttributeError, delattr, it, s)
def test_iter_element_deletion():
it = np.nditer(np.ones(3))
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError
def test_iter_allocated_array_dtypes():
# If the dtype of an allocated output has a shape, the shape gets
# tacked onto the end of the result.
it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
for a, b in it:
b[0] = a - 1
b[1] = a + 1
assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
# Check the same (less sensitive) thing when `op_axes` with -1 is given.
it = np.nditer(([[1, 3, 20]], None), op_dtypes=[None, ('i4', (2,))],
flags=["reduce_ok"], op_axes=[None, (-1, 0)])
for a, b in it:
b[0] = a - 1
b[1] = a + 1
assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
# Make sure this works for scalars too
it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))])
for a, b, c in it:
c[0, 0] = a - b
c[0, 1] = a + b
c[1, 0] = a * b
c[1, 1] = a / b
assert_equal(it.operands[2], [[8, 12], [20, 5]])
def test_0d_iter():
# Basic test for iteration of 0-d arrays:
i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
assert_equal(i.ndim, 0)
assert_equal(next(i), (2, 3))
assert_equal(i.multi_index, ())
assert_equal(i.iterindex, 0)
assert_raises(StopIteration, next, i)
# test reset:
i.reset()
assert_equal(next(i), (2, 3))
assert_raises(StopIteration, next, i)
# test forcing to 0-d
i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
assert_equal(i.ndim, 0)
assert_equal(len(i), 1)
i = nditer(np.arange(5), ['multi_index'], [['readonly']],
op_axes=[()], itershape=())
assert_equal(i.ndim, 0)
assert_equal(len(i), 1)
# passing an itershape alone is not enough, the op_axes are also needed
with assert_raises(ValueError):
nditer(np.arange(5), ['multi_index'], [['readonly']], itershape=())
# Test a more complex buffered casting case (same as another test above)
sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
a = np.array(0.5, dtype='f4')
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe', op_dtypes=sdt)
vals = next(i)
assert_equal(vals['a'], 0.5)
assert_equal(vals['b'], 0)
assert_equal(vals['c'], [[(0.5)]*3]*2)
assert_equal(vals['d'], 0.5)
def test_object_iter_cleanup():
# see gh-18450
    # object arrays can raise a Python exception in ufunc inner loops using
    # nditer, which should cause iteration to stop & clean up. There were
    # bugs in the nditer cleanup when decref'ing object arrays.
# This test would trigger valgrind "uninitialized read" before the bugfix.
assert_raises(TypeError, lambda: np.zeros((17000, 2), dtype='f4') * None)
# this more explicit code also triggers the invalid access
arr = np.arange(np.BUFSIZE * 10).reshape(10, -1).astype(str)
oarr = arr.astype(object)
oarr[:, -1] = None
assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1]))
# followup: this tests for a bug introduced in the first pass of gh-18450,
# caused by an incorrect fallthrough of the TypeError
class T:
def __bool__(self):
raise TypeError("Ambiguous")
assert_raises(TypeError, np.logical_or.reduce,
np.array([T(), T()], dtype='O'))
def test_object_iter_cleanup_reduce():
# Similar as above, but a complex reduction case that was previously
# missed (see gh-18810).
# The following array is special in that it cannot be flattened:
arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]])[::2]
with pytest.raises(TypeError):
np.sum(arr)
@pytest.mark.parametrize("arr", [
np.ones((8000, 4, 2), dtype=object)[:, ::2, :],
np.ones((8000, 4, 2), dtype=object, order="F")[:, ::2, :],
np.ones((8000, 4, 2), dtype=object)[:, ::2, :].copy("F")])
def test_object_iter_cleanup_large_reduce(arr):
# More complicated calls are possible for large arrays:
out = np.ones(8000, dtype=np.intp)
# force casting with `dtype=object`
res = np.sum(arr, axis=(1, 2), dtype=object, out=out)
assert_array_equal(res, np.full(8000, 4, dtype=object))
def test_iter_too_large():
# The total size of the iterator must not exceed the maximum intp due
# to broadcasting. Dividing by 1024 will keep it small enough to
# give a legal array.
size = np.iinfo(np.intp).max // 1024
arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
assert_raises(ValueError, nditer, (arr, arr[:, None]))
# test the same for multiindex. That may get more interesting when
# removing 0 dimensional axis is allowed (since an iterator can grow then)
assert_raises(ValueError, nditer,
(arr, arr[:, None]), flags=['multi_index'])
def test_iter_too_large_with_multiindex():
    # When a multi index is being tracked, the error is delayed. This
    # checks the delayed error messages and checks getting below the
    # limit by removing an axis.
base_size = 2**10
num = 1
while base_size**num < np.iinfo(np.intp).max:
num += 1
shape_template = [1, 1] * num
arrays = []
for i in range(num):
shape = shape_template[:]
shape[i * 2] = 2**10
arrays.append(np.empty(shape))
arrays = tuple(arrays)
# arrays are now too large to be broadcast. The different modes test
# different nditer functionality with or without GIL.
for mode in range(6):
with assert_raises(ValueError):
_multiarray_tests.test_nditer_too_large(arrays, -1, mode)
# but if we do nothing with the nditer, it can be constructed:
_multiarray_tests.test_nditer_too_large(arrays, -1, 7)
# When an axis is removed, things should work again (half the time):
for i in range(num):
for mode in range(6):
# an axis with size 1024 is removed:
_multiarray_tests.test_nditer_too_large(arrays, i*2, mode)
# an axis with size 1 is removed:
with assert_raises(ValueError):
_multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)
def test_writebacks():
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
assert_(a.dtype.byteorder != au.dtype.byteorder)
it = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
with it:
it.operands[0][:] = 100
assert_equal(au, 100)
    # do it again, this time raising an error
it = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
try:
with it:
assert_equal(au.flags.writeable, False)
it.operands[0][:] = 0
raise ValueError('exit context manager on exception')
except:
pass
assert_equal(au, 0)
assert_equal(au.flags.writeable, True)
# cannot reuse i outside context manager
assert_raises(ValueError, getattr, it, 'operands')
it = nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
with it:
x = it.operands[0]
x[:] = 6
assert_(x.flags.writebackifcopy)
assert_equal(au, 6)
assert_(not x.flags.writebackifcopy)
x[:] = 123 # x.data still valid
assert_equal(au, 6) # but not connected to au
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
# reentering works
with it:
with it:
for x in it:
x[...] = 123
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
# make sure exiting the inner context manager closes the iterator
with it:
with it:
for x in it:
x[...] = 123
assert_raises(ValueError, getattr, it, 'operands')
# do not crash if original data array is decrefed
it = nditer(au, [],
[['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
del au
with it:
for x in it:
x[...] = 123
# make sure we cannot reenter the closed iterator
enter = it.__enter__
assert_raises(RuntimeError, enter)
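# Minimal sketch (ours) of the writeback lifecycle exercised above: writes land
# in a native-byte-order copy inside the context and are flushed back to the
# original operand on exit.
def _writeback_sketch():
    au = np.arange(3, dtype='f4').byteswap().newbyteorder()
    it = nditer(au, [], [['readwrite', 'updateifcopy']],
                casting='equiv', op_dtypes=[np.dtype('f4')])
    with it:
        it.operands[0][:] = 9
    return au  # expected: all elements equal to 9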
def test_close_equivalent():
    '''Using a context manager and using nditer.close are equivalent.
    '''
def add_close(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
ret = it.operands[2]
it.close()
return ret
def add_context(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
with it:
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
z = add_close(range(5), range(5))
assert_equal(z, range(0, 10, 2))
z = add_context(range(5), range(5))
assert_equal(z, range(0, 10, 2))
def test_close_raises():
it = np.nditer(np.arange(3))
    assert_equal(next(it), 0)
it.close()
assert_raises(StopIteration, next, it)
assert_raises(ValueError, getattr, it, 'operands')
def test_close_parameters():
it = np.nditer(np.arange(3))
assert_raises(TypeError, it.close, 1)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_warn_noclose():
a = np.arange(6, dtype='f4')
au = a.byteswap().newbyteorder()
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
casting='equiv', op_dtypes=[np.dtype('f4')])
del it
assert len(sup.log) == 1
@pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32",
reason="Errors with Python 3.9 on Windows")
@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
[("i", "O"), ("O", "i"), # most simple cases
("i,O", "O,O"), # structured partially only copying O
("O,i", "i,O"), # structured casting to and from O
])
@pytest.mark.parametrize("steps", [1, 2, 3])
def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps):
"""
    Checks for reference counting leaks during cleanup. Using explicit
    reference counts leads to occasional false positives (at least in
    parallel test setups), so this test should still detect leaks correctly
    when run e.g. with pytest-valgrind or pytest-leaks.
"""
    value = 2**30 + 1  # just a random value that Python won't cache
arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
for step in range(steps):
# The iteration finishes in 3 steps, the first two are partial
next(it)
del it # not necessary, but we test the cleanup
# Repeat the test with `iternext`
it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
for step in range(steps):
it.iternext()
del it # not necessary, but we test the cleanup
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
[("O", "i"), # most simple cases
("O,i", "i,O"), # structured casting to and from O
])
def test_partial_iteration_error(in_dtype, buf_dtype):
    value = 123  # relies on the CPython small-int cache (leak-check will still find it)
arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
if in_dtype == "O":
arr[int(np.BUFSIZE * 1.5)] = None
else:
arr[int(np.BUFSIZE * 1.5)]["f0"] = None
count = sys.getrefcount(value)
it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
with pytest.raises(TypeError):
# pytest.raises seems to have issues with the error originating
# in the for loop, so manually unravel:
next(it)
next(it) # raises TypeError
# Repeat the test with `iternext` after resetting, the buffers should
# already be cleared from any references, so resetting is sufficient.
it.reset()
with pytest.raises(TypeError):
it.iternext()
it.iternext()
assert count == sys.getrefcount(value)
def test_debug_print(capfd):
"""
Matches the expected output of a debug print with the actual output.
Note that the iterator dump should not be considered stable API,
this test is mainly to ensure the print does not crash.
Currently uses a subprocess to avoid dealing with the C level `printf`s.
"""
# the expected output with all addresses and sizes stripped (they vary
# and/or are platform dependent).
expected = """
------ BEGIN ITERATOR DUMP ------
| Iterator Address:
| ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS
| NDim: 2
| NOp: 2
| IterSize: 50
| IterStart: 0
| IterEnd: 50
| IterIndex: 0
| Iterator SizeOf:
| BufferData SizeOf:
| AxisData SizeOf:
|
| Perm: 0 1
| DTypes:
| DTypes: dtype('float64') dtype('int32')
| InitDataPtrs:
| BaseOffsets: 0 0
| Operands:
| Operand DTypes: dtype('int64') dtype('float64')
| OpItFlags:
| Flags[0]: READ CAST ALIGNED
| Flags[1]: READ WRITE CAST ALIGNED REDUCE
|
| BufferData:
| BufferSize: 50
| Size: 5
| BufIterEnd: 5
| REDUCE Pos: 0
| REDUCE OuterSize: 10
| REDUCE OuterDim: 1
| Strides: 8 4
| Ptrs:
| REDUCE Outer Strides: 40 0
| REDUCE Outer Ptrs:
| ReadTransferFn:
| ReadTransferData:
| WriteTransferFn:
| WriteTransferData:
| Buffers:
|
| AxisData[0]:
| Shape: 5
| Index: 0
| Strides: 16 8
| Ptrs:
| AxisData[1]:
| Shape: 10
| Index: 0
| Strides: 80 0
| Ptrs:
------- END ITERATOR DUMP -------
""".strip().splitlines()
arr1 = np.arange(100, dtype=np.int64).reshape(10, 10)[:, ::2]
arr2 = np.arange(5.)
it = np.nditer((arr1, arr2), op_dtypes=["d", "i4"], casting="unsafe",
flags=["reduce_ok", "buffered"],
op_flags=[["readonly"], ["readwrite"]])
it.debug_print()
res = capfd.readouterr().out
res = res.strip().splitlines()
assert len(res) == len(expected)
for res_line, expected_line in zip(res, expected):
# The actual output may have additional pointers listed that are
# stripped from the example output:
assert res_line.startswith(expected_line.strip())
| 127,984 | Python | 38.055539 | 95 | 0.516627 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_defchararray.py |
import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises,
assert_raises_regex
)
kw_unicode_true = {'unicode': True}  # kwargs dicts kept from the 2to3 era
kw_unicode_false = {'unicode': False}
class TestBasic:
def test_from_object_array(self):
A = np.array([['abc', 2],
['long ', '0123456789']], dtype='O')
B = np.char.array(A)
assert_equal(B.dtype.itemsize, 10)
assert_array_equal(B, [[b'abc', b'2'],
[b'long', b'0123456789']])
def test_from_object_array_unicode(self):
A = np.array([['abc', u'Sigma \u03a3'],
['long ', '0123456789']], dtype='O')
assert_raises(ValueError, np.char.array, (A,))
B = np.char.array(A, **kw_unicode_true)
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
assert_array_equal(B, [['abc', u'Sigma \u03a3'],
['long', '0123456789']])
def test_from_string_array(self):
A = np.array([[b'abc', b'foo'],
[b'long ', b'0123456789']])
assert_equal(A.dtype.type, np.string_)
B = np.char.array(A)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
B[0, 0] = 'changed'
assert_(B[0, 0] != A[0, 0])
C = np.char.asarray(A)
assert_array_equal(C, A)
assert_equal(C.dtype, A.dtype)
C[0, 0] = 'changed again'
assert_(C[0, 0] != B[0, 0])
assert_(C[0, 0] == A[0, 0])
def test_from_unicode_array(self):
A = np.array([['abc', u'Sigma \u03a3'],
['long ', '0123456789']])
assert_equal(A.dtype.type, np.unicode_)
B = np.char.array(A)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
B = np.char.array(A, **kw_unicode_true)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
def fail():
np.char.array(A, **kw_unicode_false)
assert_raises(UnicodeEncodeError, fail)
def test_unicode_upconvert(self):
A = np.char.array(['abc'])
B = np.char.array([u'\u03a3'])
assert_(issubclass((A + B).dtype.type, np.unicode_))
def test_from_string(self):
A = np.char.array(b'abc')
assert_equal(len(A), 1)
assert_equal(len(A[0]), 3)
assert_(issubclass(A.dtype.type, np.string_))
def test_from_unicode(self):
A = np.char.array(u'\u03a3')
assert_equal(len(A), 1)
assert_equal(len(A[0]), 1)
assert_equal(A.itemsize, 4)
assert_(issubclass(A.dtype.type, np.unicode_))
class TestVecString:
def test_non_existent_method(self):
def fail():
_vec_string('a', np.string_, 'bogus')
assert_raises(AttributeError, fail)
def test_non_string_array(self):
def fail():
_vec_string(1, np.string_, 'strip')
assert_raises(TypeError, fail)
def test_invalid_args_tuple(self):
def fail():
_vec_string(['a'], np.string_, 'strip', 1)
assert_raises(TypeError, fail)
def test_invalid_type_descr(self):
def fail():
_vec_string(['a'], 'BOGUS', 'strip')
assert_raises(TypeError, fail)
def test_invalid_function_args(self):
def fail():
_vec_string(['a'], np.string_, 'strip', (1,))
assert_raises(TypeError, fail)
def test_invalid_result_type(self):
def fail():
_vec_string(['a'], np.int_, 'strip')
assert_raises(TypeError, fail)
def test_broadcast_error(self):
def fail():
_vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
assert_raises(ValueError, fail)
class TestWhitespace:
def setup_method(self):
self.A = np.array([['abc ', '123 '],
['789 ', 'xyz ']]).view(np.chararray)
self.B = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
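    # chararray comparisons ignore trailing whitespace, so the padded A and
    # the unpadded B compare equal element-wise below.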
def test1(self):
assert_(np.all(self.A == self.B))
assert_(np.all(self.A >= self.B))
assert_(np.all(self.A <= self.B))
assert_(not np.any(self.A > self.B))
assert_(not np.any(self.A < self.B))
assert_(not np.any(self.A != self.B))
class TestChar:
def setup_method(self):
self.A = np.array('abc1', dtype='c').view(np.chararray)
def test_it(self):
assert_equal(self.A.shape, (4,))
assert_equal(self.A.upper()[:2].tobytes(), b'AB')
class TestComparisons:
def setup_method(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '123 '],
['051', 'tuv']]).view(np.chararray)
def test_not_equal(self):
assert_array_equal((self.A != self.B), [[True, False], [True, True]])
def test_equal(self):
assert_array_equal((self.A == self.B), [[False, True], [False, False]])
def test_greater_equal(self):
assert_array_equal((self.A >= self.B), [[False, True], [True, True]])
def test_less_equal(self):
assert_array_equal((self.A <= self.B), [[True, True], [False, False]])
def test_greater(self):
assert_array_equal((self.A > self.B), [[False, False], [True, True]])
def test_less(self):
assert_array_equal((self.A < self.B), [[True, False], [False, False]])
def test_type(self):
out1 = np.char.equal(self.A, self.B)
out2 = np.char.equal('a', 'a')
assert_(isinstance(out1, np.ndarray))
assert_(isinstance(out2, np.ndarray))
class TestComparisonsMixed1(TestComparisons):
"""Ticket #1276"""
def setup_method(self):
TestComparisons.setup_method(self)
self.B = np.array([['efg', '123 '],
['051', 'tuv']], np.unicode_).view(np.chararray)
class TestComparisonsMixed2(TestComparisons):
"""Ticket #1276"""
def setup_method(self):
TestComparisons.setup_method(self)
self.A = np.array([['abc', '123'],
['789', 'xyz']], np.unicode_).view(np.chararray)
class TestInformation:
def setup_method(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
self.B = np.array([[u' \u03a3 ', u''],
[u'12345', u'MixedCase'],
[u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
def test_len(self):
assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
def test_count(self):
assert_(issubclass(self.A.count('').dtype.type, np.integer))
assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
# Python doesn't seem to like counting NULL characters
# assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
# assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
def test_endswith(self):
assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
def fail():
self.A.endswith('3', 'fdjk')
assert_raises(TypeError, fail)
def test_find(self):
assert_(issubclass(self.A.find('a').dtype.type, np.integer))
assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])
def test_index(self):
def fail():
self.A.index('a')
assert_raises(ValueError, fail)
assert_(np.char.index('abcba', 'b') == 1)
assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
def test_isalnum(self):
assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
def test_isalpha(self):
assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
def test_isdigit(self):
assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
def test_islower(self):
assert_(issubclass(self.A.islower().dtype.type, np.bool_))
assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
def test_isspace(self):
assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
def test_istitle(self):
assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
def test_isupper(self):
assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
def test_rfind(self):
assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
def test_rindex(self):
def fail():
self.A.rindex('a')
assert_raises(ValueError, fail)
assert_(np.char.rindex('abcba', 'b') == 3)
assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
def test_startswith(self):
assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
def fail():
self.A.startswith('3', 'fdjk')
assert_raises(TypeError, fail)
class TestMethods:
def setup_method(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']],
dtype='S').view(np.chararray)
self.B = np.array([[u' \u03a3 ', u''],
[u'12345', u'MixedCase'],
[u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
def test_capitalize(self):
tgt = [[b' abc ', b''],
[b'12345', b'Mixedcase'],
[b'123 \t 345 \0 ', b'Upper']]
assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
assert_array_equal(self.A.capitalize(), tgt)
tgt = [[u' \u03c3 ', ''],
['12345', 'Mixedcase'],
['123 \t 345 \0 ', 'Upper']]
assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
assert_array_equal(self.B.capitalize(), tgt)
def test_center(self):
assert_(issubclass(self.A.center(10).dtype.type, np.string_))
C = self.A.center([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
C = self.A.center(20, b'#')
assert_(np.all(C.startswith(b'#')))
assert_(np.all(C.endswith(b'#')))
C = np.char.center(b'FOO', [[10, 20], [15, 8]])
tgt = [[b' FOO ', b' FOO '],
[b' FOO ', b' FOO ']]
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_decode(self):
A = np.char.array([b'\\u03a3'])
assert_(A.decode('unicode-escape')[0] == '\u03a3')
def test_encode(self):
B = self.B.encode('unicode_escape')
assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
def test_expandtabs(self):
T = self.A.expandtabs()
assert_(T[2, 0] == b'123 345 \0')
def test_join(self):
# NOTE: list(b'123') == [49, 50, 51]
        # so that b','.join(b'123') raises an error on Python 3
A0 = self.A.decode('ascii')
A = np.char.join([',', '#'], A0)
assert_(issubclass(A.dtype.type, np.unicode_))
tgt = np.array([[' ,a,b,c, ', ''],
['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
assert_array_equal(np.char.join([',', '#'], A0), tgt)
def test_ljust(self):
assert_(issubclass(self.A.ljust(10).dtype.type, np.string_))
C = self.A.ljust([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
C = self.A.ljust(20, b'#')
assert_array_equal(C.startswith(b'#'), [
[False, True], [False, False], [False, False]])
assert_(np.all(C.endswith(b'#')))
C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
tgt = [[b'FOO ', b'FOO '],
[b'FOO ', b'FOO ']]
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_lower(self):
tgt = [[b' abc ', b''],
[b'12345', b'mixedcase'],
[b'123 \t 345 \0 ', b'upper']]
assert_(issubclass(self.A.lower().dtype.type, np.string_))
assert_array_equal(self.A.lower(), tgt)
tgt = [[u' \u03c3 ', u''],
[u'12345', u'mixedcase'],
[u'123 \t 345 \0 ', u'upper']]
assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
assert_array_equal(self.B.lower(), tgt)
def test_lstrip(self):
tgt = [[b'abc ', b''],
[b'12345', b'MixedCase'],
[b'123 \t 345 \0 ', b'UPPER']]
assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
assert_array_equal(self.A.lstrip(), tgt)
tgt = [[b' abc', b''],
[b'2345', b'ixedCase'],
[b'23 \t 345 \x00', b'UPPER']]
assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
tgt = [[u'\u03a3 ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]
assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.lstrip(), tgt)
def test_partition(self):
P = self.A.partition([b'3', b'M'])
tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
[(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
[(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
assert_(issubclass(P.dtype.type, np.string_))
assert_array_equal(P, tgt)
def test_replace(self):
R = self.A.replace([b'3', b'a'],
[b'##########', b'@'])
tgt = [[b' abc ', b''],
[b'12##########45', b'MixedC@se'],
[b'12########## \t ##########45 \x00', b'UPPER']]
assert_(issubclass(R.dtype.type, np.string_))
assert_array_equal(R, tgt)
def test_rjust(self):
assert_(issubclass(self.A.rjust(10).dtype.type, np.string_))
C = self.A.rjust([10, 20])
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
C = self.A.rjust(20, b'#')
assert_(np.all(C.startswith(b'#')))
assert_array_equal(C.endswith(b'#'),
[[False, True], [False, False], [False, False]])
C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
tgt = [[b' FOO', b' FOO'],
[b' FOO', b' FOO']]
assert_(issubclass(C.dtype.type, np.string_))
assert_array_equal(C, tgt)
def test_rpartition(self):
P = self.A.rpartition([b'3', b'M'])
tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
[(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
[(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
assert_(issubclass(P.dtype.type, np.string_))
assert_array_equal(P, tgt)
def test_rsplit(self):
A = self.A.rsplit(b'3')
tgt = [[[b' abc '], [b'']],
[[b'12', b'45'], [b'MixedCase']],
[[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
assert_(issubclass(A.dtype.type, np.object_))
assert_equal(A.tolist(), tgt)
def test_rstrip(self):
assert_(issubclass(self.A.rstrip().dtype.type, np.string_))
tgt = [[b' abc', b''],
[b'12345', b'MixedCase'],
[b'123 \t 345', b'UPPER']]
assert_array_equal(self.A.rstrip(), tgt)
tgt = [[b' abc ', b''],
[b'1234', b'MixedCase'],
[b'123 \t 345 \x00', b'UPP']
]
assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
tgt = [[u' \u03a3', ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']]
assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
assert_array_equal(self.B.rstrip(), tgt)
def test_strip(self):
tgt = [[b'abc', b''],
[b'12345', b'MixedCase'],
[b'123 \t 345', b'UPPER']]
assert_(issubclass(self.A.strip().dtype.type, np.string_))
assert_array_equal(self.A.strip(), tgt)
tgt = [[b' abc ', b''],
[b'234', b'ixedCas'],
[b'23 \t 345 \x00', b'UPP']]
assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
tgt = [[u'\u03a3', ''],
['12345', 'MixedCase'],
['123 \t 345', 'UPPER']]
assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
assert_array_equal(self.B.strip(), tgt)
def test_split(self):
A = self.A.split(b'3')
tgt = [
[[b' abc '], [b'']],
[[b'12', b'45'], [b'MixedCase']],
[[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
assert_(issubclass(A.dtype.type, np.object_))
assert_equal(A.tolist(), tgt)
def test_splitlines(self):
A = np.char.array(['abc\nfds\nwer']).splitlines()
assert_(issubclass(A.dtype.type, np.object_))
assert_(A.shape == (1,))
assert_(len(A[0]) == 3)
def test_swapcase(self):
tgt = [[b' ABC ', b''],
[b'12345', b'mIXEDcASE'],
[b'123 \t 345 \0 ', b'upper']]
assert_(issubclass(self.A.swapcase().dtype.type, np.string_))
assert_array_equal(self.A.swapcase(), tgt)
tgt = [[u' \u03c3 ', u''],
[u'12345', u'mIXEDcASE'],
[u'123 \t 345 \0 ', u'upper']]
assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
assert_array_equal(self.B.swapcase(), tgt)
def test_title(self):
tgt = [[b' Abc ', b''],
[b'12345', b'Mixedcase'],
[b'123 \t 345 \0 ', b'Upper']]
assert_(issubclass(self.A.title().dtype.type, np.string_))
assert_array_equal(self.A.title(), tgt)
tgt = [[u' \u03a3 ', u''],
[u'12345', u'Mixedcase'],
[u'123 \t 345 \0 ', u'Upper']]
assert_(issubclass(self.B.title().dtype.type, np.unicode_))
assert_array_equal(self.B.title(), tgt)
def test_upper(self):
tgt = [[b' ABC ', b''],
[b'12345', b'MIXEDCASE'],
[b'123 \t 345 \0 ', b'UPPER']]
assert_(issubclass(self.A.upper().dtype.type, np.string_))
assert_array_equal(self.A.upper(), tgt)
tgt = [[u' \u03a3 ', u''],
[u'12345', u'MIXEDCASE'],
[u'123 \t 345 \0 ', u'UPPER']]
assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
assert_array_equal(self.B.upper(), tgt)
def test_isnumeric(self):
def fail():
self.A.isnumeric()
assert_raises(TypeError, fail)
assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
assert_array_equal(self.B.isnumeric(), [
[False, False], [True, False], [False, False]])
def test_isdecimal(self):
def fail():
self.A.isdecimal()
assert_raises(TypeError, fail)
assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
assert_array_equal(self.B.isdecimal(), [
[False, False], [True, False], [False, False]])
class TestOperations:
def setup_method(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '456'],
['051', 'tuv']]).view(np.chararray)
def test_add(self):
AB = np.array([['abcefg', '123456'],
['789051', 'xyztuv']]).view(np.chararray)
assert_array_equal(AB, (self.A + self.B))
assert_(len((self.A + self.B)[0][0]) == 6)
def test_radd(self):
QA = np.array([['qabc', 'q123'],
['q789', 'qxyz']]).view(np.chararray)
assert_array_equal(QA, ('q' + self.A))
def test_mul(self):
A = self.A
for r in (2, 3, 5, 7, 197):
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
[A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
assert_array_equal(Ar, (self.A * r))
for ob in [object(), 'qrs']:
with assert_raises_regex(ValueError,
'Can only multiply by integers'):
A*ob
def test_rmul(self):
A = self.A
for r in (2, 3, 5, 7, 197):
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
[A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
assert_array_equal(Ar, (r * self.A))
for ob in [object(), 'qrs']:
with assert_raises_regex(ValueError,
'Can only multiply by integers'):
ob * A
def test_mod(self):
"""Ticket #856"""
F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
C = np.array([[3, 7], [19, 1]])
FC = np.array([['3', '7.000000'],
['19', '1']]).view(np.chararray)
assert_array_equal(FC, F % C)
A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
assert_array_equal(A1, (A % 1))
A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
assert_array_equal(A2, (A % [[1, 2], [3, 4]]))
def test_rmod(self):
assert_(("%s" % self.A) == str(self.A))
assert_(("%r" % self.A) == repr(self.A))
for ob in [42, object()]:
with assert_raises_regex(
TypeError, "unsupported operand type.* and 'chararray'"):
ob % self.A
def test_slice(self):
"""Regression test for https://github.com/numpy/numpy/issues/5982"""
arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
dtype='S4').view(np.chararray)
sl1 = arr[:]
assert_array_equal(sl1, arr)
assert_(sl1.base is arr)
assert_(sl1.base.base is arr.base)
sl2 = arr[:, :]
assert_array_equal(sl2, arr)
assert_(sl2.base is arr)
assert_(sl2.base.base is arr.base)
assert_(arr[0, 0] == b'abc')
def test_empty_indexing():
"""Regression test for ticket 1948."""
# Check that indexing a chararray with an empty list/array returns an
# empty chararray instead of a chararray with a single empty string in it.
s = np.chararray((4,))
assert_(s[[]].size == 0)
| 24,653 | Python | 35.578635 | 94 | 0.493855 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_indexerrors.py |
import numpy as np
from numpy.testing import (
assert_raises, assert_raises_regex,
)
class TestIndexErrors:
'''Tests to exercise indexerrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
'take from a 0-length dimension'
x = np.empty((2, 3, 0, 4))
assert_raises(IndexError, x.take, [0], axis=2)
assert_raises(IndexError, x.take, [1], axis=2)
assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
def test_take_from_object(self):
# Check exception taking from object array
d = np.zeros(5, dtype=object)
assert_raises(IndexError, d.take, [6])
# Check exception taking from 0-d array
d = np.zeros((5, 0), dtype=object)
assert_raises(IndexError, d.take, [1], axis=1)
assert_raises(IndexError, d.take, [0], axis=1)
assert_raises(IndexError, d.take, [0])
assert_raises(IndexError, d.take, [0], mode='wrap')
assert_raises(IndexError, d.take, [0], mode='clip')
def test_multiindex_exceptions(self):
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.item, 20)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.item, (0, 0))
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.itemset, 20, 0)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.itemset, (0, 0), 0)
def test_put_exceptions(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
def test_iterators_exceptions(self):
"cases in iterators.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a[0, 5, None, 2])
assert_raises(IndexError, lambda: a[0, 5, 0, 2])
assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1))
assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a[0, 0, None, 2])
assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
def test_mapping(self):
"cases from mapping.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros((0, 10))
assert_raises(IndexError, lambda: a[12])
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(10, 20)])
assert_raises(IndexError, lambda: assign(a, (10, 20), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, 0)])
assert_raises(IndexError, lambda: assign(a, (1, 0), 1))
a = np.zeros((10,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((0,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(1, [1, 20])])
assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, [0, 1])])
assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))
def test_mapping_error_message(self):
a = np.zeros((3, 5))
index = (1, 2, 3, 4, 5)
assert_raises_regex(
IndexError,
"too many indices for array: "
"array is 2-dimensional, but 5 were indexed",
lambda: a[index])
def test_methods(self):
"cases from methods.c"
a = np.zeros((3, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
a = np.zeros((0, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
| 5,130 | Python | 37.291044 | 76 | 0.566667 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_longdouble.py | import warnings
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
temppath,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
_o = 1 + LD_INFO.eps
string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
del _o
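# Sanity note on the detection above: 1 + eps is the smallest longdouble
# strictly greater than 1, so a string-conversion path limited to double
# precision (i.e. lacking strtold_l) collapses repr(1 + eps) back to exactly
# 1.0, which is what string_to_longdouble_inaccurate flags.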
def test_scalar_extraction():
"""Confirm that extracting a value doesn't convert to python float"""
o = 1 + LD_INFO.eps
a = np.array([o, o, o])
assert_equal(a[1], o)
# Conversions string -> long double
# 0.1 not exactly representable in base 2 floating point.
repr_precision = len(repr(np.longdouble(0.1)))
# +2 from macro block starting around line 842 in scalartypes.c.src.
@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
reason="repr precision not enough to show eps")
def test_repr_roundtrip():
# We will only see eps in repr if within printing precision.
o = 1 + LD_INFO.eps
assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_repr_roundtrip_bytes():
o = 1 + LD_INFO.eps
assert_equal(np.longdouble(repr(o).encode("ascii")), o)
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes))
def test_array_and_stringlike_roundtrip(strtype):
"""
Test that string representations of long-double roundtrip both
for array casting and scalar coercion, see also gh-15608.
"""
o = 1 + LD_INFO.eps
if strtype in (np.bytes_, bytes):
o_str = strtype(repr(o).encode("ascii"))
else:
o_str = strtype(repr(o))
# Test that `o` is correctly coerced from the string-like
assert o == np.longdouble(o_str)
# Test that arrays also roundtrip correctly:
o_strarr = np.asarray([o] * 3, dtype=strtype)
assert (o == o_strarr.astype(np.longdouble)).all()
# And array coercion and casting to string give the same as scalar repr:
assert (o_strarr == o_str).all()
assert (np.asarray([o] * 3).astype(strtype) == o_str).all()
def test_bogus_string():
assert_raises(ValueError, np.longdouble, "spam")
assert_raises(ValueError, np.longdouble, "1.0 flub")
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_fromstring():
o = 1 + LD_INFO.eps
s = (" " + repr(o))*5
a = np.array([o]*5)
assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
err_msg="reading '%s'" % s)
def test_fromstring_complex():
for ctype in ["complex", "cdouble", "cfloat"]:
# Check spacing between separator
assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
np.array([1., 2., 3., 4.]))
# Real component not specified
assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
np.array([1.j, -2.j, 3.j, 40.j]))
# Both components specified
assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
# Spaces at wrong places
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
np.array([1.]))
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
np.array([1j]))
def test_fromstring_bogus():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
np.array([1., 2., 3.]))
def test_fromstring_empty():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("xxxxx", sep="x"),
np.array([]))
def test_fromstring_missing():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
np.array([1]))
class TestFileBased:
ldbl = 1 + LD_INFO.eps
tgt = np.array([ldbl]*5)
out = ''.join([repr(t) + '\n' for t in tgt])
def test_fromfile_bogus(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write("1. 2. 3. flop 4.\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
def test_fromfile_complex(self):
for ctype in ["complex", "cdouble", "cfloat"]:
# Check spacing between separator and only real component specified
with temppath() as path:
with open(path, 'wt') as f:
f.write("1, 2 , 3 ,4\n")
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1., 2., 3., 4.]))
# Real component not specified
with temppath() as path:
with open(path, 'wt') as f:
f.write("1j, -2j, 3j, 4e1j\n")
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))
# Both components specified
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+1j,2-2j, -3+3j, -4e1+4j\n")
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+2 j,3\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+ 2j,3\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1 +2j,3\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+j\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1+\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.]))
# Spaces at wrong places
with temppath() as path:
with open(path, 'wt') as f:
f.write("1j+1\n")
with assert_warns(DeprecationWarning):
res = np.fromfile(path, dtype=ctype, sep=",")
assert_equal(res, np.array([1.j]))
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_fromfile(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write(self.out)
res = np.fromfile(path, dtype=np.longdouble, sep="\n")
assert_equal(res, self.tgt)
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_genfromtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write(self.out)
res = np.genfromtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_loadtxt(self):
with temppath() as path:
with open(path, 'wt') as f:
f.write(self.out)
res = np.loadtxt(path, dtype=np.longdouble)
assert_equal(res, self.tgt)
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_tofile_roundtrip(self):
with temppath() as path:
self.tgt.tofile(path, sep=" ")
res = np.fromfile(path, dtype=np.longdouble, sep=" ")
assert_equal(res, self.tgt)
# Conversions long double -> string
def test_repr_exact():
o = 1 + LD_INFO.eps
assert_(repr(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_format():
o = 1 + LD_INFO.eps
assert_("{0:.40g}".format(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_percent():
o = 1 + LD_INFO.eps
assert_("%.40g" % o != '1')
@pytest.mark.skipif(longdouble_longer_than_double,
reason="array repr problem")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_array_repr():
o = 1 + LD_INFO.eps
a = np.array([o])
b = np.array([1], dtype=np.longdouble)
if not np.all(a != b):
raise ValueError("precision loss creating arrays")
assert_(repr(a) != repr(b))
#
# Locale tests: scalar types formatting should be independent of the locale
#
class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
def test_repr_roundtrip_foreign(self):
o = 1.5
assert_equal(o, np.longdouble(repr(o)))
def test_fromstring_foreign_repr(self):
f = 1.234
a = np.fromstring(repr(f), dtype=float, sep=" ")
assert_equal(a[0], f)
def test_fromstring_best_effort_float(self):
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
np.array([1.]))
def test_fromstring_best_effort(self):
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
np.array([1.]))
def test_fromstring_foreign(self):
s = "1.234"
a = np.fromstring(s, dtype=np.longdouble, sep=" ")
assert_equal(a[0], np.longdouble(s))
def test_fromstring_foreign_sep(self):
a = np.array([1, 2, 3, 4])
b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
assert_array_equal(a, b)
def test_fromstring_foreign_value(self):
with assert_warns(DeprecationWarning):
b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
assert_array_equal(b[0], 1)
@pytest.mark.parametrize("int_val", [
# cases discussed in gh-10723
# and gh-9968
2 ** 1024, 0])
def test_longdouble_from_int(int_val):
# for issue gh-9968
str_val = str(int_val)
# we'll expect a RuntimeWarning on platforms
# with np.longdouble equivalent to np.double
# for large integer input
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
# can be inf==inf on some platforms
assert np.longdouble(int_val) == np.longdouble(str_val)
# we can't directly compare the int and
# max longdouble value on all platforms
if np.allclose(np.finfo(np.longdouble).max,
np.finfo(np.double).max) and w:
assert w[0].category is RuntimeWarning
@pytest.mark.parametrize("bool_val", [
True, False])
def test_longdouble_from_bool(bool_val):
assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
| 13,042 | Python | 34.156334 | 87 | 0.564637 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_mem_policy.py | import asyncio
import gc
import os
import pytest
import numpy as np
import threading
import warnings
from numpy.testing import extbuild, assert_warns
import sys
@pytest.fixture
def get_module(tmp_path):
""" Add a memory policy that returns a false pointer 64 bytes into the
actual allocation, and fill the prefix with some text. Then check at each
memory manipulation that the prefix exists, to make sure all alloc/realloc/
free/calloc go via the functions here.
"""
if sys.platform.startswith('cygwin'):
pytest.skip('link fails on cygwin')
functions = [
("get_default_policy", "METH_NOARGS", """
Py_INCREF(PyDataMem_DefaultHandler);
return PyDataMem_DefaultHandler;
"""),
("set_secret_data_policy", "METH_NOARGS", """
PyObject *secret_data =
PyCapsule_New(&secret_data_handler, "mem_handler", NULL);
if (secret_data == NULL) {
return NULL;
}
PyObject *old = PyDataMem_SetHandler(secret_data);
Py_DECREF(secret_data);
return old;
"""),
("set_old_policy", "METH_O", """
PyObject *old;
if (args != NULL && PyCapsule_CheckExact(args)) {
old = PyDataMem_SetHandler(args);
}
else {
old = PyDataMem_SetHandler(NULL);
}
return old;
"""),
("get_array", "METH_NOARGS", """
char *buf = (char *)malloc(20);
npy_intp dims[1];
dims[0] = 20;
PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8);
return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL,
buf, NPY_ARRAY_WRITEABLE, NULL);
"""),
("set_own", "METH_O", """
if (!PyArray_Check(args)) {
PyErr_SetString(PyExc_ValueError,
"need an ndarray");
return NULL;
}
PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA);
// Maybe try this too?
             // PyArray_BASE((PyArrayObject *)args) = NULL;
Py_RETURN_NONE;
"""),
("get_array_with_base", "METH_NOARGS", """
char *buf = (char *)malloc(20);
npy_intp dims[1];
dims[0] = 20;
PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8);
PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims,
NULL, buf,
NPY_ARRAY_WRITEABLE, NULL);
if (arr == NULL) return NULL;
PyObject *obj = PyCapsule_New(buf, "buf capsule",
(PyCapsule_Destructor)&warn_on_free);
if (obj == NULL) {
Py_DECREF(arr);
return NULL;
}
if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) {
Py_DECREF(arr);
Py_DECREF(obj);
return NULL;
}
return arr;
"""),
]
prologue = '''
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
/*
* This struct allows the dynamic configuration of the allocator funcs
* of the `secret_data_allocator`. It is provided here for
* demonstration purposes, as a valid `ctx` use-case scenario.
*/
typedef struct {
void *(*malloc)(size_t);
void *(*calloc)(size_t, size_t);
void *(*realloc)(void *, size_t);
void (*free)(void *);
} SecretDataAllocatorFuncs;
NPY_NO_EXPORT void *
shift_alloc(void *ctx, size_t sz) {
SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
char *real = (char *)funcs->malloc(sz + 64);
if (real == NULL) {
return NULL;
}
        snprintf(real, 64, "originally allocated %lu", (unsigned long)sz);
return (void *)(real + 64);
}
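    /*
     * Memory layout produced by shift_alloc (illustrative sketch):
     *
     *   [ 64-byte text prefix "originally allocated N" | user data ... ]
     *                                                     ^ returned pointer
     *
     * shift_free/shift_realloc walk back 64 bytes and verify this prefix,
     * which is how the tests detect allocations that bypassed the handler.
     */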
NPY_NO_EXPORT void *
shift_zero(void *ctx, size_t sz, size_t cnt) {
SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
char *real = (char *)funcs->calloc(sz + 64, cnt);
if (real == NULL) {
return NULL;
}
        snprintf(real, 64, "originally allocated %lu via zero",
(unsigned long)sz);
return (void *)(real + 64);
}
NPY_NO_EXPORT void
shift_free(void *ctx, void * p, npy_uintp sz) {
SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
if (p == NULL) {
return ;
}
char *real = (char *)p - 64;
if (strncmp(real, "originally allocated", 20) != 0) {
fprintf(stdout, "uh-oh, unmatched shift_free, "
"no appropriate prefix\\n");
/* Make C runtime crash by calling free on the wrong address */
funcs->free((char *)p + 10);
/* funcs->free(real); */
}
else {
            npy_uintp i = (npy_uintp)atoi(real + 20);
if (i != sz) {
                fprintf(stderr, "uh-oh, unmatched shift_free"
                        "(ptr, %zu) but allocated %zu\\n",
                        (size_t)sz, (size_t)i);
/* This happens in some places, only print */
funcs->free(real);
}
else {
funcs->free(real);
}
}
}
NPY_NO_EXPORT void *
shift_realloc(void *ctx, void * p, npy_uintp sz) {
SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
if (p != NULL) {
char *real = (char *)p - 64;
if (strncmp(real, "originally allocated", 20) != 0) {
fprintf(stdout, "uh-oh, unmatched shift_realloc\\n");
return realloc(p, sz);
}
return (void *)((char *)funcs->realloc(real, sz + 64) + 64);
}
else {
char *real = (char *)funcs->realloc(p, sz + 64);
if (real == NULL) {
return NULL;
}
snprintf(real, 64, "originally allocated "
"%ld via realloc", (unsigned long)sz);
return (void *)(real + 64);
}
}
/* As an example, we use the standard {m|c|re}alloc/free funcs. */
static SecretDataAllocatorFuncs secret_data_handler_ctx = {
malloc,
calloc,
realloc,
free
};
static PyDataMem_Handler secret_data_handler = {
"secret_data_allocator",
1,
{
&secret_data_handler_ctx, /* ctx */
shift_alloc, /* malloc */
shift_zero, /* calloc */
shift_realloc, /* realloc */
shift_free /* free */
}
};
void warn_on_free(void *capsule) {
PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1);
void * obj = PyCapsule_GetPointer(capsule,
PyCapsule_GetName(capsule));
free(obj);
};
'''
more_init = "import_array();"
try:
import mem_policy
return mem_policy
except ImportError:
pass
# if it does not exist, build and load it
return extbuild.build_and_import_extension('mem_policy',
functions,
prologue=prologue,
include_dirs=[np.get_include()],
build_dir=tmp_path,
more_init=more_init)
def test_set_policy(get_module):
get_handler_name = np.core.multiarray.get_handler_name
get_handler_version = np.core.multiarray.get_handler_version
orig_policy_name = get_handler_name()
a = np.arange(10).reshape((2, 5)) # a doesn't own its own data
assert get_handler_name(a) is None
assert get_handler_version(a) is None
assert get_handler_name(a.base) == orig_policy_name
assert get_handler_version(a.base) == 1
orig_policy = get_module.set_secret_data_policy()
b = np.arange(10).reshape((2, 5)) # b doesn't own its own data
assert get_handler_name(b) is None
assert get_handler_version(b) is None
assert get_handler_name(b.base) == 'secret_data_allocator'
assert get_handler_version(b.base) == 1
if orig_policy_name == 'default_allocator':
get_module.set_old_policy(None) # tests PyDataMem_SetHandler(NULL)
assert get_handler_name() == 'default_allocator'
else:
get_module.set_old_policy(orig_policy)
assert get_handler_name() == orig_policy_name
def test_default_policy_singleton(get_module):
get_handler_name = np.core.multiarray.get_handler_name
# set the policy to default
orig_policy = get_module.set_old_policy(None)
assert get_handler_name() == 'default_allocator'
# re-set the policy to default
def_policy_1 = get_module.set_old_policy(None)
assert get_handler_name() == 'default_allocator'
# set the policy to original
def_policy_2 = get_module.set_old_policy(orig_policy)
# since default policy is a singleton,
# these should be the same object
assert def_policy_1 is def_policy_2 is get_module.get_default_policy()
def test_policy_propagation(get_module):
# The memory policy goes hand-in-hand with flags.owndata
class MyArr(np.ndarray):
pass
get_handler_name = np.core.multiarray.get_handler_name
orig_policy_name = get_handler_name()
a = np.arange(10).view(MyArr).reshape((2, 5))
assert get_handler_name(a) is None
assert a.flags.owndata is False
assert get_handler_name(a.base) is None
assert a.base.flags.owndata is False
assert get_handler_name(a.base.base) == orig_policy_name
assert a.base.base.flags.owndata is True
async def concurrent_context1(get_module, orig_policy_name, event):
if orig_policy_name == 'default_allocator':
get_module.set_secret_data_policy()
assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
else:
get_module.set_old_policy(None)
assert np.core.multiarray.get_handler_name() == 'default_allocator'
event.set()
async def concurrent_context2(get_module, orig_policy_name, event):
await event.wait()
# the policy is not affected by changes in parallel contexts
assert np.core.multiarray.get_handler_name() == orig_policy_name
# change policy in the child context
if orig_policy_name == 'default_allocator':
get_module.set_secret_data_policy()
assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
else:
get_module.set_old_policy(None)
assert np.core.multiarray.get_handler_name() == 'default_allocator'
async def async_test_context_locality(get_module):
orig_policy_name = np.core.multiarray.get_handler_name()
event = asyncio.Event()
# the child contexts inherit the parent policy
concurrent_task1 = asyncio.create_task(
concurrent_context1(get_module, orig_policy_name, event))
concurrent_task2 = asyncio.create_task(
concurrent_context2(get_module, orig_policy_name, event))
await concurrent_task1
await concurrent_task2
# the parent context is not affected by child policy changes
assert np.core.multiarray.get_handler_name() == orig_policy_name
def test_context_locality(get_module):
if (sys.implementation.name == 'pypy'
and sys.pypy_version_info[:3] < (7, 3, 6)):
pytest.skip('no context-locality support in PyPy < 7.3.6')
asyncio.run(async_test_context_locality(get_module))
def concurrent_thread1(get_module, event):
get_module.set_secret_data_policy()
assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
event.set()
def concurrent_thread2(get_module, event):
event.wait()
# the policy is not affected by changes in parallel threads
assert np.core.multiarray.get_handler_name() == 'default_allocator'
# change policy in the child thread
get_module.set_secret_data_policy()
def test_thread_locality(get_module):
orig_policy_name = np.core.multiarray.get_handler_name()
event = threading.Event()
# the child threads do not inherit the parent policy
concurrent_task1 = threading.Thread(target=concurrent_thread1,
args=(get_module, event))
concurrent_task2 = threading.Thread(target=concurrent_thread2,
args=(get_module, event))
concurrent_task1.start()
concurrent_task2.start()
concurrent_task1.join()
concurrent_task2.join()
# the parent thread is not affected by child policy changes
assert np.core.multiarray.get_handler_name() == orig_policy_name
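# Informational note (an assumption based on the behaviour exercised above):
# the handler lives in context-local storage, so asyncio tasks inherit a copy
# of the parent's policy, while freshly started threads begin from a new
# context and therefore see the default allocator.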
@pytest.mark.slow
def test_new_policy(get_module):
a = np.arange(10)
orig_policy_name = np.core.multiarray.get_handler_name(a)
orig_policy = get_module.set_secret_data_policy()
b = np.arange(10)
assert np.core.multiarray.get_handler_name(b) == 'secret_data_allocator'
# test array manipulation. This is slow
if orig_policy_name == 'default_allocator':
# when the np.core.test tests recurse into this test, the
# policy will be set so this "if" will be false, preventing
# infinite recursion
#
# if needed, debug this by
        # - running tests with -- -s (to not capture stdout/stderr)
# - setting extra_argv=['-vv'] here
assert np.core.test('full', verbose=2, extra_argv=['-vv'])
# also try the ma tests, the pickling test is quite tricky
assert np.ma.test('full', verbose=2, extra_argv=['-vv'])
get_module.set_old_policy(orig_policy)
c = np.arange(10)
assert np.core.multiarray.get_handler_name(c) == orig_policy_name
@pytest.mark.xfail(sys.implementation.name == "pypy",
reason=("bad interaction between getenv and "
"os.environ inside pytest"))
@pytest.mark.parametrize("policy", ["0", "1", None])
def test_switch_owner(get_module, policy):
a = get_module.get_array()
assert np.core.multiarray.get_handler_name(a) is None
get_module.set_own(a)
oldval = os.environ.get('NUMPY_WARN_IF_NO_MEM_POLICY', None)
if policy is None:
if 'NUMPY_WARN_IF_NO_MEM_POLICY' in os.environ:
os.environ.pop('NUMPY_WARN_IF_NO_MEM_POLICY')
else:
os.environ['NUMPY_WARN_IF_NO_MEM_POLICY'] = policy
try:
# The policy should be NULL, so we have to assume we can call
# "free". A warning is given if the policy == "1"
if policy == "1":
with assert_warns(RuntimeWarning) as w:
del a
gc.collect()
else:
del a
gc.collect()
finally:
if oldval is None:
if 'NUMPY_WARN_IF_NO_MEM_POLICY' in os.environ:
os.environ.pop('NUMPY_WARN_IF_NO_MEM_POLICY')
else:
os.environ['NUMPY_WARN_IF_NO_MEM_POLICY'] = oldval
def test_owner_is_base(get_module):
a = get_module.get_array_with_base()
with pytest.warns(UserWarning, match='warn_on_free'):
del a
gc.collect()
| 15,869 | Python | 36.429245 | 79 | 0.556494 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_cython.py | import os
import shutil
import subprocess
import sys
import pytest
import numpy as np
# This import is copied from random.tests.test_extending
try:
import cython
from Cython.Compiler.Version import version as cython_version
except ImportError:
cython = None
else:
from numpy.compat import _pep440
# Cython 0.29.30 is required for Python 3.11 and there are
# other fixes in the 0.29 series that are needed even for earlier
# Python versions.
# Note: keep in sync with the one in pyproject.toml
required_version = "0.29.30"
if _pep440.parse(cython_version) < _pep440.Version(required_version):
# too old or wrong cython, skip the test
cython = None
pytestmark = pytest.mark.skipif(cython is None, reason="requires cython")
@pytest.fixture
def install_temp(request, tmp_path):
# Based in part on test_cython from random.tests.test_extending
here = os.path.dirname(__file__)
ext_dir = os.path.join(here, "examples", "cython")
cytest = str(tmp_path / "cytest")
shutil.copytree(ext_dir, cytest)
# build the examples and "install" them into a temporary directory
install_log = str(tmp_path / "tmp_install_log.txt")
subprocess.check_output(
[
sys.executable,
"setup.py",
"build",
"install",
"--prefix", str(tmp_path / "installdir"),
"--single-version-externally-managed",
"--record",
install_log,
],
cwd=cytest,
)
    # In order to import the built module, we need to add its path to sys.path
# so parse that out of the record
with open(install_log) as fid:
for line in fid:
if "checks" in line:
sys.path.append(os.path.dirname(line))
break
else:
raise RuntimeError(f'could not parse "{install_log}"')
def test_is_timedelta64_object(install_temp):
import checks
assert checks.is_td64(np.timedelta64(1234))
assert checks.is_td64(np.timedelta64(1234, "ns"))
assert checks.is_td64(np.timedelta64("NaT", "ns"))
assert not checks.is_td64(1)
assert not checks.is_td64(None)
assert not checks.is_td64("foo")
assert not checks.is_td64(np.datetime64("now", "s"))
def test_is_datetime64_object(install_temp):
import checks
assert checks.is_dt64(np.datetime64(1234, "ns"))
assert checks.is_dt64(np.datetime64("NaT", "ns"))
assert not checks.is_dt64(1)
assert not checks.is_dt64(None)
assert not checks.is_dt64("foo")
assert not checks.is_dt64(np.timedelta64(1234))
def test_get_datetime64_value(install_temp):
import checks
dt64 = np.datetime64("2016-01-01", "ns")
result = checks.get_dt64_value(dt64)
expected = dt64.view("i8")
assert result == expected
def test_get_timedelta64_value(install_temp):
import checks
td64 = np.timedelta64(12345, "h")
result = checks.get_td64_value(td64)
expected = td64.view("i8")
assert result == expected
def test_get_datetime64_unit(install_temp):
import checks
dt64 = np.datetime64("2016-01-01", "ns")
result = checks.get_dt64_unit(dt64)
    expected = 10  # NPY_FR_ns in numpy's datetime-unit enum
assert result == expected
td64 = np.timedelta64(12345, "h")
result = checks.get_dt64_unit(td64)
    expected = 5  # NPY_FR_h (hours) in numpy's datetime-unit enum
assert result == expected
def test_abstract_scalars(install_temp):
import checks
assert checks.is_integer(1)
assert checks.is_integer(np.int8(1))
assert checks.is_integer(np.uint64(1))
| 3,536 | Python | 25.2 | 73 | 0.64819 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_limited_api.py | import os
import shutil
import subprocess
import sys
import sysconfig
import pytest
@pytest.mark.xfail(
sysconfig.get_config_var("Py_DEBUG"),
reason=(
"Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, "
"and Py_REF_DEBUG"
),
)
def test_limited_api(tmp_path):
"""Test building a third-party C extension with the limited API."""
# Based in part on test_cython from random.tests.test_extending
here = os.path.dirname(__file__)
ext_dir = os.path.join(here, "examples", "limited_api")
cytest = str(tmp_path / "limited_api")
shutil.copytree(ext_dir, cytest)
# build the examples and "install" them into a temporary directory
install_log = str(tmp_path / "tmp_install_log.txt")
subprocess.check_output(
[
sys.executable,
"setup.py",
"build",
"install",
"--prefix", str(tmp_path / "installdir"),
"--single-version-externally-managed",
"--record",
install_log,
],
cwd=cytest,
)
| 1,075 | Python | 24.619047 | 71 | 0.596279 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_umath.py | import platform
import warnings
import fnmatch
import itertools
import pytest
import sys
import os
import operator
from fractions import Fraction
from functools import reduce
from collections import namedtuple
import numpy.core.umath as ncu
from numpy.core import _umath_tests as ncu_tests
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
_gen_alignment_data, assert_array_almost_equal_nulp
)
from numpy.testing._private.utils import _glibc_older_than
def interesting_binop_operands(val1, val2, dtype):
"""
Helper to create "interesting" operands to cover common code paths:
* scalar inputs
* only first "values" is an array (e.g. scalar division fast-paths)
* Longer array (SIMD) placing the value of interest at different positions
* Oddly strided arrays which may not be SIMD compatible
It does not attempt to cover unaligned access or mixed dtypes.
These are normally handled by the casting/buffering machinery.
This is not a fixture (currently), since I believe a fixture normally
only yields once?
"""
fill_value = 1 # could be a parameter, but maybe not an optional one?
arr1 = np.full(10003, dtype=dtype, fill_value=fill_value)
arr2 = np.full(10003, dtype=dtype, fill_value=fill_value)
arr1[0] = val1
arr2[0] = val2
extractor = lambda res: res
yield arr1[0], arr2[0], extractor, "scalars"
extractor = lambda res: res
yield arr1[0, ...], arr2[0, ...], extractor, "scalar-arrays"
# reset array values to fill_value:
arr1[0] = fill_value
arr2[0] = fill_value
for pos in [0, 1, 2, 3, 4, 5, -1, -2, -3, -4]:
arr1[pos] = val1
arr2[pos] = val2
extractor = lambda res: res[pos]
yield arr1, arr2, extractor, f"off-{pos}"
yield arr1, arr2[pos], extractor, f"off-{pos}-with-scalar"
arr1[pos] = fill_value
arr2[pos] = fill_value
for stride in [-1, 113]:
op1 = arr1[::stride]
op2 = arr2[::stride]
op1[10] = val1
op2[10] = val2
extractor = lambda res: res[10]
yield op1, op2, extractor, f"stride-{stride}"
op1[10] = fill_value
op2[10] = fill_value
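# A minimal usage sketch (illustrative, not part of the suite): every operand
# layout yielded above must produce the same elementwise result at the
# extracted position, regardless of striding or the scalar/array mix.
def _example_interesting_operands_usage():
    for op1, op2, extract, ident in interesting_binop_operands(3, 4, np.int64):
        assert extract(np.add(op1, op2)) == 7, ident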
def on_powerpc():
""" True if we are running on a Power PC platform."""
return platform.processor() == 'powerpc' or \
platform.machine().startswith('ppc')
def bad_arcsinh():
"""The blocklisted trig functions are not accurate on aarch64/PPC for
    complex256. Rather than dig through the actual problem, skip the
test. This should be fixed when we can move past glibc2.17
which is the version in manylinux2014
"""
if platform.machine() == 'aarch64':
x = 1.78e-10
elif on_powerpc():
x = 2.16e-10
else:
return False
v1 = np.arcsinh(np.float128(x))
v2 = np.arcsinh(np.complex256(x)).real
# The eps for float128 is 1-e33, so this is way bigger
return abs((v1 / v2) - 1.0) > 1e-23
class _FilterInvalids:
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
class TestConstants:
def test_pi(self):
assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
def test_e(self):
assert_allclose(ncu.e, 2.718281828459045, 1e-15)
def test_euler_gamma(self):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
class TestOut:
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
o = np.empty(())
r = np.add(a, 2, o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=(o,), subok=subok)
assert_(r is o)
d = np.array(5.7)
o1 = np.empty(())
o2 = np.empty((), dtype=np.int32)
r1, r2 = np.frexp(d, o1, None, subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, None, o2, subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, o1, o2, subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
assert_raises(TypeError, np.add, a, 2, o, o, subok=subok)
assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok)
assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
assert_raises(TypeError, np.add, a, 2, [], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
o.flags.writeable = False
assert_raises(ValueError, np.add, a, 2, o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
def test_out_wrap_subok(self):
class ArrayWrap(np.ndarray):
__array_priority__ = 10
def __new__(cls, arr):
return np.asarray(arr).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
for subok in (True, False):
a = ArrayWrap([0.5])
r = np.add(a, 2, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=(None,), subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
d = ArrayWrap([5.7])
o1 = np.empty((1,))
o2 = np.empty((1,), dtype=np.int32)
r1, r2 = np.frexp(d, o1, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, o1, None, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, None, o2, subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
class TestComparisons:
def test_ignore_object_identity_in_equal(self):
# Check comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.equal, a, a)
# Check error raised when comparing identical non-comparable objects.
class FunkyType:
def __eq__(self, other):
raise TypeError("I won't compare")
a = np.array([FunkyType()])
assert_raises(TypeError, np.equal, a, a)
# Check identity doesn't override comparison mismatch.
a = np.array([np.nan], dtype=object)
assert_equal(np.equal(a, a), [False])
def test_ignore_object_identity_in_not_equal(self):
# Check comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.not_equal, a, a)
# Check error raised when comparing identical non-comparable objects.
class FunkyType:
def __ne__(self, other):
raise TypeError("I won't compare")
a = np.array([FunkyType()])
assert_raises(TypeError, np.not_equal, a, a)
# Check identity doesn't override comparison mismatch.
a = np.array([np.nan], dtype=object)
assert_equal(np.not_equal(a, a), [True])
def test_error_in_equal_reduce(self):
# gh-20929
# make sure np.equal.reduce raises a TypeError if an array is passed
# without specifying the dtype
a = np.array([0, 0])
assert_equal(np.equal.reduce(a, dtype=bool), True)
assert_raises(TypeError, np.equal.reduce, a)
class TestAdd:
def test_reduce_alignment(self):
# gh-9876
# make sure arrays with weird strides work with the optimizations in
# pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a
# 4 byte offset, even though its itemsize is 8.
a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])
a['a'] = -1
assert_equal(a['b'].sum(), 0)
class TestDivision:
def test_division_int(self):
        # int division should follow Python; on Python 3 `5 / 10 == 0.5`
        # always holds, so only the true-division branch is live
        x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
        assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
                               -0.05, -0.1, -0.9, -1, -1.2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
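    # Worked check of the floor semantics asserted above (illustrative):
    # -5 // 100 == -1 and -5 % 100 == 95, so the identity
    # (x // 100) * 100 + (x % 100) == x holds elementwise.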
@pytest.mark.parametrize("dtype,ex_val", itertools.product(
np.sctypes['int'] + np.sctypes['uint'], (
(
# dividend
"np.arange(fo.max-lsize, fo.max, dtype=dtype),"
# divisors
"np.arange(lsize, dtype=dtype),"
# scalar divisors
"range(15)"
),
(
# dividend
"np.arange(fo.min, fo.min+lsize, dtype=dtype),"
# divisors
"np.arange(lsize//-2, lsize//2, dtype=dtype),"
# scalar divisors
"range(fo.min, fo.min + 15)"
), (
# dividend
"np.arange(fo.max-lsize, fo.max, dtype=dtype),"
# divisors
"np.arange(lsize, dtype=dtype),"
# scalar divisors
"[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]"
)
)
))
def test_division_int_boundary(self, dtype, ex_val):
fo = np.iinfo(dtype)
neg = -1 if fo.min < 0 else 1
        # Large enough to test SIMD loops and remainder elements
lsize = 512 + 7
a, b, divisors = eval(ex_val)
a_lst, b_lst = a.tolist(), b.tolist()
c_div = lambda n, d: (
0 if d == 0 else (
fo.min if (n and n == fo.min and d == -1) else n//d
)
)
with np.errstate(divide='ignore'):
ac = a.copy()
ac //= b
div_ab = a // b
div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)]
msg = "Integer arrays floor division check (//)"
assert all(div_ab == div_lst), msg
msg_eq = "Integer arrays floor division check (//=)"
assert all(ac == div_lst), msg_eq
for divisor in divisors:
ac = a.copy()
with np.errstate(divide='ignore', over='ignore'):
div_a = a // divisor
ac //= divisor
div_lst = [c_div(i, divisor) for i in a_lst]
assert all(div_a == div_lst), msg
assert all(ac == div_lst), msg_eq
with np.errstate(divide='raise', over='raise'):
if 0 in b:
# Verify overflow case
with pytest.raises(FloatingPointError,
match="divide by zero encountered in floor_divide"):
a // b
else:
a // b
if fo.min and fo.min in a:
with pytest.raises(FloatingPointError,
match='overflow encountered in floor_divide'):
a // -1
elif fo.min:
a // -1
with pytest.raises(FloatingPointError,
match="divide by zero encountered in floor_divide"):
a // 0
with pytest.raises(FloatingPointError,
match="divide by zero encountered in floor_divide"):
ac = a.copy()
ac //= 0
np.array([], dtype=dtype) // 0
@pytest.mark.parametrize("dtype,ex_val", itertools.product(
np.sctypes['int'] + np.sctypes['uint'], (
"np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)",
"np.array([fo.min, 1, -2, 1, 1, 2, -3], dtype=dtype)",
"np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)",
"np.arange(fo.max-(100*7), fo.max, 7, dtype=dtype)",
)
))
def test_division_int_reduce(self, dtype, ex_val):
fo = np.iinfo(dtype)
a = eval(ex_val)
lst = a.tolist()
c_div = lambda n, d: (
0 if d == 0 or (n and n == fo.min and d == -1) else n//d
)
with np.errstate(divide='ignore'):
div_a = np.floor_divide.reduce(a)
div_lst = reduce(c_div, lst)
msg = "Reduce floor integer division check"
assert div_a == div_lst, msg
with np.errstate(divide='raise', over='raise'):
with pytest.raises(FloatingPointError,
match="divide by zero encountered in reduce"):
np.floor_divide.reduce(np.arange(-100, 100, dtype=dtype))
if fo.min:
with pytest.raises(FloatingPointError,
match='overflow encountered in reduce'):
np.floor_divide.reduce(
np.array([fo.min, 1, -1], dtype=dtype)
)
@pytest.mark.parametrize(
"dividend,divisor,quotient",
[(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12),
(np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12),
(np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12),
(np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12),
(np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1),
(np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0),
(np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')),
(np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')),
(np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')),
(np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),
(np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')),
(np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')),
])
def test_division_int_timedelta(self, dividend, divisor, quotient):
# If either divisor is 0 or quotient is Nat, check for division by 0
if divisor and (isinstance(quotient, int) or not np.isnat(quotient)):
msg = "Timedelta floor division check"
assert dividend // divisor == quotient, msg
# Test for arrays as well
msg = "Timedelta arrays floor division check"
dividend_array = np.array([dividend]*5)
quotient_array = np.array([quotient]*5)
assert all(dividend_array // divisor == quotient_array), msg
else:
with np.errstate(divide='raise', invalid='raise'):
with pytest.raises(FloatingPointError):
dividend // divisor
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
with np.errstate(invalid="ignore", divide="ignore"):
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan)/x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf)/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf)/x
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
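    # (Informational) The checks above encode the usual complex-division
    # conventions: a nonzero finite or infinite numerator over complex zero
    # yields an infinity (an inf component dominates a NaN one), while
    # 0/0 yields NaN.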
def test_floor_division_complex(self):
# check that floor division, divmod and remainder raises type errors
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
with pytest.raises(TypeError):
x // 7
with pytest.raises(TypeError):
np.divmod(x, 7)
with pytest.raises(TypeError):
np.remainder(x, 7)
def test_floor_division_signed_zero(self):
# Check that the sign bit is correctly set when dividing positive and
# negative zero by one.
x = np.zeros(10)
assert_equal(np.signbit(x//1), 0)
assert_equal(np.signbit((-x)//1), 1)
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_floor_division_errors(self, dtype):
fnan = np.array(np.nan, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
fzer = np.array(0.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
# divide by zero error check
with np.errstate(divide='raise', invalid='ignore'):
assert_raises(FloatingPointError, np.floor_divide, fone, fzer)
with np.errstate(divide='ignore', invalid='raise'):
np.floor_divide(fone, fzer)
# The following already contain a NaN and should not warn
with np.errstate(all='raise'):
np.floor_divide(fnan, fone)
np.floor_divide(fone, fnan)
np.floor_divide(fnan, fzer)
np.floor_divide(fzer, fnan)
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_floor_division_corner_cases(self, dtype):
# test corner cases like 1.0//0.0 for errors and return vals
x = np.zeros(10, dtype=dtype)
y = np.ones(10, dtype=dtype)
fnan = np.array(np.nan, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
fzer = np.array(0.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in floor_divide")
div = np.floor_divide(fnan, fone)
            assert(np.isnan(div)), "dt: %s, div: %s" % (dtype, div)
            div = np.floor_divide(fone, fnan)
            assert(np.isnan(div)), "dt: %s, div: %s" % (dtype, div)
            div = np.floor_divide(fnan, fzer)
            assert(np.isnan(div)), "dt: %s, div: %s" % (dtype, div)
# verify 1.0//0.0 computations return inf
with np.errstate(divide='ignore'):
z = np.floor_divide(y, x)
assert_(np.isinf(z).all())
def floor_divide_and_remainder(x, y):
return (np.floor_divide(x, y), np.remainder(x, y))
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
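# Minimal sketch of the invariant the remainder tests exercise (illustrative,
# not part of the suite): floor division and remainder satisfy
# div*b + rem == a, with rem taking the sign of the divisor.
def _example_divmod_invariant():
    a, b = np.array(7), np.array(-3)
    div, rem = np.divmod(a, b)  # -> (-3, -2) under floor semantics
    assert div * b + rem == a
    assert b < rem <= 0  # remainder matches the divisor's sign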
class TestRemainder:
def test_remainder_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floor_divide_and_remainder, np.divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*71, dtype=dt1)
b = np.array(sg2*19, dtype=dt2)
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_remainder_exact(self):
# test that float results are exact for small integers. This also
# holds for the same integers scaled by powers of two.
nlst = list(range(-127, 0))
plst = list(range(1, 128))
dividend = nlst + [0] + plst
divisor = nlst + plst
arg = list(itertools.product(dividend, divisor))
tgt = list(divmod(*t) for t in arg)
a, b = np.array(arg, dtype=int).T
# convert exact integer results from Python to float so that
# signed zero can be used, it is checked.
tgtdiv, tgtrem = np.array(tgt, dtype=float).T
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
for op in [floor_divide_and_remainder, np.divmod]:
for dt in np.typecodes['Float']:
msg = 'op: %s, dtype: %s' % (op.__name__, dt)
fa = a.astype(dt)
fb = b.astype(dt)
div, rem = op(fa, fb)
assert_equal(div, tgtdiv, err_msg=msg)
assert_equal(rem, tgtrem, err_msg=msg)
def test_float_remainder_roundoff(self):
# gh-6127
dt = np.typecodes['Float']
for op in [floor_divide_and_remainder, np.divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*78*6e-8, dtype=dt1)
b = np.array(sg2*6e-8, dtype=dt2)
div, rem = op(a, b)
# Equal assertion should hold when fmod is used
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
@pytest.mark.xfail(sys.platform.startswith("darwin"),
reason="MacOS seems to not give the correct 'invalid' warning for "
"`fmod`. Hopefully, others always do.")
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
def test_float_divmod_errors(self, dtype):
# Check valid errors raised for divmod and remainder
fzero = np.array(0.0, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
fnan = np.array(np.nan, dtype=dtype)
# since divmod is combination of both remainder and divide
# ops it will set both dividebyzero and invalid flags
with np.errstate(divide='raise', invalid='ignore'):
assert_raises(FloatingPointError, np.divmod, fone, fzero)
with np.errstate(divide='ignore', invalid='raise'):
assert_raises(FloatingPointError, np.divmod, fone, fzero)
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, np.divmod, fzero, fzero)
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, np.divmod, finf, finf)
with np.errstate(divide='ignore', invalid='raise'):
assert_raises(FloatingPointError, np.divmod, finf, fzero)
with np.errstate(divide='raise', invalid='ignore'):
# inf / 0 does not set any flags, only the modulo creates a NaN
np.divmod(finf, fzero)
@pytest.mark.xfail(sys.platform.startswith("darwin"),
reason="MacOS seems to not give the correct 'invalid' warning for "
"`fmod`. Hopefully, others always do.")
@pytest.mark.parametrize('dtype', np.typecodes['Float'])
@pytest.mark.parametrize('fn', [np.fmod, np.remainder])
def test_float_remainder_errors(self, dtype, fn):
fzero = np.array(0.0, dtype=dtype)
fone = np.array(1.0, dtype=dtype)
finf = np.array(np.inf, dtype=dtype)
fnan = np.array(np.nan, dtype=dtype)
# The following already contain a NaN and should not warn.
with np.errstate(all='raise'):
with pytest.raises(FloatingPointError,
match="invalid value"):
fn(fone, fzero)
fn(fnan, fzero)
fn(fzero, fnan)
fn(fone, fnan)
fn(fnan, fone)
def test_float_remainder_overflow(self):
a = np.finfo(np.float64).tiny
with np.errstate(over='ignore', invalid='ignore'):
div, mod = np.divmod(4, a)
            assert_(np.isinf(div))
assert_(mod == 0)
with np.errstate(over='raise', invalid='ignore'):
assert_raises(FloatingPointError, np.divmod, 4, a)
with np.errstate(invalid='raise', over='ignore'):
assert_raises(FloatingPointError, np.divmod, 4, a)
def test_float_divmod_corner_cases(self):
# check nan cases
for dt in np.typecodes['Float']:
fnan = np.array(np.nan, dtype=dt)
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in divmod")
sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
                div, rem = np.divmod(fone, fzer)
                assert(np.isinf(div)), 'dt: %s, div: %s' % (dt, div)
                assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)
                div, rem = np.divmod(fzer, fzer)
                assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)
                assert_(np.isnan(div), 'dt: %s, div: %s' % (dt, div))
                div, rem = np.divmod(finf, finf)
                assert(np.isnan(div)), 'dt: %s, div: %s' % (dt, div)
                assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)
                div, rem = np.divmod(finf, fzer)
                assert(np.isinf(div)), 'dt: %s, div: %s' % (dt, div)
                assert(np.isnan(rem)), 'dt: %s, rem: %s' % (dt, rem)
                div, rem = np.divmod(fnan, fone)
                assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem)
                assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div)
                div, rem = np.divmod(fone, fnan)
                assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem)
                assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div)
                div, rem = np.divmod(fnan, fzer)
                assert(np.isnan(rem)), "dt: %s, rem: %s" % (dt, rem)
                assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div)
def test_float_remainder_corner_cases(self):
# Check remainder magnitude.
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = np.remainder(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = np.remainder(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
# Check nans, inf
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
sup.filter(RuntimeWarning, "invalid value encountered in fmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = np.remainder(fone, fzer)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
# MSVC 2008 returns NaN here, so disable the check.
#rem = np.remainder(fone, finf)
#assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
rem = np.remainder(finf, fone)
fmod = np.fmod(finf, fone)
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
rem = np.remainder(finf, finf)
            fmod = np.fmod(finf, finf)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(finf, fzer)
fmod = np.fmod(finf, fzer)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(fone, fnan)
fmod = np.fmod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(fnan, fzer)
fmod = np.fmod(fnan, fzer)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
            assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
rem = np.remainder(fnan, fone)
fmod = np.fmod(fnan, fone)
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
            assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
class TestDivisionIntegerOverflowsAndDivideByZero:
result_type = namedtuple('result_type',
['nocast', 'casted'])
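    # 'nocast' is the expected value when the result stays in the dividend's
    # dtype and overflows; 'casted' applies when promotion to a wider result
    # type avoids the overflow (for the modulo-style ops both are zero).
    # See test_overflows below for how the two branches are chosen.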
helper_lambdas = {
'zero': lambda dtype: 0,
'min': lambda dtype: np.iinfo(dtype).min,
'neg_min': lambda dtype: -np.iinfo(dtype).min,
'min-zero': lambda dtype: (np.iinfo(dtype).min, 0),
'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0),
}
overflow_results = {
np.remainder: result_type(
helper_lambdas['zero'], helper_lambdas['zero']),
np.fmod: result_type(
helper_lambdas['zero'], helper_lambdas['zero']),
operator.mod: result_type(
helper_lambdas['zero'], helper_lambdas['zero']),
operator.floordiv: result_type(
helper_lambdas['min'], helper_lambdas['neg_min']),
np.floor_divide: result_type(
helper_lambdas['min'], helper_lambdas['neg_min']),
np.divmod: result_type(
helper_lambdas['min-zero'], helper_lambdas['neg_min-zero'])
}
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
def test_signed_division_overflow(self, dtype):
to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype)
for op1, op2, extractor, operand_identifier in to_check:
with pytest.warns(RuntimeWarning, match="overflow encountered"):
res = op1 // op2
assert res.dtype == op1.dtype
assert extractor(res) == np.iinfo(op1.dtype).min
# Remainder is well defined though, and does not warn:
res = op1 % op2
assert res.dtype == op1.dtype
assert extractor(res) == 0
# Check fmod as well:
res = np.fmod(op1, op2)
assert extractor(res) == 0
# Divmod warns for the division part:
with pytest.warns(RuntimeWarning, match="overflow encountered"):
res1, res2 = np.divmod(op1, op2)
assert res1.dtype == res2.dtype == op1.dtype
assert extractor(res1) == np.iinfo(op1.dtype).min
assert extractor(res2) == 0
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
def test_divide_by_zero(self, dtype):
# Note that the return value cannot be well defined here, but NumPy
# currently uses 0 consistently. This could be changed.
to_check = interesting_binop_operands(1, 0, dtype)
for op1, op2, extractor, operand_identifier in to_check:
with pytest.warns(RuntimeWarning, match="divide by zero"):
res = op1 // op2
assert res.dtype == op1.dtype
assert extractor(res) == 0
with pytest.warns(RuntimeWarning, match="divide by zero"):
res1, res2 = np.divmod(op1, op2)
assert res1.dtype == res2.dtype == op1.dtype
assert extractor(res1) == 0
assert extractor(res2) == 0
@pytest.mark.parametrize("dividend_dtype",
np.sctypes['int'])
@pytest.mark.parametrize("divisor_dtype",
np.sctypes['int'])
@pytest.mark.parametrize("operation",
[np.remainder, np.fmod, np.divmod, np.floor_divide,
operator.mod, operator.floordiv])
@np.errstate(divide='warn', over='warn')
def test_overflows(self, dividend_dtype, divisor_dtype, operation):
# SIMD tries to perform the operation on as many elements as possible
# that is a multiple of the register's size. We resort to the
# default implementation for the leftover elements.
# We try to cover all paths here.
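        # Lengths 1..128 should exercise both full vector iterations and
        # the scalar tail for common SIMD register widths (an assumption;
        # the exact widths depend on the build target).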
arrays = [np.array([np.iinfo(dividend_dtype).min]*i,
dtype=dividend_dtype) for i in range(1, 129)]
divisor = np.array([-1], dtype=divisor_dtype)
        # If the divisor is a larger type than the dividend (the `else`
        # case), the result is promoted to the larger type and so does not
        # overflow for `divmod` and `floor_divide`.
if np.dtype(dividend_dtype).itemsize >= np.dtype(
divisor_dtype).itemsize and operation in (
np.divmod, np.floor_divide, operator.floordiv):
with pytest.warns(
RuntimeWarning,
match="overflow encountered in"):
result = operation(
dividend_dtype(np.iinfo(dividend_dtype).min),
divisor_dtype(-1)
)
assert result == self.overflow_results[operation].nocast(
dividend_dtype)
# Arrays
for a in arrays:
                # For divmod the result is a (2, len(a)) stack of quotient
                # and remainder, so flatten it in Fortran order to match a
                # plain flatten of the expected (quotient, remainder) pairs.
with pytest.warns(
RuntimeWarning,
match="overflow encountered in"):
result = np.array(operation(a, divisor)).flatten('f')
expected_array = np.array(
[self.overflow_results[operation].nocast(
dividend_dtype)]*len(a)).flatten()
assert_array_equal(result, expected_array)
else:
# Scalars
result = operation(
dividend_dtype(np.iinfo(dividend_dtype).min),
divisor_dtype(-1)
)
assert result == self.overflow_results[operation].casted(
dividend_dtype)
# Arrays
for a in arrays:
# See above comment on flatten
result = np.array(operation(a, divisor)).flatten('f')
expected_array = np.array(
[self.overflow_results[operation].casted(
dividend_dtype)]*len(a)).flatten()
assert_array_equal(result, expected_array)
class TestCbrt:
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
def test_cbrt(self):
x = np.array([1., 2., -3., np.inf, -np.inf])
assert_almost_equal(np.cbrt(x**3), x)
assert_(np.isnan(np.cbrt(np.nan)))
assert_equal(np.cbrt(np.inf), np.inf)
assert_equal(np.cbrt(-np.inf), -np.inf)
class TestPower:
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
for out, inp, msg in _gen_alignment_data(dtype=np.float32,
type='unary',
max_size=11):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
for out, inp, msg in _gen_alignment_data(dtype=np.float64,
type='unary',
max_size=7):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
z = np.array([z], dtype=np.complex_)
with np.errstate(invalid="ignore"):
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cnan = np.array([complex(np.nan, np.nan)])
# FIXME cinf not tested.
#cinf = np.array([complex(np.inf, 0)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
with np.errstate(invalid="ignore"):
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x = np.array([1, 2, 3], np.int16)
res = x**2.0
assert_((x**2.00001).dtype is res.dtype)
assert_array_equal(res, [1, 4, 9])
# check the inplace operation on the casted copy doesn't mess with x
assert_(not np.may_share_memory(res, x))
assert_array_equal(x, [1, 2, 3])
        # Check that the fast path ignores 1-element arrays that are not 0-d
res = x ** np.array([[[2]]])
assert_equal(res.shape, (1, 1, 3))
def test_integer_power(self):
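        # 15**15 == 437893890380859375, which fits in int64 (max ~9.22e18),
        # so this exercises exact integer pow without overflow handling.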
a = np.array([15, 15], 'i8')
b = np.power(a, a)
assert_equal(b, [437893890380859375, 437893890380859375])
def test_integer_power_with_integer_zero_exponent(self):
dtypes = np.typecodes['Integer']
for dt in dtypes:
arr = np.arange(-10, 10, dtype=dt)
assert_equal(np.power(arr, 0), np.ones_like(arr))
dtypes = np.typecodes['UnsignedInteger']
for dt in dtypes:
arr = np.arange(10, dtype=dt)
assert_equal(np.power(arr, 0), np.ones_like(arr))
def test_integer_power_of_1(self):
dtypes = np.typecodes['AllInteger']
for dt in dtypes:
arr = np.arange(10, dtype=dt)
assert_equal(np.power(1, arr), np.ones_like(arr))
def test_integer_power_of_zero(self):
dtypes = np.typecodes['AllInteger']
for dt in dtypes:
arr = np.arange(1, 10, dtype=dt)
assert_equal(np.power(0, arr), np.zeros_like(arr))
def test_integer_to_negative_power(self):
dtypes = np.typecodes['Integer']
for dt in dtypes:
a = np.array([0, 1, 2, 3], dtype=dt)
b = np.array([0, 1, 2, -3], dtype=dt)
one = np.array(1, dtype=dt)
minusone = np.array(-1, dtype=dt)
assert_raises(ValueError, np.power, a, b)
assert_raises(ValueError, np.power, a, minusone)
assert_raises(ValueError, np.power, one, b)
assert_raises(ValueError, np.power, one, minusone)
class TestFloat_power:
def test_type_conversion(self):
arg_type = '?bhilBHILefdgFDG'
res_type = 'ddddddddddddgDDG'
for dtin, dtout in zip(arg_type, res_type):
msg = "dtin: %s, dtout: %s" % (dtin, dtout)
arg = np.ones(1, dtype=dtin)
res = np.float_power(arg, arg)
assert_(res.dtype.name == np.dtype(dtout).name, msg)
class TestLog2:
@pytest.mark.parametrize('dt', ['f', 'd', 'g'])
def test_log2_values(self, dt):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
@pytest.mark.parametrize("i", range(1, 65))
def test_log2_ints(self, i):
        # A good log2 implementation should compute integer powers of two
        # exactly; this might fail on an OS with a poor libm.
v = np.log2(2.**i)
assert_equal(v, float(i), err_msg='at exponent %d' % i)
def test_log2_special(self):
assert_equal(np.log2(1.), 0.)
assert_equal(np.log2(np.inf), np.inf)
assert_(np.isnan(np.log2(np.nan)))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.log2(-1.)))
assert_(np.isnan(np.log2(-np.inf)))
assert_equal(np.log2(0.), -np.inf)
assert_(w[0].category is RuntimeWarning)
assert_(w[1].category is RuntimeWarning)
assert_(w[2].category is RuntimeWarning)
class TestExp2:
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(_FilterInvalids):
# Need test for intermediate precisions
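    # logaddexp2(a, b) == log2(2**a + 2**b); with a = log2(x) and
    # b = log2(y), the exact result is log2(x + y) == log2(6) for every
    # pair used in test_logaddexp2_values below.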
def test_logaddexp2_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)
def test_logaddexp2_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
def test_reduce(self):
assert_equal(np.logaddexp2.identity, -np.inf)
assert_equal(np.logaddexp2.reduce([]), -np.inf)
assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf)
assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0)
class TestLog:
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
        # test aliasing (issue #17761)
x = np.array([2, 0.937500, 3, 0.947500, 1.054697])
xf = np.log(x)
assert_almost_equal(np.log(x, out=x), xf)
# test log() of max for dtype does not raise
for dt in ['f', 'd', 'g']:
with np.errstate(all='raise'):
x = np.finfo(dt).max
np.log(x)
def test_log_strides(self):
np.random.seed(42)
strides = np.array([-4,-3,-2,-1,1,2,3,4])
sizes = np.arange(2,100)
for ii in sizes:
x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii))
x_special = x_f64.copy()
x_special[3:-1:4] = 1.0
y_true = np.log(x_f64)
y_special = np.log(x_special)
for jj in strides:
assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2)
class TestExp:
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
def test_exp_strides(self):
np.random.seed(42)
strides = np.array([-4,-3,-2,-1,1,2,3,4])
sizes = np.arange(2,100)
for ii in sizes:
x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1,size=ii))
y_true = np.exp(x_f64)
for jj in strides:
assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2)
class TestSpecialFloats:
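    """Special-value (nan/inf/signed-zero) results and floating-point
    exception behaviour of the float transcendental ufuncs.
    """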
def test_exp_values(self):
with np.errstate(under='raise', over='raise'):
x = [np.nan, np.nan, np.inf, 0.]
y = [np.nan, -np.nan, np.inf, -np.inf]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.exp(yf), xf)
# See: https://github.com/numpy/numpy/issues/19192
@pytest.mark.xfail(
_glibc_older_than("2.17"),
reason="Older glibc versions may not raise appropriate FP exceptions"
)
def test_exp_exceptions(self):
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.exp, np.float32(100.))
assert_raises(FloatingPointError, np.exp, np.float32(1E19))
assert_raises(FloatingPointError, np.exp, np.float64(800.))
assert_raises(FloatingPointError, np.exp, np.float64(1E19))
with np.errstate(under='raise'):
assert_raises(FloatingPointError, np.exp, np.float32(-1000.))
assert_raises(FloatingPointError, np.exp, np.float32(-1E19))
assert_raises(FloatingPointError, np.exp, np.float64(-1000.))
assert_raises(FloatingPointError, np.exp, np.float64(-1E19))
def test_log_values(self):
with np.errstate(all='ignore'):
x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan]
y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0]
y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
yf1p = np.array(y1p, dtype=dt)
assert_equal(np.log(yf), xf)
assert_equal(np.log2(yf), xf)
assert_equal(np.log10(yf), xf)
assert_equal(np.log1p(yf1p), xf)
with np.errstate(divide='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.log,
np.array(0.0, dtype=dt))
assert_raises(FloatingPointError, np.log2,
np.array(0.0, dtype=dt))
assert_raises(FloatingPointError, np.log10,
np.array(0.0, dtype=dt))
assert_raises(FloatingPointError, np.log1p,
np.array(-1.0, dtype=dt))
with np.errstate(invalid='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.log,
np.array(-np.inf, dtype=dt))
assert_raises(FloatingPointError, np.log,
np.array(-1.0, dtype=dt))
assert_raises(FloatingPointError, np.log2,
np.array(-np.inf, dtype=dt))
assert_raises(FloatingPointError, np.log2,
np.array(-1.0, dtype=dt))
assert_raises(FloatingPointError, np.log10,
np.array(-np.inf, dtype=dt))
assert_raises(FloatingPointError, np.log10,
np.array(-1.0, dtype=dt))
assert_raises(FloatingPointError, np.log1p,
np.array(-np.inf, dtype=dt))
assert_raises(FloatingPointError, np.log1p,
np.array(-2.0, dtype=dt))
# See https://github.com/numpy/numpy/issues/18005
with assert_no_warnings():
a = np.array(1e9, dtype='float32')
np.log(a)
def test_sincos_values(self):
with np.errstate(all='ignore'):
x = [np.nan, np.nan, np.nan, np.nan]
y = [np.nan, -np.nan, np.inf, -np.inf]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.sin(yf), xf)
assert_equal(np.cos(yf), xf)
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, np.sin, np.float32(-np.inf))
assert_raises(FloatingPointError, np.sin, np.float32(np.inf))
assert_raises(FloatingPointError, np.cos, np.float32(-np.inf))
assert_raises(FloatingPointError, np.cos, np.float32(np.inf))
@pytest.mark.parametrize('dt', ['f', 'd', 'g'])
def test_sqrt_values(self, dt):
with np.errstate(all='ignore'):
x = [np.nan, np.nan, np.inf, np.nan, 0.]
y = [np.nan, -np.nan, np.inf, -np.inf, 0.]
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.sqrt(yf), xf)
# with np.errstate(invalid='raise'):
# assert_raises(
# FloatingPointError, np.sqrt, np.array(-100., dtype=dt)
# )
def test_abs_values(self):
x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0]
y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.abs(yf), xf)
def test_square_values(self):
x = [np.nan, np.nan, np.inf, np.inf]
y = [np.nan, -np.nan, np.inf, -np.inf]
with np.errstate(all='ignore'):
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.square(yf), xf)
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.square,
np.array(1E32, dtype='f'))
assert_raises(FloatingPointError, np.square,
np.array(1E200, dtype='d'))
def test_reciprocal_values(self):
with np.errstate(all='ignore'):
x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf]
y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_equal(np.reciprocal(yf), xf)
with np.errstate(divide='raise'):
for dt in ['f', 'd', 'g']:
assert_raises(FloatingPointError, np.reciprocal,
np.array(-0.0, dtype=dt))
def test_tan(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf]
out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.tan(in_arr), out_arr)
with np.errstate(invalid='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.tan,
np.array(np.inf, dtype=dt))
assert_raises(FloatingPointError, np.tan,
np.array(-np.inf, dtype=dt))
def test_arcsincos(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.nan, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arcsin(in_arr), out_arr)
assert_equal(np.arccos(in_arr), out_arr)
for callable in [np.arcsin, np.arccos]:
for value in [np.inf, -np.inf, 2.0, -2.0]:
for dt in ['f', 'd']:
with np.errstate(invalid='raise'):
assert_raises(FloatingPointError, callable,
np.array(value, dtype=dt))
def test_arctan(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan]
out = [np.nan, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arctan(in_arr), out_arr)
def test_sinh(self):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, -np.inf]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.sinh(in_arr), out_arr)
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.sinh,
np.array(120.0, dtype='f'))
assert_raises(FloatingPointError, np.sinh,
np.array(1200.0, dtype='d'))
def test_cosh(self):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, np.inf]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.cosh(in_arr), out_arr)
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.cosh,
np.array(120.0, dtype='f'))
assert_raises(FloatingPointError, np.cosh,
np.array(1200.0, dtype='d'))
def test_tanh(self):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, 1.0, -1.0]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.tanh(in_arr), out_arr)
def test_arcsinh(self):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, -np.inf]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arcsinh(in_arr), out_arr)
def test_arccosh(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0]
out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arccosh(in_arr), out_arr)
for value in [0.0, -np.inf]:
with np.errstate(invalid='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.arccosh,
np.array(value, dtype=dt))
def test_arctanh(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0]
out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.arctanh(in_arr), out_arr)
for value in [1.01, np.inf, -np.inf, 1.0, -1.0]:
with np.errstate(invalid='raise', divide='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.arctanh,
np.array(value, dtype=dt))
# See: https://github.com/numpy/numpy/issues/20448
@pytest.mark.xfail(
_glibc_older_than("2.17"),
reason="Older glibc versions may not raise appropriate FP exceptions"
)
def test_exp2(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, 0.0]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.exp2(in_arr), out_arr)
for value in [2000.0, -2000.0]:
with np.errstate(over='raise', under='raise'):
for dt in ['f', 'd']:
assert_raises(FloatingPointError, np.exp2,
np.array(value, dtype=dt))
def test_expm1(self):
with np.errstate(all='ignore'):
in_ = [np.nan, -np.nan, np.inf, -np.inf]
out = [np.nan, np.nan, np.inf, -1.0]
for dt in ['f', 'd']:
in_arr = np.array(in_, dtype=dt)
out_arr = np.array(out, dtype=dt)
assert_equal(np.expm1(in_arr), out_arr)
for value in [200.0, 2000.0]:
with np.errstate(over='raise'):
assert_raises(FloatingPointError, np.expm1,
np.array(value, dtype='f'))
class TestFPClass:
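    """isnan/isinf/signbit/isfinite on float32/float64 special values,
    including tiny values near the subnormal range, over several strides.
    """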
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
def test_fpclass(self, stride):
arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d')
arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f')
nan = np.array([True, True, False, False, False, False, False, False, False, False])
inf = np.array([False, False, True, True, False, False, False, False, False, False])
sign = np.array([False, True, False, True, True, False, True, False, False, True])
finite = np.array([False, False, False, False, True, True, True, True, True, True])
assert_equal(np.isnan(arr_f32[::stride]), nan[::stride])
assert_equal(np.isnan(arr_f64[::stride]), nan[::stride])
assert_equal(np.isinf(arr_f32[::stride]), inf[::stride])
assert_equal(np.isinf(arr_f64[::stride]), inf[::stride])
assert_equal(np.signbit(arr_f32[::stride]), sign[::stride])
assert_equal(np.signbit(arr_f64[::stride]), sign[::stride])
assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride])
assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride])
class TestLDExp:
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
@pytest.mark.parametrize("dtype", ['f', 'd'])
def test_ldexp(self, dtype, stride):
mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype)
exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i')
out = np.zeros(8, dtype=dtype)
assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride])
assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride])
class TestFRExp:
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
@pytest.mark.parametrize("dtype", ['f', 'd'])
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="np.frexp gives different answers for NAN/INF on windows and linux")
def test_frexp(self, dtype, stride):
arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype)
mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype)
exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i')
out_mant = np.ones(8, dtype=dtype)
out_exp = 2*np.ones(8, dtype='i')
mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride]))
assert_equal(mant_true[::stride], mant)
assert_equal(exp_true[::stride], exp)
assert_equal(out_mant[::stride], mant_true[::stride])
assert_equal(out_exp[::stride], exp_true[::stride])
# func : [maxulperror, low, high]
avx_ufuncs = {'sqrt' :[1, 0., 100.],
'absolute' :[0, -100., 100.],
'reciprocal' :[1, 1., 100.],
'square' :[1, -100., 100.],
'rint' :[0, -100., 100.],
'floor' :[0, -100., 100.],
'ceil' :[0, -100., 100.],
'trunc' :[0, -100., 100.]}
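# maxulperror bounds how far the float32/float64 result may differ from the
# correctly rounded longdouble reference, in units in the last place (ULP);
# 0 means the results must match exactly (see TestAVXUfuncs below).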
class TestAVXUfuncs:
def test_avx_based_ufunc(self):
strides = np.array([-4,-3,-2,-1,1,2,3,4])
np.random.seed(42)
for func, prop in avx_ufuncs.items():
maxulperr = prop[0]
minval = prop[1]
maxval = prop[2]
# various array sizes to ensure masking in AVX is tested
for size in range(1,32):
myfunc = getattr(np, func)
x_f32 = np.float32(np.random.uniform(low=minval, high=maxval,
size=size))
x_f64 = np.float64(x_f32)
x_f128 = np.longdouble(x_f32)
y_true128 = myfunc(x_f128)
if maxulperr == 0:
assert_equal(myfunc(x_f32), np.float32(y_true128))
assert_equal(myfunc(x_f64), np.float64(y_true128))
else:
assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128),
maxulp=maxulperr)
assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128),
maxulp=maxulperr)
# various strides to test gather instruction
if size > 1:
y_true32 = myfunc(x_f32)
y_true64 = myfunc(x_f64)
for jj in strides:
assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])
assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])
class TestAVXFloat32Transcendental:
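    """Accuracy of the SIMD float32 exp/log/sin/cos paths, checked in ULP
    against float64 reference results.
    """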
def test_exp_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)
def test_log_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)
def test_sincos_float32(self):
np.random.seed(42)
N = 1000000
        M = N // 20
index = np.random.randint(low=0, high=N, size=M)
x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
if not _glibc_older_than("2.17"):
# test coverage for elements > 117435.992f for which glibc is used
# this is known to be problematic on old glibc, so skip it there
x_f32[index] = np.float32(10E+10*np.random.rand(M))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)
assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)
        # test aliasing (issue #17761)
tx_f32 = x_f32.copy()
assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2)
assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2)
def test_strided_float32(self):
np.random.seed(42)
strides = np.array([-4,-3,-2,-1,1,2,3,4])
sizes = np.arange(2,100)
for ii in sizes:
x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
x_f32_large = x_f32.copy()
x_f32_large[3:-1:4] = 120000.0
exp_true = np.exp(x_f32)
log_true = np.log(x_f32)
sin_true = np.sin(x_f32_large)
cos_true = np.cos(x_f32_large)
for jj in strides:
assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
def test_logaddexp_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, 0)))
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
def test_reduce(self):
assert_equal(np.logaddexp.identity, -np.inf)
assert_equal(np.logaddexp.reduce([]), -np.inf)
class TestLog1p:
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
def test_special(self):
with np.errstate(invalid="ignore", divide="ignore"):
assert_equal(ncu.log1p(np.nan), np.nan)
assert_equal(ncu.log1p(np.inf), np.inf)
assert_equal(ncu.log1p(-1.), -np.inf)
assert_equal(ncu.log1p(-2.), np.nan)
assert_equal(ncu.log1p(-np.inf), np.nan)
class TestExpm1:
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
def test_special(self):
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(0.), 0.)
assert_equal(ncu.expm1(-0.), -0.)
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(-np.inf), -1.)
def test_complex(self):
x = np.asarray(1e-12)
assert_allclose(x, ncu.expm1(x))
x = x.astype(np.complex128)
assert_allclose(x, ncu.expm1(x))
class TestHypot:
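    """hypot(x, y) == sqrt(x**2 + y**2), exact for Pythagorean triples and
    usable as a reduction (see test_reduce below).
    """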
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def test_reduce(self):
assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0)
assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0)
assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0)
assert_equal(ncu.hypot.reduce([]), 0.0)
def assert_hypot_isnan(x, y):
with np.errstate(invalid='ignore'):
assert_(np.isnan(ncu.hypot(x, y)),
"hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
def assert_hypot_isinf(x, y):
with np.errstate(invalid='ignore'):
assert_(np.isinf(ncu.hypot(x, y)),
"hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
class TestHypotSpecialValues:
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
assert_hypot_isinf(np.inf, np.inf)
assert_hypot_isinf(np.inf, 23.0)
def test_no_fpe(self):
assert_no_warnings(ncu.hypot, np.inf, 0)
def assert_arctan2_isnan(x, y):
    assert_(np.isnan(ncu.arctan2(x, y)), "arctan2(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispinf(x, y):
    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan2(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isninf(x, y):
    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan2(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispzero(x, y):
    assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isnzero(x, y):
    assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
class TestArctan2SpecialValues:
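    """Special values of arctan2, following the C99/IEEE-754 atan2
    special-case rules noted in the per-test comments below.
    """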
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(np.PZERO, np.PZERO)
assert_arctan2_isnzero(np.NZERO, np.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(np.PZERO, 1)
assert_arctan2_isnzero(np.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
class TestLdexp:
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
np.array(3, tp)), 16.)
def test_ldexp(self):
# The default Python int type should work
assert_almost_equal(ncu.ldexp(2., 3), 16.)
# The following int types should all be accepted
self._check_ldexp(np.int8)
self._check_ldexp(np.int16)
self._check_ldexp(np.int32)
self._check_ldexp('i')
self._check_ldexp('l')
def test_ldexp_overflow(self):
# silence warning emitted on overflow
with np.errstate(over="ignore"):
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
class TestMaximum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.maximum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1, 2j]), 1)
assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.maximum(arg1, arg2), out)
def test_object_nans(self):
# Multiple checks to give this a chance to
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
x = np.array(float('nan'), object)
y = 1.0
z = np.array(float('nan'), object)
assert_(np.maximum(x, y) == 1.0)
assert_(np.maximum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=complex)
arg2 = np.array([cnan, 0, cnan], dtype=complex)
out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
def test_strided_array(self):
arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0])
out = np.ones(8)
out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0])
assert_equal(np.maximum(arr1,arr2), maxtrue)
assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2])
assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0]))
assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan]))
assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan]))
assert_equal(out, out_maxtrue)
def test_precision(self):
dtypes = [np.float16, np.float32, np.float64, np.longdouble]
for dt in dtypes:
dtmin = np.finfo(dt).min
dtmax = np.finfo(dt).max
d1 = dt(0.1)
            d1_next = np.nextafter(d1, np.inf)  # next representable float above d1
test_cases = [
# v1 v2 expected
(dtmin, -np.inf, dtmin),
(dtmax, -np.inf, dtmax),
(d1, d1_next, d1_next),
(dtmax, np.nan, np.nan),
]
for v1, v2, expected in test_cases:
assert_equal(np.maximum([v1], [v2]), [expected])
assert_equal(np.maximum.reduce([v1, v2]), expected)
class TestMinimum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.minimum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.minimum.reduce([1, 2j]), 2j)
assert_equal(np.minimum.reduce([1+3j, 2j]), 2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.minimum(arg1, arg2), out)
def test_object_nans(self):
# Multiple checks to give this a chance to
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
x = np.array(float('nan'), object)
y = 1.0
z = np.array(float('nan'), object)
assert_(np.minimum(x, y) == 1.0)
assert_(np.minimum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=complex)
arg2 = np.array([cnan, 0, cnan], dtype=complex)
out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
def test_strided_array(self):
arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf])
out = np.ones(8)
out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0])
assert_equal(np.minimum(arr1,arr2), mintrue)
assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2])
assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0]))
assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan]))
assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan]))
assert_equal(out, out_mintrue)
def test_precision(self):
dtypes = [np.float16, np.float32, np.float64, np.longdouble]
for dt in dtypes:
dtmin = np.finfo(dt).min
dtmax = np.finfo(dt).max
d1 = dt(0.1)
d1_next = np.nextafter(d1, np.inf)
test_cases = [
# v1 v2 expected
(dtmin, np.inf, dtmin),
(dtmax, np.inf, dtmax),
(d1, d1_next, d1),
(dtmin, np.nan, np.nan),
]
for v1, v2, expected in test_cases:
assert_equal(np.minimum([v1], [v2]), [expected])
assert_equal(np.minimum.reduce([v1, v2]), expected)
class TestFmax(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmax.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 9)
assert_equal(func(tmp2), 9)
def test_reduce_complex(self):
assert_equal(np.fmax.reduce([1, 2j]), 1)
assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=complex)
arg2 = np.array([cnan, 0, cnan], dtype=complex)
out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmax(arg1, arg2), out)
def test_precision(self):
dtypes = [np.float16, np.float32, np.float64, np.longdouble]
for dt in dtypes:
dtmin = np.finfo(dt).min
dtmax = np.finfo(dt).max
d1 = dt(0.1)
d1_next = np.nextafter(d1, np.inf)
test_cases = [
# v1 v2 expected
(dtmin, -np.inf, dtmin),
(dtmax, -np.inf, dtmax),
(d1, d1_next, d1_next),
(dtmax, np.nan, dtmax),
]
for v1, v2, expected in test_cases:
assert_equal(np.fmax([v1], [v2]), [expected])
assert_equal(np.fmax.reduce([v1, v2]), expected)
class TestFmin(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 1)
assert_equal(func(tmp2), 1)
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1, 2j]), 2j)
assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=complex)
arg2 = np.array([cnan, 0, cnan], dtype=complex)
out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmin(arg1, arg2), out)
def test_precision(self):
dtypes = [np.float16, np.float32, np.float64, np.longdouble]
for dt in dtypes:
dtmin = np.finfo(dt).min
dtmax = np.finfo(dt).max
d1 = dt(0.1)
d1_next = np.nextafter(d1, np.inf)
test_cases = [
# v1 v2 expected
(dtmin, np.inf, dtmin),
(dtmax, np.inf, dtmax),
(d1, d1_next, d1),
(dtmin, np.nan, dtmin),
]
for v1, v2, expected in test_cases:
assert_equal(np.fmin([v1], [v2]), [expected])
assert_equal(np.fmin.reduce([v1, v2]), expected)
class TestBool:
def test_exceptions(self):
a = np.ones(1, dtype=np.bool_)
assert_raises(TypeError, np.negative, a)
assert_raises(TypeError, np.positive, a)
assert_raises(TypeError, np.subtract, a, a)
def test_truth_table_logical(self):
        # 2, 3 and 4 serve as true values
input1 = [0, 0, 3, 2]
input2 = [0, 4, 0, 2]
typecodes = (np.typecodes['AllFloat']
+ np.typecodes['AllInteger']
+ '?') # boolean
for dtype in map(np.dtype, typecodes):
arg1 = np.asarray(input1, dtype=dtype)
arg2 = np.asarray(input2, dtype=dtype)
# OR
out = [False, True, True, True]
for func in (np.logical_or, np.maximum):
assert_equal(func(arg1, arg2).astype(bool), out)
# AND
out = [False, False, False, True]
for func in (np.logical_and, np.minimum):
assert_equal(func(arg1, arg2).astype(bool), out)
# XOR
out = [False, True, True, False]
for func in (np.logical_xor, np.not_equal):
assert_equal(func(arg1, arg2).astype(bool), out)
def test_truth_table_bitwise(self):
arg1 = [False, False, True, True]
arg2 = [False, True, False, True]
out = [False, True, True, True]
assert_equal(np.bitwise_or(arg1, arg2), out)
out = [False, False, False, True]
assert_equal(np.bitwise_and(arg1, arg2), out)
out = [False, True, True, False]
assert_equal(np.bitwise_xor(arg1, arg2), out)
def test_reduce(self):
none = np.array([0, 0, 0, 0], bool)
some = np.array([1, 0, 1, 1], bool)
every = np.array([1, 1, 1, 1], bool)
empty = np.array([], bool)
arrs = [none, some, every, empty]
for arr in arrs:
assert_equal(np.logical_and.reduce(arr), all(arr))
for arr in arrs:
assert_equal(np.logical_or.reduce(arr), any(arr))
for arr in arrs:
assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
class TestBitwiseUFuncs:
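    """Bitwise ufuncs across bool, all integer sizes, and object dtype;
    -1 serves as the all-bits-set pattern in two's complement.
    """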
bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
def test_values(self):
for dt in self.bitwise_types:
zeros = np.array([0], dtype=dt)
ones = np.array([-1], dtype=dt)
msg = "dt = '%s'" % dt.char
assert_equal(np.bitwise_not(zeros), ones, err_msg=msg)
assert_equal(np.bitwise_not(ones), zeros, err_msg=msg)
assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg)
assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg)
assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg)
assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg)
assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg)
assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg)
assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg)
assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg)
assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg)
assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg)
assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg)
assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg)
def test_types(self):
for dt in self.bitwise_types:
zeros = np.array([0], dtype=dt)
ones = np.array([-1], dtype=dt)
msg = "dt = '%s'" % dt.char
assert_(np.bitwise_not(zeros).dtype == dt, msg)
assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg)
assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg)
assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg)
def test_identity(self):
assert_(np.bitwise_or.identity == 0, 'bitwise_or')
assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')
assert_(np.bitwise_and.identity == -1, 'bitwise_and')
def test_reduction(self):
binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and)
for dt in self.bitwise_types:
zeros = np.array([0], dtype=dt)
ones = np.array([-1], dtype=dt)
for f in binary_funcs:
msg = "dt: '%s', f: '%s'" % (dt, f)
assert_equal(f.reduce(zeros), zeros, err_msg=msg)
assert_equal(f.reduce(ones), ones, err_msg=msg)
# Test empty reduction, no object dtype
for dt in self.bitwise_types[:-1]:
# No object array types
empty = np.array([], dtype=dt)
for f in binary_funcs:
msg = "dt: '%s', f: '%s'" % (dt, f)
tgt = np.array(f.identity, dtype=dt)
res = f.reduce(empty)
assert_equal(res, tgt, err_msg=msg)
assert_(res.dtype == tgt.dtype, msg)
        # Empty object arrays use the identity.  Note that the types may
        # differ: the actual type used is determined by the assign_identity
        # function and is not the same as the type returned by the identity
        # method.
for f in binary_funcs:
msg = "dt: '%s'" % (f,)
empty = np.array([], dtype=object)
tgt = f.identity
res = f.reduce(empty)
assert_equal(res, tgt, err_msg=msg)
# Non-empty object arrays do not use the identity
for f in binary_funcs:
msg = "dt: '%s'" % (f,)
btype = np.array([True], dtype=object)
assert_(type(f.reduce(btype)) is bool, msg)
class TestInt:
def test_logical_not(self):
x = np.ones(10, dtype=np.int16)
o = np.ones(10 * 2, dtype=bool)
tgt = o.copy()
tgt[::2] = False
os = o[::2]
assert_array_equal(np.logical_not(x, out=os), False)
assert_array_equal(o, tgt)
class TestFloatingPoint:
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees:
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians:
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestHeaviside:
def test_heaviside(self):
x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
expected1 = expectedhalf.copy()
expected1[0, 2] = 1
h = ncu.heaviside(x, 0.5)
assert_equal(h, expectedhalf)
h = ncu.heaviside(x, 1.0)
assert_equal(h, expected1)
x = x.astype(np.float32)
h = ncu.heaviside(x, np.float32(0.5))
assert_equal(h, expectedhalf.astype(np.float32))
h = ncu.heaviside(x, np.float32(1.0))
assert_equal(h, expected1.astype(np.float32))
class TestSign:
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
with np.errstate(invalid='ignore'):
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_sign_dtype_object(self):
# In reference to github issue #6229
foo = np.array([-.1, 0, .1])
a = np.sign(foo.astype(object))
b = np.sign(foo)
assert_array_equal(a, b)
def test_sign_dtype_nan_object(self):
# In reference to github issue #6229
def test_nan():
foo = np.array([np.nan])
# FIXME: a not used
a = np.sign(foo.astype(object))
assert_raises(TypeError, test_nan)
class TestMinMax:
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
for dt, sz in [(np.float32, 15), (np.float64, 7)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
for i in range(inp.size):
inp[:] = np.arange(inp.size, dtype=dt)
inp[i] = np.nan
emsg = lambda: '%r\n%s' % (inp, msg)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
assert_(np.isnan(inp.max()), msg=emsg)
assert_(np.isnan(inp.min()), msg=emsg)
inp[i] = 1e10
assert_equal(inp.max(), 1e10, err_msg=msg)
inp[i] = -1e10
assert_equal(inp.min(), -1e10, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
        # i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
def test_reduce_reorder(self):
        # gh-10370, gh-11029: some compilers reorder the call to
        # npy_getfloatstatus and put it before the call to an intrinsic
        # function that causes the invalid status to be set. Also make sure
        # warnings are not emitted.
for n in (2, 4, 8, 16, 32):
for dt in (np.float32, np.float16, np.complex64):
for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
assert_equal(np.min(r), np.nan)
def test_minimize_no_warns(self):
a = np.minimum(np.nan, 1)
assert_equal(a, np.nan)
class TestAbsoluteNegative:
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
tgt = [ncu.absolute(i) for i in inp]
np.absolute(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
assert_((out >= 0).all())
tgt = [-1*(i) for i in inp]
np.negative(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
for v in [np.nan, -np.inf, np.inf]:
for i in range(inp.size):
d = np.arange(inp.size, dtype=dt)
inp[:] = -d
inp[i] = v
d[i] = -v if v == -np.inf else v
assert_array_equal(np.abs(inp), d, err_msg=msg)
np.abs(inp, out=out)
assert_array_equal(out, d, err_msg=msg)
assert_array_equal(-inp, -1*inp, err_msg=msg)
d = -1 * inp
np.negative(inp, out=out)
assert_array_equal(out, d, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
        # i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(np.abs(d), d)
assert_equal(np.negative(d), -d)
np.negative(d, out=d)
np.negative(np.ones_like(d), out=d)
np.abs(d, out=d)
np.abs(np.ones_like(d), out=d)
class TestPositive:
def test_valid(self):
valid_dtypes = [int, float, complex, object]
for dtype in valid_dtypes:
x = np.arange(5, dtype=dtype)
result = np.positive(x)
assert_equal(x, result, err_msg=str(dtype))
def test_invalid(self):
with assert_raises(TypeError):
np.positive(True)
with assert_raises(TypeError):
np.positive(np.datetime64('2000-01-01'))
with assert_raises(TypeError):
np.positive(np.array(['foo'], dtype=str))
with assert_raises(TypeError):
np.positive(np.array(['bar'], dtype=object))
class TestSpecialMethods:
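    """Dispatch and wrapping hooks: __array__, __array_wrap__,
    __array_prepare__, __array_priority__ and __array_ufunc__.
    """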
def test_wrap(self):
class with_wrap:
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
assert_(func is ncu.minimum)
assert_equal(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
assert_equal(i, 0)
def test_wrap_and_prepare_out(self):
# Calling convention for out should not affect how special methods are
# called
class StoreArrayPrepareWrap(np.ndarray):
_wrap_args = None
_prepare_args = None
def __new__(cls):
return np.zeros(()).view(cls)
def __array_wrap__(self, obj, context):
self._wrap_args = context[1]
return obj
def __array_prepare__(self, obj, context):
self._prepare_args = context[1]
return obj
@property
def args(self):
# We need to ensure these are fetched at the same time, before
# any other ufuncs are called by the assertions
return (self._prepare_args, self._wrap_args)
def __repr__(self):
return "a" # for short test output
def do_test(f_call, f_expected):
a = StoreArrayPrepareWrap()
f_call(a)
p, w = a.args
expected = f_expected(a)
try:
assert_equal(p, expected)
assert_equal(w, expected)
except AssertionError as e:
# assert_equal produces truly useless error messages
raise AssertionError("\n".join([
"Bad arguments passed in ufunc call",
" expected: {}".format(expected),
" __array_prepare__ got: {}".format(p),
" __array_wrap__ got: {}".format(w)
]))
# method not on the out argument
do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
# method on the out argument
do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
# Also check the where mask handling:
do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0))
do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a))
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
assert_(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
assert_(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap:
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A:
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
assert_(type(f(x, x)) is np.ndarray)
assert_(type(f(x, a)) is A)
assert_(type(f(x, b)) is B)
assert_(type(f(x, c)) is C)
assert_(type(f(a, x)) is A)
assert_(type(f(b, x)) is B)
assert_(type(f(c, x)) is C)
assert_(type(f(a, a)) is A)
assert_(type(f(a, b)) is B)
assert_(type(f(b, a)) is B)
assert_(type(f(b, b)) is B)
assert_(type(f(b, c)) is C)
assert_(type(f(c, b)) is C)
assert_(type(f(c, c)) is C)
        assert_(type(ncu.exp(a)) is A)
        assert_(type(ncu.exp(b)) is B)
        assert_(type(ncu.exp(c)) is C)
def test_failing_wrap(self):
class A:
def __array__(self):
return np.zeros(2)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
assert_raises(RuntimeError, ncu.maximum.reduce, a)
def test_failing_out_wrap(self):
singleton = np.array([1.0])
class Ok(np.ndarray):
def __array_wrap__(self, obj):
return singleton
class Bad(np.ndarray):
def __array_wrap__(self, obj):
raise RuntimeError
ok = np.empty(1).view(Ok)
bad = np.empty(1).view(Bad)
# double-free (segfault) of "ok" if "bad" raises an exception
for i in range(10):
assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
def test_none_wrap(self):
# Tests that issue #8507 is resolved. Previously, this would segfault
class A:
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context=None):
return None
a = A()
assert_equal(ncu.maximum(a, a), None)
def test_default_prepare(self):
class with_wrap:
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
@pytest.mark.parametrize("use_where", [True, False])
def test_prepare(self, use_where):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
                # make sure we can return a new array
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
if use_where:
x = np.add(a, a, where=np.array(True))
else:
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
@pytest.mark.parametrize("use_where", [True, False])
def test_prepare_out(self, use_where):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
return np.array(arr).view(type=with_prepare)
a = np.array([1]).view(type=with_prepare)
if use_where:
x = np.add(a, a, a, where=[True])
else:
x = np.add(a, a, a)
# Returned array is new, because of the strange
# __array_prepare__ above
assert_(not np.shares_memory(x, a))
assert_equal(x, np.array([2]))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A:
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
assert_raises(RuntimeError, ncu.maximum, a, a, where=False)
def test_array_too_many_args(self):
class A:
def __array__(self, dtype, context):
return np.zeros(1)
a = A()
assert_raises_regex(TypeError, '2 required positional', np.sum, a)
def test_ufunc_override(self):
        # Check that the __array_ufunc__ override wins even over an operand
        # with a high __array_priority__.
class A:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return self, func, method, inputs, kwargs
class MyNDArray(np.ndarray):
__array_priority__ = 100
a = A()
b = np.array([1]).view(MyNDArray)
res0 = np.multiply(a, b)
res1 = np.multiply(b, b, out=a)
# self
assert_equal(res0[0], a)
assert_equal(res1[0], a)
assert_equal(res0[1], np.multiply)
assert_equal(res1[1], np.multiply)
assert_equal(res0[2], '__call__')
assert_equal(res1[2], '__call__')
assert_equal(res0[3], (a, b))
assert_equal(res1[3], (b, b))
assert_equal(res0[4], {})
assert_equal(res1[4], {'out': (a,)})
def test_ufunc_override_mro(self):
        # Some multi-arg functions for testing.
def tres_mul(a, b, c):
return a * b * c
def quatro_mul(a, b, c, d):
return a * b * c * d
# Make these into ufuncs.
three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
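        # Dispatch order for __array_ufunc__: each distinct input type is
        # consulted once, with subclasses tried before their superclasses; a
        # NotImplemented return defers to the next candidate, and if every
        # candidate defers the ufunc raises TypeError.  C/CSub count their
        # calls so the tests below can verify who was actually consulted.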
class A:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "A"
class ASub(A):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "ASub"
class B:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "B"
class C:
def __init__(self):
self.count = 0
def __array_ufunc__(self, func, method, *inputs, **kwargs):
self.count += 1
return NotImplemented
class CSub(C):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
self.count += 1
return NotImplemented
a = A()
a_sub = ASub()
b = B()
c = C()
# Standard
res = np.multiply(a, a_sub)
assert_equal(res, "ASub")
res = np.multiply(a_sub, b)
assert_equal(res, "ASub")
# With 1 NotImplemented
res = np.multiply(c, a)
assert_equal(res, "A")
assert_equal(c.count, 1)
# Check our counter works, so we can trust tests below.
res = np.multiply(c, a)
assert_equal(c.count, 2)
# Both NotImplemented.
c = C()
c_sub = CSub()
assert_raises(TypeError, np.multiply, c, c_sub)
assert_equal(c.count, 1)
assert_equal(c_sub.count, 1)
c.count = c_sub.count = 0
assert_raises(TypeError, np.multiply, c_sub, c)
assert_equal(c.count, 1)
assert_equal(c_sub.count, 1)
c.count = 0
assert_raises(TypeError, np.multiply, c, c)
assert_equal(c.count, 1)
c.count = 0
assert_raises(TypeError, np.multiply, 2, c)
assert_equal(c.count, 1)
# Ternary testing.
assert_equal(three_mul_ufunc(a, 1, 2), "A")
assert_equal(three_mul_ufunc(1, a, 2), "A")
assert_equal(three_mul_ufunc(1, 2, a), "A")
assert_equal(three_mul_ufunc(a, a, 6), "A")
assert_equal(three_mul_ufunc(a, 2, a), "A")
assert_equal(three_mul_ufunc(a, 2, b), "A")
assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
c.count = 0
assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
assert_equal(c.count, 1)
c.count = 0
assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
assert_equal(c.count, 0)
c.count = 0
assert_equal(three_mul_ufunc(a, b, c), "A")
assert_equal(c.count, 0)
c_sub.count = 0
assert_equal(three_mul_ufunc(a, b, c_sub), "A")
assert_equal(c_sub.count, 0)
assert_equal(three_mul_ufunc(1, 2, b), "B")
assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c)
assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3)
# Quaternary testing.
assert_equal(four_mul_ufunc(a, 1, 2, 3), "A")
assert_equal(four_mul_ufunc(1, a, 2, 3), "A")
assert_equal(four_mul_ufunc(1, 1, a, 3), "A")
assert_equal(four_mul_ufunc(1, 1, 2, a), "A")
assert_equal(four_mul_ufunc(a, b, 2, 3), "A")
assert_equal(four_mul_ufunc(1, a, 2, b), "A")
assert_equal(four_mul_ufunc(b, 1, a, 3), "B")
assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
c = C()
c_sub = CSub()
assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
assert_equal(c.count, 1)
c.count = 0
assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
assert_equal(c_sub.count, 1)
assert_equal(c.count, 1)
c2 = C()
c.count = c_sub.count = 0
assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2)
assert_equal(c_sub.count, 1)
assert_equal(c.count, 1)
assert_equal(c2.count, 0)
c.count = c2.count = c_sub.count = 0
assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c)
assert_equal(c_sub.count, 1)
assert_equal(c.count, 0)
assert_equal(c2.count, 1)
def test_ufunc_override_methods(self):
class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self, ufunc, method, inputs, kwargs
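        # A simply echoes back everything it receives, so each block below
        # can inspect exactly what the ufunc machinery forwarded to
        # __array_ufunc__ for every method (__call__, reduce, accumulate,
        # reduceat, outer, at).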
# __call__
a = A()
with assert_raises(TypeError):
np.multiply.__call__(1, a, foo='bar', answer=42)
res = np.multiply.__call__(1, a, subok='bar', where=42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], '__call__')
assert_equal(res[3], (1, a))
assert_equal(res[4], {'subok': 'bar', 'where': 42})
# __call__, wrong args
assert_raises(TypeError, np.multiply, a)
assert_raises(TypeError, np.multiply, a, a, a, a)
assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a')
assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0])
# reduce, positional args
res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'keepdims': 'keep0',
'axis': 'axis0'})
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
keepdims='keep0', initial='init0',
where='where0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'keepdims': 'keep0',
'axis': 'axis0',
'initial': 'init0',
'where': 'where0'})
# reduce, output equal to None removed, but not other explicit ones,
# even if they are at their default value.
res = np.multiply.reduce(a, 0, None, None, False)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)
assert_equal(res[4], {'axis': 0, 'keepdims': True})
res = np.multiply.reduce(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
res = np.multiply.reduce(a, 0, None, None, False, 2, True)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
'initial': 2, 'where': True})
# np._NoValue ignored for initial
res = np.multiply.reduce(a, 0, None, None, False,
np._NoValue, True)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
'where': True})
# None kept for initial, True for where.
res = np.multiply.reduce(a, 0, None, None, False, None, True)
assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
'initial': None, 'where': True})
# reduce, wrong args
assert_raises(ValueError, np.multiply.reduce, a, out=())
assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')
# accumulate, pos args
res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# accumulate, kwargs
res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], (a,))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# accumulate, output equal to None removed.
res = np.multiply.accumulate(a, 0, None, None)
assert_equal(res[4], {'axis': 0, 'dtype': None})
res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')
assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})
res = np.multiply.accumulate(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
# accumulate, wrong args
assert_raises(ValueError, np.multiply.accumulate, a, out=())
assert_raises(ValueError, np.multiply.accumulate, a,
out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.accumulate, a,
'axis0', axis='axis0')
# reduceat, pos args
res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], (a, [4, 2]))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# reduceat, kwargs
res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], (a, [4, 2]))
assert_equal(res[4], {'dtype':'dtype0',
'out': ('out0',),
'axis': 'axis0'})
# reduceat, output equal to None removed.
res = np.multiply.reduceat(a, [4, 2], 0, None, None)
assert_equal(res[4], {'axis': 0, 'dtype': None})
res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
assert_equal(res[4], {'axis': None, 'dtype': None})
# reduceat, wrong args
        assert_raises(ValueError, np.multiply.reduceat, a, [4, 2], out=())
        assert_raises(ValueError, np.multiply.reduceat, a, [4, 2],
                      out=('out0', 'out1'))
        assert_raises(TypeError, np.multiply.reduceat, a, [4, 2],
                      'axis0', axis='axis0')
# outer
res = np.multiply.outer(a, 42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'outer')
assert_equal(res[3], (a, 42))
assert_equal(res[4], {})
# outer, wrong args
assert_raises(TypeError, np.multiply.outer, a)
assert_raises(TypeError, np.multiply.outer, a, a, a, a)
assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
# at
res = np.multiply.at(a, [4, 2], 'b0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'at')
assert_equal(res[3], (a, [4, 2], 'b0'))
# at, wrong args
assert_raises(TypeError, np.multiply.at, a)
assert_raises(TypeError, np.multiply.at, a, a, a, a)
def test_ufunc_override_out(self):
class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
class B:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
a = A()
b = B()
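        # Whether `out` is passed positionally or as a keyword, the override
        # machinery normalizes it into a tuple under kwargs['out'] before
        # calling __array_ufunc__.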
res0 = np.multiply(a, b, 'out_arg')
res1 = np.multiply(a, b, out='out_arg')
res2 = np.multiply(2, b, 'out_arg')
res3 = np.multiply(3, b, out='out_arg')
res4 = np.multiply(a, 4, 'out_arg')
res5 = np.multiply(a, 5, out='out_arg')
assert_equal(res0['out'][0], 'out_arg')
assert_equal(res1['out'][0], 'out_arg')
assert_equal(res2['out'][0], 'out_arg')
assert_equal(res3['out'][0], 'out_arg')
assert_equal(res4['out'][0], 'out_arg')
assert_equal(res5['out'][0], 'out_arg')
        # ufuncs with multiple outputs: modf and frexp.
res6 = np.modf(a, 'out0', 'out1')
res7 = np.frexp(a, 'out0', 'out1')
assert_equal(res6['out'][0], 'out0')
assert_equal(res6['out'][1], 'out1')
assert_equal(res7['out'][0], 'out0')
assert_equal(res7['out'][1], 'out1')
# While we're at it, check that default output is never passed on.
assert_(np.sin(a, None) == {})
assert_(np.sin(a, out=None) == {})
assert_(np.sin(a, out=(None,)) == {})
assert_(np.modf(a, None) == {})
assert_(np.modf(a, None, None) == {})
assert_(np.modf(a, out=(None, None)) == {})
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs.
np.modf(a, out=None)
# don't give positional and output argument, or too many arguments.
# wrong number of arguments in the tuple is an error too.
assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
assert_raises(TypeError, np.multiply, a, out=())
assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
assert_raises(ValueError, np.modf, a, out=('one',))
def test_ufunc_override_exception(self):
class A:
def __array_ufunc__(self, *a, **kwargs):
raise ValueError("oops")
a = A()
assert_raises(ValueError, np.negative, 1, out=a)
assert_raises(ValueError, np.negative, a)
assert_raises(ValueError, np.divide, 1., a)
def test_ufunc_override_not_implemented(self):
class A:
def __array_ufunc__(self, *args, **kwargs):
return NotImplemented
msg = ("operand type(s) all returned NotImplemented from "
"__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
with assert_raises_regex(TypeError, fnmatch.translate(msg)):
np.negative(A())
msg = ("operand type(s) all returned NotImplemented from "
"__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
"out=(1,)): 'A', 'object', 'int'")
with assert_raises_regex(TypeError, fnmatch.translate(msg)):
np.add(A(), object(), out=1)
def test_ufunc_override_disabled(self):
class OptOut:
__array_ufunc__ = None
opt_out = OptOut()
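        # Setting __array_ufunc__ = None is the documented opt-out: any ufunc
        # involving such an operand raises TypeError, no matter what the
        # other operands implement.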
# ufuncs always raise
msg = "operand 'OptOut' does not support ufuncs"
with assert_raises_regex(TypeError, msg):
np.add(opt_out, 1)
with assert_raises_regex(TypeError, msg):
np.add(1, opt_out)
with assert_raises_regex(TypeError, msg):
np.negative(opt_out)
# opt-outs still hold even when other arguments have pathological
# __array_ufunc__ implementations
class GreedyArray:
def __array_ufunc__(self, *args, **kwargs):
return self
greedy = GreedyArray()
assert_(np.negative(greedy) is greedy)
with assert_raises_regex(TypeError, msg):
np.add(greedy, opt_out)
with assert_raises_regex(TypeError, msg):
np.add(greedy, 1, out=opt_out)
def test_gufunc_override(self):
        # gufuncs are just ufunc instances, but follow a different code path,
        # so check that __array_ufunc__ overrides them properly.
class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self, ufunc, method, inputs, kwargs
inner1d = ncu_tests.inner1d
a = A()
res = inner1d(a, a)
assert_equal(res[0], a)
assert_equal(res[1], inner1d)
assert_equal(res[2], '__call__')
assert_equal(res[3], (a, a))
assert_equal(res[4], {})
res = inner1d(1, 1, out=a)
assert_equal(res[0], a)
assert_equal(res[1], inner1d)
assert_equal(res[2], '__call__')
assert_equal(res[3], (1, 1))
assert_equal(res[4], {'out': (a,)})
# wrong number of arguments in the tuple is an error too.
assert_raises(TypeError, inner1d, a, out='two')
assert_raises(TypeError, inner1d, a, a, 'one', out='two')
assert_raises(TypeError, inner1d, a, a, 'one', 'two')
assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
assert_raises(ValueError, inner1d, a, a, out=())
def test_ufunc_override_with_super(self):
# NOTE: this class is used in doc/source/user/basics.subclassing.rst
# if you make any changes here, do update it there too.
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
args = []
in_no = []
for i, input_ in enumerate(inputs):
if isinstance(input_, A):
in_no.append(i)
args.append(input_.view(np.ndarray))
else:
args.append(input_)
outputs = out
out_no = []
if outputs:
out_args = []
for j, output in enumerate(outputs):
if isinstance(output, A):
out_no.append(j)
out_args.append(output.view(np.ndarray))
else:
out_args.append(output)
kwargs['out'] = tuple(out_args)
else:
outputs = (None,) * ufunc.nout
info = {}
if in_no:
info['inputs'] = in_no
if out_no:
info['outputs'] = out_no
results = super().__array_ufunc__(ufunc, method,
*args, **kwargs)
if results is NotImplemented:
return NotImplemented
if method == 'at':
if isinstance(inputs[0], A):
inputs[0].info = info
return
if ufunc.nout == 1:
results = (results,)
results = tuple((np.asarray(result).view(A)
if output is None else output)
for result, output in zip(results, outputs))
if results and isinstance(results[0], A):
results[0].info = info
return results[0] if len(results) == 1 else results
class B:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if any(isinstance(input_, A) for input_ in inputs):
return "A!"
else:
return NotImplemented
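        # In short: A unwraps any A inputs/outputs to plain ndarray, defers
        # to super().__array_ufunc__ for the actual computation, re-wraps the
        # results as A, and records in `info` which argument positions were A
        # instances.  B only cooperates when an A is among the inputs.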
d = np.arange(5.)
# 1 input, 1 output
a = np.arange(5.).view(A)
b = np.sin(a)
check = np.sin(d)
assert_(np.all(check == b))
assert_equal(b.info, {'inputs': [0]})
b = np.sin(d, out=(a,))
assert_(np.all(check == b))
assert_equal(b.info, {'outputs': [0]})
assert_(b is a)
a = np.arange(5.).view(A)
b = np.sin(a, out=a)
assert_(np.all(check == b))
assert_equal(b.info, {'inputs': [0], 'outputs': [0]})
# 1 input, 2 outputs
a = np.arange(5.).view(A)
b1, b2 = np.modf(a)
assert_equal(b1.info, {'inputs': [0]})
b1, b2 = np.modf(d, out=(None, a))
assert_(b2 is a)
assert_equal(b1.info, {'outputs': [1]})
a = np.arange(5.).view(A)
b = np.arange(5.).view(A)
c1, c2 = np.modf(a, out=(a, b))
assert_(c1 is a)
assert_(c2 is b)
assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]})
# 2 input, 1 output
a = np.arange(5.).view(A)
b = np.arange(5.).view(A)
c = np.add(a, b, out=a)
assert_(c is a)
assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]})
# some tests with a non-ndarray subclass
a = np.arange(5.)
b = B()
assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_raises(TypeError, np.add, a, b)
a = a.view(A)
assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!")
assert_(np.add(a, b) == "A!")
# regression check for gh-9102 -- tests ufunc.reduce implicitly.
d = np.array([[1, 2, 3], [1, 2, 3]])
a = d.view(A)
c = a.any()
check = d.any()
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
c = a.max()
check = d.max()
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
b = np.array(0).view(A)
c = a.max(out=b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
check = a.max(axis=0)
b = np.zeros_like(check).view(A)
c = a.max(axis=0, out=b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
# simple explicit tests of reduce, accumulate, reduceat
check = np.add.reduce(d, axis=1)
c = np.add.reduce(a, axis=1)
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
b = np.zeros_like(c)
c = np.add.reduce(a, 1, None, b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
check = np.add.accumulate(d, axis=0)
c = np.add.accumulate(a, axis=0)
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
b = np.zeros_like(c)
c = np.add.accumulate(a, 0, None, b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
indices = [0, 2, 1]
check = np.add.reduceat(d, indices, axis=1)
c = np.add.reduceat(a, indices, axis=1)
assert_equal(c, check)
        assert_equal(c.info, {'inputs': [0]})
b = np.zeros_like(c)
c = np.add.reduceat(a, indices, 1, None, b)
assert_equal(c, check)
assert_(c is b)
        assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
# and a few tests for at
d = np.array([[1, 2, 3], [1, 2, 3]])
check = d.copy()
a = d.copy().view(A)
np.add.at(check, ([0, 1], [0, 2]), 1.)
np.add.at(a, ([0, 1], [0, 2]), 1.)
assert_equal(a, check)
        assert_equal(a.info, {'inputs': [0]})
b = np.array(1.).view(A)
a = d.copy().view(A)
np.add.at(a, ([0, 1], [0, 2]), b)
assert_equal(a, check)
        assert_equal(a.info, {'inputs': [0, 2]})
class TestChoose:
def test_mixed(self):
c = np.array([True, True])
a = np.array([True, True])
assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
class TestRationalFunctions:
def test_lcm(self):
self._test_lcm_inner(np.int16)
self._test_lcm_inner(np.uint16)
def test_lcm_object(self):
self._test_lcm_inner(np.object_)
def test_gcd(self):
self._test_gcd_inner(np.int16)
        self._test_gcd_inner(np.uint16)
def test_gcd_object(self):
self._test_gcd_inner(np.object_)
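    # For integer inputs gcd/lcm follow the usual conventions: the result is
    # non-negative regardless of argument signs, gcd(0, n) == abs(n), and
    # lcm(0, n) == 0, e.g. np.gcd(12, 20) -> 4 and np.lcm(12, 20) -> 60.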
def _test_lcm_inner(self, dtype):
# basic use
a = np.array([12, 120], dtype=dtype)
b = np.array([20, 200], dtype=dtype)
assert_equal(np.lcm(a, b), [60, 600])
if not issubclass(dtype, np.unsignedinteger):
# negatives are ignored
a = np.array([12, -12, 12, -12], dtype=dtype)
b = np.array([20, 20, -20, -20], dtype=dtype)
assert_equal(np.lcm(a, b), [60]*4)
# reduce
a = np.array([3, 12, 20], dtype=dtype)
assert_equal(np.lcm.reduce([3, 12, 20]), 60)
# broadcasting, and a test including 0
a = np.arange(6).astype(dtype)
b = 20
assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
def _test_gcd_inner(self, dtype):
# basic use
a = np.array([12, 120], dtype=dtype)
b = np.array([20, 200], dtype=dtype)
assert_equal(np.gcd(a, b), [4, 40])
if not issubclass(dtype, np.unsignedinteger):
# negatives are ignored
a = np.array([12, -12, 12, -12], dtype=dtype)
b = np.array([20, 20, -20, -20], dtype=dtype)
assert_equal(np.gcd(a, b), [4]*4)
# reduce
a = np.array([15, 25, 35], dtype=dtype)
assert_equal(np.gcd.reduce(a), 5)
# broadcasting, and a test including 0
a = np.arange(6).astype(dtype)
b = 20
assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5])
def test_lcm_overflow(self):
# verify that we don't overflow when a*b does overflow
big = np.int32(np.iinfo(np.int32).max // 11)
a = 2*big
b = 5*big
assert_equal(np.lcm(a, b), 10*big)
def test_gcd_overflow(self):
for dtype in (np.int32, np.int64):
# verify that we don't overflow when taking abs(x)
# not relevant for lcm, where the result is unrepresentable anyway
a = dtype(np.iinfo(dtype).min) # negative power of two
q = -(a // 4)
assert_equal(np.gcd(a, q*3), q)
assert_equal(np.gcd(a, -q*3), q)
def test_decimal(self):
from decimal import Decimal
a = np.array([1, 1, -1, -1]) * Decimal('0.20')
b = np.array([1, -1, 1, -1]) * Decimal('0.12')
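        # Object-dtype inputs take the pure-Python loop, which (roughly) runs
        # a Euclidean reduction via Python-level arithmetic, so exact Decimal
        # values survive: gcd(0.20, 0.12) == 0.04.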
assert_equal(np.gcd(a, b), 4*[Decimal('0.04')])
assert_equal(np.lcm(a, b), 4*[Decimal('0.60')])
def test_float(self):
# not well-defined on float due to rounding errors
assert_raises(TypeError, np.gcd, 0.3, 0.4)
assert_raises(TypeError, np.lcm, 0.3, 0.4)
def test_builtin_long(self):
# sanity check that array coercion is alright for builtin longs
assert_equal(np.array(2**200).item(), 2**200)
# expressed as prime factors
a = np.array(2**100 * 3**5)
b = np.array([2**100 * 5**7, 2**50 * 3**10])
assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5])
assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10])
assert_equal(np.gcd(2**100, 3**100), 1)
class TestRoundingFunctions:
def test_object_direct(self):
""" test direct implementation of these magic methods """
class C:
def __floor__(self):
return 1
def __ceil__(self):
return 2
def __trunc__(self):
return 3
arr = np.array([C(), C()])
assert_equal(np.floor(arr), [1, 1])
assert_equal(np.ceil(arr), [2, 2])
assert_equal(np.trunc(arr), [3, 3])
def test_object_indirect(self):
""" test implementations via __float__ """
class C:
def __float__(self):
return -2.5
arr = np.array([C(), C()])
assert_equal(np.floor(arr), [-3, -3])
assert_equal(np.ceil(arr), [-2, -2])
with pytest.raises(TypeError):
np.trunc(arr) # consistent with math.trunc
def test_fraction(self):
f = Fraction(-4, 3)
assert_equal(np.floor(f), -2)
assert_equal(np.ceil(f), -1)
assert_equal(np.trunc(f), -1)
class TestComplexFunctions:
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh:
x = 1.5
else:
x = .5
fr = f(x)
fz = f(complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
def test_precisions_consistent(self):
z = 1 + 1j
for f in self.funcs:
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
            assert_almost_equal(fcf, fcd, decimal=6, err_msg='fcf-fcd %s' % f)
            assert_almost_equal(fcl, fcd, decimal=15, err_msg='fcl-fcd %s' % f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
_check_branch_cut(np.log, -0.5, 1j, 1, -1, True)
_check_branch_cut(np.log2, -0.5, 1j, 1, -1, True)
_check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
_check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
_check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True)
_check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True)
_check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True)
_check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True)
_check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True)
_check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True)
_check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True)
# check against bogus branch cuts: assert continuity between quadrants
_check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1)
_check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1)
_check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1)
_check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1)
_check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1)
_check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1)
def test_branch_cuts_complex64(self):
# check branch cuts and continuity on them
_check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64)
_check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
_check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
_check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
_check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
_check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64)
_check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
# check against bogus branch cuts: assert continuity between quadrants
_check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
_check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
_check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64)
_check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
_check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64)
_check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64)
def test_against_cmath(self):
import cmath
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
@pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex])
def test_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
info = np.finfo(dtype)
real_dtype = dtype(0.).real.dtype
eps = info.eps
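        # check() exploits the identities arcsin(1j*x).imag == arcsinh(x) and
        # arctan(1j*x).imag == arctanh(x): the real-axis functions are
        # well-conditioned for small x and serve as the reference values.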
def check(x, rtol):
x = x.astype(real_dtype)
z = x.astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsinh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsin'))
z = x.astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctanh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctan'))
# The switchover was chosen as 1e-3; hence there can be up to
# ~eps/1e-3 of relative cancellation error before it
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
if dtype is np.longcomplex:
if bad_arcsinh():
pytest.skip("Trig functions of np.longcomplex values known "
"to be inaccurate on aarch64 and PPC for some "
"compilation configurations.")
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
check(x_series, 50.0*eps)
else:
check(x_series, 2.1*eps)
check(x_basic, 2.0*eps/1e-3)
# Check a few points
z = np.array([1e-5*(1+1j)], dtype=dtype)
p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
d = np.absolute(1-np.arctanh(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
d = np.absolute(1-np.arcsinh(z)/p)
assert_(np.all(d < 1e-15))
p = 9.999999999333333333e-6j + 1.000000000066666666e-5
d = np.absolute(1-np.arctan(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
d = np.absolute(1-np.arcsin(z)/p)
assert_(np.all(d < 1e-15))
# Check continuity across switchover points
def check(func, z0, d=1):
z0 = np.asarray(z0, dtype=dtype)
zp = z0 + abs(z0) * d * eps * 2
zm = z0 - abs(z0) * d * eps * 2
assert_(np.all(zp != zm), (zp, zm))
# NB: the cancellation error at the switchover is at least eps
good = (abs(func(zp) - func(zm)) < 2*eps)
assert_(np.all(good), (func, z0[~good]))
        for func in (np.arcsinh, np.arcsin, np.arctanh, np.arctan):
pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3)
if rp != 0 or ip != 0]
check(func, pts, 1)
check(func, pts, 1j)
check(func, pts, 1+1j)
class TestAttributes:
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
assert_(add.ntypes >= 18) # don't fail if types added
assert_('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
def test_doc(self):
# don't bother checking the long list of kwargs, which are likely to
# change
assert_(ncu.add.__doc__.startswith(
"add(x1, x2, /, out=None, *, where=True"))
assert_(ncu.frexp.__doc__.startswith(
"frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
class TestSubclass:
def test_subclass_op(self):
class simple(np.ndarray):
def __new__(subtype, shape):
self = np.ndarray.__new__(subtype, shape, dtype=object)
self.fill(0)
return self
a = simple((3, 4))
assert_equal(a+a, a)
class TestFrompyfunc:
def test_identity(self):
def mul(a, b):
return a * b
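        # Three identity modes: identity=<value> seeds empty and multi-axis
        # reductions; identity=None marks the operation reorderable (so
        # multi-axis reduce works) but leaves an empty reduce undefined;
        # omitting identity makes the ufunc non-reorderable, so both cases
        # raise.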
# with identity=value
mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1)
assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
assert_equal(mul_ufunc.reduce([]), 1)
# with identity=None (reorderable)
mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None)
assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
# with no identity (not reorderable)
mul_ufunc = np.frompyfunc(mul, nin=2, nout=1)
assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)))
assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=complex):
"""
Check for a branch cut in a function.
Assert that `x0` lies on a branch cut of function `f` and `f` is
continuous from the direction `dx`.
Parameters
----------
f : func
Function to check
x0 : array-like
Point on branch cut
dx : array-like
Direction to check continuity in
re_sign, im_sign : {1, -1}
Change of sign of the real or imaginary part expected
sig_zero_ok : bool
Whether to check if the branch cut respects signed zero (if applicable)
dtype : dtype
Dtype to check (should be complex)
"""
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
if np.dtype(dtype).char == 'F':
scale = np.finfo(dtype).eps * 1e2
atol = np.float32(1e-2)
else:
scale = np.finfo(dtype).eps * 1e3
atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
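    # Sample just off the cut on both sides: yp approaches x0 along +dx and
    # must agree with f(x0); ym approaches along -dx and must agree up to the
    # expected sign flips of the real/imaginary parts (re_sign, im_sign).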
assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
if sig_zero_ok:
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
if np.any(jr):
x = x0[jr]
x.real = np.NZERO
ym = f(x)
assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym))
assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym))
if np.any(ji):
x = x0[ji]
x.imag = np.NZERO
ym = f(x)
assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym))
assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym))
def test_copysign():
assert_(np.copysign(1, -1) == -1)
with np.errstate(divide="ignore"):
assert_(1 / np.copysign(0, -1) < 0)
assert_(1 / np.copysign(0, 1) > 0)
assert_(np.signbit(np.copysign(np.nan, -1)))
assert_(not np.signbit(np.copysign(np.nan, 1)))
def _test_nextafter(t):
one = t(1)
two = t(2)
zero = t(0)
eps = np.finfo(t).eps
assert_(np.nextafter(one, two) - one == eps)
assert_(np.nextafter(one, zero) - one < 0)
assert_(np.isnan(np.nextafter(np.nan, one)))
assert_(np.isnan(np.nextafter(one, np.nan)))
assert_(np.nextafter(one, one) == one)
def test_nextafter():
return _test_nextafter(np.float64)
def test_nextafterf():
return _test_nextafter(np.float32)
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double")
@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
reason="IBM double double")
def test_nextafterl():
return _test_nextafter(np.longdouble)
def test_nextafter_0():
for t, direction in itertools.product(np.sctypes['float'], (1, -1)):
        # On platforms where longdouble is IBM double-double, finfo(t).tiny
        # is NaN, so the bounds assertion below has to be skipped.
with suppress_warnings() as sup:
sup.filter(UserWarning)
if not np.isnan(np.finfo(t).tiny):
tiny = np.finfo(t).tiny
assert_(
0. < direction * np.nextafter(t(0), t(direction)) < tiny)
assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0)
def _test_spacing(t):
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
with np.errstate(invalid='ignore'):
assert_(np.spacing(one) == eps)
assert_(np.isnan(np.spacing(nan)))
assert_(np.isnan(np.spacing(inf)))
assert_(np.isnan(np.spacing(-inf)))
assert_(np.spacing(t(1e30)) != 0)
def test_spacing():
return _test_spacing(np.float64)
def test_spacingf():
return _test_spacing(np.float32)
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double")
@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
reason="IBM double double")
def test_spacingl():
return _test_spacing(np.longdouble)
def test_spacing_gfortran():
# Reference from this fortran file, built with gfortran 4.3.3 on linux
# 32bits:
# PROGRAM test_spacing
# INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
# INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
#
# WRITE(*,*) spacing(0.00001_DBL)
# WRITE(*,*) spacing(1.0_DBL)
# WRITE(*,*) spacing(1000._DBL)
# WRITE(*,*) spacing(10500._DBL)
#
# WRITE(*,*) spacing(0.00001_SGL)
# WRITE(*,*) spacing(1.0_SGL)
# WRITE(*,*) spacing(1000._SGL)
# WRITE(*,*) spacing(10500._SGL)
# END PROGRAM
ref = {np.float64: [1.69406589450860068E-021,
2.22044604925031308E-016,
1.13686837721616030E-013,
1.81898940354585648E-012],
np.float32: [9.09494702E-13,
1.19209290E-07,
6.10351563E-05,
9.76562500E-04]}
for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)
def test_nextafter_vs_spacing():
# XXX: spacing does not handle long double yet
for t in [np.float32, np.float64]:
for _f in [1, 1e-5, 1000]:
f = t(_f)
f1 = t(_f + 1)
assert_(np.nextafter(f, f1) - f == np.spacing(f))
def test_pos_nan():
"""Check np.nan is a positive nan."""
assert_(np.signbit(np.nan) == 0)
def test_reduceat():
"""Test bug in reduceat when structured arrays are not copied."""
db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)])
a = np.empty([100], dtype=db)
a['name'] = 'Simple'
a['time'] = 10
a['value'] = 100
indx = [0, 7, 15, 25]
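    # Build the reference by hand: reduceat computes add.reduce over each
    # slice a[indx[i]:indx[i+1]], with the final slice running to the end.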
h2 = []
val1 = indx[0]
for val2 in indx[1:]:
h2.append(np.add.reduce(a['value'][val1:val2]))
val1 = val2
h2.append(np.add.reduce(a['value'][val1:]))
h2 = np.array(h2)
# test buffered -- this should work
h1 = np.add.reduceat(a['value'], indx)
assert_array_almost_equal(h1, h2)
    # The bug occurred in the unbuffered path, so shrink the buffer to force
    # that code path.
np.setbufsize(32)
h1 = np.add.reduceat(a['value'], indx)
np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
assert_array_almost_equal(h1, h2)
def test_reduceat_empty():
"""Reduceat should work with empty arrays"""
indices = np.array([], 'i4')
x = np.array([], 'f8')
result = np.add.reduceat(x, indices)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (0,))
# Another case with a slightly different zero-sized shape
x = np.ones((5, 2))
result = np.add.reduceat(x, [], axis=0)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (0, 2))
result = np.add.reduceat(x, [], axis=1)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (5, 0))
def test_complex_nan_comparisons():
nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
complex(1, 1), complex(-1, -1), complex(0, 0)]
with np.errstate(invalid='ignore'):
for x in nans + fins:
x = np.array([x])
for y in nans + fins:
y = np.array([y])
if np.isfinite(x) and np.isfinite(y):
continue
assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
def test_rint_big_int():
# np.rint bug for large integer values on Windows 32-bit and MKL
# https://github.com/numpy/numpy/issues/6685
val = 4607998452777363968
# This is exactly representable in floating point
assert_equal(val, int(float(val)))
# Rint should not change the value
assert_equal(val, np.rint(val))
@pytest.mark.parametrize('ftype', [np.float32, np.float64])
def test_memoverlap_accumulate(ftype):
# Reproduces bug https://github.com/numpy/numpy/issues/15597
arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype)
out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype)
out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype)
assert_equal(np.maximum.accumulate(arr), out_max)
assert_equal(np.minimum.accumulate(arr), out_min)
def test_signaling_nan_exceptions():
with assert_no_warnings():
a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff')
np.isnan(a)
@pytest.mark.parametrize("arr", [
np.arange(2),
np.matrix([0, 1]),
np.matrix([[0, 1], [2, 5]]),
])
def test_outer_subclass_preserve(arr):
# for gh-8661
class foo(np.ndarray): pass
actual = np.multiply.outer(arr.view(foo), arr.view(foo))
assert actual.__class__.__name__ == 'foo'
def test_outer_bad_subclass():
class BadArr1(np.ndarray):
def __array_finalize__(self, obj):
# The outer call reshapes to 3 dims, try to do a bad reshape.
if self.ndim == 3:
self.shape = self.shape + (1,)
def __array_prepare__(self, obj, context=None):
return obj
class BadArr2(np.ndarray):
def __array_finalize__(self, obj):
if isinstance(obj, BadArr2):
# outer inserts 1-sized dims. In that case disturb them.
if self.shape[-1] == 1:
self.shape = self.shape[::-1]
def __array_prepare__(self, obj, context=None):
return obj
for cls in [BadArr1, BadArr2]:
arr = np.ones((2, 3)).view(cls)
with assert_raises(TypeError) as a:
# The first array gets reshaped (not the second one)
np.add.outer(arr, [1, 2])
# This actually works, since we only see the reshaping error:
arr = np.ones((2, 3)).view(cls)
assert type(np.add.outer([1, 2], arr)) is cls
def test_outer_exceeds_maxdims():
deep = np.ones((1,) * 17)
with assert_raises(ValueError):
np.add.outer(deep, deep)
def test_bad_legacy_ufunc_silent_errors():
    # Legacy ufunc loops cannot report errors and NumPy cannot check whether
    # the GIL is released, so it has to check for errors after the loop just
    # to cover all bases.  `np.power` used to rely on this.
arr = np.arange(3).astype(np.float64)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error(arr, arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
# not contiguous means the fast-path cannot be taken
non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2]
ncu_tests.always_error(non_contig, arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.outer(arr, arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.reduce(arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.reduceat(arr, [0, 1])
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.accumulate(arr)
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error.at(arr, [0, 1, 2], arr)
@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]])
def test_bad_legacy_gufunc_silent_errors(x1):
# Verify that an exception raised in a gufunc loop propagates correctly.
# The signature of always_error_gufunc is '(i),()->()'.
with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
ncu_tests.always_error_gufunc(x1, 0.0)
| 162,560 | Python | 37.834448 | 135 | 0.521586 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_function_base.py | from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
ndarray, sqrt, nextafter, stack, errstate
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
)
class PhysicalQuantity(float):
def __new__(cls, value):
return float.__new__(cls, value)
def __add__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) + float(self))
__radd__ = __add__
def __sub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(self) - float(x))
def __rsub__(self, x):
assert_(isinstance(x, PhysicalQuantity))
return PhysicalQuantity(float(x) - float(self))
def __mul__(self, x):
return PhysicalQuantity(float(x) * float(self))
__rmul__ = __mul__
def __div__(self, x):
return PhysicalQuantity(float(self) / float(x))
def __rdiv__(self, x):
return PhysicalQuantity(float(x) / float(self))
class PhysicalQuantity2(ndarray):
__array_priority__ = 10
class TestLogspace:
def test_basic(self):
y = logspace(0, 6)
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
y = logspace(0, 6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
def test_start_stop_array(self):
start = array([0., 1.])
stop = array([6., 7.])
t1 = logspace(start, stop, 6)
t2 = stack([logspace(_start, _stop, 6)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = logspace(start, stop[0], 6)
t4 = stack([logspace(_start, stop[0], 6)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = logspace(start, stop, 6, axis=-1)
assert_equal(t5, t2.T)
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = logspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(logspace(a, b), logspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
ls = logspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0))
ls = logspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0, 1))
class TestGeomspace:
def test_basic(self):
y = geomspace(1, 1e6)
assert_(len(y) == 50)
y = geomspace(1, 1e6, num=100)
assert_(y[-1] == 10 ** 6)
y = geomspace(1, 1e6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = geomspace(1, 1e6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
y = geomspace(8, 2, num=3)
assert_allclose(y, [8, 4, 2])
assert_array_equal(y.imag, 0)
y = geomspace(-1, -100, num=3)
assert_array_equal(y, [-1, -10, -100])
assert_array_equal(y.imag, 0)
y = geomspace(-100, -1, num=3)
assert_array_equal(y, [-100, -10, -1])
assert_array_equal(y.imag, 0)
def test_boundaries_match_start_and_stop_exactly(self):
# make sure that the boundaries of the returned array exactly
# equal 'start' and 'stop' - this isn't obvious because
# np.exp(np.log(x)) isn't necessarily exactly equal to x
start = 0.3
stop = 20.3
y = geomspace(start, stop, num=1)
assert_equal(y[0], start)
y = geomspace(start, stop, num=1, endpoint=False)
assert_equal(y[0], start)
y = geomspace(start, stop, num=3)
assert_equal(y[0], start)
assert_equal(y[-1], stop)
y = geomspace(start, stop, num=3, endpoint=False)
assert_equal(y[0], start)
def test_nan_interior(self):
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:-1]).all())
assert_equal(y[3], 3.0)
with errstate(invalid='ignore'):
y = geomspace(-3, 3, num=4, endpoint=False)
assert_equal(y[0], -3.0)
assert_(isnan(y[1:]).all())
def test_complex(self):
# Purely imaginary
y = geomspace(1j, 16j, num=5)
assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
assert_array_equal(y.real, 0)
y = geomspace(-4j, -324j, num=5)
assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
assert_array_equal(y.real, 0)
y = geomspace(1+1j, 1000+1000j, num=4)
assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
y = geomspace(-1+1j, -1000+1000j, num=4)
assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
# Logarithmic spirals
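        # For complex endpoints geomspace interpolates linearly in log space,
        # i.e. the modulus progresses geometrically while the argument
        # progresses linearly, tracing a logarithmic spiral.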
y = geomspace(-1, 1, num=3, dtype=complex)
assert_allclose(y, [-1, 1j, +1])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(0+3j, 3+0j, 3)
assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
y = geomspace(-3+0j, 0-3j, 3)
assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
y = geomspace(0+3j, -3+0j, 3)
assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
y = geomspace(-2-3j, 5+7j, 7)
assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
2.08885354-4.34146838j, 4.58345529-3.16355218j,
6.41401745-0.55233457j, 6.75707386+3.11795092j,
5+7j])
# Type promotion should prevent the -5 from becoming a NaN
y = geomspace(3j, -5, 2)
assert_allclose(y, [3j, -5])
y = geomspace(-5, 3j, 2)
assert_allclose(y, [-5, 3j])
def test_dtype(self):
y = geomspace(1, 1e6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = geomspace(1, 1e6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = geomspace(1, 1e6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
# Native types
y = geomspace(1, 1e6, dtype=float)
assert_equal(y.dtype, dtype('float_'))
y = geomspace(1, 1e6, dtype=complex)
assert_equal(y.dtype, dtype('complex'))
def test_start_stop_array_scalar(self):
lim1 = array([120, 100], dtype="int8")
lim2 = array([-120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = geomspace(lim1[0], lim1[1], 5)
t2 = geomspace(lim2[0], lim2[1], 5)
t3 = geomspace(lim3[0], lim3[1], 5)
t4 = geomspace(120.0, 100.0, 5)
t5 = geomspace(-120.0, -100.0, 5)
t6 = geomspace(1200.0, 1000.0, 5)
# t3 uses float32, t6 uses float64
assert_allclose(t1, t4, rtol=1e-2)
assert_allclose(t2, t5, rtol=1e-2)
assert_allclose(t3, t6, rtol=1e-5)
def test_start_stop_array(self):
# Try to use all special cases.
start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
t1 = geomspace(start, stop, 5)
t2 = stack([geomspace(_start, _stop, 5)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = geomspace(start, stop[0], 5)
t4 = stack([geomspace(_start, stop[0], 5)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = geomspace(start, stop, 5, axis=-1)
assert_equal(t5, t2.T)
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
gs = geomspace(a, b)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0))
gs = geomspace(a, b, 1)
assert type(gs) is PhysicalQuantity2
assert_equal(gs, geomspace(1.0, 7.0, 1))
def test_bounds(self):
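        # Samples are generated in log space, so a zero endpoint (log(0) is
        # undefined) must raise.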
assert_raises(ValueError, geomspace, 0, 10)
assert_raises(ValueError, geomspace, 10, 0)
assert_raises(ValueError, geomspace, 0, 0)
class TestLinspace:
def test_basic(self):
y = linspace(0, 10)
assert_(len(y) == 50)
y = linspace(2, 10, num=100)
assert_(y[-1] == 10)
y = linspace(2, 10, endpoint=False)
assert_(y[-1] < 10)
assert_raises(ValueError, linspace, 0, 10, num=-1)
def test_corner(self):
y = list(linspace(0, 1, 1))
assert_(y == [0.0], y)
assert_raises(TypeError, linspace, 0, 1, num=2.5)
def test_type(self):
t1 = linspace(0, 1, 0).dtype
t2 = linspace(0, 1, 1).dtype
t3 = linspace(0, 1, 2).dtype
assert_equal(t1, t2)
assert_equal(t2, t3)
def test_dtype(self):
y = linspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = linspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = linspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_start_stop_array_scalar(self):
lim1 = array([-120, 100], dtype="int8")
lim2 = array([120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
t1 = linspace(lim1[0], lim1[1], 5)
t2 = linspace(lim2[0], lim2[1], 5)
t3 = linspace(lim3[0], lim3[1], 5)
t4 = linspace(-120.0, 100.0, 5)
t5 = linspace(120.0, -100.0, 5)
t6 = linspace(1200.0, 1000.0, 5)
assert_equal(t1, t4)
assert_equal(t2, t5)
assert_equal(t3, t6)
def test_start_stop_array(self):
start = array([-120, 120], dtype="int8")
stop = array([100, -100], dtype="int8")
t1 = linspace(start, stop, 5)
t2 = stack([linspace(_start, _stop, 5)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = linspace(start, stop[0], 5)
t4 = stack([linspace(_start, stop[0], 5)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = linspace(start, stop, 5, axis=-1)
assert_equal(t5, t2.T)
def test_complex(self):
lim1 = linspace(1 + 2j, 3 + 4j, 5)
t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
lim2 = linspace(1j, 10, 5)
t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
assert_equal(lim1, t1)
assert_equal(lim2, t2)
def test_physical_quantities(self):
a = PhysicalQuantity(0.0)
b = PhysicalQuantity(1.0)
assert_equal(linspace(a, b), linspace(0.0, 1.0))
def test_subclass(self):
a = array(0).view(PhysicalQuantity2)
b = array(1).view(PhysicalQuantity2)
ls = linspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, linspace(0.0, 1.0))
ls = linspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, linspace(0.0, 1.0, 1))
def test_array_interface(self):
# Regression test for https://github.com/numpy/numpy/pull/6659
# Ensure that start/stop can be objects that implement
# __array_interface__ and are convertible to numeric scalars
class Arrayish:
"""
            A generic object that supports the __array_interface__ and hence
            can in principle be converted to a numeric scalar, but is not
            otherwise recognized as numeric.  It also happens to support
            multiplication by floats.
Data should be an object that implements the buffer interface,
and contains at least 4 bytes.
"""
def __init__(self, data):
self._data = data
@property
def __array_interface__(self):
return {'shape': (), 'typestr': '<i4', 'data': self._data,
'version': 3}
def __mul__(self, other):
# For the purposes of this test any multiplication is an
# identity operation :)
return self
one = Arrayish(array(1, dtype='<i4'))
five = Arrayish(array(5, dtype='<i4'))
assert_equal(linspace(one, five), linspace(1, 5))
def test_denormal_numbers(self):
# Regression test for gh-5437. Will probably fail when compiled
# with ICC, which flushes denormals to zero
for ftype in sctypes['float']:
stop = nextafter(ftype(0), ftype(1)) * 5 # A denormal number
assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype)))
def test_equivalent_to_arange(self):
for j in range(1000):
assert_equal(linspace(0, j, j+1, dtype=int),
arange(j+1, dtype=int))
def test_retstep(self):
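        # retstep=True returns (samples, step); step is NaN whenever no
        # interval exists (num == 0, or num == 1 with endpoint=True), while
        # endpoint=False with num == 1 yields step == (stop - start) / num.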
for num in [0, 1, 2]:
for ept in [False, True]:
y = linspace(0, 1, num, endpoint=ept, retstep=True)
assert isinstance(y, tuple) and len(y) == 2
if num == 2:
y0_expect = [0.0, 1.0] if ept else [0.0, 0.5]
assert_array_equal(y[0], y0_expect)
assert_equal(y[1], y0_expect[1])
elif num == 1 and not ept:
assert_array_equal(y[0], [0.0])
assert_equal(y[1], 1.0)
else:
assert_array_equal(y[0], [0.0][:num])
assert isnan(y[1])
def test_object(self):
start = array(1, dtype='O')
stop = array(2, dtype='O')
y = linspace(start, stop, 3)
assert_array_equal(y, array([1., 1.5, 2.]))
def test_round_negative(self):
y = linspace(-1, 3, num=8, dtype=int)
t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int)
assert_array_equal(y, t)
| 14,411 | Python | 34.151219 | 78 | 0.538616 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_shape_base.py | import pytest
import numpy as np
from numpy.core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
newaxis, concatenate, stack
)
from numpy.core.shape_base import (_block_dispatcher, _block_setup,
_block_concatenate, _block_slicing)
from numpy.testing import (
assert_, assert_raises, assert_array_equal, assert_equal,
assert_raises_regex, assert_warns, IS_PYPY
)
class TestAtleast1d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r1array(self):
""" Test to make sure equivalent Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
class TestAtleast2d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1]]), array([[2]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_2d(a), atleast_2d(b)]
desired = [array([[1, 2]]), array([[2, 3]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_2d(a), atleast_2d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r2array(self):
""" Test to make sure equivalent Travis O's r2array function
"""
assert_(atleast_2d(3).shape == (1, 1))
assert_(atleast_2d([3j, 1]).shape == (1, 2))
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
class TestAtleast3d:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1]]]), array([[[2]]])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_3d(a), atleast_3d(b)]
desired = [array([[[1], [2]]]), array([[[2], [3]]])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a[:,:, newaxis], b[:,:, newaxis]]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_3d(a), atleast_3d(b)]
desired = [a, b]
assert_array_equal(res, desired)
class TestHstack:
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
def test_empty_input(self):
assert_raises(ValueError, hstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = hstack([a, b])
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
hstack((np.arange(3) for _ in range(2)))
with assert_warns(FutureWarning):
hstack(map(lambda x: x, np.ones((3, 2))))
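    # Illustrative sketch (not part of the original suite): the deprecation
    # warned about above is avoided by materializing the generator into a
    # list before stacking.
    def test_list_comprehension_sketch(self):
        res = hstack([np.arange(3) for _ in range(2)])
        assert_array_equal(res, [0, 1, 2, 0, 1, 2])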
class TestVstack:
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
def test_empty_input(self):
assert_raises(ValueError, vstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = vstack([a, b])
desired = array([[1], [2], [1], [2]])
assert_array_equal(res, desired)
def test_2D_array2(self):
a = array([1, 2])
b = array([1, 2])
res = vstack([a, b])
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with assert_warns(FutureWarning):
vstack((np.arange(3) for _ in range(2)))
class TestConcatenate:
def test_returns_copy(self):
a = np.eye(3)
b = np.concatenate([a])
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_exceptions(self):
# test axis must be in bounds
for ndim in [1, 2, 3]:
a = np.ones((1,)*ndim)
np.concatenate((a, a), axis=0) # OK
assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
# Scalars cannot be concatenated
assert_raises(ValueError, concatenate, (0,))
assert_raises(ValueError, concatenate, (np.array(0),))
# dimensionality must match
assert_raises_regex(
ValueError,
r"all the input arrays must have same number of dimensions, but "
r"the array at index 0 has 1 dimension\(s\) and the array at "
r"index 1 has 2 dimension\(s\)",
np.concatenate, (np.zeros(1), np.zeros((1, 1))))
# test shapes must match except for concatenation axis
a = np.ones((1, 2, 3))
b = np.ones((2, 2, 3))
axis = list(range(3))
for i in range(3):
np.concatenate((a, b), axis=axis[0]) # OK
assert_raises_regex(
ValueError,
"all the input array dimensions for the concatenation axis "
"must match exactly, but along dimension {}, the array at "
"index 0 has size 1 and the array at index 1 has size 2"
.format(i),
np.concatenate, (a, b), axis=axis[1])
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
a = np.moveaxis(a, -1, 0)
b = np.moveaxis(b, -1, 0)
axis.append(axis.pop(0))
# No arrays to concatenate raises ValueError
assert_raises(ValueError, concatenate, ())
def test_concatenate_axis_None(self):
a = np.arange(4, dtype=np.float64).reshape((2, 2))
b = list(range(3))
c = ['x']
r = np.concatenate((a, a), axis=None)
assert_equal(r.dtype, a.dtype)
assert_equal(r.ndim, 1)
r = np.concatenate((a, b), axis=None)
assert_equal(r.size, a.size + len(b))
assert_equal(r.dtype, a.dtype)
r = np.concatenate((a, b, c), axis=None, dtype="U")
d = array(['0.0', '1.0', '2.0', '3.0',
'0', '1', '2', 'x'])
assert_array_equal(r, d)
out = np.zeros(a.size + len(b))
r = np.concatenate((a, b), axis=None)
rout = np.concatenate((a, b), axis=None, out=out)
assert_(out is rout)
assert_equal(r, rout)
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
x = np.arange(1, 100)
r = np.concatenate(x, None)
assert_array_equal(x, r)
# This should probably be deprecated:
r = np.concatenate(x, 100) # axis is >= MAXDIMS
assert_array_equal(x, r)
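    # Illustrative sketch (not part of the original suite): with axis=None
    # the inputs are flattened before joining, so for C-contiguous arrays
    # the result matches concatenating the raveled inputs explicitly.
    def test_axis_None_matches_ravel_sketch(self):
        a = np.arange(4).reshape(2, 2)
        b = np.array([10, 11])
        assert_array_equal(np.concatenate((a, b), axis=None),
                           np.concatenate((a.ravel(), b.ravel())))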
def test_concatenate(self):
# Test concatenate function
# One sequence returns unmodified (but as array)
r4 = list(range(4))
assert_array_equal(concatenate((r4,)), r4)
# Any sequence
assert_array_equal(concatenate((tuple(r4),)), r4)
assert_array_equal(concatenate((array(r4),)), r4)
# 1D default concatenation
r3 = list(range(3))
assert_array_equal(concatenate((r4, r3)), r4 + r3)
# Mixed sequence types
assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
# Explicit axis specification
assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
# Including negative
assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
# 2D
a23 = array([[10, 11, 12], [13, 14, 15]])
a13 = array([[0, 1, 2]])
res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
assert_array_equal(concatenate((a23, a13)), res)
assert_array_equal(concatenate((a23, a13), 0), res)
assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
        # Arrays must match shape along the non-concatenated axes
assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
# 3D
res = arange(2 * 3 * 7).reshape((2, 3, 7))
a0 = res[..., :4]
a1 = res[..., 4:6]
a2 = res[..., 6:]
assert_array_equal(concatenate((a0, a1, a2), 2), res)
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
out = res.copy()
rout = concatenate((a0, a1, a2), 2, out=out)
assert_(out is rout)
assert_equal(res, rout)
@pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
def test_operator_concat(self):
import operator
a = array([1, 2])
b = array([3, 4])
n = [1,2]
res = array([1, 2, 3, 4])
assert_raises(TypeError, operator.concat, a, b)
assert_raises(TypeError, operator.concat, a, n)
assert_raises(TypeError, operator.concat, n, a)
assert_raises(TypeError, operator.concat, a, 1)
assert_raises(TypeError, operator.concat, 1, a)
def test_bad_out_shape(self):
a = array([1, 2])
b = array([3, 4])
assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
concatenate((a, b), out=np.empty(4))
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"])
@pytest.mark.parametrize("casting",
['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
def test_out_and_dtype(self, axis, out_dtype, casting):
# Compare usage of `out=out` with `dtype=out.dtype`
out = np.empty(4, dtype=out_dtype)
to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))
if not np.can_cast(to_concat[0], out_dtype, casting=casting):
with assert_raises(TypeError):
concatenate(to_concat, out=out, axis=axis, casting=casting)
with assert_raises(TypeError):
concatenate(to_concat, dtype=out.dtype,
axis=axis, casting=casting)
else:
res_out = concatenate(to_concat, out=out,
axis=axis, casting=casting)
res_dtype = concatenate(to_concat, dtype=out.dtype,
axis=axis, casting=casting)
assert res_out is out
assert_array_equal(out, res_dtype)
assert res_dtype.dtype == out_dtype
with assert_raises(TypeError):
concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"])
@pytest.mark.parametrize("arrs",
[([0.],), ([0.], [1]), ([0], ["string"], [1.])])
def test_dtype_with_promotion(self, arrs, string_dt, axis):
# Note that U0 and S0 should be deprecated eventually and changed to
# actually give the empty string result (together with `np.array`)
res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe")
# The actual dtype should be identical to a cast (of a double array):
assert res.dtype == np.array(1.).astype(string_dt).dtype
@pytest.mark.parametrize("axis", [None, 0])
def test_string_dtype_does_not_inspect(self, axis):
with pytest.raises(TypeError):
np.concatenate(([None], [1]), dtype="S", axis=axis)
with pytest.raises(TypeError):
np.concatenate(([None], [1]), dtype="U", axis=axis)
@pytest.mark.parametrize("axis", [None, 0])
def test_subarray_error(self, axis):
with pytest.raises(TypeError, match=".*subarray dtype"):
np.concatenate(([1], [1]), dtype="(2,)i", axis=axis)
def test_stack():
# non-iterable input
assert_raises(TypeError, stack, 1)
# 0d input
for input_ in [(1, 2, 3),
[np.int32(1), np.int32(2), np.int32(3)],
[np.array(1), np.array(2), np.array(3)]]:
assert_array_equal(stack(input_), [1, 2, 3])
# 1d input examples
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
r1 = array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(np.stack((a, b)), r1)
assert_array_equal(np.stack((a, b), axis=1), r1.T)
# all input types
assert_array_equal(np.stack(list([a, b])), r1)
assert_array_equal(np.stack(array([a, b])), r1)
# all shapes for 1d input
arrays = [np.random.randn(3) for _ in range(10)]
axes = [0, 1, -1, -2]
expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
# all shapes for 2d input
arrays = [np.random.randn(3, 4) for _ in range(10)]
axes = [0, 1, 2, -1, -2, -3]
expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
(3, 4, 10), (3, 10, 4), (10, 3, 4)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
assert_(stack([[], [], []]).shape == (3, 0))
assert_(stack([[], [], []], axis=1).shape == (0, 3))
# out
out = np.zeros_like(r1)
np.stack((a, b), out=out)
assert_array_equal(out, r1)
# edge cases
assert_raises_regex(ValueError, 'need at least one array', stack, [])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [1, np.arange(3)])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1])
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(3), 1], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
# generator is deprecated
with assert_warns(FutureWarning):
result = stack((x for x in range(3)))
assert_array_equal(result, np.array([0, 1, 2]))
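# Illustrative sketch (not part of the original suite): stacking along an
# axis is equivalent to concatenating the inputs after inserting that axis.
def test_stack_matches_expand_concat_sketch():
    a = np.arange(3)
    b = np.arange(3, 6)
    expected = np.concatenate([a[:, np.newaxis], b[:, np.newaxis]], axis=1)
    assert_array_equal(np.stack((a, b), axis=1), expected)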
class TestBlock:
@pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
def block(self, request):
        # Blocking small arrays and large arrays goes through different code
        # paths; which algorithm runs depends on the number of element
        # copies required.
        # We define a test fixture that forces most tests to go through
        # both code paths.
        # Ultimately, this should be removed if a single algorithm is found
        # to be faster for both small and large arrays.
def _block_force_concatenate(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_concatenate(arrays, list_ndim, result_ndim)
def _block_force_slicing(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_slicing(arrays, list_ndim, result_ndim)
if request.param == 'force_concatenate':
return _block_force_concatenate
elif request.param == 'force_slicing':
return _block_force_slicing
elif request.param == 'block':
return block
else:
raise ValueError('Unknown blocking request. There is a typo in the tests.')
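    # Illustrative sketch (not part of the original suite): whichever path
    # the fixture selects, the two internal implementations should agree on
    # the same layout.
    def test_paths_agree_sketch(self):
        layout = [[np.ones((2, 2)), np.zeros((2, 3))]]
        arrays, list_ndim, result_ndim, _ = _block_setup(layout)
        via_concat = _block_concatenate(arrays, list_ndim, result_ndim)
        arrays, list_ndim, result_ndim, _ = _block_setup(layout)
        via_slicing = _block_slicing(arrays, list_ndim, result_ndim)
        assert_array_equal(via_concat, via_slicing)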
def test_returns_copy(self, block):
a = np.eye(3)
b = block(a)
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_block_total_size_estimate(self, block):
_, _, _, total_size = _block_setup([1])
assert total_size == 1
_, _, _, total_size = _block_setup([[1]])
assert total_size == 1
_, _, _, total_size = _block_setup([[1, 1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1], [1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1, 2], [3, 4]])
assert total_size == 4
def test_block_simple_row_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
desired = np.array([[1, 1, 2, 2],
[1, 1, 2, 2]])
result = block([a_2d, b_2d])
assert_equal(desired, result)
def test_block_simple_column_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
expected = np.array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
result = block([[a_2d], [b_2d]])
assert_equal(expected, result)
def test_block_with_1d_arrays_row_wise(self, block):
        # 1-D vectors are treated as row arrays
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([1, 2, 3, 2, 3, 4])
result = block([a, b])
assert_equal(expected, result)
def test_block_with_1d_arrays_multiple_rows(self, block):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([[1, 2, 3, 2, 3, 4],
[1, 2, 3, 2, 3, 4]])
result = block([[a, b], [a, b]])
assert_equal(expected, result)
def test_block_with_1d_arrays_column_wise(self, block):
        # 1-D vectors are treated as row arrays
a_1d = np.array([1, 2, 3])
b_1d = np.array([2, 3, 4])
expected = np.array([[1, 2, 3],
[2, 3, 4]])
result = block([[a_1d], [b_1d]])
assert_equal(expected, result)
def test_block_mixed_1d_and_2d(self, block):
a_2d = np.ones((2, 2))
b_1d = np.array([2, 2])
result = block([[a_2d], [b_1d]])
expected = np.array([[1, 1],
[1, 1],
[2, 2]])
assert_equal(expected, result)
def test_block_complicated(self, block):
# a bit more complicated
one_2d = np.array([[1, 1, 1]])
two_2d = np.array([[2, 2, 2]])
three_2d = np.array([[3, 3, 3, 3, 3, 3]])
four_1d = np.array([4, 4, 4, 4, 4, 4])
five_0d = np.array(5)
six_1d = np.array([6, 6, 6, 6, 6])
zero_2d = np.zeros((2, 6))
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 3, 3, 3],
[4, 4, 4, 4, 4, 4],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
result = block([[one_2d, two_2d],
[three_2d],
[four_1d],
[five_0d, six_1d],
[zero_2d]])
assert_equal(result, expected)
def test_nested(self, block):
one = np.array([1, 1, 1])
two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
three = np.array([3, 3, 3])
four = np.array([4, 4, 4])
five = np.array(5)
six = np.array([6, 6, 6, 6, 6])
zero = np.zeros((2, 6))
result = block([
[
block([
[one],
[three],
[four]
]),
two
],
[five, six],
[zero]
])
expected = np.array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 2, 2, 2],
[4, 4, 4, 2, 2, 2],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert_equal(result, expected)
def test_3d(self, block):
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
result = block([
[
[a000, a001],
[a010, a011],
],
[
[a100, a101],
[a110, a111],
]
])
expected = array([[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]],
[[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8]]])
assert_array_equal(result, expected)
def test_block_with_mismatched_shape(self, block):
a = np.array([0, 0])
b = np.eye(2)
assert_raises(ValueError, block, [a, b])
assert_raises(ValueError, block, [b, a])
to_block = [[np.ones((2,3)), np.ones((2,2))],
[np.ones((2,2)), np.ones((2,2))]]
assert_raises(ValueError, block, to_block)
def test_no_lists(self, block):
assert_equal(block(1), np.array(1))
assert_equal(block(np.eye(3)), np.eye(3))
def test_invalid_nesting(self, block):
msg = 'depths are mismatched'
assert_raises_regex(ValueError, msg, block, [1, [2]])
assert_raises_regex(ValueError, msg, block, [1, []])
assert_raises_regex(ValueError, msg, block, [[1], 2])
assert_raises_regex(ValueError, msg, block, [[], 2])
assert_raises_regex(ValueError, msg, block, [
[[1], [2]],
[[3, 4]],
[5] # missing brackets
])
def test_empty_lists(self, block):
assert_raises_regex(ValueError, 'empty', block, [])
assert_raises_regex(ValueError, 'empty', block, [[]])
assert_raises_regex(ValueError, 'empty', block, [[1], []])
def test_tuple(self, block):
assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
def test_different_ndims(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 1, 3))
result = block([a, b, c])
expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
assert_equal(result, expected)
def test_different_ndims_depths(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 2, 3))
result = block([[a, b], [c]])
expected = np.array([[[1., 2., 2.],
[3., 3., 3.],
[3., 3., 3.]]])
assert_equal(result, expected)
def test_block_memory_order(self, block):
# 3D
arr_c = np.zeros((3,)*3, order='C')
arr_f = np.zeros((3,)*3, order='F')
b_c = [[[arr_c, arr_c],
[arr_c, arr_c]],
[[arr_c, arr_c],
[arr_c, arr_c]]]
b_f = [[[arr_f, arr_f],
[arr_f, arr_f]],
[[arr_f, arr_f],
[arr_f, arr_f]]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
arr_c = np.zeros((3, 3), order='C')
arr_f = np.zeros((3, 3), order='F')
# 2D
b_c = [[arr_c, arr_c],
[arr_c, arr_c]]
b_f = [[arr_f, arr_f],
[arr_f, arr_f]]
assert block(b_c).flags['C_CONTIGUOUS']
assert block(b_f).flags['F_CONTIGUOUS']
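# Illustrative sketch (not part of the original suite): for a plain 2-D
# layout, block is equivalent to vstack-ing rows of hstack-ed tiles.
def test_block_matches_vstack_hstack_sketch():
    a, b = np.ones((2, 2)), np.zeros((2, 3))
    c, d = np.full((1, 2), 7.), np.full((1, 3), 8.)
    expected = vstack([hstack([a, b]), hstack([c, d])])
    assert_array_equal(block([[a, b], [c, d]]), expected)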
def test_block_dispatcher():
class ArrayLike:
pass
a = ArrayLike()
b = ArrayLike()
c = ArrayLike()
assert_equal(list(_block_dispatcher(a)), [a])
assert_equal(list(_block_dispatcher([a])), [a])
assert_equal(list(_block_dispatcher([a, b])), [a, b])
assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
# don't recurse into non-lists
assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
| 27,248 | Python | 34.712975 | 98 | 0.488696 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_scalarprint.py | """ Test printing of scalar types.
"""
import code
import platform
import pytest
import sys
from tempfile import TemporaryFile
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
class TestRealScalars:
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
wanted = [
['0.0', '0.0', '0.0', '0.0' ],
['-0.0', '-0.0', '-0.0', '-0.0'],
['1.0', '1.0', '1.0', '1.0' ],
['-1.0', '-1.0', '-1.0', '-1.0'],
['inf', 'inf', 'inf', 'inf' ],
['-inf', '-inf', '-inf', '-inf'],
['nan', 'nan', 'nan', 'nan']]
for wants, val in zip(wanted, svals):
for want, styp in zip(wants, styps):
msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val))
assert_equal(str(styp(val)), want, err_msg=msg)
def test_scalar_cutoffs(self):
        # test that both str and repr of np.float64 behave
        # like Python floats in Python 3.
def check(v):
assert_equal(str(np.float64(v)), str(v))
assert_equal(str(np.float64(v)), repr(v))
assert_equal(repr(np.float64(v)), repr(v))
assert_equal(repr(np.float64(v)), str(v))
# check we use the same number of significant digits
check(1.12345678901234567890)
check(0.0112345678901234567890)
# check switch from scientific output to positional and back
check(1e-5)
check(1e-4)
check(1e15)
check(1e16)
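    # Illustrative sketch (not part of the original suite): the repr digits
    # checked above are the shortest that round-trip, so parsing the repr
    # recovers the exact value.
    def test_repr_roundtrip_sketch(self):
        for v in [0.1, 1e-5, 1e16, 1024 - 2**-13]:
            x = np.float64(v)
            assert_equal(np.float64(repr(x)), x)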
def test_py2_float_print(self):
# gh-10753
# In python2, the python float type implements an obsolete method
# tp_print, which overrides tp_repr and tp_str when using "print" to
# output to a "real file" (ie, not a StringIO). Make sure we don't
# inherit it.
x = np.double(0.1999999999999)
with TemporaryFile('r+t') as f:
print(x, file=f)
f.seek(0)
output = f.read()
assert_equal(output, str(x) + '\n')
# In python2 the value float('0.1999999999999') prints with reduced
# precision as '0.2', but we want numpy's np.double('0.1999999999999')
# to print the unique value, '0.1999999999999'.
# gh-11031
# Only in the python2 interactive shell and when stdout is a "real"
# file, the output of the last command is printed to stdout without
# Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
# x` are potentially different. Make sure they are the same. The only
# way I found to get prompt-like output is using an actual prompt from
# the 'code' module. Again, must use tempfile to get a "real" file.
# dummy user-input which enters one line and then ctrl-Ds.
def userinput():
yield 'np.sqrt(2)'
raise EOFError
gen = userinput()
input_func = lambda prompt="": next(gen)
with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
orig_stdout, orig_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = fo, fe
code.interact(local={'np': np}, readfunc=input_func, banner='')
sys.stdout, sys.stderr = orig_stdout, orig_stderr
fo.seek(0)
capture = fo.read().strip()
assert_equal(capture, repr(np.sqrt(2)))
def test_dragon4(self):
# these tests are adapted from Ryan Juckett's dragon4 implementation,
# see dragon4.c for details.
fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)
preckwd = lambda prec: {'unique': False, 'precision': prec}
assert_equal(fpos32('1.0'), "1.")
assert_equal(fsci32('1.0'), "1.e+00")
assert_equal(fpos32('10.234'), "10.234")
assert_equal(fpos32('-10.234'), "-10.234")
assert_equal(fsci32('10.234'), "1.0234e+01")
assert_equal(fsci32('-10.234'), "-1.0234e+01")
assert_equal(fpos32('1000.0'), "1000.")
assert_equal(fpos32('1.0', precision=0), "1.")
assert_equal(fsci32('1.0', precision=0), "1.e+00")
assert_equal(fpos32('10.234', precision=0), "10.")
assert_equal(fpos32('-10.234', precision=0), "-10.")
assert_equal(fsci32('10.234', precision=0), "1.e+01")
assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
assert_equal(fpos32('10.234', precision=2), "10.23")
assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
'9.9999999999999995e-08')
assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
'9.8813129168249309e-324')
assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
'9.9999999999999694e-311')
# test rounding
# 3.1415927410 is closest float32 to np.pi
assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
"3.1415927410")
assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
"3.1415927410e+00")
assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
"3.1415926536")
assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
"3.1415926536e+00")
# 299792448 is closest float32 to 299792458
assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")
assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
"3.1415927410125732421875000")
assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
"3.14159265358979311599796346854418516159057617187500")
assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")
# smallest numbers
assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
"0.00000000000000000000000000000000000000000000140129846432"
"4817070923729583289916131280261941876515771757068283889791"
"08268586060148663818836212158203125")
assert_equal(fpos64(5e-324, unique=False, precision=1074),
"0.00000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000049406564584124654417656"
"8792868221372365059802614324764425585682500675507270208751"
"8652998363616359923797965646954457177309266567103559397963"
"9877479601078187812630071319031140452784581716784898210368"
"8718636056998730723050006387409153564984387312473397273169"
"6151400317153853980741262385655911710266585566867681870395"
"6031062493194527159149245532930545654440112748012970999954"
"1931989409080416563324524757147869014726780159355238611550"
"1348035264934720193790268107107491703332226844753335720832"
"4319360923828934583680601060115061698097530783422773183292"
"4790498252473077637592724787465608477820373446969953364701"
"7972677717585125660551199131504891101451037862738167250955"
"8373897335989936648099411642057026370902792427675445652290"
"87538682506419718265533447265625")
# largest numbers
f32x = np.finfo(np.float32).max
assert_equal(fpos32(f32x, **preckwd(0)),
"340282346638528859811704183484516925440.")
assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
"1797693134862315708145274237317043567980705675258449965989"
"1747680315726078002853876058955863276687817154045895351438"
"2464234321326889464182768467546703537516986049910576551282"
"0762454900903893289440758685084551339423045832369032229481"
"6580855933212334827479782620414472316873817718091929988125"
"0404026184124858368.")
        # Warning: in unique mode only the integer digits necessary for
        # uniqueness are computed; the rest are 0.
assert_equal(fpos32(f32x),
"340282350000000000000000000000000000000.")
# Further tests of zero-padding vs rounding in different combinations
# of unique, fractional, precision, min_digits
# precision can only reduce digits, not add them.
# min_digits can only extend digits, not reduce them.
assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0),
"340282350000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4),
"340282350000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0),
"340282346638528859811704183484516925440.")
assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4),
"340282346638528859811704183484516925440.0000")
assert_equal(fpos32(f32x, unique=True, fractional=True,
min_digits=4, precision=4),
"340282346638528859811704183484516925440.0000")
assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False,
precision=0)
assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4),
"340300000000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20),
"340282350000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4),
"340282350000000000000000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=False,
min_digits=20),
"340282346638528859810000000000000000000.")
assert_equal(fpos32(f32x, unique=True, fractional=False,
min_digits=15),
"340282346638529000000000000000000000000.")
assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4),
"340300000000000000000000000000000000000.")
# test that unique rounding is preserved when precision is supplied
# but no extra digits need to be printed (gh-18609)
a = np.float64.fromhex('-1p-97')
assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30')
assert_equal(fsci64(a, unique=False, precision=15),
'-6.310887241768094e-30')
assert_equal(fsci64(a, unique=True, precision=15),
'-6.310887241768095e-30')
assert_equal(fsci64(a, unique=True, min_digits=15),
'-6.310887241768095e-30')
assert_equal(fsci64(a, unique=True, precision=15, min_digits=15),
'-6.310887241768095e-30')
        # add/remove digits in unique mode with unbiased rounding
assert_equal(fsci64(a, unique=True, precision=14),
'-6.31088724176809e-30')
assert_equal(fsci64(a, unique=True, min_digits=16),
'-6.3108872417680944e-30')
assert_equal(fsci64(a, unique=True, precision=16),
'-6.310887241768095e-30')
assert_equal(fsci64(a, unique=True, min_digits=14),
'-6.310887241768095e-30')
# test min_digits in unique mode with different rounding cases
assert_equal(fsci64('1e120', min_digits=3), '1.000e+120')
assert_equal(fsci64('1e100', min_digits=3), '1.000e+100')
# test trailing zeros
assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
# gh-10713
assert_equal(fpos64('324', unique=False, precision=5,
fractional=False), "324.00")
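    # Illustrative sketch (not part of the original suite): unique mode
    # prints only the digits needed to round-trip, while unique=False pads
    # out to the requested precision.
    def test_unique_vs_fixed_sketch(self):
        assert_equal(np.format_float_positional(np.float64(0.5)), "0.5")
        assert_equal(np.format_float_positional(np.float64(0.5),
                                                unique=False, precision=4),
                     "0.5000")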
def test_dragon4_interface(self):
tps = [np.float16, np.float32, np.float64]
if hasattr(np, 'float128'):
tps.append(np.float128)
fpos = np.format_float_positional
fsci = np.format_float_scientific
for tp in tps:
# test padding
assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ")
assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ")
assert_equal(fpos(tp('-10.2'),
pad_left=4, pad_right=4), " -10.2 ")
# test exp_digits
assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001")
# test fixed (non-unique) mode
assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000")
assert_equal(fsci(tp('1.0'), unique=False, precision=4),
"1.0000e+00")
# test trimming
# trim of 'k' or '.' only affects non-unique mode, since unique
# mode will not output trailing 0s.
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'),
"1.0000")
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'),
"1.")
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'),
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'),
"1.0")
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'),
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), trim='0'), "1.0")
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'),
"1")
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), trim='-'), "1")
assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1")
@pytest.mark.skipif(not platform.machine().startswith("ppc64"),
reason="only applies to ppc float128 values")
def test_ppc64_ibm_double_double128(self):
# check that the precision decreases once we get into the subnormal
# range. Unlike float64, this starts around 1e-292 instead of 1e-308,
# which happens when the first double is normal and the second is
# subnormal.
x = np.float128('2.123123123123123123123123123123123e-286')
got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
expected = [
"1.06156156156156156156156156156157e-286",
"1.06156156156156156156156156156158e-287",
"1.06156156156156156156156156156159e-288",
"1.0615615615615615615615615615616e-289",
"1.06156156156156156156156156156157e-290",
"1.06156156156156156156156156156156e-291",
"1.0615615615615615615615615615616e-292",
"1.0615615615615615615615615615615e-293",
"1.061561561561561561561561561562e-294",
"1.06156156156156156156156156155e-295",
"1.0615615615615615615615615616e-296",
"1.06156156156156156156156156e-297",
"1.06156156156156156156156157e-298",
"1.0615615615615615615615616e-299",
"1.06156156156156156156156e-300",
"1.06156156156156156156155e-301",
"1.0615615615615615615616e-302",
"1.061561561561561561562e-303",
"1.06156156156156156156e-304",
"1.0615615615615615618e-305",
"1.06156156156156156e-306",
"1.06156156156156157e-307",
"1.0615615615615616e-308",
"1.06156156156156e-309",
"1.06156156156157e-310",
"1.0615615615616e-311",
"1.06156156156e-312",
"1.06156156154e-313",
"1.0615615616e-314",
"1.06156156e-315",
"1.06156155e-316",
"1.061562e-317",
"1.06156e-318",
"1.06155e-319",
"1.0617e-320",
"1.06e-321",
"1.04e-322",
"1e-323",
"0.0",
"0.0"]
assert_equal(got, expected)
# Note: we follow glibc behavior, but it (or gcc) might not be right.
# In particular we can get two values that print the same but are not
# equal:
a = np.float128('2')/np.float128('3')
b = np.float128(str(a))
assert_equal(str(a), str(b))
assert_(a != b)
    def test_float32_roundtrip(self):
# gh-9360
x = np.float32(1024 - 2**-14)
y = np.float32(1024 - 2**-13)
assert_(repr(x) != repr(y))
assert_equal(np.float32(repr(x)), x)
assert_equal(np.float32(repr(y)), y)
    def test_float64_vs_python(self):
# gh-2643, gh-6136, gh-6908
assert_equal(repr(np.float64(0.1)), repr(0.1))
assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
| 18,694 | Python | 47.81201 | 80 | 0.583824 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_abc.py | from numpy.testing import assert_
import numbers
import numpy as np
from numpy.core.numerictypes import sctypes
class TestABC:
def test_abstract(self):
assert_(issubclass(np.number, numbers.Number))
assert_(issubclass(np.inexact, numbers.Complex))
assert_(issubclass(np.complexfloating, numbers.Complex))
assert_(issubclass(np.floating, numbers.Real))
assert_(issubclass(np.integer, numbers.Integral))
assert_(issubclass(np.signedinteger, numbers.Integral))
assert_(issubclass(np.unsignedinteger, numbers.Integral))
def test_floats(self):
for t in sctypes['float']:
assert_(isinstance(t(), numbers.Real),
"{0} is not instance of Real".format(t.__name__))
assert_(issubclass(t, numbers.Real),
"{0} is not subclass of Real".format(t.__name__))
assert_(not isinstance(t(), numbers.Rational),
"{0} is instance of Rational".format(t.__name__))
assert_(not issubclass(t, numbers.Rational),
"{0} is subclass of Rational".format(t.__name__))
def test_complex(self):
for t in sctypes['complex']:
assert_(isinstance(t(), numbers.Complex),
"{0} is not instance of Complex".format(t.__name__))
assert_(issubclass(t, numbers.Complex),
"{0} is not subclass of Complex".format(t.__name__))
assert_(not isinstance(t(), numbers.Real),
"{0} is instance of Real".format(t.__name__))
assert_(not issubclass(t, numbers.Real),
"{0} is subclass of Real".format(t.__name__))
def test_int(self):
for t in sctypes['int']:
assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
def test_uint(self):
for t in sctypes['uint']:
assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
| 2,328 | Python | 41.345454 | 73 | 0.567869 |
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/numpy/core/tests/test_multiarray.py | import collections.abc
import tempfile
import sys
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.compat import pickle
import pathlib
import builtins
from decimal import Decimal
import mmap
import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.core._rational_tests import rational
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, IS_PYSTON, HAS_REFCOUNT, assert_array_less,
runstring, temppath, suppress_warnings, break_cycles,
)
from numpy.testing._private.utils import requires_memory, _no_tracing
from numpy.core.tests._locales import CommaDecimalPointLocale
from numpy.lib.recfunctions import repack_fields
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
    The ndarray is guaranteed *not* to be aligned to twice the requested
    alignment. E.g., if align=4, the result is guaranteed not to be aligned
    to 8. If align=None, dtype.alignment is used."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
    # Choose the offset so that (ptr + offset) % (2*align) == align, i.e.
    # the data pointer is aligned to `align` but never to 2*align.
    offset = (align - ptr) % (2 * align)
    # Note: slices producing 0-size arrays do not necessarily change the
    # data pointer, so slice out size+1 bytes and trim the last one.
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
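# Illustrative sketch (not part of the original suite): a quick check of the
# helper's documented contract, observed through the raw data pointer.
def test_aligned_zeros_contract_sketch():
    data = _aligned_zeros(7, dtype=np.float64, align=16)
    ptr = data.__array_interface__['data'][0]
    assert ptr % 16 == 0   # aligned to the requested boundary
    assert ptr % 32 != 0   # but deliberately not to twice that
    assert_array_equal(data, np.zeros(7))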
class TestFlags:
def setup_method(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_any_base(self):
# Ensure that any base being writeable is sufficient to change flag;
# this is especially interesting for arrays from an array interface.
arr = np.arange(10)
class subclass(np.ndarray):
pass
        # Create a subclass so the base will not be collapsed; this is OK to change
view1 = arr.view(subclass)
view2 = view1[...]
arr.flags.writeable = False
view2.flags.writeable = False
view2.flags.writeable = True # Can be set to True again.
arr = np.arange(10)
class frominterface:
def __init__(self, arr):
self.arr = arr
self.__array_interface__ = arr.__array_interface__
        view1 = np.asarray(frominterface(arr))
view2 = view1[...]
view2.flags.writeable = False
view2.flags.writeable = True
view1.flags.writeable = False
view2.flags.writeable = False
with assert_raises(ValueError):
# Must assume not writeable, since only base is not:
view2.flags.writeable = True
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(IS_PYPY, reason="PyPy always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_writeable_from_c_data(self):
# Test that the writeable flag can be changed for an array wrapping
# low level C-data, but not owning its data.
# Also see that this is deprecated to change from python.
from numpy.core._multiarray_tests import get_c_wrapping_array
arr_writeable = get_c_wrapping_array(True)
assert not arr_writeable.flags.owndata
assert arr_writeable.flags.writeable
view = arr_writeable[...]
# Toggling the writeable flag works on the view:
view.flags.writeable = False
assert not view.flags.writeable
view.flags.writeable = True
assert view.flags.writeable
# Flag can be unset on the arr_writeable:
arr_writeable.flags.writeable = False
arr_readonly = get_c_wrapping_array(False)
assert not arr_readonly.flags.owndata
assert not arr_readonly.flags.writeable
for arr in [arr_writeable, arr_readonly]:
view = arr[...]
view.flags.writeable = False # make sure it is readonly
arr.flags.writeable = False
assert not arr.flags.writeable
with assert_raises(ValueError):
view.flags.writeable = True
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
with assert_raises(DeprecationWarning):
arr.flags.writeable = True
with assert_warns(DeprecationWarning):
arr.flags.writeable = True
def test_warnonwrite(self):
a = np.arange(10)
a.flags._warn_on_write = True
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always')
a[1] = 10
a[2] = 10
# only warn once
assert_(len(w) == 1)
@pytest.mark.parametrize(["flag", "flag_value", "writeable"],
[("writeable", True, True),
# Delete _warn_on_write after deprecation and simplify
# the parameterization:
("_warn_on_write", True, False),
("writeable", False, False)])
def test_readonly_flag_protocols(self, flag, flag_value, writeable):
a = np.arange(10)
setattr(a.flags, flag, flag_value)
class MyArr():
__array_struct__ = a.__array_struct__
assert memoryview(a).readonly is not writeable
assert a.__array_interface__['data'][1] is not writeable
assert np.asarray(MyArr()).flags.writeable is writeable
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
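    # Illustrative sketch (not part of the original suite): the composite
    # flags checked above are shorthands for combinations of basic flags.
    def test_composite_flags_sketch(self):
        f = self.a.flags
        assert_equal(f.carray, f.c_contiguous and f.aligned and f.writeable)
        assert_equal(f.behaved, f.aligned and f.writeable)
        assert_equal(f.forc, f.c_contiguous or f.f_contiguous)
        assert_equal(f.fnc, f.f_contiguous and not f.c_contiguous)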
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash:
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes:
def setup_method(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
        # int_ doesn't inherit from Python int, because Python's int is not fixed-width
assert_(not isinstance(numpy_int, int))
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
# test 0d
arr_0d = np.array(0)
arr_0d.strides = ()
assert_raises(TypeError, set_strides, arr_0d, None)
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction:
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2], dtype=object)
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_0d_array_shape(self):
assert np.ones(np.array(3)).shape == (3,)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
@pytest.mark.parametrize("func",
[np.array,
np.asarray,
np.asanyarray,
np.ascontiguousarray,
np.asfortranarray])
def test_bad_arguments_error(self, func):
with pytest.raises(TypeError):
func(3, dtype="bad dtype")
with pytest.raises(TypeError):
func() # missing arguments
with pytest.raises(TypeError):
func(1, 2, 3, 4, 5, 6, 7, 8) # too many arguments
@pytest.mark.parametrize("func",
[np.array,
np.asarray,
np.asanyarray,
np.ascontiguousarray,
np.asfortranarray])
def test_array_as_keyword(self, func):
# This should likely be made positional only, but do not change
# the name accidentally.
if func is np.array:
func(object=3)
else:
func(a=3)
class TestAssignment:
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence:
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr:
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank:
def setup_method(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
# strides and shape must be the same length
with pytest.raises(ValueError):
np.ndarray((2,), strides=())
with pytest.raises(ValueError):
np.ndarray((), strides=(2,))
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
def test_real_imag(self):
# contiguity checks are for gh-11245
x = np.array(1j)
xr = x.real
xi = x.imag
assert_equal(xr, np.array(0))
assert_(type(xr) is np.ndarray)
assert_equal(xr.flags.contiguous, True)
assert_equal(xr.flags.f_contiguous, True)
assert_equal(xi, np.array(1))
assert_(type(xi) is np.ndarray)
assert_equal(xi.flags.contiguous, True)
assert_equal(xi.flags.f_contiguous, True)
class TestScalarIndexing:
def setup_method(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
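        # Illustrative addition (a minimal sketch, not part of the original
        # test): np.shares_memory confirms that such slice pairs really do
        # alias one buffer, which is what obliges NumPy to copy through a
        # temporary when source and destination overlap.
        a = np.arange(4)
        assert_(np.shares_memory(a[:-1], a[1:]))
        a[:-1] = a[1:]
        assert_equal(a, [1, 2, 3, 3])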
class TestCreation:
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x:
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert arr.dtype == 'V8' # current default
# Same length scalars (those that go to the same void) work:
arr = np.array([b"1234", b"1234"], dtype="V")
assert arr.dtype == "V4"
# Promoting different lengths will fail (pre 1.20 this worked)
# by going via S5 and casting to V5.
with pytest.raises(TypeError):
np.array([b"1234", b"12345"], dtype="V")
with pytest.raises(TypeError):
np.array([b"12345", b"1234"], dtype="V")
# Check the same for the casting path:
arr = np.array([b"1234", b"1234"], dtype="O").astype("V")
assert arr.dtype == "V4"
with pytest.raises(TypeError):
np.array([b"1234", b"12345"], dtype="O").astype("V")
@pytest.mark.parametrize("idx",
[pytest.param(Ellipsis, id="arr"), pytest.param((), id="scalar")])
def test_structured_void_promotion(self, idx):
arr = np.array(
[np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i')[idx]],
dtype="V")
assert_array_equal(arr, np.array([(1, 1), (2, 2)], dtype="i,i"))
# The following fails to promote the two dtypes, resulting in an error
with pytest.raises(TypeError):
np.array(
[np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i,i')[idx]],
dtype="V")
def test_too_big_error(self):
        # 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
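        # Worked check of the arithmetic in the comment above (an
        # illustrative addition): these are exactly the first integers
        # whose squares overflow a signed 32-bit / 64-bit intp.
        assert_(46340**2 <= 2**31 - 1 < 46341**2)
        assert_(3037000499**2 <= 2**63 - 1 < 3037000500**2)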
@pytest.mark.skipif(np.dtype(np.intp).itemsize != 8,
reason="malloc may not fail on 32 bit systems")
def test_malloc_fails(self):
# This test is guaranteed to fail due to a too large allocation
with assert_raises(np.core._exceptions._ArrayMemoryError):
np.empty(np.iinfo(np.intp).max, dtype=np.uint8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
        # test big arrays, as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
            del d
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogeneous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is treated as a mapping type,
        indicated by raising a KeyError from __getitem__. At some point we
        may raise a warning instead of an error in the Fail case.
"""
class Fail:
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map:
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
a = np.array(C()) # segfault?
assert_equal(len(a), 0)
def test_false_len_iterable(self):
# Special case where a bad __getitem__ makes us fall back on __iter__:
class C:
def __getitem__(self, x):
raise Exception
def __iter__(self):
return iter(())
def __len__(self):
return 2
a = np.empty(2)
with assert_raises(ValueError):
a[:] = C() # Segfault!
np.array(C()) == list(C())
def test_failed_len_sequence(self):
# gh-7393
class A:
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
def _ragged_creation(self, seq):
# without dtype=object, the ragged object should raise
with assert_warns(np.VisibleDeprecationWarning):
a = np.array(seq)
b = np.array(seq, dtype=object)
assert_equal(a, b)
return b
def test_ragged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = self._ragged_creation([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_ragged_shape_object(self):
# The ragged dimension of a list is turned into an object array
a = self._ragged_creation([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = self._ragged_creation([[1], [2], [3, 3]])
assert a.shape == (3,)
assert a.dtype == object
def test_array_of_ragged_array(self):
outer = np.array([None, None])
outer[0] = outer[1] = np.array([1, 2, 3])
assert np.array(outer).shape == (2,)
assert np.array([outer]).shape == (1, 2)
outer_ragged = np.array([None, None])
outer_ragged[0] = np.array([1, 2, 3])
outer_ragged[1] = np.array([1, 2, 3, 4])
# should both of these emit deprecation warnings?
assert np.array(outer_ragged).shape == (2,)
assert np.array([outer_ragged]).shape == (1, 2,)
def test_deep_nonragged_object(self):
# None of these should raise, even though they are missing dtype=object
a = np.array([[[Decimal(1)]]])
a = np.array([1, Decimal(1)])
a = np.array([[1], [Decimal(1)]])
class TestStructured:
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works, including cases that
# require promotion to work:
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # The important point is that this does not return True:
with pytest.raises(TypeError):
x == y
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # The important point is that this does not return True:
with pytest.raises(TypeError):
x == y
def test_structured_comparisons_with_promotion(self):
# Check that structured arrays can be compared so long as their
# dtypes promote fine:
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>f8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>i8')])
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Including with embedded subarray dtype (although subarray comparison
# itself may still be a bit weird and compare the raw data)
a = np.array([(5, 42), (10, 1)], dtype=[('a', '10>f8'), ('b', '5<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '10<i8'), ('b', '5>i8')])
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
def test_void_comparison_failures(self):
        # In principle, one could decide to return an array of False for
        # comparisons that are impossible. But right now we raise a
        # TypeError when "void" dtypes are involved.
x = np.zeros(3, dtype=[('a', 'i1')])
y = np.zeros(3)
# Cannot compare non-structured to structured:
with pytest.raises(TypeError):
x == y
# Added title prevents promotion, but casts are OK:
y = np.zeros(3, dtype=[(('title', 'a'), 'i1')])
assert np.can_cast(y.dtype, x.dtype)
with pytest.raises(TypeError):
x == y
x = np.zeros(3, dtype="V7")
y = np.zeros(3, dtype="V8")
with pytest.raises(TypeError):
x == y
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
def test_assignment(self):
def testassign(arr, v):
c = arr.copy()
c[0] = v # assign using setitem
c[1:] = v # assign using "dtype_transfer" code paths
return c
dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
arr = np.ones(2, dt)
v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
v4 = np.array([(2,)], dtype=[('bar', 'i8')])
v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
ans = np.array([(2,3),(2,3)], dtype=dt)
assert_equal(testassign(arr, v1), ans)
assert_equal(testassign(arr, v2), ans)
assert_equal(testassign(arr, v3), ans)
assert_raises(TypeError, lambda: testassign(arr, v4))
assert_equal(testassign(arr, v5), ans)
w[:] = 4
assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
# test field-reordering, assignment by position, and self-assignment
a = np.array([(1,2,3)],
dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
a[['foo', 'bar']] = a[['bar', 'foo']]
assert_equal(a[0].item(), (2,1,3))
# test that this works even for 'simple_unaligned' structs
        # (i.e., that PyArray_EquivTypes cares about field order too)
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
def test_scalar_assignment(self):
with assert_raises(ValueError):
arr = np.arange(25).reshape(5, 5)
arr.itemset(3)
def test_structuredscalar_indexing(self):
# test gh-7262
x = np.empty(shape=1, dtype="(2)3S,(2)3U")
assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
assert_equal(x[0], x[0][()])
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
assert_raises(KeyError, lambda : a[['a','a']])
assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
def test_structured_cast_promotion_fieldorder(self):
# gh-15494
# dtypes with different field names are not promotable
A = ("a", "<i8")
B = ("b", ">i8")
ab = np.array([(1, 2)], dtype=[A, B])
ba = np.array([(1, 2)], dtype=[B, A])
assert_raises(TypeError, np.concatenate, ab, ba)
assert_raises(TypeError, np.result_type, ab.dtype, ba.dtype)
assert_raises(TypeError, np.promote_types, ab.dtype, ba.dtype)
        # dtypes with same field names/order but different memory offsets
        # and byte-order are promotable to a packed, native-byte-order form.
assert_equal(np.promote_types(ab.dtype, ba[['a', 'b']].dtype),
repack_fields(ab.dtype.newbyteorder('N')))
# gh-13667
# dtypes with different fieldnames but castable field types are castable
assert_equal(np.can_cast(ab.dtype, ba.dtype), True)
assert_equal(ab.astype(ba.dtype).dtype, ba.dtype)
assert_equal(np.can_cast('f8,i8', [('f0', 'f8'), ('f1', 'i8')]), True)
assert_equal(np.can_cast('f8,i8', [('f1', 'f8'), ('f0', 'i8')]), True)
assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')]), False)
assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')],
casting='unsafe'), True)
ab[:] = ba # make sure assignment still works
# tests of type-promotion of corresponding fields
dt1 = np.dtype([("", "i4")])
dt2 = np.dtype([("", "i8")])
assert_equal(np.promote_types(dt1, dt2), np.dtype([('f0', 'i8')]))
assert_equal(np.promote_types(dt2, dt1), np.dtype([('f0', 'i8')]))
assert_raises(TypeError, np.promote_types, dt1, np.dtype([("", "V3")]))
assert_equal(np.promote_types('i4,f8', 'i8,f4'),
np.dtype([('f0', 'i8'), ('f1', 'f8')]))
# test nested case
dt1nest = np.dtype([("", dt1)])
dt2nest = np.dtype([("", dt2)])
assert_equal(np.promote_types(dt1nest, dt2nest),
np.dtype([('f0', np.dtype([('f0', 'i8')]))]))
# note that offsets are lost when promoting:
dt = np.dtype({'names': ['x'], 'formats': ['i4'], 'offsets': [8]})
a = np.ones(3, dtype=dt)
assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')]))
@pytest.mark.parametrize("dtype_dict", [
dict(names=["a", "b"], formats=["i4", "f"], itemsize=100),
dict(names=["a", "b"], formats=["i4", "f"],
offsets=[0, 12])])
@pytest.mark.parametrize("align", [True, False])
def test_structured_promotion_packs(self, dtype_dict, align):
# Structured dtypes are packed when promoted (we consider the packed
        # form to be "canonical"), so there is no extra padding.
dtype = np.dtype(dtype_dict, align=align)
# Remove non "canonical" dtype options:
dtype_dict.pop("itemsize", None)
dtype_dict.pop("offsets", None)
expected = np.dtype(dtype_dict, align=align)
res = np.promote_types(dtype, dtype)
assert res.itemsize == expected.itemsize
assert res.fields == expected.fields
# But the "expected" one, should just be returned unchanged:
res = np.promote_types(expected, expected)
assert res is expected
def test_structured_asarray_is_view(self):
# A scalar viewing an array preserves its view even when creating a
        # new array. This test documents behaviour; it may not be the
        # desired behaviour.
arr = np.array([1], dtype="i,i")
scalar = arr[0]
assert not scalar.flags.owndata # view into the array
assert np.asarray(scalar).base is scalar
# But never when a dtype is passed in:
assert np.asarray(scalar, dtype=scalar.dtype).base is None
# A scalar which owns its data does not have this property.
        # It is not easy to create one; one way is to use pickle:
scalar = pickle.loads(pickle.dumps(scalar))
assert scalar.flags.owndata
assert np.asarray(scalar).base is None
class TestBool:
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
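        # Illustrative addition: the two interned values are also exposed
        # as the module-level singletons np.True_ and np.False_.
        assert_(np.bool_(True) is np.True_)
        assert_(np.bool_(False) is np.False_)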
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
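            # The view tricks below rewrite the underlying byte values
            # (3, then 12, then 0xFF) without changing which entries are
            # nonzero, so count_nonzero must not assume bools are 0 or 1.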
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b'', (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b'a', b'0', b' ']:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.unicode_)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
class TestZeroSizeFlexible:
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
if dtype == np.void:
return np.zeros(shape, dtype=(dtype, 0))
# not constructable directly
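        # (so wrap the zero-sized dtype in a structured field, construct
        # the array, and extract that field)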
dtype = np.dtype([('x', dtype, 0)])
return np.zeros(shape, dtype=dtype)['x']
def test_create(self):
zs = self._zeros(10, bytes)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, np.void)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, str)
assert_equal(zs.itemsize, 0)
def _test_sort_partition(self, name, kinds, **kwargs):
# Previously, these would all hang
for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
sort_method = getattr(zs, name)
sort_func = getattr(np, name)
for kind in kinds:
sort_method(kind=kind, **kwargs)
sort_func(zs, kind=kind, **kwargs)
def test_sort(self):
self._test_sort_partition('sort', kinds='qhs')
def test_argsort(self):
self._test_sort_partition('argsort', kinds='qhs')
def test_partition(self):
self._test_sort_partition('partition', kinds=['introselect'], kth=2)
def test_argpartition(self):
self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
def test_resize(self):
# previously an error
for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
zs.resize(25)
zs.resize((10, 10))
def test_view(self):
for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
# viewing as itself should be allowed
assert_equal(zs.view(dt).dtype, np.dtype(dt))
# viewing as any non-empty type gives an empty result
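            # (the buffer holds 0 bytes in total, so any nonzero itemsize
            # divides it into zero elements)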
assert_equal(zs.view((dt, 1)).shape, (0,))
def test_dumps(self):
zs = self._zeros(10, int)
assert_equal(zs, pickle.loads(zs.dumps()))
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dt in [bytes, np.void, str]:
zs = self._zeros(10, dt)
p = pickle.dumps(zs, protocol=proto)
zs2 = pickle.loads(p)
assert_equal(zs.dtype, zs2.dtype)
def test_pickle_empty(self):
"""Checking if an empty array pickled and un-pickled will not cause a
segmentation fault"""
arr = np.array([]).reshape(999999, 0)
pk_dmp = pickle.dumps(arr)
pk_load = pickle.loads(pk_dmp)
assert pk_load.size == 0
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_pickle_with_buffercallback(self):
array = np.arange(10)
buffers = []
bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
protocol=5)
array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
# when using pickle protocol 5 with buffer callbacks,
# array_from_buffer is reconstructed from a buffer holding a view
# to the initial array's data, so modifying an element in array
# should modify it in array_from_buffer too.
array[0] = -1
assert array_from_buffer[0] == -1, array_from_buffer[0]
class TestMethods:
sort_kinds = ['quicksort', 'heapsort', 'stable']
def test_all_where(self):
a = np.array([[True, False, True],
[False, False, False],
[True, True, True]])
wh_full = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
wh_lower = np.array([[False],
[False],
[True]])
for _ax in [0, None]:
assert_equal(a.all(axis=_ax, where=wh_lower),
np.all(a[wh_lower[:,0],:], axis=_ax))
assert_equal(np.all(a, axis=_ax, where=wh_lower),
a[wh_lower[:,0],:].all(axis=_ax))
assert_equal(a.all(where=wh_full), True)
assert_equal(np.all(a, where=wh_full), True)
assert_equal(a.all(where=False), True)
assert_equal(np.all(a, where=False), True)
def test_any_where(self):
a = np.array([[True, False, True],
[False, False, False],
[True, True, True]])
wh_full = np.array([[False, True, False],
[True, True, True],
[False, False, False]])
wh_middle = np.array([[False],
[True],
[False]])
for _ax in [0, None]:
assert_equal(a.any(axis=_ax, where=wh_middle),
np.any(a[wh_middle[:,0],:], axis=_ax))
assert_equal(np.any(a, axis=_ax, where=wh_middle),
a[wh_middle[:,0],:].any(axis=_ax))
assert_equal(a.any(where=wh_full), False)
assert_equal(np.any(a, where=wh_full), False)
assert_equal(a.any(where=False), False)
assert_equal(np.any(a, where=False), False)
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
oned = np.ones(1)
# gh-12031, caused SEGFAULT
        assert_raises(TypeError, oned.choose, np.void(0), [oned])
out = np.array(0)
ret = np.choose(np.array(1), [10, 20, 30], out=out)
assert out is ret
assert_equal(out[()], 20)
# gh-6272 check overlap on out
x = np.arange(5)
y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap')
assert_equal(y, np.array([0, 1, 2]))
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
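            # note: this branch never runs for the ctypes iterated above,
            # since type objects never equal the old '1'/'b' typecode strings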
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert out is res
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
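        # (real and imaginary parts are rounded independently, using
        # round-half-to-even: 4.5 -> 4 but 1.5 -> 2)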
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
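        # Small added illustration (relies only on the documented sort
        # order): NaNs compare as largest, so they land at the end of the
        # result regardless of where they started.
        a = np.array([3.0, np.nan, 1.0, np.nan, 2.0])
        b = np.sort(a)
        assert_equal(b[:3], [1.0, 2.0, 3.0])
        assert_(np.isnan(b[3:]).all())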
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
    # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
@pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
np.longdouble])
def test_sort_unsigned(self, dtype):
a = np.arange(101, dtype=dtype)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
@pytest.mark.parametrize('dtype',
[np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64, np.longdouble])
def test_sort_signed(self, dtype):
a = np.arange(-50, 51, dtype=dtype)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "scalar sort, kind=%s" % (kind)
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
@pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble])
@pytest.mark.parametrize('part', ['real', 'imag'])
def test_sort_complex(self, part, dtype):
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
cdtype = {
np.single: np.csingle,
np.double: np.cdouble,
np.longdouble: np.clongdouble,
}[dtype]
a = np.arange(-50, 51, dtype=dtype)
b = a[::-1].copy()
ai = (a * (1+1j)).astype(cdtype)
bi = (b * (1+1j)).astype(cdtype)
setattr(ai, part, 1)
setattr(bi, part, 1)
for kind in self.sort_kinds:
msg = "complex sort, %s part == 1, kind=%s" % (part, kind)
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
def test_sort_complex_byte_swapping(self):
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
@pytest.mark.parametrize('dtype', [np.bytes_, np.unicode_])
def test_sort_string(self, dtype):
# np.array will perform the encoding to bytes for us in the bytes test
a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
def test_sort_object(self):
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
def test_sort_structured(self):
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
@pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]'])
def test_sort_time(self, dtype):
# test datetime64 and timedelta64 sorts.
a = np.arange(0, 101, dtype=dtype)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
def test_sort_axis(self):
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
def test_sort_size_0(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_sort_bad_ordering(self):
# test generic class with bogus ordering,
# should not segfault.
class Boom:
def __lt__(self, other):
return True
a = np.array([Boom()] * 100, dtype=object)
for kind in self.sort_kinds:
msg = "kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
        # gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
for kind in self.sort_kinds:
assert_raises(TypeError, arr.sort, kind=kind)
        # gh-3879
class Raiser:
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
for kind in self.sort_kinds:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
        # test a degraded dataset that would take minutes with a plain quicksort
d = np.arange(1000000)
do = d.copy()
x = d
        # create a median-of-3 killer input where each median is the
        # second-to-last element of the sorted quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
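        # NumPy's 'quicksort' is really introsort: once recursion depth
        # exceeds roughly 2*log2(n) it switches to heapsort, which is why
        # this adversarial input still sorts quickly.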
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
@pytest.mark.parametrize("dtype", ['O', np.int32, 'i,O'])
def test__deepcopy__(self, dtype):
# Force the entry of NULLs into array
a = np.empty(4, dtype=dtype)
ctypes.memset(a.ctypes.data, 0, a.nbytes)
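        # (for object dtype the zeroed bytes are NULL PyObject pointers,
        # which __deepcopy__ previously crashed on)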
# Ensure no error is raised, see gh-21833
b = a.__deepcopy__({})
a[0] = 42
with pytest.raises(AssertionError):
assert_array_equal(a, b)
def test__deepcopy__catches_failure(self):
class MyObj:
def __deepcopy__(self, *args, **kwargs):
raise RuntimeError
arr = np.array([1, MyObj(), 3], dtype='O')
with pytest.raises(RuntimeError):
arr.__deepcopy__({})
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
assert_raises_regex(ValueError, 'duplicate',
lambda: r.sort(order=['id', 'id']))
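        # pick the non-native byte order so the sort has to byte-swap keys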
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', 'U5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
for dtype in [np.int32, np.uint32, np.float32]:
a = np.arange(101, dtype=dtype)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "scalar argsort, kind=%s, dtype=%s" % (kind, dtype)
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in self.sort_kinds:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in self.sort_kinds:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in self.sort_kinds:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in self.sort_kinds:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in self.sort_kinds:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in self.sort_kinds:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
@pytest.mark.parametrize('a', [
np.array([0, 1, np.nan], dtype=np.float16),
np.array([0, 1, np.nan], dtype=np.float32),
np.array([0, 1, np.nan]),
])
def test_searchsorted_floats(self, a):
# test for floats arrays containing nans. Explicitly test
# half, single, and double precision floats to verify that
# the NaN-handling is correct.
msg = "Test real (%s) searchsorted with nans, side='l'" % a.dtype
b = a.searchsorted(a, side='left')
assert_equal(b, np.arange(3), msg)
msg = "Test real (%s) searchsorted with nans, side='r'" % a.dtype
b = a.searchsorted(a, side='right')
assert_equal(b, np.arange(1, 4), msg)
# check keyword arguments
a.searchsorted(v=1)
x = np.array([0, 1, np.nan], dtype='float32')
y = np.searchsorted(x, x[-1])
assert_equal(y, 2)
def test_searchsorted_complex(self):
# test for complex arrays containing nans.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='left')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='right')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
def test_searchsorted_n_elements(self):
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'left')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'right')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'left')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'right')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'left')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'right')
assert_equal(b, [0, 2, 2])
def test_searchsorted_unaligned_array(self):
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'left')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'right')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'left')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'right')
assert_equal(b, a + 1)
def test_searchsorted_resetting(self):
# Test smart resetting of binsearch indices
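        # (when searching several keys, the implementation can reuse the
        # previous hit to bound the next binary search; the descending
        # keys below check that those cached bounds are reset correctly)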
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'left')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'right')
assert_equal(b, [5, 5, 5])
def test_searchsorted_type_specific(self):
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'left')
assert_equal(b, out)
b = a.searchsorted(a, 'right')
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'left')
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'left')
assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode_)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_invalid_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0,
sorter=np.array((1, (2, 3)), dtype=object))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
def test_searchsorted_with_sorter(self):
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='left', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='right', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)  # shuffle the data; the sorter recovers the order
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'left', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'right', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'left', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'right', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'left', s)
assert_equal(b, out)
b = a.searchsorted(a, 'right', s)
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'left', s[:0])
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'left', s)
assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'left', s)
assert_equal(b, out)
b = a.searchsorted(a, 'right', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'left'), A))
assert_(not isinstance(a.searchsorted(b, 'right'), A))
assert_(not isinstance(a.searchsorted(b, 'left', s), A))
assert_(not isinstance(a.searchsorted(b, 'right', s), A))
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_argpartition_out_of_range(self, dtype):
        # Test that out-of-range values in kth raise an error, gh-5469
d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_partition_out_of_range(self, dtype):
        # Test that out-of-range values in kth raise an error, gh-5469
d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
def test_argpartition_integer(self):
        # Test that non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
        # Test that non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
@pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
def test_partition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
kth = np.array(0, dtype=kth_dtype)[()]
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, kth, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg)
@pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
def test_argpartition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
kth = np.array(0, dtype=kth_dtype)[()]
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, kth, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, kth, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
assert_equal(np.partition(d, 5, kind=k)[5], 5)
assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
assert_equal(np.partition(d, 6, kind=k)[6], 6)
assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
            # median-of-3 killer sequence: O(n^2) for quickselect with a pure
            # median-of-3 pivot; exercises the median-of-medians-of-5 code
            # used to keep the runtime O(n)
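            # (e.g. np.roll(np.arange(8), 4) -> [4, 5, 6, 7, 0, 1, 2, 3]; the
            # rolled arange below is such an adversarial input)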
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # shuffle each row in place; a bare map() is lazy in Python 3
            # and would never run
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
                # assert_array_less does not handle this comparison well,
                # check it manually
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
                # assert_array_less does not handle this comparison well,
                # check it manually
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
            assert_((d[k:] >= d[k]).all(),
                    msg="kth %d, %r not greater than or equal to %d"
                    % (k, d[k:], d[k]))
prev = k + 1
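    # Illustration of the invariant checked above: np.partition([3, 4, 2, 1], 2)
    # places 3 (the value sorted order puts at index 2) at index 2, with the
    # smaller values before it and the larger one after it.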
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # shuffle each row in place; a bare map() is lazy in Python 3
        # and would never run
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
@pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
def test_argpartition_gh5524(self, kth_dtype):
# A test for functionality of argpartition on lists.
kth = np.array(1, dtype=kth_dtype)[()]
d = [6, 7, 3, 2, 9, 0]
p = np.argpartition(d, kth)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
@pytest.mark.parametrize('func', (np.dot, np.matmul))
def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
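        # (BLAS *syrk computes a @ a.T as a symmetric rank-k update; these
        # cases check that the syrk fast path agrees with general gemm)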
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(func(eaf, eaf), eaf)
assert_equal(func(eaf.T, eaf), eaf)
assert_equal(func(eaf, eaf.T), eaf)
assert_equal(func(eaf.T, eaf.T), eaf)
assert_equal(func(eaf.T.copy(), eaf), eaf)
assert_equal(func(eaf, eaf.T.copy()), eaf)
assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(func(ebf, ebf), eaf)
assert_equal(func(ebf.T, ebf), eaf)
assert_equal(func(ebf, ebf.T), eaf)
assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
func(edf[::-1, :], edf.T),
func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
func(edf[:, ::-1], edf.T),
func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
func(edf, edf[::-1, :].T),
func(edf, edf[::-1, :].T.copy())
)
assert_equal(
func(edf, edf[:, ::-1].T),
func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(func(edf, edf.T), eddtf)
assert_equal(func(edf.T, edf), edtdf)
@pytest.mark.parametrize('func', (np.dot, np.matmul))
@pytest.mark.parametrize('dtype', 'ifdFD')
def test_no_dgemv(self, func, dtype):
# check vector arg for contiguous before gemv
# gh-12156
a = np.arange(8.0, dtype=dtype).reshape(2, 4)
b = np.broadcast_to(1., (4, 1))
ret1 = func(a, b)
ret2 = func(a, b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T)
assert_equal(ret1, ret2)
# check for unaligned data
dt = np.dtype(dtype)
a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
a = a.reshape(2, 4)
b = a[0]
# make sure it is not aligned
assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
ret1 = func(a, b)
ret2 = func(a.copy(), b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T.copy())
assert_equal(ret1, ret2)
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_dot_matmul_out(self):
# gh-9641
class Sub(np.ndarray):
pass
a = np.ones((2, 2)).view(Sub)
b = np.ones((2, 2)).view(Sub)
out = np.ones((2, 2))
# make sure out can be any ndarray (not only subclass of inputs)
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
def test_dot_matmul_inner_array_casting_fails(self):
class A:
def __array__(self, *args, **kwargs):
raise NotImplementedError
# Don't override the error from calling __array__()
assert_raises(NotImplementedError, np.dot, A(), A())
assert_raises(NotImplementedError, np.matmul, A(), A())
assert_raises(NotImplementedError, np.inner, A(), A())
def test_matmul_out(self):
# overlapping memory
a = np.arange(18).reshape(2, 3, 3)
b = np.matmul(a, a)
c = np.matmul(a, a, out=a)
assert_(c is a)
assert_equal(c, b)
a = np.arange(18).reshape(2, 3, 3)
c = np.matmul(a, a, out=a[::-1, ...])
assert_(c.base is a.base)
assert_equal(c, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_size_zero_memleak(self):
# Regression test for issue 9615
        # Exercises a special-case code path for dot products of length
        # zero in cblasfuncs (making it specific to floating-point dtypes).
a = np.array([], dtype=np.float64)
x = np.array(2.0)
for _ in range(100):
np.dot(a, a, out=x)
if HAS_REFCOUNT:
assert_(sys.getrefcount(x) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
out = np.array(1)
ret = a.trace(out=out)
assert ret is out
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
        # Non-contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
        # contiguous and 1-sized axis with non-matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test:
a = np.array([[1]])
a.strides = (123, 432)
# If the following stride is not 8, NPY_RELAXED_STRIDES_DEBUG is
# messing them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(np.AxisError, a.swapaxes, -5, 0)
assert_raises(np.AxisError, a.swapaxes, 4, 0)
assert_raises(np.AxisError, a.swapaxes, 0, -5)
assert_raises(np.AxisError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(TypeError, lambda: a.conj())
assert_raises(TypeError, lambda: a.conjugate())
def test_conjugate_out(self):
# Minimal test for the out argument being passed on correctly
# NOTE: The ability to pass `out` is currently undocumented!
a = np.array([1-1j, 1+1j, 23+23.0j])
out = np.empty_like(a)
res = a.conjugate(out)
assert res is out
assert_array_equal(out, a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestSequenceMethods:
    def test_array_contains(self):
        assert_(4.0 in np.arange(16.).reshape(4, 4))
        assert_(20.0 not in np.arange(16.).reshape(4, 4))
class TestBinop:
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
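    # For example, with `arr + obj`: if type(obj).__array_ufunc__ is None,
    # arr.__add__ returns NotImplemented and Python falls back to
    # obj.__radd__; otherwise arr.__add__ ends up calling np.add(arr, obj).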
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
'matmul': (np.matmul, False, float),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
# 'lt': (np.greater, False),
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace["__{0}__".format(op)] = op_impl
class_namespace["__r{0}__".format(op)] = rop_impl
class_namespace["__i{0}__".format(op)] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, "__{0}__".format(op))
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_method, obj, err_msg=err_msg)
# obj __op__ arr
arr_rmethod = getattr(arr, "__r{0}__".format(op))
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
assert_raises((TypeError, Coerced),
arr_rmethod, obj, err_msg=err_msg)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, "__i{0}__".format(op))
if inplace_override_expected:
                        assert_equal(arr_imethod(obj), NotImplemented,
                                     err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_imethod, obj,
err_msg=err_msg)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
@pytest.mark.parametrize("priority", [None, "runtime error"])
def test_ufunc_binop_bad_array_priority(self, priority):
# Mainly checks that this does not crash. The second array has a lower
# priority than -1 ("error value"). If the __radd__ actually exists,
# bad things can happen (I think via the scalar paths).
# In principle both of these can probably just be errors in the future.
class BadPriority:
@property
def __array_priority__(self):
if priority == "runtime error":
raise RuntimeError("RuntimeError in __array_priority__!")
return priority
def __radd__(self, other):
return "result"
class LowPriority(np.ndarray):
__array_priority__ = -1000
        # A failing __array_priority__ lookup is treated like the scalar
        # priority (smaller than -1000), so LowPriority's ufunc path wins;
        # the addition then runs elementwise and each element comes from
        # BadPriority.__radd__, i.e. 'result'.
res = np.arange(3).view(LowPriority) + BadPriority()
assert res.shape == (3,)
assert res[0] == 'result'
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_array_ufunc_index(self):
        # Check that index is set appropriately, also if only an output
        # is passed on (the latter is another regression test for github
        # bug 4753). This also checks implicitly that 'out' is always a tuple.
class CheckIndex:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
                # if we get here, self was not among the inputs, so it
                # must be one of the outputs.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs
np.modf(dummy, out=a)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
class SomeClass:
def __init__(self, num=None):
self.num = num
# want to ensure a fast pow path is not taken
def __mul__(self, other):
raise AssertionError('__mul__ should not be called')
def __div__(self, other):
raise AssertionError('__div__ should not be called')
def __pow__(self, exp):
return SomeClass(num=self.num ** exp)
def __eq__(self, other):
if isinstance(other, SomeClass):
return self.num == other.num
__rpow__ = __pow__
def pow_for(exp, arr):
return np.array([x ** exp for x in arr])
obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
def test_pos_array_ufunc_override(self):
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*[i.view(np.ndarray) for
i in inputs], **kwargs)
tst = np.array('foo').view(A)
with assert_raises(TypeError):
+tst
class TestTemporaryElide:
# elision is only triggered on relatively large arrays
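    # e.g. in `r = a + b + c`, the temporary produced by `a + b` has
    # refcount 1, so its buffer may be reused in place for the `+ c` step
    # instead of allocating yet another array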
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
        # def incref_elide(a):
        #     d = a.copy()     # refcount 1
        #     return d, d + d  # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
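        # exercise the elision path from Python as well (result discarded)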
d + d
# the return original should not be changed to an inplace operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # Scanning whether the refcount == 1 object is on the python stack,
        # to check that we are called directly from python, is flawed: the
        # object may still be above the stack pointer and we have no access
        # to the top of it.
        #
        # def incref_elide_l(l):
        #     return l[4] + l[4]  # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
# the return original should not be changed to an inplace operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
d = np.ones(200000, dtype=np.int64)
assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test no elision on broadcast to higher dimension
        # this only triggers the elision code path in debug mode; triggering
        # it in normal mode needs a matching dimension of at least 256kb,
        # i.e. a lot of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
def test_elide_scalar_readonly(self):
# The imaginary part of a real array is readonly. This needs to go
# through fast_scalar_power which is only called for powers of
# +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
# elision which can be gotten for the imaginary part of a real
# array. Should not error.
a = np.empty(100000, dtype=np.float64)
a.imag ** 2
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
class TestCAPI:
def test_IsPythonScalar(self):
from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
@pytest.mark.parametrize("converter",
[_multiarray_tests.run_scalar_intp_converter,
_multiarray_tests.run_scalar_intp_from_sequence])
def test_intp_sequence_converters(self, converter):
# Test simple values (-1 is special for error return paths)
assert converter(10) == (10,)
assert converter(-1) == (-1,)
# A 0-D array looks a bit like a sequence but must take the integer
# path:
assert converter(np.array(123)) == (123,)
# Test simple sequences (intp_from_sequence only supports length 1):
assert converter((10,)) == (10,)
assert converter(np.array([11])) == (11,)
@pytest.mark.parametrize("converter",
[_multiarray_tests.run_scalar_intp_converter,
_multiarray_tests.run_scalar_intp_from_sequence])
@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
reason="PyPy bug in error formatting")
def test_intp_sequence_converters_errors(self, converter):
with pytest.raises(TypeError,
match="expected a sequence of integers or a single integer, "):
converter(object())
with pytest.raises(TypeError,
match="expected a sequence of integers or a single integer, "
"got '32.0'"):
converter(32.)
with pytest.raises(TypeError,
match="'float' object cannot be interpreted as an integer"):
converter([32.])
with pytest.raises(ValueError,
match="Maximum allowed dimension"):
# These converters currently convert overflows to a ValueError
converter(2**64)
class TestSubscripting:
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
assert_(type(x[0, ...]) is np.ndarray)
class TestPickling:
    @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
                        reason=('this tests the error messages when trying '
                                'to use protocol 5 although it is not '
                                'available'))
    def test_correct_protocol5_error_message(self):
        array = np.arange(10)
        # Minimal sketch of the missing body (assumption: the original test
        # matched a specific error message, which is not reproduced here);
        # requesting protocol 5 on an interpreter without it should raise.
        with pytest.raises((ImportError, ValueError)):
            pickle.dumps(array, protocol=5)
def test_record_array_with_object_dtype(self):
my_object = object()
arr_with_object = np.array(
[(my_object, 1, 2.0)],
dtype=[('a', object), ('b', int), ('c', float)])
arr_without_object = np.array(
[('xxx', 1, 2.0)],
dtype=[('a', str), ('b', int), ('c', float)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_arr_with_object = pickle.loads(
pickle.dumps(arr_with_object, protocol=proto))
depickled_arr_without_object = pickle.loads(
pickle.dumps(arr_without_object, protocol=proto))
assert_equal(arr_with_object.dtype,
depickled_arr_with_object.dtype)
assert_equal(arr_without_object.dtype,
depickled_arr_without_object.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_f_contiguous_array(self):
f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
buffers = []
# When using pickle protocol 5, Fortran-contiguous arrays can be
# serialized using out-of-band buffers
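        # (protocol 5 hands large contiguous buffers to buffer_callback out
        # of band instead of copying them into the pickle stream; see PEP 574)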
bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
buffer_callback=buffers.append)
assert len(buffers) > 0
depickled_f_contiguous_array = pickle.loads(bytes_string,
buffers=buffers)
assert_equal(f_contiguous_array, depickled_f_contiguous_array)
def test_non_contiguous_array(self):
non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
assert not non_contiguous_array.flags.c_contiguous
assert not non_contiguous_array.flags.f_contiguous
# make sure non-contiguous arrays can be pickled-depickled
# using any protocol
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_non_contiguous_array = pickle.loads(
pickle.dumps(non_contiguous_array, protocol=proto))
assert_equal(non_contiguous_array, depickled_non_contiguous_array)
def test_roundtrip(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
refs = [weakref.ref(a) for a in DATA]
for a in DATA:
assert_equal(
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
del a, DATA, carray
break_cycles()
# check for reference leaks (gh-12793)
for ref in refs:
assert ref() is None
def _loads(self, obj):
return pickle.loads(obj, encoding='latin1')
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(s)
assert_equal(a, p)
def test_datetime64_byteorder(self):
original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]')
original_byte_reversed = original.copy(order='K')
original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S')
original_byte_reversed.byteswap(inplace=True)
new = pickle.loads(pickle.dumps(original_byte_reversed))
assert_equal(original.dtype, new.dtype)
class TestFancyIndexing:
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare:
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([u"This", u"is", u"example"])
g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmaxArgminCommon:
sizes = [(), (3,), (3, 2), (2, 3),
(3, 3), (2, 3, 4), (4, 3, 2),
(1, 2, 3, 4), (2, 3, 4, 1),
(3, 4, 1, 2), (4, 1, 2, 3),
(64,), (128,), (256,)]
@pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis)
for axis in list(range(-len(size), len(size))) + [None]]
for size in sizes]))
@pytest.mark.parametrize('method', [np.argmax, np.argmin])
def test_np_argmin_argmax_keepdims(self, size, axis, method):
arr = np.random.normal(size=size)
# contiguous arrays
if axis is None:
new_shape = [1 for _ in range(len(size))]
else:
new_shape = list(size)
new_shape[axis] = 1
new_shape = tuple(new_shape)
_res_orig = method(arr, axis=axis)
res_orig = _res_orig.reshape(new_shape)
res = method(arr, axis=axis, keepdims=True)
assert_equal(res, res_orig)
assert_(res.shape == new_shape)
outarray = np.empty(res.shape, dtype=res.dtype)
res1 = method(arr, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
if len(size) > 0:
wrong_shape = list(new_shape)
if axis is not None:
wrong_shape[axis] = 2
else:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
# non-contiguous arrays
if axis is None:
new_shape = [1 for _ in range(len(size))]
else:
new_shape = list(size)[::-1]
new_shape[axis] = 1
new_shape = tuple(new_shape)
_res_orig = method(arr.T, axis=axis)
res_orig = _res_orig.reshape(new_shape)
res = method(arr.T, axis=axis, keepdims=True)
assert_equal(res, res_orig)
assert_(res.shape == new_shape)
outarray = np.empty(new_shape[::-1], dtype=res.dtype)
outarray = outarray.T
res1 = method(arr.T, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
if len(size) > 0:
                # an out array with one dimension fewer should raise an
                # error for a non-empty array
with pytest.raises(ValueError):
method(arr[0], axis=axis,
out=outarray, keepdims=True)
if len(size) > 0:
wrong_shape = list(new_shape)
if axis is not None:
wrong_shape[axis] = 2
else:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
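    # A minimal added sketch (not part of the original suite) of the
    # keepdims contract exercised above: the keepdims result equals the
    # plain result with the reduced axis reinserted as length one.
    @pytest.mark.parametrize('method', [np.argmax, np.argmin])
    def test_keepdims_matches_expand_dims_sketch(self, method):
        arr = np.random.normal(size=(3, 4))
        for axis in (0, 1):
            plain = method(arr, axis=axis)
            kept = method(arr, axis=axis, keepdims=True)
            assert_array_equal(kept, np.expand_dims(plain, axis))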
@pytest.mark.parametrize('method', ['max', 'min'])
def test_all(self, method):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
arg_method = getattr(a, 'arg' + method)
val_method = getattr(a, method)
for i in range(a.ndim):
a_maxmin = val_method(i)
aarg_maxmin = arg_method(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(a_maxmin == aarg_maxmin.choose(
*a.transpose(i, *axes))))
@pytest.mark.parametrize('method', ['argmax', 'argmin'])
def test_output_shape(self, method):
# see also gh-616
a = np.ones((10, 5))
arg_method = getattr(a, method)
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, arg_method, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, arg_method, -1, out)
        # these could possibly be relaxed (older versions allowed even the
        # cases above)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, arg_method, -1, out)
out = np.ones(10, dtype=np.int_)
arg_method(-1, out=out)
assert_equal(out, arg_method(-1))
@pytest.mark.parametrize('ndim', [0, 1])
@pytest.mark.parametrize('method', ['argmax', 'argmin'])
def test_ret_is_out(self, ndim, method):
a = np.ones((4,) + (256,)*ndim)
arg_method = getattr(a, method)
out = np.empty((256,)*ndim, dtype=np.intp)
ret = arg_method(axis=0, out=out)
assert ret is out
@pytest.mark.parametrize('np_array, method, idx, val',
[(np.zeros, 'argmax', 5942, "as"),
(np.ones, 'argmin', 6001, "0")])
def test_unicode(self, np_array, method, idx, val):
d = np_array(6031, dtype='<U9')
arg_method = getattr(d, method)
d[idx] = val
assert_equal(arg_method(), idx)
@pytest.mark.parametrize('arr_method, np_method',
[('argmax', np.argmax),
('argmin', np.argmin)])
def test_np_vs_ndarray(self, arr_method, np_method):
# make sure both ndarray.argmax/argmin and
# numpy.argmax/argmin support out/axis args
a = np.random.normal(size=(2, 3))
arg_method = getattr(a, arr_method)
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(arg_method(1, out1), np_method(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(arg_method(out=out1, axis=0),
np_method(a, out=out2, axis=0))
assert_equal(out1, out2)
@pytest.mark.leaks_references(reason="replaces None with NULL.")
@pytest.mark.parametrize('method, vals',
[('argmax', (10, 30)),
('argmin', (30, 10))])
def test_object_with_NULLs(self, method, vals):
# See gh-6032
a = np.empty(4, dtype='O')
arg_method = getattr(a, method)
ctypes.memset(a.ctypes.data, 0, a.nbytes)
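        # every slot is now NULL (the initial None references leak, hence
        # the leaks_references mark above); argmax/argmin must not
        # dereference them and should report index 0 for an all-NULL array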
assert_equal(arg_method(), 0)
a[3] = vals[0]
assert_equal(arg_method(), 3)
a[1] = vals[1]
assert_equal(arg_method(), 1)
class TestArgmax:
usg_data = [
([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 0),
([3, 3, 3, 3, 2, 2, 2, 2], 0),
([0, 1, 2, 3, 4, 5, 6, 7], 7),
([7, 6, 5, 4, 3, 2, 1, 0], 0)
]
sg_data = usg_data + [
([1, 2, 3, 4, -4, -3, -2, -1], 3),
([1, 2, 3, 4, -1, -2, -3, -4], 3)
]
darr = [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(usg_data, (
np.uint8, np.uint16, np.uint32, np.uint64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(sg_data, (
np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product((
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
# To hit the tail of SIMD multi-level(x4, x1) inner loops
            # on variant SIMD widths
([1] * (2*5-1) + [np.nan], 2*5-1),
([1] * (4*5-1) + [np.nan], 4*5-1),
([1] * (8*5-1) + [np.nan], 8*5-1),
([1] * (16*5-1) + [np.nan], 16*5-1),
([1] * (32*5-1) + [np.nan], 32*5-1)
), (
np.float32, np.float64
))
)]
nan_arr = darr + [
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
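    # In all the NaN/NaT cases above, the expected index is that of the
    # *first* NaN/NaT: like np.maximum, the argmax reduction propagates
    # NaN/NaT, so the first one encountered wins regardless of other values.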
@pytest.mark.parametrize('data', nan_arr)
def test_combinations(self, data):
arr, pos = data
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
val = np.max(arr)
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], val, err_msg="%r" % arr)
# add padding to test SIMD loops
rarr = np.repeat(arr, 129)
rpos = pos * 129
assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr)
assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr)
padd = np.repeat(np.min(arr), 513)
rarr = np.concatenate((arr, padd))
rpos = pos
assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr)
assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr)
def test_maximum_signed_integers(self):
a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8)
assert_equal(np.argmax(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmax(a), 129)
a = np.array([1, 2**15 - 1, -2**15], dtype=np.int16)
assert_equal(np.argmax(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmax(a), 129)
a = np.array([1, 2**31 - 1, -2**31], dtype=np.int32)
assert_equal(np.argmax(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmax(a), 129)
a = np.array([1, 2**63 - 1, -2**63], dtype=np.int64)
assert_equal(np.argmax(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmax(a), 129)
class TestArgmin:
usg_data = [
([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 8),
([3, 3, 3, 3, 2, 2, 2, 2], 4),
([0, 1, 2, 3, 4, 5, 6, 7], 0),
([7, 6, 5, 4, 3, 2, 1, 0], 7)
]
sg_data = usg_data + [
([1, 2, 3, 4, -4, -3, -2, -1], 4),
([1, 2, 3, 4, -1, -2, -3, -4], 7)
]
darr = [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(usg_data, (
np.uint8, np.uint16, np.uint32, np.uint64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product(sg_data, (
np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
))
)]
darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
itertools.product((
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
# To hit the tail of SIMD multi-level(x4, x1) inner loops
            # on variant SIMD widths
([1] * (2*5-1) + [np.nan], 2*5-1),
([1] * (4*5-1) + [np.nan], 4*5-1),
([1] * (8*5-1) + [np.nan], 8*5-1),
([1] * (16*5-1) + [np.nan], 16*5-1),
([1] * (32*5-1) + [np.nan], 32*5-1)
), (
np.float32, np.float64
))
)]
nan_arr = darr + [
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
@pytest.mark.parametrize('data', nan_arr)
def test_combinations(self, data):
arr, pos = data
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
min_val = np.min(arr)
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
# add padding to test SIMD loops
rarr = np.repeat(arr, 129)
rpos = pos * 129
assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr)
assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr)
padd = np.repeat(np.max(arr), 513)
rarr = np.concatenate((arr, padd))
rpos = pos
assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr)
assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1, 2**7 - 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmin(a), 129)
a = np.array([1, -2**15, -2**15 + 1, 2**15 - 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmin(a), 129)
a = np.array([1, -2**31, -2**31 + 1, 2**31 - 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmin(a), 129)
a = np.array([1, -2**63, -2**63 + 1, 2**63 - 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
        a = a.repeat(129)
        assert_equal(np.argmin(a), 129)
class TestMinMax:
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# Do not ignore NaT
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[3] = 'NaT'
assert_equal(np.amin(a), a[3])
assert_equal(np.amax(a), a[3])
class TestNewaxis:
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip:
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
# The tests that call us pass clip_min and clip_max that
# might not fit in the destination dtype. They were written
# assuming the previous unsafe casting, which now must be
# passed explicitly to avoid a warning.
x.clip(clip_min, clip_max, x, casting='unsafe')
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
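        # clip is built on comparisons, and comparisons with NaN are False,
        # so NaN entries pass through unclipped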
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress:
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask:
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], np.array(val, T))
def test_ip_types(self):
unchecked_types = [bytes, str, np.void]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T), T, mask, val)
        # Also test a string dtype with an atypical length
dt = np.dtype("S3")
self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3])
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_overlaps(self):
# gh-6272 check overlap
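        # the expected values assume np.putmask buffers the overlap: source
        # elements are read as they were before any destination write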
x = np.array([True, False, True, False])
np.putmask(x[1:4], [True, True, True], x[:3])
assert_equal(x, np.array([True, True, False, True]))
x = np.array([True, False, True, False])
np.putmask(x[1:4], x[:3], [True, False, True])
assert_equal(x, np.array([True, True, True, True]))
def test_writeable(self):
a = np.arange(5)
a.flags.writeable = False
with pytest.raises(ValueError):
np.putmask(a, a >= 2, 3)
class TestTake:
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, str, np.void]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T))
        # Also test a string dtype with an atypical length
self.tst_basic(x.astype("S3"))
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
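    # A small added sketch (not from the original suite) spelling out the
    # three index modes checked above: 'raise' rejects out-of-bounds
    # indices, 'clip' saturates them, and 'wrap' reduces them modulo the
    # axis length.
    def test_take_modes_sketch(self):
        x = np.arange(5)
        assert_array_equal(x.take([7], mode='clip'), [4])
        assert_array_equal(x.take([7], mode='wrap'), [2])
        assert_raises(IndexError, x.take, [7])  # default mode is 'raise'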
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
def test_out_overlap(self):
# gh-6272 check overlap on out
x = np.arange(5)
y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap')
assert_equal(y, np.array([1, 2, 3]))
@pytest.mark.parametrize('shape', [(1, 2), (1,), ()])
def test_ret_is_out(self, shape):
# 0d arrays should not be an exception to this rule
x = np.arange(5)
inds = np.zeros(shape, dtype=np.intp)
out = np.zeros(shape, dtype=x.dtype)
ret = np.take(x, inds, out=out)
assert ret is out
class TestLexsort:
    @pytest.mark.parametrize('dtype', [
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64
])
def test_basic(self, dtype):
a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)
b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
assert_array_equal(a[idx], np.sort(a))
def test_mixed(self):
a = np.array([1, 2, 1, 3, 1, 5])
b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
def test_datetime(self):
        a = np.array([0, 0, 0], dtype='datetime64[D]')
        b = np.array([2, 1, 0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
        a = np.array([0, 0, 0], dtype='timedelta64[D]')
        b = np.array([2, 1, 0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
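    # Added sketch: np.lexsort sorts by the *last* key first, so
    # lexsort((b, a)) orders primarily by ``a`` and breaks ties with ``b``,
    # matching a stable sort on b followed by a stable sort on a.
    def test_key_order_sketch(self):
        a = np.array([2, 1, 2, 1])
        b = np.array([0, 1, 1, 0])
        assert_array_equal(np.lexsort((b, a)), [3, 1, 0, 2])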
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
class TestIO:
"""Test tofile, fromfile, tobytes, and fromstring"""
@pytest.fixture()
def x(self):
shape = (2, 4, 3)
rand = np.random.random
x = rand(shape) + rand(shape).astype(complex) * 1j
x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan]
return x
@pytest.fixture(params=["string", "path_obj"])
def tmp_filename(self, tmp_path, request):
# This fixture covers two cases:
# one where the filename is a string and
# another where it is a pathlib object
filename = tmp_path / "file"
if request.param == "string":
filename = str(filename)
yield filename
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(OSError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(OSError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_fromstring_count0(self):
d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0)
assert d.shape == (0,)
def test_empty_files_text(self, tmp_filename):
with open(tmp_filename, 'w') as f:
pass
y = np.fromfile(tmp_filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_binary(self, tmp_filename):
with open(tmp_filename, 'wb') as f:
pass
y = np.fromfile(tmp_filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self, x, tmp_filename):
with open(tmp_filename, 'wb') as f:
x.tofile(f)
# NB. doesn't work with flush+seek, due to use of C stdio
with open(tmp_filename, 'rb') as f:
y = np.fromfile(f, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_roundtrip(self, x, tmp_filename):
x.tofile(tmp_filename)
y = np.fromfile(tmp_filename, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_roundtrip_dump_pathlib(self, x, tmp_filename):
p = pathlib.Path(tmp_filename)
x.dump(p)
y = np.load(p, allow_pickle=True)
assert_array_equal(y, x)
def test_roundtrip_binary_str(self, x):
s = x.tobytes()
y = np.frombuffer(s, dtype=x.dtype)
assert_array_equal(y, x.flat)
s = x.tobytes('F')
y = np.frombuffer(s, dtype=x.dtype)
assert_array_equal(y, x.flatten('F'))
def test_roundtrip_str(self, x):
x = x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() renders floats with less precision than repr()
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self, x):
x = x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self, x, tmp_filename):
# gh-6246
x.tofile(tmp_filename)
def fail(*args, **kwargs):
raise OSError('Can not tell or seek')
with io.open(tmp_filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
assert_raises(OSError, np.fromfile, f, dtype=x.dtype)
def test_io_open_unbuffered_fromfile(self, x, tmp_filename):
# gh-6632
x.tofile(tmp_filename)
with io.open(tmp_filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_largish_file(self, tmp_filename):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(tmp_filename)
assert_equal(os.path.getsize(tmp_filename), d.nbytes)
assert_array_equal(d, np.fromfile(tmp_filename))
# check offset
with open(tmp_filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
# check append mode (gh-8329)
open(tmp_filename, "w").close() # delete file contents
with open(tmp_filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(tmp_filename))
with open(tmp_filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self, x, tmp_filename):
# gh-6632
x.tofile(tmp_filename)
with io.open(tmp_filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_file_position_after_fromfile(self, tmp_filename):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
with open(tmp_filename, 'wb') as f:
f.seek(size-1)
f.write(b'\0')
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
with open(tmp_filename, mode) as f:
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self, tmp_filename):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
with open(tmp_filename, 'wb') as f:
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
with open(tmp_filename, 'r+b') as f:
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
def test_load_object_array_fromfile(self, tmp_filename):
# gh-12300
with open(tmp_filename, 'w') as f:
# Ensure we have a file with consistent contents
pass
with open(tmp_filename, 'rb') as f:
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, f, dtype=object)
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, tmp_filename, dtype=object)
def test_fromfile_offset(self, x, tmp_filename):
with open(tmp_filename, 'wb') as f:
x.tofile(f)
with open(tmp_filename, 'rb') as f:
y = np.fromfile(f, dtype=x.dtype, offset=0)
assert_array_equal(y, x.flat)
with open(tmp_filename, 'rb') as f:
count_items = len(x.flat) // 8
offset_items = len(x.flat) // 4
offset_bytes = x.dtype.itemsize * offset_items
y = np.fromfile(
f, dtype=x.dtype, count=count_items, offset=offset_bytes
)
assert_array_equal(
y, x.flat[offset_items:offset_items+count_items]
)
# subsequent seeks should stack
offset_bytes = x.dtype.itemsize
z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes)
assert_array_equal(z, x.flat[offset_items+count_items+1:])
with open(tmp_filename, 'wb') as f:
x.tofile(f, sep=",")
with open(tmp_filename, 'rb') as f:
assert_raises_regex(
TypeError,
"'offset' argument only permitted for binary files",
np.fromfile, tmp_filename, dtype=x.dtype,
sep=",", offset=1)
@pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t")
def test_fromfile_bad_dup(self, x, tmp_filename):
def dup_str(fd):
return 'abc'
def dup_bigint(fd):
return 2**68
old_dup = os.dup
try:
with open(tmp_filename, 'wb') as f:
x.tofile(f)
for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)):
os.dup = dup
assert_raises(exc, np.fromfile, f)
finally:
os.dup = old_dup
def _check_from(self, s, value, filename, **kw):
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
else:
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
with open(filename, 'wb') as f:
f.write(s)
y = np.fromfile(filename, **kw)
assert_array_equal(y, value)
@pytest.fixture(params=["period", "comma"])
def decimal_sep_localization(self, request):
"""
Including this fixture in a test will automatically
execute it with both types of decimal separator.
So::
def test_decimal(decimal_sep_localization):
pass
is equivalent to the following two tests::
def test_decimal_period_separator():
pass
def test_decimal_comma_separator():
with CommaDecimalPointLocale():
pass
"""
if request.param == "period":
yield
elif request.param == "comma":
with CommaDecimalPointLocale():
yield
else:
assert False, request.param
def test_nan(self, tmp_filename, decimal_sep_localization):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
tmp_filename,
sep=' ')
def test_inf(self, tmp_filename, decimal_sep_localization):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
tmp_filename,
sep=' ')
def test_numbers(self, tmp_filename, decimal_sep_localization):
self._check_from(
b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133],
tmp_filename,
sep=' ')
def test_binary(self, tmp_filename):
self._check_from(
b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
tmp_filename,
dtype='<f4')
@pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self, tmp_filename):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, sep=',')
def test_counted_string(self, tmp_filename, decimal_sep_localization):
self._check_from(
b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=4, sep=',')
self._check_from(
b'1,2,3,4', [1., 2., 3.], tmp_filename, count=3, sep=',')
self._check_from(
b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=-1, sep=',')
def test_string_with_ws(self, tmp_filename):
self._check_from(
b'1 2 3 4 ', [1, 2, 3, 4], tmp_filename, dtype=int, sep=' ')
def test_counted_string_with_ws(self, tmp_filename):
self._check_from(
b'1 2 3 4 ', [1, 2, 3], tmp_filename, count=3, dtype=int,
sep=' ')
def test_ascii(self, tmp_filename, decimal_sep_localization):
self._check_from(
b'1 , 2 , 3 , 4', [1., 2., 3., 4.], tmp_filename, sep=',')
self._check_from(
b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, dtype=float, sep=',')
def test_malformed(self, tmp_filename, decimal_sep_localization):
with assert_warns(DeprecationWarning):
self._check_from(
b'1.234 1,234', [1.234, 1.], tmp_filename, sep=' ')
def test_long_sep(self, tmp_filename):
self._check_from(
b'1_x_3_x_4_x_5', [1, 3, 4, 5], tmp_filename, sep='_x_')
def test_dtype(self, tmp_filename):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, tmp_filename, sep=',', dtype=np.int_)
def test_dtype_bool(self, tmp_filename):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
with open(tmp_filename, 'wb') as f:
f.write(s)
y = np.fromfile(tmp_filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self, tmp_filename, decimal_sep_localization):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with open(tmp_filename, 'w') as f:
x.tofile(f, sep=',')
with open(tmp_filename, 'r') as f:
s = f.read()
        # The exact text depends on the float repr, so compare the parsed
        # values instead of the raw string:
        # assert_equal(s, '1.51,2.0,3.51,4.0')
        y = np.array([float(p) for p in s.split(',')])
        assert_array_equal(x, y)
def test_tofile_format(self, tmp_filename, decimal_sep_localization):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with open(tmp_filename, 'w') as f:
x.tofile(f, sep=',', format='%.2f')
with open(tmp_filename, 'r') as f:
s = f.read()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_tofile_cleanup(self, tmp_filename):
        x = np.zeros(10, dtype=object)
with open(tmp_filename, 'wb') as f:
assert_raises(OSError, lambda: x.tofile(f, sep=''))
        # The dup'ed file handle must be closed, or os.remove will fail on Windows
os.remove(tmp_filename)
# Also make sure that we close the Python handle
assert_raises(OSError, lambda: x.tofile(tmp_filename))
os.remove(tmp_filename)
def test_fromfile_subarray_binary(self, tmp_filename):
# Test subarray dtypes which are absorbed into the shape
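        # e.g. 24 int32 scalars read back as "(3,4)i4" give two records of
        # shape (3, 4), i.e. a (2, 3, 4) result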
x = np.arange(24, dtype="i4").reshape(2, 3, 4)
x.tofile(tmp_filename)
res = np.fromfile(tmp_filename, dtype="(3,4)i4")
assert_array_equal(x, res)
x_str = x.tobytes()
with assert_warns(DeprecationWarning):
# binary fromstring is deprecated
res = np.fromstring(x_str, dtype="(3,4)i4")
assert_array_equal(x, res)
def test_parsing_subarray_unsupported(self, tmp_filename):
# We currently do not support parsing subarray dtypes
data = "12,42,13," * 50
with pytest.raises(ValueError):
expected = np.fromstring(data, dtype="(3,)i", sep=",")
with open(tmp_filename, "w") as f:
f.write(data)
with pytest.raises(ValueError):
np.fromfile(tmp_filename, dtype="(3,)i", sep=",")
def test_read_shorter_than_count_subarray(self, tmp_filename):
# Test that requesting more values does not cause any problems
# in conjunction with subarray dimensions being absorbed into the
# array dimension.
expected = np.arange(511 * 10, dtype="i").reshape(-1, 10)
binary = expected.tobytes()
with pytest.raises(ValueError):
with pytest.warns(DeprecationWarning):
np.fromstring(binary, dtype="(10,)i", count=10000)
expected.tofile(tmp_filename)
res = np.fromfile(tmp_filename, dtype="(10,)i", count=10000)
assert_array_equal(res, expected)
class TestFromBuffer:
@pytest.mark.parametrize('byteorder', ['<', '>'])
@pytest.mark.parametrize('dtype', [float, int, complex])
def test_basic(self, byteorder, dtype):
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7)) * 5).astype(dt)
buf = x.tobytes()
assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
@pytest.mark.parametrize("obj", [np.arange(10), b"12345678"])
def test_array_base(self, obj):
# Objects (including NumPy arrays), which do not use the
# `release_buffer` slot should be directly used as a base object.
# See also gh-21612
new = np.frombuffer(obj)
assert new.base is obj
def test_empty(self):
assert_array_equal(np.frombuffer(b''), np.array([]))
@pytest.mark.skipif(IS_PYPY,
reason="PyPy's memoryview currently does not track exports. See: "
"https://foss.heptapod.net/pypy/pypy/-/issues/3724")
def test_mmap_close(self):
# The old buffer protocol was not safe for some things that the new
# one is. But `frombuffer` always used the old one for a long time.
# Checks that it is safe with the new one (using memoryviews)
with tempfile.TemporaryFile(mode='wb') as tmp:
tmp.write(b"asdf")
tmp.flush()
mm = mmap.mmap(tmp.fileno(), 0)
arr = np.frombuffer(mm, dtype=np.uint8)
with pytest.raises(BufferError):
mm.close() # cannot close while array uses the buffer
del arr
mm.close()
class TestFlat:
def setup_method(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
    def test_contiguous(self):
        with pytest.raises(ValueError):
            self.a.flat[12] = 100.0
        assert_(self.a.flat[12] == 12.0)
    def test_discontiguous(self):
        with pytest.raises(ValueError):
            self.b.flat[4] = 100.0
        assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is False)
assert_(c.flags.writebackifcopy is False)
assert_(d.flags.writebackifcopy is False)
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount(self):
# includes regression test for reference count error gh-13165
inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None]
indtype = np.dtype(np.intp)
rc_indtype = sys.getrefcount(indtype)
for ind in inds:
rc_ind = sys.getrefcount(ind)
for _ in range(100):
try:
self.a.flat[ind]
except IndexError:
pass
assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
def test_index_getset(self):
it = np.arange(10).reshape(2, 1, 5).flat
with pytest.raises(AttributeError):
it.index = 10
for _ in it:
pass
# Check the value of `.index` is updated correctly (see also gh-19153)
# If the type was incorrect, this would show up on big-endian machines
assert it.index == it.base.size
class TestResize:
@_no_tracing
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
@_no_tracing
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_0d_shape(self):
        # do it multiple times to check it does not break the alloc cache, gh-9216
        for _ in range(10):
x = np.empty((1,))
x.resize(())
assert_equal(x.shape, ())
assert_equal(x.size, 1)
x = np.empty(())
x.resize((1,))
assert_equal(x.shape, (1,))
assert_equal(x.size, 1)
def test_invalid_arguments(self):
assert_raises(TypeError, np.eye(3).resize, 'hi')
assert_raises(ValueError, np.eye(3).resize, -1)
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
@_no_tracing
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
@_no_tracing
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
@_no_tracing
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
def test_empty_view(self):
# check that sizes containing a zero don't trigger a reallocate for
# already empty arrays
x = np.zeros((10, 0), int)
x_view = x[...]
x_view.resize((0, 10))
x_view.resize((0, 100))
def test_check_weakref(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
xref = weakref.ref(x)
assert_raises(ValueError, x.resize, (5, 1))
del xref # avoid pyflakes unused variable warning.
class TestRecord:
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_dtype_init():
np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_dtype_init)
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(b'a', int)])
assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
dt = np.dtype([((b'a', 'b'), int)])
assert_raises(TypeError, dt.__getitem__, b'a')
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, b'a')
y = x[0]
assert_raises(IndexError, y.__getitem__, b'a')
def test_multiple_field_name_unicode(self):
def test_dtype_unicode():
np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
# Error raised when multiple fields have the same name(unicode included)
assert_raises(ValueError, test_dtype_unicode)
def test_fromarrays_unicode(self):
        # A single comma-separated string of names provided to fromarrays()
        # may be a unicode string:
        x = np.core.records.fromarrays([[0], [1]], names='a,b', formats='i4,i4')
assert_equal(x['a'][0], 0)
assert_equal(x['b'][0], 1)
def test_unicode_order(self):
        # Test that we can sort with order given as a (unicode) field name:
        name = 'b'
        x = np.array([1, 3, 2], dtype=[(name, int)])
        x.sort(order=name)
        assert_equal(x['b'], np.array([1, 2, 3]))
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, b'f1', 1)
assert_raises(IndexError, a.__getitem__, b'f1')
assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
b = a.copy()
fn1 = str('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = str('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = str('f3')
sfn1 = str('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = str('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
assert_(hash(a[0]) == hash(a[1]))
assert_(hash(a[0]) == hash(b[0]))
assert_(hash(a[0]) != hash(b[1]))
assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
def test_multifield_indexing_view(self):
a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
v = a[['a', 'c']]
assert_(v.base is a)
assert_(v.dtype == np.dtype({'names': ['a', 'c'],
'formats': ['i4', 'u4'],
'offsets': [0, 8]}))
v[:] = (4,5)
assert_equal(a[0].item(), (4, 1, 5))
class TestView:
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats:
funcs = [_mean, _var, _std]
def setup_method(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_python_type(self):
for x in (np.float16(1.), 1, 1., 1+0j):
assert_equal(np.mean([x]), 1.)
assert_equal(np.std([x]), 0.)
assert_equal(np.var([x]), 0.)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
    def test_ddof(self):
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
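    # Added sketch of the identity exercised above: var with ddof rescales
    # the ddof=0 result by n / (n - ddof), since only the divisor of the
    # summed squared deviations changes.
    def test_ddof_scaling_sketch(self):
        x = np.random.random(10)
        n = x.size
        for ddof in range(3):
            assert_almost_equal(x.var(ddof=ddof), x.var() * n / (n - ddof))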
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
        # This fails if the sum inside mean is done in float16 instead
        # of float32.
assert_(_mean(np.ones(100000, dtype='float16')) == 1)
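    # Added illustration of why the guard above matters: the float16
    # spacing at 2048 is 2, so a float16 accumulator stalls (2048 + 1
    # rounds back to 2048) long before 100000 and the mean would come
    # out low.
    def test_mean_float16_spacing_sketch(self):
        assert_(np.float16(2048) + np.float16(1) == np.float16(2048))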
def test_mean_axis_error(self):
# Ensure that AxisError is raised instead of IndexError when axis is
# out of bounds, see gh-15817.
with assert_raises(np.core._exceptions.AxisError):
np.arange(10).mean(axis=2)
def test_mean_where(self):
a = np.arange(16).reshape((4, 4))
wh_full = np.array([[False, True, False, True],
[True, False, True, False],
[True, True, False, False],
[False, False, True, True]])
wh_partial = np.array([[False],
[True],
[True],
[False]])
_cases = [(1, True, [1.5, 5.5, 9.5, 13.5]),
(0, wh_full, [6., 5., 10., 9.]),
(1, wh_full, [2., 5., 8.5, 14.5]),
(0, wh_partial, [6., 7., 8., 9.])]
for _ax, _wh, _res in _cases:
assert_allclose(a.mean(axis=_ax, where=_wh),
np.array(_res))
assert_allclose(np.mean(a, axis=_ax, where=_wh),
np.array(_res))
a3d = np.arange(16).reshape((2, 2, 4))
_wh_partial = np.array([False, True, True, False])
_res = [[1.5, 5.5], [9.5, 13.5]]
assert_allclose(a3d.mean(axis=2, where=_wh_partial),
np.array(_res))
assert_allclose(np.mean(a3d, axis=2, where=_wh_partial),
np.array(_res))
with pytest.warns(RuntimeWarning) as w:
assert_allclose(a.mean(axis=1, where=wh_partial),
np.array([np.nan, 5.5, 9.5, np.nan]))
with pytest.warns(RuntimeWarning) as w:
assert_equal(a.mean(where=False), np.nan)
with pytest.warns(RuntimeWarning) as w:
assert_equal(np.mean(a, where=False), np.nan)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
@pytest.mark.parametrize(('complex_dtype', 'ndec'), (
('complex64', 6),
('complex128', 7),
('clongdouble', 7),
))
def test_var_complex_values(self, complex_dtype, ndec):
# Test fast-paths for every builtin complex type
for axis in [0, 1, None]:
mat = self.cmat.copy().astype(complex_dtype)
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt, decimal=ndec)
def test_var_dimensions(self):
        # The complex-number paths in _var introduce additions on views
        # that increase dimensions; ensure this generalizes to higher dims
mat = np.stack([self.cmat]*3)
for axis in [0, 1, 2, -1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_var_complex_byteorder(self):
# Test that var fast-path does not cause failures for complex arrays
# with non-native byteorder
cmat = self.cmat.copy().astype('complex128')
cmat_swapped = cmat.astype(cmat.dtype.newbyteorder())
assert_almost_equal(cmat.var(), cmat_swapped.var())
def test_var_axis_error(self):
# Ensure that AxisError is raised instead of IndexError when axis is
# out of bounds, see gh-15817.
with assert_raises(np.core._exceptions.AxisError):
np.arange(10).var(axis=2)
def test_var_where(self):
a = np.arange(25).reshape((5, 5))
wh_full = np.array([[False, True, False, True, True],
[True, False, True, True, False],
[True, True, False, False, True],
[False, True, True, False, True],
[True, False, True, True, False]])
wh_partial = np.array([[False],
[True],
[True],
[False],
[True]])
_cases = [(0, True, [50., 50., 50., 50., 50.]),
(1, True, [2., 2., 2., 2., 2.])]
for _ax, _wh, _res in _cases:
assert_allclose(a.var(axis=_ax, where=_wh),
np.array(_res))
assert_allclose(np.var(a, axis=_ax, where=_wh),
np.array(_res))
a3d = np.arange(16).reshape((2, 2, 4))
_wh_partial = np.array([False, True, True, False])
_res = [[0.25, 0.25], [0.25, 0.25]]
assert_allclose(a3d.var(axis=2, where=_wh_partial),
np.array(_res))
assert_allclose(np.var(a3d, axis=2, where=_wh_partial),
np.array(_res))
assert_allclose(np.var(a, axis=1, where=wh_full),
np.var(a[wh_full].reshape((5, 3)), axis=1))
        assert_allclose(np.var(a, axis=0, where=wh_partial),
                        np.var(a[wh_partial[:, 0]], axis=0))
with pytest.warns(RuntimeWarning) as w:
assert_equal(a.var(where=False), np.nan)
with pytest.warns(RuntimeWarning) as w:
assert_equal(np.var(a, where=False), np.nan)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_where(self):
        a = np.arange(25).reshape((5, 5))[::-1]
whf = np.array([[False, True, False, True, True],
[True, False, True, False, True],
[True, True, False, True, False],
[True, False, True, True, False],
[False, True, False, True, True]])
whp = np.array([[False],
[False],
[True],
[True],
[False]])
_cases = [
(0, True, 7.07106781*np.ones((5))),
(1, True, 1.41421356*np.ones((5))),
(0, whf,
             np.array([4.0824829, 8.16496581, 5., 7.39509973, 8.49836586])),
(0, whp, 2.5*np.ones((5)))
]
for _ax, _wh, _res in _cases:
assert_allclose(a.std(axis=_ax, where=_wh), _res)
assert_allclose(np.std(a, axis=_ax, where=_wh), _res)
a3d = np.arange(16).reshape((2, 2, 4))
_wh_partial = np.array([False, True, True, False])
_res = [[0.5, 0.5], [0.5, 0.5]]
assert_allclose(a3d.std(axis=2, where=_wh_partial),
np.array(_res))
assert_allclose(np.std(a3d, axis=2, where=_wh_partial),
np.array(_res))
        assert_allclose(a.std(axis=1, where=whf),
                        np.std(a[whf].reshape((5, 3)), axis=1))
        assert_allclose(np.std(a, axis=1, where=whf),
                        (a[whf].reshape((5, 3))).std(axis=1))
        assert_allclose(a.std(axis=0, where=whp),
                        np.std(a[whp[:, 0]], axis=0))
        assert_allclose(np.std(a, axis=0, where=whp),
                        (a[whp[:, 0]]).std(axis=0))
with pytest.warns(RuntimeWarning) as w:
assert_equal(a.std(where=False), np.nan)
with pytest.warns(RuntimeWarning) as w:
assert_equal(np.std(a, where=False), np.nan)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot:
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
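    # Added sketch: unlike np.dot, np.vdot conjugates its first argument
    # and flattens both inputs, so vdot(a, b) equals
    # dot(a.conj().ravel(), b.ravel()).
    def test_conjugates_first_arg_sketch(self):
        a = np.array([1 + 2j, 3 - 1j])
        b = np.array([2 - 1j, 1 + 1j])
        assert_equal(np.vdot(a, b), np.dot(a.conj(), b))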
class TestDot:
def setup_method(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
        tgt = np.array([[0.00256425], [0.00131359],
                        [0.00200324], [0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec:
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
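            # Manual alignment trick: over-allocate a byte buffer by
            # `align` bytes, then slice from the first offset at which the
            # data pointer is a multiple of `align`. NumPy has no aligned
            # allocator, so this is the usual way tests force alignment.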
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
@pytest.mark.slow
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
    @requires_memory(free_bytes=18e9)  # complex case needs ~18 GB
def test_huge_vectordot(self, dtype):
        # Large vector multiplications are chunked when the BLAS uses
        # 32-bit integer indices.
        # Test that the chunking does the right thing, see also gh-22262.
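        # Illustrative check: every element is 1, so the dot product must
        # equal the element count exactly; an off-by-one at any chunk
        # boundary would shift the sum and fail the assertion below.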
data = np.ones(2**30+100, dtype=dtype)
res = np.dot(data, data)
assert res == 2**30+100
class MatmulCommon:
"""Common tests for '@' operator and numpy.matmul.
"""
    # Should work with all of these types; note that object ("O") is
    # already included at the end.
types = "?bhilqBHILQefdgFDGO"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
            ((), (1,)),            # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
if dt != "O":
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_scalar_output(self):
vec1 = np.array([2])
vec2 = np.array([3, 4]).reshape(1, -1)
tgt = np.array([6, 8])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt)
res = self.matmul(v2.T, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?').reshape(1, -1)
res = self.matmul(vec[:, 0], vec)
assert_equal(res, True)
def test_vector_vector_values(self):
vec1 = np.array([1, 2])
vec2 = np.array([3, 4]).reshape(-1, 1)
tgt1 = np.array([11])
tgt2 = np.array([[3, 6], [4, 8]])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt1)
# no broadcast, we must make v1 into a 2d ndarray
res = self.matmul(v2, v1.reshape(1, -1))
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
        res = self.matmul(mat1, vec)
        assert_equal(res, tgt1)
        res = self.matmul(mat2, vec)
        assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
tgt = np.dot(a, b)
# test as positional argument
msg = "out positional argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
msg = "Cannot cast ufunc .* output"
out = np.zeros((5, 2), dtype=np.int32)
assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
# test out with type upcast to complex
out = np.zeros((5, 2), dtype=np.complex128)
c = self.matmul(a, b, out=out)
assert_(c is out)
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, '')
c = c.astype(tgt.dtype)
assert_array_equal(c, tgt)
def test_empty_out(self):
        # Check that the output is not broadcast against the inputs: a
        # nonzero-sized `out` must be rejected when the outer (iterator)
        # dimensions have size zero.
arr = np.ones((0, 1, 1))
out = np.ones((1, 1, 1))
assert self.matmul(arr, arr).shape == (0, 1, 1)
with pytest.raises(ValueError, match=r"non-broadcastable"):
self.matmul(arr, arr, out=out)
def test_out_contiguous(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
v = np.array([1, 3], dtype=float)
tgt = np.dot(a, b)
tgt_mv = np.dot(a, v)
# test out non-contiguous
out = np.ones((5, 2, 2), dtype=float)
c = self.matmul(a, b, out=out[..., 0])
assert c.base is out
assert_array_equal(c, tgt)
c = self.matmul(a, v, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
c = self.matmul(v, a.T, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
# test out contiguous in only last dim
out = np.ones((10, 2), dtype=float)
c = self.matmul(a, b, out=out[::2, :])
assert_array_equal(c, tgt)
# test transposes of out, args
out = np.ones((5, 2), dtype=float)
c = self.matmul(b.T, a.T, out=out.T)
assert_array_equal(out, tgt)
m1 = np.arange(15.).reshape(5, 3)
m2 = np.arange(21.).reshape(3, 7)
m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
vc = np.arange(10.)
vr = np.arange(6.)
m0 = np.zeros((3, 0))
@pytest.mark.parametrize('args', (
# matrix-matrix
(m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
# matrix-matrix-transpose, contiguous and non
(m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
(m3, m3.T), (m3.T, m3),
# matrix-matrix non-contiguous
(m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
# vector-matrix, matrix-vector, contiguous
(m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
# vector-matrix, matrix-vector, vector non-contiguous
(m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
# vector-matrix, matrix-vector, matrix non-contiguous
(m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
# vector-matrix, matrix-vector, both non-contiguous
(m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
# size == 0
(m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
))
def test_dot_equivalent(self, args):
r1 = np.matmul(*args)
r2 = np.dot(*args)
assert_equal(r1, r2)
r3 = np.matmul(args[0].copy(), args[1].copy())
assert_equal(r1, r3)
def test_matmul_object(self):
import fractions
f = np.vectorize(fractions.Fraction)
def random_ints():
return np.random.randint(1, 1000, size=(10, 3, 3))
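        # np.vectorize(Fraction) maps elementwise, so f(n, d) yields an
        # object array of exact Fractions n[i]/d[i]; casting to float below
        # lets the exact result be compared against float matmul.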
M1 = f(random_ints(), random_ints())
M2 = f(random_ints(), random_ints())
M3 = self.matmul(M1, M2)
[N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]]
assert_allclose(N3, self.matmul(N1, N2))
def test_matmul_object_type_scalar(self):
from fractions import Fraction as F
v = np.array([F(2,3), F(5,7)])
res = self.matmul(v, v)
assert_(type(res) is F)
def test_matmul_empty(self):
a = np.empty((3, 0), dtype=object)
b = np.empty((0, 3), dtype=object)
c = np.zeros((3, 3))
assert_array_equal(np.matmul(a, b), c)
def test_matmul_exception_multiply(self):
# test that matmul fails if `__mul__` is missing
class add_not_multiply():
def __add__(self, other):
return self
a = np.full((3,3), add_not_multiply())
with assert_raises(TypeError):
b = np.matmul(a, a)
def test_matmul_exception_add(self):
# test that matmul fails if `__add__` is missing
class multiply_not_add():
def __mul__(self, other):
return self
a = np.full((3,3), multiply_not_add())
with assert_raises(TypeError):
b = np.matmul(a, a)
def test_matmul_bool(self):
# gh-14439
a = np.array([[1, 0],[1, 1]], dtype=bool)
assert np.max(a.view(np.uint8)) == 1
b = np.matmul(a, a)
# matmul with boolean output should always be 0, 1
assert np.max(b.view(np.uint8)) == 1
rg = np.random.default_rng(np.random.PCG64(43))
d = rg.integers(2, size=4*5, dtype=np.int8)
d = d.reshape(4, 5) > 0
out1 = np.matmul(d, d.reshape(5, 4))
out2 = np.dot(d, d.reshape(5, 4))
assert_equal(out1, out2)
c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
assert not np.any(c)
class TestMatmulOperator(MatmulCommon):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A:
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_raises(self):
assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc'))
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
assert_raises(TypeError, exec, "a @= b", globals(), locals())
def test_matmul_axes():
a = np.arange(3*4*5).reshape(3, 4, 5)
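    # Each entry of `axes` names the core (matrix) axes of one operand, in
    # the order [first operand, second operand, output]; e.g. the call
    # below is equivalent to (a @ a.swapaxes(-1, -2)) with the 4x4 result
    # placed on axes 1 and 2.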
c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
assert c.shape == (3, 4, 4)
d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
assert d.shape == (4, 4, 3)
e = np.swapaxes(d, 0, 2)
assert_array_equal(e, c)
f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
assert f.shape == (4, 5)
class TestInner:
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_vecself(self):
# Ticket 844.
        # Inner product of a vector with itself used to segfault or give a
        # meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
class TestChoose:
def setup_method(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
@pytest.mark.parametrize("ops",
[(1000, np.array([1], dtype=np.uint8)),
(-1, np.array([1], dtype=np.uint8)),
(1., np.float32(3)),
(1., np.array([3], dtype=np.float32))],)
def test_output_dtype(self, ops):
expected_dt = np.result_type(*ops)
        assert np.choose([0], ops).dtype == expected_dt
class TestRepeat:
def setup_method(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
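# As exercised below: 'zero' and 'one' pad out-of-bounds reads with the
# constants 0 and 1, 'constant' pads with a caller-supplied fill value,
# 'circular' wraps around the array, and 'mirror' reflects it (including
# the edge element).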
@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
class TestNeighborhoodIter:
# Simple, 2d tests
def test_simple2d(self, dt):
        # Test zero, one, and constant padding for a simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
assert_array_equal(l, r)
# Test with start in the middle
r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2)
assert_array_equal(l, r)
def test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Simple, 1d tests
def test_simple(self, dt):
        # Test zero, one, and constant padding
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = _multiarray_tests.test_neighborhood_iterator(
x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
# Test mirror modes
def test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = _multiarray_tests.test_neighborhood_iterator(
x, [-2, 2], x[1], NEIGH_MODE['mirror'])
assert_([i.dtype == dt for i in l])
assert_array_equal(l, r)
# Circular mode
def test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = _multiarray_tests.test_neighborhood_iterator(
x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter:
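    # In these tests the first (bounds, mode) pair builds a padded virtual
    # view of x, and the second neighborhood then slides over that view;
    # reads beyond the first padding fall through to the second mode.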
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
        # Stacking circular on top of zero, first neighborhood strictly
        # inside the array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings:
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType:
    def test_unsigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
    def test_unsigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
    def test_unsigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
    def test_unsigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype:
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
assert_equal(actual, dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
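    # A few spec examples checked below: 'i' is a single C int; 'ii' is two
    # unnamed int fields (auto-named f0/f1); ':name:' names the preceding
    # field; 'T{...}' nests a struct; 'x' is a pad byte; '@' requests
    # native alignment while '=' and '^' do not.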
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
        # Trailing padding should be included and, in aligned mode, the
        # item size should match the alignment
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return align*(1 + (n-1)//align)
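        # e.g. with 4-byte ints, aligned(5) == aligned(8) == 8: sizes are
        # rounded up to the next multiple of the alignment.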
base = dict(formats=['i'], names=['f0'])
self._check('ix', dict(itemsize=aligned(size + 1), **base))
self._check('ixx', dict(itemsize=aligned(size + 2), **base))
self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
self._check('i7x', dict(itemsize=aligned(size + 7), **base))
self._check('^ix', dict(itemsize=size + 1, **base))
self._check('^ixx', dict(itemsize=size + 2, **base))
self._check('^ixxx', dict(itemsize=size + 3, **base))
self._check('^ixxxx', dict(itemsize=size + 4, **base))
self._check('^i7x', dict(itemsize=size + 7, **base))
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return (align*(1 + (n-1)//align))
self._check('(3)T{ix}', (dict(
names=['f0'],
formats=['i'],
offsets=[0],
itemsize=aligned(size + 1)
), (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
def test_field_order(self):
# gh-9053 - previously, we relied on dictionary key order
self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
def test_unnamed_fields(self):
self._check('ii', [('f0', 'i'), ('f1', 'i')])
self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
class TestNewBufferProtocol:
""" Test PEP3118 buffers """
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b'xxx', True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.sctypeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_invalid_buffer_format(self):
# datetime64 cannot be used fully in a buffer yet
# Should be fixed in the next Numpy major release
dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
a = np.empty(3, dt)
assert_raises((ValueError, BufferError), memoryview, a)
assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b' ', True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, ())
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, ())
assert_equal(y.ndim, 0)
assert_equal(y.strides, ())
assert_equal(y.suboffsets, ())
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError,
_multiarray_tests.get_buffer_info,
np.arange(5)[::2], ('SIMPLE',))
@pytest.mark.parametrize(["obj", "error"], [
pytest.param(np.array([1, 2], dtype=rational), ValueError, id="array"),
pytest.param(rational(1, 2), TypeError, id="scalar")])
def test_export_and_pickle_user_dtype(self, obj, error):
# User dtypes should export successfully when FORMAT was not requested.
with pytest.raises(error):
_multiarray_tests.get_buffer_info(obj, ("STRIDED_RO", "FORMAT"))
_multiarray_tests.get_buffer_info(obj, ("STRIDED_RO",))
# This is currently also necessary to implement pickling:
pickle_obj = pickle.dumps(obj)
res = pickle.loads(pickle_obj)
assert_array_equal(res, obj)
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
@pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.")
def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')):
# Note: c defined as parameter so that it is persistent and leak
# checks will notice gh-16934 (buffer info cache leak).
c.strides = (-1, 80, 8) # strides need to be fixed at export
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = _multiarray_tests.get_buffer_info(
arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = _multiarray_tests.get_buffer_info(
arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
@pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.")
@pytest.mark.skipif(not np.ones((10, 1), order="C").flags.f_contiguous,
reason="Test is unnecessary (but fails) without relaxed strides.")
def test_relaxed_strides_buffer_info_leak(self, arr=np.ones((1, 10))):
"""Test that alternating export of C- and F-order buffers from
an array which is both C- and F-order when relaxed strides is
active works.
This test defines array in the signature to ensure leaking more
references every time the test is run (catching the leak with
pytest-leaks).
"""
for i in range(10):
_, s = _multiarray_tests.get_buffer_info(arr, ['F_CONTIGUOUS'])
assert s == (8, 8)
_, s = _multiarray_tests.get_buffer_info(arr, ['C_CONTIGUOUS'])
assert s == (80, 8)
def test_out_of_order_fields(self):
dt = np.dtype(dict(
formats=['<i4', '<i4'],
names=['one', 'two'],
offsets=[4, 0],
itemsize=8
))
# overlapping fields cannot be represented by PEP3118
arr = np.empty(1, dt)
with assert_raises(ValueError):
memoryview(arr)
def test_max_dims(self):
a = np.ones((1,) * 32)
self._check_roundtrip(a)
@pytest.mark.slow
def test_error_too_many_dims(self):
def make_ctype(shape, scalar_type):
t = scalar_type
for dim in shape[::-1]:
t = dim * t
return t
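        # e.g. make_ctype((2, 3), ctypes.c_uint8) == (ctypes.c_uint8 * 3) * 2,
        # a 2x3 ctypes array type built innermost-dimension first.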
# construct a memoryview with 33 dimensions
c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
m = memoryview(c_u8_33d())
assert_equal(m.ndim, 33)
assert_raises_regex(
RuntimeError, "ndim",
np.array, m)
# The above seems to create some deep cycles, clean them up for
# easier reference count debugging:
del c_u8_33d, m
for i in range(33):
if gc.collect() == 0:
break
def test_error_pointer_type(self):
# gh-6741
m = memoryview(ctypes.pointer(ctypes.c_uint8()))
assert_('&' in m.format)
assert_raises_regex(
ValueError, "format string",
np.array, m)
def test_error_message_unsupported(self):
        # wchar has no corresponding numpy type - if this changes in the
        # future, we need a better way to construct an invalid memoryview
        # format.
t = ctypes.c_wchar * 4
with assert_raises(ValueError) as cm:
np.array(t())
exc = cm.exception
with assert_raises_regex(
NotImplementedError,
r"Unrepresentable .* 'u' \(UCS-2 strings\)"
):
raise exc.__cause__
def test_ctypes_integer_via_memoryview(self):
# gh-11150, due to bpo-10746
for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
value = c_integer(42)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
np.asarray(value)
def test_ctypes_struct_via_memoryview(self):
# gh-10528
class foo(ctypes.Structure):
_fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
f = foo(a=1, b=2)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
arr = np.asarray(f)
assert_equal(arr['a'], 1)
assert_equal(arr['b'], 2)
f.a = 3
assert_equal(arr['a'], 3)
@pytest.mark.parametrize("obj", [np.ones(3), np.ones(1, dtype="i,i")[()]])
def test_error_if_stored_buffer_info_is_corrupted(self, obj):
"""
        If a user extends a NumPy array before 1.20 and then runs it
        on NumPy 1.20+, a C-subclassed array might in theory modify
        the new buffer-info field. This checks that an error is raised
        if that happens (for buffer export) and that an error is written
        on delete.
        This is a sanity check to help users transition to safe code; it
        may be deleted at any point.
"""
# corrupt buffer info:
_multiarray_tests.corrupt_or_fix_bufferinfo(obj)
name = type(obj)
with pytest.raises(RuntimeError,
match=f".*{name} appears to be C subclassed"):
memoryview(obj)
# Fix buffer info again before we delete (or we lose the memory)
_multiarray_tests.corrupt_or_fix_bufferinfo(obj)
def test_no_suboffsets(self):
try:
import _testbuffer
except ImportError:
raise pytest.skip("_testbuffer is not available")
for shape in [(2, 3), (2, 3, 4)]:
data = list(range(np.prod(shape)))
buffer = _testbuffer.ndarray(data, shape, format='i',
flags=_testbuffer.ND_PIL)
msg = "NumPy currently does not support.*suboffsets"
with pytest.raises(BufferError, match=msg):
np.asarray(buffer)
with pytest.raises(BufferError, match=msg):
np.asarray([buffer])
# Also check (unrelated and more limited but similar) frombuffer:
with pytest.raises(BufferError):
np.frombuffer(buffer)
class TestArrayCreationCopyArgument(object):
class RaiseOnBool:
def __bool__(self):
raise ValueError
true_vals = [True, np._CopyMode.ALWAYS, np.True_]
false_vals = [False, np._CopyMode.IF_NEEDED, np.False_]
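    # Semantics exercised below: ALWAYS always copies, IF_NEEDED copies
    # only when a copy is unavoidable, and NEVER raises ValueError instead
    # of copying.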
def test_scalars(self):
# Test both numpy and python scalars
for dtype in np.typecodes["All"]:
arr = np.zeros((), dtype=dtype)
scalar = arr[()]
pyscalar = arr.item(0)
# Test never-copy raises error:
assert_raises(ValueError, np.array, scalar,
copy=np._CopyMode.NEVER)
assert_raises(ValueError, np.array, pyscalar,
copy=np._CopyMode.NEVER)
assert_raises(ValueError, np.array, pyscalar,
copy=self.RaiseOnBool())
assert_raises(ValueError, _multiarray_tests.npy_ensurenocopy,
[1])
# Casting with a dtype (to unsigned integers) can be special:
with pytest.raises(ValueError):
np.array(pyscalar, dtype=np.int64, copy=np._CopyMode.NEVER)
def test_compatible_cast(self):
        # Some types are compatible even though they are different; no
        # copy is necessary for them. This is mostly true for some integers
def int_types(byteswap=False):
int_types = (np.typecodes["Integer"] +
np.typecodes["UnsignedInteger"])
for int_type in int_types:
yield np.dtype(int_type)
if byteswap:
yield np.dtype(int_type).newbyteorder()
for int1 in int_types():
for int2 in int_types(True):
arr = np.arange(10, dtype=int1)
for copy in self.true_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is not arr and res.flags.owndata
assert_array_equal(res, arr)
if int1 == int2:
# Casting is not necessary, base check is sufficient here
for copy in self.false_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is arr or res.base is arr
res = np.array(arr,
copy=np._CopyMode.NEVER,
dtype=int2)
assert res is arr or res.base is arr
else:
# Casting is necessary, assert copy works:
for copy in self.false_vals:
res = np.array(arr, copy=copy, dtype=int2)
assert res is not arr and res.flags.owndata
assert_array_equal(res, arr)
assert_raises(ValueError, np.array,
arr, copy=np._CopyMode.NEVER,
dtype=int2)
assert_raises(ValueError, np.array,
arr, copy=None,
dtype=int2)
def test_buffer_interface(self):
# Buffer interface gives direct memory access (no copy)
arr = np.arange(10)
view = memoryview(arr)
# Checking bases is a bit tricky since numpy creates another
# memoryview, so use may_share_memory.
for copy in self.true_vals:
res = np.array(view, copy=copy)
assert not np.may_share_memory(arr, res)
for copy in self.false_vals:
res = np.array(view, copy=copy)
assert np.may_share_memory(arr, res)
res = np.array(view, copy=np._CopyMode.NEVER)
assert np.may_share_memory(arr, res)
def test_array_interfaces(self):
# Array interface gives direct memory access (much like a memoryview)
base_arr = np.arange(10)
class ArrayLike:
__array_interface__ = base_arr.__array_interface__
arr = ArrayLike()
for copy, val in [(True, None), (np._CopyMode.ALWAYS, None),
(False, arr), (np._CopyMode.IF_NEEDED, arr),
(np._CopyMode.NEVER, arr)]:
res = np.array(arr, copy=copy)
assert res.base is val
def test___array__(self):
base_arr = np.arange(10)
class ArrayLike:
def __array__(self):
                # __array__ should return a copy; numpy, however, cannot
                # know this.
return base_arr
arr = ArrayLike()
for copy in self.true_vals:
res = np.array(arr, copy=copy)
assert_array_equal(res, base_arr)
            # numpy currently forces an additional copy in this case; one
            # could argue that numpy simply does not trust the ArrayLike.
            # This may be open to change:
assert res is not base_arr
for copy in self.false_vals:
res = np.array(arr, copy=False)
assert_array_equal(res, base_arr)
assert res is base_arr # numpy trusts the ArrayLike
with pytest.raises(ValueError):
np.array(arr, copy=np._CopyMode.NEVER)
@pytest.mark.parametrize(
"arr", [np.ones(()), np.arange(81).reshape((9, 9))])
@pytest.mark.parametrize("order1", ["C", "F", None])
@pytest.mark.parametrize("order2", ["C", "F", "A", "K"])
def test_order_mismatch(self, arr, order1, order2):
# The order is the main (python side) reason that can cause
# a never-copy to fail.
# Prepare C-order, F-order and non-contiguous arrays:
arr = arr.copy(order1)
if order1 == "C":
assert arr.flags.c_contiguous
elif order1 == "F":
assert arr.flags.f_contiguous
elif arr.ndim != 0:
# Make array non-contiguous
arr = arr[::2, ::2]
assert not arr.flags.forc
# Whether a copy is necessary depends on the order of arr:
if order2 == "C":
no_copy_necessary = arr.flags.c_contiguous
elif order2 == "F":
no_copy_necessary = arr.flags.f_contiguous
else:
# Keeporder and Anyorder are OK with non-contiguous output.
# This is not consistent with the `astype` behaviour which
# enforces contiguity for "A". It is probably historic from when
# "K" did not exist.
no_copy_necessary = True
# Test it for both the array and a memoryview
for view in [arr, memoryview(arr)]:
for copy in self.true_vals:
res = np.array(view, copy=copy, order=order2)
assert res is not arr and res.flags.owndata
assert_array_equal(arr, res)
if no_copy_necessary:
for copy in self.false_vals:
res = np.array(view, copy=copy, order=order2)
# res.base.obj refers to the memoryview
if not IS_PYPY:
assert res is arr or res.base.obj is arr
res = np.array(view, copy=np._CopyMode.NEVER,
order=order2)
if not IS_PYPY:
assert res is arr or res.base.obj is arr
else:
for copy in self.false_vals:
res = np.array(arr, copy=copy, order=order2)
assert_array_equal(arr, res)
assert_raises(ValueError, np.array,
view, copy=np._CopyMode.NEVER,
order=order2)
assert_raises(ValueError, np.array,
view, copy=None,
order=order2)
def test_striding_not_ok(self):
arr = np.array([[1, 2, 4], [3, 4, 5]])
assert_raises(ValueError, np.array,
arr.T, copy=np._CopyMode.NEVER,
order='C')
assert_raises(ValueError, np.array,
arr.T, copy=np._CopyMode.NEVER,
order='C', dtype=np.int64)
assert_raises(ValueError, np.array,
arr, copy=np._CopyMode.NEVER,
order='F')
assert_raises(ValueError, np.array,
arr, copy=np._CopyMode.NEVER,
order='F', dtype=np.int64)
class TestArrayAttributeDeletion:
def test_multiarray_writable_attributes_deletion(self):
        # ticket #2046, should not segfault, raises AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
class TestArrayInterface():
class Foo:
def __init__(self, value):
self.value = value
self.iface = {'typestr': 'f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
@pytest.mark.parametrize('val, iface, expected', [
(f, {}, 0.5),
([f], {}, [0.5]),
([f, f], {}, [0.5, 0.5]),
(f, {'shape': ()}, 0.5),
(f, {'shape': None}, TypeError),
(f, {'shape': (1, 1)}, [[0.5]]),
(f, {'shape': (2,)}, ValueError),
(f, {'strides': ()}, 0.5),
(f, {'strides': (2,)}, ValueError),
(f, {'strides': 16}, TypeError),
])
def test_scalar_interface(self, val, iface, expected):
# Test scalar coercion within the array interface
self.f.iface = {'typestr': 'f8'}
self.f.iface.update(iface)
if HAS_REFCOUNT:
pre_cnt = sys.getrefcount(np.dtype('f8'))
if isinstance(expected, type):
assert_raises(expected, np.array, val)
else:
result = np.array(val)
assert_equal(np.array(val), expected)
assert result.dtype == 'f8'
del result
if HAS_REFCOUNT:
post_cnt = sys.getrefcount(np.dtype('f8'))
assert_equal(pre_cnt, post_cnt)
def test_interface_no_shape():
class ArrayLike:
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_array_interface_empty_shape():
# See gh-7994
arr = np.array([1, 2, 3])
interface1 = dict(arr.__array_interface__)
interface1['shape'] = ()
class DummyArray1:
__array_interface__ = interface1
    # NOTE: because Py2 str / Py3 bytes support the buffer interface, setting
    # the interface data to bytes would invoke the bug this tests for, namely
    # that __array_interface__ with shape=() is not allowed if the data is an
    # object exposing the buffer interface
interface2 = dict(interface1)
interface2['data'] = arr[0].tobytes()
class DummyArray2:
__array_interface__ = interface2
arr1 = np.asarray(DummyArray1())
arr2 = np.asarray(DummyArray2())
arr3 = arr[:1].reshape(())
assert_equal(arr1, arr2)
assert_equal(arr1, arr3)
def test_array_interface_offset():
arr = np.array([1, 2, 3], dtype='int32')
interface = dict(arr.__array_interface__)
interface['data'] = memoryview(arr)
interface['shape'] = (2,)
interface['offset'] = 4
class DummyArray:
__array_interface__ = interface
arr1 = np.asarray(DummyArray())
assert_equal(arr1, arr[1:])
def test_array_interface_unicode_typestr():
arr = np.array([1, 2, 3], dtype='int32')
interface = dict(arr.__array_interface__)
interface['typestr'] = '\N{check mark}'
class DummyArray:
__array_interface__ = interface
# should not be UnicodeEncodeError
with pytest.raises(TypeError):
np.asarray(DummyArray())
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMapIter:
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
_multiarray_tests.test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
_multiarray_tests.test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
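    # Illustrative aside (not part of the original C-backed tests): the
    # public equivalent of the in-place increment exercised above is
    # ``np.add.at``, which also accumulates over repeated indices.
    def test_mapiter_public_equivalent_sketch(self):
        a = np.zeros(4)
        np.add.at(a, [0, 0, 2], [1, 1, 5])
        assert_equal(a, [2., 0., 5., 0.])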
class TestAsCArray:
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = _multiarray_tests.test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = _multiarray_tests.test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion:
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
def test_to_bool_scalar(self):
assert_equal(bool(np.array([False])), False)
assert_equal(bool(np.array([True])), True)
assert_equal(bool(np.array([[42]])), True)
assert_raises(ValueError, bool, np.array([1, 2]))
class NotConvertible:
def __bool__(self):
raise NotImplementedError
assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
if IS_PYSTON:
pytest.skip("Pyston disables recursion checking")
self_containing = np.array([None])
self_containing[0] = self_containing
Error = RecursionError
assert_raises(Error, bool, self_containing) # previously stack overflow
self_containing[0] = None # resolve circular reference
def test_to_int_scalar(self):
# gh-9972 means that these aren't always the same
int_funcs = (int, lambda x: x.__int__())
for int_func in int_funcs:
assert_equal(int_func(np.array(0)), 0)
assert_equal(int_func(np.array([1])), 1)
assert_equal(int_func(np.array([[42]])), 42)
assert_raises(TypeError, int_func, np.array([1, 2]))
# gh-9972
assert_equal(4, int_func(np.array('4')))
assert_equal(5, int_func(np.bytes_(b'5')))
assert_equal(6, int_func(np.unicode_(u'6')))
# The delegation of int() to __trunc__ was deprecated in
# Python 3.11.
if sys.version_info < (3, 11):
class HasTrunc:
def __trunc__(self):
return 3
assert_equal(3, int_func(np.array(HasTrunc())))
assert_equal(3, int_func(np.array([HasTrunc()])))
class NotConvertible:
def __int__(self):
raise NotImplementedError
assert_raises(NotImplementedError,
int_func, np.array(NotConvertible()))
assert_raises(NotImplementedError,
int_func, np.array([NotConvertible()]))
class TestWhere:
def test_basic(self):
dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
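    # Sketch (not in the original suite): a non-boolean condition is
    # interpreted elementwise as ``c != 0``, which the mask-inversion dance
    # above relies on.
    def test_nonbool_condition_sketch(self):
        c = np.array([0, 3, -2])
        assert_equal(np.where(c, 1, 0), [0, 1, 1])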
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
def test_empty_result(self):
        # pass an empty where result through an assignment which reads the
        # data of empty arrays; the error is detectable with valgrind, see
        # gh-8922
x = np.zeros((1, 1))
ibad = np.vstack(np.where(x == 99.))
assert_array_equal(ibad,
np.atleast_2d(np.array([[],[]], dtype=np.intp)))
def test_largedim(self):
# invalid read regression gh-9304
shape = [10, 2, 3, 4, 5, 6]
np.random.seed(2)
array = np.random.rand(*shape)
for i in range(10):
benchmark = array.nonzero()
result = array.nonzero()
assert_array_equal(benchmark, result)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf:
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
@_no_tracing
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing:
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
assert_(not isinstance(x, collections.abc.Hashable))
class TestArrayPriority:
    # This will go away when __array_priority__ is settled; meanwhile it
    # serves to catch unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
        op.and_, op.or_, op.xor, op.lshift, op.rshift, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other:
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
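    # Illustrative aside (not part of the original checks): the same priority
    # rule selects the output subclass of a direct ufunc call, so the
    # higher-priority Bar wins regardless of operand order.
    def test_ufunc_output_priority_sketch(self):
        a = self.Foo([1., 2.])
        b = self.Bar([3., 4.])
        assert_(type(np.add(a, b)) is self.Bar)
        assert_(type(np.add(b, a)) is self.Bar)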
class TestBytestringArrayNonzero:
def test_empty_bstring_array_is_falsey(self):
assert_(not np.array([''], dtype=str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=str)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=str)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=str)
a[0] = ' \0 \0'
assert_(a)
class TestUnicodeEncoding:
"""
Tests for encoding related bugs, such as UCS2 vs UCS4, round-tripping
issues, etc
"""
def test_round_trip(self):
""" Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """
# gh-15363
arr = np.zeros(shape=(), dtype="U1")
for i in range(1, sys.maxunicode + 1):
expected = chr(i)
arr[()] = expected
assert arr[()] == expected
assert arr.item() == expected
def test_assign_scalar(self):
# gh-3258
l = np.array(['aa', 'bb'])
l[:] = np.unicode_('cc')
assert_equal(l, ['cc', 'cc'])
def test_fill_scalar(self):
# gh-7227
l = np.array(['aa', 'bb'])
l.fill(np.unicode_('cc'))
assert_equal(l, ['cc', 'cc'])
class TestUnicodeArrayNonzero:
def test_empty_ustring_array_is_falsey(self):
assert_(not np.array([''], dtype=np.unicode_))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode_)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0 \0'
assert_(a)
class TestFormat:
def test_0d(self):
a = np.array(np.pi)
assert_equal('{:0.3g}'.format(a), '3.14')
assert_equal('{:0.3g}'.format(a[()]), '3.14')
def test_1d_no_format(self):
a = np.array([np.pi])
assert_equal('{}'.format(a), str(a))
def test_1d_format(self):
# until gh-5543, ensure that the behaviour matches what it used to be
a = np.array([np.pi])
assert_raises(TypeError, '{:30}'.format, a)
from numpy.testing import IS_PYPY
class TestCTypes:
def test_ctypes_is_available(self):
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
assert_equal(ctypes, test_arr.ctypes._ctypes)
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
def test_ctypes_is_not_available(self):
from numpy.core import _internal
_internal.ctypes = None
try:
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
assert_(isinstance(test_arr.ctypes._ctypes,
_internal._missing_ctypes))
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
finally:
_internal.ctypes = ctypes
def _make_readonly(x):
x.flags.writeable = False
return x
@pytest.mark.parametrize('arr', [
np.array([1, 2, 3]),
np.array([['one', 'two'], ['three', 'four']]),
np.array((1, 2), dtype='i4,i4'),
np.zeros((2,), dtype=
np.dtype(dict(
formats=['<i4', '<i4'],
names=['a', 'b'],
offsets=[0, 2],
itemsize=6
))
),
np.array([None], dtype=object),
np.array([]),
np.empty((0, 0)),
_make_readonly(np.array([1, 2, 3])),
], ids=[
'1d',
'2d',
'structured',
'overlapping',
'object',
'empty',
'empty-2d',
'readonly'
])
def test_ctypes_data_as_holds_reference(self, arr):
# gh-9647
# create a copy to ensure that pytest does not mess with the refcounts
arr = arr.copy()
arr_ref = weakref.ref(arr)
ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)
# `ctypes_ptr` should hold onto `arr`
del arr
break_cycles()
assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
# but when the `ctypes_ptr` object dies, so should `arr`
del ctypes_ptr
if IS_PYPY:
# Pypy does not recycle arr objects immediately. Trigger gc to
# release arr. Cpython uses refcounts. An explicit call to gc
# should not be needed here.
break_cycles()
assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
def test_ctypes_as_parameter_holds_reference(self):
arr = np.array([None]).copy()
arr_ref = weakref.ref(arr)
ctypes_ptr = arr.ctypes._as_parameter_
# `ctypes_ptr` should hold onto `arr`
del arr
break_cycles()
assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
# but when the `ctypes_ptr` object dies, so should `arr`
del ctypes_ptr
if IS_PYPY:
break_cycles()
assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
class TestWritebackIfCopy:
# all these tests use the WRITEBACKIFCOPY mechanism
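    # In outline: when an operation receives an ``out=`` array whose dtype or
    # layout it cannot use directly, numpy works in a behaved temporary
    # flagged WRITEBACKIFCOPY and copies the result back into ``out`` when
    # the temporary is resolved.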
def test_argmax_with_out(self):
mat = np.eye(5)
out = np.empty(5, dtype='i2')
res = np.argmax(mat, 0, out=out)
assert_equal(res, range(5))
def test_argmin_with_out(self):
mat = -np.eye(5)
out = np.empty(5, dtype='i2')
res = np.argmin(mat, 0, out=out)
assert_equal(res, range(5))
def test_insert_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
# uses arr_insert
np.place(a, a>2, [44, 55])
assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
# hit one of the failing paths
assert_raises(ValueError, np.place, a, a>20, [])
def test_put_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
np.put(a, [0, 2], [44, 55])
assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
def test_putmask_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
# uses arr_putmask
np.putmask(a, a>2, a**2)
assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
def test_take_mode_raise(self):
a = np.arange(6, dtype='int')
out = np.empty(2, dtype='int')
np.take(a, [0, 2], out=out, mode='raise')
assert_equal(out, np.array([0, 2]))
def test_choose_mod_raise(self):
a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
out = np.empty((3,3), dtype='int')
choices = [-10, 10]
np.choose(a, choices, out=out, mode='raise')
assert_equal(out, np.array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]]))
def test_flatiter__array__(self):
a = np.arange(9).reshape(3,3)
b = a.T.flat
c = b.__array__()
# triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
del c
def test_dot_out(self):
# if HAVE_CBLAS, will use WRITEBACKIFCOPY
a = np.arange(9, dtype=float).reshape(3,3)
b = np.dot(a, a, out=a)
assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
def test_view_assign(self):
from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
arr = np.arange(9).reshape(3, 3).T
arr_wb = npy_create_writebackifcopy(arr)
assert_(arr_wb.flags.writebackifcopy)
assert_(arr_wb.base is arr)
arr_wb[...] = -100
npy_resolve(arr_wb)
# arr changes after resolve, even though we assigned to arr_wb
assert_equal(arr, -100)
# after resolve, the two arrays no longer reference each other
assert_(arr_wb.ctypes.data != 0)
assert_equal(arr_wb.base, None)
# assigning to arr_wb does not get transferred to arr
arr_wb[...] = 100
assert_equal(arr, -100)
@pytest.mark.leaks_references(
reason="increments self in dealloc; ignore since deprecated path.")
def test_dealloc_warning(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
arr = np.arange(9).reshape(3, 3)
v = arr.T
_multiarray_tests.npy_abuse_writebackifcopy(v)
assert len(sup.log) == 1
def test_view_discard_refcount(self):
from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
arr = np.arange(9).reshape(3, 3).T
orig = arr.copy()
if HAS_REFCOUNT:
arr_cnt = sys.getrefcount(arr)
arr_wb = npy_create_writebackifcopy(arr)
assert_(arr_wb.flags.writebackifcopy)
assert_(arr_wb.base is arr)
arr_wb[...] = -100
npy_discard(arr_wb)
# arr remains unchanged after discard
assert_equal(arr, orig)
# after discard, the two arrays no longer reference each other
assert_(arr_wb.ctypes.data != 0)
assert_equal(arr_wb.base, None)
if HAS_REFCOUNT:
assert_equal(arr_cnt, sys.getrefcount(arr))
# assigning to arr_wb does not get transferred to arr
arr_wb[...] = 100
assert_equal(arr, orig)
class TestArange:
def test_infinite(self):
assert_raises_regex(
ValueError, "size exceeded",
np.arange, 0, np.inf
)
def test_nan_step(self):
assert_raises_regex(
ValueError, "cannot compute length",
np.arange, 0, 1, np.nan
)
def test_zero_step(self):
assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)
assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)
# empty range
assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)
assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
def test_require_range(self):
assert_raises(TypeError, np.arange)
assert_raises(TypeError, np.arange, step=3)
assert_raises(TypeError, np.arange, dtype='int64')
assert_raises(TypeError, np.arange, start=4)
def test_start_stop_kwarg(self):
keyword_stop = np.arange(stop=3)
keyword_zerotostop = np.arange(start=0, stop=3)
keyword_start_stop = np.arange(start=3, stop=9)
assert len(keyword_stop) == 3
assert len(keyword_zerotostop) == 3
assert len(keyword_start_stop) == 6
assert_array_equal(keyword_stop, keyword_zerotostop)
class TestArrayFinalize:
""" Tests __array_finalize__ """
def test_receives_base(self):
# gh-11237
class SavesBase(np.ndarray):
def __array_finalize__(self, obj):
self.saved_base = self.base
a = np.array(1).view(SavesBase)
assert_(a.saved_base is a.base)
def test_bad_finalize1(self):
class BadAttributeArray(np.ndarray):
@property
def __array_finalize__(self):
raise RuntimeError("boohoo!")
with pytest.raises(TypeError, match="not callable"):
np.arange(10).view(BadAttributeArray)
def test_bad_finalize2(self):
class BadAttributeArray(np.ndarray):
def __array_finalize__(self):
raise RuntimeError("boohoo!")
with pytest.raises(TypeError, match="takes 1 positional"):
np.arange(10).view(BadAttributeArray)
def test_bad_finalize3(self):
class BadAttributeArray(np.ndarray):
def __array_finalize__(self, obj):
raise RuntimeError("boohoo!")
with pytest.raises(RuntimeError, match="boohoo!"):
np.arange(10).view(BadAttributeArray)
def test_lifetime_on_error(self):
# gh-11237
class RaisesInFinalize(np.ndarray):
def __array_finalize__(self, obj):
# crash, but keep this object alive
raise Exception(self)
# a plain object can't be weakref'd
class Dummy: pass
# get a weak reference to an object within an array
obj_arr = np.array(Dummy())
obj_ref = weakref.ref(obj_arr[()])
# get an array that crashed in __array_finalize__
with assert_raises(Exception) as e:
obj_arr.view(RaisesInFinalize)
obj_subarray = e.exception.args[0]
del e
assert_(isinstance(obj_subarray, RaisesInFinalize))
# reference should still be held by obj_arr
break_cycles()
assert_(obj_ref() is not None, "object should not already be dead")
del obj_arr
break_cycles()
assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
del obj_subarray
break_cycles()
assert_(obj_ref() is None, "no references should remain")
def test_can_use_super(self):
class SuperFinalize(np.ndarray):
def __array_finalize__(self, obj):
self.saved_result = super().__array_finalize__(obj)
a = np.array(1).view(SuperFinalize)
assert_(a.saved_result is None)
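    # Minimal illustrative subclass (not from the original suite): views and
    # slices re-run __array_finalize__, so attributes set there propagate.
    def test_attribute_propagation_sketch(self):
        class WithUnit(np.ndarray):
            def __array_finalize__(self, obj):
                self.unit = getattr(obj, 'unit', 'dimensionless')
        a = np.arange(4).view(WithUnit)
        assert_(a.unit == 'dimensionless')
        a.unit = 'm'
        assert_(a[:2].unit == 'm')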
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
def test_equal_override():
# gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
# did not respect overrides with __array_priority__ or __array_ufunc__.
# The PR fixed this for __array_priority__ and __array_ufunc__ = None.
class MyAlwaysEqual:
def __eq__(self, other):
return "eq"
def __ne__(self, other):
return "ne"
class MyAlwaysEqualOld(MyAlwaysEqual):
__array_priority__ = 10000
class MyAlwaysEqualNew(MyAlwaysEqual):
__array_ufunc__ = None
array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
my_always_equal = my_always_equal_cls()
assert_equal(my_always_equal == array, 'eq')
assert_equal(array == my_always_equal, 'eq')
assert_equal(my_always_equal != array, 'ne')
assert_equal(array != my_always_equal, 'ne')
@pytest.mark.parametrize(
["fun", "npfun"],
[
(_multiarray_tests.npy_cabs, np.absolute),
(_multiarray_tests.npy_carg, np.angle)
]
)
@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan])
@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan])
@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__())
def test_npymath_complex(fun, npfun, x, y, test_dtype):
# Smoketest npymath functions
z = test_dtype(complex(x, y))
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
def test_npymath_real():
# Smoketest npymath functions
from numpy.core._multiarray_tests import (
npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
funcs = {npy_log10: np.log10,
npy_cosh: np.cosh,
npy_sinh: np.sinh,
npy_tan: np.tan,
npy_tanh: np.tanh}
vals = (1, np.inf, -np.inf, np.nan)
types = (np.float32, np.float64, np.longdouble)
with np.errstate(all='ignore'):
for fun, npfun in funcs.items():
for x, t in itertools.product(vals, types):
z = t(x)
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
def test_uintalignment_and_alignment():
    # alignment code needs to satisfy these requirements:
    #  1. numpy structs match C struct layout
    #  2. ufuncs/casting are safe wrt aligned access
    #  3. copy code is safe wrt "uint aligned" access
    #
    # Complex types are the main problem, whose alignment may not be the same
    # as their "uint alignment".
    #
    # This test might only fail on certain platforms, where uint64 alignment
    # is not equal to complex64 alignment. The last two checks will only fail
    # when numpy is built with DEBUG=1.
d1 = np.dtype('u1,c8', align=True)
d2 = np.dtype('u4,c8', align=True)
d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
# check that C struct matches numpy struct size
s = _multiarray_tests.get_struct_alignments()
for d, (alignment, size) in zip([d1,d2,d3], s):
assert_equal(d.alignment, alignment)
assert_equal(d.itemsize, size)
# check that ufuncs don't complain in debug mode
# (this is probably OK if the aligned flag is true above)
src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often
np.exp(src) # assert fails?
# check that copy code doesn't complain in debug mode
dst = np.zeros((2,2), dtype='c8')
dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?
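# A small illustrative check (not part of the original test): for an aligned
# struct dtype, the struct alignment is at least the largest field alignment
# and the itemsize is padded to a multiple of it, mirroring requirement 1.
def test_struct_alignment_sketch():
    dt = np.dtype('u1,c8', align=True)
    assert_(dt.alignment >= np.dtype('c8').alignment)
    assert_(dt.itemsize % dt.alignment == 0)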
class TestAlignment:
# adapted from scipy._lib.tests.test__util.test__aligned_zeros
    # Checks that unusual memory alignments don't trip up numpy.
    # In particular, check that RELAXED_STRIDES doesn't trip alignment
    # assertions in NDEBUG mode for size-0 arrays (gh-12503)
def check(self, shape, dtype, order, align):
err_msg = repr((shape, dtype, order, align))
x = _aligned_zeros(shape, dtype, order, align=align)
if align is None:
align = np.dtype(dtype).alignment
assert_equal(x.__array_interface__['data'][0] % align, 0)
if hasattr(shape, '__len__'):
assert_equal(x.shape, shape, err_msg)
else:
assert_equal(x.shape, (shape,), err_msg)
assert_equal(x.dtype, dtype)
if order == "C":
assert_(x.flags.c_contiguous, err_msg)
elif order == "F":
if x.size > 0:
assert_(x.flags.f_contiguous, err_msg)
elif order is None:
assert_(x.flags.c_contiguous, err_msg)
else:
raise ValueError()
def test_various_alignments(self):
for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]:
for n in [0, 1, 3, 11]:
for order in ["C", "F", None]:
for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']:
if dtype == 'O':
# object dtype can't be misaligned
continue
for shape in [n, (1, 2, 3, n)]:
self.check(shape, np.dtype(dtype), order, align)
def test_strided_loop_alignments(self):
        # in particular, test that complex64 and float128 take the right
        # alignment code-paths, since these are especially problematic. It is
        # useful to turn on USE_DEBUG for this test, so lowlevel-loop asserts
        # are run.
for align in [1, 2, 4, 8, 12, 16, None]:
xf64 = _aligned_zeros(3, np.float64)
xc64 = _aligned_zeros(3, np.complex64, align=align)
xf128 = _aligned_zeros(3, np.longdouble, align=align)
# test casting, both to and from misaligned
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, "Casting complex values")
xc64.astype('f8')
xf64.astype(np.complex64)
test = xc64 + xf64
xf128.astype('f8')
xf64.astype(np.longdouble)
test = xf128 + xf64
test = xf128 + xc64
# test copy, both to and from misaligned
# contig copy
xf64[:] = xf64.copy()
xc64[:] = xc64.copy()
xf128[:] = xf128.copy()
# strided copy
xf64[::2] = xf64[::2].copy()
xc64[::2] = xc64[::2].copy()
xf128[::2] = xf128[::2].copy()
def test_getfield():
a = np.arange(32, dtype='uint16')
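    # For values below 256 the low byte of each uint16 equals the value and
    # the high byte is zero; which byte offset is "low" depends on the native
    # byte order probed below.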
if sys.byteorder == 'little':
i = 0
j = 1
else:
i = 1
j = 0
b = a.getfield('int8', i)
assert_equal(b, a)
b = a.getfield('int8', j)
assert_equal(b, 0)
pytest.raises(ValueError, a.getfield, 'uint8', -1)
pytest.raises(ValueError, a.getfield, 'uint8', 16)
pytest.raises(ValueError, a.getfield, 'uint64', 0)
class TestViewDtype:
"""
Verify that making a view of a non-contiguous array works as expected.
"""
def test_smaller_dtype_multiple(self):
# x is non-contiguous
x = np.arange(10, dtype='<i4')[::2]
with pytest.raises(ValueError,
match='the last axis must be contiguous'):
x.view('<i2')
expected = [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0]]
assert_array_equal(x[:, np.newaxis].view('<i2'), expected)
def test_smaller_dtype_not_multiple(self):
# x is non-contiguous
x = np.arange(5, dtype='<i4')[::2]
with pytest.raises(ValueError,
match='the last axis must be contiguous'):
x.view('S3')
with pytest.raises(ValueError,
match='When changing to a smaller dtype'):
x[:, np.newaxis].view('S3')
# Make sure the problem is because of the dtype size
expected = [[b''], [b'\x02'], [b'\x04']]
assert_array_equal(x[:, np.newaxis].view('S4'), expected)
def test_larger_dtype_multiple(self):
# x is non-contiguous in the first dimension, contiguous in the last
x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
expected = np.array([[65536], [327684], [589832],
[851980], [1114128]], dtype='<i4')
assert_array_equal(x.view('<i4'), expected)
def test_larger_dtype_not_multiple(self):
# x is non-contiguous in the first dimension, contiguous in the last
x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
with pytest.raises(ValueError,
match='When changing to a larger dtype'):
x.view('S3')
# Make sure the problem is because of the dtype size
expected = [[b'\x00\x00\x01'], [b'\x04\x00\x05'], [b'\x08\x00\t'],
[b'\x0c\x00\r'], [b'\x10\x00\x11']]
assert_array_equal(x.view('S4'), expected)
def test_f_contiguous(self):
# x is F-contiguous
x = np.arange(4 * 3, dtype='<i4').reshape(4, 3).T
with pytest.raises(ValueError,
match='the last axis must be contiguous'):
x.view('<i2')
def test_non_c_contiguous(self):
# x is contiguous in axis=-1, but not C-contiguous in other axes
x = np.arange(2 * 3 * 4, dtype='i1').\
reshape(2, 3, 4).transpose(1, 0, 2)
expected = [[[256, 770], [3340, 3854]],
[[1284, 1798], [4368, 4882]],
[[2312, 2826], [5396, 5910]]]
assert_array_equal(x.view('<i2'), expected)
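    # Illustrative sketch (not in the original suite) of the size rule the
    # tests above rely on: a view rescales the last axis by the ratio of
    # itemsizes, so the byte count must divide evenly.
    def test_size_rule_sketch(self):
        x = np.zeros((3, 4), dtype='<i2')
        assert_equal(x.view('<i4').shape, (3, 2))
        assert_equal(x.view('<i1').shape, (3, 8))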
# Test various array sizes that hit different code paths in quicksort-avx512
@pytest.mark.parametrize("N", [8, 16, 24, 32, 48, 64, 96, 128, 151, 191,
256, 383, 512, 1023, 2047])
def test_sort_float(N):
    # (1) regular data with NaN sprinkled
np.random.seed(42)
arr = -0.5 + np.random.sample(N).astype('f')
arr[np.random.choice(arr.shape[0], 3)] = np.nan
assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
# (2) with +INF
infarr = np.inf*np.ones(N, dtype='f')
infarr[np.random.choice(infarr.shape[0], 5)] = -1.0
assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap'))
# (3) with -INF
neginfarr = -np.inf*np.ones(N, dtype='f')
neginfarr[np.random.choice(neginfarr.shape[0], 5)] = 1.0
assert_equal(np.sort(neginfarr, kind='quick'),
np.sort(neginfarr, kind='heap'))
# (4) with +/-INF
infarr = np.inf*np.ones(N, dtype='f')
    infarr[np.random.choice(infarr.shape[0], N // 2)] = -np.inf
assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap'))
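# Sketch (not part of the original coverage): every sort kind places NaNs
# last, which is the invariant that lets the quicksort and heapsort results
# above be compared elementwise.
def test_sort_nan_last_sketch():
    arr = np.array([np.nan, 1.0, -np.inf, np.inf], dtype='f')
    out = np.sort(arr, kind='quick')
    assert_equal(out[:3], np.array([-np.inf, 1.0, np.inf], dtype='f'))
    assert_(np.isnan(out[-1]))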
def test_sort_int():
# Random data with NPY_MAX_INT32 and NPY_MIN_INT32 sprinkled
rng = np.random.default_rng(42)
N = 2047
minv = np.iinfo(np.int32).min
maxv = np.iinfo(np.int32).max
arr = rng.integers(low=minv, high=maxv, size=N).astype('int32')
arr[np.random.choice(arr.shape[0], 10)] = minv
arr[np.random.choice(arr.shape[0], 10)] = maxv
assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
def test_sort_uint():
# Random data with NPY_MAX_UINT32 sprinkled
rng = np.random.default_rng(42)
N = 2047
maxv = np.iinfo(np.uint32).max
arr = rng.integers(low=0, high=maxv, size=N).astype('uint32')
arr[np.random.choice(arr.shape[0], 10)] = maxv
assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))