| response (string, 1-33.1k chars) | instruction (string, 22-582k chars) |
|---|---|
Calculate the data size (in bytes) following the given `Header`. | def _hdr_data_size(header):
"""Calculate the data size (in bytes) following the given `Header`."""
size = 0
naxis = header.get("NAXIS", 0)
if naxis > 0:
size = 1
for idx in range(naxis):
size = size * header["NAXIS" + str(idx + 1)]
bitpix = header["BITPIX"]
gcount = header.get("GCOUNT", 1)
pcount = header.get("PCOUNT", 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size |
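A quick illustration of the size arithmetic; a plain dict stands in for an astropy Header here (only ``.get`` and item access are used) and the keyword values are made up:
>>> hdr = {"NAXIS": 2, "NAXIS1": 100, "NAXIS2": 200, "BITPIX": -32}
>>> _hdr_data_size(hdr)  # abs(-32) * 1 * (0 + 100 * 200) // 8
80000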
Returns the first item returned by iterating over an iterable object.
Examples
--------
>>> a = [1, 2, 3]
>>> first(a)
1 | def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Examples
--------
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable)) |
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/ | def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter("__name__")):
if sub not in _seen:
_seen.add(sub)
yield sub
yield from itersubclasses(sub, _seen) |
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed. | def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.current_thread()
single_thread = (
threading.active_count() == 1 and curr_thread.name == "MainThread"
)
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn(
f"KeyboardInterrupt ignored until {func.__name__} is complete!",
AstropyUserWarning,
)
self.sigint_received = True
sigint_handler = SigintHandler()
# Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped |
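A minimal usage sketch; ``flush_to_disk`` is a hypothetical function used only to show where the decorator goes:
>>> @ignore_sigint
... def flush_to_disk(hdulist):
...     # A SIGINT arriving during this call is ignored (with a warning) and
...     # re-raised as KeyboardInterrupt once the function returns.
...     hdulist.flush()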
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable. | def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, "readable"):
return f.readable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "read"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "r+"):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True |
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable. | def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, "writable"):
return f.writable()
if hasattr(f, "closed") and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError("I/O operation on closed file")
if not hasattr(f, "write"):
return False
if hasattr(f, "mode") and not any(c in f.mode for c in "wa+"):
return False
# Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True |
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
This also returns True if the given object is a higher-level
wrapper on top of a FileIO object, such as a TextIOWrapper. | def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
This also returns True if the given object is a higher-level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, "buffer"):
return isfile(f.buffer)
elif hasattr(f, "raw"):
return isfile(f.raw)
return False |
Returns the 'name' of file-like object *f*, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string, f itself is returned. | def fileobj_name(f):
"""
Returns the 'name' of file-like object *f*, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string, f itself is returned.
"""
if isinstance(f, (str, bytes)):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, "name"):
return f.name
elif hasattr(f, "filename"):
return f.filename
elif hasattr(f, "__class__"):
return str(f.__class__)
else:
return str(type(f)) |
Returns True if the given file-like object is closed or if *f* is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state. | def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if *f* is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, path_like):
return True
if hasattr(f, "closed"):
return f.closed
elif hasattr(f, "fileobj") and hasattr(f.fileobj, "closed"):
return f.fileobj.closed
elif hasattr(f, "fp") and hasattr(f.fp, "closed"):
return f.fp.closed
else:
return False |
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None. | def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, "fileobj") and hasattr(f.fileobj, "mode"):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, "fileobj_mode"):
return f.fileobj_mode
# PIL Image-like: investigate the fp (file buffer)
elif hasattr(f, "fp") and hasattr(f.fp, "mode"):
fileobj = f.fp
# FileIO-like (normal open(...)); keep as is.
elif hasattr(f, "mode"):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj) |
Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode. | def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return "rb"
elif mode == gzip.WRITE:
return "wb"
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if "+" in mode:
mode = mode.replace("+", "")
mode += "+"
return mode |
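A small sketch of the '+' normalization; the stand-in class is hypothetical and only provides the ``mode`` attribute the helper reads:
>>> class _FakeFile:
...     mode = "r+b"  # oddly ordered mode string
>>> _fileobj_normalize_mode(_FakeFile())
'rb+'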
Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default. | def fileobj_is_binary(f):
"""
Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, "binary"):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return "b" in mode
else:
return True |
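For instance, with the standard library's in-memory buffers:
>>> import io
>>> fileobj_is_binary(io.BytesIO())   # no mode attribute and not text, so assumed binary
True
>>> fileobj_is_binary(io.StringIO())  # an io.TextIOBase subclass
False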
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers. | def fill(text, width, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split("\n\n")
def maybe_fill(t):
if all(len(line) < width for line in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return "\n\n".join(maybe_fill(p) for p in paragraphs) |
Create a numpy array from a file or a file-like object. | def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if sys.platform == "darwin" and Version(platform.mac_ver()[0]) < Version(
"10.9"
):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024**3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
# copy is needed because an array constructed over the read-only bytes
# buffer is itself read-only
array = array.copy()
return array |
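A sketch of the file-like branch using an in-memory buffer (the values are arbitrary):
>>> import io
>>> import numpy as np
>>> buf = io.BytesIO(np.arange(4, dtype="<i4").tobytes())
>>> _array_from_file(buf, dtype=np.dtype("<i4"), count=4).tolist()
[0, 1, 2, 3]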
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : ndarray
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used. | def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : ndarray
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
try:
seekable = outfile.seekable()
except AttributeError:
seekable = False
if isfile(outfile) and seekable:
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (
sys.platform == "darwin"
and arr.nbytes >= _OSX_WRITE_LIMIT + 1
and arr.nbytes % 4096 == 0
):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith("win"):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx : idx + chunksize], outfile)
idx += chunksize |
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`). | def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
it does not have the buffer interface, a TypeError should be raised,
# in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, "nditer"):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order="C"):
fileobj.write(item.tobytes())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if (sys.byteorder == "little" and byteorder == ">") or (
sys.byteorder == "big" and byteorder == "<"
):
for item in arr.flat:
fileobj.write(item.byteswap().tobytes())
else:
for item in arr.flat:
fileobj.write(item.tobytes()) |
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode. | def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
elif not binmode and not isinstance(f, str):
s = decode_ascii(s)
f.write(s) |
Converts an array to a new dtype--if the itemsize of the new dtype is
the same as the old dtype and the two dtypes are not both numeric, a view is
returned. Otherwise a new array must be created. | def _convert_array(array, dtype):
"""
Converts an array to a new dtype--if the itemsize of the new dtype is
the same as the old dtype and the two dtypes are not both numeric, a view is
returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif array.dtype.itemsize == dtype.itemsize and not (
np.issubdtype(array.dtype, np.number) and np.issubdtype(dtype, np.number)
):
# Includes a special case when both dtypes are at least numeric to
# account for old Trac ticket 218 (now inaccessible).
return array.view(dtype)
else:
return array.astype(dtype) |
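Illustrating both branches with arbitrary arrays: a view when the itemsizes match and the dtypes are not both numeric, a converted copy otherwise:
>>> import numpy as np
>>> a = np.array([b"ab", b"cd"], dtype="S2")
>>> _convert_array(a, np.dtype("<u2")).base is a   # same itemsize, not both numeric
True
>>> _convert_array(np.arange(3, dtype=np.int32), np.dtype(np.float32)).dtype.name
'float32'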
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range. | def _pseudo_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
# special case for int8
if dtype.kind == "i" and dtype.itemsize == 1:
return -128
assert dtype.kind == "u"
return 1 << (dtype.itemsize * 8 - 1) |
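Two quick examples of the resulting midpoint values:
>>> import numpy as np
>>> _pseudo_zero(np.dtype("uint16"))  # midpoint of the uint16 range
32768
>>> _pseudo_zero(np.dtype("int8"))    # the special-cased signed byte
-128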
Converts a given string to either an int or a float if necessary. | def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num |
Split a long string into parts where each part is no longer than ``width``
and no word is cut into two pieces. But if there are any single words
which are longer than ``width``, then they will be split in the middle of
the word. | def _words_group(s, width):
"""
Split a long string into parts where each part is no longer than ``width``
and no word is cut into two pieces. But if there are any single words
which are longer than ``width``, then they will be split in the middle of
the word.
"""
words = []
slen = len(s)
# appending one blank at the end always ensures that the "last" blank
# is beyond the end of the string
arr = np.frombuffer(s.encode("utf8") + b" ", dtype="S1")
# locations of the blanks
blank_loc = np.nonzero(arr == b" ")[0]
offset = 0
xoffset = 0
while True:
try:
loc = np.nonzero(blank_loc >= width + offset)[0][0]
except IndexError:
loc = len(blank_loc)
if loc > 0:
offset = blank_loc[loc - 1] + 1
else:
offset = -1
# check for one word longer than width, break it in the middle
if offset <= xoffset:
offset = min(xoffset + width, slen)
# collect the pieces in a list
words.append(s[xoffset:offset])
if offset >= slen:
break
xoffset = offset
return words |
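A worked example with a width of 16 characters; note that the blank following a word stays attached to the preceding chunk:
>>> _words_group("the quick brown fox jumps over the lazy dog", 16)
['the quick brown ', 'fox jumps over ', 'the lazy dog']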
Create a temporary file name which should not already exist. Use the
directory of the input file as the directory for the mkstemp() output. | def _tmp_name(input):
"""
Create a temporary file name which should not already exist. Use the
directory of the input file as the directory for the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn |
If the array has an mmap.mmap at base of its base chain, return the mmap
object; otherwise return None. | def _get_array_mmap(array):
"""
If the array has an mmap.mmap at base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, "base") and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base |
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned. | def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
# _str_to_num may return a float (e.g. for '42.0'),
# so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default |
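For example, with an arbitrary default:
>>> _extract_number("42.0", default=1)
42
>>> _extract_number("not-a-number", default=1)
1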
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file. | def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(f"io/fits/tests/data/{filename}", "astropy") |
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation. | def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
# The following implementation converts the string to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in "SU":
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == "S" else 4
dt_int = f"({dt.itemsize // bpc},){dt.byteorder}u{bpc}"
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j : j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array |
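A small in-place example; the input array itself is modified and also returned:
>>> import numpy as np
>>> a = np.array(["abc  ", "de   ", "fghij"], dtype="S5")
>>> _rstrip_inplace(a).tolist()
[b'abc', b'de', b'fghij']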
Check whether data is a dask array.
We avoid importing dask unless it is likely it is a dask array,
so that non-dask code is not slowed down. | def _is_dask_array(data):
"""Check whether data is a dask array.
We avoid importing dask unless it is likely it is a dask array,
so that non-dask code is not slowed down.
"""
if not hasattr(data, "compute"):
return False
try:
from dask.array import Array
except ImportError:
# If we cannot import dask, surely this cannot be a
# dask array!
return False
else:
return isinstance(data, Array) |
Iterates through the subclasses of _BaseHDU and uses that class's
match_header() method to determine which subclass to instantiate.
It's important to be aware that the class hierarchy is traversed in a
depth-last order. Each match_header() should identify an HDU type as
uniquely as possible. Abstract types may choose to simply return False
or raise NotImplementedError to be skipped.
If any unexpected exceptions are raised while evaluating
match_header(), the type is taken to be _CorruptedHDU.
Used primarily by _BaseHDU._readfrom_internal and _BaseHDU._from_data to
find an appropriate HDU class to use based on values in the header. | def _hdu_class_from_header(cls, header):
"""
Iterates through the subclasses of _BaseHDU and uses that class's
match_header() method to determine which subclass to instantiate.
It's important to be aware that the class hierarchy is traversed in a
depth-last order. Each match_header() should identify an HDU type as
uniquely as possible. Abstract types may choose to simply return False
or raise NotImplementedError to be skipped.
If any unexpected exceptions are raised while evaluating
match_header(), the type is taken to be _CorruptedHDU.
Used primarily by _BaseHDU._readfrom_internal and _BaseHDU._from_data to
find an appropriate HDU class to use based on values in the header.
"""
klass = cls # By default, if no subclasses are defined
if header:
for c in reversed(list(itersubclasses(cls))):
try:
# HDU classes built into astropy.io.fits are always considered,
# but extension HDUs must be explicitly registered
if not (
c.__module__.startswith("astropy.io.fits.")
or c in cls._hdu_registry
):
continue
# skip _NonstandardExtHDU and _ExtensionHDU since those are deprecated
if c.match_header(header) and c not in (
_NonstandardExtHDU,
_ExtensionHDU,
):
klass = c
break
except NotImplementedError:
continue
except Exception as exc:
warnings.warn(
"An exception occurred matching an HDU header to the "
f"appropriate HDU type: {exc}",
AstropyUserWarning,
)
warnings.warn(
"The HDU will be treated as corrupted.", AstropyUserWarning
)
klass = _CorruptedHDU
del exc
break
return klass |
Given a list of objects, returns a mapping of objects in that list to the
index or indices at which that object was found in the list. | def _par_indices(names):
"""
Given a list of objects, returns a mapping of objects in that list to the
index or indices at which that object was found in the list.
"""
unique = {}
for idx, name in enumerate(names):
# Case insensitive
name = name.upper()
if name in unique:
unique[name].append(idx)
else:
unique[name] = [idx]
return unique |
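For example (matching is case-insensitive and insertion order is preserved):
>>> _par_indices(["naxis", "NAXIS", "BITPIX"])
{'NAXIS': [0, 1], 'BITPIX': [2]}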
Given a list of parnames, including possible duplicates, returns a new list
of parnames with duplicates prepended by one or more underscores to make
them unique. This is also case insensitive. | def _unique_parnames(names):
"""
Given a list of parnames, including possible duplicates, returns a new list
of parnames with duplicates prepended by one or more underscores to make
them unique. This is also case insensitive.
"""
upper_names = set()
unique_names = []
for name in names:
name_upper = name.upper()
while name_upper in upper_names:
name = "_" + name
name_upper = "_" + name_upper
unique_names.append(name)
upper_names.add(name_upper)
return unique_names |
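For example:
>>> _unique_parnames(["DATE", "date", "TIME"])
['DATE', '_date', 'TIME']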
Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : str, file-like or `pathlib.Path`
File to be opened.
mode : str, optional
Open mode, 'readonly', 'update', 'append', 'denywrite', or
'ostream'. Default is 'readonly'.
If ``name`` is a file object that is already opened, ``mode`` must
match the mode the file was opened with, readonly (rb), update (rb+),
append (ab+), ostream (w), denywrite (rb)).
memmap : bool, optional
Is memory mapping to be used? This value is obtained from the
configuration item ``astropy.io.fits.Conf.use_memmap``.
Default is `True`.
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that
a backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
Default is `False`.
cache : bool, optional
If the file name is a URL, `~astropy.utils.data.download_file` is used
to open the file. This specifies whether or not to save the file
locally in Astropy's download cache. Default is `True`.
lazy_load_hdus : bool, optional
To avoid reading all the HDUs and headers in a FITS file immediately
upon opening. This is an optimization especially useful for large
files, as FITS has no way of determining the number and offsets of all
the HDUs in a file without scanning through the file and reading all
the headers. Default is `True`.
To disable lazy loading and read all HDUs immediately (the old
behavior) use ``lazy_load_hdus=False``. This can lead to fewer
surprises--for example with lazy loading enabled, ``len(hdul)``
can be slow, as it means the entire FITS file needs to be read in
order to determine the number of HDUs. ``lazy_load_hdus=False``
ensures that all HDUs have already been loaded after the file has
been opened.
.. versionadded:: 1.3
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the central value and
``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data
with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as
``uint16`` data. Default is `True` so that the pseudo-unsigned
integer convention is assumed.
ignore_missing_end : bool, optional
Do not raise an exception when opening a file that is missing an
``END`` card in the last header. Default is `False`.
ignore_missing_simple : bool, optional
Do not raise an exception when the SIMPLE keyword is missing. Note
that io.fits will raise a warning if a SIMPLE card is present but
written in a way that does not follow the FITS Standard.
Default is `False`.
.. versionadded:: 4.2
checksum : bool, str, optional
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values
(when present in the HDU header) match the header and data of all HDU's
in the file. Updates to a file that already has a checksum will
preserve and update the existing checksums unless this argument is
given a value of 'remove', in which case the CHECKSUM and DATASUM
values are not checked, and are removed when saving changes to the
file. Default is `False`.
disable_image_compression : bool, optional
If `True`, treats compressed image HDU's like normal binary table
HDU's. Default is `False`.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. Default is `False`.
character_as_bytes : bool, optional
Whether to return bytes for string columns, otherwise unicode strings
are returned, but this does not respect memory mapping and loads the
whole column in memory when accessed. Default is `False`.
ignore_blank : bool, optional
If `True`, the BLANK keyword is ignored if present.
Default is `False`.
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled image
data, restore the data to the original type and reapply the original
BSCALE/BZERO values. This could lead to loss of accuracy if scaling
back to integer values after performing floating point operations on
the data. Default is `False`.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name`` starts with the Amazon S3 storage prefix ``s3://`` or the
Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g., ``http://``) but in this case you must
explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
decompress_in_memory : bool, optional
By default files are decompressed progressively depending on what data
is needed. This is good for memory usage, avoiding decompression of
the whole file, but it can be slow. With decompress_in_memory=True it
is possible to decompress instead the whole file in memory.
.. versionadded:: 6.0
Returns
-------
hdulist : `HDUList`
`HDUList` containing all of the header data units in the file. | def fitsopen(
name,
mode="readonly",
memmap=None,
save_backup=False,
cache=True,
lazy_load_hdus=None,
ignore_missing_simple=False,
*,
use_fsspec=None,
fsspec_kwargs=None,
decompress_in_memory=False,
**kwargs,
):
"""Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : str, file-like or `pathlib.Path`
File to be opened.
mode : str, optional
Open mode, 'readonly', 'update', 'append', 'denywrite', or
'ostream'. Default is 'readonly'.
If ``name`` is a file object that is already opened, ``mode`` must
match the mode the file was opened with, readonly (rb), update (rb+),
append (ab+), ostream (w), denywrite (rb)).
memmap : bool, optional
Is memory mapping to be used? This value is obtained from the
configuration item ``astropy.io.fits.Conf.use_memmap``.
Default is `True`.
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that
a backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
Default is `False`.
cache : bool, optional
If the file name is a URL, `~astropy.utils.data.download_file` is used
to open the file. This specifies whether or not to save the file
locally in Astropy's download cache. Default is `True`.
lazy_load_hdus : bool, optional
To avoid reading all the HDUs and headers in a FITS file immediately
upon opening. This is an optimization especially useful for large
files, as FITS has no way of determining the number and offsets of all
the HDUs in a file without scanning through the file and reading all
the headers. Default is `True`.
To disable lazy loading and read all HDUs immediately (the old
behavior) use ``lazy_load_hdus=False``. This can lead to fewer
surprises--for example with lazy loading enabled, ``len(hdul)``
can be slow, as it means the entire FITS file needs to be read in
order to determine the number of HDUs. ``lazy_load_hdus=False``
ensures that all HDUs have already been loaded after the file has
been opened.
.. versionadded:: 1.3
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the central value and
``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data
with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as
``uint16`` data. Default is `True` so that the pseudo-unsigned
integer convention is assumed.
ignore_missing_end : bool, optional
Do not raise an exception when opening a file that is missing an
``END`` card in the last header. Default is `False`.
ignore_missing_simple : bool, optional
Do not raise an exception when the SIMPLE keyword is missing. Note
that io.fits will raise a warning if a SIMPLE card is present but
written in a way that does not follow the FITS Standard.
Default is `False`.
.. versionadded:: 4.2
checksum : bool, str, optional
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values
(when present in the HDU header) match the header and data of all HDU's
in the file. Updates to a file that already has a checksum will
preserve and update the existing checksums unless this argument is
given a value of 'remove', in which case the CHECKSUM and DATASUM
values are not checked, and are removed when saving changes to the
file. Default is `False`.
disable_image_compression : bool, optional
If `True`, treats compressed image HDU's like normal binary table
HDU's. Default is `False`.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. Default is `False`.
character_as_bytes : bool, optional
Whether to return bytes for string columns, otherwise unicode strings
are returned, but this does not respect memory mapping and loads the
whole column in memory when accessed. Default is `False`.
ignore_blank : bool, optional
If `True`, the BLANK keyword is ignored if present.
Default is `False`.
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled image
data, restore the data to the original type and reapply the original
BSCALE/BZERO values. This could lead to loss of accuracy if scaling
back to integer values after performing floating point operations on
the data. Default is `False`.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
use_fsspec : bool, optional
Use `fsspec.open` to open the file? Defaults to `False` unless
``name`` starts with the Amazon S3 storage prefix ``s3://`` or the
Google Cloud Storage prefix ``gs://``. Can also be used for paths
with other prefixes (e.g., ``http://``) but in this case you must
explicitly pass ``use_fsspec=True``.
Use of this feature requires the optional ``fsspec`` package.
A ``ModuleNotFoundError`` will be raised if the dependency is missing.
.. versionadded:: 5.2
fsspec_kwargs : dict, optional
Keyword arguments passed on to `fsspec.open`. This can be used to
configure cloud storage credentials and caching behavior.
For example, pass ``fsspec_kwargs={"anon": True}`` to enable
anonymous access to Amazon S3 open data buckets.
See ``fsspec``'s documentation for available parameters.
.. versionadded:: 5.2
decompress_in_memory : bool, optional
By default files are decompressed progressively depending on what data
is needed. This is good for memory usage, avoiding decompression of
the whole file, but it can be slow. With decompress_in_memory=True it
is possible to decompress instead the whole file in memory.
.. versionadded:: 6.0
Returns
-------
hdulist : `HDUList`
`HDUList` containing all of the header data units in the file.
"""
from astropy.io.fits import conf
if memmap is None:
# distinguish between True (kwarg explicitly set)
# and None (preference for memmap in config, might be ignored)
memmap = None if conf.use_memmap else False
else:
memmap = bool(memmap)
if lazy_load_hdus is None:
lazy_load_hdus = conf.lazy_load_hdus
else:
lazy_load_hdus = bool(lazy_load_hdus)
if "uint" not in kwargs:
kwargs["uint"] = conf.enable_uint
if not name:
raise ValueError(f"Empty filename: {name!r}")
return HDUList.fromfile(
name,
mode,
memmap,
save_backup,
cache,
lazy_load_hdus,
ignore_missing_simple,
use_fsspec=use_fsspec,
fsspec_kwargs=fsspec_kwargs,
decompress_in_memory=decompress_in_memory,
**kwargs,
) |
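This factory is what astropy exposes publicly as ``astropy.io.fits.open``. A typical usage sketch, with ``example.fits`` as a placeholder path:
>>> from astropy.io import fits
>>> with fits.open("example.fits", memmap=True) as hdul:
...     hdul.info()          # print a summary of the HDUs
...     data = hdul[1].data  # headers and data are loaded lazily by default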
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well. | def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == "little":
swap_types = ("<", "=")
else:
swap_types = ("<",)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
# must use field_dtype.base here since for multi-element dtypes,
# the .str will be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (
not isinstance(c, chararray.chararray)
and c.itemsize > 1
and c.dtype.str[0] in swap_types
):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({"names": names, "formats": formats, "offsets": offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype |
Decompress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The compressed buffer to be decompressed.
algorithm
A supported decompression algorithm.
settings
Any parameters for the given compression algorithm | def _decompress_tile(buf, *, algorithm: str, **settings):
"""
Decompress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The compressed buffer to be decompressed.
algorithm
A supported decompression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).decode(buf) |
Compress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The decompressed buffer to be compressed.
algorithm
A supported compression algorithm.
settings
Any parameters for the given compression algorithm | def _compress_tile(buf, *, algorithm: str, **settings):
"""
Compress the buffer of a tile using the given compression algorithm.
Parameters
----------
buf
The decompressed buffer to be compressed.
algorithm
A supported compression algorithm.
settings
Any parameters for the given compression algorithm
"""
return ALGORITHMS[algorithm](**settings).encode(buf) |
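A hedged round-trip sketch using the lossless GZIP_1 algorithm, which (as used elsewhere in this module) takes no extra settings; the exact buffer type returned by the codec may vary, so the result is normalized with np.asarray before viewing it as big-endian 16-bit integers:
>>> import numpy as np
>>> tile = np.arange(6, dtype=">i2")
>>> cdata = _compress_tile(tile, algorithm="GZIP_1")
>>> np.asarray(_decompress_tile(cdata, algorithm="GZIP_1")).view(">i2").tolist()
[0, 1, 2, 3, 4, 5]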
Extract the settings which are constant given a header | def _header_to_settings(header):
"""
Extract the settings which are constant given a header
"""
settings = {}
compression_type = header["ZCMPTYPE"]
if compression_type == "GZIP_2":
settings["itemsize"] = abs(header["ZBITPIX"]) // 8
elif compression_type in ("RICE_1", "RICE_ONE"):
settings["blocksize"] = _get_compression_setting(header, "BLOCKSIZE", 32)
settings["bytepix"] = _get_compression_setting(header, "BYTEPIX", 4)
elif compression_type == "HCOMPRESS_1":
settings["bytepix"] = 8
settings["scale"] = int(_get_compression_setting(header, "SCALE", 0))
settings["smooth"] = _get_compression_setting(header, "SMOOTH", 0)
return settings |
Update the settings with tile-specific settings | def _update_tile_settings(settings, compression_type, actual_tile_shape):
"""
Update the settings with tile-specific settings
"""
if compression_type in ("PLIO_1", "RICE_1", "RICE_ONE"):
# We have to calculate the tilesize from the shape of the tile not the
# header, so that it's correct for edge tiles etc.
settings["tilesize"] = prod(actual_tile_shape)
elif compression_type == "HCOMPRESS_1":
# HCOMPRESS requires 2D tiles, so to find the shape of the 2D tile we
# need to ignore all length 1 tile dimensions
# Also cfitsio expects the tile shape in C order
shape_2d = tuple(nd for nd in actual_tile_shape if nd != 1)
if len(shape_2d) != 2:
raise ValueError(f"HCOMPRESS expects two dimensional tiles, got {shape_2d}")
settings["nx"] = shape_2d[0]
settings["ny"] = shape_2d[1]
return settings |
Convert a buffer to an array.
This is a helper function which takes a raw buffer (as output by .decode)
and translates it into a numpy array with the correct dtype, endianness and
shape. | def _finalize_array(tile_buffer, *, bitpix, tile_shape, algorithm, lossless):
"""
Convert a buffer to an array.
This is a helper function which takes a raw buffer (as output by .decode)
and translates it into a numpy array with the correct dtype, endianness and
shape.
"""
tile_size = prod(tile_shape)
if algorithm.startswith("GZIP") or algorithm == "NOCOMPRESS":
# This algorithm is taken from fitsio
# https://github.com/astropy/astropy/blob/a8cb1668d4835562b89c0d0b3448ac72ca44db63/cextern/cfitsio/lib/imcompress.c#L6345-L6388
tile_bytesize = len(tile_buffer)
if tile_bytesize == tile_size * 2:
dtype = ">i2"
elif tile_bytesize == tile_size * 4:
if bitpix < 0 and lossless:
dtype = ">f4"
else:
dtype = ">i4"
elif tile_bytesize == tile_size * 8:
if bitpix < 0 and lossless:
dtype = ">f8"
else:
dtype = ">i8"
else:
# Just return the raw bytes
dtype = ">u1"
tile_data = np.asarray(tile_buffer).view(dtype).reshape(tile_shape)
else:
# For RICE_1 compression the tiles that are on the edge can end up
# being padded, so we truncate excess values
if algorithm in ("RICE_1", "RICE_ONE", "PLIO_1") and tile_size < len(
tile_buffer
):
tile_buffer = tile_buffer[:tile_size]
if tile_buffer.data.format == "b":
# NOTE: this feels like a Numpy bug - need to investigate
tile_data = np.asarray(tile_buffer, dtype=np.uint8).reshape(tile_shape)
else:
tile_data = np.asarray(tile_buffer).reshape(tile_shape)
return tile_data |
Decompress the data in a `~astropy.io.fits.CompImageHDU`.
Parameters
----------
compressed_data : `~astropy.io.fits.FITS_rec`
The compressed data
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
bintable : `~astropy.io.fits.BinTableHDU`
The binary table HDU, used to access the raw heap data
first_tile_index : iterable
The indices of the first tile to decompress along each dimension
last_tile_index : iterable
The indices of the last tile to decompress along each dimension
Returns
-------
data : `numpy.ndarray`
The decompressed data array. | def decompress_image_data_section(
compressed_data,
compression_type,
compressed_header,
bintable,
first_tile_index,
last_tile_index,
):
"""
Decompress the data in a `~astropy.io.fits.CompImageHDU`.
Parameters
----------
compressed_data : `~astropy.io.fits.FITS_rec`
The compressed data
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
bintable : `~astropy.io.fits.BinTableHDU`
The binary table HDU, used to access the raw heap data
first_tile_index : iterable
The indices of the first tile to decompress along each dimension
last_tile_index : iterable
The indices of the last tile to decompress along each dimension
Returns
-------
data : `numpy.ndarray`
The decompressed data array.
"""
compressed_coldefs = compressed_data._coldefs
_check_compressed_header(compressed_header)
tile_shape = _tile_shape(compressed_header)
data_shape = _data_shape(compressed_header)
first_array_index = first_tile_index * tile_shape
last_array_index = (last_tile_index + 1) * tile_shape
last_array_index = np.minimum(data_shape, last_array_index)
buffer_shape = tuple((last_array_index - first_array_index).astype(int))
image_data = np.empty(
buffer_shape, dtype=BITPIX2DTYPE[compressed_header["ZBITPIX"]]
)
quantized = "ZSCALE" in compressed_data.dtype.names
if image_data.size == 0:
return image_data
settings = _header_to_settings(compressed_header)
zbitpix = compressed_header["ZBITPIX"]
dither_method = DITHER_METHODS[compressed_header.get("ZQUANTIZ", "NO_DITHER")]
dither_seed = compressed_header.get("ZDITHER0", 0)
# NOTE: in the following and below we convert the column to a Numpy array
# for performance reasons, as accessing rows from a FITS_rec column is
# otherwise slow.
compressed_data_column = np.array(compressed_data["COMPRESSED_DATA"])
compressed_data_dtype = _column_dtype(compressed_coldefs, "COMPRESSED_DATA")
if "ZBLANK" in compressed_coldefs.dtype.names:
zblank_column = np.array(compressed_data["ZBLANK"])
else:
zblank_column = None
if "ZSCALE" in compressed_coldefs.dtype.names:
zscale_column = np.array(compressed_data["ZSCALE"])
else:
zscale_column = None
if "ZZERO" in compressed_coldefs.dtype.names:
zzero_column = np.array(compressed_data["ZZERO"])
else:
zzero_column = None
zblank_header = compressed_header.get("ZBLANK", None)
gzip_compressed_data_column = None
gzip_compressed_data_dtype = None
# If all the data is requested, read in all the heap.
if tuple(buffer_shape) == tuple(data_shape):
heap_cache = bintable._get_raw_data(
compressed_header["PCOUNT"],
np.uint8,
bintable._data_offset + bintable._theap,
)
else:
heap_cache = None
for row_index, tile_slices in _iter_array_tiles(
data_shape, tile_shape, first_tile_index, last_tile_index
):
# For tiles near the edge, the tile shape from the header might not be
# correct so we have to pass the shape manually.
actual_tile_shape = image_data[tile_slices].shape
settings = _update_tile_settings(settings, compression_type, actual_tile_shape)
if compressed_data_column[row_index][0] == 0:
if gzip_compressed_data_column is None:
gzip_compressed_data_column = np.array(
compressed_data["GZIP_COMPRESSED_DATA"]
)
gzip_compressed_data_dtype = _column_dtype(
compressed_coldefs, "GZIP_COMPRESSED_DATA"
)
# When quantizing floating point data, sometimes the data will not
# quantize efficiently. In these cases the raw floating point data can
# be losslessly GZIP compressed and stored in the `GZIP_COMPRESSED_DATA`
# column.
cdata = _get_data_from_heap(
bintable,
*gzip_compressed_data_column[row_index],
gzip_compressed_data_dtype,
heap_cache=heap_cache,
)
tile_buffer = _decompress_tile(cdata, algorithm="GZIP_1")
tile_data = _finalize_array(
tile_buffer,
bitpix=zbitpix,
tile_shape=actual_tile_shape,
algorithm="GZIP_1",
lossless=True,
)
else:
cdata = _get_data_from_heap(
bintable,
*compressed_data_column[row_index],
compressed_data_dtype,
heap_cache=heap_cache,
)
if compression_type == "GZIP_2":
# Decompress with GZIP_1 just to find the total number of
# elements in the uncompressed data.
# TODO: find a way to avoid doing this for all tiles
tile_data = np.asarray(_decompress_tile(cdata, algorithm="GZIP_1"))
settings["itemsize"] = tile_data.size // int(prod(actual_tile_shape))
tile_buffer = _decompress_tile(
cdata, algorithm=compression_type, **settings
)
tile_data = _finalize_array(
tile_buffer,
bitpix=zbitpix,
tile_shape=actual_tile_shape,
algorithm=compression_type,
lossless=not quantized,
)
if zblank_column is None:
zblank = zblank_header
else:
zblank = zblank_column[row_index]
if zblank is not None:
blank_mask = tile_data == zblank
if quantized:
q = Quantize(
row=(row_index + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=None,
bitpix=zbitpix,
)
tile_data = np.asarray(
q.decode_quantized(
tile_data, zscale_column[row_index], zzero_column[row_index]
)
).reshape(actual_tile_shape)
if zblank is not None:
if not tile_data.flags.writeable:
tile_data = tile_data.copy()
tile_data[blank_mask] = np.nan
image_data[tile_slices] = tile_data
return image_data |
Compress the data in a `~astropy.io.fits.CompImageHDU`.
The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
image_data : `~numpy.ndarray`
The image data to compress
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
compressed_coldefs : `~astropy.io.fits.ColDefs`
The ColDefs object for the compressed binary table
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap. | def compress_image_data(
image_data,
compression_type,
compressed_header,
compressed_coldefs,
):
"""
Compress the data in a `~astropy.io.fits.CompImageHDU`.
The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
image_data : `~numpy.ndarray`
The image data to compress
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
compressed_coldefs : `~astropy.io.fits.ColDefs`
The ColDefs object for the compressed binary table
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap.
"""
if not isinstance(image_data, np.ndarray):
raise TypeError("Image data must be a numpy.ndarray")
_check_compressed_header(compressed_header)
# TODO: This implementation is memory inefficient as it generates all the
# compressed bytes before forming them into the heap, leading to 2x the
# potential memory usage. Directly storing the compressed bytes into an
# expanding heap would fix this.
tile_shape = _tile_shape(compressed_header)
data_shape = _data_shape(compressed_header)
compressed_bytes = []
gzip_fallback = []
scales = []
zeros = []
zblank = None
noisebit = _get_compression_setting(compressed_header, "noisebit", 0)
settings = _header_to_settings(compressed_header)
for irow, tile_slices in _iter_array_tiles(data_shape, tile_shape):
tile_data = image_data[tile_slices]
settings = _update_tile_settings(settings, compression_type, tile_data.shape)
quantize = "ZSCALE" in compressed_coldefs.dtype.names
if tile_data.dtype.kind == "f" and quantize:
dither_method = DITHER_METHODS[
compressed_header.get("ZQUANTIZ", "NO_DITHER")
]
dither_seed = compressed_header.get("ZDITHER0", 0)
q = Quantize(
row=(irow + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=noisebit,
bitpix=compressed_header["ZBITPIX"],
)
original_shape = tile_data.shape
# If there are any NaN values in the data, we should reset them to
# a value that will not affect the quantization (an already existing
# data value in the array) and we can then reset this after quantization
# to ZBLANK and set the appropriate header keyword
nan_mask = np.isnan(tile_data)
any_nan = np.any(nan_mask)
if any_nan:
# Note that we need to copy here to avoid modifying the input array.
tile_data = tile_data.copy()
if np.all(nan_mask):
tile_data[nan_mask] = 0
else:
tile_data[nan_mask] = np.nanmin(tile_data)
try:
tile_data, scale, zero = q.encode_quantized(tile_data)
except QuantizationFailedException:
if any_nan:
# reset NaN values since we will losslessly compress.
tile_data[nan_mask] = np.nan
scales.append(0)
zeros.append(0)
gzip_fallback.append(True)
else:
tile_data = np.asarray(tile_data).reshape(original_shape)
if any_nan:
if not tile_data.flags.writeable:
tile_data = tile_data.copy()
# For now, we just use the default ZBLANK value and assume
# this is the same for all tiles. We could generalize this
# to allow different ZBLANK values (for example if the data
# includes this value by chance) and to allow different values
# per tile, which is allowed by the FITS standard.
tile_data[nan_mask] = DEFAULT_ZBLANK
zblank = DEFAULT_ZBLANK
scales.append(scale)
zeros.append(zero)
gzip_fallback.append(False)
else:
scales.append(0)
zeros.append(0)
gzip_fallback.append(False)
if gzip_fallback[-1]:
cbytes = _compress_tile(tile_data, algorithm="GZIP_1")
else:
cbytes = _compress_tile(tile_data, algorithm=compression_type, **settings)
compressed_bytes.append(cbytes)
if zblank is not None:
compressed_header["ZBLANK"] = zblank
table = np.zeros(
len(compressed_bytes), dtype=compressed_coldefs.dtype.newbyteorder(">")
)
if "ZSCALE" in table.dtype.names:
table["ZSCALE"] = np.array(scales)
table["ZZERO"] = np.array(zeros)
for irow, cbytes in enumerate(compressed_bytes):
table["COMPRESSED_DATA"][irow, 0] = len(cbytes)
table["COMPRESSED_DATA"][:1, 1] = 0
table["COMPRESSED_DATA"][1:, 1] = np.cumsum(table["COMPRESSED_DATA"][:-1, 0])
for irow in range(len(compressed_bytes)):
if gzip_fallback[irow]:
table["GZIP_COMPRESSED_DATA"][irow] = table["COMPRESSED_DATA"][irow]
table["COMPRESSED_DATA"][irow] = 0
# For PLIO_1, the size of each heap element is a factor of two lower than
# the real size - not clear if this is deliberate or a bug somewhere.
if compression_type == "PLIO_1":
table["COMPRESSED_DATA"][:, 0] //= 2
# For PLIO_1, it looks like the compressed data is always stored big endian
if compression_type == "PLIO_1":
for irow in range(len(compressed_bytes)):
if not gzip_fallback[irow]:
array = np.frombuffer(compressed_bytes[irow], dtype="i2")
if array.dtype.byteorder == "<" or (
array.dtype.byteorder == "=" and sys.byteorder == "little"
):
compressed_bytes[irow] = array.astype(">i2", copy=False).tobytes()
compressed_bytes = b"".join(compressed_bytes)
table_bytes = table.tobytes()
heap = table_bytes + compressed_bytes
return len(compressed_bytes), np.frombuffer(heap, dtype=np.uint8) |
Expands a list of N iterables of parameters into a flat list with all
combinations of all parameters. | def _expand(*params):
"""
Expands a list of N iterables of parameters into a flat list with all
combinations of all parameters.
"""
expanded = []
for ele in params:
expanded += list(itertools.product(*ele))
return expanded |
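For example, each group of iterables is expanded to its own Cartesian product and the products are concatenated:
>>> _expand([(1, 2), ("a", "b")], [(3,), ("c", "d")])
[(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'), (3, 'c'), (3, 'd')]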
Regression test for a bug that caused extensions that used BZERO and BSCALE
that got turned into CompImageHDU to end up with BZERO/BSCALE before the
TFIELDS. | def test_comphdu_bscale(tmp_path):
"""
Regression test for a bug that caused extensions that used BZERO and BSCALE
that got turned into CompImageHDU to end up with BZERO/BSCALE before the
TFIELDS.
"""
filename1 = tmp_path / "3hdus.fits"
filename2 = tmp_path / "3hdus_comp.fits"
x = np.random.random((100, 100)) * 100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x - 50, dtype=int), uint=True)
x1.header["BZERO"] = 20331
x1.header["BSCALE"] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(
data=hdus[1].data.astype(np.uint32), header=hdus[1].header
)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify("exception") |
This fixture provides 4 files downloaded from https://fits.gsfc.nasa.gov/registry/tilecompression.html
which are used as canonical tests of data not compressed by Astropy. | def canonical_int_hdus(request):
"""
This fixture provides 4 files downloaded from https://fits.gsfc.nasa.gov/registry/tilecompression.html
    which are used as canonical tests of data not compressed by Astropy.
"""
with fits.open(request.param) as hdul:
yield hdul[1] |
Prints a message if any HDU in `filename` has a bad checksum or datasum. | def verify_checksums(filename):
"""
Prints a message if any HDU in `filename` has a bad checksum or datasum.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter("always")
with fits.open(filename, checksum=OPTIONS.checksum_kind) as hdulist:
for i, hdu in enumerate(hdulist):
# looping on HDUs is needed to read them and verify the
# checksums
if not OPTIONS.ignore_missing:
if not hdu._checksum:
log.warning(
f"MISSING {filename!r} .. Checksum not found in HDU #{i}"
)
return 1
if not hdu._datasum:
log.warning(
f"MISSING {filename!r} .. Datasum not found in HDU #{i}"
)
return 1
for w in wlist:
if str(w.message).startswith(
("Checksum verification failed", "Datasum verification failed")
):
log.warning("BAD %r %s", filename, str(w.message))
return 1
log.info(f"OK {filename!r}")
return 0 |
Check for FITS standard compliance. | def verify_compliance(filename):
"""Check for FITS standard compliance."""
with fits.open(filename) as hdulist:
try:
hdulist.verify("exception")
except fits.VerifyError as exc:
log.warning("NONCOMPLIANT %r .. %s", filename, str(exc).replace("\n", " "))
return 1
return 0 |
Sets the ``CHECKSUM`` and ``DATASUM`` keywords for each HDU of `filename`.
Also fixes standards violations if possible and requested. | def update(filename):
"""
Sets the ``CHECKSUM`` and ``DATASUM`` keywords for each HDU of `filename`.
    Also fixes standards violations if possible and requested.
"""
output_verify = "silentfix" if OPTIONS.compliance else "ignore"
    # For unit tests we temporarily reset the warning filters. Indeed, before
# updating the checksums, fits.open will verify the existing checksums and
# raise warnings, which are later caught and converted to log.warning...
# which is an issue when testing, using the "error" action to convert
# warnings to exceptions.
with warnings.catch_warnings():
warnings.resetwarnings()
with fits.open(
filename,
do_not_scale_image_data=True,
checksum=OPTIONS.checksum_kind,
mode="update",
) as hdulist:
hdulist.flush(output_verify=output_verify) |
Handle a single .fits file, returning the count of checksum and compliance
errors. | def process_file(filename):
"""
Handle a single .fits file, returning the count of checksum and compliance
errors.
"""
try:
checksum_errors = verify_checksums(filename)
if OPTIONS.compliance:
compliance_errors = verify_compliance(filename)
else:
compliance_errors = 0
        if (OPTIONS.write_file and checksum_errors == 0) or OPTIONS.force:
update(filename)
return checksum_errors + compliance_errors
except Exception as e:
log.error(f"EXCEPTION {filename!r} .. {e}")
return 1 |
Processes command line parameters into options and files, then checks
or updates FITS DATASUM and CHECKSUM keywords for the specified files. | def main(args=None):
"""
Processes command line parameters into options and files, then checks
    or updates FITS DATASUM and CHECKSUM keywords for the specified files.
"""
errors = 0
fits_files = handle_options(args or sys.argv[1:])
setup_logging()
for filename in fits_files:
errors += process_file(filename)
if errors:
log.warning(f"{errors} errors")
return int(bool(errors)) |
Prints FITS header(s) using the traditional 80-char format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below. | def print_headers_traditional(args):
"""Prints FITS header(s) using the traditional 80-char format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
for idx, filename in enumerate(args.filename): # support wildcards
if idx > 0 and not args.keyword:
print() # print a newline between different files
formatter = None
try:
formatter = HeaderFormatter(filename)
print(
formatter.parse(args.extensions, args.keyword, args.compressed), end=""
)
except OSError as e:
log.error(str(e))
finally:
if formatter:
formatter.close() |
Prints FITS header(s) in a machine-readable table format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below. | def print_headers_as_table(args):
"""Prints FITS header(s) in a machine-readable table format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename)
tbl = formatter.parse(args.extensions, args.keyword, args.compressed)
if tbl:
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
from astropy import table
resulting_table = table.vstack(tables)
# Print the string representation of the concatenated table
resulting_table.write(sys.stdout, format=args.table) |
Prints FITS header(s) with keywords as columns.
This follows the dfits+fitsort format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below. | def print_headers_as_comparison(args):
"""Prints FITS header(s) with keywords as columns.
This follows the dfits+fitsort format.
Parameters
----------
args : argparse.Namespace
Arguments passed from the command-line as defined below.
"""
from astropy import table
tables = []
# Create a Table object for each file
for filename in args.filename: # Support wildcards
formatter = None
try:
formatter = TableHeaderFormatter(filename, verbose=False)
tbl = formatter.parse(args.extensions, args.keyword, args.compressed)
if tbl:
# Remove empty keywords
tbl = tbl[np.where(tbl["keyword"] != "")]
else:
tbl = table.Table([[filename]], names=("filename",))
tables.append(tbl)
except OSError as e:
log.error(str(e)) # file not found or unreadable
finally:
if formatter:
formatter.close()
# Concatenate the tables
if len(tables) == 0:
return False
elif len(tables) == 1:
resulting_table = tables[0]
else:
resulting_table = table.vstack(tables)
# If we obtained more than one hdu, merge hdu and keywords columns
hdus = resulting_table["hdu"]
if np.ma.isMaskedArray(hdus):
hdus = hdus.compressed()
if len(np.unique(hdus)) > 1:
for tab in tables:
new_column = table.Column([f"{row['hdu']}:{row['keyword']}" for row in tab])
tab.add_column(new_column, name="hdu+keyword")
keyword_column_name = "hdu+keyword"
else:
keyword_column_name = "keyword"
# Check how many hdus we are processing
final_tables = []
for tab in tables:
final_table = [table.Column([tab["filename"][0]], name="filename")]
if "value" in tab.colnames:
for row in tab:
if row["keyword"] in ("COMMENT", "HISTORY"):
continue
final_table.append(
table.Column([row["value"]], name=row[keyword_column_name])
)
final_tables.append(table.Table(final_table))
final_table = table.vstack(final_tables)
# Sort if requested
if args.sort:
final_table.sort(args.sort)
# Reorganise to keyword by columns
final_table.pprint(max_lines=-1, max_width=-1) |
This is the main function called by the `fitsheader` script. | def main(args=None):
"""This is the main function called by the `fitsheader` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"-e",
"--extension",
metavar="HDU",
action="append",
dest="extensions",
help=(
"specify the extension by name or number; this argument can "
"be repeated to select multiple extensions"
),
)
parser.add_argument(
"-k",
"--keyword",
metavar="KEYWORD",
action="append",
type=str,
help=(
"specify a keyword; this argument can be repeated to select "
"multiple keywords; also supports wildcards"
),
)
mode_group = parser.add_mutually_exclusive_group()
mode_group.add_argument(
"-t",
"--table",
nargs="?",
default=False,
metavar="FORMAT",
help=(
"print the header(s) in machine-readable table format; the "
'default format is "ascii.fixed_width" (can be "ascii.csv", '
'"ascii.html", "ascii.latex", "fits", etc)'
),
)
mode_group.add_argument(
"-f",
"--fitsort",
action="store_true",
help=(
"print the headers as a table with each unique "
"keyword in a given column (fitsort format) "
),
)
parser.add_argument(
"-s",
"--sort",
metavar="SORT_KEYWORD",
action="append",
type=str,
help=(
"sort output by the specified header keywords, can be repeated to "
"sort by multiple keywords; Only supported with -f/--fitsort"
),
)
parser.add_argument(
"-c",
"--compressed",
action="store_true",
help=(
"for compressed image data, show the true header which describes "
"the compression rather than the data"
),
)
parser.add_argument(
"filename",
nargs="+",
help="path to one or more files; wildcards are supported",
)
args = parser.parse_args(args)
# If `--table` was used but no format specified,
# then use ascii.fixed_width by default
if args.table is None:
args.table = "ascii.fixed_width"
if args.sort:
args.sort = [key.replace(".", " ") for key in args.sort]
if not args.fitsort:
log.error(
"Sorting with -s/--sort is only supported in conjunction with"
" -f/--fitsort"
)
# 2: Unix error convention for command line syntax
sys.exit(2)
if args.keyword:
args.keyword = [key.replace(".", " ") for key in args.keyword]
# Now print the desired headers
try:
if args.table:
print_headers_as_table(args)
elif args.fitsort:
print_headers_as_comparison(args)
else:
print_headers_traditional(args)
except OSError:
# A 'Broken pipe' OSError may occur when stdout is closed prematurely,
# eg. when calling `fitsheader file.fits | head`. We let this pass.
pass |
Print a summary of the HDUs in a FITS file.
Parameters
----------
filename : str
The path to a FITS file. | def fitsinfo(filename):
"""
Print a summary of the HDUs in a FITS file.
Parameters
----------
filename : str
The path to a FITS file.
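    Examples
    --------
    A minimal sketch (``"image.fits"`` is a hypothetical path):
    >>> fitsinfo("image.fits")  # doctest: +SKIP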
"""
try:
fits.info(filename)
except OSError as e:
log.error(str(e)) |
The main function called by the `fitsinfo` script. | def main(args=None):
"""The main function called by the `fitsinfo` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"filename",
nargs="+",
help="Path to one or more FITS files. Wildcards are supported.",
)
args = parser.parse_args(args)
for idx, filename in enumerate(args.filename):
if idx > 0:
print()
fitsinfo(filename) |
Pytest fixture to run a test case both with and without tilde paths.
In the tilde-path case, calls like self.data('filename.fits') will
produce '~/filename.fits', and environment variables will be temporarily
modified so that '~' resolves to the data directory. | def home_is_data(request, monkeypatch):
"""
Pytest fixture to run a test case both with and without tilde paths.
In the tilde-path case, calls like self.data('filename.fits') will
produce '~/filename.fits', and environment variables will be temporarily
modified so that '~' resolves to the data directory.
"""
# This checks the value specified in the fixture annotation
if request.param:
# `request.instance` refers to the test case that's using this fixture.
request.instance.monkeypatch = monkeypatch
request.instance.set_home_as_data()
request.instance.set_paths_via_pathlib(request.param == "pathlib") |
Pytest fixture to run a test case both with and without tilde paths.
In the tilde-path case, calls like self.temp('filename.fits') will
produce '~/filename.fits', and environment variables will be temporarily
modified so that '~' resolves to the temp directory. These files will also
be tracked so that, after the test case, we can verify no files were written
to a literal tilde path. | def home_is_temp(request, monkeypatch):
"""
Pytest fixture to run a test case both with and without tilde paths.
In the tilde-path case, calls like self.temp('filename.fits') will
produce '~/filename.fits', and environment variables will be temporarily
modified so that '~' resolves to the temp directory. These files will also
be tracked so that, after the test case, we can verify no files were written
to a literal tilde path.
"""
# This checks the value specified in the fixture annotation
if request.param:
# `request.instance` refers to the test case that's using this fixture.
request.instance.monkeypatch = monkeypatch
request.instance.set_home_as_temp()
request.instance.set_paths_via_pathlib(request.param == "pathlib") |
Regression test for #1795 - this bug originally caused columns where TNULL
was not defined to have their first element masked. | def test_masking_regression_1795():
"""
Regression test for #1795 - this bug originally caused columns where TNULL
was not defined to have their first element masked.
"""
t = Table.read(get_pkg_data_filename("data/tb.fits"))
assert np.all(t["c1"].mask == np.array([False, False]))
assert not hasattr(t["c2"], "mask")
assert not hasattr(t["c3"], "mask")
assert not hasattr(t["c4"], "mask")
assert np.all(t["c1"].data == np.array([1, 2]))
assert np.all(t["c2"].data == np.array([b"abc", b"xy "]))
assert_allclose(t["c3"].data, np.array([3.70000007153, 6.6999997139]))
assert np.all(t["c4"].data == np.array([False, True])) |
Regression test for https://github.com/astropy/astropy/issues/1953
Ensures that Table columns of bools are properly written to a FITS table. | def test_bool_column(tmp_path):
"""
Regression test for https://github.com/astropy/astropy/issues/1953
Ensures that Table columns of bools are properly written to a FITS table.
"""
arr = np.ones(5, dtype=bool)
arr[::2] = False
t = Table([arr])
t.write(tmp_path / "test.fits", overwrite=True)
with fits.open(tmp_path / "test.fits") as hdul:
assert hdul[1].data["col0"].dtype == np.dtype("bool")
assert np.all(hdul[1].data["col0"] == arr) |
Test that a column of unicode strings is still written as one
byte-per-character in the FITS table (so long as the column can be ASCII
encoded).
Regression test for one of the issues fixed in
https://github.com/astropy/astropy/pull/4228 | def test_unicode_column(tmp_path):
"""
Test that a column of unicode strings is still written as one
byte-per-character in the FITS table (so long as the column can be ASCII
encoded).
Regression test for one of the issues fixed in
https://github.com/astropy/astropy/pull/4228
"""
t = Table([np.array(["a", "b", "cd"])])
t.write(tmp_path / "test.fits", overwrite=True)
with fits.open(tmp_path / "test.fits") as hdul:
assert np.all(hdul[1].data["col0"] == ["a", "b", "cd"])
assert hdul[1].header["TFORM1"] == "2A"
t2 = Table([np.array(["\N{SNOWMAN}"])])
with pytest.raises(UnicodeEncodeError):
t2.write(tmp_path / "test.fits", overwrite=True) |
Regression test for https://github.com/astropy/astropy/issues/6079 | def test_convert_comment_convention():
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
filename = get_pkg_data_filename("data/stddata.fits")
with pytest.warns(
AstropyUserWarning,
match=r"hdu= was not specified but multiple tables are present",
):
t = Table.read(filename)
assert t.meta["comments"] == [
"",
" *** End of mandatory fields ***",
"",
"",
" *** Column names ***",
"",
"",
" *** Column formats ***",
"",
] |
Test writing as QTable and reading as Table. Ensure correct classes
come out. | def test_fits_mixins_qtable_to_table(tmp_path):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmp_path / "test_simple.fits"
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format="fits")
t2 = Table.read(filename, format="fits", astropy_native=True)
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class) |
Test write/read all cols at once and validate intermediate column names | def test_fits_mixins_as_one(table_cls, tmp_path):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmp_path / "test_simple.fits"
names = sorted(mixin_cols)
# FITS stores times directly, so we just get the column back.
all_serialized_names = []
for name in sorted(mixin_cols):
all_serialized_names.extend(
[name] if isinstance(mixin_cols[name], Time) else serialized_names[name]
)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="fits")
t2 = table_cls.read(filename, format="fits", astropy_native=True)
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["HISTORY"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
# Read directly via fits and confirm column names
with fits.open(filename) as hdus:
assert hdus[1].columns.names == all_serialized_names |
Test write/read one col at a time and do detailed validation | def test_fits_mixins_per_column(table_cls, name_col, tmp_path):
"""Test write/read one col at a time and do detailed validation"""
filename = tmp_path / "test_simple.fits"
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "my \n\n\n description"
t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}}
if not t.has_mixin_columns:
pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)")
t.write(filename, format="fits")
t2 = table_cls.read(filename, format="fits", astropy_native=True)
if isinstance(col, Time):
# FITS Time does not preserve format
t2[name].format = col.format
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ["data"] if colname in ("c1", "c2") else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray |
Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized | def test_info_attributes_with_no_mixins(tmp_path):
"""Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized
"""
filename = tmp_path / "test.fits"
t = Table([[1.0, 2.0]])
t["col0"].description = "hello" * 40
t["col0"].format = "{:8.4f}"
t["col0"].meta["a"] = {"b": "c"}
t.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2["col0"].description == "hello" * 40
assert t2["col0"].format == "{:8.4f}"
assert t2["col0"].meta["a"] == {"b": "c"} |
Same as previous test but set the serialize_method to 'data_mask' so mask is
written out and the behavior is all correct. | def test_round_trip_masked_table_serialize_mask(tmp_path, method):
"""
Same as previous test but set the serialize_method to 'data_mask' so mask is
written out and the behavior is all correct.
"""
filename = tmp_path / "test.fits"
t = simple_table(masked=True) # int, float, and str cols with one masked element
    # MaskedColumn but no masked elements. See the astropy.table MaskedColumnInfo
    # class _represent_as_dict() method for info about why we test a column with
    # no masked elements.
t["d"] = [1, 2, 3]
if method == "set_cols":
for col in t.itercols():
col.info.serialize_method["fits"] = "data_mask"
t.write(filename)
elif method == "names":
t.write(
filename,
serialize_method={
"a": "data_mask",
"b": "data_mask",
"c": "data_mask",
"d": "data_mask",
},
)
elif method == "class":
t.write(filename, serialize_method="data_mask")
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name]) |
Regression test for https://github.com/astropy/astropy/issues/14305 | def test_is_fits_gh_14305():
"""Regression test for https://github.com/astropy/astropy/issues/14305"""
assert not connect.is_fits("", "foo.bar", None) |
Regression test for https://github.com/astropy/astropy/issues/15417 | def test_keep_masked_state_integer_columns(tmp_path):
"""Regression test for https://github.com/astropy/astropy/issues/15417"""
filename = tmp_path / "test_masked.fits"
t = Table([[1, 2], [1.5, 2.5]], names=["a", "b"])
t["c"] = MaskedColumn([1, 2], mask=[True, False])
t.write(filename)
tr = Table.read(filename)
assert not isinstance(tr["a"], MaskedColumn)
assert not isinstance(tr["b"], MaskedColumn)
assert isinstance(tr["c"], MaskedColumn) |
Checks that integer columns with a TNULL value set (e.g. masked columns)
have their TNULL value propagated when being read in by Table.read | def test_null_propagation_in_table_read(tmp_path):
"""Checks that integer columns with a TNULL value set (e.g. masked columns)
have their TNULL value propagated when being read in by Table.read"""
# Could be anything except for 999999, which is the "default" fill_value
# for masked int arrays
NULL_VALUE = -1
output_filename = tmp_path / "null_table.fits"
data = np.asarray([1, 2, NULL_VALUE, 4], dtype=np.int32)
# Create table with BinTableHDU, with integer column containing a custom null
c = fits.Column(name="a", array=data, null=NULL_VALUE, format="J")
hdu = BinTableHDU.from_columns([c])
hdu.writeto(output_filename)
# Read the table in with Table.read, and ensure the column's fill_value is
# equal to NULL_VALUE
t = Table.read(output_filename)
assert t["a"].fill_value == NULL_VALUE |
Make sure diff report reports HDU name and ver if same in files | def test_fitsdiff_hdu_name(tmp_path):
"""Make sure diff report reports HDU name and ver if same in files"""
path1 = tmp_path / "test1.fits"
path2 = tmp_path / "test2.fits"
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5), name="SCI")])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert "Extension HDU 1 (SCI, 1):" in diff.report() |
Make sure diff report doesn't report HDU name if not in files | def test_fitsdiff_no_hdu_name(tmp_path):
"""Make sure diff report doesn't report HDU name if not in files"""
path1 = tmp_path / "test1.fits"
path2 = tmp_path / "test2.fits"
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert "Extension HDU 1:" in diff.report() |
Make sure diff report doesn't report HDU name if not same in files | def test_fitsdiff_with_names(tmp_path):
"""Make sure diff report doesn't report HDU name if not same in files"""
path1 = tmp_path / "test1.fits"
path2 = tmp_path / "test2.fits"
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5), name="SCI", ver=1)])
hdulist.writeto(path1)
hdulist[1].name = "ERR"
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert "Extension HDU 1:" in diff.report() |
Regression test for https://github.com/astropy/astropy/issues/13330 | def test_rawdatadiff_diff_with_rtol(tmp_path):
"""Regression test for https://github.com/astropy/astropy/issues/13330"""
path1 = tmp_path / "test1.fits"
path2 = tmp_path / "test2.fits"
a = np.zeros((10, 2), dtype="float32")
a[:, 0] = np.arange(10, dtype="float32") + 10
a[:, 1] = np.arange(10, dtype="float32") + 20
b = a.copy()
changes = [(3, 13.1, 23.1), (8, 20.5, 30.5)]
for i, v, w in changes:
b[i, 0] = v
b[i, 1] = w
ca = Column("A", format="20E", array=[a])
cb = Column("A", format="20E", array=[b])
hdu_a = BinTableHDU.from_columns([ca])
hdu_a.writeto(path1, overwrite=True)
hdu_b = BinTableHDU.from_columns([cb])
hdu_b.writeto(path2, overwrite=True)
with fits.open(path1) as fits1:
with fits.open(path2) as fits2:
diff = FITSDiff(fits1, fits2, atol=0, rtol=0.001)
str1 = diff.report(fileobj=None, indent=0)
diff = FITSDiff(fits1, fits2, atol=0, rtol=0.01)
str2 = diff.report(fileobj=None, indent=0)
assert "...and at 1 more indices." in str1
assert "...and at 1 more indices." not in str2 |
Make sure that failing FITSDiff doesn't leave open files. | def test_fitsdiff_openfile(tmp_path):
"""Make sure that failing FITSDiff doesn't leave open files."""
path1 = tmp_path / "file1.fits"
path2 = tmp_path / "file2.fits"
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert diff.identical, diff.report() |
Can we use fsspec to read a local file? | def test_fsspec_local():
"""Can we use fsspec to read a local file?"""
fn = get_pkg_data_filename("data/test0.fits")
with fits.open(fn) as hdulist_classic:
with fits.open(fn, use_fsspec=True) as hdulist_fsspec:
assert_array_equal(hdulist_classic[2].data, hdulist_fsspec[2].data)
assert_array_equal(
hdulist_classic[2].section[3:5], hdulist_fsspec[2].section[3:5]
) |
Can we write to a local file that was opened using fsspec? | def test_fsspec_local_write(tmp_path):
"""Can we write to a local file that was opened using fsspec?"""
fn = get_pkg_data_filename("data/test0.fits")
fn_tmp = tmp_path / "tmp.fits"
with fits.open(fn, use_fsspec=True) as hdul:
# writing to a section is never allowed
with pytest.raises(TypeError):
hdul[1].section[0, 0] = -999
# however writing to .data should work
hdul[1].data[2, 3] = -999
assert hdul[1].data[2, 3] == -999
hdul.writeto(fn_tmp)
# Is the new value present when we re-open the file?
with fits.open(fn_tmp) as hdul:
assert hdul[1].data[2, 3] == -999 |
Does Cutout2D work with data loaded lazily using fsspec and .section? | def test_fsspec_cutout2d():
"""Does Cutout2D work with data loaded lazily using fsspec and .section?"""
fn = get_pkg_data_filename("data/test0.fits")
with fits.open(fn, use_fsspec=True) as hdul:
position = (10, 20)
size = (2, 3)
cutout1 = Cutout2D(hdul[1].data, position, size)
cutout2 = Cutout2D(hdul[1].section, position, size)
assert_allclose(cutout1.data, cutout2.data) |
Does fsspec support compressed data correctly? | def test_fsspec_compressed():
"""Does fsspec support compressed data correctly?"""
# comp.fits[1] is a compressed image with shape (440, 300)
fn = get_pkg_data_filename("data/comp.fits")
with fits.open(fn, use_fsspec=True) as hdul:
# The .data attribute should work as normal
assert hdul[1].data[0, 0] == 7
# And the .section attribute should work too
assert hdul[1].section[0, 0] == 7 |
Make sure that operations on a shallow copy do not alter the original.
#4990. | def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([("a", 1), ("b", 1)])
copied_header = copy.copy(original_header)
# Modifying the original dict should not alter the copy
original_header["c"] = 100
assert "c" not in copied_header
# and changing the copy should not change the original.
copied_header["a"] = 0
assert original_header["a"] == 1 |
Make sure that creating a Header from another Header makes a copy if
copy is True. | def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([("a", 10)])
new_header = fits.Header(original_header, copy=True)
original_header["a"] = 20
assert new_header["a"] == 10
new_header["a"] = 0
assert original_header["a"] == 20 |
Check that subclasses don't get ignored on slicing and copying. | def test_subclass():
"""Check that subclasses don't get ignored on slicing and copying."""
class MyHeader(fits.Header):
def append(self, card, *args, **kwargs):
if isinstance(card, tuple) and len(card) == 2:
# Just for our checks we add a comment if there is none.
card += ("no comment",)
return super().append(card, *args, **kwargs)
my_header = MyHeader(
(
("a", 1.0, "first"),
("b", 2.0, "second"),
(
"c",
3.0,
),
)
)
assert my_header.comments["a"] == "first"
assert my_header.comments["b"] == "second"
assert my_header.comments["c"] == "no comment"
slice_ = my_header[1:]
assert type(slice_) is MyHeader
assert slice_.comments["b"] == "second"
assert slice_.comments["c"] == "no comment"
selection = my_header["c*"]
assert type(selection) is MyHeader
assert selection.comments["c"] == "no comment"
copy_ = my_header.copy()
assert type(copy_) is MyHeader
assert copy_.comments["b"] == "second"
assert copy_.comments["c"] == "no comment"
my_header.extend((("d", 4.0),))
assert my_header.comments["d"] == "no comment" |
Test for int8 support, https://github.com/astropy/astropy/issues/11995 | def test_int8(tmp_path):
"""Test for int8 support, https://github.com/astropy/astropy/issues/11995"""
img = np.arange(-50, 50, dtype=np.int8).reshape(10, 10)
hdu = fits.PrimaryHDU(img)
hdu.writeto(tmp_path / "int8.fits")
with fits.open(tmp_path / "int8.fits") as hdul:
assert hdul[0].header["BITPIX"] == 8
assert hdul[0].header["BZERO"] == -128
assert hdul[0].header["BSCALE"] == 1.0
assert_equal(hdul[0].data, img)
assert hdul[0].data.dtype == img.dtype |
Compare the values field-by-field in two sets of numpy arrays or
recarrays. | def compare_arrays(arr1in, arr2in, verbose=False):
"""
Compare the values field-by-field in two sets of numpy arrays or
recarrays.
"""
arr1 = arr1in.view(np.ndarray)
arr2 = arr2in.view(np.ndarray)
nfail = 0
for n2 in arr2.dtype.names:
n1 = n2
if n1 not in arr1.dtype.names:
n1 = n1.lower()
if n1 not in arr1.dtype.names:
n1 = n1.upper()
if n1 not in arr1.dtype.names:
raise ValueError(f"field name {n2} not found in array 1")
if verbose:
sys.stdout.write(f" testing field: '{n2}'\n")
sys.stdout.write(" shape...........")
if arr2[n2].shape != arr1[n1].shape:
nfail += 1
if verbose:
sys.stdout.write("shapes differ\n")
else:
if verbose:
sys.stdout.write("OK\n")
sys.stdout.write(" elements........")
(w,) = np.where(arr1[n1].ravel() != arr2[n2].ravel())
if w.size > 0:
nfail += 1
if verbose:
sys.stdout.write(
f"\n {w.size} elements in field {n2} differ\n"
)
else:
if verbose:
sys.stdout.write("OK\n")
if nfail == 0:
if verbose:
sys.stdout.write("All tests passed\n")
return True
else:
if verbose:
sys.stdout.write(f"{nfail} differences found\n")
return False |
Compare two float scalars or arrays and see if they are consistent.
Consistency is determined by ensuring the difference is less than the
expected amount. Return True if consistent, False if any differences. | def comparefloats(a, b):
"""
    Compare two float scalars or arrays and see if they are consistent.
    Consistency is determined by ensuring the difference is less than the
    expected amount. Return True if consistent, False if any differences.
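    Examples
    --------
    A minimal illustration (assumes ``import numpy as np``):
    >>> comparefloats(np.array([1.0]), np.array([1.0 + 1e-7]))
    True
    >>> comparefloats(np.array([1.0]), np.array([1.01]))
    False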
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == "float32" or bb.dtype.name == "float32":
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.0
if np.any(mask0):
if diff[mask0].max() != 0.0:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True |
Compare two record arrays.
This is done field by field, using approximate comparison for float columns
(complex values are not yet handled).
Column names are not compared, but column types and sizes are. | def comparerecords(a, b):
"""
    Compare two record arrays.
    This is done field by field, using approximate comparison for float columns
    (complex values are not yet handled).
    Column names are not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == "S":
fielda = decode_ascii(fielda)
if fieldb.dtype.char == "S":
fieldb = decode_ascii(fieldb)
if not isinstance(fielda, type(fieldb)) and not isinstance(
fieldb, type(fielda)
):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f"field {i} type differs")
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
elif isinstance(fielda, fits.column._VLF) or isinstance(
fieldb, fits.column._VLF
):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print(f"fielda[{row}]: {fielda[row]}")
print(f"fieldb[{row}]: {fieldb[row]}")
print(f"field {i} differs in row {row}")
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f"field {i} differs")
return False
return True |
Helper function to compare column attributes | def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [
k for k, v in fits.Column.__dict__.items() if isinstance(v, ColumnAttribute)
]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr) |
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement. | def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
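    Examples
    --------
    A sketch of the intended use (``"HDUList"`` is just an illustrative type
    name; the ``objgraph`` package must be installed):
    >>> with _refcounting("HDUList"):  # doctest: +SKIP
    ...     pass  # open and close HDULists here; leaked ones trigger the assert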
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
    assert (
        len(objgraph.by_type(type_)) <= refcount
    ), f"More {type_!r} objects still in memory than before."
Find all structured arrays in an HDF5 file. | def _find_all_structured_arrays(handle):
"""
Find all structured arrays in an HDF5 file.
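    Examples
    --------
    A usage sketch (``"data.hdf5"`` is a hypothetical file name):
    >>> import h5py  # doctest: +SKIP
    >>> with h5py.File("data.hdf5", "r") as f:  # doctest: +SKIP
    ...     names = _find_all_structured_arrays(f)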
"""
import h5py
structured_arrays = []
def append_structured_arrays(name, obj):
if isinstance(obj, h5py.Dataset) and obj.dtype.kind == "V":
structured_arrays.append(name)
handle.visititems(append_structured_arrays)
return structured_arrays |
Read a Table object from an HDF5 file.
This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one
table is present in the HDF5 file or group, the first table is read in and
a warning is displayed.
Parameters
----------
input : str or :class:`h5py.File` or :class:`h5py.Group` or
:class:`h5py.Dataset`
    If a string, the filename to read the table from.
If an h5py object, either the file or the group object to read the
table from.
path : str
The path from which to read the table inside the HDF5 file.
This should be relative to the input file or group.
character_as_bytes : bool
If `True` then Table columns are left as bytes.
If `False` then Table columns are converted to unicode. | def read_table_hdf5(input, path=None, character_as_bytes=True):
"""
Read a Table object from an HDF5 file.
This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one
table is present in the HDF5 file or group, the first table is read in and
a warning is displayed.
Parameters
----------
input : str or :class:`h5py.File` or :class:`h5py.Group` or
        :class:`h5py.Dataset`
        If a string, the filename to read the table from.
If an h5py object, either the file or the group object to read the
table from.
path : str
The path from which to read the table inside the HDF5 file.
This should be relative to the input file or group.
character_as_bytes : bool
If `True` then Table columns are left as bytes.
If `False` then Table columns are converted to unicode.
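    Examples
    --------
    A minimal usage sketch; the file name and path are hypothetical:
    >>> t = read_table_hdf5("obs.hdf5", path="data/table")  # doctest: +SKIP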
"""
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
    # This function is recursive, and the `input` variable is reassigned in
    # place as we drill down from a file or group to the dataset to read.
    # Here, we save its original value so that the serialized metadata dataset
    # (stored alongside the table in the same file/group) can be looked up at
    # the end when the conditions are right.
input_save = input
if isinstance(input, (h5py.File, h5py.Group)):
# If a path was specified, follow the path
if path is not None:
try:
input = input[path]
except (KeyError, ValueError):
raise OSError(f"Path {path} does not exist")
# `input` is now either a group or a dataset. If it is a group, we
# will search for all structured arrays inside the group, and if there
# is one we can proceed otherwise an error is raised. If it is a
# dataset, we just proceed with the reading.
if isinstance(input, h5py.Group):
# Find all structured arrays in group
arrays = _find_all_structured_arrays(input)
if len(arrays) == 0:
raise ValueError(f"no table found in HDF5 group {path}")
            else:
path = arrays[0] if path is None else path + "/" + arrays[0]
if len(arrays) > 1:
warnings.warn(
"path= was not specified but multiple tables"
" are present, reading in first available"
f" table (path={path})",
AstropyUserWarning,
)
                return read_table_hdf5(input, path=path, character_as_bytes=character_as_bytes)
elif not isinstance(input, h5py.Dataset):
# If a file object was passed, then we need to extract the filename
# because h5py cannot properly read in file objects.
if hasattr(input, "read"):
try:
input = input.name
except AttributeError:
raise TypeError("h5py can only open regular files")
# Open the file for reading, and recursively call read_table_hdf5 with
# the file object and the path.
f = h5py.File(input, "r")
try:
return read_table_hdf5(f, path=path, character_as_bytes=character_as_bytes)
finally:
f.close()
# If we are here, `input` should be a Dataset object, which we can now
# convert to a Table.
# Create a Table object
from astropy.table import Table, meta, serialize
table = Table(np.array(input))
# Read the meta-data from the file. For back-compatibility, we can read
# the old file format where the serialized metadata were saved in the
# attributes of the HDF5 dataset.
# In the new format, instead, metadata are stored in a new dataset in the
    # same file. This was introduced in Astropy 3.0.
old_version_meta = META_KEY in input.attrs
new_version_meta = path is not None and meta_path(path) in input_save
if old_version_meta or new_version_meta:
if new_version_meta:
header = meta.get_header_from_yaml(
h.decode("utf-8") for h in input_save[meta_path(path)]
)
else:
            # Otherwise old_version_meta must be True: if (A or B) and not B, then A is True
header = meta.get_header_from_yaml(
h.decode("utf-8") for h in input.attrs[META_KEY]
)
if "meta" in list(header.keys()):
table.meta = header["meta"]
header_cols = {x["name"]: x for x in header["datatype"]}
for col in table.columns.values():
for attr in ("description", "format", "unit", "meta"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
table = serialize._construct_mixins_from_columns(table)
else:
# Read the meta-data from the file
table.meta.update(input.attrs)
if not character_as_bytes:
table.convert_bytestring_to_unicode()
return table |
Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding. | def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from astropy.table import serialize
from astropy.utils.data_info import serialize_context_as
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table.
with serialize_context_as("hdf5"):
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl |
Write a Table object to an HDF5 file.
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py.File` or :class:`h5py.Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to ``__astropy_table__``.
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
serialize_meta : bool
Whether to serialize rich table meta-data when writing the HDF5 file, in
particular such data required to write and read back mixin columns like
``Time``, ``SkyCoord``, or ``Quantity`` to the file.
**create_dataset_kwargs
Additional keyword arguments are passed to
``h5py.File.create_dataset()`` or ``h5py.Group.create_dataset()``. | def write_table_hdf5(
table,
output,
path=None,
compression=False,
append=False,
overwrite=False,
serialize_meta=False,
**create_dataset_kwargs,
):
"""
Write a Table object to an HDF5 file.
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py.File` or :class:`h5py.Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
If not specified, defaults to ``__astropy_table__``.
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
overwrite : bool
Whether to overwrite any existing file without warning.
If ``append=True`` and ``overwrite=True`` then only the dataset will be
replaced; the file/group will not be overwritten.
serialize_meta : bool
Whether to serialize rich table meta-data when writing the HDF5 file, in
particular such data required to write and read back mixin columns like
``Time``, ``SkyCoord``, or ``Quantity`` to the file.
**create_dataset_kwargs
Additional keyword arguments are passed to
``h5py.File.create_dataset()`` or ``h5py.Group.create_dataset()``.
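    Examples
    --------
    A minimal usage sketch; the file name and path are hypothetical:
    >>> from astropy.table import Table
    >>> t = Table({"a": [1, 2, 3]})
    >>> write_table_hdf5(t, "out.hdf5", path="data/table", overwrite=True)  # doctest: +SKIP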
"""
from astropy.table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
if path is None:
# table is just an arbitrary, hardcoded string here.
path = "__astropy_table__"
elif path.endswith("/"):
raise ValueError("table path should end with table name, not /")
if "/" in path:
group, name = path.rsplit("/", 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if len(list(output.keys())) > 0 and name == "__astropy_table__":
raise ValueError(
"table path should always be set via the "
"path= argument when writing to existing "
"files"
)
elif name == "__astropy_table__":
warnings.warn(
"table path was not set via the path= argument; "
f"using default path {path}"
)
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
            if overwrite:
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# Open the file for appending or writing
f = h5py.File(output, "a" if append else "w")
# Recursively call the write function
try:
            return write_table_hdf5(
                table,
                f,
                path=path,
                compression=compression,
                append=append,
                overwrite=overwrite,
                serialize_meta=serialize_meta,
                **create_dataset_kwargs,
            )
finally:
f.close()
else:
raise TypeError("output should be a string or an h5py File or Group object")
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
if serialize_meta and name + ".__table_column_meta__" in output_group:
del output_group[name + ".__table_column_meta__"]
else:
raise OSError(f"Table {path} already exists")
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
# Table with numpy unicode strings can't be written in HDF5 so
# to write such a table a copy of table is made containing columns as
# bytestrings. Now this copy of the table can be written in HDF5.
if any(col.info.dtype.kind == "U" for col in table.itercols()):
table = table.copy(copy_data=False)
table.convert_unicode_to_bytestring()
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ("unit", "format", "description", "meta"):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn(
"table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning,
)
# Write the table to the file
if compression:
if compression is True:
compression = "gzip"
dset = output_group.create_dataset(
name,
data=table.as_array(),
compression=compression,
**create_dataset_kwargs,
)
else:
dset = output_group.create_dataset(
name, data=table.as_array(), **create_dataset_kwargs
)
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = np.array([h.encode("utf-8") for h in header_yaml])
output_group.create_dataset(meta_path(name), data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn(
f"Attribute `{key}` of type {type(val)} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)",
AstropyUserWarning,
) |
Register HDF5 with Unified I/O. | def register_hdf5():
"""
Register HDF5 with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader("hdf5", Table, read_table_hdf5)
io_registry.register_writer("hdf5", Table, write_table_hdf5)
io_registry.register_identifier("hdf5", Table, is_hdf5) |
Checks if input is in the Parquet format.
Parameters
----------
origin : Any
filepath : str or None
fileobj : `~pyarrow.NativeFile` or None
*args, **kwargs
Returns
-------
is_parquet : bool
True if 'fileobj' is not None and is a pyarrow file, or if
'filepath' is a string ending with '.parquet' or '.parq'.
False otherwise. | def parquet_identify(origin, filepath, fileobj, *args, **kwargs):
"""Checks if input is in the Parquet format.
Parameters
----------
origin : Any
filepath : str or None
fileobj : `~pyarrow.NativeFile` or None
*args, **kwargs
Returns
-------
is_parquet : bool
True if 'fileobj' is not None and is a pyarrow file, or if
'filepath' is a string ending with '.parquet' or '.parq'.
False otherwise.
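    Examples
    --------
    The file names below are purely illustrative:
    >>> parquet_identify("read", "catalog.parquet", None)
    True
    >>> parquet_identify("read", "catalog.txt", None)
    False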
"""
if fileobj is not None:
try: # safely test if pyarrow file
pos = fileobj.tell() # store current stream position
except AttributeError:
return False
signature = fileobj.read(4) # read first 4 bytes
fileobj.seek(pos) # return to original location
return signature == PARQUET_SIGNATURE
elif filepath is not None:
return filepath.endswith((".parquet", ".parq"))
else:
return False |
Read a Table object from a Parquet file.
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
The ``filters`` parameter consists of predicates that are expressed
in disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
DNF allows arbitrary boolean logical combinations of single column
predicates. The innermost tuples each describe a single column predicate.
The list of inner predicates is interpreted as a conjunction (AND),
forming a more selective and multiple column predicate. Finally, the most
outer list combines these filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
Each tuple has format: (``key``, ``op``, ``value``) and compares the
``key`` with the ``value``.
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
``value`` must be a collection such as a ``list``, a ``set`` or a
``tuple``.
For example:
.. code-block:: python
('x', '=', 0)
('y', 'in', ['a', 'b', 'c'])
('z', 'not in', {'a','b'})
Parameters
----------
input : str or path-like or file-like object
If a string or path-like object, the filename to read the table from.
If a file-like object, the stream to read data.
include_names : list [str], optional
List of names to include in output. If not supplied, then
include all columns.
exclude_names : list [str], optional
List of names to exclude from output (applied after ``include_names``).
If not supplied then no columns are excluded.
schema_only : bool, optional
Only read the schema/metadata with table information.
filters : list [tuple] or list [list [tuple] ] or None, optional
Rows which do not match the filter predicate will be removed from
scanned data. See `pyarrow.parquet.read_table()` for details.
Returns
-------
table : `~astropy.table.Table`
Table will have zero rows and only metadata information
if schema_only is True. | def read_table_parquet(
input, include_names=None, exclude_names=None, schema_only=False, filters=None
):
"""
Read a Table object from a Parquet file.
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
The ``filters`` parameter consists of predicates that are expressed
in disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``.
DNF allows arbitrary boolean logical combinations of single column
predicates. The innermost tuples each describe a single column predicate.
The list of inner predicates is interpreted as a conjunction (AND),
forming a more selective and multiple column predicate. Finally, the most
outer list combines these filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
Each tuple has format: (``key``, ``op``, ``value``) and compares the
``key`` with the ``value``.
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
``value`` must be a collection such as a ``list``, a ``set`` or a
``tuple``.
For example:
.. code-block:: python
('x', '=', 0)
('y', 'in', ['a', 'b', 'c'])
('z', 'not in', {'a','b'})
Parameters
----------
input : str or path-like or file-like object
If a string or path-like object, the filename to read the table from.
If a file-like object, the stream to read data.
include_names : list [str], optional
List of names to include in output. If not supplied, then
include all columns.
exclude_names : list [str], optional
List of names to exclude from output (applied after ``include_names``).
If not supplied then no columns are excluded.
schema_only : bool, optional
Only read the schema/metadata with table information.
filters : list [tuple] or list [list [tuple] ] or None, optional
Rows which do not match the filter predicate will be removed from
scanned data. See `pyarrow.parquet.read_table()` for details.
Returns
-------
table : `~astropy.table.Table`
Table will have zero rows and only metadata information
if schema_only is True.
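    Examples
    --------
    A minimal usage sketch; the file and column names are hypothetical:
    >>> t = read_table_parquet("catalog.parquet",
    ...                        include_names=["ra", "dec", "mag"],
    ...                        filters=[("mag", "<", 20.0)])  # doctest: +SKIP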
"""
pa, parquet, _ = get_pyarrow()
if not isinstance(input, (str, os.PathLike)):
# The 'read' attribute is the key component of a generic
# file-like object.
if not hasattr(input, "read"):
raise TypeError("pyarrow can only open path-like or file-like objects.")
schema = parquet.read_schema(input)
# Pyarrow stores all metadata as byte-strings, so we convert
# to UTF-8 strings here.
if schema.metadata is not None:
md = {k.decode("UTF-8"): v.decode("UTF-8") for k, v in schema.metadata.items()}
else:
md = {}
from astropy.table import Column, Table, meta, serialize
# parse metadata from table yaml
meta_dict = {}
if "table_meta_yaml" in md:
meta_yaml = md.pop("table_meta_yaml").split("\n")
meta_hdr = meta.get_header_from_yaml(meta_yaml)
if "meta" in meta_hdr:
meta_dict = meta_hdr["meta"]
else:
meta_hdr = None
# parse and set serialized columns
full_table_columns = {name: name for name in schema.names}
has_serialized_columns = False
if "__serialized_columns__" in meta_dict:
has_serialized_columns = True
serialized_columns = meta_dict["__serialized_columns__"]
for scol in serialized_columns:
for name in _get_names(serialized_columns[scol]):
full_table_columns[name] = scol
use_names = set(full_table_columns.values())
# Apply include_names before exclude_names
if include_names is not None:
use_names.intersection_update(include_names)
if exclude_names is not None:
use_names.difference_update(exclude_names)
# Preserve column ordering via list, and use this dict trick
# to remove duplicates and preserve ordering (for mixin columns)
use_names = list(
dict.fromkeys([x for x in full_table_columns.values() if x in use_names])
)
# names_to_read is a list of actual serialized column names, where
# e.g. the requested name 'time' becomes ['time.jd1', 'time.jd2']
names_to_read = []
for name in use_names:
names = [n for n, col in full_table_columns.items() if name == col]
names_to_read.extend(names)
if full_table_columns and not names_to_read:
raise ValueError("No include_names specified were found in the table.")
# We need to pop any unread serialized columns out of the meta_dict.
if has_serialized_columns:
for scol in list(meta_dict["__serialized_columns__"].keys()):
if scol not in use_names:
meta_dict["__serialized_columns__"].pop(scol)
# whether to return the whole table or a formatted empty table.
if not schema_only:
# Read the pyarrow table, specifying columns and filters.
pa_table = parquet.read_table(input, columns=names_to_read, filters=filters)
num_rows = pa_table.num_rows
else:
num_rows = 0
# Determine numpy/astropy types of columns from the arrow table.
dtype = []
for name in names_to_read:
t = schema.field(name).type
shape = None
if isinstance(t, pa.FixedSizeListType):
# The FixedSizeListType has an arrow value_type and a size.
value_type = t.value_type
shape = (t.list_size,)
elif isinstance(t, pa.ListType):
# The ListType (variable length arrays) has a value type.
value_type = t.value_type
else:
# All other arrow column types are the value_type.
value_type = t
if value_type not in (pa.string(), pa.binary()):
# Convert the pyarrow value type into a numpy dtype (which is returned
# by the to_pandas_type() method).
# If this is an array column, the numpy dtype needs the shape as well.
if shape is None:
dtype.append(value_type.to_pandas_dtype())
else:
dtype.append((value_type.to_pandas_dtype(), shape))
continue
# Special-case for string and binary columns
md_name = f"table::len::{name}"
if md_name in md:
# String/bytes length from header.
strlen = int(md[md_name])
        elif schema_only:
            # Choose an arbitrary string length since we
            # are not reading in the table.
            strlen = 10
            warnings.warn(
                f"No {md_name} found in metadata. Guessing {strlen} for schema.",
                AstropyUserWarning,
            )
        else:
            # Find the maximum string length.
            strlen = max(len(row.as_py()) for row in pa_table[name])
warnings.warn(
f"No {md_name} found in metadata. Using longest string"
f" ({strlen} characters).",
AstropyUserWarning,
)
strname = f"U{strlen}" if value_type == pa.string() else f"|S{strlen}"
# If this is an array column, the numpy dtype needs the shape as well.
if shape is None:
dtype.append(strname)
else:
dtype.append((strname, shape))
if schema_only:
# If we only need the schema, create an empty table with the correct dtype.
data = np.zeros(0, dtype=list(zip(names_to_read, dtype)))
table = Table(data=data, meta=meta_dict)
else:
# If we need the full table, create the table and add the columns
# one at a time. This minimizes data copying.
table = Table(meta=meta_dict)
for name, dt in zip(names_to_read, dtype):
# First convert the arrow column to a numpy array.
col = pa_table[name].to_numpy()
t = schema.field(name).type
if t in (pa.string(), pa.binary()):
# If it is a string/binary type, coerce it to the correct type.
col = col.astype(dt)
elif isinstance(t, pa.FixedSizeListType):
# If it is a FixedSizeListType (array column) then it needs to
# be broken into a 2D array, but only if the table has a non-zero
# length.
if len(col) > 0:
col = np.stack(col)
if t.value_type in (pa.string(), pa.binary()):
# If it is a string/binary type, coerce it to the
# correct type.
# The conversion dtype is only the first element
# in the dtype tuple.
col = col.astype(dt[0])
else:
# This is an empty column, and needs to be created with the
# correct type.
col = np.zeros(0, dtype=dt)
elif isinstance(t, pa.ListType):
# If we have a variable length string/binary column,
# we need to convert each row to the proper type.
if t.value_type in (pa.string(), pa.binary()):
col = np.array([row.astype(dt) for row in col], dtype=np.object_)
table.add_column(Column(name=name, data=col))
if meta_hdr is not None:
# Set description, format, unit, meta from the column
# metadata that was serialized with the table.
header_cols = {x["name"]: x for x in meta_hdr["datatype"]}
for col in table.columns.values():
for attr in ("description", "format", "unit", "meta"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Convert all compound columns to astropy objects
# (e.g. time.jd1, time.jd2 into a single time column)
table = serialize._construct_mixins_from_columns(table)
return table |
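# Usage sketch (illustrative, not part of the module): once the "parquet"
# format has been registered with Unified I/O (see register_parquet below),
# the reader above is normally reached through Table.read. The file name
# "example.parquet" and the column names are placeholders.
#
#     from astropy.table import Table
#     t = Table.read("example.parquet", format="parquet",
#                    include_names=["time", "flux"])
#
# include_names/exclude_names select columns before any data are read, and
# schema_only=True returns a zero-length table with the correct dtypes.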
Write a Table object to a Parquet file.
The parquet writer supports tables with regular columns, fixed-size array
columns, and variable-length array columns (provided all arrays have the
same type).
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or path-like
The filename to write the table to.
overwrite : bool, optional
Whether to overwrite any existing file without warning. Default `False`.
Notes
-----
Tables written with array columns (fixed-size or variable-length) cannot
be read with pandas.
Raises
------
ValueError
If one of the columns has a mixed-type variable-length array, or
if it is a zero-length table and any of the columns are variable-length
arrays. | def write_table_parquet(table, output, overwrite=False):
"""
Write a Table object to a Parquet file.
The parquet writer supports tables with regular columns, fixed-size array
columns, and variable-length array columns (provided all arrays have the
same type).
This requires `pyarrow <https://arrow.apache.org/docs/python/>`_
to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or path-like
The filename to write the table to.
overwrite : bool, optional
Whether to overwrite any existing file without warning. Default `False`.
Notes
-----
Tables written with array columns (fixed-size or variable-length) cannot
be read with pandas.
Raises
------
ValueError
If one of the columns has a mixed-type variable-length array, or
if it is a zero-length table and any of the columns are variable-length
arrays.
"""
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
pa, parquet, writer_version = get_pyarrow()
if not isinstance(output, (str, os.PathLike)):
raise TypeError(f"`output` should be a string or path-like, not {output}")
# Convert all compound columns into serialized column names, where
# e.g. 'time' becomes ['time.jd1', 'time.jd2'].
with serialize_context_as("parquet"):
encode_table = serialize.represent_mixins_as_columns(table)
# We store the encoded serialization metadata as a yaml string.
meta_yaml = meta.get_yaml_from_table(encode_table)
meta_yaml_str = "\n".join(meta_yaml)
# Build the pyarrow schema by converting from the numpy dtype of each
# column to an equivalent pyarrow type with from_numpy_dtype()
type_list = []
for name in encode_table.dtype.names:
dt = encode_table.dtype[name]
if dt.type == np.object_:
# If the column type is np.object_, then it should be a column
# of variable-length arrays. This can be serialized with parquet
# provided all of the elements have the same data-type.
# Additionally, if the table has no elements, we cannot deduce
# the datatype, and hence cannot serialize the table.
if len(encode_table) > 0:
obj_dtype = encode_table[name][0].dtype
# Check that the variable-length array all has the same type.
for row in encode_table[name]:
if row.dtype != obj_dtype:
raise ValueError(
f"Cannot serialize mixed-type column ({name}) with parquet."
)
# Calling pa.list_() creates a ListType which is an array of variable-
# length elements.
arrow_type = pa.list_(
value_type=pa.from_numpy_dtype(obj_dtype.type),
)
else:
raise ValueError(
"Cannot serialize zero-length table "
f"with object column ({name}) with parquet."
)
elif len(dt.shape) > 0:
# This column has a shape, and is an array type column. Calling
# pa.list_() with a list_size creates a FixedSizeListType, which
# is an array of fixed-length elements.
arrow_type = pa.list_(
value_type=pa.from_numpy_dtype(dt.subdtype[0].type),
list_size=np.prod(dt.shape),
)
else:
# This is a standard column.
arrow_type = pa.from_numpy_dtype(dt.type)
type_list.append((name, arrow_type))
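    # (Illustration: pa.from_numpy_dtype(np.dtype("float64")) == pa.float64(),
    # while pa.list_(pa.float64(), list_size=3) describes a fixed-size
    # 3-element array column.)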
metadata = {}
for name, col in encode_table.columns.items():
# Parquet will retain the datatypes of columns, but string and
# byte column length is lost. Therefore, we special-case these
# types to record the length for precise round-tripping.
t = col.dtype.type
itemsize = col.dtype.itemsize
if t is np.object_:
t = encode_table[name][0].dtype.type
if t == np.str_ or t == np.bytes_:
# We need to scan through all of them.
itemsize = -1
for row in encode_table[name]:
itemsize = max(itemsize, row.dtype.itemsize)
if t is np.str_:
metadata[f"table::len::{name}"] = str(itemsize // 4)
elif t is np.bytes_:
metadata[f"table::len::{name}"] = str(itemsize)
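    # (Worked example: a numpy "<U5" column stores 4 bytes per character, so
    # its itemsize is 20 and the recorded length is 20 // 4 == 5; a "|S5"
    # bytes column has itemsize 5 and is recorded as-is.)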
metadata["table_meta_yaml"] = meta_yaml_str
# Pyarrow stores all metadata as byte strings, so we explicitly encode
# our unicode strings in metadata as UTF-8 byte strings here.
metadata_encode = {
k.encode("UTF-8"): v.encode("UTF-8") for k, v in metadata.items()
}
schema = pa.schema(type_list, metadata=metadata_encode)
if os.path.exists(output):
if overwrite:
# We must remove the file prior to writing below.
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
    # We use the writer version returned by get_pyarrow() for full support
    # of datatypes including uint32.
with parquet.ParquetWriter(output, schema, version=writer_version) as writer:
# Convert each Table column to a pyarrow array
arrays = []
for name in encode_table.dtype.names:
dt = encode_table.dtype[name]
# Parquet must be stored little-endian. When we use astype(..., copy=False)
# we get a very fast conversion when the dtype is unchanged, and only
# incur a cost when we need to do a byte-swap operation.
dt_new = dt.newbyteorder("<")
if dt.type == np.object_:
# Turn the column into a list of numpy arrays.
val = [row.astype(dt_new, copy=False) for row in encode_table[name]]
elif len(dt.shape) > 0:
if len(encode_table) > 0:
val = np.split(
encode_table[name].ravel().astype(dt_new.base, copy=False),
len(encode_table),
)
else:
val = []
else:
val = encode_table[name].astype(dt_new, copy=False)
arrays.append(pa.array(val, type=schema.field(name).type))
# Create a pyarrow table from the list of arrays and the schema
pa_table = pa.Table.from_arrays(arrays, schema=schema)
# Write the pyarrow table to a file
writer.write_table(pa_table) |
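# Round-trip sketch (illustrative, not part of the module): write a small
# table with write_table_parquet and read it back with read_table_parquet.
# Requires pyarrow; the temporary path and column names are made up for the
# example.
#
#     import os, tempfile
#     import numpy as np
#     from astropy.table import Table
#
#     path = os.path.join(tempfile.mkdtemp(), "demo.parquet")
#     t = Table({"a": np.arange(3), "s": ["x", "yy", "zzz"]})
#     write_table_parquet(t, path, overwrite=True)
#     t2 = read_table_parquet(path, include_names=["a"])
#     assert t2.colnames == ["a"]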
Recursively find the names in a serialized column dictionary.
Parameters
----------
_dict : `dict`
Dictionary from astropy __serialized_columns__
Returns
-------
all_names : `list` [`str`]
All the column names mentioned in _dict and sub-dicts. | def _get_names(_dict):
"""Recursively find the names in a serialized column dictionary.
Parameters
----------
_dict : `dict`
Dictionary from astropy __serialized_columns__
Returns
-------
all_names : `list` [`str`]
All the column names mentioned in _dict and sub-dicts.
"""
all_names = []
for k, v in _dict.items():
if isinstance(v, dict):
all_names.extend(_get_names(v))
elif k == "name":
all_names.append(v)
return all_names |
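# (Illustration with a hypothetical mapping: for a serialized Time column,
# __serialized_columns__ looks roughly like
#     {"time": {"__class__": "astropy.time.Time",
#               "jd1": {"name": "time.jd1"},
#               "jd2": {"name": "time.jd2"}}}
# and _get_names() on that dict returns ["time.jd1", "time.jd2"].)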
Register Parquet with Unified I/O. | def register_parquet():
"""
Register Parquet with Unified I/O.
"""
from astropy.io import registry as io_registry
from astropy.table import Table
io_registry.register_reader("parquet", Table, read_table_parquet)
io_registry.register_writer("parquet", Table, write_table_parquet)
io_registry.register_identifier("parquet", Table, parquet_identify) |
Unpickle pickled objects from a specified file and return the contents.
.. warning:: The ``pickle`` module is not secure. Only unpickle data you trust.
Parameters
----------
fileorname : str or file-like
The file name or file from which to unpickle objects. If a file object,
it should have been opened in binary mode.
number : int
If 0, a single object will be returned (the first in the file). If >0,
this specifies the number of objects to be unpickled, and a list will
be returned with exactly that many objects. If <0, all objects in the
file will be unpickled and returned as a list.
Raises
------
EOFError
If ``number`` is >0 and there are fewer than ``number`` objects in the
pickled file.
Returns
-------
contents : object or list
    If ``number`` is 0, this is an individual object - the first one
unpickled from the file. Otherwise, it is a list of objects unpickled
from the file. | def fnunpickle(fileorname, number=0):
"""Unpickle pickled objects from a specified file and return the contents.
.. warning:: The ``pickle`` module is not secure. Only unpickle data you trust.
Parameters
----------
fileorname : str or file-like
The file name or file from which to unpickle objects. If a file object,
it should have been opened in binary mode.
number : int
If 0, a single object will be returned (the first in the file). If >0,
this specifies the number of objects to be unpickled, and a list will
be returned with exactly that many objects. If <0, all objects in the
file will be unpickled and returned as a list.
Raises
------
EOFError
If ``number`` is >0 and there are fewer than ``number`` objects in the
pickled file.
Returns
-------
contents : object or list
        If ``number`` is 0, this is an individual object - the first one
unpickled from the file. Otherwise, it is a list of objects unpickled
from the file.
"""
if isinstance(fileorname, str):
f = open(fileorname, "rb")
close = True
else:
f = fileorname
close = False
try:
if number > 0: # get that number
res = []
for i in range(number):
res.append(pickle.load(f))
elif number < 0: # get all objects
res = []
eof = False
while not eof:
try:
res.append(pickle.load(f))
except EOFError:
eof = True
else: # number==0
res = pickle.load(f)
finally:
if close:
f.close()
return res |
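# Usage sketch (illustrative): unpickle every object in a file by passing
# number=-1. The path is a placeholder; fnpickle (defined next) creates it.
#
#     fnpickle([1, 2, 3], "objs.pkl")
#     fnpickle({"a": 4}, "objs.pkl", append=True)
#     objs = fnunpickle("objs.pkl", number=-1)
#     # objs == [[1, 2, 3], {"a": 4}]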
Pickle an object to a specified file.
Parameters
----------
object
    The Python object to pickle.
fileorname : str or file-like
The filename or file into which the `object` should be pickled. If a
file object, it should have been opened in binary mode.
protocol : int or None
Pickle protocol to use - see the :mod:`pickle` module for details on
these options. If None, the most recent protocol will be used.
append : bool
If True, the object is appended to the end of the file, otherwise the
file will be overwritten (if a file object is given instead of a
file name, this has no effect). | def fnpickle(object, fileorname, protocol=None, append=False):
"""Pickle an object to a specified file.
Parameters
----------
object
        The Python object to pickle.
fileorname : str or file-like
The filename or file into which the `object` should be pickled. If a
file object, it should have been opened in binary mode.
protocol : int or None
Pickle protocol to use - see the :mod:`pickle` module for details on
these options. If None, the most recent protocol will be used.
append : bool
If True, the object is appended to the end of the file, otherwise the
file will be overwritten (if a file object is given instead of a
file name, this has no effect).
"""
if protocol is None:
protocol = pickle.HIGHEST_PROTOCOL
if isinstance(fileorname, str):
f = open(fileorname, "ab" if append else "wb")
close = True
else:
f = fileorname
close = False
try:
pickle.dump(object, f, protocol=protocol)
finally:
if close:
f.close() |
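# Usage sketch (illustrative): fnpickle also accepts an already-open binary
# file object, in which case it neither opens nor closes the file itself.
# The path is a placeholder.
#
#     with open("objs.pkl", "wb") as f:
#         fnpickle({"x": 1}, f, protocol=pickle.HIGHEST_PROTOCOL)
#         fnpickle({"y": 2}, f)  # successive dumps to the same handle append
#     objs = fnunpickle("objs.pkl", number=2)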
Parse the first YAML document in a stream using the AstropyLoader and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document | def load(stream):
"""Parse the first YAML document in a stream using the AstropyLoader and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
obj : object
Object corresponding to YAML document
"""
return yaml.load(stream, Loader=AstropyLoader) |
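# Usage sketch (illustrative): the AstropyLoader understands the tags written
# by dump() (defined below), so astropy objects such as Quantity round-trip
# through plain YAML text.
#
#     import astropy.units as u
#     text = dump({"speed": 3.0 * u.km / u.s})
#     obj = load(text)
#     # obj["speed"] is a Quantity with unit km / s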
Parse all the YAML documents in a stream using the AstropyLoader class and
produce the corresponding Python objects.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
objs : generator
    Generator yielding the object corresponding to each YAML document
"""Parse the all YAML documents in a stream using the AstropyLoader class and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like
YAML input
Returns
-------
    objs : generator
        Generator yielding the object corresponding to each YAML document
"""
return yaml.load_all(stream, Loader=AstropyLoader) |
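# Usage sketch (illustrative): load_all returns a generator with one object
# per YAML document in the stream.
#
#     stream = "---\na: 1\n---\nb: 2\n"
#     docs = list(load_all(stream))
#     # docs == [{"a": 1}, {"b": 2}]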
Serialize a Python object into a YAML stream using the AstropyDumper class.
If stream is None, return the produced string instead.
Parameters
----------
data : object
Object to serialize to YAML
stream : file-like, optional
YAML output (if not supplied a string is returned)
**kwargs
Other keyword arguments that get passed to yaml.dump()
Returns
-------
out : str or None
If no ``stream`` is supplied then YAML output is returned as str | def dump(data, stream=None, **kwargs):
"""Serialize a Python object into a YAML stream using the AstropyDumper class.
If stream is None, return the produced string instead.
Parameters
----------
data : object
Object to serialize to YAML
stream : file-like, optional
YAML output (if not supplied a string is returned)
**kwargs
Other keyword arguments that get passed to yaml.dump()
Returns
-------
out : str or None
If no ``stream`` is supplied then YAML output is returned as str
"""
kwargs["Dumper"] = AstropyDumper
kwargs.setdefault("default_flow_style", None)
return yaml.dump(data, stream=stream, **kwargs) |
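# Usage sketch (illustrative): pass a file-like object as ``stream`` to write
# the YAML directly instead of receiving a string.
#
#     import io
#     buf = io.StringIO()
#     dump([1, 2, 3], stream=buf)
#     assert load(buf.getvalue()) == [1, 2, 3]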