hash
stringlengths 64
64
| content
stringlengths 0
1.51M
|
---|---|
4ddb2d52aae7cc995cf343dee5093df18061d966a122e84a35a6e101cc522b1c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import numpy as np
import pytest
from astropy import units as u
from astropy.nddata.decorators import support_nddata
from astropy.nddata.nddata import NDData
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs import WCS
class CCDData(NDData):
    # Minimal NDData subclass: lets tests exercise `accepts=`-style type
    # restrictions with a type distinct from NDData itself.
    pass
@support_nddata
def wrapped_function_1(data, wcs=None, unit=None):
    # Echo back the unpacked arguments so tests can inspect exactly what the
    # decorator forwarded.
    return data, wcs, unit
def test_pass_numpy():
    """A bare ndarray passes through the decorator; optional args stay None."""
    arr = np.array([1, 2, 3])
    out_data, out_wcs, out_unit = wrapped_function_1(data=arr)
    assert out_data is arr
    assert out_wcs is None
    assert out_unit is None
def test_pass_all_separate():
    """Explicitly supplied data/wcs/unit are all forwarded untouched."""
    arr = np.array([1, 2, 3])
    the_wcs = WCS(naxis=1)
    the_unit = u.Jy
    out_data, out_wcs, out_unit = wrapped_function_1(
        data=arr, wcs=the_wcs, unit=the_unit
    )
    assert out_data is arr
    assert out_wcs is the_wcs
    assert out_unit is the_unit
def test_pass_nddata():
    """An NDData instance is unpacked into data, wcs and unit."""
    arr = np.array([1, 2, 3])
    the_wcs = WCS(naxis=1)
    the_unit = u.Jy
    ndd = NDData(arr, wcs=the_wcs, unit=the_unit)
    out_data, out_wcs, out_unit = wrapped_function_1(ndd)
    assert out_data is arr
    assert out_wcs is the_wcs
    assert out_unit is the_unit
def test_pass_nddata_and_explicit():
    """An explicit keyword wins over the NDData attribute, with a warning."""
    arr = np.array([1, 2, 3])
    the_wcs = WCS(naxis=1)
    nddata_unit = u.Jy
    explicit_unit = u.mJy
    ndd = NDData(arr, wcs=the_wcs, unit=nddata_unit)
    expected_msg = (
        "Property unit has been passed explicitly and as "
        "an NDData property, using explicitly specified value"
    )
    with pytest.warns(AstropyUserWarning, match=expected_msg) as record:
        out_data, out_wcs, out_unit = wrapped_function_1(ndd, unit=explicit_unit)
    assert len(record) == 1
    assert out_data is arr
    assert out_wcs is the_wcs
    assert out_unit is explicit_unit
def test_pass_nddata_ignored():
    """NDData attributes the wrapped function cannot accept trigger a warning."""
    arr = np.array([1, 2, 3])
    the_wcs = WCS(naxis=1)
    the_unit = u.Jy
    ndd = NDData(arr, wcs=the_wcs, unit=the_unit, mask=[0, 1, 0])
    expected_msg = (
        "The following attributes were set on the data "
        "object, but will be ignored by the function: mask"
    )
    with pytest.warns(AstropyUserWarning, match=expected_msg) as record:
        out_data, out_wcs, out_unit = wrapped_function_1(ndd)
    assert len(record) == 1
    assert out_data is arr
    assert out_wcs is the_wcs
    assert out_unit is the_unit
def test_incorrect_first_argument():
    """The decorator rejects functions whose first argument is not ``data``."""
    expected = "Can only wrap functions whose first positional argument is `data`"

    with pytest.raises(ValueError) as excinfo:

        @support_nddata
        def wrapped_function_2(something, wcs=None, unit=None):
            pass

    assert excinfo.value.args[0] == expected

    with pytest.raises(ValueError) as excinfo:

        @support_nddata
        def wrapped_function_3(something, data, wcs=None, unit=None):
            pass

    assert excinfo.value.args[0] == expected

    with pytest.raises(ValueError) as excinfo:

        @support_nddata
        def wrapped_function_4(wcs=None, unit=None):
            pass

    assert excinfo.value.args[0] == expected
def test_wrap_function_no_kwargs():
    """Functions without optional kwargs still get NDData unpacked to data."""

    @support_nddata
    def wrapped_function_5(data, other_data):
        return data

    arr = np.array([1, 2, 3])
    assert wrapped_function_5(NDData(arr), [1, 2, 3]) is arr
def test_wrap_function_repack_valid():
    """With repack, the plain return value is wrapped back into an NDData."""

    @support_nddata(repack=True, returns=["data"])
    def wrapped_function_5(data, other_data):
        return data

    arr = np.array([1, 2, 3])
    repacked = wrapped_function_5(NDData(arr), [1, 2, 3])
    assert isinstance(repacked, NDData)
    assert repacked.data is arr
def test_wrap_function_accepts():
    """``accepts=`` restricts the wrapper to the given NDData subclass."""

    class MyData(NDData):
        pass

    @support_nddata(accepts=MyData)
    def wrapped_function_5(data, other_data):
        return data

    arr = np.array([1, 2, 3])
    # The declared subclass is unpacked as usual ...
    assert wrapped_function_5(MyData(arr), [1, 2, 3]) is arr
    # ... while a plain NDData is rejected.
    expected_msg = (
        "Only NDData sub-classes that inherit "
        "from MyData can be used by this function"
    )
    with pytest.raises(TypeError, match=expected_msg):
        wrapped_function_5(NDData(arr), [1, 2, 3])
def test_wrap_preserve_signature_docstring():
    """The decorator must not clobber ``__doc__`` or the inspectable signature."""

    @support_nddata
    def wrapped_function_6(data, wcs=None, unit=None):
        """
        An awesome function
        """
        pass

    # Docstrings are stripped when Python runs with -OO, hence the guard.
    if wrapped_function_6.__doc__ is not None:
        assert wrapped_function_6.__doc__.strip() == "An awesome function"
    sig = inspect.signature(wrapped_function_6)
    assert str(sig) == "(data, wcs=None, unit=None)"
def test_setup_failures1():
    """``repack=True`` without a ``returns`` list is invalid."""
    pytest.raises(ValueError, support_nddata, repack=True)
def test_setup_failures2():
    """``returns`` without ``repack=True`` is invalid."""
    pytest.raises(ValueError, support_nddata, returns=["data"])
def test_setup_failures9():
    """``keeps`` without ``repack=True`` is invalid."""
    pytest.raises(ValueError, support_nddata, keeps=["unit"])
def test_setup_failures3():
    """The same attribute may not appear in both ``keeps`` and ``returns``."""
    pytest.raises(
        ValueError, support_nddata, repack=True, keeps=["mask"], returns=["data", "mask"]
    )
def test_setup_failures4():
    """Functions taking ``*args`` cannot be wrapped."""
    with pytest.raises(ValueError):

        @support_nddata
        def test(data, *args):
            pass
def test_setup_failures10():
    """Functions taking ``**kwargs`` cannot be wrapped."""
    with pytest.raises(ValueError):

        @support_nddata
        def test(data, **kwargs):
            pass
def test_setup_failures5():
    """Variadic positional arguments are rejected (same as failures4)."""
    with pytest.raises(ValueError):

        @support_nddata
        def test(data, *args):
            pass
def test_setup_failures6():
    """A first argument named anything but ``data`` is rejected."""
    with pytest.raises(ValueError):

        @support_nddata
        def test(img):
            pass
def test_setup_failures7():
    """Calling an ``accepts=CCDData`` wrapper with plain NDData raises."""
    with pytest.raises(TypeError):

        @support_nddata(accepts=CCDData)
        def test(data):
            pass

        test(NDData(np.ones((3, 3))))
def test_setup_failures8():
    """Returning fewer values than declared in ``returns`` is an error.

    NDData is used (not CCDData) to avoid trouble creating a CCDData
    without a unit.
    """
    with pytest.raises(ValueError):

        @support_nddata(repack=True, returns=["data", "mask"])
        def test(data):
            return 10

        test(NDData(np.ones((3, 3))))  # do NOT use CCDData here.
def test_setup_failures11():
    """A function with no arguments at all cannot be wrapped."""
    with pytest.raises(ValueError):

        @support_nddata
        def test():
            pass
def test_setup_numpyarray_default():
    """A numpy-array default must not break wrapping.

    Mutable defaults are not advisable, but they have to be possible.
    """

    @support_nddata
    def func(data, wcs=np.array([1, 2, 3])):
        return wcs
def test_still_accepts_other_input():
    """Non-NDData inputs pass straight through, even with repack enabled."""

    @support_nddata(repack=True, returns=["data"])
    def test(data):
        return data

    assert isinstance(test(NDData(np.ones((3, 3)))), NDData)
    assert isinstance(test(10), int)
    assert isinstance(test([1, 2, 3]), list)
def test_accepting_property_normal():
    """A ``mask`` kwarg is filled from the NDData's mask attribute."""

    @support_nddata
    def test(data, mask=None):
        return mask

    ndd = NDData(np.ones((3, 3)))
    assert test(ndd) is None
    # Poke the private attribute to give the instance a mask after creation.
    ndd._mask = np.zeros((3, 3))
    assert np.all(test(ndd) == 0)
    # An explicitly passed mask wins, but only with a warning.
    with pytest.warns(AstropyUserWarning) as record:
        assert test(ndd, mask=10) == 10
    assert len(record) == 1
def test_parameter_default_identical_to_explicit_passed_argument():
    """An explicit arg equal to the default still warns and wins over NDData meta."""

    # The mutable default is intentional -- it is exactly what this exercises.
    @support_nddata
    def func(data, meta={"a": 1}):
        return meta

    with pytest.warns(AstropyUserWarning) as record:
        assert func(NDData(1, meta={"b": 2}), {"a": 1}) == {"a": 1}
    assert len(record) == 1
    # Without an explicit value the NDData meta is used, no warning.
    assert func(NDData(1, meta={"b": 2})) == {"b": 2}
def test_accepting_property_notexist():
    """A kwarg with no matching NDData attribute keeps its default silently."""

    @support_nddata
    def test(data, flags=10):
        return flags

    test(NDData(np.ones((3, 3))))
def test_accepting_property_translated():
    # Attribute-renaming support: the NDData `mask` attribute feeds the
    # `masked` kwarg via the mask="masked" mapping.  (The previous comment
    # about an "error"/uncertainty attribute was stale -- this tests mask.)
    @support_nddata(mask="masked")
    def test(data, masked=None):
        return masked

    ndd = NDData(np.ones((3, 3)))
    assert test(ndd) is None
    # Poke the private attribute to give the instance a mask after creation.
    ndd._mask = np.zeros((3, 3))
    assert np.all(test(ndd) == 0)
    # Use the explicitly given one (raises a Warning)
    with pytest.warns(AstropyUserWarning) as w:
        assert test(ndd, masked=10) == 10
    assert len(w) == 1
def test_accepting_property_meta_empty():
    """An empty (but not None) meta on NDData is treated as absent."""

    @support_nddata
    def test(data, meta=None):
        return meta

    ndd = NDData(np.ones((3, 3)))
    # meta on NDData is always an (empty) mapping; empty counts as "not set".
    assert test(ndd) is None
    ndd._meta = {"a": 10}
    assert test(ndd) == {"a": 10}
|
a9ca8ae60fc24d04a2bcc9623a25c6224ab2778eb89625614cf83b7230b0bcdb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.nddata.ccddata import CCDData
from astropy.nddata.compat import NDDataArray
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import (
IncompatibleUncertaintiesException,
InverseVariance,
MissingDataAssociationException,
NDUncertainty,
StdDevUncertainty,
UnknownUncertainty,
VarianceUncertainty,
)
# Regarding setter tests:
# No need to test setters since the uncertainty is considered immutable after
# creation except of the parent_nddata attribute and this accepts just
# everything.
# Additionally they should be covered by NDData, NDArithmeticMixin which rely
# on it
# Regarding propagate, _convert_uncert, _propagate_* tests:
# They should be covered by NDArithmeticMixin since there is generally no need
# to test them without this mixin.
# Regarding __getitem__ tests:
# Should be covered by NDSlicingMixin.
# Regarding StdDevUncertainty tests:
# This subclass only overrides the methods for propagation so the same
# they should be covered in NDArithmeticMixin.
# Not really fake but the minimum an uncertainty has to override not to be
# abstract.
class FakeUncertainty(NDUncertainty):
    # Minimal concrete NDUncertainty: every abstract hook is stubbed out.

    @property
    def uncertainty_type(self):
        # Identifier string the tests use to recognise this class.
        return "fake"

    def _data_unit_to_uncertainty_unit(self, value):
        # No unit relationship is defined for the fake type.
        return None

    # Propagation is deliberately a no-op here; real propagation is covered
    # by the NDArithmeticMixin tests.
    def _propagate_add(self, data, final_data):
        pass

    def _propagate_subtract(self, data, final_data):
        pass

    def _propagate_multiply(self, data, final_data):
        pass

    def _propagate_divide(self, data, final_data):
        pass
# Test the fake (added also StdDevUncertainty which should behave identical)
# the list of classes used for parametrization in tests below
uncertainty_types_to_be_tested = [
    FakeUncertainty,
    StdDevUncertainty,
    VarianceUncertainty,
    InverseVariance,
    UnknownUncertainty,
]
# Classes that can be converted to/from variance via represent_as (exercised
# in the conversion tests below).
uncertainty_types_with_conversion_support = (
    StdDevUncertainty,
    VarianceUncertainty,
    InverseVariance,
)
# Classes with no defined conversion to variance.
uncertainty_types_without_conversion_support = (FakeUncertainty, UnknownUncertainty)
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_list(UncertClass):
    """Initialising from a list stores an equivalent ndarray."""
    unc = UncertClass([1, 2, 3])
    assert_array_equal(unc.array, np.array([1, 2, 3]))
    # The `copy` flag is irrelevant: casting a list always copies anyway.
    # Same again, with a unit attached:
    unc = UncertClass([1, 2, 3], unit=u.adu)
    assert_array_equal(unc.array, np.array([1, 2, 3]))
    assert unc.unit is u.adu
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_ndarray(UncertClass):
    """ndarrays are copied by default; copy=False stores a reference."""
    source = np.arange(100).reshape(10, 10)
    unc = UncertClass(source)
    assert_array_equal(unc.array, source)
    assert unc.array is not source  # default behaviour: copy
    # copy=False keeps a reference instead
    unc = UncertClass(source, copy=False)
    assert unc.array is source
    # and a unit can be attached as well
    unc = UncertClass(source, unit=u.adu)
    assert_array_equal(unc.array, source)
    assert unc.array is not source
    assert unc.unit is u.adu
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_quantity(UncertClass):
    """Quantities split into value array and unit; the value is always copied."""
    source = np.arange(10).reshape(2, 5) * u.adu
    unc = UncertClass(source)
    assert_array_equal(unc.array, source.value)
    assert unc.array is not source.value
    assert unc.unit is u.adu
    # copy=False cannot help here: Quantity.value itself returns a copy.
    unc = UncertClass(source, copy=False)
    assert unc.array is not source.value
    assert unc.unit is u.adu
    # An explicit unit overrides the Quantity's unit *without* converting.
    unc = UncertClass(source, unit=u.m)
    assert_array_equal(unc.array, source.value)  # No conversion done
    assert unc.array is not source.value
    assert unc.unit is u.m  # It took the explicit one
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_fake(UncertClass):
    """Constructing from an instance of the same class copies (or shares) it."""
    source = np.arange(5).reshape(5, 1)
    first = UncertClass(source)
    second = UncertClass(first)
    assert_array_equal(second.array, source)
    assert second.array is not source
    # copy=False shares the underlying array
    first = UncertClass(source, copy=False)
    second = UncertClass(first, copy=False)
    assert_array_equal(second.array, first.array)
    assert second.array is first.array
    # The unit is carried over as well
    source = np.arange(5).reshape(5, 1) * u.adu
    first = UncertClass(source)
    second = UncertClass(first)
    assert_array_equal(second.array, source.value)
    assert second.array is not source.value
    assert second.unit is u.adu
    # ... unless an explicit unit parameter overrides it
    second = UncertClass(first, unit=u.cm)
    assert_array_equal(second.array, source.value)
    assert second.array is not source.value
    assert second.unit is u.cm
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_init_fake_with_somethingElse(UncertClass):
    """Arbitrary objects (here a dict) are stored as-is."""
    source = {"rdnoise": 2.9, "gain": 0.6}
    unc = UncertClass(source)
    assert unc.array == source
    # A unit may be attached, but since no propagation is defined for such
    # values its interpretation is up to the user.
    unc = UncertClass(source, unit=u.s)
    assert unc.array == source
    assert unc.unit is u.s
    # Even with copy=False the resulting object is distinct from the dict.
    unc = UncertClass(source, copy=False)
    assert unc.array == source
    assert id(unc) != id(source)
    # dicts cannot be referenced without copy
    # TODO : Find something that can be referenced without copy :-)
def test_init_fake_with_StdDevUncertainty():
    """Different uncertainty classes are not directly convertible."""
    source = np.arange(5).reshape(5, 1)
    # StdDev -> Fake must fail ...
    with pytest.raises(IncompatibleUncertaintiesException):
        FakeUncertainty(StdDevUncertainty(source))
    # ... and so must Fake -> StdDev.
    with pytest.raises(IncompatibleUncertaintiesException):
        StdDevUncertainty(FakeUncertainty(source))
def test_uncertainty_type():
    """Each class reports its expected ``uncertainty_type`` string."""
    expected = [
        (FakeUncertainty, "fake"),
        (StdDevUncertainty, "std"),
        (VarianceUncertainty, "var"),
        (InverseVariance, "ivar"),
    ]
    for cls, type_string in expected:
        assert cls([10, 2]).uncertainty_type == type_string
def test_uncertainty_correlated():
    """StdDevUncertainty supports correlated propagation; the fake does not."""
    assert not FakeUncertainty([10, 2]).supports_correlated
    assert StdDevUncertainty([10, 2]).supports_correlated
def test_for_leak_with_uncertainty():
    # Regression test for memory leak because of cyclic references between
    # NDData and uncertainty
    from collections import defaultdict
    from gc import get_objects

    def test_leak(func, specific_objects=None):
        """Function based on gc.get_objects to determine if any object or
        a specific object leaks.

        It requires a function to be given and if any objects survive the
        function scope it's considered a leak (so don't return anything).
        """
        # Per-type object counts before the call ...
        before = defaultdict(int)
        for i in get_objects():
            before[type(i)] += 1

        func()

        # ... and after; any net growth is a leak.
        after = defaultdict(int)
        for i in get_objects():
            after[type(i)] += 1

        if specific_objects is None:
            # Compare every type seen after the call.
            assert all(after[k] - before[k] == 0 for k in after)
        else:
            # Only the count of the requested type matters.
            assert after[specific_objects] - before[specific_objects] == 0

    def non_leaker_nddata():
        # Without uncertainty there is no reason to assume that there is a
        # memory leak but test it nevertheless.
        NDData(np.ones(100))

    def leaker_nddata():
        # With uncertainty there was a memory leak!
        NDData(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))

    test_leak(non_leaker_nddata, NDData)
    test_leak(leaker_nddata, NDData)

    # Same for NDDataArray:
    from astropy.nddata.compat import NDDataArray

    def non_leaker_nddataarray():
        NDDataArray(np.ones(100))

    def leaker_nddataarray():
        NDDataArray(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))

    test_leak(non_leaker_nddataarray, NDDataArray)
    test_leak(leaker_nddataarray, NDDataArray)
def test_for_stolen_uncertainty():
    # Sharing uncertainties should not overwrite the parent_nddata attribute
    ndd1 = NDData(1, uncertainty=1)
    ndd2 = NDData(2, uncertainty=ndd1.uncertainty)
    # uncertainty.parent_nddata.data should be the original data!
    # Both parents must see their own data: reusing ndd1's uncertainty for
    # ndd2 must not re-point the original's parent_nddata at ndd2.
    assert ndd1.uncertainty.parent_nddata.data == ndd1.data
    assert ndd2.uncertainty.parent_nddata.data == ndd2.data
def test_stddevuncertainty_pickle():
    """Pickling round-trips array and unit but deliberately drops the parent."""
    original = StdDevUncertainty(np.ones(3), unit=u.m)
    restored = pickle.loads(pickle.dumps(original))
    np.testing.assert_array_equal(original.array, restored.array)
    assert original.unit == restored.unit
    # The parent link is not pickled, so accessing it must raise.
    with pytest.raises(MissingDataAssociationException):
        restored.parent_nddata
@pytest.mark.parametrize("UncertClass", uncertainty_types_to_be_tested)
def test_quantity(UncertClass):
    """The quantity property wraps the array, defaulting to dimensionless."""
    with_unit = UncertClass([1, 2, 3], unit=u.adu)
    assert isinstance(with_unit.quantity, u.Quantity)
    assert with_unit.quantity.unit.is_equivalent(u.adu)
    without_unit = UncertClass([1, 2, 3])
    assert isinstance(without_unit.quantity, u.Quantity)
    assert without_unit.quantity.unit.is_equivalent(u.dimensionless_unscaled)
@pytest.mark.parametrize(
    "UncertClass", [VarianceUncertainty, StdDevUncertainty, InverseVariance]
)
def test_setting_uncertainty_unit_results_in_unit_object(UncertClass):
    """Assigning a unit string coerces it into a UnitBase instance."""
    unc = UncertClass([1, 1])
    unc.unit = "electron"
    assert isinstance(unc.unit, u.UnitBase)
@pytest.mark.parametrize("NDClass", [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(
    "UncertClass", [VarianceUncertainty, StdDevUncertainty, InverseVariance]
)
def test_changing_unit_to_value_inconsistent_with_parent_fails(NDClass, UncertClass):
    """Once attached to a parent, the unit may not become inconsistent."""
    parent = NDClass(1, unit="adu")
    unc = UncertClass(1)
    # Attaching sets the uncertainty unit to whatever fits the parent's data.
    parent.uncertainty = unc
    with pytest.raises(u.UnitConversionError):
        # Nothing special about 15 except no one would ever use that unit
        unc.unit = parent.unit**15
@pytest.mark.parametrize("NDClass", [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(
    "UncertClass, expected_unit",
    [
        (VarianceUncertainty, u.adu**2),
        (StdDevUncertainty, u.adu),
        (InverseVariance, 1 / u.adu**2),
    ],
)
def test_assigning_uncertainty_to_parent_gives_correct_unit(
    NDClass, UncertClass, expected_unit
):
    """A unitless uncertainty inherits the unit implied by its parent's unit."""
    parent = NDClass([1, 1], unit=u.adu)
    unc = UncertClass([1, 1])
    parent.uncertainty = unc
    assert unc.unit == expected_unit
@pytest.mark.parametrize("NDClass", [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(
    "UncertClass, expected_unit",
    [
        (VarianceUncertainty, u.adu**2),
        (StdDevUncertainty, u.adu),
        (InverseVariance, 1 / u.adu**2),
    ],
)
def test_assigning_uncertainty_with_unit_to_parent_with_unit(
    NDClass, UncertClass, expected_unit
):
    """An uncertainty whose unit already matches the parent is kept as-is."""
    parent = NDClass([1, 1], unit=u.adu)
    unc = UncertClass([1, 1], unit=expected_unit)
    parent.uncertainty = unc
    assert unc.unit == expected_unit
@pytest.mark.parametrize("NDClass", [NDData, NDDataArray, CCDData])
@pytest.mark.parametrize(
    "UncertClass", [(VarianceUncertainty), (StdDevUncertainty), (InverseVariance)]
)
def test_assigning_uncertainty_with_bad_unit_to_parent_fails(NDClass, UncertClass):
    """An uncertainty with an incompatible unit is rejected by the parent."""
    parent = NDClass([1, 1], unit=u.adu)
    # seconds can never be reconciled with adu
    unc = UncertClass([1, 1], unit=u.second)
    with pytest.raises(u.UnitConversionError):
        parent.uncertainty = unc
@pytest.mark.parametrize("UncertClass", uncertainty_types_with_conversion_support)
def test_self_conversion_via_variance_supported(UncertClass):
    """Representing an uncertainty as its own class is a lossless round trip."""
    values = np.arange(1, 11).reshape(2, 5) * u.adu
    original = UncertClass(values)
    round_tripped = original.represent_as(UncertClass)
    assert_array_equal(original.array, round_tripped.array)
    assert original.unit == round_tripped.unit
@pytest.mark.parametrize(
    "UncertClass,to_variance_func",
    zip(
        uncertainty_types_with_conversion_support,
        (lambda x: x**2, lambda x: x, lambda x: 1 / x),
    ),
)
def test_conversion_to_from_variance_supported(UncertClass, to_variance_func):
    """Converting to variance and back reproduces the original uncertainty."""
    values = np.arange(1, 11).reshape(2, 5) * u.adu
    original = UncertClass(values)
    as_variance = original.represent_as(VarianceUncertainty)
    round_tripped = as_variance.represent_as(UncertClass)
    # The intermediate variance must match the class-specific mapping ...
    assert_allclose(to_variance_func(original.array), as_variance.array)
    # ... and the round trip must be exact.
    assert_array_equal(original.array, round_tripped.array)
    assert original.unit == round_tripped.unit
@pytest.mark.parametrize("UncertClass", uncertainty_types_without_conversion_support)
def test_self_conversion_via_variance_not_supported(UncertClass):
    """``represent_as`` raises TypeError for classes without variance support.

    The previous version bound the (never produced) result to an unused local
    ``final_uncert``, a dead store flagged by linters (F841); the bare call
    inside ``pytest.raises`` is equivalent and cleaner.
    """
    uncert = np.arange(1, 11).reshape(2, 5) * u.adu
    start_uncert = UncertClass(uncert)
    with pytest.raises(TypeError):
        start_uncert.represent_as(UncertClass)
|
92990bb28caccc4af38be63287533bee4c553bc5f88cadc0be866d986f5e9a93 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.nddata import NDData, NDSlicingMixin
from astropy.nddata import _testing as nd_testing
from astropy.nddata.nduncertainty import NDUncertainty, StdDevUncertainty
# Just add the Mixin to NDData
# TODO: Make this use NDDataRef instead!
class NDDataSliceable(NDSlicingMixin, NDData):
    # Minimal sliceable NDData: exactly what these slicing tests need.
    pass
# Just some uncertainty (following the StdDevUncertainty implementation of
# storing the uncertainty in a property 'array') with slicing.
class SomeUncertainty(NDUncertainty):
    # Concrete NDUncertainty stub: propagation methods are intentional no-ops
    # (propagation itself is covered elsewhere).

    @property
    def uncertainty_type(self):
        # Identifier string for this stub class.
        return "fake"

    def _propagate_add(self, data, final_data):
        pass

    def _propagate_subtract(self, data, final_data):
        pass

    def _propagate_multiply(self, data, final_data):
        pass

    def _propagate_divide(self, data, final_data):
        pass
def test_slicing_only_data():
    """With only data set, slicing slices just the data."""
    arr = np.arange(10)
    sliced = NDDataSliceable(arr)[2:5]
    assert_array_equal(arr[2:5], sliced.data)
def test_slicing_data_scalar_fail():
    """A 0-d data array cannot be sliced."""
    nd = NDDataSliceable(np.array(10))
    # The message would be 'Scalars cannot be sliced.'
    with pytest.raises(TypeError):
        nd[:]
def test_slicing_1ddata_ndslice():
    """Slicing 1D data with a 2D slice raises numpy's IndexError."""
    nd = NDDataSliceable(np.array([10, 20]))
    # Standard numpy behaviour: too many indices.
    with pytest.raises(IndexError):
        nd[:, :]
@pytest.mark.parametrize("prop_name", ["mask", "uncertainty"])
def test_slicing_1dmask_ndslice(prop_name):
    """A 1D mask/uncertainty on 2D data lets numpy's IndexError reach the user."""
    nd = NDDataSliceable(np.ones((3, 3)), **{prop_name: np.ones(3)})
    # Standard numpy behaviour when slicing the 1D attribute with a 2D slice.
    with pytest.raises(IndexError):
        nd[:, :]
def test_slicing_all_npndarray_1d():
    """Slicing a fully populated 1D NDData slices every sliceable attribute.

    Fix: the final assertion previously compared ``meta`` against the
    *unsliced* ``nd.meta`` — trivially true and testing nothing; it now
    checks the sliced object's meta, consistent with the other assertions.
    """
    data = np.arange(10)
    mask = data > 3
    uncertainty = StdDevUncertainty(np.linspace(10, 20, 10))
    naxis = 1
    wcs = nd_testing._create_wcs_simple(
        naxis=naxis,
        ctype=["deg"] * naxis,
        crpix=[3] * naxis,
        crval=[10] * naxis,
        cdelt=[1] * naxis,
    )
    # Just to have them too
    unit = u.s
    meta = {"observer": "Brian"}
    nd = NDDataSliceable(
        data, mask=mask, uncertainty=uncertainty, wcs=wcs, unit=unit, meta=meta
    )
    nd2 = nd[2:5]
    assert_array_equal(data[2:5], nd2.data)
    assert_array_equal(mask[2:5], nd2.mask)
    assert_array_equal(uncertainty[2:5].array, nd2.uncertainty.array)
    # The wcs is sliced too: pixel 1 of the slice is pixel 3 of the original.
    assert nd2.wcs.pixel_to_world(1) == nd.wcs.pixel_to_world(3)
    # unit and meta are carried over unchanged
    assert unit is nd2.unit
    assert meta == nd2.meta
def test_slicing_all_npndarray_nd():
    """Multi-dimensional slices are applied to every array attribute."""
    data = np.arange(1000).reshape(10, 10, 10)
    mask = data > 3
    uncertainty = np.linspace(10, 20, 1000).reshape(10, 10, 10)
    naxis = 3
    wcs = nd_testing._create_wcs_simple(
        naxis=naxis,
        ctype=["deg"] * naxis,
        crpix=[3] * naxis,
        crval=[10] * naxis,
        cdelt=[1] * naxis,
    )
    nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
    # A 1D slice of the 3D data:
    sliced = nd[2:5]
    assert_array_equal(data[2:5], sliced.data)
    assert_array_equal(mask[2:5], sliced.mask)
    assert_array_equal(uncertainty[2:5], sliced.uncertainty.array)
    # A full 3D slice:
    sliced = nd[2:5, :, 4:7]
    assert_array_equal(data[2:5, :, 4:7], sliced.data)
    assert_array_equal(mask[2:5, :, 4:7], sliced.mask)
    assert_array_equal(uncertainty[2:5, :, 4:7], sliced.uncertainty.array)
    assert sliced.wcs.pixel_to_world(1, 5, 1) == nd.wcs.pixel_to_world(5, 5, 3)
def test_slicing_all_npndarray_shape_diff():
    """Attributes with mismatching shapes are still sliced independently."""
    data = np.arange(10)
    mask = (data > 3)[0:9]  # deliberately one element short
    uncertainty = np.linspace(10, 20, 15)  # deliberately too long
    naxis = 1
    wcs = nd_testing._create_wcs_simple(
        naxis=naxis,
        ctype=["deg"] * naxis,
        crpix=[3] * naxis,
        crval=[10] * naxis,
        cdelt=[1] * naxis,
    )
    nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
    sliced = nd[2:5]
    assert_array_equal(data[2:5], sliced.data)
    # Everything is sliced even though the shapes differ (no Info raised).
    assert_array_equal(mask[2:5], sliced.mask)
    assert_array_equal(uncertainty[2:5], sliced.uncertainty.array)
    assert sliced.wcs.pixel_to_world(1) == nd.wcs.pixel_to_world(3)
def test_slicing_all_something_wrong():
    # Mask is a plain list and the uncertainty a dict: only data and mask are
    # expected to be sliced; everything else is carried over as-is.
    data = np.arange(10)
    mask = [False] * 10
    uncertainty = {"rdnoise": 2.9, "gain": 1.4}
    naxis = 1
    wcs = nd_testing._create_wcs_simple(
        naxis=naxis,
        ctype=["deg"] * naxis,
        crpix=[3] * naxis,
        crval=[10] * naxis,
        cdelt=[1] * naxis,
    )
    nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
    nd2 = nd[2:5]
    # Sliced properties:
    assert_array_equal(data[2:5], nd2.data)
    assert_array_equal(mask[2:5], nd2.mask)
    # Not sliced attributes (they will raise a Info nevertheless)
    # NOTE(review): the next line is a bare expression with no `assert` -- it
    # has no effect (flake8 B015).  It was presumably meant to verify the
    # uncertainty survives un-sliced; confirm intent before turning it into an
    # assertion (NDData may wrap the dict, so `is` against the raw dict may
    # not hold).
    uncertainty is nd2.uncertainty
    assert nd2.wcs.pixel_to_world(1) == nd.wcs.pixel_to_world(3)
def test_boolean_slicing():
    """Boolean-mask indexing works only once the wcs has been dropped."""
    data = np.arange(10)
    mask = data.copy()
    uncertainty = StdDevUncertainty(data.copy())
    naxis = 1
    wcs = nd_testing._create_wcs_simple(
        naxis=naxis,
        ctype=["deg"] * naxis,
        crpix=[3] * naxis,
        crval=[10] * naxis,
        cdelt=[1] * naxis,
    )
    nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
    selection = (nd.data >= 3) & (nd.data < 8)
    # With a wcs attached, boolean indexing is refused ...
    with pytest.raises(ValueError):
        nd[selection]
    # ... but works once the wcs is removed.
    nd.wcs = None
    sliced = nd[selection]
    assert_array_equal(data[3:8], sliced.data)
    assert_array_equal(mask[3:8], sliced.mask)
|
a59087e0e371fc8db77af79c5a2c42fabe0076a2902da4e96446911bc1673755 | from astropy.nddata import NDData, NDDataRef, NDIOMixin
# Alias NDDataAllMixins in case this will be renamed ... :-)
# NDDataRef is expected to expose the registry-backed read/write API -- the
# test below checks exactly that on the alias.
NDDataIO = NDDataRef
def test_simple_write_read():
    """The alias exposes registry-backed ``read`` and ``write`` methods."""
    ndd = NDDataIO([1, 2, 3])
    assert hasattr(ndd, "read")
    assert hasattr(ndd, "write")
|
d7705549d877c51051f64fe6fe36f85210986c84a7aae0d503a04491bb1fa883 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from astropy import units as u
from astropy.nddata import NDDataRef
from astropy.nddata import _testing as nd_testing
from astropy.nddata.nduncertainty import (
IncompatibleUncertaintiesException,
InverseVariance,
StdDevUncertainty,
UnknownUncertainty,
VarianceUncertainty,
)
from astropy.units import Quantity, UnitsError
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs import WCS
# Alias NDDataAllMixins in case this will be renamed ... :-)
# The tests below only rely on the arithmetic methods (add/subtract/
# multiply/divide) that NDDataRef provides.
NDDataArithmetic = NDDataRef
class StdDevUncertaintyUncorrelated(StdDevUncertainty):
    # Variant that advertises no support for correlated uncertainties, used
    # to force the uncorrelated propagation code paths in tests.
    @property
    def supports_correlated(self):
        return False
# Test with Data covers:
# scalars, 1D, 2D and 3D
# broadcasting between them
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
    ("data1", "data2"),
    [
        (np.array(5), np.array(10)),
        (np.array(5), np.arange(10)),
        (np.array(5), np.arange(10).reshape(2, 5)),
        (np.arange(10), np.ones(10) * 2),
        (np.arange(10), np.ones((10, 10)) * 2),
        (np.arange(10).reshape(2, 5), np.ones((2, 5)) * 3),
        (np.arange(1000).reshape(20, 5, 10), np.ones((20, 5, 10)) * 3),
    ],
)
def test_arithmetics_data(data1, data2):
    """All four arithmetic methods work on plain data and broadcast shapes."""
    nd1 = NDDataArithmetic(data1)
    nd2 = NDDataArithmetic(data2)
    added = nd1.add(nd2)
    assert_array_equal(data1 + data2, added.data)
    subtracted = nd1.subtract(nd2)
    assert_array_equal(data1 - data2, subtracted.data)
    multiplied = nd1.multiply(nd2)
    assert_array_equal(data1 * data2, multiplied.data)
    divided = nd1.divide(nd2)
    assert_array_equal(data1 / data2, divided.data)
    # Broadcasting must yield the higher-dimensional operand's shape, and no
    # other attribute may appear out of thin air.
    expected_shape = data1.shape if data1.ndim > data2.ndim else data2.shape
    for result in (added, subtracted, multiplied, divided):
        assert result.data.shape == expected_shape
        assert result.unit is None
        assert result.uncertainty is None
        assert result.mask is None
        assert len(result.meta) == 0
        assert result.wcs is None
# Invalid arithmetic operations for data covering:
# not broadcastable data
def test_arithmetics_data_invalid():
    """Shapes that cannot broadcast raise ValueError."""
    nd1 = NDDataArithmetic([1, 2, 3])
    nd2 = NDDataArithmetic([1, 2])
    with pytest.raises(ValueError):
        nd1.add(nd2)
# Test with Data and unit and covers:
# identical units (even dimensionless unscaled vs. no unit),
# equivalent units (such as meter and kilometer)
# equivalent composite units (such as m/s and km/h)
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
    ("data1", "data2"),
    [
        (np.array(5) * u.s, np.array(10) * u.s),
        (np.array(5) * u.s, np.arange(10) * u.h),
        (np.array(5) * u.s, np.arange(10).reshape(2, 5) * u.min),
        (np.arange(10) * u.m / u.s, np.ones(10) * 2 * u.km / u.s),
        (np.arange(10) * u.m / u.s, np.ones((10, 10)) * 2 * u.m / u.h),
        (np.arange(10).reshape(2, 5) * u.m / u.s, np.ones((2, 5)) * 3 * u.km / u.h),
        (
            np.arange(1000).reshape(20, 5, 10),
            np.ones((20, 5, 10)) * 3 * u.dimensionless_unscaled,
        ),
        (np.array(5), np.array(10) * u.s / u.h),
    ],
)
def test_arithmetics_data_unit_identical(data1, data2):
    """Arithmetic with compatible units matches the raw Quantity result."""
    nd1 = NDDataArithmetic(data1)
    nd2 = NDDataArithmetic(data2)
    results = []
    # Each NDData method must agree with the equivalent Quantity operation.
    for method, combine in (
        (nd1.add, lambda a, b: a + b),
        (nd1.subtract, lambda a, b: a - b),
        (nd1.multiply, lambda a, b: a * b),
        (nd1.divide, lambda a, b: a / b),
    ):
        reference = combine(data1, data2)
        result = method(nd2)
        assert_array_equal(reference.value, result.data)
        assert result.unit == reference.unit
        results.append(result)
    # Broadcasting gives the higher-dimensional shape; nothing else is set.
    expected_shape = data1.shape if data1.ndim > data2.ndim else data2.shape
    for result in results:
        assert result.data.shape == expected_shape
        assert result.uncertainty is None
        assert result.mask is None
        assert len(result.meta) == 0
        assert result.wcs is None
# Test with Data and unit and covers:
# not identical not convertible units
# one with unit (which is not dimensionless) and one without
@pytest.mark.parametrize(
    ("data1", "data2"),
    [
        (np.array(5) * u.s, np.array(10) * u.m),
        (np.array(5) * u.Mpc, np.array(10) * u.km / u.s),
        (np.array(5) * u.Mpc, np.array(10)),
        (np.array(5), np.array(10) * u.s),
    ],
)
def test_arithmetics_data_unit_not_identical(data1, data2):
    """Incompatible units forbid add/subtract but allow multiply/divide."""
    nd1 = NDDataArithmetic(data1)
    nd2 = NDDataArithmetic(data2)
    # Neither addition nor subtraction can reconcile the units.
    for forbidden in (nd1.add, nd1.subtract):
        with pytest.raises(UnitsError):
            forbidden(nd2)
    # Multiplication and division combine the units instead.
    product = nd1.multiply(nd2)
    reference = data1 * data2
    assert_array_equal(reference.value, product.data)
    assert product.unit == reference.unit
    quotient = nd1.divide(nd2)
    reference = data1 / data2
    assert_array_equal(reference.value, quotient.data)
    assert quotient.unit == reference.unit
    for result in (product, quotient):
        # No other attribute may be set as a side effect.
        assert result.uncertainty is None
        assert result.mask is None
        assert len(result.meta) == 0
        assert result.wcs is None
# Tests with wcs (not very sensible because there is no operation between them
# covering:
# both set and identical/not identical
# one set
# None set
@pytest.mark.parametrize(
    ("wcs1", "wcs2"),
    [
        (None, None),
        (None, WCS(naxis=2)),
        (WCS(naxis=2), None),
        nd_testing.create_two_equal_wcs(naxis=2),
        nd_testing.create_two_unequal_wcs(naxis=2),
    ],
)
def test_arithmetics_data_wcs(wcs1, wcs2):
    """The first operand's wcs wins; if it is None, the second one is used."""
    nd1 = NDDataArithmetic(1, wcs=wcs1)
    nd2 = NDDataArithmetic(1, wcs=wcs2)
    # Equivalent to the original if/elif cascade: wcs1 unless it is missing.
    ref_wcs = wcs2 if wcs1 is None else wcs1
    for method in ("add", "subtract", "multiply", "divide"):
        result = getattr(nd1, method)(nd2)
        nd_testing.assert_wcs_seem_equal(ref_wcs, result.wcs)
        # All other optional attributes must stay unset.
        assert result.unit is None
        assert result.uncertainty is None
        assert len(result.meta) == 0
        assert result.mask is None
# Masks are completely separated in the NDArithmetics from the data so we need
# no correlated tests but covering:
# masks 1D, 2D and mixed cases with broadcasting
@pytest.mark.parametrize(
    ("mask1", "mask2"),
    [
        (None, None),
        (None, False),
        (True, None),
        (False, False),
        (True, False),
        (False, True),
        (True, True),
        (np.array(False), np.array(True)),
        (np.array(False), np.array([0, 1, 0, 1, 1], dtype=np.bool_)),
        (np.array(True), np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_)),
        (
            np.array([0, 1, 0, 1, 1], dtype=np.bool_),
            np.array([1, 1, 0, 0, 1], dtype=np.bool_),
        ),
        (
            np.array([0, 1, 0, 1, 1], dtype=np.bool_),
            np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_),
        ),
        (
            np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_),
            np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_),
        ),
    ],
)
def test_arithmetics_data_masks(mask1, mask2):
    """Masks combine with ``|``; a single present mask is passed through."""
    nd1 = NDDataArithmetic(1, mask=mask1)
    nd2 = NDDataArithmetic(1, mask=mask2)
    if mask1 is None:
        # Also covers the (None, None) case: mask2 is then None as well.
        ref_mask = mask2
    elif mask2 is None:
        ref_mask = mask1
    else:
        ref_mask = mask1 | mask2
    for method in ("add", "subtract", "multiply", "divide"):
        result = getattr(nd1, method)(nd2)
        assert_array_equal(ref_mask, result.mask)
        # All other optional attributes must stay unset.
        assert result.unit is None
        assert result.uncertainty is None
        assert len(result.meta) == 0
        assert result.wcs is None
# One additional case which can not be easily incorporated in the test above
# what happens if the masks are numpy ndarrays are not broadcastable
def test_arithmetics_data_masks_invalid():
    """Non-broadcastable ndarray masks raise ValueError for every operation."""
    nd1 = NDDataArithmetic(1, mask=np.array([1, 0], dtype=np.bool_))
    nd2 = NDDataArithmetic(1, mask=np.array([1, 0, 1], dtype=np.bool_))
    # Same operation order as before: add, multiply, subtract, divide.
    for operation in (nd1.add, nd1.multiply, nd1.subtract, nd1.divide):
        with pytest.raises(ValueError):
            operation(nd2)
# Covering:
# both have uncertainties (data and uncertainty without unit)
# tested against manually determined resulting uncertainties to verify the
# implemented formulas
# this test only works as long as data1 and data2 do not contain any 0
def test_arithmetics_stddevuncertainty_basic():
    """Uncorrelated StdDevUncertainty propagation for +, -, *, /.

    Each operation (and its commuted form) is checked against the textbook
    Gaussian error-propagation formulas written out explicitly below.
    """
    nd1 = NDDataArithmetic([1, 2, 3], uncertainty=StdDevUncertainty([1, 1, 3]))
    nd2 = NDDataArithmetic([2, 2, 2], uncertainty=StdDevUncertainty([2, 2, 2]))
    nd3 = nd1.add(nd2)
    nd4 = nd2.add(nd1)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty: sqrt(s1**2 + s2**2)
    ref_uncertainty = np.sqrt(np.array([1, 1, 3]) ** 2 + np.array([2, 2, 2]) ** 2)
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.subtract(nd2)
    nd4 = nd2.subtract(nd1)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty (same formula as for add)
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    # Multiplication and Division only work with almost equal array comparisons
    # since the formula implemented and the formula used as reference are
    # slightly different.
    nd3 = nd1.multiply(nd2)
    nd4 = nd2.multiply(nd1)
    # Inverse operation should result in the same uncertainty
    assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Reference: |a*b| * sqrt((s1/a)**2 + (s2/b)**2)
    ref_uncertainty = np.abs(np.array([2, 4, 6])) * np.sqrt(
        (np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
        + (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
    )
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.divide(nd2)
    nd4 = nd2.divide(nd1)
    # Inverse operation gives a different uncertainty (prefactor a/b vs b/a)!
    # Reference: |a/b| * sqrt((s1/a)**2 + (s2/b)**2)
    ref_uncertainty_1 = np.abs(np.array([1 / 2, 2 / 2, 3 / 2])) * np.sqrt(
        (np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
        + (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
    )
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
    # ... and |b/a| * the same relative term for the commuted division.
    ref_uncertainty_2 = np.abs(np.array([2, 1, 2 / 3])) * np.sqrt(
        (np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
        + (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
    )
    assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(
    ("cor", "uncert1", "data2"),
    [
        (-1, [1, 1, 3], [2, 2, 7]),
        (-0.5, [1, 1, 3], [2, 2, 7]),
        (-0.25, [1, 1, 3], [2, 2, 7]),
        (0, [1, 1, 3], [2, 2, 7]),
        (0.25, [1, 1, 3], [2, 2, 7]),
        (0.5, [1, 1, 3], [2, 2, 7]),
        (1, [1, 1, 3], [2, 2, 7]),
        (-1, [-1, -1, -3], [2, 2, 7]),
        (-0.5, [-1, -1, -3], [2, 2, 7]),
        (-0.25, [-1, -1, -3], [2, 2, 7]),
        (0, [-1, -1, -3], [2, 2, 7]),
        (0.25, [-1, -1, -3], [2, 2, 7]),
        (0.5, [-1, -1, -3], [2, 2, 7]),
        (1, [-1, -1, -3], [2, 2, 7]),
        (-1, [1, 1, 3], [-2, -3, -2]),
        (-0.5, [1, 1, 3], [-2, -3, -2]),
        (-0.25, [1, 1, 3], [-2, -3, -2]),
        (0, [1, 1, 3], [-2, -3, -2]),
        (0.25, [1, 1, 3], [-2, -3, -2]),
        (0.5, [1, 1, 3], [-2, -3, -2]),
        (1, [1, 1, 3], [-2, -3, -2]),
        (-1, [-1, -1, -3], [-2, -3, -2]),
        (-0.5, [-1, -1, -3], [-2, -3, -2]),
        (-0.25, [-1, -1, -3], [-2, -3, -2]),
        (0, [-1, -1, -3], [-2, -3, -2]),
        (0.25, [-1, -1, -3], [-2, -3, -2]),
        (0.5, [-1, -1, -3], [-2, -3, -2]),
        (1, [-1, -1, -3], [-2, -3, -2]),
    ],
)
def test_arithmetics_stddevuncertainty_basic_with_correlation(cor, uncert1, data2):
    """Correlated StdDevUncertainty propagation for +, -, *, /.

    Checks the implemented propagation (with scalar correlation ``cor``)
    against the analytic formulas that include the ``2*cor*s1*s2`` cross term.
    """
    data1 = np.array([1, 2, 3])
    data2 = np.array(data2)
    uncert1 = np.array(uncert1)
    uncert2 = np.array([2, 2, 2])
    nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1))
    nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2))
    nd3 = nd1.add(nd2, uncertainty_correlation=cor)
    nd4 = nd2.add(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty (cross term is *added* for +)
    ref_uncertainty = np.sqrt(
        uncert1**2 + uncert2**2 + 2 * cor * np.abs(uncert1 * uncert2)
    )
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
    nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty (cross term is *subtracted*)
    ref_uncertainty = np.sqrt(
        uncert1**2 + uncert2**2 - 2 * cor * np.abs(uncert1 * uncert2)
    )
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    # Multiplication and Division only work with almost equal array comparisons
    # since the formula implemented and the formula used as reference are
    # slightly different.
    nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
    nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty
    ref_uncertainty = (np.abs(data1 * data2)) * np.sqrt(
        (uncert1 / data1) ** 2
        + (uncert2 / data2) ** 2
        + (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
    )
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
    nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
    # Inverse operation gives a different uncertainty (prefactor a/b vs b/a)!
    # Compare it to the theoretical uncertainty
    ref_uncertainty_1 = (np.abs(data1 / data2)) * np.sqrt(
        (uncert1 / data1) ** 2
        + (uncert2 / data2) ** 2
        - (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
    )
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
    ref_uncertainty_2 = (np.abs(data2 / data1)) * np.sqrt(
        (uncert1 / data1) ** 2
        + (uncert2 / data2) ** 2
        - (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
    )
    assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(
    ("cor", "uncert1", "data2"),
    [
        (-1, [1, 1, 3], [2, 2, 7]),
        (-0.5, [1, 1, 3], [2, 2, 7]),
        (-0.25, [1, 1, 3], [2, 2, 7]),
        (0, [1, 1, 3], [2, 2, 7]),
        (0.25, [1, 1, 3], [2, 2, 7]),
        (0.5, [1, 1, 3], [2, 2, 7]),
        (1, [1, 1, 3], [2, 2, 7]),
        (-1, [-1, -1, -3], [2, 2, 7]),
        (-0.5, [-1, -1, -3], [2, 2, 7]),
        (-0.25, [-1, -1, -3], [2, 2, 7]),
        (0, [-1, -1, -3], [2, 2, 7]),
        (0.25, [-1, -1, -3], [2, 2, 7]),
        (0.5, [-1, -1, -3], [2, 2, 7]),
        (1, [-1, -1, -3], [2, 2, 7]),
        (-1, [1, 1, 3], [-2, -3, -2]),
        (-0.5, [1, 1, 3], [-2, -3, -2]),
        (-0.25, [1, 1, 3], [-2, -3, -2]),
        (0, [1, 1, 3], [-2, -3, -2]),
        (0.25, [1, 1, 3], [-2, -3, -2]),
        (0.5, [1, 1, 3], [-2, -3, -2]),
        (1, [1, 1, 3], [-2, -3, -2]),
        (-1, [-1, -1, -3], [-2, -3, -2]),
        (-0.5, [-1, -1, -3], [-2, -3, -2]),
        (-0.25, [-1, -1, -3], [-2, -3, -2]),
        (0, [-1, -1, -3], [-2, -3, -2]),
        (0.25, [-1, -1, -3], [-2, -3, -2]),
        (0.5, [-1, -1, -3], [-2, -3, -2]),
        (1, [-1, -1, -3], [-2, -3, -2]),
    ],
)
def test_arithmetics_varianceuncertainty_basic_with_correlation(cor, uncert1, data2):
    """Correlated VarianceUncertainty propagation for +, -, *, /.

    Same cases as the StdDev variant above, but the inputs are squared to
    variances and the reference formulas are the variance (not sigma) forms.
    """
    data1 = np.array([1, 2, 3])
    data2 = np.array(data2)
    # The parametrized values are standard deviations; square to variances.
    uncert1 = np.array(uncert1) ** 2
    uncert2 = np.array([2, 2, 2]) ** 2
    nd1 = NDDataArithmetic(data1, uncertainty=VarianceUncertainty(uncert1))
    nd2 = NDDataArithmetic(data2, uncertainty=VarianceUncertainty(uncert2))
    nd3 = nd1.add(nd2, uncertainty_correlation=cor)
    nd4 = nd2.add(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty (cross term added for +)
    ref_uncertainty = uncert1 + uncert2 + 2 * cor * np.sqrt(uncert1 * uncert2)
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
    nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty (cross term subtracted for -)
    ref_uncertainty = uncert1 + uncert2 - 2 * cor * np.sqrt(uncert1 * uncert2)
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    # Multiplication and Division only work with almost equal array comparisons
    # since the formula implemented and the formula used as reference are
    # slightly different.
    nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
    nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty
    ref_uncertainty = (data1 * data2) ** 2 * (
        uncert1 / data1**2
        + uncert2 / data2**2
        + (2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2))
    )
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
    nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
    # Inverse operation gives a different uncertainty because of the
    # prefactor nd1/nd2 vs nd2/nd1. However, a large chunk is the same.
    ref_common = (
        uncert1 / data1**2
        + uncert2 / data2**2
        - (2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2))
    )
    # Compare it to the theoretical uncertainty
    ref_uncertainty_1 = (data1 / data2) ** 2 * ref_common
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
    ref_uncertainty_2 = (data2 / data1) ** 2 * ref_common
    assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
    ("cor", "uncert1", "data2"),
    [
        (-1, [1, 1, 3], [2, 2, 7]),
        (-0.5, [1, 1, 3], [2, 2, 7]),
        (-0.25, [1, 1, 3], [2, 2, 7]),
        (0, [1, 1, 3], [2, 2, 7]),
        (0.25, [1, 1, 3], [2, 2, 7]),
        (0.5, [1, 1, 3], [2, 2, 7]),
        (1, [1, 1, 3], [2, 2, 7]),
        (-1, [-1, -1, -3], [2, 2, 7]),
        (-0.5, [-1, -1, -3], [2, 2, 7]),
        (-0.25, [-1, -1, -3], [2, 2, 7]),
        (0, [-1, -1, -3], [2, 2, 7]),
        (0.25, [-1, -1, -3], [2, 2, 7]),
        (0.5, [-1, -1, -3], [2, 2, 7]),
        (1, [-1, -1, -3], [2, 2, 7]),
        (-1, [1, 1, 3], [-2, -3, -2]),
        (-0.5, [1, 1, 3], [-2, -3, -2]),
        (-0.25, [1, 1, 3], [-2, -3, -2]),
        (0, [1, 1, 3], [-2, -3, -2]),
        (0.25, [1, 1, 3], [-2, -3, -2]),
        (0.5, [1, 1, 3], [-2, -3, -2]),
        (1, [1, 1, 3], [-2, -3, -2]),
        (-1, [-1, -1, -3], [-2, -3, -2]),
        (-0.5, [-1, -1, -3], [-2, -3, -2]),
        (-0.25, [-1, -1, -3], [-2, -3, -2]),
        (0, [-1, -1, -3], [-2, -3, -2]),
        (0.25, [-1, -1, -3], [-2, -3, -2]),
        (0.5, [-1, -1, -3], [-2, -3, -2]),
        (1, [-1, -1, -3], [-2, -3, -2]),
    ],
)
def test_arithmetics_inversevarianceuncertainty_basic_with_correlation(
    cor, uncert1, data2
):
    """Correlated InverseVariance propagation for +, -, *, /.

    Same cases as the variance variant above; the reference values are the
    reciprocals of the corresponding variance formulas.
    """
    data1 = np.array([1, 2, 3])
    data2 = np.array(data2)
    # The parametrized values are standard deviations; convert to 1/variance.
    uncert1 = 1 / np.array(uncert1) ** 2
    uncert2 = 1 / np.array([2, 2, 2]) ** 2
    nd1 = NDDataArithmetic(data1, uncertainty=InverseVariance(uncert1))
    nd2 = NDDataArithmetic(data2, uncertainty=InverseVariance(uncert2))
    nd3 = nd1.add(nd2, uncertainty_correlation=cor)
    nd4 = nd2.add(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty (reciprocal of the variance)
    ref_uncertainty = 1 / (
        1 / uncert1 + 1 / uncert2 + 2 * cor / np.sqrt(uncert1 * uncert2)
    )
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
    nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty
    ref_uncertainty = 1 / (
        1 / uncert1 + 1 / uncert2 - 2 * cor / np.sqrt(uncert1 * uncert2)
    )
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    # Multiplication and Division only work with almost equal array comparisons
    # since the formula implemented and the formula used as reference are
    # slightly different.
    nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
    nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty
    ref_uncertainty = 1 / (
        (data1 * data2) ** 2
        * (
            1 / uncert1 / data1**2
            + 1 / uncert2 / data2**2
            + (2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2))
        )
    )
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
    nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
    # Inverse operation gives a different uncertainty because of the
    # prefactor nd1/nd2 vs nd2/nd1. However, a large chunk is the same.
    ref_common = (
        1 / uncert1 / data1**2
        + 1 / uncert2 / data2**2
        - (2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2))
    )
    # Compare it to the theoretical uncertainty
    ref_uncertainty_1 = 1 / ((data1 / data2) ** 2 * ref_common)
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
    ref_uncertainty_2 = 1 / ((data2 / data1) ** 2 * ref_common)
    assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Covering:
# just an example that a np.ndarray works as correlation, no checks for
# the right result since these were basically done in the function above.
def test_arithmetics_stddevuncertainty_basic_with_correlation_array():
    """An ndarray-valued ``uncertainty_correlation`` is accepted without error."""
    nd_a = NDDataArithmetic(
        np.array([1, 2, 3]), uncertainty=StdDevUncertainty(np.array([1, 1, 1]))
    )
    nd_b = NDDataArithmetic(
        np.array([1, 1, 1]), uncertainty=StdDevUncertainty(np.array([2, 2, 2]))
    )
    correlation = np.array([0, 0.25, 0])
    nd_a.add(nd_b, uncertainty_correlation=correlation)
# Covering:
# That propagate throws an exception when correlation is given but the
# uncertainty does not support correlation.
def test_arithmetics_with_correlation_unsupported():
    """A non-zero correlation must raise if the uncertainty can't handle it."""
    nd_a = NDDataArithmetic(
        np.array([1, 2, 3]),
        uncertainty=StdDevUncertaintyUncorrelated(np.array([1, 1, 1])),
    )
    nd_b = NDDataArithmetic(
        np.array([1, 1, 1]),
        uncertainty=StdDevUncertaintyUncorrelated(np.array([2, 2, 2])),
    )
    with pytest.raises(ValueError):
        nd_a.add(nd_b, uncertainty_correlation=3)
# Covering:
# only one has an uncertainty (data and uncertainty without unit)
# tested against the case where the other one has zero uncertainty. (this case
# must be correct because we tested it in the last case)
# Also verify that if the result of the data has negative values the resulting
# uncertainty has no negative values.
def test_arithmetics_stddevuncertainty_one_missing():
    """A missing uncertainty behaves like an explicit zero uncertainty."""
    nd1 = NDDataArithmetic([1, -2, 3])
    nd1_ref = NDDataArithmetic([1, -2, 3], uncertainty=StdDevUncertainty([0, 0, 0]))
    nd2 = NDDataArithmetic([2, 2, -2], uncertainty=StdDevUncertainty([2, 2, 2]))
    for method in ("add", "subtract", "multiply", "divide"):
        # First operand lacks the uncertainty.
        result = getattr(nd1, method)(nd2)
        result_ref = getattr(nd1_ref, method)(nd2)
        assert_array_equal(result.uncertainty.array, result_ref.uncertainty.array)
        # The propagated uncertainty must never be negative.
        assert_array_equal(np.abs(result.uncertainty.array), result.uncertainty.array)
        # Second operand lacks the uncertainty.
        result = getattr(nd2, method)(nd1)
        result_ref = getattr(nd2, method)(nd1_ref)
        assert_array_equal(result.uncertainty.array, result_ref.uncertainty.array)
        assert_array_equal(np.abs(result.uncertainty.array), result.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
    ("uncert1", "uncert2"),
    [
        (np.array([1, 2, 3]) * u.m, None),
        (np.array([1, 2, 3]) * u.cm, None),
        (None, np.array([1, 2, 3]) * u.m),
        (None, np.array([1, 2, 3]) * u.cm),
        (np.array([1, 2, 3]), np.array([2, 3, 4])),
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
    ],
)
def test_arithmetics_stddevuncertainty_with_units(uncert1, uncert2):
    """StdDev uncertainties in units equivalent to the data unit propagate
    like the same uncertainties pre-converted to the data unit.

    Fixes two defects of the previous version:

    * the ``isinstance(..., Quantity)`` check happened *after* wrapping the
      input in ``StdDevUncertainty`` and therefore could never be true, so
      the reference uncertainty was never actually unit-converted;
    * the array assertions compared ``nd3.uncertainty.array`` against itself
      instead of against the reference result.
    """
    # Data has the same unit for both operands.
    data1 = np.array([1, 2, 3]) * u.m
    data2 = np.array([-4, 7, 0]) * u.m

    def _make_uncertainties(uncert, data):
        """Return (uncertainty as given, uncertainty scaled to data unit)."""
        if uncert is None:
            return None, None
        # Convert while it is still a Quantity: StdDevUncertainty is not one.
        if isinstance(uncert, Quantity):
            ref_array = uncert.to_value(data.unit)
        else:
            ref_array = uncert
        return StdDevUncertainty(uncert), StdDevUncertainty(ref_array, copy=True)

    uncert1, uncert_ref1 = _make_uncertainties(uncert1, data1)
    uncert2, uncert_ref2 = _make_uncertainties(uncert2, data2)
    nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
    nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
    nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
    nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)

    def _check(nd_a, nd_b, nd_a_ref, nd_b_ref, method):
        """Compare one operation against the pre-scaled reference operands."""
        result = getattr(nd_a, method)(nd_b)
        result_ref = getattr(nd_a_ref, method)(nd_b_ref)
        assert result.unit == result_ref.unit
        assert result.uncertainty.unit == result_ref.uncertainty.unit
        # almost_equal: the internal unit conversion need not be bit-exact.
        assert_array_almost_equal(
            result.uncertainty.array, result_ref.uncertainty.array
        )

    # Each operation in both orders.
    for method in ("add", "subtract", "multiply", "divide"):
        _check(nd1, nd2, nd1_ref, nd2_ref, method)
        _check(nd2, nd1, nd2_ref, nd1_ref, method)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
    ("uncert1", "uncert2"),
    [
        (np.array([1, 2, 3]) * u.m, None),
        (np.array([1, 2, 3]) * u.cm, None),
        (None, np.array([1, 2, 3]) * u.m),
        (None, np.array([1, 2, 3]) * u.cm),
        (np.array([1, 2, 3]), np.array([2, 3, 4])),
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
    ],
)
def test_arithmetics_varianceuncertainty_with_units(uncert1, uncert2):
    """Variance uncertainties in units equivalent to ``data.unit**2``
    propagate like the same uncertainties pre-converted to the data unit.

    Fixes two defects of the previous version:

    * the ``isinstance(..., Quantity)`` check happened *after* wrapping the
      input in ``VarianceUncertainty`` and therefore could never be true, so
      the reference uncertainty was never actually unit-converted;
    * the array assertions compared ``nd3.uncertainty.array`` against itself
      instead of against the reference result.
    """
    # Data has the same unit for both operands.
    data1 = np.array([1, 2, 3]) * u.m
    data2 = np.array([-4, 7, 0]) * u.m

    def _make_uncertainties(uncert, data):
        """Return (uncertainty as given, uncertainty scaled to data unit)."""
        if uncert is None:
            return None, None
        variance = uncert**2  # parametrized values are standard deviations
        # Convert while it is still a Quantity: VarianceUncertainty is not one.
        if isinstance(variance, Quantity):
            ref_array = variance.to_value(data.unit**2)
        else:
            ref_array = variance
        return (
            VarianceUncertainty(variance),
            VarianceUncertainty(ref_array, copy=True),
        )

    uncert1, uncert_ref1 = _make_uncertainties(uncert1, data1)
    uncert2, uncert_ref2 = _make_uncertainties(uncert2, data2)
    nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
    nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
    nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
    nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)

    def _check(nd_a, nd_b, nd_a_ref, nd_b_ref, method):
        """Compare one operation against the pre-scaled reference operands."""
        result = getattr(nd_a, method)(nd_b)
        result_ref = getattr(nd_a_ref, method)(nd_b_ref)
        assert result.unit == result_ref.unit
        assert result.uncertainty.unit == result_ref.uncertainty.unit
        # almost_equal: the internal unit conversion need not be bit-exact.
        assert_array_almost_equal(
            result.uncertainty.array, result_ref.uncertainty.array
        )

    # Each operation in both orders.
    for method in ("add", "subtract", "multiply", "divide"):
        _check(nd1, nd2, nd1_ref, nd2_ref, method)
        _check(nd2, nd1, nd2_ref, nd1_ref, method)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
    ("uncert1", "uncert2"),
    [
        (np.array([1, 2, 3]) * u.m, None),
        (np.array([1, 2, 3]) * u.cm, None),
        (None, np.array([1, 2, 3]) * u.m),
        (None, np.array([1, 2, 3]) * u.cm),
        (np.array([1, 2, 3]), np.array([2, 3, 4])),
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
    ],
)
def test_arithmetics_inversevarianceuncertainty_with_units(uncert1, uncert2):
    """InverseVariance uncertainties in units equivalent to
    ``1/data.unit**2`` propagate like the same uncertainties pre-converted
    to the data unit.

    Fixes two defects of the previous version:

    * the ``isinstance(..., Quantity)`` check happened *after* wrapping the
      input in ``InverseVariance`` and therefore could never be true, so the
      reference uncertainty was never actually unit-converted;
    * the array assertions compared ``nd3.uncertainty.array`` against itself
      instead of against the reference result.
    """
    # Data has the same unit for both operands.
    data1 = np.array([1, 2, 3]) * u.m
    data2 = np.array([-4, 7, 0]) * u.m

    def _make_uncertainties(uncert, data):
        """Return (uncertainty as given, uncertainty scaled to data unit)."""
        if uncert is None:
            return None, None
        ivar = 1 / uncert**2  # parametrized values are standard deviations
        # Convert while it is still a Quantity: InverseVariance is not one.
        if isinstance(ivar, Quantity):
            ref_array = ivar.to_value(1 / data.unit**2)
        else:
            ref_array = ivar
        return InverseVariance(ivar), InverseVariance(ref_array, copy=True)

    uncert1, uncert_ref1 = _make_uncertainties(uncert1, data1)
    uncert2, uncert_ref2 = _make_uncertainties(uncert2, data2)
    nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
    nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
    nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
    nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)

    def _check(nd_a, nd_b, nd_a_ref, nd_b_ref, method):
        """Compare one operation against the pre-scaled reference operands."""
        result = getattr(nd_a, method)(nd_b)
        result_ref = getattr(nd_a_ref, method)(nd_b_ref)
        assert result.unit == result_ref.unit
        assert result.uncertainty.unit == result_ref.uncertainty.unit
        # almost_equal: the internal unit conversion need not be bit-exact.
        assert_array_almost_equal(
            result.uncertainty.array, result_ref.uncertainty.array
        )

    # Each operation in both orders.
    for method in ("add", "subtract", "multiply", "divide"):
        _check(nd1, nd2, nd1_ref, nd2_ref, method)
        _check(nd2, nd1, nd2_ref, nd1_ref, method)
# Test abbreviation and long name for taking the first found meta, mask, wcs
@pytest.mark.parametrize("use_abbreviation", ["ff", "first_found"])
def test_arithmetics_handle_switches(use_abbreviation):
    """``None`` disables attribute handling; "first_found"/"ff" takes the
    attribute of whichever operand has one."""
    meta_a, meta_b = {"a": 1}, {"b": 2}
    mask_a, mask_b = True, False
    uncert_a = StdDevUncertainty([1, 2, 3])
    uncert_b = StdDevUncertainty([1, 2, 3])
    wcs_a, wcs_b = nd_testing.create_two_unequal_wcs(naxis=1)
    full_a = NDDataArithmetic(
        [1, 1, 1], meta=meta_a, mask=mask_a, wcs=wcs_a, uncertainty=uncert_a
    )
    full_b = NDDataArithmetic(
        [1, 1, 1], meta=meta_b, mask=mask_b, wcs=wcs_b, uncertainty=uncert_b
    )
    bare = NDDataArithmetic([1, 1, 1])
    # Both operands carry attributes, but every handler is disabled (None):
    # nothing may be propagated.
    result = full_a.add(
        full_b,
        propagate_uncertainties=None,
        handle_meta=None,
        handle_mask=None,
        compare_wcs=None,
    )
    assert result.wcs is None
    assert len(result.meta) == 0
    assert result.mask is None
    assert result.uncertainty is None
    # "first found": only the second operand has attributes.
    result = bare.add(
        full_b,
        propagate_uncertainties=False,
        handle_meta=use_abbreviation,
        handle_mask=use_abbreviation,
        compare_wcs=use_abbreviation,
    )
    nd_testing.assert_wcs_seem_equal(result.wcs, wcs_b)
    assert result.meta == meta_b
    assert result.mask == mask_b
    assert_array_equal(result.uncertainty.array, uncert_b.array)
    # "first found": only the first operand has attributes.
    result = full_a.add(
        bare,
        propagate_uncertainties=False,
        handle_meta=use_abbreviation,
        handle_mask=use_abbreviation,
        compare_wcs=use_abbreviation,
    )
    nd_testing.assert_wcs_seem_equal(result.wcs, wcs_a)
    assert result.meta == meta_a
    assert result.mask == mask_a
    assert_array_equal(result.uncertainty.array, uncert_a.array)
def test_arithmetics_meta_func():
    """Custom ``handle_meta`` callables receive both metas plus any keyword
    passed to ``add`` with the ``meta_`` prefix stripped."""

    def pick_meta(meta1, meta2, take="first"):
        # Select one of the two operand metas based on ``take``.
        return meta1 if take == "first" else meta2

    first_meta = {"a": 1}
    second_meta = {"a": 3, "b": 2}
    operand1 = NDDataArithmetic(
        [1, 1, 1], meta=first_meta, mask=True, uncertainty=StdDevUncertainty([1, 2, 3])
    )
    operand2 = NDDataArithmetic(
        [1, 1, 1], meta=second_meta, mask=False, uncertainty=StdDevUncertainty([1, 2, 3])
    )
    # Default: the callable keeps the first operand's meta.
    result = operand1.add(operand2, handle_meta=pick_meta)
    assert result.meta["a"] == 1
    assert "b" not in result.meta
    # ``meta_take`` is forwarded to the callable as ``take``.
    result = operand1.add(operand2, handle_meta=pick_meta, meta_take="second")
    assert result.meta["a"] == 3
    assert result.meta["b"] == 2
    # Without the ``meta_`` prefix the keyword is not recognised.
    with pytest.raises(KeyError):
        operand1.add(operand2, handle_meta=pick_meta, take="second")
def test_arithmetics_wcs_func():
    """Custom ``compare_wcs`` callables work like the meta/mask handlers:
    extra keywords need a ``wcs_`` prefix and a falsy comparison result makes
    the operation fail."""

    def wcs_comparison(wcs1, wcs2, tolerance=0.1):
        # Treat the two WCS as equal unless the tolerance is very small.
        return tolerance >= 0.01

    wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=1)
    operand1 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 1},
        mask=True,
        wcs=wcs1,
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    operand2 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 3, "b": 2},
        mask=False,
        wcs=wcs2,
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    # With the default tolerance the comparison succeeds and the first
    # operand's wcs is kept.
    result = operand1.add(operand2, compare_wcs=wcs_comparison)
    nd_testing.assert_wcs_seem_equal(result.wcs, wcs1)
    # Fails because the comparison function returns False.
    with pytest.raises(ValueError):
        operand1.add(operand2, compare_wcs=wcs_comparison, wcs_tolerance=0.00001)
    # Fails because the keyword lacks the required ``wcs_`` prefix.
    with pytest.raises(KeyError):
        operand1.add(operand2, compare_wcs=wcs_comparison, tolerance=1)
def test_arithmetics_mask_func():
    """Custom ``handle_mask`` callables get extra keywords via the ``mask_``
    prefix, analogous to the meta and wcs handlers."""

    def select_mask(mask1, mask2, fun=0):
        # Return the second mask for fun > 0.5, otherwise the first one.
        return mask2 if fun > 0.5 else mask1

    first_mask = [True, False, True]
    second_mask = [True, False, False]
    operand1 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 1},
        mask=first_mask,
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    operand2 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 3, "b": 2},
        mask=second_mask,
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    # Default keyword value keeps the first operand's mask.
    result = operand1.add(operand2, handle_mask=select_mask)
    assert_array_equal(result.mask, operand1.mask)
    # ``mask_fun`` is forwarded to the callable as ``fun``.
    result = operand1.add(operand2, handle_mask=select_mask, mask_fun=1)
    assert_array_equal(result.mask, operand2.mask)
    # The bare keyword (without ``mask_`` prefix) must be rejected.
    with pytest.raises(KeyError):
        operand1.add(operand2, handle_mask=select_mask, fun=1)
@pytest.mark.parametrize("meth", ["add", "subtract", "divide", "multiply"])
def test_two_argument_useage(meth):
    """Arithmetic methods accept two operands when invoked on the class or on
    an unrelated instance, matching the one-operand instance call."""
    left = NDDataArithmetic(np.ones((3, 3)))
    right = NDDataArithmetic(np.ones((3, 3)))
    # Invoked on the class itself with both operands explicit, compared with
    # the already-tested instance call.
    class_result = getattr(NDDataArithmetic, meth)(left, right)
    instance_result = getattr(left, meth)(right)
    np.testing.assert_array_equal(class_result.data, instance_result.data)
    # Invoked on an instance that is not one of the operands.
    unrelated_result = getattr(NDDataArithmetic(-100), meth)(left, right)
    instance_result = getattr(left, meth)(right)
    np.testing.assert_array_equal(unrelated_result.data, instance_result.data)
@pytest.mark.parametrize("meth", ["add", "subtract", "divide", "multiply"])
def test_two_argument_useage_non_nddata_first_arg(meth):
    """The two-operand form also accepts plain (non-NDData) operands."""
    left, right = 50, 100
    # Reference result computed from two NDData-wrapped operands.
    ndd_left = NDDataArithmetic(left)
    ndd_right = NDDataArithmetic(right)
    reference = getattr(ndd_left, meth)(ndd_right)
    # Plain scalars passed to the class-level (unbound) method.
    result = getattr(NDDataArithmetic, meth)(left, right)
    np.testing.assert_array_equal(result.data, reference.data)
    # ...and the same through an unrelated instance.
    result = getattr(NDDataArithmetic(-100), meth)(left, right)
    reference = getattr(ndd_left, meth)(ndd_right)
    np.testing.assert_array_equal(result.data, reference.data)
def test_arithmetics_unknown_uncertainties():
    """``UnknownUncertainty`` cannot be propagated but may be kept or dropped."""
    operand1 = NDDataArithmetic(
        np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3)))
    )
    operand2 = NDDataArithmetic(
        np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3)) * 2)
    )
    # Propagation (the default) is impossible for unknown uncertainties.
    with pytest.raises(IncompatibleUncertaintiesException):
        operand1.add(operand2)
    # propagate_uncertainties=False keeps the first operand's uncertainty.
    kept = operand1.add(operand2, propagate_uncertainties=False)
    np.testing.assert_array_equal(operand1.uncertainty.array, kept.uncertainty.array)
    # propagate_uncertainties=None drops the uncertainty entirely.
    dropped = operand1.add(operand2, propagate_uncertainties=None)
    assert dropped.uncertainty is None
def test_psf_warning():
    """Arithmetic involving any operand with a set psf must warn."""
    with_psf = NDDataArithmetic(np.ones((3, 3)), psf=np.zeros(3))
    without_psf = NDDataArithmetic(np.ones((3, 3)), psf=None)
    # No warning when neither operand carries a psf.
    without_psf.add(without_psf)
    expected = "Not setting psf attribute during add"
    # Warn whether the psf sits on the first, the second, or both operands.
    for first, second in [
        (with_psf, without_psf),
        (without_psf, with_psf),
        (with_psf, with_psf),
    ]:
        with pytest.warns(AstropyUserWarning, match=expected):
            first.add(second)
|
3d24e539c4832fd553e720635a52d8e0d748f66f3a0be9813a12c494dd01e49a | from .high_level_api import *
from .high_level_wcs_wrapper import *
from .low_level_api import *
from .utils import *
from .wrappers import *
|
0a572f0a85f8afe4d89f95e22d6b76fae7581b9279523f3e74055a429c32842a | import numpy as np
import pytest
from astropy.coordinates import SkyCoord
from astropy.units import Quantity
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseLowLevelWCS
# NOTE: ``sliced_low_level_wcs`` is a deprecated shim module that emits a
# deprecation warning as soon as it is imported, so keep pytest from
# collecting it.
collect_ignore = ["sliced_low_level_wcs.py"]
@pytest.fixture
def spectral_1d_fitswcs():
    """One-dimensional FITS-WCS with a single frequency axis."""
    spectral_wcs = WCS(naxis=1)
    spectral_wcs.wcs.ctype = ("FREQ",)
    spectral_wcs.wcs.cname = ("Frequency",)
    spectral_wcs.wcs.cunit = ("Hz",)
    # Linear dispersion: 4 GHz reference value at (1-based) pixel 11,
    # 3 GHz per pixel.
    spectral_wcs.wcs.crpix = (11.0,)
    spectral_wcs.wcs.crval = (4.0e9,)
    spectral_wcs.wcs.cdelt = (3.0e9,)
    return spectral_wcs
@pytest.fixture
def time_1d_fitswcs():
    """One-dimensional FITS-WCS with a single time axis anchored to MJDREF."""
    wcs = WCS(naxis=1)
    wcs.wcs.ctype = ("TIME",)
    wcs.wcs.mjdref = (30042, 0)
    wcs.wcs.crval = (3.0,)
    wcs.wcs.crpix = (11.0,)
    wcs.wcs.cname = ("Time",)
    # Use a 1-tuple like every other per-axis attribute in this module; the
    # previous bare-string assignment only worked because a length-1 string
    # happens to iterate into one element.
    wcs.wcs.cunit = ("s",)
    return wcs
@pytest.fixture
def celestial_2d_fitswcs():
    """Two-dimensional celestial FITS-WCS (plate carrée projection)."""
    sky_wcs = WCS(naxis=2)
    sky_wcs.wcs.ctype = "RA---CAR", "DEC--CAR"
    sky_wcs.wcs.cname = "Right Ascension", "Declination"
    sky_wcs.wcs.cunit = "deg", "deg"
    # Reference point (RA, Dec) = (4, 0) deg at (1-based) pixel (6, 7);
    # 2 deg per pixel with RA decreasing towards larger x.
    sky_wcs.wcs.crpix = 6.0, 7.0
    sky_wcs.wcs.crval = 4.0, 0.0
    sky_wcs.wcs.cdelt = -2.0, 2.0
    sky_wcs.pixel_shape = (6, 7)
    sky_wcs.pixel_bounds = [(-1, 5), (1, 7)]
    return sky_wcs
@pytest.fixture
def spectral_cube_3d_fitswcs():
    """Three-dimensional FITS-WCS: two celestial axes plus a spectral axis."""
    cube_wcs = WCS(naxis=3)
    cube_wcs.wcs.ctype = "RA---CAR", "DEC--CAR", "FREQ"
    cube_wcs.wcs.cname = "Right Ascension", "Declination", "Frequency"
    cube_wcs.wcs.cunit = "deg", "deg", "Hz"
    cube_wcs.wcs.crpix = 6.0, 7.0, 11.0
    cube_wcs.wcs.crval = 4.0, 0.0, 4.0e9
    cube_wcs.wcs.cdelt = -2.0, 2.0, 3.0e9
    cube_wcs.pixel_shape = (6, 7, 3)
    cube_wcs.pixel_bounds = [(-1, 5), (1, 7), (1, 2.5)]
    return cube_wcs
@pytest.fixture
def cube_4d_fitswcs():
    """Four-dimensional FITS-WCS: celestial, spectral and temporal axes."""
    hyper_wcs = WCS(naxis=4)
    hyper_wcs.wcs.ctype = "RA---CAR", "DEC--CAR", "FREQ", "TIME"
    hyper_wcs.wcs.cname = "Right Ascension", "Declination", "Frequency", "Time"
    hyper_wcs.wcs.cunit = "deg", "deg", "Hz", "s"
    hyper_wcs.wcs.crpix = 6.0, 7.0, 11.0, 11.0
    hyper_wcs.wcs.crval = 4.0, 0.0, 4.0e9, 3
    hyper_wcs.wcs.cdelt = -2.0, 2.0, 3.0e9, 1
    # Reference epoch for the TIME axis.
    hyper_wcs.wcs.mjdref = (30042, 0)
    return hyper_wcs
class Spectral1DLowLevelWCS(BaseLowLevelWCS):
    """Minimal hand-written APE 14 low-level WCS mirroring the
    ``spectral_1d_fitswcs`` fixture: frequency = (pixel - 10) * 3 GHz + 4 GHz
    with zero-based pixel coordinates.
    """

    @property
    def pixel_n_dim(self):
        return 1

    @property
    def world_n_dim(self):
        return 1

    @property
    def world_axis_physical_types(self):
        return ("em.freq",)

    @property
    def world_axis_units(self):
        return ("Hz",)

    @property
    def world_axis_names(self):
        return ("Frequency",)

    # Shape and bounds are optional and settable so tests can exercise them.
    _pixel_shape = None

    @property
    def pixel_shape(self):
        return self._pixel_shape

    @pixel_shape.setter
    def pixel_shape(self, value):
        self._pixel_shape = value

    _pixel_bounds = None

    @property
    def pixel_bounds(self):
        return self._pixel_bounds

    @pixel_bounds.setter
    def pixel_bounds(self, value):
        self._pixel_bounds = value

    def pixel_to_world_values(self, pixel_array):
        # Convert to an ndarray *before* doing arithmetic so plain Python
        # sequences (lists/tuples) are accepted, not only ndarrays/scalars —
        # ``pixel_array - 10`` on a list would raise TypeError.
        return (np.asarray(pixel_array) - 10) * 3e9 + 4e9

    def world_to_pixel_values(self, world_array):
        # Inverse of pixel_to_world_values; same asarray-first rationale.
        return (np.asarray(world_array) - 4e9) / 3e9 + 10

    @property
    def world_axis_object_components(self):
        return (("test", 0, "value"),)

    @property
    def world_axis_object_classes(self):
        return {"test": (Quantity, (), {"unit": "Hz"})}
@pytest.fixture
def spectral_1d_ape14_wcs():
    """Fresh ``Spectral1DLowLevelWCS`` instance for each test."""
    wcs_obj = Spectral1DLowLevelWCS()
    return wcs_obj
class Celestial2DLowLevelWCS(BaseLowLevelWCS):
    """Minimal hand-written APE 14 low-level WCS mirroring the
    ``celestial_2d_fitswcs`` fixture: 2 deg/pixel with RA decreasing as x
    increases, zero-based pixel coordinates.
    """

    @property
    def pixel_n_dim(self):
        return 2

    @property
    def world_n_dim(self):
        return 2

    @property
    def world_axis_physical_types(self):
        return "pos.eq.ra", "pos.eq.dec"

    @property
    def world_axis_units(self):
        return "deg", "deg"

    @property
    def world_axis_names(self):
        return "Right Ascension", "Declination"

    @property
    def pixel_shape(self):
        return (6, 7)

    @property
    def pixel_bounds(self):
        return (-1, 5), (1, 7)

    def pixel_to_world_values(self, px, py):
        ra = -(np.asarray(px) - 5.0) * 2 + 4.0
        dec = (np.asarray(py) - 6.0) * 2
        return ra, dec

    def world_to_pixel_values(self, wx, wy):
        x = -(np.asarray(wx) - 4.0) / 2 + 5.0
        y = np.asarray(wy) / 2 + 6.0
        return x, y

    @property
    def world_axis_object_components(self):
        return [
            ("test", 0, "spherical.lon.degree"),
            ("test", 1, "spherical.lat.degree"),
        ]

    @property
    def world_axis_object_classes(self):
        return {"test": (SkyCoord, (), {"unit": "deg"})}
@pytest.fixture
def celestial_2d_ape14_wcs():
    """Fresh ``Celestial2DLowLevelWCS`` instance for each test."""
    wcs_obj = Celestial2DLowLevelWCS()
    return wcs_obj
|
9cbeae970ddf3981a632e947cd46bb449f40fb85bef1bf47d820923fe7c91a2b | from .high_level_api import HighLevelWCSMixin
from .low_level_api import BaseLowLevelWCS
from .utils import wcs_info_str
__all__ = ["HighLevelWCSWrapper"]
class HighLevelWCSWrapper(HighLevelWCSMixin):
    """
    Wrapper class that can take any :class:`~astropy.wcs.wcsapi.BaseLowLevelWCS`
    object and expose the high-level WCS API.

    Parameters
    ----------
    low_level_wcs : `~astropy.wcs.wcsapi.BaseLowLevelWCS`
        The low-level WCS object to wrap. Every property below simply
        delegates to this object.
    """
    def __init__(self, low_level_wcs):
        # Fail early with a clear message instead of raising confusing
        # AttributeErrors later when a delegated property is accessed.
        if not isinstance(low_level_wcs, BaseLowLevelWCS):
            raise TypeError(
                "Input to a HighLevelWCSWrapper must be a low level WCS object"
            )
        self._low_level_wcs = low_level_wcs
    @property
    def low_level_wcs(self):
        """
        The wrapped `~astropy.wcs.wcsapi.BaseLowLevelWCS` object.
        """
        return self._low_level_wcs
    @property
    def pixel_n_dim(self):
        """
        See `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
        """
        return self.low_level_wcs.pixel_n_dim
    @property
    def world_n_dim(self):
        """
        See `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`
        """
        return self.low_level_wcs.world_n_dim
    @property
    def world_axis_physical_types(self):
        """
        See `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`
        """
        return self.low_level_wcs.world_axis_physical_types
    @property
    def world_axis_units(self):
        """
        See `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`
        """
        return self.low_level_wcs.world_axis_units
    @property
    def array_shape(self):
        """
        See `~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape`
        """
        return self.low_level_wcs.array_shape
    @property
    def pixel_bounds(self):
        """
        See `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_bounds`
        """
        return self.low_level_wcs.pixel_bounds
    @property
    def axis_correlation_matrix(self):
        """
        See `~astropy.wcs.wcsapi.BaseLowLevelWCS.axis_correlation_matrix`
        """
        return self.low_level_wcs.axis_correlation_matrix
    def _as_mpl_axes(self):
        """
        See `~astropy.wcs.wcsapi.BaseLowLevelWCS._as_mpl_axes`
        """
        return self.low_level_wcs._as_mpl_axes()
    def __str__(self):
        # Human-readable summary of the wrapped low-level WCS.
        return wcs_info_str(self.low_level_wcs)
    def __repr__(self):
        return f"{object.__repr__(self)}\n{str(self)}"
1afd0a7a577d306b66b733f9ba0392bd0319afd8b20fc6237472a352c034497d | import abc
import os
import numpy as np
__all__ = ["BaseLowLevelWCS", "validate_physical_types"]
class BaseLowLevelWCS(metaclass=abc.ABCMeta):
    """
    Abstract base class for the low-level WCS interface.
    This is described in `APE 14: A shared Python interface for World Coordinate
    Systems <https://doi.org/10.5281/zenodo.1188875>`_.
    """
    @property
    @abc.abstractmethod
    def pixel_n_dim(self):
        """
        The number of axes in the pixel coordinate system.
        """
    @property
    @abc.abstractmethod
    def world_n_dim(self):
        """
        The number of axes in the world coordinate system.
        """
    @property
    @abc.abstractmethod
    def world_axis_physical_types(self):
        """
        An iterable of strings describing the physical type for each world axis.
        These should be names from the VO UCD1+ controlled Vocabulary
        (http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
        type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
        arbitrary string. Alternatively, if the physical type is
        unknown/undefined, an element can be `None`.
        """
    @property
    @abc.abstractmethod
    def world_axis_units(self):
        """
        An iterable of strings giving the units of the world coordinates for
        each axis.
        The strings should follow the `IVOA VOUnit standard
        <http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
        specification document, units that do not follow this standard are still
        allowed, but just not recommended).
        """
    @abc.abstractmethod
    def pixel_to_world_values(self, *pixel_arrays):
        """
        Convert pixel coordinates to world coordinates.
        This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as
        input, and pixel coordinates should be zero-based. Returns
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are
        assumed to be 0 at the center of the first pixel in each dimension. If a
        pixel is in a region where the WCS is not defined, NaN can be returned.
        The coordinates should be specified in the ``(x, y)`` order, where for
        an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
        coordinate.
        If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
        method returns a single scalar or array, otherwise a tuple of scalars or
        arrays is returned.
        """
    def array_index_to_world_values(self, *index_arrays):
        """
        Convert array indices to world coordinates.
        This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
        the indices should be given in ``(i, j)`` order, where for an image
        ``i`` is the row and ``j`` is the column (i.e. the opposite order to
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).
        If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
        method returns a single scalar or array, otherwise a tuple of scalars or
        arrays is returned.
        """
        # Array indices are (row, column, ...), i.e. the reverse of the
        # (x, y, ...) pixel order expected by pixel_to_world_values.
        return self.pixel_to_world_values(*index_arrays[::-1])
    @abc.abstractmethod
    def world_to_pixel_values(self, *world_arrays):
        """
        Convert world coordinates to pixel coordinates.
        This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as
        input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel
        coordinates are assumed to be 0 at the center of the first pixel in each
        dimension. If a world coordinate does not have a matching pixel
        coordinate, NaN can be returned. The coordinates should be returned in
        the ``(x, y)`` order, where for an image, ``x`` is the horizontal
        coordinate and ``y`` is the vertical coordinate.
        If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
        method returns a single scalar or array, otherwise a tuple of scalars or
        arrays is returned.
        """
    def world_to_array_index_values(self, *world_arrays):
        """
        Convert world coordinates to array indices.
        This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
        the indices should be returned in ``(i, j)`` order, where for an image
        ``i`` is the row and ``j`` is the column (i.e. the opposite order to
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
        returned as rounded integers.
        If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
        method returns a single scalar or array, otherwise a tuple of scalars or
        arrays is returned.
        """
        pixel_arrays = self.world_to_pixel_values(*world_arrays)
        if self.pixel_n_dim == 1:
            # A single dimension comes back as a bare scalar/array; wrap it
            # so the rounding step below can treat both cases uniformly.
            pixel_arrays = (pixel_arrays,)
        else:
            # Reverse (x, y, ...) pixel order into (..., row, column) order.
            pixel_arrays = pixel_arrays[::-1]
        # Round to the nearest integer index (pixel centers sit at integers).
        array_indices = tuple(
            np.asarray(np.floor(pixel + 0.5), dtype=np.int_) for pixel in pixel_arrays
        )
        return array_indices[0] if self.pixel_n_dim == 1 else array_indices
    @property
    @abc.abstractmethod
    def world_axis_object_components(self):
        """
        A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
        on constructing high-level objects for the world coordinates.
        Each element of the list is a tuple with three items:
        * The first is a name for the world object this world array
          corresponds to, which *must* match the string names used in
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
          appear twice because two world arrays might correspond to a single
          world object (e.g. a celestial coordinate might have both “ra” and
          “dec” arrays, which correspond to a single sky coordinate object).
        * The second element is either a string keyword argument name or a
          positional index for the corresponding class from
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.
        * The third argument is a string giving the name of the property
          to access on the corresponding class from
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in
          order to get numerical values. Alternatively, this argument can be a
          callable Python object that takes a high-level coordinate object and
          returns the numerical values suitable for passing to the low-level
          WCS transformation methods.
        See the document
        `APE 14: A shared Python interface for World Coordinate Systems
        <https://doi.org/10.5281/zenodo.1188875>`_ for examples.
        """
    @property
    @abc.abstractmethod
    def world_axis_object_classes(self):
        """
        A dictionary giving information on constructing high-level objects for
        the world coordinates.
        Each key of the dictionary is a string key from
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
        tuple with three elements or four elements:
        * The first element of the tuple must be a class or a string specifying
          the fully-qualified name of a class, which will specify the actual
          Python object to be created.
        * The second element, should be a tuple specifying the positional
          arguments required to initialize the class. If
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
          world coordinates should be passed as a positional argument, this
          tuple should include `None` placeholders for the world coordinates.
        * The third tuple element must be a dictionary with the keyword
          arguments required to initialize the class.
        * Optionally, for advanced use cases, the fourth element (if present)
          should be a callable Python object that gets called instead of the
          class and gets passed the positional and keyword arguments. It should
          return an object of the type of the first element in the tuple.
        Note that we don't require the classes to be Astropy classes since there
        is no guarantee that Astropy will have all the classes to represent all
        kinds of world coordinates. Furthermore, we recommend that the output be
        kept as human-readable as possible.
        The classes used here should have the ability to do conversions by
        passing an instance as the first argument to the same class with
        different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
        a requirement for the implementation of the high-level interface.
        The second and third tuple elements for each value of this dictionary
        can in turn contain either instances of classes, or if necessary can
        contain serialized versions that should take the same form as the main
        classes described above (a tuple with three elements with the fully
        qualified name of the class, then the positional arguments and the
        keyword arguments). For low-level API objects implemented in Python, we
        recommend simply returning the actual objects (not the serialized form)
        for optimal performance. Implementations should either always or never
        use serialized classes to represent Python objects, and should indicate
        which of these they follow using the
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.
        See the document
        `APE 14: A shared Python interface for World Coordinate Systems
        <https://doi.org/10.5281/zenodo.1188875>`_ for examples.
        """
    # The properties below have default fallback implementations, so they are
    # not abstract.
    @property
    def array_shape(self):
        """
        The shape of the data that the WCS applies to as a tuple of length
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
        order (the convention for arrays in Python).
        If the WCS is valid in the context of a dataset with a particular
        shape, then this property can be used to store the shape of the
        data. This can be used for example if implementing slicing of WCS
        objects. This is an optional property, and it should return `None`
        if a shape is not known or relevant.
        """
        if self.pixel_shape is None:
            return None
        else:
            # array_shape is simply pixel_shape in reversed (Numpy) order.
            return self.pixel_shape[::-1]
    @property
    def pixel_shape(self):
        """
        The shape of the data that the WCS applies to as a tuple of length
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
        order (where for an image, ``x`` is the horizontal coordinate and ``y``
        is the vertical coordinate).
        If the WCS is valid in the context of a dataset with a particular
        shape, then this property can be used to store the shape of the
        data. This can be used for example if implementing slicing of WCS
        objects. This is an optional property, and it should return `None`
        if a shape is not known or relevant.
        If you are interested in getting a shape that is comparable to that of
        a Numpy array, you should use
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
        """
        return None
    @property
    def pixel_bounds(self):
        """
        The bounds (in pixel coordinates) inside which the WCS is defined,
        as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
        ``(min, max)`` tuples.
        The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
        order. WCS solutions are sometimes only guaranteed to be accurate
        within a certain range of pixel values, for example when defining a
        WCS that includes fitted distortions. This is an optional property,
        and it should return `None` if a shape is not known or relevant.
        """
        return None
    @property
    def pixel_axis_names(self):
        """
        An iterable of strings describing the name for each pixel axis.
        If an axis does not have a name, an empty string should be returned
        (this is the default behavior for all axes if a subclass does not
        override this property). Note that these names are just for display
        purposes and are not standardized.
        """
        return [""] * self.pixel_n_dim
    @property
    def world_axis_names(self):
        """
        An iterable of strings describing the name for each world axis.
        If an axis does not have a name, an empty string should be returned
        (this is the default behavior for all axes if a subclass does not
        override this property). Note that these names are just for display
        purposes and are not standardized. For standardized axis types, see
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.
        """
        return [""] * self.world_n_dim
    @property
    def axis_correlation_matrix(self):
        """
        Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that
        indicates using booleans whether a given world coordinate depends on a
        given pixel coordinate.
        This defaults to a matrix where all elements are `True` in the absence
        of any further information. For completely independent axes, the
        diagonal would be `True` and all other entries `False`.
        """
        # Conservative default: assume every world axis depends on every
        # pixel axis.
        return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
    @property
    def serialized_classes(self):
        """
        Indicates whether Python objects are given in serialized form or as
        actual Python objects.
        """
        return False
    def _as_mpl_axes(self):
        """
        Compatibility hook for Matplotlib and WCSAxes. With this method, one can
        do::
            from astropy.wcs import WCS
            import matplotlib.pyplot as plt
            wcs = WCS('filename.fits')
            fig = plt.figure()
            ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
            ...
        and this will generate a plot with the correct WCS coordinates on the
        axes.
        """
        from astropy.visualization.wcsaxes import WCSAxes
        return WCSAxes, {"wcs": self}
# The list of valid UCD1+ physical-type words is shipped as a data file next
# to this module; the first line of the file is a header and is skipped.
UCDS_FILE = os.path.join(os.path.dirname(__file__), "data", "ucds.txt")
with open(UCDS_FILE) as f:
    VALID_UCDS = {x.strip() for x in f.read().splitlines()[1:]}
def validate_physical_types(physical_types):
    """
    Validate a list of physical types against the UCD1+ standard.

    Each element must be `None`, a term present in ``VALID_UCDS`` (the IVOA
    UCD1+ controlled vocabulary), or a string starting with ``'custom:'``.

    Parameters
    ----------
    physical_types : iterable of str or None
        The physical types to validate.

    Raises
    ------
    ValueError
        If any entry is not a valid physical type.
    """
    for physical_type in physical_types:
        if (
            physical_type is not None
            and physical_type not in VALID_UCDS
            and not physical_type.startswith("custom:")
        ):
            # NOTE: the standards body is the IVOA (International Virtual
            # Observatory Alliance) — the previous message misspelled it
            # as "IOVA".
            raise ValueError(
                f"'{physical_type}' is not a valid IVOA UCD1+ physical type. It must be"
                " a string specified in the list"
                " (http://www.ivoa.net/documents/latest/UCDlist.html) or if no"
                " matching type exists it can be any string prepended with 'custom:'."
            )
be95a21b1609f7abae108effaed11962e6e6c4c51dc6cd38d3d7b4d36e2fc0b9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import numpy as np
__all__ = ["deserialize_class", "wcs_info_str"]
def deserialize_class(tpl, construct=True):
    """
    Deserialize a class serialized as a 3-tuple, recursively.

    Parameters
    ----------
    tpl : tuple
        A 3-tuple ``(fully_qualified_class_name, args, kwargs)``. Any tuple
        found inside ``args`` or in the values of ``kwargs`` is itself
        deserialized the same way.
    construct : bool, optional
        If `True` (default), instantiate the class with the deserialized
        arguments; otherwise return the ``(class, args, kwargs)`` triple.

    Returns
    -------
    object or tuple
        The constructed instance, or ``(class, args, kwargs)`` when
        ``construct`` is `False`.

    Raises
    ------
    ValueError
        If ``tpl`` is not a tuple of exactly three values.
    """
    if not isinstance(tpl, tuple) or len(tpl) != 3:
        raise ValueError("Expected a tuple of three values")
    module, klass = tpl[0].rsplit(".", 1)
    module = importlib.import_module(module)
    klass = getattr(module, klass)
    args = tuple(
        deserialize_class(arg) if isinstance(arg, tuple) else arg for arg in tpl[1]
    )
    # Dict comprehension instead of dict(<generator of pairs>) — clearer and
    # avoids building intermediate 2-tuples.
    kwargs = {
        key: deserialize_class(val) if isinstance(val, tuple) else val
        for key, val in tpl[2].items()
    }
    if construct:
        return klass(*args, **kwargs)
    else:
        return klass, args, kwargs
def wcs_info_str(wcs):
    """
    Return a multi-line human-readable summary string for *wcs*.

    The summary contains the dimensionality, a per-axis pixel table (name,
    data size, bounds), a per-axis world table (name, physical type, units)
    and the pixel/world axis correlation matrix, all read through the
    APE 14 low-level WCS attributes of ``wcs``.
    """
    # Overall header
    s = f"{wcs.__class__.__name__} Transformation\n\n"
    s += "This transformation has {} pixel and {} world dimensions\n\n".format(
        wcs.pixel_n_dim, wcs.world_n_dim
    )
    s += f"Array shape (Numpy order): {wcs.array_shape}\n\n"
    # Pixel dimensions table
    # Fall back to harmless defaults when the optional shape attributes
    # are not set.
    array_shape = wcs.array_shape or (0,)
    pixel_shape = wcs.pixel_shape or (None,) * wcs.pixel_n_dim
    # Find largest between header size and value length
    pixel_dim_width = max(9, len(str(wcs.pixel_n_dim)))
    pixel_nam_width = max(9, max(len(x) for x in wcs.pixel_axis_names))
    pixel_siz_width = max(9, len(str(max(array_shape))))
    # fmt: off
    s += (('{0:' + str(pixel_dim_width) + 's}').format('Pixel Dim') + ' ' +
          ('{0:' + str(pixel_nam_width) + 's}').format('Axis Name') + ' ' +
          ('{0:' + str(pixel_siz_width) + 's}').format('Data size') + ' ' +
          'Bounds\n')
    # fmt: on
    for ipix in range(wcs.pixel_n_dim):
        # fmt: off
        s += (('{0:' + str(pixel_dim_width) + 'g}').format(ipix) + ' ' +
              ('{0:' + str(pixel_nam_width) + 's}').format(wcs.pixel_axis_names[ipix] or 'None') + ' ' +
              (" " * 5 + str(None) if pixel_shape[ipix] is None else
               ('{0:' + str(pixel_siz_width) + 'g}').format(pixel_shape[ipix])) + ' ' +
              '{:s}'.format(str(None if wcs.pixel_bounds is None else wcs.pixel_bounds[ipix]) + '\n'))
        # fmt: on
    s += "\n"
    # World dimensions table
    # Find largest between header size and value length
    world_dim_width = max(9, len(str(wcs.world_n_dim)))
    world_nam_width = max(
        9, max(len(x) if x is not None else 0 for x in wcs.world_axis_names)
    )
    world_typ_width = max(
        13, max(len(x) if x is not None else 0 for x in wcs.world_axis_physical_types)
    )
    # fmt: off
    s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') + ' ' +
          ('{0:' + str(world_nam_width) + 's}').format('Axis Name') + ' ' +
          ('{0:' + str(world_typ_width) + 's}').format('Physical Type') + ' ' +
          'Units\n')
    # fmt: on
    for iwrl in range(wcs.world_n_dim):
        # Display placeholders for unset names/types/units.
        name = wcs.world_axis_names[iwrl] or "None"
        typ = wcs.world_axis_physical_types[iwrl] or "None"
        unit = wcs.world_axis_units[iwrl] or "unknown"
        # fmt: off
        s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +
              ('{0:' + str(world_nam_width) + 's}').format(name) + ' ' +
              ('{0:' + str(world_typ_width) + 's}').format(typ) + ' ' +
              '{:s}'.format(unit + '\n'))
        # fmt: on
    s += "\n"
    # Axis correlation matrix
    pixel_dim_width = max(3, len(str(wcs.world_n_dim)))
    s += "Correlation between pixel and world axes:\n\n"
    # fmt: off
    s += (' ' * world_dim_width + ' ' +
          ('{0:^' + str(wcs.pixel_n_dim * 5 - 2) + 's}').format('Pixel Dim') +
          '\n')
    s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') +
          ''.join([' ' + ('{0:' + str(pixel_dim_width) + 'd}').format(ipix)
                   for ipix in range(wcs.pixel_n_dim)]) +
          '\n')
    # fmt: on
    # Render the boolean matrix as "yes"/"no" strings.
    matrix = wcs.axis_correlation_matrix
    matrix_str = np.empty(matrix.shape, dtype="U3")
    matrix_str[matrix] = "yes"
    matrix_str[~matrix] = "no"
    for iwrl in range(wcs.world_n_dim):
        # fmt: off
        s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) +
              ''.join([' ' + ('{0:>' + str(pixel_dim_width) + 's}').format(matrix_str[iwrl, ipix])
                       for ipix in range(wcs.pixel_n_dim)]) +
              '\n')
        # fmt: on
    # Make sure we get rid of the extra whitespace at the end of some lines
    return "\n".join([l.rstrip() for l in s.splitlines()])
f409d76b88f15f1d12601c71acf0e7d79e54007e845278ba2d5e9a217ce20fed | import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
from .wrappers.sliced_wcs import SlicedLowLevelWCS, sanitize_slices # noqa: F401
# Importing this shim module works but warns: the implementation now lives in
# astropy.wcs.wcsapi.wrappers.sliced_wcs and is only re-exported here.
warnings.warn(
    "SlicedLowLevelWCS has been moved to"
    " astropy.wcs.wcsapi.wrappers.sliced_wcs.SlicedLowLevelWCS, or can be"
    " imported from astropy.wcs.wcsapi.",
    AstropyDeprecationWarning,
)
|
07161e8b70d5aa79221e2be394d575085b0cd8f0710538a15f8b00cc25bb23a8 | import abc
from collections import OrderedDict, defaultdict
import numpy as np
from .utils import deserialize_class
__all__ = ["BaseHighLevelWCS", "HighLevelWCSMixin"]
def rec_getattr(obj, att):
    """Look up a dotted attribute path (e.g. ``"a.b.c"``) on *obj*."""
    target = obj
    for part in att.split("."):
        target = getattr(target, part)
    return target
def default_order(components):
    """Return the distinct component names in first-appearance order."""
    names = (name for name, _, _ in components)
    # dict preserves insertion order and drops duplicate keys.
    return list(dict.fromkeys(names))
def _toindex(value):
"""
Convert value to an int or an int array.
Input coordinates converted to integers
corresponding to the center of the pixel.
The convention is that the center of the pixel is
(0, 0), while the lower left corner is (-0.5, -0.5).
The outputs are used to index the mask.
Examples
--------
>>> _toindex(np.array([-0.5, 0.49999]))
array([0, 0])
>>> _toindex(np.array([0.5, 1.49999]))
array([1, 1])
>>> _toindex(np.array([1.5, 2.49999]))
array([2, 2])
"""
indx = np.asarray(np.floor(np.asarray(value) + 0.5), dtype=int)
return indx
class BaseHighLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the high-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
    @property
    @abc.abstractmethod
    def low_level_wcs(self):
        """
        Returns a reference to the underlying low-level WCS object.

        Abstract: concrete high-level WCS classes must provide it.
        """
    @abc.abstractmethod
    def pixel_to_world(self, *pixel_arrays):
        """
        Convert pixel coordinates to world coordinates (represented by
        high-level objects).
        If a single high-level object is used to represent the world coordinates
        (i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
        as-is (not in a tuple/list), otherwise a tuple of high-level objects is
        returned. See
        `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` for pixel
        indexing and ordering conventions.

        Abstract: concrete high-level WCS classes must implement it.
        """
def array_index_to_world(self, *index_arrays):
"""
Convert array indices to world coordinates (represented by Astropy
objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_index_to_world_values` for
pixel indexing and ordering conventions.
"""
return self.pixel_to_world(*index_arrays[::-1])
@abc.abstractmethod
def world_to_pixel(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to pixel
coordinates.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` for pixel
indexing and ordering conventions.
"""
def world_to_array_index(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to array
indices.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_array_index_values` for
pixel indexing and ordering conventions. The indices should be returned
as rounded integers.
"""
if self.pixel_n_dim == 1:
return _toindex(self.world_to_pixel(*world_objects))
else:
return tuple(_toindex(self.world_to_pixel(*world_objects)[::-1]).tolist())
def high_level_objects_to_values(*world_objects, low_level_wcs):
    """
    Convert the input high level objects to low level values.

    This function uses the information in ``wcs.world_axis_object_classes`` and
    ``wcs.world_axis_object_components`` to convert the high level objects
    (such as `~.SkyCoord`) to low level "values" `~.Quantity` objects.

    This is used in `.HighLevelWCSMixin.world_to_pixel`, but provided as a
    separate function for use in other places where needed.

    Parameters
    ----------
    *world_objects: object
        High level coordinate objects.

    low_level_wcs: `.BaseLowLevelWCS`
        The WCS object to use to interpret the coordinates.

    Raises
    ------
    ValueError
        If the number of inputs does not match the number of classes, if a
        class tuple does not have length 3 or 4, or if the inputs are passed
        in an order that cannot be matched to the classes.
    """
    # Cache the classes and components since this may be expensive
    serialized_classes = low_level_wcs.world_axis_object_classes
    components = low_level_wcs.world_axis_object_components

    # Deserialize world_axis_object_classes using the default order
    classes = OrderedDict()
    for key in default_order(components):
        if low_level_wcs.serialized_classes:
            classes[key] = deserialize_class(serialized_classes[key], construct=False)
        else:
            classes[key] = serialized_classes[key]

    # Check that the number of classes matches the number of inputs
    if len(world_objects) != len(classes):
        raise ValueError(
            f"Number of world inputs ({len(world_objects)}) does not match expected"
            f" ({len(classes)})"
        )

    # Determine whether the classes are uniquely matched, that is we check
    # whether there is only one of each class.
    world_by_key = {}
    unique_match = True
    for w in world_objects:
        matches = []
        for key, (klass, *_) in classes.items():
            if isinstance(w, klass):
                matches.append(key)
        if len(matches) == 1:
            world_by_key[matches[0]] = w
        else:
            unique_match = False
            break

    # FIXME: For now SkyCoord won't auto-convert upon initialization
    # https://github.com/astropy/astropy/issues/7689
    # (hoisted here once instead of being re-imported inside each branch)
    from astropy.coordinates import SkyCoord

    # If the match is not unique, the order of the classes needs to match,
    # whereas if all classes are unique, we can still intelligently match
    # them even if the order is wrong.

    objects = {}

    if unique_match:
        for key, (klass, args, kwargs, *rest) in classes.items():
            if len(rest) == 0:
                klass_gen = klass
            elif len(rest) == 1:
                klass_gen = rest[0]
            else:
                raise ValueError(
                    "Tuples in world_axis_object_classes should have length 3 or 4"
                )

            if isinstance(world_by_key[key], SkyCoord):
                if "frame" in kwargs:
                    objects[key] = world_by_key[key].transform_to(kwargs["frame"])
                else:
                    objects[key] = world_by_key[key]
            else:
                objects[key] = klass_gen(world_by_key[key], *args, **kwargs)

    else:
        for ikey, key in enumerate(classes):
            klass, args, kwargs, *rest = classes[key]
            if len(rest) == 0:
                klass_gen = klass
            elif len(rest) == 1:
                klass_gen = rest[0]
            else:
                raise ValueError(
                    "Tuples in world_axis_object_classes should have length 3 or 4"
                )

            w = world_objects[ikey]
            if not isinstance(w, klass):
                # BUG FIX: entries in world_axis_object_classes may be 3- or
                # 4-tuples (the ``*rest`` handling above relies on that), so
                # use star-unpacking here — ``(k, _, _)`` raised a spurious
                # unpack ValueError for 4-tuples while formatting this error.
                raise ValueError(
                    "Expected the following order of world arguments:"
                    f" {', '.join([k.__name__ for (k, *_) in classes.values()])}"
                )

            if isinstance(w, SkyCoord):
                if "frame" in kwargs:
                    objects[key] = w.transform_to(kwargs["frame"])
                else:
                    objects[key] = w
            else:
                objects[key] = klass_gen(w, *args, **kwargs)

    # We now extract the attributes needed for the world values
    world = []
    for key, _, attr in components:
        if callable(attr):
            world.append(attr(objects[key]))
        else:
            world.append(rec_getattr(objects[key], attr))

    return world
def values_to_high_level_objects(*world_values, low_level_wcs):
    """
    Convert low level values into high level objects.

    This function uses the information in ``wcs.world_axis_object_classes`` and
    ``wcs.world_axis_object_components`` to convert low level "values"
    `~.Quantity` objects, to high level objects (such as `~.SkyCoord`).

    This is used in `.HighLevelWCSMixin.pixel_to_world`, but provided as a
    separate function for use in other places where needed.

    Parameters
    ----------
    *world_values: object
        Low level, "values" representations of the world coordinates.

    low_level_wcs: `.BaseLowLevelWCS`
        The WCS object to use to interpret the coordinates.
    """
    # Cache the classes and components since this may be expensive
    components = low_level_wcs.world_axis_object_components
    classes = low_level_wcs.world_axis_object_classes

    # Deserialize classes when they are stored in serialized (dict) form
    if low_level_wcs.serialized_classes:
        classes = {
            key: deserialize_class(serialized, construct=False)
            for key, serialized in classes.items()
        }

    # Collect the constructor arguments for each key: string attributes
    # become keyword arguments, integer attributes are positional slots.
    positional = defaultdict(list)
    keyword = defaultdict(dict)

    for i, (key, attr, _) in enumerate(components):
        if isinstance(attr, str):
            keyword[key][attr] = world_values[i]
        else:
            # Grow the positional slot list until index ``attr`` exists,
            # then drop the value into its slot.
            slots = positional[key]
            while len(slots) <= attr:
                slots.append(None)
            slots[attr] = world_values[i]

    # Instantiate one high-level object per key, in first-appearance order
    result = []
    for key in default_order(components):
        klass, extra_args, extra_kwargs, *rest = classes[key]
        if not rest:
            factory = klass
        elif len(rest) == 1:
            factory = rest[0]
        else:
            raise ValueError(
                "Tuples in world_axis_object_classes should have length 3 or 4"
            )
        result.append(
            factory(*positional[key], *extra_args, **keyword[key], **extra_kwargs)
        )

    return result
class HighLevelWCSMixin(BaseHighLevelWCS):
    """
    Mix-in class that automatically provides the high-level WCS API for the
    low-level WCS object given by the `~HighLevelWCSMixin.low_level_wcs`
    property.
    """

    @property
    def low_level_wcs(self):
        # By default the mix-in is assumed to be mixed into a low-level WCS
        # implementation; subclasses may override this to wrap another object.
        return self

    def world_to_pixel(self, *world_objects):
        # First convert the high-level objects (e.g. SkyCoord) into plain
        # per-axis values understood by the low-level API
        world_values = high_level_objects_to_values(
            *world_objects, low_level_wcs=self.low_level_wcs
        )

        # Finally we convert to pixel coordinates
        pixel_values = self.low_level_wcs.world_to_pixel_values(*world_values)

        return pixel_values

    def pixel_to_world(self, *pixel_arrays):
        # Compute the world coordinate values
        world_values = self.low_level_wcs.pixel_to_world_values(*pixel_arrays)

        # BUG FIX: query the number of world dimensions on the low-level WCS
        # — ``self`` is not guaranteed to implement the low-level API when
        # ``low_level_wcs`` is overridden to wrap a separate object.
        if self.low_level_wcs.world_n_dim == 1:
            world_values = (world_values,)

        # Convert the values into high-level objects
        world_objects = values_to_high_level_objects(
            *world_values, low_level_wcs=self.low_level_wcs
        )

        # A single world object is returned as-is rather than in a list
        if len(world_objects) == 1:
            return world_objects[0]
        else:
            return world_objects
|
128baffa3743d5a978d9051f55ee534e2c566e00fdbbe98d8899dd4b512f38c1 | # This file includes the definition of a mix-in class that provides the low-
# and high-level WCS API to the astropy.wcs.WCS object. We keep this code
# isolated in this mix-in class to avoid making the main wcs.py file too
# long.
import warnings
import numpy as np
from astropy import units as u
from astropy.constants import c
from astropy.coordinates import ICRS, Galactic, SpectralCoord
from astropy.coordinates.spectral_coordinate import (
attach_zero_velocities,
update_differentials_to_match,
)
from astropy.utils.exceptions import AstropyUserWarning
from .high_level_api import HighLevelWCSMixin
from .low_level_api import BaseLowLevelWCS
from .wrappers import SlicedLowLevelWCS
__all__ = ["custom_ctype_to_ucd_mapping", "SlicedFITSWCS", "FITSWCSAPIMixin"]
# Speed of light in SI units (m / s), cached as a plain float for use in the
# doppler/redshift conversions below.
C_SI = c.si.value

# Maps FITS SPECSYS values to the velocity frame used for spectral coordinate
# conversions: either an astropy frame name (string) or, below, a frame
# instance carrying an offset velocity.
VELOCITY_FRAMES = {
    "GEOCENT": "gcrs",
    "BARYCENT": "icrs",
    "HELIOCENT": "hcrs",
    "LSRK": "lsrk",
    "LSRD": "lsrd",
}

# The spectra velocity frames below are needed for FITS spectral WCS
# (see Greisen 06 table 12) but aren't yet defined as real
# astropy.coordinates frames, so we instead define them here as instances
# of existing coordinate frames with offset velocities. In future we should
# make these real frames so that users can more easily recognize these
# velocity frames when used in SpectralCoord.

# This frame is defined as a velocity of 220 km/s in the
# direction of l=90, b=0. The rotation velocity is defined
# in:
#
# Kerr and Lynden-Bell 1986, Review of galactic constants.
#
# NOTE: this may differ from the assumptions of galcen_v_sun
# in the Galactocentric frame - the value used here is
# the one adopted by the WCS standard for spectral
# transformations.
VELOCITY_FRAMES["GALACTOC"] = Galactic(
    u=0 * u.km,
    v=0 * u.km,
    w=0 * u.km,
    U=0 * u.km / u.s,
    V=-220 * u.km / u.s,
    W=0 * u.km / u.s,
    representation_type="cartesian",
    differential_type="cartesian",
)

# This frame is defined as a velocity of 300 km/s in the
# direction of l=90, b=0. This is defined in:
#
# Transactions of the IAU Vol. XVI B Proceedings of the
# 16th General Assembly, Reports of Meetings of Commissions:
# Comptes Rendus Des Séances Des Commissions, Commission 28,
# p201.
#
# Note that these values differ from those used by CASA
# (308 km/s towards l=105, b=-7) but we use the above values
# since these are the ones defined in Greisen et al (2006).
VELOCITY_FRAMES["LOCALGRP"] = Galactic(
    u=0 * u.km,
    v=0 * u.km,
    w=0 * u.km,
    U=0 * u.km / u.s,
    V=-300 * u.km / u.s,
    W=0 * u.km / u.s,
    representation_type="cartesian",
    differential_type="cartesian",
)

# This frame is defined as a velocity of 368 km/s in the
# direction of l=263.85, b=48.25. This is defined in:
#
# Bennett et al. (2003), First-Year Wilkinson Microwave
# Anisotropy Probe (WMAP) Observations: Preliminary Maps
# and Basic Results
#
# Note that in that paper, the dipole is expressed as a
# temperature (T=3.346 +/- 0.017mK)
VELOCITY_FRAMES["CMBDIPOL"] = Galactic(
    l=263.85 * u.deg,
    b=48.25 * u.deg,
    distance=0 * u.km,
    radial_velocity=-(3.346e-3 / 2.725 * c).to(u.km / u.s),
)
# Mapping from CTYPE axis name to UCD1
# Keys are matched against the upper-cased CTYPE prefix (the part before the
# first '-') — see world_axis_physical_types below.
CTYPE_TO_UCD1 = {
    # Celestial coordinates
    "RA": "pos.eq.ra",
    "DEC": "pos.eq.dec",
    "GLON": "pos.galactic.lon",
    "GLAT": "pos.galactic.lat",
    "ELON": "pos.ecliptic.lon",
    "ELAT": "pos.ecliptic.lat",
    "TLON": "pos.bodyrc.lon",
    "TLAT": "pos.bodyrc.lat",
    "HPLT": "custom:pos.helioprojective.lat",
    "HPLN": "custom:pos.helioprojective.lon",
    "HPRZ": "custom:pos.helioprojective.z",
    "HGLN": "custom:pos.heliographic.stonyhurst.lon",
    "HGLT": "custom:pos.heliographic.stonyhurst.lat",
    "CRLN": "custom:pos.heliographic.carrington.lon",
    "CRLT": "custom:pos.heliographic.carrington.lat",
    "SOLX": "custom:pos.heliocentric.x",
    "SOLY": "custom:pos.heliocentric.y",
    "SOLZ": "custom:pos.heliocentric.z",
    # Spectral coordinates (WCS paper 3)
    "FREQ": "em.freq",  # Frequency
    "ENER": "em.energy",  # Energy
    "WAVN": "em.wavenumber",  # Wavenumber
    "WAVE": "em.wl",  # Vacuum wavelength
    "VRAD": "spect.dopplerVeloc.radio",  # Radio velocity
    "VOPT": "spect.dopplerVeloc.opt",  # Optical velocity
    "ZOPT": "src.redshift",  # Redshift
    "AWAV": "em.wl",  # Air wavelength
    "VELO": "spect.dopplerVeloc",  # Apparent radial velocity
    "BETA": "custom:spect.doplerVeloc.beta",  # Beta factor (v/c)
    "STOKES": "phys.polarization.stokes",  # STOKES parameters
    # Time coordinates (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
    "TIME": "time",
    "TAI": "time",
    "TT": "time",
    "TDT": "time",
    "ET": "time",
    "IAT": "time",
    "UT1": "time",
    "UTC": "time",
    "GMT": "time",
    "GPS": "time",
    "TCG": "time",
    "TCB": "time",
    "TDB": "time",
    "LOCAL": "time",
    # Distance coordinates
    "DIST": "pos.distance",
    "DSUN": "custom:pos.distance.sunToObserver"
    # UT() and TT() are handled separately in world_axis_physical_types
}

# Keep a list of additional custom mappings that have been registered. This
# is kept as a list in case nested context managers are used
# (maintained by the custom_ctype_to_ucd_mapping context manager below).
CTYPE_TO_UCD1_CUSTOM = []
class custom_ctype_to_ucd_mapping:
    """
    A context manager that makes it possible to temporarily add new CTYPE to
    UCD1+ mapping used by :attr:`FITSWCSAPIMixin.world_axis_physical_types`.

    Parameters
    ----------
    mapping : dict
        A dictionary mapping a CTYPE value to a UCD1+ value

    Examples
    --------
    Consider a WCS with the following CTYPE::

        >>> from astropy.wcs import WCS
        >>> wcs = WCS(naxis=1)
        >>> wcs.wcs.ctype = ['SPAM']

    By default, :attr:`FITSWCSAPIMixin.world_axis_physical_types` returns `None`,
    but this can be overridden::

        >>> wcs.world_axis_physical_types
        [None]
        >>> with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
        ...     wcs.world_axis_physical_types
        ['food.spam']
    """

    def __init__(self, mapping):
        # Registration happens at construction time; the most recently
        # registered mapping goes to the front so it takes priority.
        self.mapping = mapping
        CTYPE_TO_UCD1_CUSTOM.insert(0, mapping)

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        # Unregister on exit regardless of whether an exception occurred.
        CTYPE_TO_UCD1_CUSTOM.remove(self.mapping)
class SlicedFITSWCS(SlicedLowLevelWCS, HighLevelWCSMixin):
    # Combines the slicing low-level wrapper with the high-level API mix-in;
    # all behavior comes from the two base classes.
    pass
class FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin):
"""
A mix-in class that is intended to be inherited by the
:class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API
"""
    @property
    def pixel_n_dim(self):
        # APE 14 low-level API: number of pixel dimensions, which for a FITS
        # WCS is the number of WCS axes (``self.naxis``).
        return self.naxis
    @property
    def world_n_dim(self):
        # APE 14 low-level API: number of world dimensions, one per CTYPE.
        return len(self.wcs.ctype)
@property
def array_shape(self):
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@array_shape.setter
def array_shape(self, value):
if value is None:
self.pixel_shape = None
else:
self.pixel_shape = value[::-1]
@property
def pixel_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis)
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError(
f"The number of data axes, {self.naxis}, does not equal the shape"
f" {len(value)}."
)
self._naxis = list(value)
    @property
    def pixel_bounds(self):
        # Per-axis pixel bounds as stored by the setter below, or None.
        return self._pixel_bounds
@pixel_bounds.setter
def pixel_bounds(self, value):
if value is None:
self._pixel_bounds = value
else:
if len(value) != self.naxis:
raise ValueError(
"The number of data axes, "
f"{self.naxis}, does not equal the number of "
f"pixel bounds {len(value)}."
)
self._pixel_bounds = list(value)
@property
def world_axis_physical_types(self):
types = []
# TODO: need to support e.g. TT(TAI)
for ctype in self.wcs.ctype:
if ctype.upper().startswith(("UT(", "TT(")):
types.append("time")
else:
ctype_name = ctype.split("-")[0]
for custom_mapping in CTYPE_TO_UCD1_CUSTOM:
if ctype_name in custom_mapping:
types.append(custom_mapping[ctype_name])
break
else:
types.append(CTYPE_TO_UCD1.get(ctype_name.upper(), None))
return types
@property
def world_axis_units(self):
units = []
for unit in self.wcs.cunit:
if unit is None:
unit = ""
elif isinstance(unit, u.Unit):
unit = unit.to_string(format="vounit")
else:
try:
unit = u.Unit(unit).to_string(format="vounit")
except u.UnitsError:
unit = ""
units.append(unit)
return units
    @property
    def world_axis_names(self):
        # World axis names taken directly from ``self.wcs.cname``.
        return list(self.wcs.cname)
@property
def axis_correlation_matrix(self):
# If there are any distortions present, we assume that there may be
# correlations between all axes. Maybe if some distortions only apply
# to the image plane we can improve this?
if self.has_distortion:
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
# Assuming linear world coordinates along each axis, the correlation
# matrix would be given by whether or not the PC matrix is zero
matrix = self.wcs.get_pc() != 0
# We now need to check specifically for celestial coordinates since
# these can assume correlations because of spherical distortions. For
# each celestial coordinate we copy over the pixel dependencies from
# the other celestial coordinates.
celestial = (self.wcs.axis_types // 1000) % 10 == 2
celestial_indices = np.nonzero(celestial)[0]
for world1 in celestial_indices:
for world2 in celestial_indices:
if world1 != world2:
matrix[world1] |= matrix[world2]
matrix[world2] |= matrix[world1]
return matrix
def pixel_to_world_values(self, *pixel_arrays):
world = self.all_pix2world(*pixel_arrays, 0)
return world[0] if self.world_n_dim == 1 else tuple(world)
def world_to_pixel_values(self, *world_arrays):
# avoid circular import
from astropy.wcs.wcs import NoConvergence
try:
pixel = self.all_world2pix(*world_arrays, 0)
except NoConvergence as e:
warnings.warn(str(e))
# use best_solution contained in the exception and format the same
# way as all_world2pix does (using _array_converter)
pixel = self._array_converter(
lambda *args: e.best_solution, "input", *world_arrays, 0
)
return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel)
    @property
    def world_axis_object_components(self):
        # First element of the cached (components, classes) pair computed by
        # _get_components_and_classes.
        return self._get_components_and_classes()[0]
    @property
    def world_axis_object_classes(self):
        # Second element of the cached (components, classes) pair computed by
        # _get_components_and_classes.
        return self._get_components_and_classes()[1]
    @property
    def serialized_classes(self):
        # world_axis_object_classes here contains real classes rather than
        # serialized (dict) descriptions, so this is always False.
        return False
    def _get_components_and_classes(self):
        """
        Compute and cache the pair ``(components, classes)`` backing
        ``world_axis_object_components`` and ``world_axis_object_classes``.
        """
        # The aim of this function is to return whatever is needed for
        # world_axis_object_components and world_axis_object_classes. It's easier
        # to figure it out in one go and then return the values and let the
        # properties return part of it.

        # Since this method might get called quite a few times, we need to cache
        # it. We start off by defining a hash based on the attributes of the
        # WCS that matter here (we can't just use the WCS object as a hash since
        # it is mutable)
        wcs_hash = (
            self.naxis,
            list(self.wcs.ctype),
            list(self.wcs.cunit),
            self.wcs.radesys,
            self.wcs.specsys,
            self.wcs.equinox,
            self.wcs.dateobs,
            self.wcs.lng,
            self.wcs.lat,
        )

        # If the cache is present, we need to check that the 'hash' matches.
        if getattr(self, "_components_and_classes_cache", None) is not None:
            cache = self._components_and_classes_cache
            if cache[0] == wcs_hash:
                return cache[1]
            else:
                # Stale cache: the WCS was mutated since the last call.
                self._components_and_classes_cache = None

        # Avoid circular imports by importing here
        from astropy.coordinates import EarthLocation, SkyCoord
        from astropy.time import Time, TimeDelta
        from astropy.time.formats import FITS_DEPRECATED_SCALES
        from astropy.wcs.utils import wcs_to_celestial_frame

        # One (key, index, attr) entry per world axis, filled in below.
        components = [None] * self.naxis
        classes = {}

        # Let's start off by checking whether the WCS has a pair of celestial
        # components
        if self.has_celestial:
            try:
                celestial_frame = wcs_to_celestial_frame(self)
            except ValueError:
                # Some WCSes, e.g. solar, can be recognized by WCSLIB as being
                # celestial but we don't necessarily have frames for them.
                celestial_frame = None
            else:
                kwargs = {}
                kwargs["frame"] = celestial_frame
                kwargs["unit"] = u.deg

                classes["celestial"] = (SkyCoord, (), kwargs)

                components[self.wcs.lng] = ("celestial", 0, "spherical.lon.degree")
                components[self.wcs.lat] = ("celestial", 1, "spherical.lat.degree")

        # Next, we check for spectral components
        if self.has_spectral:
            # Find index of spectral coordinate
            ispec = self.wcs.spec
            ctype = self.wcs.ctype[ispec][:4]
            ctype = ctype.upper()

            kwargs = {}

            # Determine observer location and velocity

            # TODO: determine how WCS standard would deal with observer on a
            # spacecraft far from earth. For now assume the obsgeo parameters,
            # if present, give the geocentric observer location.
            if np.isnan(self.wcs.obsgeo[0]):
                observer = None
            else:
                earth_location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)

                # Get the time scale from TIMESYS or fall back to 'utc'
                tscale = self.wcs.timesys or "utc"

                if np.isnan(self.wcs.mjdavg):
                    obstime = Time(
                        self.wcs.mjdobs,
                        format="mjd",
                        scale=tscale,
                        location=earth_location,
                    )
                else:
                    obstime = Time(
                        self.wcs.mjdavg,
                        format="mjd",
                        scale=tscale,
                        location=earth_location,
                    )
                observer_location = SkyCoord(earth_location.get_itrs(obstime=obstime))

                if self.wcs.specsys in VELOCITY_FRAMES:
                    frame = VELOCITY_FRAMES[self.wcs.specsys]
                    observer = observer_location.transform_to(frame)
                    if isinstance(frame, str):
                        observer = attach_zero_velocities(observer)
                    else:
                        # Frame instances in VELOCITY_FRAMES carry offset
                        # velocities (e.g. GALACTOC), so match differentials.
                        observer = update_differentials_to_match(
                            observer_location,
                            VELOCITY_FRAMES[self.wcs.specsys],
                            preserve_observer_frame=True,
                        )
                elif self.wcs.specsys == "TOPOCENT":
                    observer = attach_zero_velocities(observer_location)
                else:
                    raise NotImplementedError(
                        f"SPECSYS={self.wcs.specsys} not yet supported"
                    )

            # Determine target

            # This is tricker. In principle the target for each pixel is the
            # celestial coordinates of the pixel, but we then need to be very
            # careful about SSYSOBS which is tricky. For now, we set the
            # target using the reference celestial coordinate in the WCS (if
            # any).
            if self.has_celestial and celestial_frame is not None:
                # NOTE: celestial_frame was defined higher up

                # NOTE: we set the distance explicitly to avoid warnings in SpectralCoord
                target = SkyCoord(
                    self.wcs.crval[self.wcs.lng] * self.wcs.cunit[self.wcs.lng],
                    self.wcs.crval[self.wcs.lat] * self.wcs.cunit[self.wcs.lat],
                    frame=celestial_frame,
                    distance=1000 * u.kpc,
                )

                target = attach_zero_velocities(target)
            else:
                target = None

            # SpectralCoord does not work properly if either observer or target
            # are not convertible to ICRS, so if this is the case, we (for now)
            # drop the observer and target from the SpectralCoord and warn the
            # user.
            if observer is not None:
                try:
                    observer.transform_to(ICRS())
                except Exception:
                    warnings.warn(
                        "observer cannot be converted to ICRS, so will "
                        "not be set on SpectralCoord",
                        AstropyUserWarning,
                    )
                    observer = None

            if target is not None:
                try:
                    target.transform_to(ICRS())
                except Exception:
                    warnings.warn(
                        "target cannot be converted to ICRS, so will "
                        "not be set on SpectralCoord",
                        AstropyUserWarning,
                    )
                    target = None

            # NOTE: below we include Quantity in classes['spectral'] instead
            # of SpectralCoord - this is because we want to also be able to
            # accept plain quantities.
            if ctype == "ZOPT":

                def spectralcoord_from_redshift(redshift):
                    if isinstance(redshift, SpectralCoord):
                        return redshift
                    return SpectralCoord(
                        (redshift + 1) * self.wcs.restwav,
                        unit=u.m,
                        observer=observer,
                        target=target,
                    )

                def redshift_from_spectralcoord(spectralcoord):
                    # TODO: check target is consistent between WCS and SpectralCoord,
                    # if they are not the transformation doesn't make conceptual sense.
                    if (
                        observer is None
                        or spectralcoord.observer is None
                        or spectralcoord.target is None
                    ):
                        if observer is None:
                            msg = "No observer defined on WCS"
                        elif spectralcoord.observer is None:
                            msg = "No observer defined on SpectralCoord"
                        else:
                            msg = "No target defined on SpectralCoord"
                        warnings.warn(
                            f"{msg}, SpectralCoord "
                            "will be converted without any velocity "
                            "frame change",
                            AstropyUserWarning,
                        )
                        return spectralcoord.to_value(u.m) / self.wcs.restwav - 1.0
                    else:
                        return (
                            spectralcoord.with_observer_stationary_relative_to(
                                observer
                            ).to_value(u.m)
                            / self.wcs.restwav
                            - 1.0
                        )

                classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_redshift)
                components[self.wcs.spec] = ("spectral", 0, redshift_from_spectralcoord)

            elif ctype == "BETA":

                def spectralcoord_from_beta(beta):
                    if isinstance(beta, SpectralCoord):
                        return beta
                    return SpectralCoord(
                        beta * C_SI,
                        unit=u.m / u.s,
                        doppler_convention="relativistic",
                        doppler_rest=self.wcs.restwav * u.m,
                        observer=observer,
                        target=target,
                    )

                def beta_from_spectralcoord(spectralcoord):
                    # TODO: check target is consistent between WCS and SpectralCoord,
                    # if they are not the transformation doesn't make conceptual sense.
                    doppler_equiv = u.doppler_relativistic(self.wcs.restwav * u.m)
                    if (
                        observer is None
                        or spectralcoord.observer is None
                        or spectralcoord.target is None
                    ):
                        if observer is None:
                            msg = "No observer defined on WCS"
                        elif spectralcoord.observer is None:
                            msg = "No observer defined on SpectralCoord"
                        else:
                            msg = "No target defined on SpectralCoord"
                        warnings.warn(
                            f"{msg}, SpectralCoord "
                            "will be converted without any velocity "
                            "frame change",
                            AstropyUserWarning,
                        )
                        return spectralcoord.to_value(u.m / u.s, doppler_equiv) / C_SI
                    else:
                        return (
                            spectralcoord.with_observer_stationary_relative_to(
                                observer
                            ).to_value(u.m / u.s, doppler_equiv)
                            / C_SI
                        )

                classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_beta)
                components[self.wcs.spec] = ("spectral", 0, beta_from_spectralcoord)

            else:
                kwargs["unit"] = self.wcs.cunit[ispec]

                # Doppler conventions only apply when a rest frequency is set.
                if self.wcs.restfrq > 0:
                    if ctype == "VELO":
                        kwargs["doppler_convention"] = "relativistic"
                        kwargs["doppler_rest"] = self.wcs.restfrq * u.Hz
                    elif ctype == "VRAD":
                        kwargs["doppler_convention"] = "radio"
                        kwargs["doppler_rest"] = self.wcs.restfrq * u.Hz
                    elif ctype == "VOPT":
                        kwargs["doppler_convention"] = "optical"
                        kwargs["doppler_rest"] = self.wcs.restwav * u.m

                def spectralcoord_from_value(value):
                    if isinstance(value, SpectralCoord):
                        return value
                    return SpectralCoord(
                        value, observer=observer, target=target, **kwargs
                    )

                def value_from_spectralcoord(spectralcoord):
                    # TODO: check target is consistent between WCS and SpectralCoord,
                    # if they are not the transformation doesn't make conceptual sense.
                    if (
                        observer is None
                        or spectralcoord.observer is None
                        or spectralcoord.target is None
                    ):
                        if observer is None:
                            msg = "No observer defined on WCS"
                        elif spectralcoord.observer is None:
                            msg = "No observer defined on SpectralCoord"
                        else:
                            msg = "No target defined on SpectralCoord"
                        warnings.warn(
                            f"{msg}, SpectralCoord "
                            "will be converted without any velocity "
                            "frame change",
                            AstropyUserWarning,
                        )
                        return spectralcoord.to_value(**kwargs)
                    else:
                        return spectralcoord.with_observer_stationary_relative_to(
                            observer
                        ).to_value(**kwargs)

                classes["spectral"] = (u.Quantity, (), {}, spectralcoord_from_value)
                components[self.wcs.spec] = ("spectral", 0, value_from_spectralcoord)

        # We can then make sure we correctly return Time objects where appropriate
        # (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
        if "time" in self.world_axis_physical_types:
            multiple_time = self.world_axis_physical_types.count("time") > 1

            for i in range(self.naxis):
                if self.world_axis_physical_types[i] == "time":
                    if multiple_time:
                        name = f"time.{i}"
                    else:
                        name = "time"

                    # Initialize delta
                    reference_time_delta = None

                    # Extract time scale
                    scale = self.wcs.ctype[i].lower()

                    if scale == "time":
                        if self.wcs.timesys:
                            scale = self.wcs.timesys.lower()
                        else:
                            scale = "utc"

                    # Drop sub-scales
                    if "(" in scale:
                        pos = scale.index("(")
                        scale, subscale = scale[:pos], scale[pos + 1 : -1]
                        warnings.warn(
                            "Dropping unsupported sub-scale "
                            f"{subscale.upper()} from scale {scale.upper()}",
                            UserWarning,
                        )

                    # TODO: consider having GPS as a scale in Time
                    # For now GPS is not a scale, we approximate this by TAI - 19s
                    if scale == "gps":
                        reference_time_delta = TimeDelta(19, format="sec")
                        scale = "tai"
                    elif scale.upper() in FITS_DEPRECATED_SCALES:
                        scale = FITS_DEPRECATED_SCALES[scale.upper()]
                    elif scale not in Time.SCALES:
                        raise ValueError(f"Unrecognized time CTYPE={self.wcs.ctype[i]}")

                    # Determine location
                    trefpos = self.wcs.trefpos.lower()

                    if trefpos.startswith("topocent"):
                        # Note that some headers use TOPOCENT instead of TOPOCENTER
                        if np.any(np.isnan(self.wcs.obsgeo[:3])):
                            warnings.warn(
                                "Missing or incomplete observer location "
                                "information, setting location in Time to None",
                                UserWarning,
                            )
                            location = None
                        else:
                            location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
                    elif trefpos == "geocenter":
                        location = EarthLocation(0, 0, 0, unit=u.m)
                    elif trefpos == "":
                        location = None
                    else:
                        # TODO: implement support for more locations when Time supports it
                        warnings.warn(
                            f"Observation location '{trefpos}' is not "
                            "supported, setting location in Time to None",
                            UserWarning,
                        )
                        location = None

                    reference_time = Time(
                        np.nan_to_num(self.wcs.mjdref[0]),
                        np.nan_to_num(self.wcs.mjdref[1]),
                        format="mjd",
                        scale=scale,
                        location=location,
                    )

                    if reference_time_delta is not None:
                        reference_time = reference_time + reference_time_delta

                    # NOTE(review): these closures capture the loop variable
                    # ``reference_time`` late — with more than one time axis
                    # every converter appears to use the last axis' reference
                    # time; confirm, and bind it as a default argument if so.
                    def time_from_reference_and_offset(offset):
                        if isinstance(offset, Time):
                            return offset
                        return reference_time + TimeDelta(offset, format="sec")

                    def offset_from_time_and_reference(time):
                        return (time - reference_time).sec

                    classes[name] = (Time, (), {}, time_from_reference_and_offset)
                    components[i] = (name, 0, offset_from_time_and_reference)

        # Fallback: for any remaining components that haven't been identified, just
        # return Quantity as the class to use
        for i in range(self.naxis):
            if components[i] is None:
                name = self.wcs.ctype[i].split("-")[0].lower()
                if name == "":
                    name = "world"
                # Avoid key collisions with already-registered class names.
                while name in classes:
                    name += "_"
                classes[name] = (u.Quantity, (), {"unit": self.wcs.cunit[i]})
                components[i] = (name, 0, "value")

        # Keep a cached version of result
        self._components_and_classes_cache = wcs_hash, (components, classes)

        return components, classes
|
5ac71c004beb6e327aad921eab0219667d4223145acdaf6662eb3642f01f4ef2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
def test_wtbarr_i(tab_wcs_2di):
    """Check the ``i`` attribute of the first wtbarr element."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    assert wtb.i == 1
def test_wtbarr_m(tab_wcs_2di):
    """Check the ``m`` attribute of the first wtbarr element."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    assert wtb.m == 1
def test_wtbarr_kind(tab_wcs_2di):
    """Check the ``kind`` attribute of the first wtbarr element."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    assert wtb.kind == "c"
def test_wtbarr_extnam(tab_wcs_2di):
    """Check the ``extnam`` attribute of the first wtbarr element."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    assert wtb.extnam == "WCS-TABLE"
def test_wtbarr_extver(tab_wcs_2di):
    """Check the ``extver`` attribute of the first wtbarr element."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    assert wtb.extver == 1
def test_wtbarr_extlev(tab_wcs_2di):
    """Check the ``extlev`` attribute of the first wtbarr element."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    assert wtb.extlev == 1
def test_wtbarr_ttype(tab_wcs_2di):
    """Check the ``ttype`` attribute of the first wtbarr element."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    assert wtb.ttype == "wavelength"
def test_wtbarr_row(tab_wcs_2di):
    """Check the ``row`` attribute of the first wtbarr element."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    assert wtb.row == 1
def test_wtbarr_ndim(tab_wcs_2di):
    """Check the ``ndim`` attribute of the first wtbarr element."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    assert wtb.ndim == 3
def test_wtbarr_print(tab_wcs_2di, capfd):
    """print_contents() must match str() line by line."""
    wtb = tab_wcs_2di.wcs.wtb[0]
    wtb.print_contents()
    captured = capfd.readouterr()
    s = str(wtb)
    lines = s.split("\n")
    assert captured.out == s
    expected_prefix = [
        " i: 1",
        " m: 1",
        " kind: c",
        "extnam: WCS-TABLE",
        "extver: 1",
        "extlev: 1",
        " ttype: wavelength",
        " row: 1",
        " ndim: 3",
    ]
    for index, expected in enumerate(expected_prefix):
        assert lines[index] == expected
    assert lines[9].startswith("dimlen: ")
    assert lines[10] == " 0: 4"
    assert lines[11] == " 1: 2"
    assert lines[12].startswith("arrayp: ")
|
e4514c2b8be02aeb8fb10a9dd1fa6c4a7c68ccec86b3190a8ab5dd7250fb4ef0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from contextlib import nullcontext
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import ITRS, EarthLocation, SkyCoord
from astropy.io import fits
from astropy.time import Time
from astropy.units import Quantity
from astropy.utils import unbroadcast
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_contents, get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs import _wcs
from astropy.wcs.utils import (
_pixel_to_pixel_correlation_matrix,
_pixel_to_world_correlation_matrix,
_split_matrix,
add_stokes_axis_to_wcs,
celestial_frame_to_wcs,
custom_frame_to_wcs_mappings,
custom_wcs_to_frame_mappings,
fit_wcs_from_points,
is_proj_plane_distorted,
local_partial_pixel_derivatives,
non_celestial_pixel_scales,
obsgeo_to_frame,
pixel_to_pixel,
pixel_to_skycoord,
proj_plane_pixel_scales,
skycoord_to_pixel,
wcs_to_celestial_frame,
)
from astropy.wcs.wcs import (
WCS,
WCSSUB_LATITUDE,
WCSSUB_LONGITUDE,
FITSFixedWarning,
Sip,
)
from astropy.wcs.wcsapi.fitswcs import SlicedFITSWCS
def test_wcs_dropping():
    """dropaxis removes the matching diagonal entry from both PC and CD matrices."""
    diag = np.arange(1, 5)
    wcs = WCS(naxis=4)
    wcs.wcs.pc = np.zeros([4, 4])
    np.fill_diagonal(wcs.wcs.pc, diag)
    pc = wcs.wcs.pc  # reused below as a CD matrix
    for axis in range(4):
        remaining = np.delete(diag, axis)
        assert np.all(wcs.dropaxis(axis).wcs.get_pc().diagonal() == remaining)
    # Same behaviour when the matrix is stored as CD instead of PC.
    wcs = WCS(naxis=4)
    wcs.wcs.cd = pc
    for axis in range(4):
        remaining = np.delete(diag, axis)
        assert np.all(wcs.dropaxis(axis).wcs.get_pc().diagonal() == remaining)
def test_wcs_swapping():
    """swapaxes exchanges the corresponding diagonal entries of PC and CD."""
    wcs = WCS(naxis=4)
    wcs.wcs.pc = np.zeros([4, 4])
    np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
    pc = wcs.wcs.pc  # reused below as a CD matrix
    cases = [
        ((0, 1), [2, 1, 3, 4]),
        ((0, 3), [4, 2, 3, 1]),
        ((2, 3), [1, 2, 4, 3]),
    ]
    for (ax1, ax2), expected in cases:
        swapped = wcs.swapaxes(ax1, ax2)
        assert np.all(swapped.wcs.get_pc().diagonal() == np.array(expected))
    # Same behaviour when the matrix is stored as CD instead of PC.
    wcs = WCS(naxis=4)
    wcs.wcs.cd = pc
    for (ax1, ax2), expected in cases:
        swapped = wcs.swapaxes(ax1, ax2)
        assert np.all(swapped.wcs.get_pc().diagonal() == np.array(expected))
@pytest.mark.parametrize("ndim", (2, 3))
def test_add_stokes(ndim):
    """A STOKES axis can be inserted at every possible position of an n-D WCS."""
    wcs = WCS(naxis=ndim)
    for ii in range(ndim + 1):
        outwcs = add_stokes_axis_to_wcs(wcs, ii)
        assert outwcs.wcs.naxis == ndim + 1
        assert outwcs.wcs.ctype[ii] == "STOKES"
        assert outwcs.wcs.cname[ii] == "STOKES"
def test_slice():
    """WCS.slice adjusts CRPIX/CDELT and the stored _naxis for offsets and steps."""
    mywcs = WCS(naxis=2)
    mywcs.wcs.crval = [1, 1]
    mywcs.wcs.cdelt = [0.1, 0.1]
    mywcs.wcs.crpix = [1, 1]
    mywcs._naxis = [1000, 500]
    pscale = 0.1  # from cdelt
    slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
    assert np.all(slice_wcs.wcs.crpix == np.array([1, 0]))
    assert slice_wcs._naxis == [1000, 499]
    # test that CRPIX maps to CRVAL:
    assert_allclose(
        slice_wcs.wcs_pix2world(*slice_wcs.wcs.crpix, 1),
        slice_wcs.wcs.crval,
        rtol=0.0,
        atol=1e-6 * pscale,
    )
    # A stepped slice rescales CDELT and shifts CRPIX accordingly.
    slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
    assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
    assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
    assert slice_wcs._naxis == [250, 250]
    slice_wcs = mywcs.slice([slice(None, None, 2), slice(0, None, 2)])
    assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.2]))
    assert slice_wcs._naxis == [500, 250]
    # Non-integral values do not alter the naxis attribute
    with pytest.warns(AstropyUserWarning):
        slice_wcs = mywcs.slice([slice(50.0), slice(20.0)])
    assert slice_wcs._naxis == [1000, 500]
    with pytest.warns(AstropyUserWarning):
        slice_wcs = mywcs.slice([slice(50.0), slice(20)])
    assert slice_wcs._naxis == [20, 500]
    with pytest.warns(AstropyUserWarning):
        slice_wcs = mywcs.slice([slice(50), slice(20.5)])
    assert slice_wcs._naxis == [1000, 50]
def test_slice_with_sip():
    """Slicing a WCS with SIP distortion keeps CRPIX mapping to CRVAL."""
    mywcs = WCS(naxis=2)
    mywcs.wcs.crval = [1, 1]
    mywcs.wcs.cdelt = [0.1, 0.1]
    mywcs.wcs.crpix = [1, 1]
    mywcs._naxis = [1000, 500]
    mywcs.wcs.ctype = ["RA---TAN-SIP", "DEC--TAN-SIP"]
    # SIP forward-distortion polynomial coefficients (A for axis 1, B for axis 2).
    a = np.array(
        [
            [0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
            [0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
            [-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
            [-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
            [-2.81029767e-13, 0.0, 0.0, 0.0, 0.0],
        ]
    )
    b = np.array(
        [
            [0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
            [0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
            [6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
            [3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
            [-5.92438525e-13, 0.0, 0.0, 0.0, 0.0],
        ]
    )
    mywcs.sip = Sip(a, b, None, None, mywcs.wcs.crpix)
    mywcs.wcs.set()
    pscale = 0.1  # from cdelt
    slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
    # test that CRPIX maps to CRVAL:
    assert_allclose(
        slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
        slice_wcs.wcs.crval,
        rtol=0.0,
        atol=1e-6 * pscale,
    )
    slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
    # test that CRPIX maps to CRVAL:
    assert_allclose(
        slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
        slice_wcs.wcs.crval,
        rtol=0.0,
        atol=1e-6 * pscale,
    )
def test_slice_getitem():
    """WCS.__getitem__ slicing behaves like slice() with numpy axis ordering."""
    mywcs = WCS(naxis=2)
    mywcs.wcs.crval = [1, 1]
    mywcs.wcs.cdelt = [0.1, 0.1]
    mywcs.wcs.crpix = [1, 1]
    slice_wcs = mywcs[1::2, 0::4]
    assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
    assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
    mywcs.wcs.crpix = [2, 2]
    slice_wcs = mywcs[1::2, 0::4]
    assert np.all(slice_wcs.wcs.crpix == np.array([0.875, 0.75]))
    assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
    # Default: numpy order
    slice_wcs = mywcs[1::2]
    assert np.all(slice_wcs.wcs.crpix == np.array([2, 0.75]))
    assert np.all(slice_wcs.wcs.cdelt == np.array([0.1, 0.2]))
def test_slice_fitsorder():
    """slice(..., numpy_order=False) interprets the slice list in FITS axis order."""
    mywcs = WCS(naxis=2)
    mywcs.wcs.crval = [1, 1]
    mywcs.wcs.cdelt = [0.1, 0.1]
    mywcs.wcs.crpix = [1, 1]
    slice_wcs = mywcs.slice([slice(1, None), slice(0, None)], numpy_order=False)
    assert np.all(slice_wcs.wcs.crpix == np.array([0, 1]))
    slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)], numpy_order=False)
    assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 0.625]))
    assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.4]))
    # A single slice applies to the first FITS axis only.
    slice_wcs = mywcs.slice([slice(1, None, 2)], numpy_order=False)
    assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 1]))
    assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.1]))
def test_slice_wcs():
    """Integer indexing yields a SlicedFITSWCS; stepped slices are rejected."""
    mywcs = WCS(naxis=2)
    assert isinstance(mywcs[0], SlicedFITSWCS)
    with pytest.raises(IndexError, match="Slicing WCS with a step is not supported."):
        mywcs[0, ::2]
def test_axis_names():
    """axis_type_names is derived from CTYPE prefixes, with CNAME taking precedence."""
    expected = ["RA", "DEC", "VOPT", "STOKES"]
    mywcs = WCS(naxis=4)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN", "VOPT-LSR", "STOKES"]
    assert mywcs.axis_type_names == expected
    # Setting CNAME to the same strings must leave the names unchanged.
    mywcs.wcs.cname = expected
    assert mywcs.axis_type_names == expected
def test_celestial():
    """The .celestial sub-WCS keeps only the two sky axes of a 4-D WCS."""
    mywcs = WCS(naxis=4)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN", "VOPT", "STOKES"]
    sky = mywcs.celestial
    assert sky.axis_type_names == ["RA", "DEC"]
    assert tuple(sky.wcs.ctype) == ("RA---TAN", "DEC--TAN")
def test_wcs_to_celestial_frame():
    """wcs_to_celestial_frame maps CTYPE/EQUINOX/RADESYS combinations to frames.

    Fix: removed a leftover debug ``print(mywcs.to_header())`` that polluted
    the test output.
    """
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates.builtin_frames import FK4, FK5, ICRS, ITRS, Galactic
    # A bare WCS has no identifiable celestial frame.
    mywcs = WCS(naxis=2)
    mywcs.wcs.set()
    with pytest.raises(
        ValueError,
        match=(
            "Could not determine celestial frame "
            "corresponding to the specified WCS object"
        ),
    ):
        assert wcs_to_celestial_frame(mywcs) is None
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["XOFFSET", "YOFFSET"]
    mywcs.wcs.set()
    with pytest.raises(ValueError):
        assert wcs_to_celestial_frame(mywcs) is None
    # RA/DEC without equinox -> ICRS.
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    mywcs.wcs.set()
    frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, ICRS)
    # Julian equinox -> FK5.
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    mywcs.wcs.equinox = 1987.0
    mywcs.wcs.set()
    frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, FK5)
    assert frame.equinox == Time(1987.0, format="jyear")
    # Pre-1984 equinox -> FK4 (Besselian).
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    mywcs.wcs.equinox = 1982
    mywcs.wcs.set()
    frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, FK4)
    assert frame.equinox == Time(1982.0, format="byear")
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["GLON-SIN", "GLAT-SIN"]
    mywcs.wcs.set()
    frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, Galactic)
    # TLON/TLAT with DATE-OBS -> ITRS at that obstime.
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["TLON-CAR", "TLAT-CAR"]
    mywcs.wcs.dateobs = "2017-08-17T12:41:04.430"
    mywcs.wcs.set()
    frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, ITRS)
    assert frame.obstime == Time("2017-08-17T12:41:04.430")
    # Explicit RADESYS=ICRS wins regardless of the equinox value.
    for equinox in [np.nan, 1987, 1982]:
        mywcs = WCS(naxis=2)
        mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
        mywcs.wcs.radesys = "ICRS"
        mywcs.wcs.equinox = equinox
        mywcs.wcs.set()
        frame = wcs_to_celestial_frame(mywcs)
        assert isinstance(frame, ICRS)
    # Flipped order
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["DEC--TAN", "RA---TAN"]
    mywcs.wcs.set()
    frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, ICRS)
    # More than two dimensions
    mywcs = WCS(naxis=3)
    mywcs.wcs.ctype = ["DEC--TAN", "VELOCITY", "RA---TAN"]
    mywcs.wcs.set()
    frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, ICRS)
    mywcs = WCS(naxis=3)
    mywcs.wcs.ctype = ["GLAT-CAR", "VELOCITY", "GLON-CAR"]
    mywcs.wcs.set()
    frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, Galactic)
def test_wcs_to_celestial_frame_correlated():
    # Regression test for a bug that caused wcs_to_celestial_frame to fail when
    # the celestial axes were correlated with other axes.
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates.builtin_frames import ICRS
    mywcs = WCS(naxis=3)
    mywcs.wcs.ctype = "RA---TAN", "DEC--TAN", "FREQ"
    # An all-ones CD matrix correlates every axis with every other.
    mywcs.wcs.cd = np.ones((3, 3))
    mywcs.wcs.set()
    frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, ICRS)
def test_wcs_to_celestial_frame_extend():
    """custom_wcs_to_frame_mappings extends frame identification only within its context."""
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["XOFFSET", "YOFFSET"]
    mywcs.wcs.set()
    with pytest.raises(ValueError):
        wcs_to_celestial_frame(mywcs)
    class OffsetFrame:
        pass
    def identify_offset(wcs):
        # Returns a frame only for *OFFSET/*OFFSET axis pairs; None otherwise.
        if wcs.wcs.ctype[0].endswith("OFFSET") and wcs.wcs.ctype[1].endswith("OFFSET"):
            return OffsetFrame()
    with custom_wcs_to_frame_mappings(identify_offset):
        frame = wcs_to_celestial_frame(mywcs)
    assert isinstance(frame, OffsetFrame)
    # Check that things are back to normal after the context manager
    with pytest.raises(ValueError):
        wcs_to_celestial_frame(mywcs)
def test_celestial_frame_to_wcs():
    """celestial_frame_to_wcs builds the expected CTYPE/RADESYS/EQUINOX per frame."""
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates import (
        FK4,
        FK5,
        ICRS,
        ITRS,
        BaseCoordinateFrame,
        FK4NoETerms,
        Galactic,
    )
    class FakeFrame(BaseCoordinateFrame):
        pass
    # An unknown frame type is rejected.
    frame = FakeFrame()
    with pytest.raises(
        ValueError,
        match=(
            r"Could not determine WCS corresponding to the specified coordinate frame."
        ),
    ):
        celestial_frame_to_wcs(frame)
    frame = ICRS()
    mywcs = celestial_frame_to_wcs(frame)
    mywcs.wcs.set()
    assert tuple(mywcs.wcs.ctype) == ("RA---TAN", "DEC--TAN")
    assert mywcs.wcs.radesys == "ICRS"
    assert np.isnan(mywcs.wcs.equinox)
    assert mywcs.wcs.lonpole == 180
    assert mywcs.wcs.latpole == 0
    frame = FK5(equinox="J1987")
    mywcs = celestial_frame_to_wcs(frame)
    assert tuple(mywcs.wcs.ctype) == ("RA---TAN", "DEC--TAN")
    assert mywcs.wcs.radesys == "FK5"
    assert mywcs.wcs.equinox == 1987.0
    frame = FK4(equinox="B1982")
    mywcs = celestial_frame_to_wcs(frame)
    assert tuple(mywcs.wcs.ctype) == ("RA---TAN", "DEC--TAN")
    assert mywcs.wcs.radesys == "FK4"
    assert mywcs.wcs.equinox == 1982.0
    frame = FK4NoETerms(equinox="B1982")
    mywcs = celestial_frame_to_wcs(frame)
    assert tuple(mywcs.wcs.ctype) == ("RA---TAN", "DEC--TAN")
    assert mywcs.wcs.radesys == "FK4-NO-E"
    assert mywcs.wcs.equinox == 1982.0
    # Galactic has no RADESYS/EQUINOX.
    frame = Galactic()
    mywcs = celestial_frame_to_wcs(frame)
    assert tuple(mywcs.wcs.ctype) == ("GLON-TAN", "GLAT-TAN")
    assert mywcs.wcs.radesys == ""
    assert np.isnan(mywcs.wcs.equinox)
    # The projection keyword selects the CTYPE algorithm code.
    frame = Galactic()
    mywcs = celestial_frame_to_wcs(frame, projection="CAR")
    assert tuple(mywcs.wcs.ctype) == ("GLON-CAR", "GLAT-CAR")
    assert mywcs.wcs.radesys == ""
    assert np.isnan(mywcs.wcs.equinox)
    frame = Galactic()
    mywcs = celestial_frame_to_wcs(frame, projection="CAR")
    mywcs.wcs.crval = [100, -30]
    mywcs.wcs.set()
    assert_allclose((mywcs.wcs.lonpole, mywcs.wcs.latpole), (180, 60))
    frame = ITRS(obstime=Time("2017-08-17T12:41:04.43"))
    mywcs = celestial_frame_to_wcs(frame, projection="CAR")
    assert tuple(mywcs.wcs.ctype) == ("TLON-CAR", "TLAT-CAR")
    assert mywcs.wcs.radesys == "ITRS"
    assert mywcs.wcs.dateobs == "2017-08-17T12:41:04.430"
    # With no obstime, ITRS defaults DATE-OBS to J2000 (UTC).
    frame = ITRS()
    mywcs = celestial_frame_to_wcs(frame, projection="CAR")
    assert tuple(mywcs.wcs.ctype) == ("TLON-CAR", "TLAT-CAR")
    assert mywcs.wcs.radesys == "ITRS"
    assert mywcs.wcs.dateobs == Time("J2000").utc.fits
def test_celestial_frame_to_wcs_extend():
    """custom_frame_to_wcs_mappings extends WCS construction only within its context."""
    class OffsetFrame:
        pass
    frame = OffsetFrame()
    with pytest.raises(ValueError):
        celestial_frame_to_wcs(frame)
    def identify_offset(frame, projection=None):
        # Returns a WCS only for OffsetFrame instances; None otherwise.
        if isinstance(frame, OffsetFrame):
            wcs = WCS(naxis=2)
            wcs.wcs.ctype = ["XOFFSET", "YOFFSET"]
            return wcs
    with custom_frame_to_wcs_mappings(identify_offset):
        mywcs = celestial_frame_to_wcs(frame)
    assert tuple(mywcs.wcs.ctype) == ("XOFFSET", "YOFFSET")
    # Check that things are back to normal after the context manager
    with pytest.raises(ValueError):
        celestial_frame_to_wcs(frame)
def test_pixscale_nodrop():
    """Pixel scales equal |CDELT| for a simple celestial WCS, regardless of sign."""
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    for cdelt in ([0.1, 0.2], [-0.1, 0.2]):
        mywcs.wcs.cdelt = cdelt
        assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
def test_pixscale_withdrop():
    """Pixel scales of the .celestial sub-WCS ignore the non-sky axis."""
    mywcs = WCS(naxis=3)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN", "VOPT"]
    for cdelt in ([0.1, 0.2, 1], [-0.1, 0.2, 1]):
        mywcs.wcs.cdelt = cdelt
        assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
def test_pixscale_cd():
    """Pixel scales come from the CD matrix, with the sign dropped."""
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    mywcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
    assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
@pytest.mark.parametrize("angle", (30, 45, 60, 75))
def test_pixscale_cd_rotated(angle):
    """A pure rotation in the CD matrix leaves the pixel scales unchanged."""
    theta = np.radians(angle)
    c, s = np.cos(theta), np.sin(theta)
    scale = 0.1
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    mywcs.wcs.cd = scale * np.array([[c, -s], [s, c]])
    assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize("angle", (30, 45, 60, 75))
def test_pixscale_pc_rotated(angle):
    """A pure rotation in the PC matrix leaves the |CDELT| pixel scales unchanged."""
    theta = np.radians(angle)
    c, s = np.cos(theta), np.sin(theta)
    mywcs = WCS(naxis=2)
    mywcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    mywcs.wcs.cdelt = [-0.1, 0.1]
    mywcs.wcs.pc = np.array([[c, -s], [s, c]])
    assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize(
    ("cdelt", "pc", "pccd"),
    (
        ([0.1, 0.2], np.eye(2), np.diag([0.1, 0.2])),
        ([0.1, 0.2, 0.3], np.eye(3), np.diag([0.1, 0.2, 0.3])),
        ([1, 1, 1], np.diag([0.1, 0.2, 0.3]), np.diag([0.1, 0.2, 0.3])),
    ),
)
def test_pixel_scale_matrix(cdelt, pc, pccd):
    """pixel_scale_matrix combines CDELT and PC into the expected matrix."""
    mywcs = WCS(naxis=(len(cdelt)))
    mywcs.wcs.cdelt = cdelt
    mywcs.wcs.pc = pc
    assert_almost_equal(mywcs.pixel_scale_matrix, pccd)
@pytest.mark.parametrize(
    ("ctype", "cel"),
    (
        (["RA---TAN", "DEC--TAN"], True),
        (["RA---TAN", "DEC--TAN", "FREQ"], False),
        (["RA---TAN", "FREQ"], False),
    ),
)
def test_is_celestial(ctype, cel):
    """is_celestial is True only when the WCS is purely a lon/lat sky WCS."""
    wcs = WCS(naxis=len(ctype))
    wcs.wcs.ctype = ctype
    assert wcs.is_celestial == cel
@pytest.mark.parametrize(
    ("ctype", "cel"),
    (
        (["RA---TAN", "DEC--TAN"], True),
        (["RA---TAN", "DEC--TAN", "FREQ"], True),
        (["RA---TAN", "FREQ"], False),
    ),
)
def test_has_celestial(ctype, cel):
    """has_celestial is True whenever a complete lon/lat pair is present."""
    wcs = WCS(naxis=len(ctype))
    wcs.wcs.ctype = ctype
    assert wcs.has_celestial == cel
def test_has_celestial_correlated():
    # Regression test for astropy/astropy#8416 - has_celestial failed when
    # celestial axes were correlated with other axes.
    mywcs = WCS(naxis=3)
    mywcs.wcs.ctype = "RA---TAN", "DEC--TAN", "FREQ"
    # An all-ones CD matrix correlates every axis with every other.
    mywcs.wcs.cd = np.ones((3, 3))
    mywcs.wcs.set()
    assert mywcs.has_celestial
@pytest.mark.parametrize(
    ("cdelt", "pc", "cd", "check_warning"),
    (
        (np.array([0.1, 0.2]), np.eye(2), np.eye(2), True),
        (np.array([1, 1]), np.diag([0.1, 0.2]), np.eye(2), True),
        (np.array([0.1, 0.2]), np.eye(2), None, False),
        (np.array([0.1, 0.2]), None, np.eye(2), True),
    ),
)
def test_noncelestial_scale(cdelt, pc, cd, check_warning):
    """non_celestial_pixel_scales handles CDELT/PC/CD combinations; setting
    CDELT while a CD matrix is present must warn that CDELT is ignored."""
    mywcs = WCS(naxis=2)
    if cd is not None:
        mywcs.wcs.cd = cd
    if pc is not None:
        mywcs.wcs.pc = pc
    # TODO: Some inputs emit RuntimeWarning from here onwards.
    # Fix the test data. See @nden's comment in PR 9010.
    if check_warning:
        ctx = pytest.warns()
    else:
        ctx = nullcontext()
    with ctx as warning_lines:
        mywcs.wcs.cdelt = cdelt
    if check_warning:
        for w in warning_lines:
            assert issubclass(w.category, RuntimeWarning)
            assert "cdelt will be ignored since cd is present" in str(w.message)
    mywcs.wcs.ctype = ["RA---TAN", "FREQ"]
    ps = non_celestial_pixel_scales(mywcs)
    assert_almost_equal(ps.to_value(u.deg), np.array([0.1, 0.2]))
@pytest.mark.parametrize("mode", ["all", "wcs"])
def test_skycoord_to_pixel(mode):
    """skycoord_to_pixel and pixel_to_skycoord round-trip in both transform modes."""
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates import SkyCoord
    header = get_pkg_data_contents("data/maps/1904-66_TAN.hdr", encoding="binary")
    wcs = WCS(header)
    ref = SkyCoord(0.1 * u.deg, -89.0 * u.deg, frame="icrs")
    xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
    # WCS is in FK5 so we need to transform back to ICRS
    new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to("icrs")
    assert_allclose(new.ra.degree, ref.ra.degree)
    assert_allclose(new.dec.degree, ref.dec.degree)
    # Make sure you can specify a different class using ``cls`` keyword
    class SkyCoord2(SkyCoord):
        pass
    new2 = pixel_to_skycoord(xp, yp, wcs, mode=mode, cls=SkyCoord2).transform_to("icrs")
    assert new2.__class__ is SkyCoord2
    assert_allclose(new2.ra.degree, ref.ra.degree)
    assert_allclose(new2.dec.degree, ref.dec.degree)
def test_skycoord_to_pixel_swapped():
    # Regression test for a bug that caused skycoord_to_pixel and
    # pixel_to_skycoord to not work correctly if the axes were swapped in the
    # WCS.
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates import SkyCoord
    header = get_pkg_data_contents("data/maps/1904-66_TAN.hdr", encoding="binary")
    wcs = WCS(header)
    # Build a WCS with latitude/longitude axes in reversed order.
    wcs_swapped = wcs.sub([WCSSUB_LATITUDE, WCSSUB_LONGITUDE])
    ref = SkyCoord(0.1 * u.deg, -89.0 * u.deg, frame="icrs")
    xp1, yp1 = skycoord_to_pixel(ref, wcs)
    xp2, yp2 = skycoord_to_pixel(ref, wcs_swapped)
    assert_allclose(xp1, xp2)
    assert_allclose(yp1, yp2)
    # WCS is in FK5 so we need to transform back to ICRS
    new1 = pixel_to_skycoord(xp1, yp1, wcs).transform_to("icrs")
    new2 = pixel_to_skycoord(xp1, yp1, wcs_swapped).transform_to("icrs")
    assert_allclose(new1.ra.degree, new2.ra.degree)
    assert_allclose(new1.dec.degree, new2.dec.degree)
def test_is_proj_plane_distorted():
    """is_proj_plane_distorted detects non-orthogonal CD matrices and SIP distortion."""
    # non-orthogonal CD:
    wcs = WCS(naxis=2)
    wcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    assert is_proj_plane_distorted(wcs)
    # almost orthogonal CD:
    wcs.wcs.cd = [[0.1 + 2.0e-7, 1.7e-7], [1.2e-7, 0.1 - 1.3e-7]]
    assert not is_proj_plane_distorted(wcs)
    # real case:
    header = get_pkg_data_filename("data/sip.fits")
    with pytest.warns(FITSFixedWarning):
        wcs = WCS(header)
    assert is_proj_plane_distorted(wcs)
@pytest.mark.parametrize("mode", ["all", "wcs"])
def test_skycoord_to_pixel_distortions(mode):
    """The sky<->pixel round-trip also holds for a SIP-distorted WCS."""
    # Import astropy.coordinates here to avoid circular imports
    from astropy.coordinates import SkyCoord
    header = get_pkg_data_filename("data/sip.fits")
    with pytest.warns(FITSFixedWarning):
        wcs = WCS(header)
    ref = SkyCoord(202.50 * u.deg, 47.19 * u.deg, frame="icrs")
    xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
    # WCS is in FK5 so we need to transform back to ICRS
    new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to("icrs")
    assert_allclose(new.ra.degree, ref.ra.degree)
    assert_allclose(new.dec.degree, ref.dec.degree)
@pytest.fixture
def spatial_wcs_2d_small_angle():
    """
    This WCS has an almost linear correlation between the pixel and world axes
    close to the reference pixel.
    """
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]  # helioprojective lon/lat
    wcs.wcs.crpix = [3.0] * 2
    wcs.wcs.cdelt = [0.002] * 2
    wcs.wcs.crval = [0] * 2
    wcs.wcs.set()
    return wcs
def test_local_pixel_derivatives(spatial_wcs_2d_small_angle):
    """Near CRPIX the pixel derivatives equal CDELT (or unity when normalized)."""
    not_diag = np.logical_not(np.diag([1, 1]))
    # At (or close to) the reference pixel this should equal the cdelt
    derivs = local_partial_pixel_derivatives(spatial_wcs_2d_small_angle, 3, 3)
    np.testing.assert_allclose(np.diag(derivs), spatial_wcs_2d_small_angle.wcs.cdelt)
    np.testing.assert_allclose(derivs[not_diag].flat, [0, 0], atol=1e-10)
    # Far away from the reference pixel this should not equal the cdelt
    derivs = local_partial_pixel_derivatives(spatial_wcs_2d_small_angle, 3e4, 3e4)
    assert not np.allclose(np.diag(derivs), spatial_wcs_2d_small_angle.wcs.cdelt)
    # At (or close to) the reference pixel this should equal the cdelt
    derivs = local_partial_pixel_derivatives(
        spatial_wcs_2d_small_angle, 3, 3, normalize_by_world=True
    )
    np.testing.assert_allclose(np.diag(derivs), [1, 1])
    np.testing.assert_allclose(derivs[not_diag].flat, [0, 0], atol=1e-8)
def test_pixel_to_world_correlation_matrix_celestial():
    """Two correlated sky axes collapse into a single SkyCoord world object."""
    w = WCS(naxis=2)
    w.wcs.ctype = "RA---TAN", "DEC--TAN"
    w.wcs.set()
    assert_equal(w.axis_correlation_matrix, [[1, 1], [1, 1]])
    corr, classes = _pixel_to_world_correlation_matrix(w)
    assert classes == [SkyCoord]
    assert_equal(corr, [[1, 1]])
def test_pixel_to_world_correlation_matrix_spectral_cube_uncorrelated():
    """Sky axes collapse to one SkyCoord row; FREQ stays a separate Quantity row."""
    wcs = WCS(naxis=3)
    wcs.wcs.ctype = "RA---TAN", "FREQ", "DEC--TAN"
    wcs.wcs.set()
    assert_equal(wcs.axis_correlation_matrix, [[1, 0, 1], [0, 1, 0], [1, 0, 1]])
    matrix, classes = _pixel_to_world_correlation_matrix(wcs)
    assert_equal(matrix, [[1, 0, 1], [0, 1, 0]])
    assert classes == [SkyCoord, Quantity]
def test_pixel_to_world_correlation_matrix_spectral_cube_correlated():
    """With a full CD matrix every world object depends on every pixel axis."""
    wcs = WCS(naxis=3)
    wcs.wcs.ctype = "RA---TAN", "FREQ", "DEC--TAN"
    wcs.wcs.cd = np.ones((3, 3))
    wcs.wcs.set()
    assert_equal(wcs.axis_correlation_matrix, [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    matrix, classes = _pixel_to_world_correlation_matrix(wcs)
    assert_equal(matrix, [[1, 1, 1], [1, 1, 1]])
    assert classes == [SkyCoord, Quantity]
def test_pixel_to_pixel_correlation_matrix_celestial():
    """Matching sky axes in swapped order are fully correlated pixel-to-pixel."""
    wcs_in = WCS(naxis=2)
    wcs_in.wcs.ctype = "RA---TAN", "DEC--TAN"
    wcs_in.wcs.set()
    wcs_out = WCS(naxis=2)
    wcs_out.wcs.ctype = "DEC--TAN", "RA---TAN"
    wcs_out.wcs.set()
    matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
    assert_equal(matrix, [[1, 1], [1, 1]])
def test_pixel_to_pixel_correlation_matrix_spectral_cube_uncorrelated():
    """Sky axes correlate with sky axes; FREQ only maps to FREQ."""
    wcs_in = WCS(naxis=3)
    wcs_in.wcs.ctype = "RA---TAN", "DEC--TAN", "FREQ"
    wcs_in.wcs.set()
    wcs_out = WCS(naxis=3)
    wcs_out.wcs.ctype = "DEC--TAN", "FREQ", "RA---TAN"
    wcs_out.wcs.set()
    matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
    assert_equal(matrix, [[1, 1, 0], [0, 0, 1], [1, 1, 0]])
def test_pixel_to_pixel_correlation_matrix_spectral_cube_correlated():
    # NOTE: only make one of the WCSes have correlated axes to really test this
    wcs_in = WCS(naxis=3)
    wcs_in.wcs.ctype = "RA---TAN", "DEC--TAN", "FREQ"
    wcs_in.wcs.set()
    wcs_out = WCS(naxis=3)
    wcs_out.wcs.ctype = "DEC--TAN", "FREQ", "RA---TAN"
    # An all-ones CD matrix correlates every axis of the output WCS.
    wcs_out.wcs.cd = np.ones((3, 3))
    wcs_out.wcs.set()
    matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
    assert_equal(matrix, [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_pixel_to_pixel_correlation_matrix_mismatch():
    """Incompatible WCS pairs raise ValueError with a specific message."""
    # Different number of world coordinates.
    wcs_in = WCS(naxis=2)
    wcs_in.wcs.ctype = "RA---TAN", "DEC--TAN"
    wcs_in.wcs.set()
    wcs_out = WCS(naxis=3)
    wcs_out.wcs.ctype = "DEC--TAN", "FREQ", "RA---TAN"
    wcs_out.wcs.set()
    with pytest.raises(
        ValueError, match=r"The two WCS return a different number of world coordinates"
    ):
        _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
    # Same number of world coordinates but different world types.
    wcs3 = WCS(naxis=2)
    wcs3.wcs.ctype = "FREQ", "PIXEL"
    wcs3.wcs.set()
    with pytest.raises(
        ValueError, match=r"The world coordinate types of the two WCS do not match"
    ):
        _pixel_to_pixel_correlation_matrix(wcs_out, wcs3)
    # Two axes of the same world type make the ordering ambiguous.
    wcs4 = WCS(naxis=4)
    wcs4.wcs.ctype = "RA---TAN", "DEC--TAN", "Q1", "Q2"
    wcs4.wcs.cunit = ["deg", "deg", "m/s", "m/s"]
    wcs4.wcs.set()
    wcs5 = WCS(naxis=4)
    wcs5.wcs.ctype = "Q1", "RA---TAN", "DEC--TAN", "Q2"
    wcs5.wcs.cunit = ["m/s", "deg", "deg", "m/s"]
    wcs5.wcs.set()
    with pytest.raises(
        ValueError,
        match=(
            "World coordinate order doesn't match and automatic matching is ambiguous"
        ),
    ):
        _pixel_to_pixel_correlation_matrix(wcs4, wcs5)
def test_pixel_to_pixel_correlation_matrix_nonsquare():
    # Here we set up an input WCS that maps 3 pixel coordinates to 4 world
    # coordinates - the idea is to make sure that things work fine in cases
    # where the number of input and output pixel coordinates do not match.
    class FakeWCS:
        # Minimal stand-in implementing only the low-level WCS attributes
        # that _pixel_to_pixel_correlation_matrix reads.
        pass
    wcs_in = FakeWCS()
    wcs_in.low_level_wcs = wcs_in
    wcs_in.pixel_n_dim = 3
    wcs_in.world_n_dim = 4
    wcs_in.axis_correlation_matrix = [
        [True, True, False],
        [True, True, False],
        [True, True, False],
        [False, False, True],
    ]
    wcs_in.world_axis_object_components = [
        ("spat", "ra", "ra.degree"),
        ("spat", "dec", "dec.degree"),
        ("spec", 0, "value"),
        ("time", 0, "utc.value"),
    ]
    wcs_in.world_axis_object_classes = {
        "spat": ("astropy.coordinates.SkyCoord", (), {"frame": "icrs"}),
        "spec": ("astropy.units.Wavelength", (None,), {}),
        "time": ("astropy.time.Time", (None,), {"format": "mjd", "scale": "utc"}),
    }
    wcs_out = FakeWCS()
    wcs_out.low_level_wcs = wcs_out
    wcs_out.pixel_n_dim = 4
    wcs_out.world_n_dim = 4
    wcs_out.axis_correlation_matrix = [
        [True, False, False, False],
        [False, True, True, False],
        [False, True, True, False],
        [False, False, False, True],
    ]
    wcs_out.world_axis_object_components = [
        ("spec", 0, "value"),
        ("spat", "ra", "ra.degree"),
        ("spat", "dec", "dec.degree"),
        ("time", 0, "utc.value"),
    ]
    wcs_out.world_axis_object_classes = wcs_in.world_axis_object_classes
    matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
    matrix = matrix.astype(int)
    # The shape should be (n_pixel_out, n_pixel_in)
    assert matrix.shape == (4, 3)
    expected = np.array([[1, 1, 0], [1, 1, 0], [1, 1, 0], [0, 0, 1]])
    assert_equal(matrix, expected)
def test_split_matrix():
    """_split_matrix partitions a correlation matrix into independent blocks
    of (row indices, column indices)."""
    assert _split_matrix(np.array([[1]])) == [([0], [0])]
    assert _split_matrix(
        np.array(
            [
                [1, 1],
                [1, 1],
            ]
        )
    ) == [([0, 1], [0, 1])]
    # Block-diagonal input splits into two independent blocks.
    assert _split_matrix(
        np.array(
            [
                [1, 1, 0],
                [1, 1, 0],
                [0, 0, 1],
            ]
        )
    ) == [([0, 1], [0, 1]), ([2], [2])]
    # A permutation matrix splits into singletons.
    assert _split_matrix(
        np.array(
            [
                [0, 1, 0],
                [1, 0, 0],
                [0, 0, 1],
            ]
        )
    ) == [([0], [1]), ([1], [0]), ([2], [2])]
    # A fully coupled matrix cannot be split.
    assert _split_matrix(
        np.array(
            [
                [0, 1, 1],
                [1, 0, 0],
                [1, 0, 1],
            ]
        )
    ) == [([0, 1, 2], [0, 1, 2])]
def test_pixel_to_pixel():
    """pixel_to_pixel preserves broadcasting and round-trips between two WCSes.

    Fix: the shape assertions after the round trip previously re-checked
    X2/Y2/Z2 (a copy-paste slip) instead of the round-tripped X3/Y3/Z3.
    """
    wcs_in = WCS(naxis=3)
    wcs_in.wcs.ctype = "DEC--TAN", "FREQ", "RA---TAN"
    wcs_in.wcs.set()
    wcs_out = WCS(naxis=3)
    wcs_out.wcs.ctype = "GLON-CAR", "GLAT-CAR", "FREQ"
    wcs_out.wcs.set()
    # First try with scalars
    with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
        x, y, z = pixel_to_pixel(wcs_in, wcs_out, 1, 2, 3)
    assert x.shape == ()
    assert y.shape == ()
    assert z.shape == ()
    # Now try with broadcasted arrays
    x = np.linspace(10, 20, 10)
    y = np.linspace(10, 20, 20)
    z = np.linspace(10, 20, 30)
    Z1, Y1, X1 = np.meshgrid(z, y, x, indexing="ij", copy=False)
    with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
        X2, Y2, Z2 = pixel_to_pixel(wcs_in, wcs_out, X1, Y1, Z1)
    # The final arrays should have the correct shape
    assert X2.shape == (30, 20, 10)
    assert Y2.shape == (30, 20, 10)
    assert Z2.shape == (30, 20, 10)
    # But behind the scenes should also be broadcasted
    assert unbroadcast(X2).shape == (30, 1, 10)
    assert unbroadcast(Y2).shape == (30, 1, 10)
    assert unbroadcast(Z2).shape == (20, 1)
    # We can put the values back through the function to ensure round-tripping
    with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
        X3, Y3, Z3 = pixel_to_pixel(wcs_out, wcs_in, X2, Y2, Z2)
    # The round-tripped arrays should have the correct shape
    assert X3.shape == (30, 20, 10)
    assert Y3.shape == (30, 20, 10)
    assert Z3.shape == (30, 20, 10)
    # But behind the scenes should also be broadcasted
    assert unbroadcast(X3).shape == (30, 1, 10)
    assert unbroadcast(Y3).shape == (20, 1)
    assert unbroadcast(Z3).shape == (30, 1, 10)
    # And these arrays should match the input
    assert_allclose(X1, X3)
    assert_allclose(Y1, Y3)
    assert_allclose(Z1, Z3)
def test_pixel_to_pixel_correlated():
    """With fully correlated sky axes, broadcasting offers no memory savings."""
    wcs_in = WCS(naxis=2)
    wcs_in.wcs.ctype = "DEC--TAN", "RA---TAN"
    wcs_in.wcs.set()
    wcs_out = WCS(naxis=2)
    wcs_out.wcs.ctype = "GLON-CAR", "GLAT-CAR"
    wcs_out.wcs.set()
    # First try with scalars
    x, y = pixel_to_pixel(wcs_in, wcs_out, 1, 2)
    assert x.shape == ()
    assert y.shape == ()
    # Now try with broadcasted arrays
    x = np.linspace(10, 20, 10)
    y = np.linspace(10, 20, 20)
    Y1, X1 = np.meshgrid(y, x, indexing="ij", copy=False)
    Y2, X2 = pixel_to_pixel(wcs_in, wcs_out, X1, Y1)
    # The final arrays should have the correct shape
    assert X2.shape == (20, 10)
    assert Y2.shape == (20, 10)
    # and there are no efficiency gains here since the celestial axes are correlated
    assert unbroadcast(X2).shape == (20, 10)
def test_pixel_to_pixel_1d():
    # Simple test to make sure that when WCS only returns one world coordinate
    # this still works correctly (since this requires special treatment behind
    # the scenes).
    wcs_in = WCS(naxis=1)
    wcs_in.wcs.ctype = ("COORD1",)
    wcs_in.wcs.cunit = ("nm",)
    wcs_in.wcs.set()
    wcs_out = WCS(naxis=1)
    wcs_out.wcs.ctype = ("COORD2",)
    wcs_out.wcs.cunit = ("cm",)
    wcs_out.wcs.set()
    # First try with a scalar
    x = pixel_to_pixel(wcs_in, wcs_out, 1)
    assert x.shape == ()
    # Next with a regular array
    x = np.linspace(10, 20, 10)
    x = pixel_to_pixel(wcs_in, wcs_out, x)
    assert x.shape == (10,)
    # And now try with a broadcasted array
    x = np.broadcast_to(np.linspace(10, 20, 10), (4, 10))
    x = pixel_to_pixel(wcs_in, wcs_out, x)
    assert x.shape == (4, 10)
    # The broadcasting of the input should be retained
    assert unbroadcast(x).shape == (10,)
# Plain linear TAN-projection header (CD matrix only, no distortion terms).
header_str_linear = """
XTENSION= 'IMAGE ' / Image extension
BITPIX = -32 / array data type
NAXIS = 2 / number of array dimensions
NAXIS1 = 50
NAXIS2 = 50
PCOUNT = 0 / number of parameters
GCOUNT = 1 / number of groups
RADESYS = 'ICRS '
EQUINOX = 2000.0
WCSAXES = 2
CTYPE1 = 'RA---TAN'
CTYPE2 = 'DEC--TAN'
CRVAL1 = 250.3497414839765
CRVAL2 = 2.280925599609063
CRPIX1 = 1045.0
CRPIX2 = 1001.0
CD1_1 = -0.005564478186178
CD1_2 = -0.001042099258152
CD2_1 = 0.00118144146585
CD2_2 = -0.005590816683583
"""
# Same WCS as above but with TAN-SIP CTYPEs and SIP A/B (+AP/BP) coefficients.
header_str_sip = """
XTENSION= 'IMAGE ' / Image extension
BITPIX = -32 / array data type
NAXIS = 2 / number of array dimensions
NAXIS1 = 50
NAXIS2 = 50
PCOUNT = 0 / number of parameters
GCOUNT = 1 / number of groups
RADESYS = 'ICRS '
EQUINOX = 2000.0
WCSAXES = 2
CTYPE1 = 'RA---TAN-SIP'
CTYPE2 = 'DEC--TAN-SIP'
CRVAL1 = 250.3497414839765
CRVAL2 = 2.280925599609063
CRPIX1 = 1045.0
CRPIX2 = 1001.0
CD1_1 = -0.005564478186178
CD1_2 = -0.001042099258152
CD2_1 = 0.00118144146585
CD2_2 = -0.005590816683583
A_ORDER = 2
B_ORDER = 2
A_2_0 = 2.02451189234E-05
A_0_2 = 3.317603337918E-06
A_1_1 = 1.73456334971071E-05
B_2_0 = 3.331330003472E-06
B_0_2 = 2.04247482482589E-05
B_1_1 = 1.71476710804143E-05
AP_ORDER= 2
BP_ORDER= 2
AP_1_0 = 0.000904700296389636
AP_0_1 = 0.000627660715584716
AP_2_0 = -2.023482905861E-05
AP_0_2 = -3.332285841011E-06
AP_1_1 = -1.731636633824E-05
BP_1_0 = 0.000627960882053211
BP_0_1 = 0.000911222886084808
BP_2_0 = -3.343918167224E-06
BP_0_2 = -2.041598249021E-05
BP_1_1 = -1.711876336719E-05
A_DMAX = 44.72893589844534
B_DMAX = 44.62692873032506
"""
# Header that previously made fit_wcs_from_points fail (regression fixture).
header_str_prob = """
NAXIS = 2 / number of array dimensions
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1024.5 / Pixel coordinate of reference point
CRPIX2 = 1024.5 / Pixel coordinate of reference point
CD1_1 = -1.7445934400771E-05 / Coordinate transformation matrix element
CD1_2 = -4.9826985362578E-08 / Coordinate transformation matrix element
CD2_1 = -5.0068838822312E-08 / Coordinate transformation matrix element
CD2_2 = 1.7530614610951E-05 / Coordinate transformation matrix element
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 5.8689341666667 / [deg] Coordinate value at reference point
CRVAL2 = -71.995508583333 / [deg] Coordinate value at reference point
"""
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize(
    "header_str,crval,sip_degree,user_proj_point,exp_max_dist,exp_std_dist",
    [
        # simple testset no distortions
        (
            header_str_linear,
            250.3497414839765,
            None,
            False,
            7e-5 * u.deg,
            2.5e-5 * u.deg,
        ),
        # simple testset with distortions
        (header_str_sip, 250.3497414839765, 2, False, 7e-6 * u.deg, 2.5e-6 * u.deg),
        # testset with problematic WCS header that failed before
        (header_str_prob, 5.8689341666667, None, False, 7e-6 * u.deg, 2.5e-6 * u.deg),
        # simple testset no distortions, user defined center
        (
            header_str_linear,
            250.3497414839765,
            None,
            True,
            7e-5 * u.deg,
            2.5e-5 * u.deg,
        ),
        # 360->0 degree crossover, simple testset no distortions
        (
            header_str_linear,
            352.3497414839765,
            None,
            False,
            7e-5 * u.deg,
            2.5e-5 * u.deg,
        ),
        # 360->0 degree crossover, simple testset with distortions
        (header_str_sip, 352.3497414839765, 2, False, 7e-6 * u.deg, 2.5e-6 * u.deg),
        # 360->0 degree crossover, testset with problematic WCS header that failed before
        (header_str_prob, 352.3497414839765, None, False, 7e-6 * u.deg, 2.5e-6 * u.deg),
        # 360->0 degree crossover, simple testset no distortions, user defined center
        (
            header_str_linear,
            352.3497414839765,
            None,
            True,
            7e-5 * u.deg,
            2.5e-5 * u.deg,
        ),
    ],
)
def test_fit_wcs_from_points(
    header_str, crval, sip_degree, user_proj_point, exp_max_dist, exp_std_dist
):
    """Fit a WCS to a 10x10 grid of pixel/sky pairs derived from a known
    "true" WCS and verify the fit reproduces the true sky positions to
    within the expected max/std separation tolerances.

    Parametrization covers linear/SIP/problematic headers, both
    proj_point modes ("center" vs user-supplied), and RA values near the
    360->0 degree wrap.
    """
    header = fits.Header.fromstring(header_str, sep="\n")
    header["CRVAL1"] = crval  # shift the reference RA per test case
    true_wcs = WCS(header, relax=True)
    # Getting the pixel coordinates
    x, y = np.meshgrid(list(range(10)), list(range(10)))
    x = x.flatten()
    y = y.flatten()
    # Calculating the true sky positions
    world_pix = true_wcs.pixel_to_world(x, y)
    # which projection point to use
    if user_proj_point:
        proj_point = world_pix[0]
        projlon = proj_point.data.lon.deg
        projlat = proj_point.data.lat.deg
    else:
        proj_point = "center"
    # Fitting the wcs
    fit_wcs = fit_wcs_from_points(
        (x, y), world_pix, proj_point=proj_point, sip_degree=sip_degree
    )
    # Validate that the true sky coordinates
    # match sky coordinates calculated from the wcs fit
    world_pix_new = fit_wcs.pixel_to_world(x, y)
    dists = world_pix.separation(world_pix_new)
    assert dists.max() < exp_max_dist
    assert np.std(dists) < exp_std_dist
    # A user-supplied projection point must be adopted verbatim as CRVAL.
    if user_proj_point:
        assert (fit_wcs.wcs.crval == [projlon, projlat]).all()
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_fit_wcs_from_points_CRPIX_bounds():
    # Test CRPIX bounds requirement
    # The fitted CRPIX must land near the centre of the input pixel patch
    # and pixel_shape must cover the patch's full extent.
    wcs_str = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = 0.00056205870415378 / Coordinate transformation matrix element
PC1_2 = -0.00569181083243 / Coordinate transformation matrix element
PC2_1 = 0.0056776810932466 / Coordinate transformation matrix element
PC2_2 = 0.0004208048403273 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 104.57797893504 / [deg] Coordinate value at reference point
CRVAL2 = -74.195502593322 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = -74.195502593322 / [deg] Native latitude of celestial pole
TIMESYS = 'TDB' / Time scale
TIMEUNIT= 'd' / Time units
DATEREF = '1858-11-17' / ISO-8601 fiducial time
MJDREFI = 0.0 / [d] MJD of fiducial time, integer part
MJDREFF = 0.0 / [d] MJD of fiducial time, fractional part
DATE-OBS= '2019-03-27T03:30:13.832Z' / ISO-8601 time of observation
MJD-OBS = 58569.145993426 / [d] MJD of observation
MJD-OBS = 58569.145993426 / [d] MJD at start of observation
TSTART = 1569.6467941661 / [d] Time elapsed since fiducial time at start
DATE-END= '2019-03-27T04:00:13.831Z' / ISO-8601 time at end of observation
MJD-END = 58569.166826748 / [d] MJD at end of observation
TSTOP = 1569.6676274905 / [d] Time elapsed since fiducial time at end
TELAPSE = 0.02083332443 / [d] Elapsed time (start to stop)
TIMEDEL = 0.020833333333333 / [d] Time resolution
TIMEPIXR= 0.5 / Reference position of timestamp in binned data
RADESYS = 'ICRS' / Equatorial coordinate system
"""
    wcs_header = fits.Header.fromstring(wcs_str, sep="\n")
    ffi_wcs = WCS(wcs_header)
    # Pixel patch: 200 columns x 10 rows starting at (x, y) = (1000, 1000).
    yi, xi = (1000, 1000)
    y, x = (10, 200)
    center_coord = SkyCoord(
        ffi_wcs.all_pix2world([[xi + x // 2, yi + y // 2]], 0), unit="deg"
    )[0]
    ypix, xpix = (arr.flatten() for arr in np.mgrid[xi : xi + x, yi : yi + y])
    world_pix = SkyCoord(*ffi_wcs.all_pix2world(xpix, ypix, 0), unit="deg")
    fit_wcs = fit_wcs_from_points((ypix, xpix), world_pix, proj_point="center")
    # CRPIX should be at the patch centre, pixel_shape at its far edge.
    assert (fit_wcs.wcs.crpix.astype(int) == [1100, 1005]).all()
    assert fit_wcs.pixel_shape == (1199, 1009)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_issue10991():
    # test issue #10991 (it just needs to run and set the user defined crval)
    pixel_coords = np.array(
        [
            [1766.88276168, 662.96432257, 171.50212526, 120.70924648],
            [1706.69832901, 1788.85480559, 1216.98949653, 1307.41843381],
        ]
    )
    sky_coords = SkyCoord(
        [
            (66.3542367, 22.20000162),
            (67.15416174, 19.18042906),
            (65.73375432, 17.54251555),
            (66.02400512, 17.44413253),
        ],
        frame="icrs",
        unit="deg",
    )
    reference_point = SkyCoord(64.67514918, 19.63389538, frame="icrs", unit="deg")
    fitted = fit_wcs_from_points(
        xy=pixel_coords,
        world_coords=sky_coords,
        proj_point=reference_point,
        projection="TAN",
    )
    # The user-supplied projection point must become CRVAL verbatim.
    expected_crval = [reference_point.data.lon.deg, reference_point.data.lat.deg]
    assert (fitted.wcs.crval == expected_crval).all()
@pytest.mark.remote_data
@pytest.mark.parametrize("x_in,y_in", [[0, 0], [np.arange(5), np.arange(5)]])
def test_pixel_to_world_itrs(x_in, y_in):
    """Regression test for https://github.com/astropy/astropy/pull/9609"""
    # wcslib >= 7.4 emits a datfix warning while deriving MJD-OBS from DATE-OBS.
    if Version(_wcs.__version__) >= Version("7.4"):
        ctx = pytest.warns(
            FITSFixedWarning,
            match=(
                r"'datfix' made the change 'Set MJD-OBS to 57982\.528524 from"
                r" DATE-OBS'\."
            ),
        )
    else:
        ctx = nullcontext()
    with ctx:
        # Terrestrial (ITRS) lon/lat axes in a plate carree projection.
        wcs = WCS(
            {
                "NAXIS": 2,
                "CTYPE1": "TLON-CAR",
                "CTYPE2": "TLAT-CAR",
                "RADESYS": "ITRS ",
                "DATE-OBS": "2017-08-17T12:41:04.444",
            }
        )
    # This shouldn't raise an exception.
    coord = wcs.pixel_to_world(x_in, y_in)
    # Check round trip transformation.
    x, y = wcs.world_to_pixel(coord)
    np.testing.assert_almost_equal(x, x_in)
    np.testing.assert_almost_equal(y, y_in)
@pytest.fixture
def dkist_location():
    """Geocentric (ITRS Cartesian) location of the DKIST telescope, in metres."""
    coords_m = (-5466045.25695494, -2404388.73741278, 2242133.88769004)
    return EarthLocation(*coords_m * u.m)
def test_obsgeo_cartesian(dkist_location):
    """Cartesian OBSGEO values round-trip exactly into an ITRS frame."""
    obstime = Time("2021-05-21T03:00:00")
    wcs = WCS(naxis=2)
    # (X, Y, Z) in metres; the three trailing spherical slots are zeroed.
    xyz = list(dkist_location.to_value(u.m).tolist())
    wcs.wcs.obsgeo = [*xyz, 0, 0, 0]
    wcs.wcs.dateobs = obstime.isot
    frame = obsgeo_to_frame(wcs.wcs.obsgeo, obstime)
    assert isinstance(frame, ITRS)
    # Exact equality: the Cartesian values pass through unchanged.
    for axis_name in ("x", "y", "z"):
        assert getattr(frame, axis_name) == getattr(dkist_location, axis_name)
def test_obsgeo_spherical(dkist_location):
    """Spherical OBSGEO (lon, lat, dist) yields the same ITRS position."""
    obstime = Time("2021-05-21T03:00:00")
    itrs_loc = dkist_location.get_itrs(obstime)
    sph = itrs_loc.spherical
    wcs = WCS(naxis=2)
    # First three (Cartesian) slots zeroed; spherical values fill the rest.
    wcs.wcs.obsgeo = [0, 0, 0, sph.lon.value, sph.lat.value, sph.distance.value]
    wcs.wcs.dateobs = obstime.isot
    frame = obsgeo_to_frame(wcs.wcs.obsgeo, obstime)
    assert isinstance(frame, ITRS)
    for axis_name in ("x", "y", "z"):
        assert u.allclose(getattr(frame, axis_name), getattr(itrs_loc, axis_name))
def test_obsgeo_infinite(dkist_location):
    """A NaN Cartesian component makes wcs.set() fall back to the spherical
    OBSGEO values, which must still resolve to the correct ITRS position."""
    obstime = Time("2021-05-21T03:00:00")
    itrs_loc = dkist_location.get_itrs(obstime)
    sph = itrs_loc.spherical
    wcs = WCS(naxis=2)
    # Cartesian slots partially NaN; spherical slots carry the real location.
    wcs.wcs.obsgeo = [1, 1, np.nan, sph.lon.value, sph.lat.value, sph.distance.value]
    wcs.wcs.dateobs = obstime.isot
    wcs.wcs.set()
    frame = obsgeo_to_frame(wcs.wcs.obsgeo, obstime)
    assert isinstance(frame, ITRS)
    for axis_name in ("x", "y", "z"):
        assert u.allclose(getattr(frame, axis_name), getattr(itrs_loc, axis_name))
# Malformed OBSGEO inputs: all-NaN, missing, all-zero, and too short.
@pytest.mark.parametrize("obsgeo", ([np.nan] * 6, None, [0] * 6, [54] * 5))
def test_obsgeo_invalid(obsgeo):
    # Invalid OBSGEO arrays must be rejected with ValueError.
    with pytest.raises(ValueError):
        obsgeo_to_frame(obsgeo, None)
|
15b39a59ca2c21351c9713c4969b5b40531d9444ab3e08aa7ad22d25ae0aebb3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import copy, deepcopy
import numpy as np
import pytest
from astropy import wcs
def test_prjprm_init():
    """Exercise Prjprm construction, invalid set(), and destruction paths."""
    # test PyPrjprm_cnew
    assert wcs.WCS().wcs.cel.prj
    # test PyPrjprm_new
    assert wcs.Prjprm()
    # An unconfigured projection cannot be set().
    with pytest.raises(wcs.InvalidPrjParametersError):
        unconfigured = wcs.Prjprm()
        unconfigured.set()
    # test deletion does not crash
    disposable = wcs.Prjprm()
    del disposable
def test_prjprm_copy():
    """Shallow copies share PV storage with the original; deep copies do not."""
    # shallow copy: mutating the original is visible through both copies
    original = wcs.Prjprm()
    shallow_a = copy(original)
    shallow_b = copy(shallow_a)
    original.pv = [0, 6, 8, 18, 3]
    matches_a = np.allclose(original.pv, shallow_a.pv, atol=1e-12, rtol=0)
    matches_b = np.allclose(original.pv, shallow_b.pv, atol=1e-12, rtol=0)
    assert matches_a and matches_b
    del original, shallow_a, shallow_b
    # deep copy: completely independent storage
    original = wcs.Prjprm()
    deep = deepcopy(original)
    original.pv = [0, 6, 8, 18, 3]
    assert not np.allclose(original.pv, deep.pv, atol=1e-12, rtol=0)
    del original, deep
def test_prjprm_flag():
    """A freshly created Prjprm starts with its internal flag cleared."""
    assert wcs.Prjprm()._flag == 0
def test_prjprm_code():
    # Projection code: blank by default, settable, and None resets it.
    prj = wcs.Prjprm()
    assert prj.code == "   "
    assert prj._flag == 0
    prj.code = "TAN"
    prj.set()
    assert prj.code == "TAN"
    assert prj._flag
    # Re-assigning the identical code must not invalidate the projection.
    prj.code = "TAN"
    assert prj._flag
    # None restores the blank default and clears the flag.
    prj.code = None
    assert prj.code == "   "
    assert prj._flag == 0
def test_prjprm_phi0():
    """phi0: unset (None) by default, settable, and resettable via None."""
    prj = wcs.Prjprm()
    # Use `is None` (PEP 8 / E711), not `== None`, for None comparisons.
    assert prj.phi0 is None
    assert prj._flag == 0
    prj.code = "TAN"
    prj.phi0 = 2.0
    prj.set()
    assert prj.phi0 == 0
    # Assigning the currently-stored value keeps the projection "set"...
    prj.phi0 = 0.0
    assert prj._flag
    # ...but assigning a different value clears the flag.
    prj.phi0 = 2.0
    assert prj._flag == 0
    # None returns phi0 to the unset state.
    prj.phi0 = None
    assert prj.phi0 is None
    assert prj._flag == 0
def test_prjprm_theta0():
    """theta0: unset (None) by default, settable, and resettable via None."""
    prj = wcs.Prjprm()
    # Use `is None` (PEP 8 / E711), not `== None`, for None comparisons.
    assert prj.theta0 is None
    assert prj._flag == 0
    prj.code = "TAN"
    prj.phi0 = 2.0
    prj.theta0 = 4.0
    prj.set()
    assert prj.theta0 == 4.0
    # Same-value assignment keeps the "set" flag...
    prj.theta0 = 4.0
    assert prj._flag
    # ...a different value clears it.
    prj.theta0 = 8.0
    assert prj._flag == 0
    # None returns theta0 to the unset state.
    prj.theta0 = None
    assert prj.theta0 is None
    assert prj._flag == 0
def test_prjprm_pv():
    """PV get/set semantics: flag invalidation, None handling, NaN support."""
    prj = wcs.Prjprm()
    assert prj.pv.size == wcs.PRJ_PVN
    # Use `is None` (PEP 8 / E711), not `== None`, for None comparisons.
    assert prj.theta0 is None
    assert prj._flag == 0
    prj.code = "ZPN"
    prj.phi0 = 2.0
    prj.theta0 = 4.0
    pv = [float(i) if (i % 2) else i for i in range(wcs.PRJ_PVN)]
    prj.pv = pv
    prj.set()
    assert prj.phi0 == 2.0
    assert prj.theta0 == 4.0
    assert np.allclose(prj.pv, pv, atol=1e-6, rtol=0)
    # test the same with numpy.ndarray (flag not cleared for same values):
    prj.pv = prj.pv
    assert prj._flag != 0
    prj.set()
    assert np.allclose(prj.pv, pv, atol=1e-6, rtol=0)
    # test the same but modify PV values to check that flag is cleared:
    prj.pv = np.array(pv) + 2e-7
    assert prj._flag == 0
    prj.set()
    assert np.allclose(prj.pv, pv, atol=1e-6, rtol=0)
    # check that None in pv list leave value unchanged and values not set
    # by the list are left unchanged:
    prj.code = "SZP"
    prj.pv = [0.0, 99.0, None]
    assert np.allclose(prj.pv[:4], [0.0, 99.0, 2.0, 3.0], atol=1e-6, rtol=0)
    # check that None resets PV to uninitialized (before prjset()) values:
    prj.pv = None
    assert prj.pv[0] == 0.0
    assert np.all(np.isnan(prj.pv[1:4]))
    assert np.allclose(prj.pv[5:], 0, atol=0, rtol=0)
    # check we can set all PVs to nan:
    nan_pvs = wcs.PRJ_PVN * [np.nan]
    prj.code = "TAN"
    prj.pv = nan_pvs  # set using a list
    prj.set()
    assert np.all(np.isnan(prj.pv))
    prj.pv = np.array(nan_pvs)  # set using a numpy.ndarray
    prj.set()
    assert np.all(np.isnan(prj.pv))
def test_prjprm_pvrange():
    # pvrange encodes which PV indices the active projection uses.
    prj = wcs.Prjprm()
    prj.code = "ZPN"
    prj.phi0 = 2.0
    prj.theta0 = 4.0
    prj.pv = [0.0, 1.0, 2.0, 3.0]
    prj.set()
    # ZPN may use the full PV vector.
    assert prj.pvrange == wcs.PRJ_PVN
    prj.code = "SZP"
    prj.set()
    # SZP uses PV1..PV3 (encoded as 100 + 3).
    assert prj.pvrange == 103
# The tests below probe simple attributes of the shared prj_TAB fixture
# (a projection object defined elsewhere, e.g. in conftest).
def test_prjprm_bounds(prj_TAB):
    # bounds is writable; 7 enables boundary checking, 0 disables it.
    assert prj_TAB.bounds == 7
    prj_TAB.bounds = 0
    assert prj_TAB.bounds == 0


def test_prjprm_category(prj_TAB):
    # The fixture's projection is zenithal.
    assert prj_TAB.category == wcs.PRJ_ZENITHAL


def test_prjprm_name(prj_TAB):
    # A human-readable projection name must be present.
    assert prj_TAB.name


def test_prjprm_w(prj_TAB):
    # The intermediate work array must contain only finite values.
    assert np.all(np.isfinite(prj_TAB.w))


def test_prjprm_simplezen(prj_TAB):
    assert prj_TAB.simplezen == 1


def test_prjprm_equiareal(prj_TAB):
    assert prj_TAB.equiareal == 0


def test_prjprm_conformal(prj_TAB):
    assert prj_TAB.conformal == 0


def test_prjprm_global_projection(prj_TAB):
    assert prj_TAB.global_projection == 0


def test_prjprm_divergent(prj_TAB):
    assert prj_TAB.divergent == 1


def test_prjprm_r0(prj_TAB):
    # Radius of the generating sphere is strictly positive.
    assert prj_TAB.r0 > 0.0


def test_prjprm_x0_y0(prj_TAB):
    # Fiducial offsets default to the origin.
    assert prj_TAB.x0 == 0.0
    assert prj_TAB.y0 == 0.0


def test_prjprm_n_m(prj_TAB):
    assert prj_TAB.n == 0
    assert prj_TAB.m == 0
def test_prjprm_prj_roundtrips(prj_TAB):
    """prjx2s followed by prjs2x must reproduce the input coordinates."""
    # some random values:
    x = [-0.002, 0.014, -0.003, 0.015, -0.047, -0.029, -0.042, 0.027, 0.021]
    y = [0.022, -0.017, -0.048, -0.049, -0.043, 0.015, 0.046, 0.031, 0.011]

    def _assert_roundtrip(prj):
        # Forward to native spherical and straight back to projection plane.
        rx, ry = prj.prjs2x(*prj.prjx2s(x, y))
        assert np.allclose(x, rx, atol=1e-12, rtol=0)
        assert np.allclose(y, ry, atol=1e-12, rtol=0)

    _assert_roundtrip(prj_TAB)
    # same test for a Prjprm that was not previously explicitly "set":
    fresh = wcs.Prjprm()
    fresh.code = "TAN"
    _assert_roundtrip(fresh)
|
1a174845f204604320236d9a7b330c82223e30b03261aae7d93e16fbefa3d2e7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import numpy as np
import pytest
from astropy import wcs
from astropy.utils.data import get_pkg_data_contents, get_pkg_data_filenames
from astropy.utils.misc import NumpyRNGContext
from astropy.wcs.wcs import FITSFixedWarning
# use the base name of the file, because everything we yield
# will show up in the test name in the pandokia report
hdr_map_file_list = list(
    map(os.path.basename, get_pkg_data_filenames("data/maps", pattern="*.hdr"))
)
# Checking the number of files before reading them in.
# OLD COMMENTS:
# AFTER we tested with every file that we found, check to see that we
# actually have the list we expect. If N=0, we will not have performed
# any tests at all. If N < n_data_files, we are missing some files,
# so we will have skipped some tests. Without this check, both cases
# happen silently!
def test_read_map_files():
    """Guard against a missing/partial data directory: exactly 28 map
    headers must be present, otherwise test_map silently skips cases."""
    expected = 28
    found = len(hdr_map_file_list)
    assert found == expected, (
        f"test_read_map_files has wrong number data files: found {found},"
        f" expected {expected}"
    )
@pytest.mark.parametrize("filename", hdr_map_file_list)
def test_map(filename):
    """Push random pixel coordinates through each map WCS in both directions."""
    hdr_text = get_pkg_data_contents(os.path.join("data/maps", filename))
    wcsobj = wcs.WCS(hdr_text)
    with NumpyRNGContext(123456789):
        # Fixed seed keeps this smoke test deterministic.
        pix = np.random.rand(2**12, wcsobj.wcs.naxis)
        wcsobj.wcs_pix2world(pix, 1)
        wcsobj.wcs_world2pix(pix, 1)
# Spectrum headers, reduced to basenames for readable parametrized test IDs.
hdr_spec_file_list = list(
    map(os.path.basename, get_pkg_data_filenames("data/spectra", pattern="*.hdr"))
)
def test_read_spec_files():
    """Guard against a missing/partial data directory: exactly 6 spectrum
    headers must be present."""
    expected = 6
    found = len(hdr_spec_file_list)
    assert found == expected, (
        f"test_spectra has wrong number data files: found {found},"
        f" expected {expected}"
    )
    # b.t.w. If this assert happens, pytest reports one more test
    # than it would have otherwise.
@pytest.mark.parametrize("filename", hdr_spec_file_list)
def test_spectrum(filename):
    """Push random pixel coordinates through each spectral WCS both ways."""
    hdr_text = get_pkg_data_contents(os.path.join("data", "spectra", filename))
    # Warning only pops up for one of the inputs.
    with pytest.warns() as warning_lines:
        wcsobj = wcs.WCS(hdr_text)
    assert all(issubclass(w.category, FITSFixedWarning) for w in warning_lines)
    with NumpyRNGContext(123456789):
        # Fixed seed keeps this smoke test deterministic.
        pix = np.random.rand(2**16, wcsobj.wcs.naxis)
        wcsobj.wcs_pix2world(pix, 1)
        wcsobj.wcs_world2pix(pix, 1)
|
88e6b3bfa60e5a2cdbf2cc49bb06c31a623e65bb32a42eabd232676a4a89bf97 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
from contextlib import nullcontext
from datetime import datetime
import numpy as np
import pytest
from numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_almost_equal_nulp,
assert_array_equal,
)
from packaging.version import Version
from astropy import units as u
from astropy import wcs
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import (
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_filenames,
)
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyUserWarning,
AstropyWarning,
)
from astropy.utils.misc import NumpyRNGContext
from astropy.wcs import _wcs
_WCSLIB_VER = Version(_wcs.__version__)
# NOTE: User can choose to use system wcslib instead of bundled.
def ctx_for_v71_dateref_warnings():
    """Return a context manager expecting the DATEREF 'datfix' warning that
    wcslib 7.1-7.2 emits, or a no-op context for other versions."""
    if Version("7.1") <= _WCSLIB_VER < Version("7.3"):
        return pytest.warns(
            wcs.FITSFixedWarning,
            match=(
                r"'datfix' made the change 'Set DATE-REF to '1858-11-17' from"
                r" MJD-REF'\."
            ),
        )
    return nullcontext()
class TestMaps:
    """Round-trip check for every map header shipped in data/maps."""

    def setup_method(self):
        # get the list of the hdr files that we want to test
        self._file_list = list(get_pkg_data_filenames("data/maps", pattern="*.hdr"))

    def test_consistency(self):
        # Guard against an empty or incomplete data directory, which would
        # otherwise let test_maps pass while exercising nothing.
        expected = 28
        assert len(self._file_list) == expected, (
            f"test_spectra has wrong number data files: found {len(self._file_list)},"
            f" expected {expected}"
        )

    def test_maps(self):
        for path in self._file_list:
            # use the base name of the file, so we get more useful messages
            # for failing tests.
            basename = os.path.basename(path)
            # Now find the associated file in the installed wcs test directory.
            hdr_text = get_pkg_data_contents(
                os.path.join("data", "maps", basename), encoding="binary"
            )
            # finally run the test.
            wcsobj = wcs.WCS(hdr_text)
            sky = wcsobj.wcs_pix2world([[97, 97]], 1)
            assert_array_almost_equal(sky, [[285.0, -66.25]], decimal=1)
            pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
            assert_array_almost_equal(pix, [[97, 97]], decimal=0)
class TestSpectra:
    """find_all_wcs check for every spectral header shipped in data/spectra."""

    def setup_method(self):
        self._file_list = list(get_pkg_data_filenames("data/spectra", pattern="*.hdr"))

    def test_consistency(self):
        # Guard against an empty or incomplete data directory, which would
        # otherwise let test_spectra pass while exercising nothing.
        expected = 6
        assert len(self._file_list) == expected, (
            f"test_spectra has wrong number data files: found {len(self._file_list)},"
            f" expected {expected}"
        )

    def test_spectra(self):
        for path in self._file_list:
            # use the base name of the file, so we get more useful messages
            # for failing tests.
            basename = os.path.basename(path)
            # Now find the associated file in the installed wcs test directory.
            hdr_text = get_pkg_data_contents(
                os.path.join("data", "spectra", basename), encoding="binary"
            )
            # wcslib >= 7.4 emits a datfix warning while parsing these headers.
            if _WCSLIB_VER >= Version("7.4"):
                ctx = pytest.warns(
                    wcs.FITSFixedWarning,
                    match=(
                        r"'datfix' made the change 'Set MJD-OBS to 53925\.853472 from"
                        r" DATE-OBS'\."
                    ),
                )
            else:
                ctx = nullcontext()
            with ctx:
                all_wcs = wcs.find_all_wcs(hdr_text)
            assert len(all_wcs) == 9
def test_fixes():
    """
    From github issue #36
    """
    header = get_pkg_data_contents("data/nonstandard_units.hdr", encoding="binary")
    # Translating nonstandard units still ends in an invalid transform, but
    # the unit-fixer warnings must have been emitted along the way.
    with pytest.raises(wcs.InvalidTransformError), pytest.warns(
        wcs.FITSFixedWarning
    ) as w:
        wcs.WCS(header, translate_units="dhs")
    # wcslib 7.4-7.5 append an extra "'datfix' made the change 'Success'."
    # record; pop it so the positional checks below line up.
    if Version("7.4") <= _WCSLIB_VER < Version("7.6"):
        assert len(w) == 3
        assert "'datfix' made the change 'Success'." in str(w.pop().message)
    else:
        assert len(w) == 2
    first_wmsg = str(w[0].message)
    assert "unitfix" in first_wmsg and "Hz" in first_wmsg and "M/S" in first_wmsg
    assert "plane angle" in str(w[1].message) and "m/s" in str(w[1].message)
# Ignore "PV2_2 = 0.209028857410973 invalid keyvalue" warning seen on Windows.
@pytest.mark.filterwarnings(r"ignore:PV2_2")
def test_outside_sky():
"""
From github issue #107
"""
header = get_pkg_data_contents("data/outside_sky.hdr", encoding="binary")
w = wcs.WCS(header)
assert np.all(np.isnan(w.wcs_pix2world([[100.0, 500.0]], 0))) # outside sky
assert np.all(np.isnan(w.wcs_pix2world([[200.0, 200.0]], 0))) # outside sky
assert not np.any(np.isnan(w.wcs_pix2world([[1000.0, 1000.0]], 0)))
def test_pix2world():
    """
    From github issue #1463
    """
    # TODO: write this to test the expected output behavior of pix2world,
    # currently this just makes sure it doesn't error out in unexpected ways
    # (and compares `wcs.pc` and `result` values?)
    filename = get_pkg_data_filename("data/sip2.fits")
    with pytest.warns(wcs.FITSFixedWarning) as caught_warnings:
        # this raises a warning unimportant for this testing the pix2world
        # FITSFixedWarning(u'The WCS transformation has more axes (2) than
        # the image it is associated with (0)')
        ww = wcs.WCS(filename)
    # might as well monitor for changing behavior
    # (wcslib 7.4-7.5 emit one extra fixer warning)
    if Version("7.4") <= _WCSLIB_VER < Version("7.6"):
        assert len(caught_warnings) == 2
    else:
        assert len(caught_warnings) == 1
    n = 3
    pixels = (np.arange(n) * np.ones((2, n))).T
    result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)
    # Catch #2791
    ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True)
    # assuming that the data of sip2.fits doesn't change
    answer = np.array([[0.00024976, 0.00023018], [0.00023043, -0.00024997]])
    assert np.allclose(ww.wcs.pc, answer, atol=1.0e-8)
    answer = np.array(
        [
            [202.39265216, 47.17756518],
            [202.39335826, 47.17754619],
            [202.39406436, 47.1775272],
        ]
    )
    assert np.allclose(result, answer, atol=1.0e-8, rtol=1.0e-10)
def test_load_fits_path():
    """A WCS can be constructed straight from a FITS file path."""
    path = get_pkg_data_filename("data/sip.fits")
    with pytest.warns(wcs.FITSFixedWarning):
        wcs.WCS(path)
def test_dict_init():
    """
    Test that WCS can be initialized with a dict-like object
    """
    # Dictionary with no actual WCS, returns identity transform
    with ctx_for_v71_dateref_warnings():
        w = wcs.WCS({})
    xp, yp = w.wcs_world2pix(41.0, 2.0, 1)
    assert_array_almost_equal_nulp(xp, 41.0, 10)
    assert_array_almost_equal_nulp(yp, 2.0, 10)
    # Valid WCS
    hdr = {
        "CTYPE1": "GLON-CAR",
        "CTYPE2": "GLAT-CAR",
        "CUNIT1": "deg",
        "CUNIT2": "deg",
        "CRPIX1": 1,
        "CRPIX2": 1,
        "CRVAL1": 40.0,
        "CRVAL2": 0.0,
        "CDELT1": -0.1,
        "CDELT2": 0.1,
    }
    # Pre-supplying DATEREF on wcslib >= 7.1 steers which datfix path runs.
    if _WCSLIB_VER >= Version("7.1"):
        hdr["DATEREF"] = "1858-11-17"
    # wcslib >= 7.4 reports the derived MJDREF via a FITSFixedWarning.
    if _WCSLIB_VER >= Version("7.4"):
        ctx = pytest.warns(
            wcs.wcs.FITSFixedWarning,
            match=r"'datfix' made the change 'Set MJDREF to 0\.000000 from DATEREF'\.",
        )
    else:
        ctx = nullcontext()
    with ctx:
        w = wcs.WCS(hdr)
    xp, yp = w.wcs_world2pix(41.0, 2.0, 0)
    assert_array_almost_equal_nulp(xp, -10.0, 10)
    assert_array_almost_equal_nulp(yp, 20.0, 10)
def test_extra_kwarg():
    """
    Issue #444
    """
    w = wcs.WCS()
    with NumpyRNGContext(123456789):
        pts = np.random.rand(100, 2)
    # 'origin' is positional-only for wcs_pix2world; keyword use must fail.
    with pytest.raises(TypeError):
        w.wcs_pix2world(pts, origin=1)
def test_3d_shapes():
    """
    Issue #444
    """
    w = wcs.WCS(naxis=3)
    with NumpyRNGContext(123456789):
        pts = np.random.rand(100, 3)
    # (N, naxis) array input -> same-shaped output array.
    assert w.wcs_pix2world(pts, 1).shape == (100, 3)
    # Separate per-axis arrays -> one output array per axis.
    per_axis = w.wcs_pix2world(pts[..., 0], pts[..., 1], pts[..., 2], 1)
    assert len(per_axis) == 3
def test_preserve_shape():
    """Input array shape (2, 3, 4) survives both transform directions."""
    w = wcs.WCS(naxis=2)
    shape = (2, 3, 4)
    a = np.random.random(shape)
    b = np.random.random(shape)
    outputs = (*w.wcs_pix2world(a, b, 1), *w.wcs_world2pix(a, b, 1))
    for out in outputs:
        assert out.shape == shape
def test_broadcasting():
    """A scalar second coordinate broadcasts against an array first one."""
    w = wcs.WCS(naxis=2)
    arr = np.random.random((2, 3, 4))
    xp, yp = w.wcs_world2pix(arr, 1, 1)
    assert xp.shape == (2, 3, 4)
    assert yp.shape == (2, 3, 4)
def test_shape_mismatch():
    """Non-broadcastable inputs raise; naxis == 1 outputs keep input shape."""
    w = wcs.WCS(naxis=2)
    a = np.random.random((2, 3, 4))
    b = np.random.random((3, 2, 4))
    MESSAGE = r"Coordinate arrays are not broadcastable to each other"
    for transform in (w.wcs_pix2world, w.wcs_world2pix):
        with pytest.raises(ValueError, match=MESSAGE):
            transform(a, b, 1)
    # There are some ambiguities that need to be worked around when
    # naxis == 1
    w = wcs.WCS(naxis=1)
    column = np.random.random((42, 1))
    assert w.wcs_pix2world(column, 1).shape == (42, 1)
    flat = np.random.random((42,))
    (out,) = w.wcs_pix2world(flat, 1)
    assert out.shape == (42,)
def test_invalid_shape():
    """Issue #1395"""
    MESSAGE = r"When providing two arguments, the array must be of shape [(]N, 2[)]"
    w = wcs.WCS(naxis=2)
    # Neither (2, 3) nor (2, 1) is an acceptable (N, 2) coordinate array.
    for bad_shape in ((2, 3), (2, 1)):
        with pytest.raises(ValueError, match=MESSAGE):
            w.wcs_pix2world(np.random.random(bad_shape), 1)
def test_warning_about_defunct_keywords():
    """Defunct PCi_ja keywords must warn consistently on repeated parses."""
    header = get_pkg_data_contents("data/defunct_keywords.hdr", encoding="binary")
    # 7.4 adds a fifth warning "'datfix' made the change 'Success'."
    expected = 5 if Version("7.4") <= _WCSLIB_VER < Version("7.6") else 4
    # Make sure the warnings come out every time...
    for _ in range(2):
        with pytest.warns(wcs.FITSFixedWarning) as caught:
            wcs.WCS(header)
        assert len(caught) == expected
        assert all("PCi_ja" in str(item.message) for item in caught[:4])
def test_warning_about_defunct_keywords_exception():
    """Parsing defunct keywords emits FITSFixedWarning (no exception)."""
    hdr_text = get_pkg_data_contents("data/defunct_keywords.hdr", encoding="binary")
    with pytest.warns(wcs.FITSFixedWarning):
        wcs.WCS(hdr_text)
def test_to_header_string():
    """A default WCS serializes to exactly the expected card set.

    Which fiducial-time cards appear depends on the wcslib version:
    7.1 introduced DATEREF/MJDREFI/MJDREFF; 7.3 collapsed them into MJDREF.
    """
    # fmt: off
    hdrstr = (
        "WCSAXES = 2 / Number of coordinate axes ",
        "CRPIX1 = 0.0 / Pixel coordinate of reference point ",
        "CRPIX2 = 0.0 / Pixel coordinate of reference point ",
        "CDELT1 = 1.0 / Coordinate increment at reference point ",
        "CDELT2 = 1.0 / Coordinate increment at reference point ",
        "CRVAL1 = 0.0 / Coordinate value at reference point ",
        "CRVAL2 = 0.0 / Coordinate value at reference point ",
        "LATPOLE = 90.0 / [deg] Native latitude of celestial pole ",
    )
    # fmt: on
    if _WCSLIB_VER >= Version("7.3"):
        # fmt: off
        hdrstr += (
            "MJDREF = 0.0 / [d] MJD of fiducial time ",
        )
        # fmt: on
    elif _WCSLIB_VER >= Version("7.1"):
        # fmt: off
        hdrstr += (
            "DATEREF = '1858-11-17' / ISO-8601 fiducial time ",
            "MJDREFI = 0.0 / [d] MJD of fiducial time, integer part ",
            "MJDREFF = 0.0 / [d] MJD of fiducial time, fractional part "
        )
        # fmt: on
    hdrstr += ("END",)
    header_string = "".join(hdrstr)
    w = wcs.WCS()
    h0 = fits.Header.fromstring(w.to_header_string().strip())
    # Drop cards irrelevant to the comparison.
    if "COMMENT" in h0:
        del h0["COMMENT"]
    if "" in h0:
        del h0[""]
    h1 = fits.Header.fromstring(header_string.strip())
    assert dict(h0) == dict(h1)
def test_to_fits():
nrec = 11 if _WCSLIB_VER >= Version("7.1") else 8
if _WCSLIB_VER < Version("7.1"):
nrec = 8
elif _WCSLIB_VER < Version("7.3"):
nrec = 11
else:
nrec = 9
w = wcs.WCS()
header_string = w.to_header()
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert header_string == wfits[0].header[-nrec:]
def test_to_header_warning():
    """to_header() on a SIP WCS warns exactly once (about dropped A_ORDER)."""
    path = get_pkg_data_filename("data/sip.fits")
    with pytest.warns(wcs.FITSFixedWarning):
        sip_wcs = wcs.WCS(path)
    with pytest.warns(AstropyWarning, match="A_ORDER") as caught:
        sip_wcs.to_header()
    assert len(caught) == 1
def test_no_comments_in_header():
    """Neither the default nor an alternate-key header may contain COMMENTs."""
    w = wcs.WCS()
    default_hdr = w.to_header()
    assert w.wcs.alt not in default_hdr
    assert "COMMENT" + w.wcs.alt.strip() not in default_hdr
    assert "COMMENT" not in default_hdr
    alt_key = "P"
    alt_hdr = w.to_header(key=alt_key)
    assert alt_key not in alt_hdr
    assert "COMMENT" not in alt_hdr
    assert "COMMENT" + w.wcs.alt.strip() not in alt_hdr
def test_find_all_wcs_crash():
    """
    Causes a double free without a recent fix in wcslib_wrap.C
    """
    path = get_pkg_data_filename("data/too_many_pv.hdr")
    with open(path) as fd:
        header_text = fd.read()
    # We have to set fix=False here, because one of the fixing tasks is to
    # remove redundant SCAMP distortion parameters when SIP distortion
    # parameters are also present.
    with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning):
        wcs.find_all_wcs(header_text, fix=False)
# NOTE: Warning bubbles up from C layer during wcs.validate() and
# is hard to catch, so we just ignore it.
@pytest.mark.filterwarnings("ignore")
def test_validate():
    """Compare wcs.validate() output against the reference file matching
    the installed wcslib version."""
    results = wcs.validate(get_pkg_data_filename("data/validate.fits"))
    results_txt = sorted({x.strip() for x in repr(results).splitlines()})
    # Newest applicable reference file wins; fall back to the oldest.
    version_map = (
        ("7.6", "data/validate.7.6.txt"),
        ("7.4", "data/validate.7.4.txt"),
        ("6.0", "data/validate.6.txt"),
        ("5.13", "data/validate.5.13.txt"),
        ("5.0", "data/validate.5.0.txt"),
    )
    filename = "data/validate.txt"
    for min_version, candidate in version_map:
        if _WCSLIB_VER >= Version(min_version):
            filename = candidate
            break
    with open(get_pkg_data_filename(filename)) as fd:
        expected_txt = sorted({x.strip() for x in fd.readlines()})
    assert expected_txt == results_txt
@pytest.mark.filterwarnings("ignore")
def test_validate_wcs_tab():
    """validate() on a -TAB file reports no issues for either HDU."""
    results = wcs.validate(get_pkg_data_filename("data/tab-time-last-axis.fits"))
    report_lines = sorted({line.strip() for line in repr(results).splitlines()})
    expected = [
        "",
        "HDU 0 (PRIMARY):",
        "HDU 1 (WCS-TABLE):",
        "No issues.",
        "WCS key ' ':",
    ]
    assert report_lines == expected
def test_validate_with_2_wcses():
    # From Issue #2053
    with pytest.warns(AstropyUserWarning):
        report = wcs.validate(get_pkg_data_filename("data/2wcses.hdr"))
    # The alternate 'A' WCS must show up in the validation report.
    assert "WCS key 'A':" in str(report)
def test_crpix_maps_to_crval():
    """With SIP coefficients attached, both the linear-only and the full
    (SIP-corrected) transforms must still map CRPIX exactly onto CRVAL."""
    twcs = wcs.WCS(naxis=2)
    twcs.wcs.crval = [251.29, 57.58]
    twcs.wcs.cdelt = [1, 1]
    twcs.wcs.crpix = [507, 507]
    twcs.wcs.pc = np.array([[7.7e-6, 3.3e-5], [3.7e-5, -6.8e-6]])
    twcs._naxis = [1014, 1014]
    twcs.wcs.ctype = ["RA---TAN-SIP", "DEC--TAN-SIP"]
    # SIP forward polynomial coefficients (A for axis 1, B for axis 2).
    a = np.array(
        [
            [0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
            [0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
            [-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
            [-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
            [-2.81029767e-13, 0.0, 0.0, 0.0, 0.0],
        ]
    )
    b = np.array(
        [
            [0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
            [0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
            [6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
            [3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
            [-5.92438525e-13, 0.0, 0.0, 0.0, 0.0],
        ]
    )
    twcs.sip = wcs.Sip(a, b, None, None, twcs.wcs.crpix)
    twcs.wcs.set()
    # Scale the tolerance by the projection-plane pixel scale.
    pscale = np.sqrt(wcs.utils.proj_plane_pixel_area(twcs))
    # test that CRPIX maps to CRVAL:
    assert_allclose(
        twcs.wcs_pix2world(*twcs.wcs.crpix, 1),
        twcs.wcs.crval,
        rtol=0.0,
        atol=1e-6 * pscale,
    )
    # test that CRPIX maps to CRVAL:
    assert_allclose(
        twcs.all_pix2world(*twcs.wcs.crpix, 1),
        twcs.wcs.crval,
        rtol=0.0,
        atol=1e-6 * pscale,
    )
def test_all_world2pix(
    fname=None,
    ext=0,
    tolerance=1.0e-4,
    origin=0,
    random_npts=25000,
    adaptive=False,
    maxiter=20,
    detect_divergence=True,
):
    """Test all_world2pix, iterative inverse of all_pix2world.

    Transforms a grid plus random pixels to world coordinates with
    all_pix2world, inverts them with all_world2pix, and checks the
    recovered pixel positions agree to within ``2 * tolerance``.
    On NoConvergence, diagnostics are printed before re-raising.
    """
    # Open test FITS file:
    if fname is None:
        fname = get_pkg_data_filename("data/j94f05bgq_flt.fits")
        ext = ("SCI", 1)
    if not os.path.isfile(fname):
        raise OSError(f"Input file '{fname:s}' to 'test_all_world2pix' not found.")
    h = fits.open(fname)
    w = wcs.WCS(h[ext].header, h)
    h.close()
    del h
    crpix = w.wcs.crpix
    ncoord = crpix.shape[0]
    # Assume that CRPIX is at the center of the image and that the image has
    # a power-of-2 number of pixels along each axis. Only use the central
    # 1/64 for this testing purpose:
    naxesi_l = list((7.0 / 16 * crpix).astype(int))
    naxesi_u = list((9.0 / 16 * crpix).astype(int))
    # Generate integer indices of pixels (image grid):
    img_pix = np.dstack(
        [i.flatten() for i in np.meshgrid(*map(range, naxesi_l, naxesi_u))]
    )[0]
    # Generate random data (in image coordinates):
    with NumpyRNGContext(123456789):
        rnd_pix = np.random.rand(random_npts, ncoord)
    # Scale random data to cover the central part of the image
    mwidth = 2 * (crpix * 1.0 / 8)
    rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix
    # Reference pixel coordinates in image coordinate system (CS):
    test_pix = np.append(img_pix, rnd_pix, axis=0)
    # Reference pixel coordinates in sky CS using forward transformation:
    all_world = w.all_pix2world(test_pix, origin)
    try:
        runtime_begin = datetime.now()
        # Apply the inverse iterative process to pixels in world coordinates
        # to recover the pixel coordinates in image space.
        all_pix = w.all_world2pix(
            all_world,
            origin,
            tolerance=tolerance,
            adaptive=adaptive,
            maxiter=maxiter,
            detect_divergence=detect_divergence,
        )
        runtime_end = datetime.now()
    except wcs.wcs.NoConvergence as e:
        # Dump full diagnostics (diverging / slowly-converging solutions)
        # before re-raising, to aid debugging of convergence failures.
        runtime_end = datetime.now()
        ndiv = 0
        if e.divergent is not None:
            ndiv = e.divergent.shape[0]
            print(f"There are {ndiv} diverging solutions.")
            print(f"Indices of diverging solutions:\n{e.divergent}")
            print(f"Diverging solutions:\n{e.best_solution[e.divergent]}\n")
            print(
                "Mean radius of the diverging solutions:"
                f" {np.mean(np.linalg.norm(e.best_solution[e.divergent], axis=1))}"
            )
            print(
                "Mean accuracy of the diverging solutions:"
                f" {np.mean(np.linalg.norm(e.accuracy[e.divergent], axis=1))}\n"
            )
        else:
            print("There are no diverging solutions.")
        nslow = 0
        if e.slow_conv is not None:
            nslow = e.slow_conv.shape[0]
            print(f"There are {nslow} slowly converging solutions.")
            print(f"Indices of slowly converging solutions:\n{e.slow_conv}")
            print(f"Slowly converging solutions:\n{e.best_solution[e.slow_conv]}\n")
        else:
            print("There are no slowly converging solutions.\n")
        print(
            f"There are {e.best_solution.shape[0] - ndiv - nslow} converged solutions."
        )
        print(f"Best solutions (all points):\n{e.best_solution}")
        print(f"Accuracy:\n{e.accuracy}\n")
        print(
            "\nFinished running 'test_all_world2pix' with errors.\n"
            f"ERROR: {e.args[0]}\nRun time: {runtime_end - runtime_begin}\n"
        )
        raise e
    # Compute differences between reference pixel coordinates and
    # pixel coordinates (in image space) recovered from reference
    # pixels in world coordinates:
    errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
    meanerr = np.mean(errors)
    maxerr = np.amax(errors)
    print(
        "\nFinished running 'test_all_world2pix'.\n"
        f"Mean error = {meanerr:e} (Max error = {maxerr:e})\n"
        f"Run time: {runtime_end - runtime_begin}\n"
    )
    assert maxerr < 2.0 * tolerance
def test_scamp_sip_distortion_parameters():
    """
    Test parsing of WCS parameters with redundant SIP and SCAMP distortion
    parameters.
    """
    raw_header = get_pkg_data_contents("data/validate.fits", encoding="binary")
    with pytest.warns(wcs.FITSFixedWarning):
        wcsobj = wcs.WCS(raw_header)
    # The transformation merely has to run without raising.
    wcsobj.all_pix2world(0, 0, 0)
def test_fixes2():
    """
    Regression test from github issue #1854.
    """
    hdr_bytes = get_pkg_data_contents("data/nonstandard_units.hdr", encoding="binary")
    # With fix=False the nonstandard units must be rejected outright.
    with pytest.raises(wcs.InvalidTransformError):
        wcs.WCS(hdr_bytes, fix=False)
def test_unit_normalization():
    """
    Regression test from github issue #1918.
    """
    hdr_bytes = get_pkg_data_contents("data/unit.hdr", encoding="binary")
    wcsobj = wcs.WCS(hdr_bytes)
    # The unit string from the header should have been normalized to 'm/s'.
    assert wcsobj.wcs.cunit[2] == "m/s"
def test_footprint_to_file(tmp_path):
    """
    From github issue #1912
    """
    # Arbitrary keywords from real data
    hdr = {
        "CTYPE1": "RA---ZPN",
        "CRUNIT1": "deg",
        "CRPIX1": -3.3495999e02,
        "CRVAL1": 3.185790700000e02,
        "CTYPE2": "DEC--ZPN",
        "CRUNIT2": "deg",
        "CRPIX2": 3.0453999e03,
        "CRVAL2": 4.388538000000e01,
        "PV2_1": 1.0,
        "PV2_3": 220.0,
        "NAXIS1": 2048,
        "NAXIS2": 1024,
    }
    w = wcs.WCS(hdr)
    testfile = tmp_path / "test.txt"
    # Default call: the file carries "ICRS" on the third line and a green color.
    w.footprint_to_file(testfile)
    with open(testfile) as f:
        lines = f.readlines()
    assert len(lines) == 4
    assert lines[2] == "ICRS\n"
    assert "color=green" in lines[3]
    # Explicit coordinate system and color are honored.
    w.footprint_to_file(testfile, coordsys="FK5", color="red")
    with open(testfile) as f:
        lines = f.readlines()
    assert len(lines) == 4
    assert lines[2] == "FK5\n"
    assert "color=red" in lines[3]
    # An unrecognized coordinate system is rejected.
    with pytest.raises(ValueError):
        w.footprint_to_file(testfile, coordsys="FOO")
    # Without NAXIS1/NAXIS2 the footprint cannot be computed from the header
    # alone, so a warning is expected.
    del hdr["NAXIS1"]
    del hdr["NAXIS2"]
    w = wcs.WCS(hdr)
    with pytest.warns(AstropyUserWarning):
        w.footprint_to_file(testfile)
# Ignore FITSFixedWarning about keyrecords following the END keyrecord were
# ignored, which comes from src/astropy_wcs.c . Only a blind catch like this
# seems to work when pytest warnings are turned into exceptions.
@pytest.mark.filterwarnings("ignore")
def test_validate_faulty_wcs():
    """
    Regression test from github issue #2053.
    """
    header = fits.Header()
    # Deliberately illegal WCS keywords:
    header["RADESYSA"] = "ICRS"
    header["PV2_1"] = 1.0
    primary = fits.PrimaryHDU([[0]], header=header)
    # Validating the faulty HDU list must not raise a NameError.
    wcs.validate(fits.HDUList([primary]))
def test_error_message():
    hdr_bytes = get_pkg_data_contents("data/invalid_header.hdr", encoding="binary")
    with pytest.raises(wcs.InvalidTransformError):
        # Both statements live inside the raises block, because 0.4 calls
        # .set within WCS.__init__, whereas 0.3 and earlier did not.
        with pytest.warns(wcs.FITSFixedWarning):
            broken = wcs.WCS(hdr_bytes, _do_set=False)
        broken.all_pix2world([[536.0, 894.0]], 0)
def test_out_of_bounds():
    # Regression test for #2107
    hdr_bytes = get_pkg_data_contents("data/zpn-hole.hdr", encoding="binary")
    wcsobj = wcs.WCS(hdr_bytes)
    # A pixel in the projection "hole" maps to NaN world coordinates ...
    lon, lat = wcsobj.wcs_pix2world(110, 110, 0)
    assert np.isnan(lon) and np.isnan(lat)
    # ... while a valid pixel yields finite values.
    lon, lat = wcsobj.wcs_pix2world(0, 0, 0)
    assert not (np.isnan(lon) or np.isnan(lat))
def test_calc_footprint_1():
    """Test calc_footprint with the default (distorted) transformation."""
    sip_file = get_pkg_data_filename("data/sip.fits")
    with pytest.warns(wcs.FITSFixedWarning):
        wcsobj = wcs.WCS(sip_file)
    expected = np.array(
        [
            [202.39314493, 47.17753352],
            [202.71885939, 46.94630488],
            [202.94631893, 47.15855022],
            [202.72053428, 47.37893142],
        ]
    )
    assert_allclose(wcsobj.calc_footprint(axes=(1000, 1051)), expected)
def test_calc_footprint_2():
    """Test calc_footprint without distortion."""
    sip_file = get_pkg_data_filename("data/sip.fits")
    with pytest.warns(wcs.FITSFixedWarning):
        wcsobj = wcs.WCS(sip_file)
    expected = np.array(
        [
            [202.39265216, 47.17756518],
            [202.7469062, 46.91483312],
            [203.11487481, 47.14359319],
            [202.76092671, 47.40745948],
        ]
    )
    assert_allclose(wcsobj.calc_footprint(axes=(1000, 1051), undistort=False), expected)
def test_calc_footprint_3():
    """Test calc_footprint with corner of the pixel."""
    wcsobj = wcs.WCS()
    wcsobj.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
    wcsobj.wcs.crpix = [1.5, 5.5]
    wcsobj.wcs.cdelt = [-0.1, 0.1]
    expected = np.array([[0.1, -0.5], [0.1, 0.5], [359.9, 0.5], [359.9, -0.5]])
    result = wcsobj.calc_footprint(axes=(2, 10), undistort=False, center=False)
    assert_allclose(result, expected)
def test_sip():
    # Regression test for #2107
    hdr_bytes = get_pkg_data_contents("data/irac_sip.hdr", encoding="binary")
    wcsobj = wcs.WCS(hdr_bytes)
    # Pixel -> focal plane ...
    fx, fy = wcsobj.sip_pix2foc(200, 200, 0)
    assert_allclose(72, fx, 1e-3)
    assert_allclose(72, fy, 1e-3)
    # ... and back must round-trip.
    px, py = wcsobj.sip_foc2pix(fx, fy, 0)
    assert_allclose(200, px, 1e-3)
    assert_allclose(200, py, 1e-3)
def test_sub_3d_with_sip():
    # Regression test for #10527
    raw = get_pkg_data_contents("data/irac_sip.hdr", encoding="binary")
    hdr = fits.Header.fromstring(raw)
    # Fake a third axis on top of the 2D SIP header.
    hdr["NAXIS"] = 3
    hdr.set("NAXIS3", 64, after=hdr.index("NAXIS2"))
    # Requesting naxis=2 must drop the extra axis.
    assert wcs.WCS(hdr, naxis=2).naxis == 2
def test_printwcs(capsys):
    """Smoke test: printwcs() runs and emits the expected heading."""
    hdr_bytes = get_pkg_data_contents(
        "data/spectra/orion-freq-1.hdr", encoding="binary"
    )
    with pytest.warns(wcs.FITSFixedWarning):
        wcsobj = wcs.WCS(hdr_bytes)
    wcsobj.printwcs()
    assert "WCS Keywords" in capsys.readouterr().out
    hdr_bytes = get_pkg_data_contents("data/3d_cd.hdr", encoding="binary")
    wcsobj = wcs.WCS(hdr_bytes)
    wcsobj.printwcs()
    assert "WCS Keywords" in capsys.readouterr().out
def test_invalid_spherical():
    """World coordinates outside the valid projection region map to NaN."""
    header = """
SIMPLE = T / conforms to FITS standard
BITPIX = 8 / array data type
WCSAXES = 2 / no comment
CTYPE1 = 'RA---TAN' / TAN (gnomic) projection
CTYPE2 = 'DEC--TAN' / TAN (gnomic) projection
EQUINOX = 2000.0 / Equatorial coordinates definition (yr)
LONPOLE = 180.0 / no comment
LATPOLE = 0.0 / no comment
CRVAL1 = 16.0531567459 / RA of reference point
CRVAL2 = 23.1148929108 / DEC of reference point
CRPIX1 = 2129 / X reference pixel
CRPIX2 = 1417 / Y reference pixel
CUNIT1 = 'deg ' / X pixel scale units
CUNIT2 = 'deg ' / Y pixel scale units
CD1_1 = -0.00912247310646 / Transformation matrix
CD1_2 = -0.00250608809647 / no comment
CD2_1 = 0.00250608809647 / no comment
CD2_2 = -0.00912247310646 / no comment
IMAGEW = 4256 / Image width, in pixels.
IMAGEH = 2832 / Image height, in pixels.
"""
    f = io.StringIO(header)
    header = fits.Header.fromtextfile(f)
    w = wcs.WCS(header)
    # (211, -26) is far from the reference point; for a TAN projection this
    # world coordinate has no valid pixel, so both outputs must be NaN.
    x, y = w.wcs_world2pix(211, -26, 0)
    assert np.isnan(x) and np.isnan(y)
def test_no_iteration():
    """Regression test for #3066"""
    msg_template = "'{}' object is not iterable"
    base = wcs.WCS(naxis=2)
    with pytest.raises(TypeError, match=msg_template.format("WCS")):
        iter(base)

    class NewWCS(wcs.WCS):
        pass

    derived = NewWCS(naxis=2)
    # The error message must name the subclass, not the base class.
    with pytest.raises(TypeError, match=msg_template.format("NewWCS")):
        iter(derived)
@pytest.mark.skipif(
    _wcs.__version__[0] < "5", reason="TPV only works with wcslib 5.x or later"
)
def test_sip_tpv_agreement():
    """SIP and TPV representations of the same distortion must agree at CRPIX,
    both directly and after a to_header round trip."""
    sip_header = get_pkg_data_contents(
        os.path.join("data", "siponly.hdr"), encoding="binary"
    )
    tpv_header = get_pkg_data_contents(
        os.path.join("data", "tpvonly.hdr"), encoding="binary"
    )
    with pytest.warns(wcs.FITSFixedWarning):
        w_sip = wcs.WCS(sip_header)
        w_tpv = wcs.WCS(tpv_header)
        # Direct agreement between the two distortion representations.
        assert_array_almost_equal(
            w_sip.all_pix2world([w_sip.wcs.crpix], 1),
            w_tpv.all_pix2world([w_tpv.wcs.crpix], 1),
        )
        # Each representation must also survive a to_header round trip.
        w_sip2 = wcs.WCS(w_sip.to_header())
        w_tpv2 = wcs.WCS(w_tpv.to_header())
        assert_array_almost_equal(
            w_sip.all_pix2world([w_sip.wcs.crpix], 1),
            w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
        )
        # NOTE(review): the TPV comparisons below evaluate at w_sip.wcs.crpix
        # rather than w_tpv.wcs.crpix — presumably both headers share the same
        # reference pixel; confirm against the data files.
        assert_array_almost_equal(
            w_tpv.all_pix2world([w_sip.wcs.crpix], 1),
            w_tpv2.all_pix2world([w_sip.wcs.crpix], 1),
        )
        assert_array_almost_equal(
            w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
            w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1),
        )
@pytest.mark.skipif(
    _wcs.__version__[0] < "5", reason="TPV only works with wcslib 5.x or later"
)
def test_tpv_copy():
    # Regression test for #3904
    header = get_pkg_data_contents(
        os.path.join("data", "tpvonly.hdr"), encoding="binary"
    )
    with pytest.warns(wcs.FITSFixedWarning):
        tpv = wcs.WCS(header)
    ra, dec = tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0)
    # The three input pixels must map to three distinct world coordinates.
    assert ra[0] != ra[1] and ra[1] != ra[2]
    assert dec[0] != dec[1] and dec[1] != dec[2]
def test_hst_wcs():
    """Exercise an HST-style WCS with lookup-table distortions and SIP."""
    path = get_pkg_data_filename("data/dist_lookup.fits.gz")
    with fits.open(path) as hdulist:
        # wcslib will complain about the distortion parameters if they
        # weren't correctly deleted from the header
        w = wcs.WCS(hdulist[1].header, hdulist)
        # Check pixel scale and area
        assert_quantity_allclose(
            w.proj_plane_pixel_scales(), [1.38484378e-05, 1.39758488e-05] * u.deg
        )
        assert_quantity_allclose(
            w.proj_plane_pixel_area(), 1.93085492e-10 * (u.deg * u.deg)
        )
        # Exercise the main transformation functions, mainly just for
        # coverage
        w.p4_pix2foc([0, 100, 200], [0, -100, 200], 0)
        w.det2im([0, 100, 200], [0, -100, 200], 0)
        # Self-assignments deliberately exercise the property setters.
        w.cpdis1 = w.cpdis1
        w.cpdis2 = w.cpdis2
        w.det2im1 = w.det2im1
        w.det2im2 = w.det2im2
        w.sip = w.sip
        w.cpdis1.cdelt = w.cpdis1.cdelt
        w.cpdis1.crpix = w.cpdis1.crpix
        w.cpdis1.crval = w.cpdis1.crval
        w.cpdis1.data = w.cpdis1.data
        # SIP metadata from the file: forward coefficients only (no AP/BP).
        assert w.sip.a_order == 4
        assert w.sip.b_order == 4
        assert w.sip.ap_order == 0
        assert w.sip.bp_order == 0
        assert_array_equal(w.sip.crpix, [2048.0, 1024.0])
        # Constructing a second WCS from the same HDU list must still work.
        wcs.WCS(hdulist[1].header, hdulist)
def test_cpdis_comments():
    """CPDIS/DP lookup-table keywords survive a to_fits round trip with their
    comments intact."""
    path = get_pkg_data_filename("data/dist_lookup.fits.gz")
    # Use a context manager so the file is closed even if WCS construction
    # or to_fits() raises (the original leaked the handle in that case).
    with fits.open(path) as f:
        w = wcs.WCS(f[1].header, f)
        hdr = w.to_fits()[0].header
    wcscards = list(hdr["CPDIS*"].cards) + list(hdr["DP*"].cards)
    wcsdict = {k: (v, c) for k, v, c in wcscards}
    # Expected (keyword, value, comment) triples for both distortion axes.
    refcards = [
        ("CPDIS1", "LOOKUP", "Prior distortion function type"),
        ("DP1.EXTVER", 1.0, "Version number of WCSDVARR extension"),
        ("DP1.NAXES", 2.0, "Number of independent variables in CPDIS function"),
        ("DP1.AXIS.1", 1.0, "Axis number of the 1st variable in a CPDIS function"),
        ("DP1.AXIS.2", 2.0, "Axis number of the 2nd variable in a CPDIS function"),
        ("CPDIS2", "LOOKUP", "Prior distortion function type"),
        ("DP2.EXTVER", 2.0, "Version number of WCSDVARR extension"),
        ("DP2.NAXES", 2.0, "Number of independent variables in CPDIS function"),
        ("DP2.AXIS.1", 1.0, "Axis number of the 1st variable in a CPDIS function"),
        ("DP2.AXIS.2", 2.0, "Axis number of the 2nd variable in a CPDIS function"),
    ]
    # No extra distortion keywords, and every expected card matches exactly.
    assert len(wcsdict) == len(refcards)
    for k, v, c in refcards:
        assert wcsdict[k] == (v, c)
def test_d2im_comments():
    """D2IM (detector-to-image) keywords survive a to_fits round trip with
    their comments intact."""
    path = get_pkg_data_filename("data/ie6d07ujq_wcs.fits")
    # Use a context manager so the file is closed even if WCS construction
    # raises (the original called f.close() manually and leaked on error).
    with fits.open(path) as f:
        with pytest.warns(wcs.FITSFixedWarning):
            w = wcs.WCS(f[0].header, f)
    wcscards = list(w.to_fits()[0].header["D2IM*"].cards)
    wcsdict = {k: (v, c) for k, v, c in wcscards}
    # Expected (keyword, value, comment) triples for both correction axes.
    refcards = [
        ("D2IMDIS1", "LOOKUP", "Detector to image correction type"),
        ("D2IM1.EXTVER", 1.0, "Version number of WCSDVARR extension"),
        ("D2IM1.NAXES", 2.0, "Number of independent variables in D2IM function"),
        ("D2IM1.AXIS.1", 1.0, "Axis number of the 1st variable in a D2IM function"),
        ("D2IM1.AXIS.2", 2.0, "Axis number of the 2nd variable in a D2IM function"),
        ("D2IMDIS2", "LOOKUP", "Detector to image correction type"),
        ("D2IM2.EXTVER", 2.0, "Version number of WCSDVARR extension"),
        ("D2IM2.NAXES", 2.0, "Number of independent variables in D2IM function"),
        ("D2IM2.AXIS.1", 1.0, "Axis number of the 1st variable in a D2IM function"),
        ("D2IM2.AXIS.2", 2.0, "Axis number of the 2nd variable in a D2IM function"),
        # ('D2IMERR1', 0.049, 'Maximum error of D2IM correction for axis 1'),
        # ('D2IMERR2', 0.035, 'Maximum error of D2IM correction for axis 2'),
        # ('D2IMEXT', 'iref$y7b1516hi_d2i.fits', ''),
    ]
    assert len(wcsdict) == len(refcards)
    for k, v, c in refcards:
        assert wcsdict[k] == (v, c)
def test_sip_broken():
    # This header used to make wcslib segfault because the SIP
    # specification lives in a non-default keyword; constructing the
    # WCS merely has to succeed.
    header_text = get_pkg_data_contents("data/sip-broken.hdr")
    wcs.WCS(header_text)
def test_no_truncate_crval():
    """
    Regression test for https://github.com/astropy/astropy/issues/4612
    """
    wcsobj = wcs.WCS(naxis=3)
    wcsobj.wcs.crval = [50, 50, 2.12345678e11]
    wcsobj.wcs.cdelt = [1e-3, 1e-3, 1e8]
    wcsobj.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
    wcsobj.wcs.set()

    hdr = wcsobj.to_header()
    # Header values must not lose precision relative to the wcsprm values.
    for axis, (crval, cdelt) in enumerate(
        zip(wcsobj.wcs.crval, wcsobj.wcs.cdelt), start=1
    ):
        assert hdr[f"CRVAL{axis}"] == crval
        assert hdr[f"CDELT{axis}"] == cdelt
def test_no_truncate_crval_try2():
    """
    Regression test for https://github.com/astropy/astropy/issues/4612
    """
    wcsobj = wcs.WCS(naxis=3)
    wcsobj.wcs.crval = [50, 50, 2.12345678e11]
    wcsobj.wcs.cdelt = [1e-5, 1e-5, 1e5]
    wcsobj.wcs.ctype = ["RA---SIN", "DEC--SIN", "FREQ"]
    wcsobj.wcs.cunit = ["deg", "deg", "Hz"]
    wcsobj.wcs.crpix = [1, 1, 1]
    wcsobj.wcs.restfrq = 2.34e11
    wcsobj.wcs.set()

    hdr = wcsobj.to_header()
    # Header values must not lose precision relative to the wcsprm values.
    for axis, (crval, cdelt) in enumerate(
        zip(wcsobj.wcs.crval, wcsobj.wcs.cdelt), start=1
    ):
        assert hdr[f"CRVAL{axis}"] == crval
        assert hdr[f"CDELT{axis}"] == cdelt
def test_no_truncate_crval_p17():
    """
    Regression test for https://github.com/astropy/astropy/issues/5162
    """
    wcsobj = wcs.WCS(naxis=2)
    wcsobj.wcs.crval = [50.1234567890123456, 50.1234567890123456]
    wcsobj.wcs.cdelt = [1e-3, 1e-3]
    wcsobj.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    wcsobj.wcs.set()

    # Default precision truncates the long CRVAL values ...
    hdr = wcsobj.to_header()
    assert hdr["CRVAL1"] != wcsobj.wcs.crval[0]
    assert hdr["CRVAL2"] != wcsobj.wcs.crval[1]
    # ... while WCSHDO_P17 writes them at full precision.
    hdr = wcsobj.to_header(relax=wcs.WCSHDO_P17)
    assert hdr["CRVAL1"] == wcsobj.wcs.crval[0]
    assert hdr["CRVAL2"] == wcsobj.wcs.crval[1]
def test_no_truncate_using_compare():
    """
    Regression test for https://github.com/astropy/astropy/issues/4612

    This one uses WCS.wcs.compare and some slightly different values.
    """
    wcsobj = wcs.WCS(naxis=3)
    wcsobj.wcs.crval = [2.409303333333e02, 50, 2.12345678e11]
    wcsobj.wcs.cdelt = [1e-3, 1e-3, 1e8]
    wcsobj.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
    wcsobj.wcs.set()
    # Round trip through a header and compare at the wcsprm level.
    roundtripped = wcs.WCS(wcsobj.to_header())
    wcsobj.wcs.compare(roundtripped.wcs)
def test_passing_ImageHDU():
    """
    Passing ImageHDU or PrimaryHDU and comparing it with
    wcs initialized from header. For #4493.
    """
    path = get_pkg_data_filename("data/validate.fits")
    with fits.open(path) as hdulist:
        # PrimaryHDU: constructing either way emits the fix warning.
        with pytest.warns(wcs.FITSFixedWarning):
            from_hdu = wcs.WCS(hdulist[0])
            from_header = wcs.WCS(hdulist[0].header)
        assert from_hdu.wcs.compare(from_header.wcs)
        # ImageHDU: no warning expected.
        from_hdu = wcs.WCS(hdulist[1])
        from_header = wcs.WCS(hdulist[1].header)
        assert from_hdu.wcs.compare(from_header.wcs)
def test_inconsistent_sip():
    """
    Test for #4814
    """
    hdr = get_pkg_data_contents("data/sip-broken.hdr")
    ctx = ctx_for_v71_dateref_warnings()
    with ctx:
        w = wcs.WCS(hdr)
    # Dropping the SIP coefficients while CTYPE still says "-SIP" warns.
    with pytest.warns(AstropyWarning):
        newhdr = w.to_header(relax=None)
    # CTYPE should not include "-SIP" if relax is None
    with ctx:
        wnew = wcs.WCS(newhdr)
    assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
    newhdr = w.to_header(relax=False)
    assert "A_0_2" not in newhdr
    # CTYPE should not include "-SIP" if relax is False
    with ctx:
        wnew = wcs.WCS(newhdr)
    assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
    with pytest.warns(AstropyWarning):
        newhdr = w.to_header(key="C")
    assert "A_0_2" not in newhdr
    # Test writing header with a different key
    with ctx:
        wnew = wcs.WCS(newhdr, key="C")
    assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
    with pytest.warns(AstropyWarning):
        newhdr = w.to_header(key=" ")
    # Test writing a primary WCS to header
    with ctx:
        wnew = wcs.WCS(newhdr)
    assert all(not ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
    # Test that "-SIP" is kept into CTYPE if relax=True and
    # "-SIP" was in the original header
    newhdr = w.to_header(relax=True)
    with ctx:
        wnew = wcs.WCS(newhdr)
    assert all(ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
    assert "A_0_2" in newhdr
    # Test that SIP coefficients are also written out.
    assert wnew.sip is not None
    # ######### broken header ###########
    # Test that "-SIP" is added to CTYPE if relax=True and
    # "-SIP" was not in the original header but SIP coefficients
    # are present.
    with ctx:
        w = wcs.WCS(hdr)
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    newhdr = w.to_header(relax=True)
    with ctx:
        wnew = wcs.WCS(newhdr)
    assert all(ctyp.endswith("-SIP") for ctyp in wnew.wcs.ctype)
def test_bounds_check():
    """Regression test for #4957: with bounds checking disabled,
    out-of-range pixels must still transform."""
    wcsobj = wcs.WCS(naxis=2)
    wcsobj.wcs.ctype = ["RA---CAR", "DEC--CAR"]
    wcsobj.wcs.cdelt = [10, 10]
    wcsobj.wcs.crval = [-90, 90]
    wcsobj.wcs.crpix = [1, 1]
    wcsobj.wcs.bounds_check(False, False)
    lon, lat = wcsobj.wcs_pix2world(300, 0, 0)
    assert_allclose(lon, -180)
    assert_allclose(lat, -30)
def test_naxis():
    """pixel_shape / array_shape / _naxis stay mutually consistent."""
    wcsobj = wcs.WCS(naxis=2)
    wcsobj.wcs.crval = [1, 1]
    wcsobj.wcs.cdelt = [0.1, 0.1]
    wcsobj.wcs.crpix = [1, 1]
    wcsobj._naxis = [1000, 500]

    # pixel_shape is (nx, ny); array_shape is the numpy-ordered reverse.
    assert wcsobj.pixel_shape == (1000, 500)
    assert wcsobj.array_shape == (500, 1000)

    wcsobj.pixel_shape = (99, 59)
    assert wcsobj._naxis == [99, 59]

    wcsobj.array_shape = (45, 23)
    assert wcsobj._naxis == [23, 45]
    assert wcsobj.pixel_shape == (23, 45)

    wcsobj.pixel_shape = None
    assert wcsobj.pixel_bounds is None
def test_sip_with_altkey():
    """
    Test that when creating a WCS object using a key, CTYPE with
    that key is looked at and not the primary CTYPE.
    fix for #5443.
    """
    with fits.open(get_pkg_data_filename("data/sip.fits")) as f:
        with pytest.warns(wcs.FITSFixedWarning):
            base = wcs.WCS(f[0].header)
        # Build a header carrying two WCSs: primary and key "A".
        hdr_a = base.to_header(relax=True, key="A")
        hdr_primary = base.to_header(relax=False)
        hdr_a["CTYPE1A"] = "RA---SIN-SIP"
        hdr_a["CTYPE2A"] = "DEC--SIN-SIP"
        hdr_a.update(hdr_primary)
        with ctx_for_v71_dateref_warnings():
            keyed = wcs.WCS(hdr_a, key="A")
        # The keyed CTYPE must win over the primary one.
        assert (keyed.wcs.ctype == np.array(["RA---SIN-SIP", "DEC--SIN-SIP"])).all()
def test_to_fits_1():
    """
    Test to_fits() with LookupTable distortion.
    """
    fits_name = get_pkg_data_filename("data/dist.fits")
    # The AXISCORR-style D2IM correction in this file is deprecated.
    with pytest.warns(AstropyDeprecationWarning):
        wcsobj = wcs.WCS(fits_name)
    hdus = wcsobj.to_fits()
    # The result is an HDU list: primary HDU plus the distortion image.
    assert isinstance(hdus, fits.HDUList)
    assert isinstance(hdus[0], fits.PrimaryHDU)
    assert isinstance(hdus[1], fits.ImageHDU)
def test_keyedsip():
    """
    Test sip reading with extra key.
    """
    hdr_name = get_pkg_data_filename("data/sip-broken.hdr")
    header = fits.Header.fromfile(hdr_name)
    # Remove the primary reference pixel; the keyed SIP must still load.
    del header["CRPIX1"]
    del header["CRPIX2"]

    keyed = wcs.WCS(header=header, key="A")
    assert isinstance(keyed.sip, wcs.Sip)
    assert keyed.sip.crpix[0] == 2048
    assert keyed.sip.crpix[1] == 1026
def test_zero_size_input():
    """Zero-length inputs pass through the transforms unchanged."""
    with fits.open(get_pkg_data_filename("data/sip.fits")) as f:
        with pytest.warns(wcs.FITSFixedWarning):
            w = wcs.WCS(f[0].header)

        # An empty (0, 2) array survives both directions.
        empty = np.zeros((0, 2))
        assert_array_equal(empty, w.all_pix2world(empty, 0))
        assert_array_equal(empty, w.all_world2pix(empty, 0))

        # Mixed empty / one-element per-axis lists behave the same way
        # in both directions.
        expected = [], [1]
        for transform in (w.all_pix2world, w.all_world2pix):
            result = transform([], [1], 0)
            assert_array_equal(expected[0], result[0])
            assert_array_equal(expected[1], result[1])
def test_scalar_inputs():
    """
    Regression test for issue #7845.
    """
    wcsobj = wcs.WCS(naxis=1)
    # A scalar input yields a zero-dimensional output array ...
    scalar_out = wcsobj.all_pix2world(2, 1)
    assert_array_equal(scalar_out, [np.array(2.0)])
    assert scalar_out[0].shape == ()
    # ... while a length-1 list yields a one-element array.
    vector_out = wcsobj.all_pix2world([2], 1)
    assert_array_equal(vector_out, [np.array([2.0])])
    assert vector_out[0].shape == (1,)
# Ignore RuntimeWarning raised on s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in.*")
def test_footprint_contains():
    """
    Test WCS.footprint_contains(skycoord)
    """
    header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
    header = fits.Header.fromstring(header.strip(), "\n")
    test_wcs = wcs.WCS(header)
    # A coordinate near CRVAL is inside the footprint ...
    hasCoord = test_wcs.footprint_contains(SkyCoord(254, 2, unit="deg"))
    assert hasCoord
    # ... while coordinates far from it are not.
    hasCoord = test_wcs.footprint_contains(SkyCoord(240, 2, unit="deg"))
    assert not hasCoord
    hasCoord = test_wcs.footprint_contains(SkyCoord(24, 2, unit="deg"))
    assert not hasCoord
def test_cunit():
    """Equality/inequality semantics of the ``wcs.cunit`` list object."""
    # Initializing WCS
    w1 = wcs.WCS(naxis=2)
    w2 = wcs.WCS(naxis=2)
    w3 = wcs.WCS(naxis=2)
    w4 = wcs.WCS(naxis=2)
    # Initializing the values of cunit
    w1.wcs.cunit = ["deg", "m/s"]
    w2.wcs.cunit = ["km/h", "km/h"]
    w3.wcs.cunit = ["deg", "m/s"]
    w4.wcs.cunit = ["deg", "deg"]
    # Equality checking a cunit with itself
    assert w1.wcs.cunit == w1.wcs.cunit
    assert not w1.wcs.cunit != w1.wcs.cunit
    # Equality checking of two different cunit object having same values
    assert w1.wcs.cunit == w3.wcs.cunit
    assert not w1.wcs.cunit != w3.wcs.cunit
    # Equality checking of two different cunit object having the same first unit
    # but different second unit (see #9154)
    assert not w1.wcs.cunit == w4.wcs.cunit
    assert w1.wcs.cunit != w4.wcs.cunit
    # Inequality checking of two different cunit object having different values
    assert not w1.wcs.cunit == w2.wcs.cunit
    assert w1.wcs.cunit != w2.wcs.cunit
    # Inequality checking of cunit with a list of literals
    assert not w1.wcs.cunit == [1, 2, 3]
    assert w1.wcs.cunit != [1, 2, 3]
    # Inequality checking with some characters
    assert not w1.wcs.cunit == ["a", "b", "c"]
    assert w1.wcs.cunit != ["a", "b", "c"]
    # Comparison is not implemented TypeError will raise
    with pytest.raises(TypeError):
        w1.wcs.cunit < w2.wcs.cunit
class TestWcsWithTime:
    """Tests for a 4-axis WCS (two celestial, one spectral, one time)."""

    def setup_method(self):
        # wcslib >= 7.1 writes slightly different headers for the same WCS.
        if _WCSLIB_VER >= Version("7.1"):
            fname = get_pkg_data_filename("data/header_with_time_wcslib71.fits")
        else:
            fname = get_pkg_data_filename("data/header_with_time.fits")
        self.header = fits.Header.fromfile(fname)
        with pytest.warns(wcs.FITSFixedWarning):
            self.w = wcs.WCS(self.header, key="A")

    def test_keywods2wcsprm(self):
        """Make sure Wcsprm is populated correctly from the header."""
        ctype = [self.header[val] for val in self.header["CTYPE*"]]
        crval = [self.header[val] for val in self.header["CRVAL*"]]
        crpix = [self.header[val] for val in self.header["CRPIX*"]]
        cdelt = [self.header[val] for val in self.header["CDELT*"]]
        cunit = [self.header[val] for val in self.header["CUNIT*"]]
        assert list(self.w.wcs.ctype) == ctype
        # wcslib >= 7.9 assigns a dedicated axis-type code for TIME axes.
        time_axis_code = 4000 if _WCSLIB_VER >= Version("7.9") else 0
        assert list(self.w.wcs.axis_types) == [2200, 2201, 3300, time_axis_code]
        assert_allclose(self.w.wcs.crval, crval)
        assert_allclose(self.w.wcs.crpix, crpix)
        assert_allclose(self.w.wcs.cdelt, cdelt)
        assert list(self.w.wcs.cunit) == cunit
        naxis = self.w.naxis
        assert naxis == 4
        # Rebuild the PC matrix from the header, defaulting to identity.
        pc = np.zeros((naxis, naxis), dtype=np.float64)
        for i in range(1, 5):
            for j in range(1, 5):
                if i == j:
                    pc[i - 1, j - 1] = self.header.get(f"PC{i}_{j}A", 1)
                else:
                    pc[i - 1, j - 1] = self.header.get(f"PC{i}_{j}A", 0)
        assert_allclose(self.w.wcs.pc, pc)
        # String-valued time keywords default to "" when absent.
        char_keys = [
            "timesys",
            "trefpos",
            "trefdir",
            "plephem",
            "timeunit",
            "dateref",
            "dateobs",
            "datebeg",
            "dateavg",
            "dateend",
        ]
        for key in char_keys:
            assert getattr(self.w.wcs, key) == self.header.get(key, "")
        # Numeric time keywords default to NaN when absent.
        num_keys = [
            "mjdref",
            "mjdobs",
            "mjdbeg",
            "mjdend",
            "jepoch",
            "bepoch",
            "tstart",
            "tstop",
            "xposure",
            "timsyer",
            "timrder",
            "timedel",
            "timepixr",
            "timeoffs",
            "telapse",
            "czphs",
            "cperi",
        ]
        for key in num_keys:
            if key.upper() == "MJDREF":
                # MJDREF is split into integer and fractional parts.
                hdrv = [
                    self.header.get("MJDREFIA", np.nan),
                    self.header.get("MJDREFFA", np.nan),
                ]
            else:
                hdrv = self.header.get(key, np.nan)
            assert_allclose(getattr(self.w.wcs, key), hdrv)

    def test_transforms(self):
        # The reference pixel must map exactly onto the reference values.
        assert_allclose(self.w.all_pix2world(*self.w.wcs.crpix, 1), self.w.wcs.crval)
def test_invalid_coordinate_masking():
    # Regression test for an issue which caused all coordinates to be set to
    # NaN after a transformation rather than just the invalid ones as reported
    # by WCSLIB. A specific example of this is that when considering an
    # all-sky spectral cube with a spectral axis that is not correlated with
    # the sky axes, if transforming pixel coordinates that did not fall 'in'
    # the sky, the spectral world value was also masked even though that
    # coordinate was valid.
    wcsobj = wcs.WCS(naxis=3)
    wcsobj.wcs.ctype = "VELO_LSR", "GLON-CAR", "GLAT-CAR"
    wcsobj.wcs.crval = -20, 0, 0
    wcsobj.wcs.crpix = 1, 1441, 241
    wcsobj.wcs.cdelt = 1.3, -0.125, 0.125

    pix = ([-10, -10, 20], [-10, 10, 20], [-10, 10, 20])
    wx, wy, wz = wcsobj.wcs_pix2world(*pix, 0)
    # Before fixing this, wx used to return np.nan for the first element
    assert_allclose(wx, [-33, -33, 6])
    assert_allclose(wy, [np.nan, 178.75, 177.5])
    assert_allclose(wz, [np.nan, -28.75, -27.5])
def test_no_pixel_area():
    wcsobj = wcs.WCS(naxis=3)
    # Pixel area is only defined for exactly two pixel dimensions.
    with pytest.raises(ValueError, match="Pixel area is defined only for 2D pixels"):
        wcsobj.proj_plane_pixel_area()
    # Per-axis pixel scales remain available regardless.
    assert_quantity_allclose(wcsobj.proj_plane_pixel_scales(), 1)
def test_distortion_header(tmp_path):
    """
    Test that plate distortion model is correctly described by `wcs.to_header()`
    and preserved when creating a Cutout2D from the image, writing it to FITS,
    and reading it back from the file.
    """
    path = get_pkg_data_filename("data/dss.14.29.56-62.41.05.fits.gz")
    cen = np.array((50, 50))
    siz = np.array((20, 20))
    with fits.open(path) as hdulist:
        with pytest.warns(wcs.FITSFixedWarning):
            w = wcs.WCS(hdulist[0].header)
        cut = Cutout2D(hdulist[0].data, position=cen, size=siz, wcs=w)
    # This converts the DSS plate solution model with AMD[XY]n coefficients into a
    # Template Polynomial Distortion model (TPD.FWD.n coefficients);
    # not testing explicitly for the header keywords here.
    if _WCSLIB_VER < Version("7.4"):
        # Old wcslib warns about the TPD model when re-reading the header.
        with pytest.warns(
            AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"
        ):
            w0 = wcs.WCS(w.to_header_string())
        with pytest.warns(
            AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"
        ):
            w1 = wcs.WCS(cut.wcs.to_header_string())
        if _WCSLIB_VER >= Version("7.1"):
            pytest.xfail("TPD coefficients incomplete with WCSLIB >= 7.1 < 7.4")
    else:
        w0 = wcs.WCS(w.to_header_string())
        w1 = wcs.WCS(cut.wcs.to_header_string())
    # Round-tripped WCSs must agree with the original to sub-mas precision,
    # both at the image origin and at the cutout center.
    assert w.pixel_to_world(0, 0).separation(w0.pixel_to_world(0, 0)) < 1.0e-3 * u.mas
    assert w.pixel_to_world(*cen).separation(w0.pixel_to_world(*cen)) < 1.0e-3 * u.mas
    assert (
        w.pixel_to_world(*cen).separation(w1.pixel_to_world(*(siz / 2)))
        < 1.0e-3 * u.mas
    )
    # The distortion must also survive a write/read cycle through a file.
    cutfile = tmp_path / "cutout.fits"
    fits.writeto(cutfile, cut.data, cut.wcs.to_header())
    with fits.open(cutfile) as hdulist:
        w2 = wcs.WCS(hdulist[0].header)
    assert (
        w.pixel_to_world(*cen).separation(w2.pixel_to_world(*(siz / 2)))
        < 1.0e-3 * u.mas
    )
def test_pixlist_wcs_colsel():
    """
    Test selection of a specific pixel list WCS using ``colsel``. See #11412.
    """
    hdr = fits.Header.fromtextfile(
        get_pkg_data_filename("data/chandra-pixlist-wcs.hdr")
    )
    with pytest.warns(wcs.FITSFixedWarning):
        wcsobj = wcs.WCS(hdr, keysel=["image", "pixel"], colsel=[11, 12])
    assert wcsobj.naxis == 2
    assert list(wcsobj.wcs.ctype) == ["RA---TAN", "DEC--TAN"]
    assert np.allclose(wcsobj.wcs.crval, [229.38051931869, -58.81108068885])
    assert np.allclose(wcsobj.wcs.pc, [[1, 0], [0, 1]])
    assert np.allclose(wcsobj.wcs.cdelt, [-0.00013666666666666, 0.00013666666666666])
    assert np.allclose(wcsobj.wcs.lonpole, 180.0)
@pytest.mark.skipif(
    _WCSLIB_VER < Version("7.8"),
    reason="TIME axis extraction only works with wcslib 7.8 or later",
)
def test_time_axis_selection():
    full = wcs.WCS(naxis=3)
    full.wcs.ctype = ["RA---TAN", "DEC--TAN", "TIME"]
    full.wcs.set()
    time_only = full.sub([wcs.WCSSUB_TIME])
    assert list(time_only.wcs.ctype) == ["TIME"]
    # The extracted time axis must transform identically to the full WCS.
    assert (
        full.wcs_pix2world([[1, 2, 3]], 0)[0, 2]
        == time_only.wcs_pix2world([[3]], 0)[0, 0]
    )
@pytest.mark.skipif(
    _WCSLIB_VER < Version("7.8"),
    reason="TIME axis extraction only works with wcslib 7.8 or later",
)
def test_temporal():
    full = wcs.WCS(naxis=3)
    full.wcs.ctype = ["RA---TAN", "DEC--TAN", "TIME"]
    full.wcs.set()
    assert full.has_temporal
    assert full.sub([wcs.WCSSUB_TIME]).is_temporal
    # The .temporal shortcut must agree with the full transformation.
    assert (
        full.wcs_pix2world([[1, 2, 3]], 0)[0, 2]
        == full.temporal.wcs_pix2world([[3]], 0)[0, 0]
    )
def test_swapaxes_same_val_roundtrip():
    """Axis-swapped sub-WCS agrees with the original and round-trips."""
    w = wcs.WCS(naxis=3)
    w.wcs.ctype = ["RA---TAN", "DEC--TAN", "FREQ"]
    w.wcs.crpix = [32.5, 16.5, 1.0]
    w.wcs.crval = [5.63, -72.05, 1.0]
    w.wcs.pc = [[5.9e-06, 1.3e-05, 0.0], [-1.2e-05, 5.0e-06, 0.0], [0.0, 0.0, 1.0]]
    w.wcs.cdelt = [1.0, 1.0, 1.0]
    w.wcs.set()
    order = [3, 2, 1]
    order0 = [k - 1 for k in order]
    swapped = w.sub(order)
    pix = np.array([3, 5, 7])
    pix_swapped = pix[order0]
    world_ref = w.wcs_pix2world([pix], 0)[0]
    world_swapped = swapped.wcs_pix2world([pix_swapped], 0)[0]
    # Swapped axes must give the same world values, just reordered.
    assert np.allclose(world_ref[order0], world_swapped, rtol=0, atol=1e-8)
    # And the world -> pixel direction must round-trip exactly.
    assert np.allclose(w.wcs_world2pix([world_ref], 0)[0], pix, rtol=0, atol=1e-8)
|
e5393d69dcf9f528a1495c89f128e706e31f82a142a365d6ffa344d0f2a65822 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pickle
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from astropy import wcs
from astropy.io import fits
from astropy.utils.data import (
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_fileobj,
)
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import NumpyRNGContext
from astropy.wcs.wcs import FITSFixedWarning
def test_basic():
    """A default-constructed WCS must survive a pickle round trip."""
    original = wcs.WCS()
    pickle.loads(pickle.dumps(original))
def test_dist():
    """Pickling preserves lookup-table (distortion) transformations."""
    with get_pkg_data_fileobj(
        os.path.join("data", "dist.fits"), encoding="binary"
    ) as test_file:
        # Close the HDU list deterministically; the original leaked it.
        with fits.open(test_file) as hdulist:
            # The use of ``AXISCORR`` for D2IM correction has been deprecated
            with pytest.warns(AstropyDeprecationWarning):
                wcs1 = wcs.WCS(hdulist[0].header, hdulist)
            assert wcs1.det2im2 is not None
            s = pickle.dumps(wcs1)
            wcs2 = pickle.loads(s)

            # Original and unpickled WCS must transform identically.
            with NumpyRNGContext(123456789):
                x = np.random.rand(2**16, wcs1.wcs.naxis)
                world1 = wcs1.all_pix2world(x, 1)
                world2 = wcs2.all_pix2world(x, 1)

            assert_array_almost_equal(world1, world2)
def test_sip():
    """Pickling preserves SIP distortion coefficients."""
    with get_pkg_data_fileobj(
        os.path.join("data", "sip.fits"), encoding="binary"
    ) as test_file:
        # Close the HDU list deterministically; the original leaked it.
        with fits.open(test_file, ignore_missing_end=True) as hdulist:
            with pytest.warns(FITSFixedWarning):
                wcs1 = wcs.WCS(hdulist[0].header)
            assert wcs1.sip is not None
            s = pickle.dumps(wcs1)
            wcs2 = pickle.loads(s)

            # Original and unpickled WCS must transform identically.
            with NumpyRNGContext(123456789):
                x = np.random.rand(2**16, wcs1.wcs.naxis)
                world1 = wcs1.all_pix2world(x, 1)
                world2 = wcs2.all_pix2world(x, 1)

            assert_array_almost_equal(world1, world2)
def test_sip2():
    """Pickling preserves SIP coefficients from a second sample file."""
    with get_pkg_data_fileobj(
        os.path.join("data", "sip2.fits"), encoding="binary"
    ) as test_file:
        # Close the HDU list deterministically; the original leaked it.
        with fits.open(test_file, ignore_missing_end=True) as hdulist:
            with pytest.warns(FITSFixedWarning):
                wcs1 = wcs.WCS(hdulist[0].header)
            assert wcs1.sip is not None
            s = pickle.dumps(wcs1)
            wcs2 = pickle.loads(s)

            # Original and unpickled WCS must transform identically.
            with NumpyRNGContext(123456789):
                x = np.random.rand(2**16, wcs1.wcs.naxis)
                world1 = wcs1.all_pix2world(x, 1)
                world2 = wcs2.all_pix2world(x, 1)

            assert_array_almost_equal(world1, world2)
# Ignore "PV2_2 = 0.209028857410973 invalid keyvalue" warning seen on Windows.
@pytest.mark.filterwarnings(r"ignore:PV2_2")
def test_wcs():
    """A header-only WCS transforms identically after pickling."""
    header = get_pkg_data_contents(
        os.path.join("data", "outside_sky.hdr"), encoding="binary"
    )
    original = wcs.WCS(header)
    restored = pickle.loads(pickle.dumps(original))
    with NumpyRNGContext(123456789):
        pix = np.random.rand(2**16, original.wcs.naxis)
        world1 = original.all_pix2world(pix, 1)
        world2 = restored.all_pix2world(pix, 1)
    assert_array_almost_equal(world1, world2)
class Sub(wcs.WCS):
    """WCS subclass used to verify that pickling preserves subclass state."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Extra instance attribute whose value must survive pickling.
        self.foo = 42
def test_subclass():
    original = Sub()
    original.foo = 45
    restored = pickle.loads(pickle.dumps(original))
    # The subclass type, its extra attribute and the wcsprm all survive.
    assert isinstance(restored, Sub)
    assert original.foo == 45
    assert restored.foo == 45
    assert restored.wcs is not None
def test_axes_info():
    original = wcs.WCS(naxis=3)
    original.pixel_shape = [100, 200, 300]
    original.pixel_bounds = ((11, 22), (33, 45), (55, 67))
    original.extra = 111

    restored = pickle.loads(pickle.dumps(original))

    # explicitly test naxis-related info
    assert original.naxis == restored.naxis
    assert original.pixel_shape == restored.pixel_shape
    assert original.pixel_bounds == restored.pixel_bounds

    # every instance attribute must round-trip
    for name, value in original.__dict__.items():
        assert getattr(restored, name) == value
def test_pixlist_wcs_colsel():
    """
    Test selection of a specific pixel list WCS using ``colsel``. See #11412.
    """
    hdr_file = get_pkg_data_filename("data/chandra-pixlist-wcs.hdr")
    hdr = fits.Header.fromtextfile(hdr_file)
    with pytest.warns(wcs.FITSFixedWarning):
        w0 = wcs.WCS(hdr, keysel=["image", "pixel"], colsel=[11, 12])
    # NOTE(review): unpickling apparently re-validates the header — the same
    # fix warning is expected again; confirm against WCS.__reduce__.
    with pytest.warns(wcs.FITSFixedWarning):
        w = pickle.loads(pickle.dumps(w0))
    # The selected pixel-list columns form a standard 2-axis celestial WCS.
    assert w.naxis == 2
    assert list(w.wcs.ctype) == ["RA---TAN", "DEC--TAN"]
    assert np.allclose(w.wcs.crval, [229.38051931869, -58.81108068885])
    assert np.allclose(w.wcs.pc, [[1, 0], [0, 1]])
    assert np.allclose(w.wcs.cdelt, [-0.00013666666666666, 0.00013666666666666])
    assert np.allclose(w.wcs.lonpole, 180.0)
def test_alt_wcskey():
    """The alternate WCS key given at construction survives pickling."""
    restored = pickle.loads(pickle.dumps(wcs.WCS(key="A")))
    assert restored.wcs.alt == "A"
|
3fbf0853a86cfb4e7e45ac5474efe52e51619d0fe518b0986702feaea1fab6fd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import wcs
from .helper import SimModelTAB
@pytest.fixture(scope="module")
def tab_wcs_2di():
    """Module-scoped WCS built from the simulated 2D -TAB model."""
    model = SimModelTAB(nx=150, ny=200)
    # Build the WCS from the model's generated FITS HDU list.
    hdus = model.hdulist
    return wcs.WCS(hdus[0].header, hdus)
@pytest.fixture(scope="module")
def tab_wcsh_2di():
    """Module-scoped (WCS, HDUList) pair for the simulated 2D -TAB model."""
    model = SimModelTAB(nx=150, ny=200)
    # Return both the WCS and the HDU list it was built from.
    hdus = model.hdulist
    return wcs.WCS(hdus[0].header, hdus), hdus
@pytest.fixture(scope="function")
def tab_wcs_2di_f():
    """Function-scoped (fresh per test) WCS for the simulated 2D -TAB model.

    Same construction as ``tab_wcs_2di`` but function-scoped so tests may
    mutate the returned WCS freely.
    """
    model = SimModelTAB(nx=150, ny=200)
    hdus = model.hdulist
    return wcs.WCS(hdus[0].header, hdus)
@pytest.fixture(scope="function")
def prj_TAB():
    """Function-scoped Prjprm initialized to a TAN projection."""
    projection = wcs.Prjprm()
    projection.code = "TAN"
    projection.set()
    return projection
|
69792e8a427f65ea545039bb6916454ffe6bf1cf27189fa95ac2c75700cc6271 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
def test_wcsprm_tab_basic(tab_wcs_2di):
    """``wcs.tab`` exposes one table; each access yields a fresh wrapper."""
    assert len(tab_wcs_2di.wcs.tab) == 1
    t = tab_wcs_2di.wcs.tab[0]
    # Indexing again returns a new Python wrapper object, not the same one.
    assert tab_wcs_2di.wcs.tab[0] is not t
def test_tabprm_coord(tab_wcs_2di_f):
    """Assigning a new coordinate array writes through to the parent WCS."""
    tabprm = tab_wcs_2di_f.wcs.tab[0]
    replacement = np.ones_like(tabprm.coord)
    tabprm.coord = replacement
    # Re-fetch through the WCS to confirm the assignment was not wrapper-local.
    assert np.allclose(tab_wcs_2di_f.wcs.tab[0].coord, replacement)
def test_tabprm_crval_and_deepcopy(tab_wcs_2di_f):
    """Mutating ``tabprm.crval`` shifts the transform; a deepcopy is isolated."""
    # Snapshot the WCS before mutating so deepcopy independence can be checked.
    w = deepcopy(tab_wcs_2di_f)
    t = tab_wcs_2di_f.wcs.tab[0]
    pix = np.array([[2, 3]], dtype=np.float32)
    rd1 = tab_wcs_2di_f.wcs_pix2world(pix, 1)
    c = t.crval.copy()
    d = 0.5 * np.ones_like(c)
    # In-place shift of crval must be visible through the parent WCS.
    t.crval += d
    assert np.allclose(tab_wcs_2di_f.wcs.tab[0].crval, c + d)
    # Shifting crval by d is equivalent to shifting the pixel input by -d.
    rd2 = tab_wcs_2di_f.wcs_pix2world(pix - d, 1)
    assert np.allclose(rd1, rd2)
    # The deepcopy made before the mutation must be unaffected by it.
    rd3 = w.wcs_pix2world(pix, 1)
    assert np.allclose(rd1, rd3)
def test_tabprm_delta(tab_wcs_2di):
    """A freshly set up tabprm reports zero offsets in ``delta``."""
    assert np.allclose(tab_wcs_2di.wcs.tab[0].delta, [0.0, 0.0])
def test_tabprm_K(tab_wcs_2di):
    """The K vector (axis lengths of the coordinate array) matches the model."""
    assert np.array_equal(tab_wcs_2di.wcs.tab[0].K, [4, 2])
def test_tabprm_M(tab_wcs_2di):
    """The table has two tabular coordinate axes (M == 2)."""
    assert tab_wcs_2di.wcs.tab[0].M == 2
def test_tabprm_nc(tab_wcs_2di):
    """WCSLIB reports 8 coordinate entries (nc) for the simulated table."""
    assert tab_wcs_2di.wcs.tab[0].nc == 8
def test_tabprm_extrema(tab_wcs_2di):
    """``extrema`` (min/max coordinate values per row) matches the model."""
    t = tab_wcs_2di.wcs.tab[0]
    # Reference values derived from the SimModelTAB(nx=150, ny=200) fixture.
    extrema = np.array(
        [
            [[-0.0026, -0.5], [1.001, -0.5]],
            [[-0.0026, 0.5], [1.001, 0.5]],
        ]
    )
    assert np.allclose(t.extrema, extrema)
def test_tabprm_map(tab_wcs_2di_f):
    """``tabprm.map`` supports both item and whole-array assignment."""
    t = tab_wcs_2di_f.wcs.tab[0]
    assert np.allclose(t.map, [0, 1])
    # Item assignment must write through to the underlying WCSLIB struct.
    t.map[1] = 5
    assert np.all(tab_wcs_2di_f.wcs.tab[0].map == [0, 5])
    # Whole-array assignment must as well.
    t.map = [1, 4]
    assert np.all(tab_wcs_2di_f.wcs.tab[0].map == [1, 4])
def test_tabprm_sense(tab_wcs_2di):
    """Both index vectors of the table are monotonically increasing (sense 1)."""
    assert np.array_equal(tab_wcs_2di.wcs.tab[0].sense, [1, 1])
def test_tabprm_p0(tab_wcs_2di):
    """The interpolation start indices (p0) are zero on a fresh table."""
    assert np.array_equal(tab_wcs_2di.wcs.tab[0].p0, [0, 0])
def test_tabprm_print(tab_wcs_2di_f, capfd):
    """``print_contents()`` writes the same text to stdout as ``str()``."""
    tab_wcs_2di_f.wcs.tab[0].print_contents()
    # capfd captures the C-level stdout that WCSLIB writes to.
    captured = capfd.readouterr()
    s = str(tab_wcs_2di_f.wcs.tab[0])
    out = str(captured.out)
    lout = out.split("\n")
    assert out == s
    # Spot-check the first two lines of the WCSLIB dump.
    assert lout[0] == " flag: 137"
    assert lout[1] == " M: 2"
def test_wcstab_copy(tab_wcs_2di_f):
    """Coordinate-array assignment writes through to the parent WCS."""
    # NOTE(review): despite its name this is an exact duplicate of
    # test_tabprm_coord and exercises no copy operation; consider either
    # removing it or making it test an actual copy of the wcstab.
    t = tab_wcs_2di_f.wcs.tab[0]
    c0 = t.coord
    c1 = np.ones_like(c0)
    t.coord = c1
    assert np.allclose(tab_wcs_2di_f.wcs.tab[0].coord, c1)
def test_tabprm_crval(tab_wcs_2di_f):
    """Mutating ``tabprm.crval`` shifts the transform; a deepcopy is isolated."""
    # NOTE(review): this is an exact duplicate of
    # test_tabprm_crval_and_deepcopy above; consider removing one of the two.
    w = deepcopy(tab_wcs_2di_f)
    t = tab_wcs_2di_f.wcs.tab[0]
    pix = np.array([[2, 3]], dtype=np.float32)
    rd1 = tab_wcs_2di_f.wcs_pix2world(pix, 1)
    c = t.crval.copy()
    d = 0.5 * np.ones_like(c)
    # Shifting crval by d is equivalent to shifting pixel input by -d.
    t.crval += d
    assert np.allclose(tab_wcs_2di_f.wcs.tab[0].crval, c + d)
    rd2 = tab_wcs_2di_f.wcs_pix2world(pix - d, 1)
    assert np.allclose(rd1, rd2)
    # The deepcopy made before the mutation must be unaffected.
    rd3 = w.wcs_pix2world(pix, 1)
    assert np.allclose(rd1, rd3)
|
3e6aff0d052f07a27ea849ad0d2752b62d8b50b6a2c4d0af48006354873b0636 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Tests for the auxiliary parameters contained in wcsaux
from numpy.testing import assert_allclose
from astropy.io import fits
from astropy.wcs import WCS
STR_EXPECTED_EMPTY = """
rsun_ref:
dsun_obs:
crln_obs:
hgln_obs:
hglt_obs:""".lstrip()
def test_empty():
    """All auxiliary parameters are unset on a freshly created WCS."""
    w = WCS(naxis=1)
    for attr in ("rsun_ref", "dsun_obs", "crln_obs", "hgln_obs", "hglt_obs"):
        assert getattr(w.wcs.aux, attr) is None
    # The string form lists every parameter with no value.
    assert str(w.wcs.aux) == STR_EXPECTED_EMPTY
HEADER_SOLAR = fits.Header.fromstring(
"""
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 64.5 / Pixel coordinate of reference point
CRPIX2 = 64.5 / Pixel coordinate of reference point
PC1_1 = 0.99999994260024 / Coordinate transformation matrix element
PC1_2 = -0.00033882076120692 / Coordinate transformation matrix element
PC2_1 = 0.00033882076120692 / Coordinate transformation matrix element
PC2_2 = 0.99999994260024 / Coordinate transformation matrix element
CDELT1 = 0.0053287911111111 / [deg] Coordinate increment at reference point
CDELT2 = 0.0053287911111111 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'HPLN-TAN' / Coordinate type codegnomonic projection
CTYPE2 = 'HPLT-TAN' / Coordinate type codegnomonic projection
CRVAL1 = -0.0012589367249586 / [deg] Coordinate value at reference point
CRVAL2 = 0.00079599300143911 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.00079599300143911 / [deg] Native latitude of celestial pole
DATE-OBS= '2011-02-15T00:00:00.34' / ISO-8601 time of observation
MJD-OBS = 55607.000003935 / [d] MJD at start of observation
RSUN_REF= 696000000.0 / [m] Solar radius
DSUN_OBS= 147724815128.0 / [m] Distance from centre of Sun to observer
CRLN_OBS= 22.814522 / [deg] Carrington heliographic lng of observer
CRLT_OBS= -6.820544 / [deg] Heliographic latitude of observer
HGLN_OBS= 8.431123 / [deg] Stonyhurst heliographic lng of observer
HGLT_OBS= -6.820544 / [deg] Heliographic latitude of observer
""".lstrip(),
sep="\n",
)
STR_EXPECTED_GET = """
rsun_ref: 696000000.000000
dsun_obs: 147724815128.000000
crln_obs: 22.814522
hgln_obs: 8.431123
hglt_obs: -6.820544""".lstrip()
def test_solar_aux_get():
    """Auxiliary parameters are read correctly from a solar FITS header."""
    w = WCS(HEADER_SOLAR)
    expected = {
        "rsun_ref": 696000000,
        "dsun_obs": 147724815128,
        "crln_obs": 22.814522,
        "hgln_obs": 8.431123,
        "hglt_obs": -6.820544,
    }
    for attr, value in expected.items():
        assert_allclose(getattr(w.wcs.aux, attr), value)
    assert str(w.wcs.aux) == STR_EXPECTED_GET
STR_EXPECTED_SET = """
rsun_ref: 698000000.000000
dsun_obs: 140000000000.000000
crln_obs: 10.000000
hgln_obs: 30.000000
hglt_obs: 40.000000""".lstrip()
def test_solar_aux_set():
    """Auxiliary parameters can be set and propagate to the output header."""
    w = WCS(HEADER_SOLAR)
    updates = [
        ("rsun_ref", 698000000),
        ("dsun_obs", 140000000000),
        ("crln_obs", 10.0),
        ("hgln_obs", 30.0),
        ("hglt_obs", 40.0),
    ]
    # Each assignment must be readable back immediately.
    for attr, value in updates:
        setattr(w.wcs.aux, attr, value)
        assert_allclose(getattr(w.wcs.aux, attr), value)
    assert str(w.wcs.aux) == STR_EXPECTED_SET
    # The updated values must appear as the corresponding FITS keywords.
    header = w.to_header()
    for attr, value in updates:
        assert_allclose(header[attr.upper()], value)
def test_set_aux_on_empty():
    """Auxiliary parameters can be set on a WCS that started with none."""
    w = WCS(naxis=2)
    updates = [
        ("rsun_ref", 698000000),
        ("dsun_obs", 140000000000),
        ("crln_obs", 10.0),
        ("hgln_obs", 30.0),
        ("hglt_obs", 40.0),
    ]
    for attr, value in updates:
        setattr(w.wcs.aux, attr, value)
        assert_allclose(getattr(w.wcs.aux, attr), value)
    assert str(w.wcs.aux) == STR_EXPECTED_SET
    # The values must also be emitted as FITS keywords.
    header = w.to_header()
    for attr, value in updates:
        assert_allclose(header[attr.upper()], value)
def test_unset_aux():
    """Assigning None clears each aux parameter and drops its header card."""
    w = WCS(HEADER_SOLAR)
    attrs = ("rsun_ref", "dsun_obs", "crln_obs", "hgln_obs", "hglt_obs")
    for attr in attrs:
        # Each parameter starts populated from the solar header ...
        assert getattr(w.wcs.aux, attr) is not None
        # ... and None unsets it.
        setattr(w.wcs.aux, attr, None)
        assert getattr(w.wcs.aux, attr) is None
    assert str(w.wcs.aux) == "rsun_ref:\ndsun_obs:\ncrln_obs:\nhgln_obs:\nhglt_obs:"
    # Cleared parameters must not be written to the output header.
    header = w.to_header()
    for attr in attrs:
        assert attr.upper() not in header
|
245c502e6daf7ec8e23d3deeefa116b323985553d69c045460d78c5bbdfa9fcf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
import pytest
from packaging.version import Version
from astropy import wcs
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs import _wcs
from .helper import SimModelTAB
_WCSLIB_VER = Version(_wcs.__version__)
def test_2d_spatial_tab_roundtrip(tab_wcs_2di):
    """pix -> world -> pix round trip of the 2D -TAB WCS is accurate to 1e-7."""
    nx, ny = tab_wcs_2di.pixel_shape
    # generate "random" test coordinates:
    np.random.seed(1)
    xy = 0.51 + [nx + 0.99, ny + 0.99] * np.random.random((100, 2))
    rd = tab_wcs_2di.wcs_pix2world(xy, 1)
    xy_roundtripped = tab_wcs_2di.wcs_world2pix(rd, 1)
    # Only compare points where the inverse solution is finite (converged).
    m = np.logical_and(*(np.isfinite(xy_roundtripped).T))
    assert np.allclose(xy[m], xy_roundtripped[m], rtol=0, atol=1e-7)
def test_2d_spatial_tab_vs_model():
    """wcs_pix2world of a -TAB WCS matches direct evaluation of the model."""
    nx, ny = 150, 200
    model = SimModelTAB(nx=nx, ny=ny)
    # Build the WCS from the model's generated FITS HDU list.
    hdus = model.hdulist
    w = wcs.WCS(hdus[0].header, hdus)
    # Deterministic "random" test coordinates covering the pixel grid.
    np.random.seed(1)
    xy = 0.51 + [nx + 0.99, ny + 0.99] * np.random.random((100, 2))
    world = w.wcs_pix2world(xy, 1)
    world_model = model.fwd_eval(xy)
    assert np.allclose(world, world_model, rtol=0, atol=1e-7)
@pytest.mark.skipif(
    _WCSLIB_VER < Version("7.6"),
    reason="Only in WCSLIB 7.6 a 1D -TAB axis roundtrips unless first axis",
)
def test_mixed_celest_and_1d_tab_roundtrip():
    """
    Tests WCS roundtripping for the case when there is one -TAB axis and
    this axis is not the first axis. This tests a bug fixed in WCSLIB 7.6.
    """
    filename = get_pkg_data_filename("data/tab-time-last-axis.fits")
    with fits.open(filename) as hdul:
        w = wcs.WCS(hdul[0].header, hdul)

    # Seed the generator (the original used unseeded randoms, so a failure
    # could never be reproduced); sibling tests in this file seed with 1 too.
    np.random.seed(1)
    pts = np.random.random((10, 3)) * [[2047, 2047, 127]]
    assert np.allclose(pts, w.wcs_world2pix(w.wcs_pix2world(pts, 0), 0))
@pytest.mark.skipif(
    _WCSLIB_VER < Version("7.8"),
    reason="Requires WCSLIB >= 7.8 for swapping -TAB axes to work.",
)
def test_wcstab_swapaxes():
    """Regression test: deepcopy of a WCS with swapped -TAB axes must not crash."""
    # Crash on deepcopy of swapped -TAB axes reported in #13036.
    # Fixed in #13063.
    filename = get_pkg_data_filename("data/tab-time-last-axis.fits")
    with fits.open(filename) as hdul:
        w = wcs.WCS(hdul[0].header, hdul)
    # Relabel the last axis as a tabular axis before swapping.
    w.wcs.ctype[-1] = "FREQ-TAB"
    w.wcs.set()
    wswp = w.swapaxes(2, 0)
    # The deepcopy itself is the test; it used to segfault.
    deepcopy(wswp)
@pytest.mark.skipif(
    _WCSLIB_VER < Version("7.8"),
    reason="Requires WCSLIB >= 7.8 for swapping -TAB axes to work.",
)
@pytest.mark.xfail(
    Version("7.8") <= _WCSLIB_VER < Version("7.10"),
    reason="Requires WCSLIB >= 7.10 for swapped -TAB axes to produce same results.",
)
def test_wcstab_swapaxes_same_val_roundtrip():
    """A -TAB WCS with reordered axes transforms consistently and round-trips."""
    filename = get_pkg_data_filename("data/tab-time-last-axis.fits")
    # Reverse the axis order via sub(); axes_order0 is its 0-based equivalent.
    axes_order = [3, 2, 1]
    axes_order0 = list(i - 1 for i in axes_order)
    with fits.open(filename) as hdul:
        w = wcs.WCS(hdul[0].header, hdul)
    w.wcs.ctype[-1] = "FREQ-TAB"
    w.wcs.set()
    ws = w.sub(axes_order)
    imcoord = np.array([3, 5, 7])
    imcoords = imcoord[axes_order0]
    val_ref = w.wcs_pix2world([imcoord], 0)[0]
    val_swapped = ws.wcs_pix2world([imcoords], 0)[0]
    # check original axis and swapped give same results
    assert np.allclose(val_ref[axes_order0], val_swapped, rtol=0, atol=1e-8)
    # check round-tripping:
    assert np.allclose(w.wcs_world2pix([val_ref], 0)[0], imcoord, rtol=0, atol=1e-8)
|
d1f632430336bb21d4fee7036eae8e1fd8c00cbcfdeccea5a9cbe086b2a1505d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import locale
import re
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from packaging.version import Version
from astropy import units as u
from astropy.io import fits
from astropy.units.core import UnitsWarning
from astropy.utils.data import (
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_fileobj,
)
from astropy.utils.misc import _set_locale
from astropy.wcs import _wcs, wcs
from astropy.wcs.wcs import FITSFixedWarning
######################################################################
def test_alt():
    """``alt`` defaults to a single space, accepts a letter, and del resets it."""
    prm = _wcs.Wcsprm()
    assert prm.alt == " "
    prm.alt = "X"
    assert prm.alt == "X"
    # Deleting restores the default single-space key.
    del prm.alt
    assert prm.alt == " "
def test_alt_invalid1():
    """A non-alphabetic character is rejected as the alternate key."""
    prm = _wcs.Wcsprm()
    with pytest.raises(ValueError):
        prm.alt = "$"
def test_alt_invalid2():
    """A bare space cannot be assigned explicitly as the alternate key."""
    prm = _wcs.Wcsprm()
    with pytest.raises(ValueError):
        prm.alt = " "
def test_axis_types():
w = _wcs.Wcsprm()
assert_array_equal(w.axis_types, [0, 0])
def test_cd():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert w.cd.dtype == float
assert w.has_cd() is True
assert_array_equal(w.cd, [[1, 0], [0, 1]])
del w.cd
assert w.has_cd() is False
def test_cd_missing():
w = _wcs.Wcsprm()
assert w.has_cd() is False
with pytest.raises(AttributeError):
w.cd
def test_cd_missing2():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert w.has_cd() is True
del w.cd
assert w.has_cd() is False
with pytest.raises(AttributeError):
w.cd
def test_cd_invalid():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.cd = [1, 0, 0, 1]
def test_cdfix():
w = _wcs.Wcsprm()
w.cdfix()
def test_cdelt():
w = _wcs.Wcsprm()
assert_array_equal(w.cdelt, [1, 1])
w.cdelt = [42, 54]
assert_array_equal(w.cdelt, [42, 54])
def test_cdelt_delete():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
del w.cdelt
def test_cel_offset():
w = _wcs.Wcsprm()
assert w.cel_offset is False
w.cel_offset = "foo"
assert w.cel_offset is True
w.cel_offset = 0
assert w.cel_offset is False
def test_celfix():
# TODO: We need some data with -NCP or -GLS projections to test
# with. For now, this is just a smoke test
w = _wcs.Wcsprm()
assert w.celfix() == -1
def test_cname():
w = _wcs.Wcsprm()
# Test that this works as an iterator
for x in w.cname:
assert x == ""
assert list(w.cname) == ["", ""]
w.cname = [b"foo", "bar"]
assert list(w.cname) == ["foo", "bar"]
def test_cname_invalid():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
w.cname = [42, 54]
def test_colax():
w = _wcs.Wcsprm()
assert w.colax.dtype == np.intc
assert_array_equal(w.colax, [0, 0])
w.colax = [42, 54]
assert_array_equal(w.colax, [42, 54])
w.colax[0] = 0
assert_array_equal(w.colax, [0, 54])
with pytest.raises(ValueError):
w.colax = [1, 2, 3]
def test_colnum():
w = _wcs.Wcsprm()
assert w.colnum == 0
w.colnum = 42
assert w.colnum == 42
with pytest.raises(OverflowError):
w.colnum = 0xFFFFFFFFFFFFFFFFFFFF
with pytest.raises(OverflowError):
w.colnum = 0xFFFFFFFF
with pytest.raises(TypeError):
del w.colnum
def test_colnum_invalid():
w = _wcs.Wcsprm()
with pytest.raises(TypeError):
w.colnum = "foo"
def test_crder():
w = _wcs.Wcsprm()
assert w.crder.dtype == float
assert np.all(np.isnan(w.crder))
w.crder[0] = 0
assert np.isnan(w.crder[1])
assert w.crder[0] == 0
w.crder = w.crder
def test_crota():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.crota.dtype == float
assert w.has_crota() is True
assert_array_equal(w.crota, [1, 0])
del w.crota
assert w.has_crota() is False
def test_crota_missing():
w = _wcs.Wcsprm()
assert w.has_crota() is False
with pytest.raises(AttributeError):
w.crota
def test_crota_missing2():
w = _wcs.Wcsprm()
w.crota = [1, 0]
assert w.has_crota() is True
del w.crota
assert w.has_crota() is False
with pytest.raises(AttributeError):
w.crota
def test_crpix():
w = _wcs.Wcsprm()
assert w.crpix.dtype == float
assert_array_equal(w.crpix, [0, 0])
w.crpix = [42, 54]
assert_array_equal(w.crpix, [42, 54])
w.crpix[0] = 0
assert_array_equal(w.crpix, [0, 54])
with pytest.raises(ValueError):
w.crpix = [1, 2, 3]
def test_crval():
w = _wcs.Wcsprm()
assert w.crval.dtype == float
assert_array_equal(w.crval, [0, 0])
w.crval = [42, 54]
assert_array_equal(w.crval, [42, 54])
w.crval[0] = 0
assert_array_equal(w.crval, [0, 54])
def test_csyer():
w = _wcs.Wcsprm()
assert w.csyer.dtype == float
assert np.all(np.isnan(w.csyer))
w.csyer[0] = 0
assert np.isnan(w.csyer[1])
assert w.csyer[0] == 0
w.csyer = w.csyer
def test_ctype():
w = _wcs.Wcsprm()
assert list(w.ctype) == ["", ""]
w.ctype = [b"RA---TAN", "DEC--TAN"]
assert_array_equal(w.axis_types, [2200, 2201])
assert w.lat == 1
assert w.lng == 0
assert w.lattyp == "DEC"
assert w.lngtyp == "RA"
assert list(w.ctype) == ["RA---TAN", "DEC--TAN"]
w.ctype = ["foo", "bar"]
assert_array_equal(w.axis_types, [0, 0])
assert list(w.ctype) == ["foo", "bar"]
assert w.lat == -1
assert w.lng == -1
assert w.lattyp == "DEC"
assert w.lngtyp == "RA"
def test_ctype_repr():
    """CTYPE values containing control characters survive assignment.

    Fix over the original: the assertion was
    ``assert repr(w.ctype == '[...]')`` — the repr of a (bool) comparison
    result, which is a non-empty string and therefore always truthy, so the
    test could never fail.  Compare the stored values instead.
    """
    w = _wcs.Wcsprm()
    assert list(w.ctype) == ["", ""]
    w.ctype = [b"RA-\t--TAN", "DEC-\n-TAN"]
    # The tab and newline characters must be preserved verbatim.
    assert list(w.ctype) == ["RA-\t--TAN", "DEC-\n-TAN"]
def test_ctype_index_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ["", ""]
for idx in (2, -3):
with pytest.raises(IndexError):
w.ctype[idx]
with pytest.raises(IndexError):
w.ctype[idx] = "FOO"
def test_ctype_invalid_error():
w = _wcs.Wcsprm()
assert list(w.ctype) == ["", ""]
with pytest.raises(ValueError):
w.ctype[0] = "X" * 100
with pytest.raises(TypeError):
w.ctype[0] = True
with pytest.raises(TypeError):
w.ctype = ["a", 0]
with pytest.raises(TypeError):
w.ctype = None
with pytest.raises(ValueError):
w.ctype = ["a", "b", "c"]
with pytest.raises(ValueError):
w.ctype = ["FOO", "A" * 100]
def test_cubeface():
w = _wcs.Wcsprm()
assert w.cubeface == -1
w.cubeface = 0
with pytest.raises(OverflowError):
w.cubeface = -1
def test_cunit():
w = _wcs.Wcsprm()
assert list(w.cunit) == [u.Unit(""), u.Unit("")]
w.cunit = [u.m, "km"]
assert w.cunit[0] == u.m
assert w.cunit[1] == u.km
def test_cunit_invalid():
w = _wcs.Wcsprm()
with pytest.warns(u.UnitsWarning, match="foo") as warns:
w.cunit[0] = "foo"
assert len(warns) == 1
def test_cunit_invalid2():
w = _wcs.Wcsprm()
with pytest.warns(u.UnitsWarning) as warns:
w.cunit = ["foo", "bar"]
assert len(warns) == 2
assert "foo" in str(warns[0].message)
assert "bar" in str(warns[1].message)
def test_unit():
w = wcs.WCS()
w.wcs.cunit[0] = u.erg
assert w.wcs.cunit[0] == u.erg
assert repr(w.wcs.cunit) == "['erg', '']"
def test_unit2():
w = wcs.WCS()
with pytest.warns(UnitsWarning):
myunit = u.Unit("FOOBAR", parse_strict="warn")
w.wcs.cunit[0] = myunit
def test_unit3():
w = wcs.WCS()
for idx in (2, -3):
with pytest.raises(IndexError):
w.wcs.cunit[idx]
with pytest.raises(IndexError):
w.wcs.cunit[idx] = u.m
with pytest.raises(ValueError):
w.wcs.cunit = [u.m, u.m, u.m]
def test_unitfix():
w = _wcs.Wcsprm()
w.unitfix()
def test_cylfix():
# TODO: We need some data with broken cylindrical projections to
# test with. For now, this is just a smoke test.
w = _wcs.Wcsprm()
assert w.cylfix() == -1
assert w.cylfix([0, 1]) == -1
with pytest.raises(ValueError):
w.cylfix([0, 1, 2])
def test_dateavg():
w = _wcs.Wcsprm()
assert w.dateavg == ""
# TODO: When dateavg is verified, check that it works
def test_dateobs():
    """DATE-OBS defaults to an empty string on a fresh Wcsprm."""
    w = _wcs.Wcsprm()
    assert w.dateobs == ""
    # TODO: When dateobs is verified, check that setting it works
    # (the original comment said "dateavg" — a copy-paste from test_dateavg).
def test_datfix():
    """datfix() normalizes a legacy DATE-OBS string and derives MJD-OBS."""
    prm = _wcs.Wcsprm()
    prm.dateobs = "31/12/99"
    # Return code 0 means the fixer succeeded.
    assert prm.datfix() == 0
    assert prm.dateobs == "1999-12-31"
    assert prm.mjdobs == 51543.0
def test_equinox():
w = _wcs.Wcsprm()
assert np.isnan(w.equinox)
w.equinox = 0
assert w.equinox == 0
del w.equinox
assert np.isnan(w.equinox)
with pytest.raises(TypeError):
w.equinox = None
def test_fix():
    """``fix()`` on a default Wcsprm reports "No change" for every fixer.

    Fix over the original: the ``fix_ref`` literal listed the ``"obsfix"``
    key twice; Python keeps only the last occurrence, so the duplicate was
    dead code and is removed here (same resulting dict).
    """
    w = _wcs.Wcsprm()
    fix_ref = {
        "cdfix": "No change",
        "cylfix": "No change",
        "obsfix": "No change",
        "datfix": "No change",
        "spcfix": "No change",
        "unitfix": "No change",
        "celfix": "No change",
    }
    # Older/newer WCSLIB versions report slightly different fixer sets/messages.
    version = wcs._wcs.__version__
    if Version(version) <= Version("5"):
        del fix_ref["obsfix"]
    if Version(version) >= Version("7.1"):
        w.dateref = "1858-11-17"
    if Version("7.4") <= Version(version) < Version("7.6"):
        fix_ref["datfix"] = "Success"
    assert w.fix() == fix_ref
def test_fix2():
w = _wcs.Wcsprm()
w.dateobs = "31/12/99"
fix_ref = {
"cdfix": "No change",
"cylfix": "No change",
"obsfix": "No change",
"datfix": (
"Set MJD-OBS to 51543.000000 from DATE-OBS.\n"
"Changed DATE-OBS from '31/12/99' to '1999-12-31'"
),
"spcfix": "No change",
"unitfix": "No change",
"celfix": "No change",
}
version = wcs._wcs.__version__
if Version(version) <= Version("5"):
del fix_ref["obsfix"]
fix_ref["datfix"] = "Changed '31/12/99' to '1999-12-31'"
if Version(version) >= Version("7.3"):
fix_ref["datfix"] = (
"Set DATEREF to '1858-11-17' from MJDREF.\n" + fix_ref["datfix"]
)
elif Version(version) >= Version("7.1"):
fix_ref["datfix"] = (
"Set DATE-REF to '1858-11-17' from MJD-REF.\n" + fix_ref["datfix"]
)
assert w.fix() == fix_ref
assert w.dateobs == "1999-12-31"
assert w.mjdobs == 51543.0
def test_fix3():
w = _wcs.Wcsprm()
w.dateobs = "31/12/F9"
fix_ref = {
"cdfix": "No change",
"cylfix": "No change",
"obsfix": "No change",
"datfix": "Invalid DATE-OBS format '31/12/F9'",
"spcfix": "No change",
"unitfix": "No change",
"celfix": "No change",
}
version = wcs._wcs.__version__
if Version(version) <= Version("5"):
del fix_ref["obsfix"]
fix_ref["datfix"] = "Invalid parameter value: invalid date '31/12/F9'"
if Version(version) >= Version("7.3"):
fix_ref["datfix"] = (
"Set DATEREF to '1858-11-17' from MJDREF.\n" + fix_ref["datfix"]
)
elif Version(version) >= Version("7.1"):
fix_ref["datfix"] = (
"Set DATE-REF to '1858-11-17' from MJD-REF.\n" + fix_ref["datfix"]
)
assert w.fix() == fix_ref
assert w.dateobs == "31/12/F9"
assert np.isnan(w.mjdobs)
def test_fix4():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.fix("X")
def test_fix5():
w = _wcs.Wcsprm()
with pytest.raises(ValueError):
w.fix(naxis=[0, 1, 2])
def test_get_ps():
# TODO: We need some data with PSi_ma keywords
w = _wcs.Wcsprm()
assert len(w.get_ps()) == 0
def test_get_pv():
# TODO: We need some data with PVi_ma keywords
w = _wcs.Wcsprm()
assert len(w.get_pv()) == 0
def test_imgpix_matrix():
w = _wcs.Wcsprm()
with pytest.raises(AssertionError):
w.imgpix_matrix
def test_imgpix_matrix2():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.imgpix_matrix = None
def test_isunity():
w = _wcs.Wcsprm()
assert w.is_unity()
def test_lat():
w = _wcs.Wcsprm()
assert w.lat == -1
def test_lat_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lat = 0
def test_latpole():
w = _wcs.Wcsprm()
assert w.latpole == 90.0
w.latpole = 45.0
assert w.latpole == 45.0
del w.latpole
assert w.latpole == 90.0
def test_lattyp():
w = _wcs.Wcsprm()
assert w.lattyp == " "
def test_lattyp_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lattyp = 0
def test_lng():
w = _wcs.Wcsprm()
assert w.lng == -1
def test_lng_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lng = 0
def test_lngtyp():
w = _wcs.Wcsprm()
assert w.lngtyp == " "
def test_lngtyp_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.lngtyp = 0
def test_lonpole():
w = _wcs.Wcsprm()
assert np.isnan(w.lonpole)
w.lonpole = 45.0
assert w.lonpole == 45.0
del w.lonpole
assert np.isnan(w.lonpole)
def test_mix():
w = _wcs.Wcsprm()
w.ctype = [b"RA---TAN", "DEC--TAN"]
with pytest.raises(_wcs.InvalidCoordinateError):
w.mix(1, 1, [240, 480], 1, 5, [0, 2], [54, 32], 1)
def test_mjdavg():
w = _wcs.Wcsprm()
assert np.isnan(w.mjdavg)
w.mjdavg = 45.0
assert w.mjdavg == 45.0
del w.mjdavg
assert np.isnan(w.mjdavg)
def test_mjdobs():
w = _wcs.Wcsprm()
assert np.isnan(w.mjdobs)
w.mjdobs = 45.0
assert w.mjdobs == 45.0
del w.mjdobs
assert np.isnan(w.mjdobs)
def test_name():
w = _wcs.Wcsprm()
assert w.name == ""
w.name = "foo"
assert w.name == "foo"
def test_naxis():
w = _wcs.Wcsprm()
assert w.naxis == 2
def test_naxis_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.naxis = 4
def test_obsgeo():
    """``obsgeo`` starts as NaNs, accepts a 6-vector, and del restores NaNs."""
    prm = _wcs.Wcsprm()
    assert np.all(np.isnan(prm.obsgeo))
    prm.obsgeo = [1, 2, 3, 4, 5, 6]
    assert_array_equal(prm.obsgeo, [1, 2, 3, 4, 5, 6])
    del prm.obsgeo
    assert np.all(np.isnan(prm.obsgeo))
def test_pc():
w = _wcs.Wcsprm()
assert w.has_pc()
assert_array_equal(w.pc, [[1, 0], [0, 1]])
w.cd = [[1, 0], [0, 1]]
assert not w.has_pc()
del w.cd
assert w.has_pc()
assert_array_equal(w.pc, [[1, 0], [0, 1]])
w.pc = w.pc
def test_pc_missing():
w = _wcs.Wcsprm()
w.cd = [[1, 0], [0, 1]]
assert not w.has_pc()
with pytest.raises(AttributeError):
w.pc
def test_phi0():
w = _wcs.Wcsprm()
assert np.isnan(w.phi0)
w.phi0 = 42.0
assert w.phi0 == 42.0
del w.phi0
assert np.isnan(w.phi0)
def test_piximg_matrix():
w = _wcs.Wcsprm()
with pytest.raises(AssertionError):
w.piximg_matrix
def test_piximg_matrix2():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.piximg_matrix = None
def test_print_contents():
# In general, this is human-consumable, so we don't care if the
# content changes, just check the type
w = _wcs.Wcsprm()
assert isinstance(str(w), str)
def test_radesys():
w = _wcs.Wcsprm()
assert w.radesys == ""
w.radesys = "foo"
assert w.radesys == "foo"
def test_restfrq():
w = _wcs.Wcsprm()
assert w.restfrq == 0.0
w.restfrq = np.nan
assert np.isnan(w.restfrq)
del w.restfrq
def test_restwav():
w = _wcs.Wcsprm()
assert w.restwav == 0.0
w.restwav = np.nan
assert np.isnan(w.restwav)
del w.restwav
def test_set_ps():
w = _wcs.Wcsprm()
data = [(0, 0, "param1"), (1, 1, "param2")]
w.set_ps(data)
assert w.get_ps() == data
def test_set_ps_realloc():
w = _wcs.Wcsprm()
w.set_ps([(0, 0, "param1")] * 16)
def test_set_pv():
w = _wcs.Wcsprm()
data = [(0, 0, 42.0), (1, 1, 54.0)]
w.set_pv(data)
assert w.get_pv() == data
def test_set_pv_realloc():
w = _wcs.Wcsprm()
w.set_pv([(0, 0, 42.0)] * 16)
def test_spcfix():
# TODO: We need some data with broken spectral headers here to
# really test
header = get_pkg_data_contents("data/spectra/orion-velo-1.hdr", encoding="binary")
w = _wcs.Wcsprm(header)
assert w.spcfix() == -1
def test_spec():
w = _wcs.Wcsprm()
assert w.spec == -1
def test_spec_set():
w = _wcs.Wcsprm()
with pytest.raises(AttributeError):
w.spec = 0
def test_specsys():
w = _wcs.Wcsprm()
assert w.specsys == ""
w.specsys = "foo"
assert w.specsys == "foo"
def test_sptr():
# TODO: Write me
pass
def test_ssysobs():
w = _wcs.Wcsprm()
assert w.ssysobs == ""
w.ssysobs = "foo"
assert w.ssysobs == "foo"
def test_ssyssrc():
w = _wcs.Wcsprm()
assert w.ssyssrc == ""
w.ssyssrc = "foo"
assert w.ssyssrc == "foo"
def test_tab():
w = _wcs.Wcsprm()
assert len(w.tab) == 0
# TODO: Inject some headers that have tables and test
def test_theta0():
w = _wcs.Wcsprm()
assert np.isnan(w.theta0)
w.theta0 = 42.0
assert w.theta0 == 42.0
del w.theta0
assert np.isnan(w.theta0)
def test_toheader():
w = _wcs.Wcsprm()
assert isinstance(w.to_header(), str)
def test_velangl():
w = _wcs.Wcsprm()
assert np.isnan(w.velangl)
w.velangl = 42.0
assert w.velangl == 42.0
del w.velangl
assert np.isnan(w.velangl)
def test_velosys():
w = _wcs.Wcsprm()
assert np.isnan(w.velosys)
w.velosys = 42.0
assert w.velosys == 42.0
del w.velosys
assert np.isnan(w.velosys)
def test_velref():
w = _wcs.Wcsprm()
assert w.velref == 0.0
w.velref = 42
assert w.velref == 42.0
del w.velref
assert w.velref == 0.0
def test_zsource():
w = _wcs.Wcsprm()
assert np.isnan(w.zsource)
w.zsource = 42.0
assert w.zsource == 42.0
del w.zsource
assert np.isnan(w.zsource)
def test_cd_3d():
header = get_pkg_data_contents("data/3d_cd.hdr", encoding="binary")
w = _wcs.Wcsprm(header)
assert w.cd.shape == (3, 3)
assert w.get_pc().shape == (3, 3)
assert w.get_cdelt().shape == (3,)
def test_get_pc():
header = get_pkg_data_contents("data/3d_cd.hdr", encoding="binary")
w = _wcs.Wcsprm(header)
pc = w.get_pc()
try:
pc[0, 0] = 42
except (RuntimeError, ValueError):
pass
else:
raise AssertionError()
def test_detailed_err():
w = _wcs.Wcsprm()
w.pc = [[0, 0], [0, 0]]
with pytest.raises(_wcs.SingularMatrixError):
w.set()
def test_header_parse():
from astropy.io import fits
with get_pkg_data_fileobj(
"data/header_newlines.fits", encoding="binary"
) as test_file:
hdulist = fits.open(test_file)
with pytest.warns(FITSFixedWarning):
w = wcs.WCS(hdulist[0].header)
assert w.wcs.ctype[0] == "RA---TAN-SIP"
def test_locale():
    """Header output must use '.' decimal separators even under fr_FR locale."""
    try:
        with _set_locale("fr_FR"):
            header = get_pkg_data_contents("data/locale.hdr", encoding="binary")
            with pytest.warns(FITSFixedWarning):
                w = _wcs.Wcsprm(header)
            # A comma used as a decimal separator would match this pattern.
            assert re.search("[0-9]+,[0-9]*", w.to_header()) is None
    except locale.Error:
        pytest.xfail(
            "Can't set to 'fr_FR' locale, perhaps because it is not installed "
            "on this system"
        )
def test_unicode():
w = _wcs.Wcsprm()
with pytest.raises(UnicodeEncodeError):
w.alt = "‰"
def test_sub_segfault():
"""Issue #1960"""
header = fits.Header.fromtextfile(get_pkg_data_filename("data/sub-segfault.hdr"))
w = wcs.WCS(header)
w.sub([wcs.WCSSUB_CELESTIAL])
gc.collect()
def test_bounds_check():
w = _wcs.Wcsprm()
w.bounds_check(False)
def test_wcs_sub_error_message():
"""Issue #1587"""
w = _wcs.Wcsprm()
with pytest.raises(TypeError, match="axes must None, a sequence or an integer$"):
w.sub("latitude")
def test_wcs_sub():
"""Issue #3356"""
w = _wcs.Wcsprm()
w.sub(["latitude"])
w = _wcs.Wcsprm()
w.sub([b"latitude"])
def test_compare():
    """``compare()`` flags ancillary differences and honors a float tolerance."""
    header = get_pkg_data_contents("data/3d_cd.hdr", encoding="binary")
    w = _wcs.Wcsprm(header)
    w2 = _wcs.Wcsprm(header)
    assert w == w2
    w.equinox = 42
    # __eq__ still treats them as equal after the EQUINOX change, while a
    # flagless compare() does not; WCSCOMPARE_ANCILLARY ignores it again.
    assert w == w2
    assert not w.compare(w2)
    assert w.compare(w2, _wcs.WCSCOMPARE_ANCILLARY)
    w = _wcs.Wcsprm(header)
    w2 = _wcs.Wcsprm(header)
    with pytest.warns(RuntimeWarning):
        w.cdelt[0] = np.float32(0.00416666666666666666666666)
    w2.cdelt[0] = np.float64(0.00416666666666666666666666)
    # float32 truncation differs from float64, so exact compare fails but a
    # 1e-6 tolerance passes.
    assert not w.compare(w2)
    assert w.compare(w2, tolerance=1e-6)
def test_radesys_defaults():
    """Equatorial CTYPEs with no RADESYS/EQUINOX default to ICRS after set()."""
    prm = _wcs.Wcsprm()
    prm.ctype = ["RA---TAN", "DEC--TAN"]
    prm.set()
    assert prm.radesys == "ICRS"
def test_radesys_defaults_full():
    """Check RADESYS/EQUINOX defaulting rules from the FITS standard."""
    # As described in Section 3.1 of the FITS standard "Equatorial and ecliptic
    # coordinates", for those systems the RADESYS keyword can be used to
    # indicate the equatorial/ecliptic frame to use. From the standard:
    # "For RADESYSa values of FK4 and FK4-NO-E, any stated equinox is Besselian
    # and, if neither EQUINOXa nor EPOCH are given, a default of 1950.0 is to
    # be taken. For FK5, any stated equinox is Julian and, if neither keyword
    # is given, it defaults to 2000.0.
    # "If the EQUINOXa keyword is given it should always be accompanied by
    # RADESYSa. However, if it should happen to appear by itself then
    # RADESYSa defaults to FK4 if EQUINOXa < 1984.0, or to FK5 if EQUINOXa
    # >= 1984.0. Note that these defaults, while probably true of older files
    # using the EPOCH keyword, are not required of them.
    # By default RADESYS is empty
    w = _wcs.Wcsprm(naxis=2)
    assert w.radesys == ""
    assert np.isnan(w.equinox)
    # For non-ecliptic or equatorial systems it is still empty
    w = _wcs.Wcsprm(naxis=2)
    for ctype in [("GLON-CAR", "GLAT-CAR"), ("SLON-SIN", "SLAT-SIN")]:
        w.ctype = ctype
        w.set()
        assert w.radesys == ""
        assert np.isnan(w.equinox)
    for ctype in [
        ("RA---TAN", "DEC--TAN"),
        ("ELON-TAN", "ELAT-TAN"),
        ("DEC--TAN", "RA---TAN"),
        ("ELAT-TAN", "ELON-TAN"),
    ]:
        # Check defaults for RADESYS
        w = _wcs.Wcsprm(naxis=2)
        w.ctype = ctype
        w.set()
        assert w.radesys == "ICRS"
        w = _wcs.Wcsprm(naxis=2)
        w.ctype = ctype
        w.equinox = 1980
        w.set()
        assert w.radesys == "FK4"
        w = _wcs.Wcsprm(naxis=2)
        w.ctype = ctype
        w.equinox = 1984
        w.set()
        assert w.radesys == "FK5"
        w = _wcs.Wcsprm(naxis=2)
        w.ctype = ctype
        w.radesys = "foo"
        w.set()
        assert w.radesys == "foo"
        # Check defaults for EQUINOX
        w = _wcs.Wcsprm(naxis=2)
        w.ctype = ctype
        w.set()
        assert np.isnan(w.equinox)  # frame is ICRS, no equinox
        w = _wcs.Wcsprm(naxis=2)
        w.ctype = ctype
        w.radesys = "ICRS"
        w.set()
        assert np.isnan(w.equinox)
        w = _wcs.Wcsprm(naxis=2)
        w.ctype = ctype
        w.radesys = "FK5"
        w.set()
        assert w.equinox == 2000.0
        w = _wcs.Wcsprm(naxis=2)
        w.ctype = ctype
        w.radesys = "FK4"
        w.set()
        assert w.equinox == 1950
        w = _wcs.Wcsprm(naxis=2)
        w.ctype = ctype
        w.radesys = "FK4-NO-E"
        w.set()
        assert w.equinox == 1950
def test_iteration():
    """Round-trip a grid of world coordinates through world2pix/pix2world."""
    world = np.array(
        [
            [-0.58995335, -0.5],
            [0.00664326, -0.5],
            [-0.58995335, -0.25],
            [0.00664326, -0.25],
            [-0.58995335, 0.0],
            [0.00664326, 0.0],
            [-0.58995335, 0.25],
            [0.00664326, 0.25],
            [-0.58995335, 0.5],
            [0.00664326, 0.5],
        ],
        float,
    )
    w = wcs.WCS()
    w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
    w.wcs.cdelt = [-0.006666666828, 0.006666666828]
    w.wcs.crpix = [75.907, 74.8485]
    x = w.wcs_world2pix(world, 1)
    expected = np.array(
        [
            [1.64400000e02, -1.51498185e-01],
            [7.49105110e01, -1.51498185e-01],
            [1.64400000e02, 3.73485009e01],
            [7.49105110e01, 3.73485009e01],
            [1.64400000e02, 7.48485000e01],
            [7.49105110e01, 7.48485000e01],
            [1.64400000e02, 1.12348499e02],
            [7.49105110e01, 1.12348499e02],
            [1.64400000e02, 1.49848498e02],
            [7.49105110e01, 1.49848498e02],
        ],
        float,
    )
    assert_array_almost_equal(x, expected)
    w2 = w.wcs_pix2world(x, 1)
    # Longitudes come back wrapped to [0, 360); normalize before comparing.
    world[:, 0] %= 360.0
    assert_array_almost_equal(w2, world)
def test_invalid_args():
    """Invalid Wcsprm constructor arguments raise the documented exceptions."""
    with pytest.raises(TypeError):
        _wcs.Wcsprm(keysel="A")
    with pytest.raises(ValueError):
        _wcs.Wcsprm(keysel=2)
    with pytest.raises(ValueError):
        _wcs.Wcsprm(colsel=2)
    with pytest.raises(ValueError):
        _wcs.Wcsprm(naxis=64)
    header = get_pkg_data_contents("data/spectra/orion-velo-1.hdr", encoding="binary")
    with pytest.raises(ValueError):
        _wcs.Wcsprm(header, relax="FOO")
    with pytest.raises(ValueError):
        _wcs.Wcsprm(header, naxis=3)
    with pytest.raises(KeyError):
        _wcs.Wcsprm(header, key="A")
# Test keywords in the Time standard
def test_datebeg():
    """DATE-BEG round-trips; a malformed date is reported by fix()/datfix."""
    w = _wcs.Wcsprm()
    assert w.datebeg == ""
    w.datebeg = "2001-02-11"
    assert w.datebeg == "2001-02-11"
    # Non-ISO date: accepted on assignment but flagged by datfix below.
    w.datebeg = "31/12/99"
    fix_ref = {
        "cdfix": "No change",
        "cylfix": "No change",
        "obsfix": "No change",
        "datfix": "Invalid DATE-BEG format '31/12/99'",
        "spcfix": "No change",
        "unitfix": "No change",
        "celfix": "No change",
    }
    # wcslib changed the wording (and keyword spelling) of the DATEREF
    # message across versions; adjust the expected datfix text accordingly.
    if Version(wcs._wcs.__version__) >= Version("7.3"):
        fix_ref["datfix"] = (
            "Set DATEREF to '1858-11-17' from MJDREF.\n" + fix_ref["datfix"]
        )
    elif Version(wcs._wcs.__version__) >= Version("7.1"):
        fix_ref["datfix"] = (
            "Set DATE-REF to '1858-11-17' from MJD-REF.\n" + fix_ref["datfix"]
        )
    assert w.fix() == fix_ref
# String-valued WCS Time-standard keywords exposed as Wcsprm attributes;
# consumed by the parametrized test_char_keys below.
char_keys = [
    "timesys",
    "trefpos",
    "trefdir",
    "plephem",
    "timeunit",
    "dateref",
    "dateavg",
    "dateend",
]
@pytest.mark.parametrize("key", char_keys)
def test_char_keys(key):
    """Character keywords default to '', round-trip strings, reject ints."""
    prm = _wcs.Wcsprm()
    assert getattr(prm, key) == ""
    setattr(prm, key, "foo")
    assert getattr(prm, key) == "foo"
    with pytest.raises(TypeError):
        setattr(prm, key, 42)
# Floating-point WCS Time-standard keywords exposed as Wcsprm attributes;
# consumed by the parametrized test_num_keys below.
# NOTE: the original list contained "xposure" twice, which ran that
# parametrized case redundantly; the duplicate has been removed.
num_keys = [
    "mjdobs",
    "mjdbeg",
    "mjdend",
    "jepoch",
    "bepoch",
    "tstart",
    "tstop",
    "xposure",
    "timsyer",
    "timrder",
    "timedel",
    "timepixr",
    "timeoffs",
    "telapse",
]
@pytest.mark.parametrize("key", num_keys)
def test_num_keys(key):
    """Numeric keywords default to NaN, round-trip floats, reject strings."""
    w = _wcs.Wcsprm()
    assert np.isnan(getattr(w, key))
    setattr(w, key, 42.0)
    assert getattr(w, key) == 42.0
    # Deleting the attribute restores the "undefined" (NaN) state.
    delattr(w, key)
    assert np.isnan(getattr(w, key))
    with pytest.raises(TypeError):
        setattr(w, key, "foo")
@pytest.mark.parametrize("key", ["czphs", "cperi", "mjdref"])
def test_array_keys(key):
    """Array-valued keywords: defaults, assignment, and type validation."""
    w = _wcs.Wcsprm()
    attr = getattr(w, key)
    # wcslib >= 7.1 initializes mjdref to zeros instead of NaN.
    if key == "mjdref" and Version(_wcs.__version__) >= Version("7.1"):
        assert np.allclose(attr, [0, 0])
    else:
        assert np.all(np.isnan(attr))
    assert attr.dtype == float
    setattr(w, key, [1.0, 2.0])
    assert_array_equal(getattr(w, key), [1.0, 2.0])
    with pytest.raises(ValueError):
        setattr(w, key, ["foo", "bar"])
    with pytest.raises(ValueError):
        setattr(w, key, "foo")
|
1607ed745ac8e3b35aef0d3b244aa99989cebb2a45efa414eb84af3c89e3bcb3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import copy, deepcopy
import numpy as np
import pytest
from astropy import wcs
_WCS_UNDEFINED = 987654321.0e99
def test_celprm_init():
    """Exercise both Celprm construction paths, failure on set(), and del."""
    # PyCelprm_cnew: a Celprm owned by a WCS object.
    assert wcs.WCS().wcs.cel
    # PyCelprm_new: a directly constructed Celprm is truthy.
    assert wcs.Celprm()
    # set() without projection parameters must fail.
    with pytest.raises(wcs.InvalidPrjParametersError):
        unset = wcs.Celprm()
        unset.set()
    # Deleting a Celprm must not crash the interpreter.
    doomed = wcs.Celprm()
    del doomed
def test_celprm_copy():
    """copy() shares the underlying struct; deepcopy() gets its own."""
    # shallow copy: mutating one instance is visible through the copies
    cel = wcs.Celprm()
    cel2 = copy(cel)
    cel3 = copy(cel2)
    cel.ref = [6, 8, 18, 3]
    assert np.allclose(cel.ref, cel2.ref, atol=1e-12, rtol=0) and np.allclose(
        cel.ref, cel3.ref, atol=1e-12, rtol=0
    )
    del cel, cel2, cel3
    # deep copy: the copy is fully independent
    cel = wcs.Celprm()
    cel2 = deepcopy(cel)
    cel.ref = [6, 8, 18, 3]
    assert not np.allclose(cel.ref, cel2.ref, atol=1e-12, rtol=0)
    del cel, cel2
def test_celprm_offset():
    """The native-offset flag defaults off and can be switched on."""
    celestial = wcs.Celprm()
    assert not celestial.offset
    celestial.offset = True
    assert celestial.offset
def test_celprm_prj():
    """The embedded Prjprm is accessible and set() marks the state computed."""
    cel = wcs.Celprm()
    assert cel.prj is not None
    cel.prj.code = "TAN"
    cel.set()
    # _flag is truthy once set() has successfully computed the projection.
    assert cel._flag
def test_celprm_phi0():
    """phi0 is unset (None) until set(); reassignment invalidates the flag."""
    cel = wcs.Celprm()
    cel.prj.code = "TAN"
    # Identity comparison for None (PEP 8); ``== None`` was unidiomatic.
    assert cel.phi0 is None
    assert cel._flag == 0
    cel.set()
    assert cel.phi0 == 0.0
    # Re-assigning the current value keeps the computed state...
    cel.phi0 = 0.0
    assert cel._flag
    # ...while a different value invalidates it (recompute on next set()).
    cel.phi0 = 2.0
    assert cel._flag == 0
    cel.phi0 = None
    assert cel.phi0 is None
    assert cel._flag == 0
def test_celprm_theta0():
    """theta0 is unset (None) until assigned; reassignment toggles the flag."""
    cel = wcs.Celprm()
    cel.prj.code = "TAN"
    # Identity comparison for None (PEP 8); ``== None`` was unidiomatic.
    assert cel.theta0 is None
    assert cel._flag == 0
    cel.theta0 = 4.0
    cel.set()
    assert cel.theta0 == 4.0
    # Re-assigning the current value keeps the computed state...
    cel.theta0 = 4.0
    assert cel._flag
    # ...while a different value invalidates it.
    cel.theta0 = 8.0
    assert cel._flag == 0
    cel.theta0 = None
    assert cel.theta0 is None
    assert cel._flag == 0
def test_celprm_ref():
    """ref holds (lng, lat, phi_p, theta_p); None entries keep old values."""
    cel = wcs.Celprm()
    cel.prj.code = "TAN"
    cel.set()
    assert np.allclose(cel.ref, [0.0, 0.0, 180.0, 0.0], atol=1e-12, rtol=0)
    cel.phi0 = 2.0
    cel.theta0 = 4.0
    cel.ref = [123, 12]
    cel.set()
    # set() derives the native pole (last two elements) from phi0/theta0.
    assert np.allclose(cel.ref, [123.0, 12.0, 2, 82], atol=1e-12, rtol=0)
    # A None element leaves the corresponding component unchanged.
    cel.ref = [None, 13, None, None]
    assert np.allclose(cel.ref, [123.0, 13.0, 2, 82], atol=1e-12, rtol=0)
def test_celprm_isolat():
    """For a TAN projection the iso-latitude flag is 0 after set()."""
    celestial = wcs.Celprm()
    celestial.prj.code = "TAN"
    celestial.set()
    assert celestial.isolat == 0
def test_celprm_latpreq():
    """For a TAN projection LATPOLE is not required (latpreq == 0)."""
    celestial = wcs.Celprm()
    celestial.prj.code = "TAN"
    celestial.set()
    assert celestial.latpreq == 0
def test_celprm_euler():
    """Default TAN setup yields the identity-like Euler angle set."""
    cel = wcs.Celprm()
    cel.prj.code = "TAN"
    cel.set()
    # (lng_p, 90 - lat_p, phi_p, cos/sin terms) for the default reference.
    assert np.allclose(cel.euler, [0.0, 90.0, 180.0, 0.0, 1.0], atol=1e-12, rtol=0)
|
ab9e4b4b140baa2f8cbd85bd5d9cbaeda061232975dd2b79791b586ce7bbe4db | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.io import fits
class SimModelTAB:
    """Simulate a 2-D image whose spatial WCS uses the FITS ``-TAB``
    (table lookup) algorithm, plus the analytic forward transformation
    needed to verify it.

    Parameters
    ----------
    nx, ny : int
        Image dimensions. ``nx > 2`` and ``ny > 1`` are required by the
        piecewise coordinate table constructed here.
    crpix, crval, cdelt : list of two numbers, optional
        Standard WCS reference pixel, reference value and pixel scale.
        Default to ``[1, 1]`` each.
    pc : dict, optional
        PC-matrix header cards (keyword -> value); default identity.
    """

    def __init__(
        self,
        nx=150,
        ny=200,
        crpix=None,
        crval=None,
        cdelt=None,
        pc=None,
    ):
        """Set essential parameters of the model (coord transformations)."""
        assert nx > 2 and ny > 1  # a limitation of this particular simulation
        self.nx = nx
        self.ny = ny
        # Use None sentinels instead of mutable default arguments so that
        # each instance gets its own fresh lists/dict.
        self.crpix = [1, 1] if crpix is None else crpix
        self.crval = [1, 1] if crval is None else crval
        self.cdelt = [1, 1] if cdelt is None else cdelt
        self.pc = {"PC1_1": 1, "PC2_2": 1} if pc is None else pc

    def fwd_eval(self, xy):
        """Evaluate the forward (pixel -> world) transformation.

        Parameters
        ----------
        xy : array_like
            ``(N, 2)`` array (or a single pair) of 1-based pixel coordinates.

        Returns
        -------
        numpy.ndarray
            ``(N, 2)`` array of (ra, dec) world coordinates.
        """
        # Index arrays: x is split into two linear pieces with a coordinate
        # "jump" at column ``xb``; y is a single linear piece.
        xb = 1 + self.nx // 3
        px = np.array([1, xb, xb, self.nx + 1])
        py = np.array([1, self.ny + 1])
        xi = self.crval[0] + self.cdelt[0] * (px - self.crpix[0])
        yi = self.crval[1] + self.cdelt[1] * (py - self.crpix[1])
        # Coordinate values at the nodes of the lookup table.
        cx = np.array([0.0, 0.26, 0.8, 1.0])
        cy = np.array([-0.5, 0.5])
        xy = np.atleast_2d(xy)
        x = xy[:, 0]
        y = xy[:, 1]
        # Select the linear piece: nodes 0-1 left of the jump, 2-3 right.
        # (The original also computed unused in-bounds masks; removed.)
        i = 2 * (x > xb).astype(int)
        # Intermediate world coordinates, then linear interpolation within
        # the selected table interval.
        psix = self.crval[0] + self.cdelt[0] * (x - self.crpix[0])
        psiy = self.crval[1] + self.cdelt[1] * (y - self.crpix[1])
        cfx = (psix - xi[i]) / (xi[i + 1] - xi[i])
        cfy = (psiy - yi[0]) / (yi[1] - yi[0])
        ra = cx[i] + cfx * (cx[i + 1] - cx[i])
        dec = cy[0] + cfy * (cy[1] - cy[0])
        return np.dstack([ra, dec])[0]

    @property
    def hdulist(self):
        """Simulates 2D data with a _spatial_ WCS that uses the ``-TAB``
        algorithm with indexing.
        """
        # coordinate array (some "arbitrary" numbers with a "jump" along x axis):
        x = np.array([[0.0, 0.26, 0.8, 1.0], [0.0, 0.26, 0.8, 1.0]])
        y = np.array([[-0.5, -0.5, -0.5, -0.5], [0.5, 0.5, 0.5, 0.5]])
        c = np.dstack([x, y])
        # index arrays (skip PC matrix for simplicity - assume it is an
        # identity matrix):
        xb = 1 + self.nx // 3
        px = np.array([1, xb, xb, self.nx + 1])
        py = np.array([1, self.ny + 1])
        xi = self.crval[0] + self.cdelt[0] * (px - self.crpix[0])
        yi = self.crval[1] + self.cdelt[1] * (py - self.crpix[1])
        # structured array (data) for binary table HDU:
        arr = np.array(
            [(c, xi, yi)],
            dtype=[
                ("wavelength", np.float64, c.shape),
                ("xi", np.double, (xi.size,)),
                ("yi", np.double, (yi.size,)),
            ],
        )
        # create binary table HDU:
        bt = fits.BinTableHDU(arr)
        bt.header["EXTNAME"] = "WCS-TABLE"
        # create primary header with the -TAB WCS cards pointing at the table:
        image_data = np.ones((self.ny, self.nx), dtype=np.float32)
        pu = fits.PrimaryHDU(image_data)
        pu.header["ctype1"] = "RA---TAB"
        pu.header["ctype2"] = "DEC--TAB"
        pu.header["naxis1"] = self.nx
        pu.header["naxis2"] = self.ny
        pu.header["PS1_0"] = "WCS-TABLE"
        pu.header["PS2_0"] = "WCS-TABLE"
        pu.header["PS1_1"] = "wavelength"
        pu.header["PS2_1"] = "wavelength"
        pu.header["PV1_3"] = 1
        pu.header["PV2_3"] = 2
        pu.header["CUNIT1"] = "deg"
        pu.header["CUNIT2"] = "deg"
        pu.header["CDELT1"] = self.cdelt[0]
        pu.header["CDELT2"] = self.cdelt[1]
        pu.header["CRPIX1"] = self.crpix[0]
        pu.header["CRPIX2"] = self.crpix[1]
        pu.header["CRVAL1"] = self.crval[0]
        pu.header["CRVAL2"] = self.crval[1]
        pu.header["PS1_2"] = "xi"
        pu.header["PS2_2"] = "yi"
        for k, v in self.pc.items():
            pu.header[k] = v
        hdulist = fits.HDUList([pu, bt])
        return hdulist
|
8dd6f73a71d3addeee5ccb104d147ecaa5fe332c4e42b841f269cf654be63158 | from .base import BaseWCSWrapper
from .sliced_wcs import *
|
b5d3ae5a47e0e14a69571b7669da8171bec907fe3716bf3c7259bdad757ce769 | import abc
from astropy.wcs.wcsapi import BaseLowLevelWCS, wcs_info_str
class BaseWCSWrapper(BaseLowLevelWCS, metaclass=abc.ABCMeta):
    """
    A base wrapper class for things that modify Low Level WCSes.

    This wrapper implements a transparent wrapper to many of the properties,
    with the idea that not all of them would need to be overridden in your
    wrapper, but some probably will.

    Parameters
    ----------
    wcs : `astropy.wcs.wcsapi.BaseLowLevelWCS`
        The WCS object to wrap
    """

    def __init__(self, wcs, *args, **kwargs):
        # Extra positional/keyword arguments are accepted (and ignored here)
        # so cooperative subclasses can extend the signature freely.
        self._wcs = wcs

    # --- The properties below forward unchanged to the wrapped WCS; ---
    # --- subclasses override only the ones whose values they modify. ---

    @property
    def pixel_n_dim(self):
        return self._wcs.pixel_n_dim

    @property
    def world_n_dim(self):
        return self._wcs.world_n_dim

    @property
    def world_axis_physical_types(self):
        return self._wcs.world_axis_physical_types

    @property
    def world_axis_units(self):
        return self._wcs.world_axis_units

    @property
    def world_axis_object_components(self):
        return self._wcs.world_axis_object_components

    @property
    def world_axis_object_classes(self):
        return self._wcs.world_axis_object_classes

    @property
    def pixel_shape(self):
        return self._wcs.pixel_shape

    @property
    def pixel_bounds(self):
        return self._wcs.pixel_bounds

    @property
    def pixel_axis_names(self):
        return self._wcs.pixel_axis_names

    @property
    def world_axis_names(self):
        return self._wcs.world_axis_names

    @property
    def axis_correlation_matrix(self):
        return self._wcs.axis_correlation_matrix

    @property
    def serialized_classes(self):
        return self._wcs.serialized_classes

    @abc.abstractmethod
    def pixel_to_world_values(self, *pixel_arrays):
        """Convert pixel to world coordinates; must be supplied by subclasses."""
        pass

    @abc.abstractmethod
    def world_to_pixel_values(self, *world_arrays):
        """Convert world to pixel coordinates; must be supplied by subclasses."""
        pass

    def __repr__(self):
        return f"{object.__repr__(self)}\n{str(self)}"

    def __str__(self):
        return wcs_info_str(self)
|
bede7ac8e91d2abb5b68379f868487fd8a648b0dd8fe6978a94ada956b05dd98 | import numbers
from collections import defaultdict
import numpy as np
from astropy.utils import isiterable
from astropy.utils.decorators import lazyproperty
from .base import BaseWCSWrapper
__all__ = ["sanitize_slices", "SlicedLowLevelWCS"]
def sanitize_slices(slices, ndim):
    """
    Given a slice as input sanitize it to an easier to parse format.

    This function returns a list ``ndim`` long containing slice objects (or
    ints). Ellipsis entries are expanded, missing trailing dimensions are
    padded with ``slice(None)``, and unsupported items (iterables, stepped
    slices, non-integers) raise ``IndexError``/``ValueError``.
    """
    if not isinstance(slices, (tuple, list)):  # We just have a single int
        slices = (slices,)
    if len(slices) > ndim:
        raise ValueError(
            f"The dimensionality of the specified slice {slices} can not be greater "
            f"than the dimensionality ({ndim}) of the wcs."
        )
    if any(isiterable(s) for s in slices):
        raise IndexError(
            "This slice is invalid, only integer or range slices are supported."
        )
    slices = list(slices)
    if Ellipsis in slices:
        if slices.count(Ellipsis) > 1:
            raise IndexError("an index can only have a single ellipsis ('...')")
        # Replace the Ellipsis with the correct number of slice(None)s
        e_ind = slices.index(Ellipsis)
        slices.remove(Ellipsis)
        n_e = ndim - len(slices)
        for i in range(n_e):
            ind = e_ind + i
            slices.insert(ind, slice(None))
    # Validate each supplied entry, and pad with full slices up to ndim.
    for i in range(ndim):
        if i < len(slices):
            slc = slices[i]
            if isinstance(slc, slice):
                # A step of None or 1 is allowed; anything else is not.
                if slc.step and slc.step != 1:
                    raise IndexError("Slicing WCS with a step is not supported.")
            elif not isinstance(slc, numbers.Integral):
                raise IndexError("Only integer or range slices are accepted.")
        else:
            slices.append(slice(None))
    return slices
def combine_slices(slice1, slice2):
    """
    Given two slices that can be applied to a 1-d array, find the resulting
    slice that corresponds to the combination of both slices. We assume that
    slice2 can be an integer, but slice1 cannot.

    Parameters
    ----------
    slice1 : slice
        The slice applied first (to the original array).
    slice2 : slice or int
        The slice (or integer index) applied to the result of ``slice1``.

    Returns
    -------
    slice or int
        A single slice (or integer index) equivalent to applying ``slice1``
        then ``slice2``.

    Raises
    ------
    ValueError
        If either slice has a step other than 1.
    """
    # An explicit step of 1 is equivalent to no step, so accept both
    # (``sanitize_slices`` lets step-1 slices through); reject anything else.
    # The original rejected ``slice(a, b, 1)``, inconsistently with
    # ``sanitize_slices``.
    if isinstance(slice1, slice) and slice1.step not in (None, 1):
        raise ValueError("Only slices with steps of 1 are supported")
    if isinstance(slice2, slice) and slice2.step not in (None, 1):
        raise ValueError("Only slices with steps of 1 are supported")

    if isinstance(slice2, numbers.Integral):
        # An integer index into the sliced array is just offset by the
        # start of the first slice.
        if slice1.start is None:
            return slice2
        return slice2 + slice1.start

    if slice1.start is None:
        if slice1.stop is None:
            # slice1 is a no-op, so slice2 stands alone.
            return slice2
        if slice2.stop is None:
            return slice(slice2.start, slice1.stop)
        return slice(slice2.start, min(slice1.stop, slice2.stop))

    # slice1 has a start: shift slice2 by it and clip to slice1's stop.
    if slice2.start is None:
        start = slice1.start
    else:
        start = slice1.start + slice2.start
    if slice2.stop is None:
        stop = slice1.stop
    else:
        stop = slice2.stop + slice1.start
        if slice1.stop is not None:
            stop = min(slice1.stop, stop)
    return slice(start, stop)
class SlicedLowLevelWCS(BaseWCSWrapper):
    """
    A Low Level WCS wrapper which applies an array slice to a WCS.

    This class does not modify the underlying WCS object and can therefore drop
    coupled dimensions as it stores which pixel and world dimensions have been
    sliced out (or modified) in the underlying WCS and returns the modified
    results on all the Low Level WCS methods.

    Parameters
    ----------
    wcs : `~astropy.wcs.wcsapi.BaseLowLevelWCS`
        The WCS to slice.
    slices : `slice` or `tuple` or `int`
        A valid array slice to apply to the WCS.
    """

    def __init__(self, wcs, slices):
        slices = sanitize_slices(slices, wcs.pixel_n_dim)
        if isinstance(wcs, SlicedLowLevelWCS):
            # Here we combine the current slices with the previous slices
            # to avoid ending up with many nested WCSes
            self._wcs = wcs._wcs
            slices_original = wcs._slices_array.copy()
            for ipixel in range(wcs.pixel_n_dim):
                # Map the new slice (array order) onto the original pixel
                # dimension it refers to, then fold the two slices together.
                ipixel_orig = wcs._wcs.pixel_n_dim - 1 - wcs._pixel_keep[ipixel]
                ipixel_new = wcs.pixel_n_dim - 1 - ipixel
                slices_original[ipixel_orig] = combine_slices(
                    slices_original[ipixel_orig], slices[ipixel_new]
                )
            self._slices_array = slices_original
        else:
            self._wcs = wcs
            self._slices_array = slices
        # Array order is the reverse of pixel order.
        self._slices_pixel = self._slices_array[::-1]
        # figure out which pixel dimensions have been kept, then use axis correlation
        # matrix to figure out which world dims are kept
        self._pixel_keep = np.nonzero(
            [
                not isinstance(self._slices_pixel[ip], numbers.Integral)
                for ip in range(self._wcs.pixel_n_dim)
            ]
        )[0]
        # axis_correlation_matrix[world, pixel]
        self._world_keep = np.nonzero(
            self._wcs.axis_correlation_matrix[:, self._pixel_keep].any(axis=1)
        )[0]
        if len(self._pixel_keep) == 0 or len(self._world_keep) == 0:
            raise ValueError(
                "Cannot slice WCS: the resulting WCS should have "
                "at least one pixel and one world dimension."
            )

    @lazyproperty
    def dropped_world_dimensions(self):
        """
        Information describing the dropped world dimensions.
        """
        # Evaluate the world coordinates at pixel 0 of every kept dimension;
        # the values of the dropped world axes at that point are reported.
        world_coords = self._pixel_to_world_values_all(*[0] * len(self._pixel_keep))
        dropped_info = defaultdict(list)
        for i in range(self._wcs.world_n_dim):
            if i in self._world_keep:
                continue
            if "world_axis_object_classes" not in dropped_info:
                dropped_info["world_axis_object_classes"] = dict()
            wao_classes = self._wcs.world_axis_object_classes
            wao_components = self._wcs.world_axis_object_components
            dropped_info["value"].append(world_coords[i])
            dropped_info["world_axis_names"].append(self._wcs.world_axis_names[i])
            dropped_info["world_axis_physical_types"].append(
                self._wcs.world_axis_physical_types[i]
            )
            dropped_info["world_axis_units"].append(self._wcs.world_axis_units[i])
            dropped_info["world_axis_object_components"].append(wao_components[i])
            # Keep only the object classes actually referenced by this axis.
            dropped_info["world_axis_object_classes"].update(
                dict(
                    filter(lambda x: x[0] == wao_components[i][0], wao_classes.items())
                )
            )
            dropped_info["serialized_classes"] = self.serialized_classes
        return dict(dropped_info)

    @property
    def pixel_n_dim(self):
        return len(self._pixel_keep)

    @property
    def world_n_dim(self):
        return len(self._world_keep)

    @property
    def world_axis_physical_types(self):
        return [self._wcs.world_axis_physical_types[i] for i in self._world_keep]

    @property
    def world_axis_units(self):
        return [self._wcs.world_axis_units[i] for i in self._world_keep]

    @property
    def pixel_axis_names(self):
        return [self._wcs.pixel_axis_names[i] for i in self._pixel_keep]

    @property
    def world_axis_names(self):
        return [self._wcs.world_axis_names[i] for i in self._world_keep]

    def _pixel_to_world_values_all(self, *pixel_arrays):
        """Forward transform returning ALL world axes of the underlying WCS."""
        pixel_arrays = tuple(map(np.asanyarray, pixel_arrays))
        pixel_arrays_new = []
        ipix_curr = -1
        for ipix in range(self._wcs.pixel_n_dim):
            if isinstance(self._slices_pixel[ipix], numbers.Integral):
                # Dimension sliced out with an integer: use that fixed pixel.
                pixel_arrays_new.append(self._slices_pixel[ipix])
            else:
                ipix_curr += 1
                # Undo the slice offset so the underlying WCS sees original
                # pixel coordinates.
                if self._slices_pixel[ipix].start is not None:
                    pixel_arrays_new.append(
                        pixel_arrays[ipix_curr] + self._slices_pixel[ipix].start
                    )
                else:
                    pixel_arrays_new.append(pixel_arrays[ipix_curr])
        pixel_arrays_new = np.broadcast_arrays(*pixel_arrays_new)
        return self._wcs.pixel_to_world_values(*pixel_arrays_new)

    def pixel_to_world_values(self, *pixel_arrays):
        """Forward transform returning only the kept world axes."""
        world_arrays = self._pixel_to_world_values_all(*pixel_arrays)
        # Detect the case of a length 0 array
        if isinstance(world_arrays, np.ndarray) and not world_arrays.shape:
            return world_arrays
        if self._wcs.world_n_dim > 1:
            # Select the dimensions of the original WCS we are keeping.
            world_arrays = [world_arrays[iw] for iw in self._world_keep]
            # If there is only one world dimension (after slicing) we shouldn't return a tuple.
            if self.world_n_dim == 1:
                world_arrays = world_arrays[0]
        return world_arrays

    def world_to_pixel_values(self, *world_arrays):
        """Inverse transform; dropped world axes are filled in from the slice."""
        # World values of the dropped axes at pixel 0 of each kept dimension;
        # these stand in for the coordinates the caller could not supply.
        sliced_out_world_coords = self._pixel_to_world_values_all(
            *[0] * len(self._pixel_keep)
        )
        world_arrays = tuple(map(np.asanyarray, world_arrays))
        world_arrays_new = []
        iworld_curr = -1
        for iworld in range(self._wcs.world_n_dim):
            if iworld in self._world_keep:
                iworld_curr += 1
                world_arrays_new.append(world_arrays[iworld_curr])
            else:
                world_arrays_new.append(sliced_out_world_coords[iworld])
        world_arrays_new = np.broadcast_arrays(*world_arrays_new)
        pixel_arrays = list(self._wcs.world_to_pixel_values(*world_arrays_new))
        # Re-apply the slice offsets so results are in sliced pixel coords.
        for ipixel in range(self._wcs.pixel_n_dim):
            if (
                isinstance(self._slices_pixel[ipixel], slice)
                and self._slices_pixel[ipixel].start is not None
            ):
                pixel_arrays[ipixel] -= self._slices_pixel[ipixel].start
        # Detect the case of a length 0 array
        if isinstance(pixel_arrays, np.ndarray) and not pixel_arrays.shape:
            return pixel_arrays
        pixel = tuple(pixel_arrays[ip] for ip in self._pixel_keep)
        if self.pixel_n_dim == 1 and self._wcs.pixel_n_dim > 1:
            pixel = pixel[0]
        return pixel

    @property
    def world_axis_object_components(self):
        return [self._wcs.world_axis_object_components[idx] for idx in self._world_keep]

    @property
    def world_axis_object_classes(self):
        # Only keep the classes referenced by the remaining components.
        keys_keep = [item[0] for item in self.world_axis_object_components]
        return dict(
            [
                item
                for item in self._wcs.world_axis_object_classes.items()
                if item[0] in keys_keep
            ]
        )

    @property
    def array_shape(self):
        # Apply the slice to a zero-size broadcast dummy to get the shape.
        if self._wcs.array_shape:
            return np.broadcast_to(0, self._wcs.array_shape)[
                tuple(self._slices_array)
            ].shape

    @property
    def pixel_shape(self):
        if self.array_shape:
            return tuple(self.array_shape[::-1])

    @property
    def pixel_bounds(self):
        if self._wcs.pixel_bounds is None:
            return
        bounds = []
        for idx in self._pixel_keep:
            if self._slices_pixel[idx].start is None:
                bounds.append(self._wcs.pixel_bounds[idx])
            else:
                # Shift the bounds into the sliced pixel frame.
                imin, imax = self._wcs.pixel_bounds[idx]
                start = self._slices_pixel[idx].start
                bounds.append((imin - start, imax - start))
        return tuple(bounds)

    @property
    def axis_correlation_matrix(self):
        return self._wcs.axis_correlation_matrix[self._world_keep][:, self._pixel_keep]
|
46b45886d859bd82c473cbf4ed8054a9c5d7327ed65b227f1fb2cf58bc8f50c6 | # Note that we test the main astropy.wcs.WCS class directly rather than testing
# the mix-in class on its own (since it's not functional without being used as
# a mix-in)
import warnings
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import (
FK5,
ICRS,
ITRS,
EarthLocation,
Galactic,
SkyCoord,
SpectralCoord,
)
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from astropy.units import Quantity
from astropy.units.core import UnitsWarning
from astropy.utils import iers
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs._wcs import __version__ as wcsver
from astropy.wcs.wcs import WCS, FITSFixedWarning, NoConvergence, Sip
from astropy.wcs.wcsapi.fitswcs import VELOCITY_FRAMES, custom_ctype_to_ucd_mapping
###############################################################################
# The following example is the simplest WCS with default values
###############################################################################
# A 1-D WCS with all defaults: a dimensionless identity pixel->world mapping.
WCS_EMPTY = WCS(naxis=1)
WCS_EMPTY.wcs.crpix = [1]
def test_empty():
    """A default 1-D WCS behaves as a dimensionless identity transform."""
    wcs = WCS_EMPTY
    # Low-level API
    assert wcs.pixel_n_dim == 1
    assert wcs.world_n_dim == 1
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == [None]
    assert wcs.world_axis_units == [""]
    assert wcs.pixel_axis_names == [""]
    assert wcs.world_axis_names == [""]
    assert_equal(wcs.axis_correlation_matrix, True)
    assert wcs.world_axis_object_components == [("world", 0, "value")]
    assert wcs.world_axis_object_classes["world"][0] is Quantity
    assert wcs.world_axis_object_classes["world"][1] == ()
    assert wcs.world_axis_object_classes["world"][2]["unit"] is u.one
    # Identity transform: world value equals pixel value, scalars stay scalar.
    assert_allclose(wcs.pixel_to_world_values(29), 29)
    assert_allclose(wcs.array_index_to_world_values(29), 29)
    assert np.ndim(wcs.pixel_to_world_values(29)) == 0
    assert np.ndim(wcs.array_index_to_world_values(29)) == 0
    assert_allclose(wcs.world_to_pixel_values(29), 29)
    assert_equal(wcs.world_to_array_index_values(29), (29,))
    assert np.ndim(wcs.world_to_pixel_values(29)) == 0
    assert np.ndim(wcs.world_to_array_index_values(29)) == 0
    # High-level API
    coord = wcs.pixel_to_world(29)
    assert_quantity_allclose(coord, 29 * u.one)
    assert np.ndim(coord) == 0
    coord = wcs.array_index_to_world(29)
    assert_quantity_allclose(coord, 29 * u.one)
    assert np.ndim(coord) == 0
    coord = 15 * u.one
    x = wcs.world_to_pixel(coord)
    assert_allclose(x, 15.0)
    assert np.ndim(x) == 0
    i = wcs.world_to_array_index(coord)
    assert_equal(i, 15)
    assert np.ndim(i) == 0
###############################################################################
# The following example is a simple 2D image with celestial coordinates
###############################################################################
# Minimal 2-D celestial (RA/Dec TAN) header used by test_simple_celestial.
HEADER_SIMPLE_CELESTIAL = """
WCSAXES = 2
CTYPE1  = RA---TAN
CTYPE2  = DEC--TAN
CRVAL1  = 10
CRVAL2  = 20
CRPIX1  = 30
CRPIX2  = 40
CDELT1  = -0.1
CDELT2  =  0.1
CROTA2  = 0.
CUNIT1  = deg
CUNIT2  = deg
"""
# Building a Header from loose cards triggers VerifyWarning; silence it.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", VerifyWarning)
    WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(HEADER_SIMPLE_CELESTIAL, sep="\n"))
def test_simple_celestial():
    """Exercise the APE 14 low- and high-level APIs on a simple RA/Dec WCS."""
    wcs = WCS_SIMPLE_CELESTIAL
    # Low-level API
    assert wcs.pixel_n_dim == 2
    assert wcs.world_n_dim == 2
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == ["pos.eq.ra", "pos.eq.dec"]
    assert wcs.world_axis_units == ["deg", "deg"]
    assert wcs.pixel_axis_names == ["", ""]
    assert wcs.world_axis_names == ["", ""]
    assert_equal(wcs.axis_correlation_matrix, True)
    assert wcs.world_axis_object_components == [
        ("celestial", 0, "spherical.lon.degree"),
        ("celestial", 1, "spherical.lat.degree"),
    ]
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], ICRS)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    # CRPIX is 1-based, the APE 14 pixel convention is 0-based: 29,39 -> CRVAL.
    assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))
    assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))
    assert_allclose(wcs.world_to_pixel_values(10, 20), (29.0, 39.0))
    assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))
    # High-level API
    coord = wcs.pixel_to_world(29, 39)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 10)
    assert_allclose(coord.dec.deg, 20)
    coord = wcs.array_index_to_world(39, 29)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 10)
    assert_allclose(coord.dec.deg, 20)
    coord = SkyCoord(10, 20, unit="deg", frame="icrs")
    x, y = wcs.world_to_pixel(coord)
    assert_allclose(x, 29.0)
    assert_allclose(y, 39.0)
    i, j = wcs.world_to_array_index(coord)
    assert_equal(i, 39)
    assert_equal(j, 29)
    # Check that if the coordinates are passed in a different frame things still
    # work properly
    coord_galactic = coord.galactic
    x, y = wcs.world_to_pixel(coord_galactic)
    assert_allclose(x, 29.0)
    assert_allclose(y, 39.0)
    i, j = wcs.world_to_array_index(coord_galactic)
    assert_equal(i, 39)
    assert_equal(j, 29)
    # Check that we can actually index the array
    data = np.arange(3600).reshape((60, 60))
    coord = SkyCoord(10, 20, unit="deg", frame="icrs")
    index = wcs.world_to_array_index(coord)
    assert_equal(data[index], 2369)
    coord = SkyCoord([10, 12], [20, 22], unit="deg", frame="icrs")
    index = wcs.world_to_array_index(coord)
    assert_equal(data[index], [2369, 3550])
###############################################################################
# The following example is a spectral cube with axes in an unusual order
###############################################################################
# Spectral cube header with deliberately unusual axis order (lat, freq, lon),
# used by test_spectral_cube and as the base for the non-aligned variant.
HEADER_SPECTRAL_CUBE = """
WCSAXES = 3
CTYPE1  = GLAT-CAR
CTYPE2  = FREQ
CTYPE3  = GLON-CAR
CNAME1  = Latitude
CNAME2  = Frequency
CNAME3  = Longitude
CRVAL1  = 10
CRVAL2  = 20
CRVAL3  = 25
CRPIX1  = 30
CRPIX2  = 40
CRPIX3  = 45
CDELT1  = -0.1
CDELT2  =  0.5
CDELT3  =  0.1
CUNIT1  = deg
CUNIT2  = Hz
CUNIT3  = deg
"""
# Building a Header from loose cards triggers VerifyWarning; silence it.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", VerifyWarning)
    WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep="\n"))
def test_spectral_cube():
    """APE 14 API on a spectral cube whose axes are (lat, freq, lon)."""
    # Spectral cube with a weird axis ordering
    wcs = WCS_SPECTRAL_CUBE
    # Low-level API
    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == [
        "pos.galactic.lat",
        "em.freq",
        "pos.galactic.lon",
    ]
    assert wcs.world_axis_units == ["deg", "Hz", "deg"]
    assert wcs.pixel_axis_names == ["", "", ""]
    assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
    # lat and lon (axes 0 and 2) are coupled; freq (axis 1) is independent.
    assert_equal(
        wcs.axis_correlation_matrix,
        [[True, False, True], [False, True, False], [True, False, True]],
    )
    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == (
        "celestial",
        1,
        "spherical.lat.degree",
    )
    assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
    assert wcs.world_axis_object_components[2] == (
        "celestial",
        0,
        "spherical.lon.degree",
    )
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert wcs.world_axis_object_classes["spectral"][0] is Quantity
    assert wcs.world_axis_object_classes["spectral"][1] == ()
    assert wcs.world_axis_object_classes["spectral"][2] == {}
    assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
    assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
    assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
    # High-level API
    coord, spec = wcs.pixel_to_world(29, 39, 44)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, Galactic)
    assert_allclose(coord.l.deg, 25)
    assert_allclose(coord.b.deg, 10)
    assert isinstance(spec, SpectralCoord)
    assert_allclose(spec.to_value(u.Hz), 20)
    coord, spec = wcs.array_index_to_world(44, 39, 29)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, Galactic)
    assert_allclose(coord.l.deg, 25)
    assert_allclose(coord.b.deg, 10)
    assert isinstance(spec, SpectralCoord)
    assert_allclose(spec.to_value(u.Hz), 20)
    coord = SkyCoord(25, 10, unit="deg", frame="galactic")
    spec = 20 * u.Hz
    with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
        x, y, z = wcs.world_to_pixel(coord, spec)
    assert_allclose(x, 29.0)
    assert_allclose(y, 39.0)
    assert_allclose(z, 44.0)
    # Order of world coordinates shouldn't matter
    with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
        x, y, z = wcs.world_to_pixel(spec, coord)
    assert_allclose(x, 29.0)
    assert_allclose(y, 39.0)
    assert_allclose(z, 44.0)
    with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
        i, j, k = wcs.world_to_array_index(coord, spec)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)
    # Order of world coordinates shouldn't matter
    with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
        i, j, k = wcs.world_to_array_index(spec, coord)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)
# Same cube, but with off-diagonal PC terms coupling the freq and lon axes.
HEADER_SPECTRAL_CUBE_NONALIGNED = (
    HEADER_SPECTRAL_CUBE.strip()
    + "\n"
    + """
PC2_3 = -0.5
PC3_2 = +0.5
"""
)
# Building a Header from loose cards triggers VerifyWarning; silence it.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", VerifyWarning)
    WCS_SPECTRAL_CUBE_NONALIGNED = WCS(
        Header.fromstring(HEADER_SPECTRAL_CUBE_NONALIGNED, sep="\n")
    )
def test_spectral_cube_nonaligned():
    """APE 14 attributes for a cube whose spectral axis mixes with a sky axis."""
    # Make sure that correlation matrix gets adjusted if there are non-identity
    # CD matrix terms.
    wcs = WCS_SPECTRAL_CUBE_NONALIGNED
    assert wcs.world_axis_physical_types == [
        "pos.galactic.lat",
        "em.freq",
        "pos.galactic.lon",
    ]
    assert wcs.world_axis_units == ["deg", "Hz", "deg"]
    assert wcs.pixel_axis_names == ["", "", ""]
    assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
    # With the extra PC2_3/PC3_2 terms, only world axis 1 (frequency) versus
    # pixel axis 0 remains uncorrelated.
    assert_equal(
        wcs.axis_correlation_matrix,
        [
            [True, True, True],
            [False, True, True],
            [True, True, True],
        ],
    )
    # NOTE: we check world_axis_object_components and world_axis_object_classes
    # again here because in the past this failed when non-aligned axes were
    # present, so this serves as a regression test.
    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == (
        "celestial",
        1,
        "spherical.lat.degree",
    )
    assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
    assert wcs.world_axis_object_components[2] == (
        "celestial",
        0,
        "spherical.lon.degree",
    )
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert wcs.world_axis_object_classes["spectral"][0] is Quantity
    assert wcs.world_axis_object_classes["spectral"][1] == ()
    assert wcs.world_axis_object_classes["spectral"][2] == {}
###############################################################################
# The following example is from Rots et al (2015), Table 5. It represents a
# cube with two spatial dimensions and one time dimension
###############################################################################
HEADER_TIME_CUBE = """
SIMPLE = T / Fits standard
BITPIX = -32 / Bits per pixel
NAXIS = 3 / Number of axes
NAXIS1 = 2048 / Axis length
NAXIS2 = 2048 / Axis length
NAXIS3 = 11 / Axis length
DATE = '2008-10-28T14:39:06' / Date FITS file was generated
OBJECT = '2008 TC3' / Name of the object observed
EXPTIME = 1.0011 / Integration time
MJD-OBS = 54746.02749237 / Obs start
DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date
TELESCOP= 'VISTA' / ESO Telescope Name
INSTRUME= 'VIRCAM' / Instrument used.
TIMESYS = 'UTC' / From Observatory Time System
TREFPOS = 'TOPOCENT' / Topocentric
MJDREF = 54746.0 / Time reference point in MJD
RADESYS = 'ICRS' / Not equinoctal
CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection
CRVAL2 = 2.01824372640628 / RA at ref pixel
CUNIT2 = 'deg' / Angles are degrees always
CRPIX2 = 2956.6 / Pixel coordinate at ref point
CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection
CRVAL1 = 14.8289418840003 / Dec at ref pixel
CUNIT1 = 'deg' / Angles are degrees always
CRPIX1 = -448.2 / Pixel coordinate at ref point
CTYPE3 = 'UTC' / linear time (UTC)
CRVAL3 = 2375.341 / Relative time of first frame
CUNIT3 = 's' / Time unit
CRPIX3 = 1.0 / Pixel coordinate at ref point
CTYPE3A = 'TT' / alternative linear time (TT)
CRVAL3A = 2440.525 / Relative time of first frame
CUNIT3A = 's' / Time unit
CRPIX3A = 1.0 / Pixel coordinate at ref point
OBSGEO-B= -24.6157 / [deg] Tel geodetic latitute (=North)+
OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+
OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid
CRDER3 = 0.0819 / random error in timings from fit
CSYER3 = 0.0100 / absolute time error
PC1_1 = 0.999999971570892 / WCS transform matrix element
PC1_2 = 0.000238449608932 / WCS transform matrix element
PC2_1 = -0.000621542859395 / WCS transform matrix element
PC2_2 = 0.999999806842218 / WCS transform matrix element
CDELT1 = -9.48575432499806E-5 / Axis scale at reference point
CDELT2 = 9.48683176211164E-5 / Axis scale at reference point
CDELT3 = 13.3629 / Axis scale at reference point
PV1_1 = 1. / ZPN linear term
PV1_3 = 42. / ZPN cubic term
"""
# Parse once at import time, silencing verification/fix warnings.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
    WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep="\n"))
def test_time_cube():
    """Check APE 14 attributes and transforms for a (time, dec, ra) cube."""
    # Time cube from Rots et al (2015), Table 5; note the dec/ra/time world
    # axis ordering.  (The previous comment incorrectly said "spectral cube".)
    wcs = WCS_TIME_CUBE
    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape == (11, 2048, 2048)
    assert wcs.pixel_shape == (2048, 2048, 11)
    assert wcs.world_axis_physical_types == ["pos.eq.dec", "pos.eq.ra", "time"]
    assert wcs.world_axis_units == ["deg", "deg", "s"]
    assert wcs.pixel_axis_names == ["", "", ""]
    assert wcs.world_axis_names == ["", "", ""]
    # Celestial axes are correlated with each other; time stands alone.
    assert_equal(
        wcs.axis_correlation_matrix,
        [[True, True, False], [True, True, False], [False, False, True]],
    )
    components = wcs.world_axis_object_components
    assert components[0] == ("celestial", 1, "spherical.lat.degree")
    assert components[1] == ("celestial", 0, "spherical.lon.degree")
    assert components[2][:2] == ("time", 0)
    # The time component uses a callable to extract the value from a Time.
    assert callable(components[2][2])
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], ICRS)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert wcs.world_axis_object_classes["time"][0] is Time
    assert wcs.world_axis_object_classes["time"][1] == ()
    assert wcs.world_axis_object_classes["time"][2] == {}
    # Time entries carry a fourth element: a constructor callable.
    assert callable(wcs.world_axis_object_classes["time"][3])
    # Low-level API: the reference pixel maps onto the reference values.
    assert_allclose(
        wcs.pixel_to_world_values(-449.2, 2955.6, 0),
        (14.8289418840003, 2.01824372640628, 2375.341),
    )
    assert_allclose(
        wcs.array_index_to_world_values(0, 2955.6, -449.2),
        (14.8289418840003, 2.01824372640628, 2375.341),
    )
    assert_allclose(
        wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),
        (-449.2, 2955.6, 0),
    )
    assert_equal(
        wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),
        (0, 2956, -449),
    )
    # High-level API
    coord, time = wcs.pixel_to_world(29, 39, 44)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 1.7323356692202325)
    assert_allclose(coord.dec.deg, 14.783516054817797)
    assert isinstance(time, Time)
    assert_allclose(time.mjd, 54746.03429755324)
    coord, time = wcs.array_index_to_world(44, 39, 29)
    assert isinstance(coord, SkyCoord)
    assert isinstance(coord.frame, ICRS)
    assert_allclose(coord.ra.deg, 1.7323356692202325)
    assert_allclose(coord.dec.deg, 14.783516054817797)
    assert isinstance(time, Time)
    assert_allclose(time.mjd, 54746.03429755324)
    x, y, z = wcs.world_to_pixel(coord, time)
    assert_allclose(x, 29.0)
    assert_allclose(y, 39.0)
    assert_allclose(z, 44.0)
    # Order of world coordinates shouldn't matter
    x, y, z = wcs.world_to_pixel(time, coord)
    assert_allclose(x, 29.0)
    assert_allclose(y, 39.0)
    assert_allclose(z, 44.0)
    i, j, k = wcs.world_to_array_index(coord, time)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)
    # Order of world coordinates shouldn't matter
    i, j, k = wcs.world_to_array_index(time, coord)
    assert_equal(i, 44)
    assert_equal(j, 39)
    assert_equal(k, 29)
###############################################################################
# The following tests are to make sure that Time objects are constructed
# correctly for a variety of combinations of WCS keywords
###############################################################################
HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""
# NOTE(review): DATEREF is only added for wcslib >= 7.1 — presumably newer
# wcslib expects it alongside MJDREF; confirm against the wcslib changelog.
if Version(wcsver) >= Version("7.1"):
    HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n"
@pytest.fixture
def header_time_1d():
    """Return a fresh Header for the 1-d time-axis test WCS."""
    return Header.fromstring(HEADER_TIME_1D, sep="\n")
def assert_time_at(header, position, jd1, jd2, scale, format):
    """Build a WCS from ``header`` and verify the Time returned at ``position``."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FITSFixedWarning)
        wcs = WCS(header)
    result = wcs.pixel_to_world(position)
    # Compare the two-part Julian date so high-precision offsets are visible.
    assert_allclose(result.jd1, jd1, rtol=1e-10)
    assert_allclose(result.jd2, jd2, rtol=1e-10)
    assert result.scale == scale
    assert result.format == format
@pytest.mark.parametrize(
    "scale", ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc", "local")
)
def test_time_1d_values(header_time_1d, scale):
    """Time objects should be built with the right value, scale and format."""
    header_time_1d["CTYPE1"] = scale.upper()
    # Pixel 1 is one CDELT (2 s) past CRVAL1 (5 s), i.e. 7 s after MJDREF.
    expected_jd2 = 0.1 + 7 / 3600 / 24
    assert_time_at(header_time_1d, 1, 2450003, expected_jd2, scale, "mjd")
def test_time_1d_values_gps(header_time_1d):
    """The GPS scale is mapped onto TAI with a constant 19 s offset."""
    header_time_1d["CTYPE1"] = "GPS"
    offset_days = (7 + 19) / 3600 / 24
    assert_time_at(header_time_1d, 1, 2450003, 0.1 + offset_days, "tai", "mjd")
def test_time_1d_values_deprecated(header_time_1d):
    """Deprecated FITS time scales map onto their modern equivalents."""
    expected_jd2 = 0.1 + 7 / 3600 / 24
    for ctype, scale in (("TDT", "tt"), ("IAT", "tai"), ("GMT", "utc"), ("ET", "tt")):
        header_time_1d["CTYPE1"] = ctype
        assert_time_at(header_time_1d, 1, 2450003, expected_jd2, scale, "mjd")
def test_time_1d_values_time(header_time_1d):
    """CTYPE1='TIME' defers to the TIMESYS keyword for the scale."""
    expected_jd2 = 0.1 + 7 / 3600 / 24
    header_time_1d["CTYPE1"] = "TIME"
    assert_time_at(header_time_1d, 1, 2450003, expected_jd2, "utc", "mjd")
    header_time_1d["TIMESYS"] = "TAI"
    assert_time_at(header_time_1d, 1, 2450003, expected_jd2, "tai", "mjd")
@pytest.mark.remote_data
@pytest.mark.parametrize("scale", ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc"))
def test_time_1d_roundtrip(header_time_1d, scale):
    """World <-> pixel transformations should round-trip for every time scale."""
    pixel_in = np.arange(3, 10)
    header_time_1d["CTYPE1"] = scale.upper()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FITSFixedWarning)
        wcs = WCS(header_time_1d)
    # Direct round-trip
    assert_allclose(pixel_in, wcs.world_to_pixel(wcs.pixel_to_world(pixel_in)))
    # Round-trip via an intermediate change of scale and format
    time = wcs.pixel_to_world(pixel_in).tdb
    time.format = "isot"
    assert_allclose(pixel_in, wcs.world_to_pixel(time))
def test_time_1d_high_precision(header_time_1d):
    """MJDREFI/MJDREFF split reference values must be combined losslessly."""
    del header_time_1d["MJDREF"]
    header_time_1d["MJDREFI"] = 52000.0
    header_time_1d["MJDREFF"] = 1e-11
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FITSFixedWarning)
        wcs = WCS(header_time_1d)
    t = wcs.pixel_to_world(10)
    # Tolerances are tight enough that the MJDREFF contribution (1e-11 d) is
    # actually exercised rather than lost in rounding.
    assert_allclose(t.jd1, 2452001.0, rtol=1e-12)
    assert_allclose(t.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13)
def test_time_1d_location_geodetic(header_time_1d):
    """The OBSGEO-L/B/H keywords should populate Time.location (geodetic)."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FITSFixedWarning)
        wcs = WCS(header_time_1d)
    location = wcs.pixel_to_world(10).location
    lon, lat, alt = location.to_geodetic()
    assert_allclose(lon.degree, -20)
    assert_allclose(lat.degree, -70)
    # FIXME: the altitude check is disabled because ERFA does not implement
    # the IAU 1976 ellipsoid (https://github.com/astropy/astropy/issues/9420)
    # assert_allclose(alt.to_value(u.m), 2530.)
@pytest.fixture
def header_time_1d_no_obs():
    """Return the 1-d time header with the observer location removed."""
    header = Header.fromstring(HEADER_TIME_1D, sep="\n")
    for key in ("OBSGEO-L", "OBSGEO-B", "OBSGEO-H"):
        del header[key]
    return header
def test_time_1d_location_geocentric(header_time_1d_no_obs):
    """The OBSGEO-X/Y/Z keywords should populate Time.location (geocentric)."""
    header = header_time_1d_no_obs
    for key, value in (("OBSGEO-X", 10), ("OBSGEO-Y", -20), ("OBSGEO-Z", 30)):
        header[key] = value
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FITSFixedWarning)
        wcs = WCS(header)
    x, y, z = wcs.pixel_to_world(10).location.to_geocentric()
    assert_allclose(x.to_value(u.m), 10)
    assert_allclose(y.to_value(u.m), -20)
    assert_allclose(z.to_value(u.m), 30)
def test_time_1d_location_geocenter(header_time_1d_no_obs):
    """TREFPOS='GEOCENTER' places the observer at the geocentre (0, 0, 0)."""
    header_time_1d_no_obs["TREFPOS"] = "GEOCENTER"
    wcs = WCS(header_time_1d_no_obs)
    location = wcs.pixel_to_world(10).location
    for coordinate in location.to_geocentric():
        assert_allclose(coordinate.to_value(u.m), 0)
def test_time_1d_location_missing(header_time_1d_no_obs):
    """Without any OBSGEO keywords the Time location is None, with a warning."""
    wcs = WCS(header_time_1d_no_obs)
    expected = (
        "Missing or incomplete observer location "
        "information, setting location in Time to None"
    )
    with pytest.warns(UserWarning, match=expected):
        assert wcs.pixel_to_world(10).location is None
def test_time_1d_location_incomplete(header_time_1d_no_obs):
    """A partial observer location (longitude only) is treated as missing."""
    header_time_1d_no_obs["OBSGEO-L"] = 10.0
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FITSFixedWarning)
        wcs = WCS(header_time_1d_no_obs)
    expected = (
        "Missing or incomplete observer location "
        "information, setting location in Time to None"
    )
    with pytest.warns(UserWarning, match=expected):
        assert wcs.pixel_to_world(10).location is None
def test_time_1d_location_unsupported(header_time_1d_no_obs):
    """An unsupported TREFPOS yields location=None together with a warning."""
    header_time_1d_no_obs["TREFPOS"] = "BARYCENTER"
    wcs = WCS(header_time_1d_no_obs)
    expected = (
        "Observation location 'barycenter' is not "
        "supported, setting location in Time to None"
    )
    with pytest.warns(UserWarning, match=expected):
        assert wcs.pixel_to_world(10).location is None
def test_time_1d_unsupported_ctype(header_time_1d_no_obs):
    """Unsupported sub-scales such as UT(WWV) fall back to the base scale."""
    header_time_1d_no_obs["CTYPE1"] = "UT(WWV)"
    wcs = WCS(header_time_1d_no_obs)
    with pytest.warns(
        UserWarning, match="Dropping unsupported sub-scale WWV from scale UT"
    ):
        result = wcs.pixel_to_world(10)
    # A plain Time is still returned despite the dropped sub-scale.
    assert isinstance(result, Time)
###############################################################################
# Extra corner cases
###############################################################################
def test_unrecognized_unit():
    """An unparsable CUNIT is kept verbatim (and triggers a UnitsWarning)."""
    # TODO: Determine whether the following behavior is desirable
    bogus = "bananas // sekonds"
    wcs = WCS(naxis=1)
    with pytest.warns(UnitsWarning):
        wcs.wcs.cunit = [bogus]
    assert wcs.world_axis_units == [bogus]
def test_distortion_correlations():
    """SIP distortions couple all axes in the correlation matrix."""
    filename = get_pkg_data_filename("../../tests/data/sip.fits")
    with pytest.warns(FITSFixedWarning):
        wcs = WCS(filename)
    assert_equal(wcs.axis_correlation_matrix, True)
    # An identity PC matrix does not decouple the axes: the SIP distortion
    # still mixes them.
    wcs.wcs.pc = [[1, 0], [0, 1]]
    assert_equal(wcs.axis_correlation_matrix, True)
    # Neither does renaming the axes to something non-celestial.
    wcs.wcs.ctype = ["X", "Y"]
    assert_equal(wcs.axis_correlation_matrix, True)
    # Removing the distortion finally decouples the axes.
    wcs.sip = None
    assert_equal(wcs.axis_correlation_matrix, [[True, False], [False, True]])
    # Celestial axes are always correlated with each other ...
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    assert_equal(wcs.axis_correlation_matrix, True)
    # ... as are generic axes coupled through a non-diagonal PC matrix.
    wcs.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]
    wcs.wcs.ctype = ["X", "Y"]
    assert_equal(wcs.axis_correlation_matrix, True)
def test_custom_ctype_to_ucd_mappings():
    """Custom CTYPE->UCD mappings apply, nest, and the innermost wins."""
    mapping = custom_ctype_to_ucd_mapping
    wcs = WCS(naxis=1)
    wcs.wcs.ctype = ["SPAM"]
    assert wcs.world_axis_physical_types == [None]
    # A mapping that does not mention SPAM changes nothing
    with mapping({"APPLE": "food.fruit"}):
        assert wcs.world_axis_physical_types == [None]
    with mapping({"APPLE": "food.fruit", "SPAM": "food.spam"}):
        assert wcs.world_axis_physical_types == ["food.spam"]
    # Nested mappings are combined regardless of nesting order
    with mapping({"SPAM": "food.spam"}), mapping({"APPLE": "food.fruit"}):
        assert wcs.world_axis_physical_types == ["food.spam"]
    with mapping({"APPLE": "food.fruit"}), mapping({"SPAM": "food.spam"}):
        assert wcs.world_axis_physical_types == ["food.spam"]
    # When both levels define SPAM, the innermost definition takes priority
    with mapping({"SPAM": "notfood"}), mapping({"SPAM": "food.spam"}):
        assert wcs.world_axis_physical_types == ["food.spam"]
    with mapping({"SPAM": "food.spam"}), mapping({"SPAM": "notfood"}):
        assert wcs.world_axis_physical_types == ["notfood"]
def test_caching_components_and_classes():
    """Mutating the WCS must invalidate the cached components/classes."""
    # An internal cache backs world_axis_object_components/classes, so this
    # checks that the cache is invalidated when the WCS changes.
    wcs = WCS_SIMPLE_CELESTIAL.deepcopy()
    assert wcs.world_axis_object_components == [
        ("celestial", 0, "spherical.lon.degree"),
        ("celestial", 1, "spherical.lat.degree"),
    ]
    celestial = wcs.world_axis_object_classes["celestial"]
    assert celestial[0] is SkyCoord
    assert celestial[1] == ()
    assert isinstance(celestial[2]["frame"], ICRS)
    assert celestial[2]["unit"] is u.deg
    # Changing RADESYS should be picked up immediately ...
    wcs.wcs.radesys = "FK5"
    frame = wcs.world_axis_object_classes["celestial"][2]["frame"]
    assert isinstance(frame, FK5)
    assert frame.equinox.jyear == 2000.0
    # ... as should a subsequent change of equinox.
    wcs.wcs.equinox = 2010
    frame = wcs.world_axis_object_classes["celestial"][2]["frame"]
    assert isinstance(frame, FK5)
    assert frame.equinox.jyear == 2010.0
def test_sub_wcsapi_attributes():
    """APE 14 attributes must be correctly subset by WCS.sub/WCS.celestial."""
    # Regression test for a bug that caused some of the WCS attributes to be
    # incorrect when using WCS.sub or WCS.celestial (which is an alias for sub
    # with lon/lat types).
    wcs = WCS_SPECTRAL_CUBE.deepcopy()
    wcs.pixel_shape = (30, 40, 50)
    wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
    # Use celestial shortcut
    wcs_sub1 = wcs.celestial
    assert wcs_sub1.pixel_n_dim == 2
    assert wcs_sub1.world_n_dim == 2
    assert wcs_sub1.array_shape == (50, 30)
    assert wcs_sub1.pixel_shape == (30, 50)
    assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)]
    assert wcs_sub1.world_axis_physical_types == [
        "pos.galactic.lat",
        "pos.galactic.lon",
    ]
    assert wcs_sub1.world_axis_units == ["deg", "deg"]
    assert wcs_sub1.world_axis_names == ["Latitude", "Longitude"]
    # Try adding axes (a 0 in the axes list inserts a new dummy axis whose
    # shape/bounds/types are undefined, hence the None entries below)
    wcs_sub2 = wcs.sub([0, 2, 0])
    assert wcs_sub2.pixel_n_dim == 3
    assert wcs_sub2.world_n_dim == 3
    assert wcs_sub2.array_shape == (None, 40, None)
    assert wcs_sub2.pixel_shape == (None, 40, None)
    assert wcs_sub2.pixel_bounds == [None, (-2, 18), None]
    assert wcs_sub2.world_axis_physical_types == [None, "em.freq", None]
    assert wcs_sub2.world_axis_units == ["", "Hz", ""]
    assert wcs_sub2.world_axis_names == ["", "Frequency", ""]
    # Use strings
    wcs_sub3 = wcs.sub(["longitude", "latitude"])
    assert wcs_sub3.pixel_n_dim == 2
    assert wcs_sub3.world_n_dim == 2
    assert wcs_sub3.array_shape == (30, 50)
    assert wcs_sub3.pixel_shape == (50, 30)
    assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)]
    assert wcs_sub3.world_axis_physical_types == [
        "pos.galactic.lon",
        "pos.galactic.lat",
    ]
    assert wcs_sub3.world_axis_units == ["deg", "deg"]
    assert wcs_sub3.world_axis_names == ["Longitude", "Latitude"]
    # Now try without CNAME set
    wcs.wcs.cname = [""] * wcs.wcs.naxis
    wcs_sub4 = wcs.sub(["longitude", "latitude"])
    assert wcs_sub4.pixel_n_dim == 2
    assert wcs_sub4.world_n_dim == 2
    assert wcs_sub4.array_shape == (30, 50)
    assert wcs_sub4.pixel_shape == (50, 30)
    assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)]
    assert wcs_sub4.world_axis_physical_types == [
        "pos.galactic.lon",
        "pos.galactic.lat",
    ]
    assert wcs_sub4.world_axis_units == ["deg", "deg"]
    assert wcs_sub4.world_axis_names == ["", ""]
# Minimal header with two helioprojective sky axes plus a STOKES axis,
# used to check UCD physical-type mapping for polarization.
HEADER_POLARIZED = """
CTYPE1 = 'HPLT-TAN'
CTYPE2 = 'HPLN-TAN'
CTYPE3 = 'STOKES'
"""
@pytest.fixture
def header_polarized():
    """Return a fresh Header with a STOKES polarization axis."""
    return Header.fromstring(HEADER_POLARIZED, sep="\n")
def test_phys_type_polarization(header_polarized):
    """The STOKES axis should map to the 'phys.polarization.stokes' UCD."""
    wcs = WCS(header_polarized)
    assert wcs.world_axis_physical_types[2] == "phys.polarization.stokes"
###############################################################################
# Spectral transformations
###############################################################################
# Topocentric frequency cube with a rest frequency, used as the base header
# for the velocity-frame and spectral CTYPE tests below.
HEADER_SPECTRAL_FRAMES = """
BUNIT = 'Jy/beam'
EQUINOX = 2.000000000E+03
CTYPE1 = 'RA---SIN'
CRVAL1 = 2.60108333333E+02
CDELT1 = -2.777777845E-04
CRPIX1 = 1.0
CUNIT1 = 'deg'
CTYPE2 = 'DEC--SIN'
CRVAL2 = -9.75000000000E-01
CDELT2 = 2.777777845E-04
CRPIX2 = 1.0
CUNIT2 = 'deg'
CTYPE3 = 'FREQ'
CRVAL3 = 1.37835117405E+09
CDELT3 = 9.765625000E+04
CRPIX3 = 32.0
CUNIT3 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_frames():
    """Return a fresh Header for the topocentric spectral cube."""
    return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep="\n")
def test_spectralcoord_frame(header_spectral_frames):
    """SpectralCoords should come back in the velocity frame named by SPECSYS."""
    # This is a test to check the numerical results of transformations between
    # different velocity frames. We simply make sure that the returned
    # SpectralCoords are in the right frame but don't check the transformations
    # since this is already done in test_spectralcoord_accuracy
    # in astropy.coordinates.
    with iers.conf.set_temp("auto_download", False):
        obstime = Time("2009-05-04T04:44:23", scale="utc")
        header = header_spectral_frames.copy()
        header["MJD-OBS"] = obstime.mjd
        header["CRVAL1"] = 16.33211
        header["CRVAL2"] = -34.2221
        header["OBSGEO-L"] = 144.2
        header["OBSGEO-B"] = -20.2
        header["OBSGEO-H"] = 0.0
        # We start off with a WCS defined in topocentric frequency
        with pytest.warns(FITSFixedWarning):
            wcs_topo = WCS(header)
        # We convert a single pixel coordinate to world coordinates and keep only
        # the second high level object - a SpectralCoord:
        sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1]
        # We check that this is in topocentric frame with zero velocities
        assert isinstance(sc_topo, SpectralCoord)
        assert isinstance(sc_topo.observer, ITRS)
        assert sc_topo.observer.obstime.isot == obstime.isot
        assert_equal(sc_topo.observer.data.differentials["s"].d_xyz.value, 0)
        # The observer must sit at (or very near) the OBSGEO location above.
        observatory = (
            EarthLocation.from_geodetic(144.2, -20.2)
            .get_itrs(obstime=obstime)
            .transform_to(ICRS())
        )
        assert (
            observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km
        )
        for specsys, expected_frame in VELOCITY_FRAMES.items():
            header["SPECSYS"] = specsys
            with pytest.warns(FITSFixedWarning):
                wcs = WCS(header)
            sc = wcs.pixel_to_world(0, 0, 31)[1]
            # Now transform to the expected velocity frame, which should leave
            # the spectral coordinate unchanged
            sc_check = sc.with_observer_stationary_relative_to(expected_frame)
            assert_quantity_allclose(sc.quantity, sc_check.quantity)
@pytest.mark.parametrize(
    ("ctype3", "observer"),
    product(["ZOPT", "BETA", "VELO", "VRAD", "VOPT"], [False, True]),
)
def test_different_ctypes(header_spectral_frames, ctype3, observer):
    """Velocity-style spectral CTYPEs should round-trip pixel <-> world."""
    header = header_spectral_frames.copy()
    header["CTYPE3"] = ctype3
    header["CRVAL3"] = 0.1
    header["CDELT3"] = 0.001
    # V* CTYPEs carry a velocity unit; ZOPT/BETA are dimensionless.
    if ctype3[0] == "V":
        header["CUNIT3"] = "m s-1"
    else:
        header["CUNIT3"] = ""
    header["RESTWAV"] = 1.420405752e09
    header["MJD-OBS"] = 55197
    if observer:
        header["OBSGEO-L"] = 144.2
        header["OBSGEO-B"] = -20.2
        header["OBSGEO-H"] = 0.0
        header["SPECSYS"] = "BARYCENT"
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FITSFixedWarning)
        wcs = WCS(header)
    skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31)
    assert isinstance(spectralcoord, SpectralCoord)
    # Without an observer the reverse transform warns but still succeeds.
    if observer:
        pix = wcs.world_to_pixel(skycoord, spectralcoord)
    else:
        with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
            pix = wcs.world_to_pixel(skycoord, spectralcoord)
    assert_allclose(pix, [0, 0, 31], rtol=1e-6, atol=1e-9)
def test_non_convergence_warning():
    """Test case for issue #11446
    Since we can't define a target accuracy when plotting a WCS `all_world2pix`
    should not error but only warn when the default accuracy can't be reached.
    """
    # define a minimal WCS where convergence fails for certain image positions
    wcs = WCS(naxis=2)
    crpix = [0, 0]
    # NOTE(review): this chained assignment makes a, b, ap and bp aliases of
    # the SAME array, so the a[3, 0] write below also modifies b, ap and bp.
    # This is apparently enough to provoke non-convergence — confirm the
    # aliasing is intentional before "fixing" it, since independent zero
    # arrays would change the SIP inverse.
    a = b = ap = bp = np.zeros((4, 4))
    a[3, 0] = -1.20116753e-07
    test_pos_x = [1000, 1]
    test_pos_y = [0, 2]
    wcs.sip = Sip(a, b, ap, bp, crpix)
    # first make sure the WCS works when using a low accuracy
    expected = wcs.all_world2pix(test_pos_x, test_pos_y, 0, tolerance=1e-3)
    # then check that it fails when using the default accuracy
    with pytest.raises(NoConvergence):
        wcs.all_world2pix(test_pos_x, test_pos_y, 0)
    # at last check that world_to_pixel_values raises a warning but returns
    # the same 'low accuray' result
    with pytest.warns(UserWarning):
        assert_allclose(wcs.world_to_pixel_values(test_pos_x, test_pos_y), expected)
# 1-d (spectral-axis only) counterpart of HEADER_SPECTRAL_FRAMES.
HEADER_SPECTRAL_1D = """
CTYPE1 = 'FREQ'
CRVAL1 = 1.37835117405E+09
CDELT1 = 9.765625000E+04
CRPIX1 = 32.0
CUNIT1 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_1d():
    """Return a fresh Header for the 1-d spectral test WCS."""
    return Header.fromstring(HEADER_SPECTRAL_1D, sep="\n")
@pytest.mark.parametrize(
    ("ctype1", "observer"),
    product(["ZOPT", "BETA", "VELO", "VRAD", "VOPT"], [False, True]),
)
def test_spectral_1d(header_spectral_1d, ctype1, observer):
    """1-d spectral WCS round-trips with partial observer/target information."""
    # This is a regression test for issues that happened with 1-d WCS
    # where the target is not defined but observer is.
    header = header_spectral_1d.copy()
    header["CTYPE1"] = ctype1
    header["CRVAL1"] = 0.1
    header["CDELT1"] = 0.001
    # V* CTYPEs carry a velocity unit; ZOPT/BETA are dimensionless.
    if ctype1[0] == "V":
        header["CUNIT1"] = "m s-1"
    else:
        header["CUNIT1"] = ""
    header["RESTWAV"] = 1.420405752e09
    header["MJD-OBS"] = 55197
    if observer:
        header["OBSGEO-L"] = 144.2
        header["OBSGEO-B"] = -20.2
        header["OBSGEO-H"] = 0.0
        header["SPECSYS"] = "BARYCENT"
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FITSFixedWarning)
        wcs = WCS(header)
    # First ensure that transformations round-trip
    spectralcoord = wcs.pixel_to_world(31)
    assert isinstance(spectralcoord, SpectralCoord)
    assert spectralcoord.target is None
    assert (spectralcoord.observer is not None) is observer
    if observer:
        expected_message = "No target defined on SpectralCoord"
    else:
        expected_message = "No observer defined on WCS"
    with pytest.warns(AstropyUserWarning, match=expected_message):
        pix = wcs.world_to_pixel(spectralcoord)
    assert_allclose(pix, [31], rtol=1e-6)
    # Also make sure that we can convert a SpectralCoord on which the observer
    # is not defined but the target is.
    with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
        spectralcoord_no_obs = SpectralCoord(
            spectralcoord.quantity,
            doppler_rest=spectralcoord.doppler_rest,
            doppler_convention=spectralcoord.doppler_convention,
            target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc),
        )
    if observer:
        expected_message = "No observer defined on SpectralCoord"
    else:
        expected_message = "No observer defined on WCS"
    with pytest.warns(AstropyUserWarning, match=expected_message):
        pix2 = wcs.world_to_pixel(spectralcoord_no_obs)
    assert_allclose(pix2, [31], rtol=1e-6)
    # And finally check case when both observer and target are defined on the
    # SpectralCoord
    with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
        spectralcoord_no_obs = SpectralCoord(
            spectralcoord.quantity,
            doppler_rest=spectralcoord.doppler_rest,
            doppler_convention=spectralcoord.doppler_convention,
            observer=ICRS(10 * u.deg, 20 * u.deg, distance=0 * u.kpc),
            target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc),
        )
    if observer:
        pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
    else:
        with pytest.warns(AstropyUserWarning, match="No observer defined on WCS"):
            pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
    assert_allclose(pix3, [31], rtol=1e-6)
# Spectral cube carrying time keywords (MJD-AVG/TIMESYS), used to exercise
# the MJD-AVG -> MJD-OBS -> DATE-OBS fallback chain below.
HEADER_SPECTRAL_WITH_TIME = """
WCSAXES = 3
CTYPE1 = 'RA---TAN'
CTYPE2 = 'DEC--TAN'
CTYPE3 = 'WAVE'
CRVAL1 = 98.83153
CRVAL2 = -66.818
CRVAL3 = 6.4205
CRPIX1 = 21.
CRPIX2 = 22.
CRPIX3 = 1.
CDELT1 = 3.6111E-05
CDELT2 = 3.6111E-05
CDELT3 = 0.001
CUNIT1 = 'deg'
CUNIT2 = 'deg'
CUNIT3 = 'um'
MJD-AVG = 59045.41466
RADESYS = 'ICRS'
SPECSYS = 'BARYCENT'
TIMESYS = 'UTC'
"""
@pytest.fixture
def header_spectral_with_time():
    """Return a fresh Header for the spectral cube with time keywords."""
    return Header.fromstring(HEADER_SPECTRAL_WITH_TIME, sep="\n")
def test_spectral_with_time_kw(header_spectral_with_time):
    """Check the MJD-AVG -> MJD-OBS -> DATE-OBS -> scale='utc' fallback chain.

    Bug fixes relative to the previous version: the ``check_wcs`` helper
    ignored its parameter and always used the closed-over ``w`` (and was
    even called with a Header instead of a WCS in two places), and the final
    "fall back to scale='utc'" step never rebuilt the WCS after deleting
    TIMESYS, so that fallback was never actually exercised.
    """

    def check_wcs(w):
        # The reference pixel must round-trip onto the reference values.
        assert_allclose(w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval)
        sky, spec = w.pixel_to_world(*w.wcs.crpix)
        assert_allclose(
            (sky.spherical.lon.degree, sky.spherical.lat.degree, spec.value),
            w.wcs.crval,
            rtol=1e-3,
        )

    # Check with MJD-AVG and TIMESYS
    hdr = header_spectral_with_time.copy()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
        w = WCS(hdr)
    # Make sure the correct keyword is used in a test
    assert ~np.isnan(w.wcs.mjdavg)
    assert np.isnan(w.wcs.mjdobs)
    check_wcs(w)

    # Check fall back to MJD-OBS
    hdr["MJD-OBS"] = hdr["MJD-AVG"]
    del hdr["MJD-AVG"]
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
        w = WCS(hdr)
    # Make sure the correct keyword is used in a test
    assert ~np.isnan(w.wcs.mjdobs)
    assert np.isnan(w.wcs.mjdavg)
    check_wcs(w)

    # Check fall back to DATE-OBS
    hdr["DATE-OBS"] = "2020-07-15"
    del hdr["MJD-OBS"]
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
        w = WCS(hdr)
    # The header fixes derive MJD-OBS from DATE-OBS; blank it out so the
    # DATE-OBS fallback path is genuinely exercised.
    w.wcs.mjdobs = np.nan
    assert np.isnan(w.wcs.mjdobs)
    assert np.isnan(w.wcs.mjdavg)
    assert w.wcs.dateobs != ""
    check_wcs(w)

    # Check fall back to scale='utc' when TIMESYS is absent
    del hdr["TIMESYS"]
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", (VerifyWarning, FITSFixedWarning))
        w = WCS(hdr)
    w.wcs.mjdobs = np.nan
    check_wcs(w)
|
bfb9ff0a09721bfdd16633cb89eedd3336466e20033f8c08e869d5c121676a6b | import numpy as np
from numpy.testing import assert_allclose
from astropy.coordinates import SkyCoord
from astropy.units import Quantity
from astropy.wcs.wcsapi.high_level_api import (
HighLevelWCSMixin,
high_level_objects_to_values,
values_to_high_level_objects,
)
from astropy.wcs.wcsapi.low_level_api import BaseLowLevelWCS
class DoubleLowLevelWCS(BaseLowLevelWCS):
    """
    Minimal low-level WCS whose forward transform multiplies every pixel
    coordinate by two (and whose inverse divides by two).
    """

    def pixel_to_world_values(self, *pixel_arrays):
        # Forward transform: double each coordinate array.
        return [2 * np.asarray(values) for values in pixel_arrays]

    def world_to_pixel_values(self, *world_arrays):
        # Inverse transform: halve each coordinate array.
        return [np.asarray(values) / 2 for values in world_arrays]
class SimpleDuplicateWCS(DoubleLowLevelWCS, HighLevelWCSMixin):
    """
    Example WCS in which two world coordinates use the same high-level class
    (Quantity), exercising the duplicate-class path in the high-level API.
    """

    @property
    def pixel_n_dim(self):
        return 2

    @property
    def world_n_dim(self):
        return 2

    @property
    def world_axis_physical_types(self):
        return ["pos.eq.ra", "pos.eq.dec"]

    @property
    def world_axis_units(self):
        return ["deg", "deg"]

    @property
    def world_axis_object_components(self):
        # Both axes map onto Quantity, but via two distinct keys.
        return [("test1", 0, "value"), ("test2", 0, "value")]

    @property
    def world_axis_object_classes(self):
        return {
            "test1": (Quantity, (), {"unit": "deg"}),
            "test2": (Quantity, (), {"unit": "deg"}),
        }
def test_simple_duplicate():
    """Round-trip through a WCS that reuses one class for both coordinates."""
    wcs = SimpleDuplicateWCS()
    lon, lat = wcs.pixel_to_world(1, 2)
    assert isinstance(lon, Quantity)
    assert isinstance(lat, Quantity)
    pix_x, pix_y = wcs.world_to_pixel(lon, lat)
    assert_allclose(pix_x, 1)
    assert_allclose(pix_y, 2)
class SkyCoordDuplicateWCS(DoubleLowLevelWCS, HighLevelWCSMixin):
    """
    Example WCS whose four world axes form two SkyCoord objects, exercising
    the duplicate-class path with compound high-level objects.
    """

    @property
    def pixel_n_dim(self):
        return 4

    @property
    def world_n_dim(self):
        return 4

    @property
    def world_axis_physical_types(self):
        return ["pos.eq.ra", "pos.eq.dec", "pos.galactic.lon", "pos.galactic.lat"]

    @property
    def world_axis_units(self):
        return ["deg"] * 4

    @property
    def world_axis_object_components(self):
        # 'ra'/'dec' are deliberately given as strings (rather than indices)
        # to check that string argument names are handled properly.
        return [
            ("test1", "ra", "spherical.lon.degree"),
            ("test1", "dec", "spherical.lat.degree"),
            ("test2", 0, "spherical.lon.degree"),
            ("test2", 1, "spherical.lat.degree"),
        ]

    @property
    def world_axis_object_classes(self):
        return {
            "test1": (SkyCoord, (), {"unit": "deg"}),
            "test2": (SkyCoord, (), {"unit": "deg", "frame": "galactic"}),
        }
def test_skycoord_duplicate():
    """Round-trip through a WCS that returns two SkyCoord objects."""
    wcs = SkyCoordDuplicateWCS()
    equatorial, galactic = wcs.pixel_to_world(1, 2, 3, 4)
    assert isinstance(equatorial, SkyCoord)
    assert isinstance(galactic, SkyCoord)
    pixels = wcs.world_to_pixel(equatorial, galactic)
    for actual, expected in zip(pixels, (1, 2, 3, 4)):
        assert_allclose(actual, expected)
class SerializedWCS(DoubleLowLevelWCS, HighLevelWCSMixin):
    """
    WCS with serialized classes, i.e. ``world_axis_object_classes`` refers
    to the high-level classes by fully-qualified name instead of by object.
    """

    @property
    def serialized_classes(self):
        return True

    @property
    def pixel_n_dim(self):
        return 2

    @property
    def world_n_dim(self):
        return 2

    @property
    def world_axis_physical_types(self):
        return ["pos.eq.ra", "pos.eq.dec"]

    @property
    def world_axis_units(self):
        return ["deg", "deg"]

    @property
    def world_axis_object_components(self):
        # NOTE(review): only a single component is declared even though
        # world_n_dim is 2 — presumably intentional for this test; confirm.
        return [("test", 0, "value")]

    @property
    def world_axis_object_classes(self):
        # Both the class and its ``unit`` keyword are given in serialized
        # (string) form and must be resolved by the high-level layer.
        return {
            "test": (
                "astropy.units.Quantity",
                (),
                {"unit": ("astropy.units.Unit", ("deg",), {})},
            )
        }
def test_serialized_classes():
    # The string-serialized class names must round-trip through the
    # high-level API just like real class objects.
    wcs = SerializedWCS()
    world = wcs.pixel_to_world(1)
    assert isinstance(world, Quantity)
    pixel = wcs.world_to_pixel(world)
    assert_allclose(pixel, 1)
def test_objects_to_values():
    # Converting high-level objects back to raw values must undo
    # pixel_to_world (the underlying WCS doubles pixel values).
    wcs = SkyCoordDuplicateWCS()
    coords = wcs.pixel_to_world(1, 2, 3, 4)
    values = high_level_objects_to_values(*coords, low_level_wcs=wcs)
    assert np.allclose(values, [2, 4, 6, 8])
def test_values_to_objects():
    # Raw world values fed to values_to_high_level_objects must reproduce
    # the SkyCoords obtained through pixel_to_world.
    wcs = SkyCoordDuplicateWCS()
    expected_eq, expected_gal = wcs.pixel_to_world(1, 2, 3, 4)
    result_eq, result_gal = values_to_high_level_objects(2, 4, 6, 8, low_level_wcs=wcs)
    assert expected_eq.ra == result_eq.ra
    assert expected_eq.dec == result_eq.dec
    assert expected_gal.l == result_gal.l
    assert expected_gal.b == result_gal.b
|
51adcce57d3077228222ff5879eda0b5c46d156299a4d49be059de694afa95f8 | from pytest import raises
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs import WCS
from astropy.wcs.wcsapi.utils import deserialize_class, wcs_info_str
def test_construct():
    # With construct=True (the default) the serialized tuple is instantiated.
    quantity = deserialize_class(("astropy.units.Quantity", (10,), {"unit": "deg"}))
    assert_quantity_allclose(quantity, 10 * u.deg)
def test_noconstruct():
    # With construct=False the resolved (class, args, kwargs) tuple is
    # returned without instantiating anything.
    cls, args, kwargs = deserialize_class(
        ("astropy.units.Quantity", (), {"unit": "deg"}), construct=False
    )
    assert cls is u.Quantity
    assert args == ()
    assert kwargs == {"unit": "deg"}
def test_invalid():
    """deserialize_class rejects tuples that do not have exactly three items."""
    # Idiomatic pytest: let ``raises(..., match=...)`` verify the message
    # instead of inspecting ``exc.value.args`` after the fact.
    with raises(ValueError, match="Expected a tuple of three values"):
        deserialize_class(("astropy.units.Quantity", (), {"unit": "deg"}, ()))
# Expected ``wcs_info_str`` output (outer whitespace stripped) for a bare
# 1D WCS; compared against in test_wcs_info_str below.
DEFAULT_1D_STR = """
WCS Transformation
This transformation has 1 pixel and 1 world dimensions
Array shape (Numpy order): None
Pixel Dim Axis Name Data size Bounds
0 None None None
World Dim Axis Name Physical Type Units
0 None None unknown
Correlation between pixel and world axes:
Pixel Dim
World Dim 0
0 yes
"""
def test_wcs_info_str():
    # The tests in test_sliced_low_level_wcs.py exercise wcs_info_str
    # extensively, so here we only check that the function exists and its
    # API works as expected for a minimal 1D WCS.
    bare_1d_wcs = WCS(naxis=1)
    info = wcs_info_str(bare_1d_wcs)
    assert info.strip() == DEFAULT_1D_STR.strip()
|
b78e27a054b989b6cdde2e2d778a5dc2c076d26cc969b09124253f3c89bca897 | from pytest import raises
from astropy.wcs.wcsapi.low_level_api import validate_physical_types
def test_validate_physical_types():
    # Valid inputs: standard UCD1+ types, "custom:"-prefixed types, and None.
    for physical_types in (
        ["pos.eq.ra", "pos.eq.ra"],
        ["spect.dopplerVeloc.radio", "custom:spam"],
        ["time", None],
    ):
        validate_physical_types(physical_types)
    # Validation must be case sensitive.
    with raises(
        ValueError, match=r"'Pos\.eq\.dec' is not a valid IOVA UCD1\+ physical type"
    ):
        validate_physical_types(["pos.eq.ra", "Pos.eq.dec"])
    # Nonsense types are rejected outright.
    with raises(ValueError, match=r"'spam' is not a valid IOVA UCD1\+ physical type"):
        validate_physical_types(["spam"])
|
ca00ca5a9f06d661daab7c6d7808e83327aca602644ff6d8435e7ab1623ae952 | import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.coordinates import SkyCoord
from astropy.wcs.wcsapi.high_level_wcs_wrapper import HighLevelWCSWrapper
from astropy.wcs.wcsapi.low_level_api import BaseLowLevelWCS
class CustomLowLevelWCS(BaseLowLevelWCS):
    """Minimal 2D low-level WCS whose forward transform doubles pixel values."""

    @property
    def pixel_n_dim(self):
        return 2

    @property
    def world_n_dim(self):
        return 2

    @property
    def world_axis_physical_types(self):
        return ["pos.eq.ra", "pos.eq.dec"]

    @property
    def world_axis_units(self):
        return ["deg", "deg"]

    def pixel_to_world_values(self, *pixel_arrays):
        # Trivial forward transform: world = 2 * pixel.
        return [np.asarray(pix) * 2 for pix in pixel_arrays]

    def world_to_pixel_values(self, *world_arrays):
        # Exact inverse of pixel_to_world_values.
        return [np.asarray(world) / 2 for world in world_arrays]

    @property
    def world_axis_object_components(self):
        # Both world axes feed a single SkyCoord ("test").
        return [
            ("test", 0, "spherical.lon.degree"),
            ("test", 1, "spherical.lat.degree"),
        ]

    @property
    def world_axis_object_classes(self):
        return {"test": (SkyCoord, (), {"unit": "deg"})}
def test_wrapper():
    low_level = CustomLowLevelWCS()
    wrapper = HighLevelWCSWrapper(low_level)
    # Round trip through the high-level conversion.
    coord = wrapper.pixel_to_world(1, 2)
    assert isinstance(coord, SkyCoord)
    assert coord.isscalar
    px, py = wrapper.world_to_pixel(coord)
    assert_allclose(px, 1)
    assert_allclose(py, 2)
    # Attributes must be forwarded from the wrapped low-level WCS.
    assert wrapper.low_level_wcs is low_level
    assert wrapper.pixel_n_dim == 2
    assert wrapper.world_n_dim == 2
    assert wrapper.world_axis_physical_types == ["pos.eq.ra", "pos.eq.dec"]
    assert wrapper.world_axis_units == ["deg", "deg"]
    assert wrapper.array_shape is None
    assert wrapper.pixel_bounds is None
    assert np.all(wrapper.axis_correlation_matrix)
def test_wrapper_invalid():
    # A low-level WCS whose world_axis_object_classes is missing the key
    # referenced by world_axis_object_components must fail at conversion time.
    class InvalidCustomLowLevelWCS(CustomLowLevelWCS):
        @property
        def world_axis_object_classes(self):
            return {}

    wcs = InvalidCustomLowLevelWCS()
    wrapper = HighLevelWCSWrapper(wcs)
    with pytest.raises(KeyError):
        wrapper.pixel_to_world(1, 2)
|
652a3a51b993dda7ec27b4ed42c90e78de8b58fe8e232bc4d2fc7ff83c1d0f4d | import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.coordinates import ICRS, Galactic, SkyCoord
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.time import Time
from astropy.units import Quantity
from astropy.wcs.wcs import WCS, FITSFixedWarning
from astropy.wcs.wcsapi.wrappers.sliced_wcs import (
SlicedLowLevelWCS,
combine_slices,
sanitize_slices,
)
# To test the slicing we start off from standard FITS WCS
# objects since those implement the low-level API. We create
# a WCS for a spectral cube with axes in non-standard order
# and with correlated celestial axes and an uncorrelated
# spectral axis.
HEADER_SPECTRAL_CUBE = """
NAXIS = 3
NAXIS1 = 10
NAXIS2 = 20
NAXIS3 = 30
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
# Constructing a WCS from this minimal header raises VerifyWarning; suppress it.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", VerifyWarning)
    WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep="\n"))
# (min, max) bounds per pixel axis; exercised by the repr/pixel_bounds tests.
WCS_SPECTRAL_CUBE.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
def test_invalid_slices():
    # Boolean arrays, stepped slices and float indices are all rejected.
    with pytest.raises(IndexError):
        SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [None, None, [False, False, False]])
    with pytest.raises(IndexError):
        SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [None, None, slice(None, None, 2)])
    with pytest.raises(IndexError):
        SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [None, None, 1000.100])
@pytest.mark.parametrize(
    "item, ndim, expected",
    (
        ([Ellipsis, 10], 4, [slice(None)] * 3 + [10]),
        ([10, slice(20, 30)], 5, [10, slice(20, 30)] + [slice(None)] * 3),
        ([10, Ellipsis, 8], 10, [10] + [slice(None)] * 8 + [8]),
    ),
)
def test_sanitize_slice(item, ndim, expected):
    # sanitize_slices expands Ellipsis and pads with full slices up to ndim.
    new_item = sanitize_slices(item, ndim)
    # FIXME: do we still need the first two since the third assert
    # should cover it all?
    assert len(new_item) == ndim
    assert all(isinstance(i, (slice, int)) for i in new_item)
    assert new_item == expected
# Expected str/repr of the Ellipsis-sliced spectral cube (test_ellipsis).
EXPECTED_ELLIPSIS_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_ellipsis():
    # Slicing with Ellipsis keeps every pixel and world dimension intact.
    wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape == (30, 20, 10)
    assert wcs.pixel_shape == (10, 20, 30)
    assert wcs.world_axis_physical_types == [
        "pos.galactic.lat",
        "em.freq",
        "pos.galactic.lon",
    ]
    assert wcs.world_axis_units == ["deg", "Hz", "deg"]
    assert wcs.pixel_axis_names == ["", "", ""]
    assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
    assert_equal(
        wcs.axis_correlation_matrix,
        [[True, False, True], [False, True, False], [True, False, True]],
    )
    # High-level object mapping: celestial pair plus a spectral Quantity.
    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == (
        "celestial",
        1,
        "spherical.lat.degree",
    )
    assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
    assert wcs.world_axis_object_components[2] == (
        "celestial",
        0,
        "spherical.lon.degree",
    )
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert wcs.world_axis_object_classes["spectral"][0] is Quantity
    assert wcs.world_axis_object_classes["spectral"][1] == ()
    assert wcs.world_axis_object_classes["spectral"][2] == {}
    # Round-trip transforms at the reference pixel.
    assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
    assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
    assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
    assert_equal(wcs.pixel_bounds, [(-1, 11), (-2, 18), (5, 15)])
    assert str(wcs) == EXPECTED_ELLIPSIS_REPR.strip()
    assert EXPECTED_ELLIPSIS_REPR.strip() in repr(wcs)
def test_pixel_to_world_broadcasting():
    # Array-valued pixel inputs broadcast against scalars.
    sliced = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
    world = sliced.pixel_to_world_values((29, 29), 39, 44)
    assert_allclose(world, ((10, 10), (20, 20), (25, 25)))
def test_world_to_pixel_broadcasting():
    # Array-valued world inputs broadcast against scalars.
    sliced = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
    pixel = sliced.world_to_pixel_values((10, 10), 20, 25)
    assert_allclose(pixel, ((29.0, 29.0), (39.0, 39.0), (44.0, 44.0)))
# Expected str/repr after dropping the spectral axis (test_spectral_slice).
EXPECTED_SPECTRAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 2 world dimensions
Array shape (Numpy order): (30, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 yes yes
1 yes yes
"""
def test_spectral_slice():
    # Indexing the spectral pixel axis with an int drops it together with
    # its world axis, leaving a 2D celestial WCS.
    wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), 10])
    assert wcs.pixel_n_dim == 2
    assert wcs.world_n_dim == 2
    assert wcs.array_shape == (30, 10)
    assert wcs.pixel_shape == (10, 30)
    assert wcs.world_axis_physical_types == ["pos.galactic.lat", "pos.galactic.lon"]
    assert wcs.world_axis_units == ["deg", "deg"]
    assert wcs.pixel_axis_names == ["", ""]
    assert wcs.world_axis_names == ["Latitude", "Longitude"]
    assert_equal(wcs.axis_correlation_matrix, [[True, True], [True, True]])
    assert wcs.world_axis_object_components == [
        ("celestial", 1, "spherical.lat.degree"),
        ("celestial", 0, "spherical.lon.degree"),
    ]
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert_allclose(wcs.pixel_to_world_values(29, 44), (10, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 29), (10, 25))
    assert_allclose(wcs.world_to_pixel_values(10, 25), (29.0, 44.0))
    assert_equal(wcs.world_to_array_index_values(10, 25), (44, 29))
    assert_equal(wcs.pixel_bounds, [(-1, 11), (5, 15)])
    assert str(wcs) == EXPECTED_SPECTRAL_SLICE_REPR.strip()
    assert EXPECTED_SPECTRAL_SLICE_REPR.strip() in repr(wcs)
# Expected str/repr after range-slicing the spectral axis (test_spectral_range).
EXPECTED_SPECTRAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 6, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 6 (-6, 14)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_spectral_range():
    # Range-slicing the spectral axis keeps all dimensions but shrinks the
    # spectral size to 6 and shifts its pixel origin (and bounds) by 4.
    wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), slice(4, 10)])
    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape == (30, 6, 10)
    assert wcs.pixel_shape == (10, 6, 30)
    assert wcs.world_axis_physical_types == [
        "pos.galactic.lat",
        "em.freq",
        "pos.galactic.lon",
    ]
    assert wcs.world_axis_units == ["deg", "Hz", "deg"]
    assert wcs.pixel_axis_names == ["", "", ""]
    assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
    assert_equal(
        wcs.axis_correlation_matrix,
        [[True, False, True], [False, True, False], [True, False, True]],
    )
    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == (
        "celestial",
        1,
        "spherical.lat.degree",
    )
    assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
    assert wcs.world_axis_object_components[2] == (
        "celestial",
        0,
        "spherical.lon.degree",
    )
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert wcs.world_axis_object_classes["spectral"][0] is Quantity
    assert wcs.world_axis_object_classes["spectral"][1] == ()
    assert wcs.world_axis_object_classes["spectral"][2] == {}
    # Reference pixel of the spectral axis moved from 39 to 35 (offset 4).
    assert_allclose(wcs.pixel_to_world_values(29, 35, 44), (10, 20, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 35, 29), (10, 20, 25))
    assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 35.0, 44.0))
    assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 35, 29))
    assert_equal(wcs.pixel_bounds, [(-1, 11), (-6, 14), (5, 15)])
    assert str(wcs) == EXPECTED_SPECTRAL_RANGE_REPR.strip()
    assert EXPECTED_SPECTRAL_RANGE_REPR.strip() in repr(wcs)
# Expected str/repr after int-slicing a celestial axis (test_celestial_slice):
# 2 pixel dims remain but all 3 world dims are kept (axes are coupled).
EXPECTED_CELESTIAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20)
Pixel Dim Axis Name Data size Bounds
0 None 20 (-2, 18)
1 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 no yes
1 yes no
2 no yes
"""
def test_celestial_slice():
    # Int-slicing one celestial pixel axis removes a pixel dimension but all
    # three world dimensions survive because lat/lon are correlated.
    wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, 5])
    assert wcs.pixel_n_dim == 2
    assert wcs.world_n_dim == 3
    assert wcs.array_shape == (30, 20)
    assert wcs.pixel_shape == (20, 30)
    assert wcs.world_axis_physical_types == [
        "pos.galactic.lat",
        "em.freq",
        "pos.galactic.lon",
    ]
    assert wcs.world_axis_units == ["deg", "Hz", "deg"]
    assert wcs.pixel_axis_names == ["", ""]
    assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
    assert_equal(
        wcs.axis_correlation_matrix, [[False, True], [True, False], [False, True]]
    )
    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == (
        "celestial",
        1,
        "spherical.lat.degree",
    )
    assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
    assert wcs.world_axis_object_components[2] == (
        "celestial",
        0,
        "spherical.lon.degree",
    )
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert wcs.world_axis_object_classes["spectral"][0] is Quantity
    assert wcs.world_axis_object_classes["spectral"][1] == ()
    assert wcs.world_axis_object_classes["spectral"][2] == {}
    # The fixed latitude pixel pins the latitude world value at 12.4 deg.
    assert_allclose(wcs.pixel_to_world_values(39, 44), (12.4, 20, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 39), (12.4, 20, 25))
    assert_allclose(wcs.world_to_pixel_values(12.4, 20, 25), (39.0, 44.0))
    assert_equal(wcs.world_to_array_index_values(12.4, 20, 25), (44, 39))
    assert_equal(wcs.pixel_bounds, [(-2, 18), (5, 15)])
    assert str(wcs) == EXPECTED_CELESTIAL_SLICE_REPR.strip()
    assert EXPECTED_CELESTIAL_SLICE_REPR.strip() in repr(wcs)
# Expected str/repr after range-slicing a celestial axis (test_celestial_range).
EXPECTED_CELESTIAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Axis Name Data size Bounds
0 None 5 (-6, 6)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range():
    # Range-slicing the latitude pixel axis shrinks it to 5 pixels and
    # shifts its origin (and bounds) by 5.
    wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, slice(5, 10)])
    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape == (30, 20, 5)
    assert wcs.pixel_shape == (5, 20, 30)
    assert wcs.world_axis_physical_types == [
        "pos.galactic.lat",
        "em.freq",
        "pos.galactic.lon",
    ]
    assert wcs.world_axis_units == ["deg", "Hz", "deg"]
    assert wcs.pixel_axis_names == ["", "", ""]
    assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
    assert_equal(
        wcs.axis_correlation_matrix,
        [[True, False, True], [False, True, False], [True, False, True]],
    )
    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == (
        "celestial",
        1,
        "spherical.lat.degree",
    )
    assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
    assert wcs.world_axis_object_components[2] == (
        "celestial",
        0,
        "spherical.lon.degree",
    )
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert wcs.world_axis_object_classes["spectral"][0] is Quantity
    assert wcs.world_axis_object_classes["spectral"][1] == ()
    assert wcs.world_axis_object_classes["spectral"][2] == {}
    # Reference pixel of the latitude axis moved from 29 to 24 (offset 5).
    assert_allclose(wcs.pixel_to_world_values(24, 39, 44), (10, 20, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 39, 24), (10, 20, 25))
    assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (24.0, 39.0, 44.0))
    assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 24))
    assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
    assert str(wcs) == EXPECTED_CELESTIAL_RANGE_REPR.strip()
    assert EXPECTED_CELESTIAL_RANGE_REPR.strip() in repr(wcs)
# Now try with a 90 degree rotation
with warnings.catch_warnings():
    warnings.simplefilter("ignore", VerifyWarning)
    WCS_SPECTRAL_CUBE_ROT = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep="\n"))
# Anti-diagonal PC matrix swaps pixel axes 1 and 3; longitude reference set to 0.
WCS_SPECTRAL_CUBE_ROT.wcs.pc = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
WCS_SPECTRAL_CUBE_ROT.wcs.crval[0] = 0
WCS_SPECTRAL_CUBE_ROT.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
# Expected str/repr for the rotated cube sliced on the latitude axis.
EXPECTED_CELESTIAL_RANGE_ROT_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Axis Name Data size Bounds
0 None 5 (-6, 6)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range_rot():
    # Same slice as test_celestial_range but on the 90-degree-rotated WCS.
    wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE_ROT, [Ellipsis, slice(5, 10)])
    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape == (30, 20, 5)
    assert wcs.pixel_shape == (5, 20, 30)
    assert wcs.world_axis_physical_types == [
        "pos.galactic.lat",
        "em.freq",
        "pos.galactic.lon",
    ]
    assert wcs.world_axis_units == ["deg", "Hz", "deg"]
    assert wcs.pixel_axis_names == ["", "", ""]
    assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
    assert_equal(
        wcs.axis_correlation_matrix,
        [[True, False, True], [False, True, False], [True, False, True]],
    )
    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == (
        "celestial",
        1,
        "spherical.lat.degree",
    )
    assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
    assert wcs.world_axis_object_components[2] == (
        "celestial",
        0,
        "spherical.lon.degree",
    )
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert wcs.world_axis_object_classes["spectral"][0] is Quantity
    assert wcs.world_axis_object_classes["spectral"][1] == ()
    assert wcs.world_axis_object_classes["spectral"][2] == {}
    # Transform values reflect the swapped axes of the rotated WCS.
    assert_allclose(wcs.pixel_to_world_values(14, 29, 34), (1, 15, 24))
    assert_allclose(wcs.array_index_to_world_values(34, 29, 14), (1, 15, 24))
    assert_allclose(wcs.world_to_pixel_values(1, 15, 24), (14.0, 29.0, 34.0))
    assert_equal(wcs.world_to_array_index_values(1, 15, 24), (34, 29, 14))
    assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
    assert str(wcs) == EXPECTED_CELESTIAL_RANGE_ROT_REPR.strip()
    assert EXPECTED_CELESTIAL_RANGE_ROT_REPR.strip() in repr(wcs)
# Same spectral cube but without NAXISn keywords, so no array shape is known.
HEADER_NO_SHAPE_CUBE = """
NAXIS = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
    warnings.simplefilter("ignore", VerifyWarning)
    WCS_NO_SHAPE_CUBE = WCS(Header.fromstring(HEADER_NO_SHAPE_CUBE, sep="\n"))
# Expected str/repr when no array shape or bounds are known.
EXPECTED_NO_SHAPE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): None
Pixel Dim Axis Name Data size Bounds
0 None None None
1 None None None
2 None None None
World Dim Axis Name Physical Type Units
0 None pos.galactic.lat deg
1 None em.freq Hz
2 None pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_no_array_shape():
    # Without NAXISn keywords, array_shape and pixel_shape are both None.
    wcs = SlicedLowLevelWCS(WCS_NO_SHAPE_CUBE, Ellipsis)
    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape is None
    assert wcs.pixel_shape is None
    assert wcs.world_axis_physical_types == [
        "pos.galactic.lat",
        "em.freq",
        "pos.galactic.lon",
    ]
    assert wcs.world_axis_units == ["deg", "Hz", "deg"]
    assert_equal(
        wcs.axis_correlation_matrix,
        [[True, False, True], [False, True, False], [True, False, True]],
    )
    assert len(wcs.world_axis_object_components) == 3
    assert wcs.world_axis_object_components[0] == (
        "celestial",
        1,
        "spherical.lat.degree",
    )
    assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
    assert wcs.world_axis_object_components[2] == (
        "celestial",
        0,
        "spherical.lon.degree",
    )
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert wcs.world_axis_object_classes["spectral"][0] is Quantity
    assert wcs.world_axis_object_classes["spectral"][1] == ()
    assert wcs.world_axis_object_classes["spectral"][2] == {}
    assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
    assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
    assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
    assert str(wcs) == EXPECTED_NO_SHAPE_REPR.strip()
    assert EXPECTED_NO_SHAPE_REPR.strip() in repr(wcs)
# Testing the WCS object having some physical types as None/Unknown
HEADER_SPECTRAL_CUBE_NONE_TYPES = {
    "CTYPE1": "GLAT-CAR",
    "CUNIT1": "deg",
    "CDELT1": -0.1,
    "CRPIX1": 30,
    "CRVAL1": 10,
    "NAXIS1": 10,
    # Empty CTYPE2: this world axis gets no known physical type.
    "CTYPE2": "",
    "CUNIT2": "Hz",
    "CDELT2": 0.5,
    "CRPIX2": 40,
    "CRVAL2": 20,
    "NAXIS2": 20,
    "CTYPE3": "GLON-CAR",
    "CUNIT3": "deg",
    "CDELT3": 0.1,
    "CRPIX3": 45,
    "CRVAL3": 25,
    "NAXIS3": 30,
}
WCS_SPECTRAL_CUBE_NONE_TYPES = WCS(header=HEADER_SPECTRAL_CUBE_NONE_TYPES)
WCS_SPECTRAL_CUBE_NONE_TYPES.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
# Expected str/repr with one world axis of unknown physical type.
EXPECTED_ELLIPSIS_REPR_NONE_TYPES = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 None pos.galactic.lat deg
1 None None Hz
2 None pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_ellipsis_none_types():
    # Like test_ellipsis, but the second world axis has no physical type and
    # therefore falls back to the generic ("world", 0, "value") component.
    wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE_NONE_TYPES, Ellipsis)
    assert wcs.pixel_n_dim == 3
    assert wcs.world_n_dim == 3
    assert wcs.array_shape == (30, 20, 10)
    assert wcs.pixel_shape == (10, 20, 30)
    assert wcs.world_axis_physical_types == [
        "pos.galactic.lat",
        None,
        "pos.galactic.lon",
    ]
    assert wcs.world_axis_units == ["deg", "Hz", "deg"]
    assert_equal(
        wcs.axis_correlation_matrix,
        [[True, False, True], [False, True, False], [True, False, True]],
    )
    assert wcs.world_axis_object_components == [
        ("celestial", 1, "spherical.lat.degree"),
        ("world", 0, "value"),
        ("celestial", 0, "spherical.lon.degree"),
    ]
    assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
    assert wcs.world_axis_object_classes["celestial"][1] == ()
    assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
    assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
    assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
    assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
    assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
    assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
    assert_equal(wcs.pixel_bounds, [(-1, 11), (-2, 18), (5, 15)])
    assert str(wcs) == EXPECTED_ELLIPSIS_REPR_NONE_TYPES.strip()
    assert EXPECTED_ELLIPSIS_REPR_NONE_TYPES.strip() in repr(wcs)
# Each case is (slice1, slice2, expected): applying slice2 to the result of
# applying slice1 must be equivalent to applying ``expected`` directly.
CASES = [
    (slice(None), slice(None), slice(None)),
    (slice(None), slice(3, None), slice(3, None)),
    (slice(None), slice(None, 16), slice(None, 16)),
    (slice(None), slice(3, 16), slice(3, 16)),
    (slice(2, None), slice(None), slice(2, None)),
    (slice(2, None), slice(3, None), slice(5, None)),
    (slice(2, None), slice(None, 16), slice(2, 18)),
    (slice(2, None), slice(3, 16), slice(5, 18)),
    (slice(None, 10), slice(None), slice(None, 10)),
    (slice(None, 10), slice(3, None), slice(3, 10)),
    (slice(None, 10), slice(None, 16), slice(None, 10)),
    (slice(None, 10), slice(3, 16), slice(3, 10)),
    (slice(2, 10), slice(None), slice(2, 10)),
    (slice(2, 10), slice(3, None), slice(5, 10)),
    (slice(2, 10), slice(None, 16), slice(2, 10)),
    (slice(2, 10), slice(3, 16), slice(5, 10)),
    (slice(None), 3, 3),
    (slice(2, None), 3, 5),
    (slice(None, 10), 3, 3),
    (slice(2, 10), 3, 5),
]


@pytest.mark.parametrize(("slice1", "slice2", "expected"), CASES)
def test_combine_slices(slice1, slice2, expected):
    assert combine_slices(slice1, slice2) == expected
def test_nested_slicing():
    # Make sure that if we call slicing several times, the result is the same
    # as calling the slicing once with the final slice settings.
    wcs = WCS_SPECTRAL_CUBE
    nested = SlicedLowLevelWCS(
        SlicedLowLevelWCS(
            SlicedLowLevelWCS(wcs, [slice(None), slice(1, 10), slice(None)]),
            [3, slice(2, None)],
        ),
        [slice(None), slice(2, 8)],
    )
    direct = wcs[3, 3:10, 2:8]
    assert_allclose(
        nested.pixel_to_world_values(3, 5), direct.pixel_to_world_values(3, 5)
    )
    # Repeated slicing must collapse: no SlicedLowLevelWCS inside another.
    assert not isinstance(nested._wcs, SlicedLowLevelWCS)
def test_too_much_slicing():
    # Reducing the WCS below one pixel/world dimension must be rejected.
    expected_message = (
        "Cannot slice WCS: the resulting WCS "
        "should have at least one pixel and "
        "one world dimension"
    )
    with pytest.raises(ValueError, match=expected_message):
        WCS_SPECTRAL_CUBE[0, 1, 2]
# 1D temporal-axis header (UTC seconds relative to MJDREF, topocentric
# reference position with an observer location); used by the fixtures below.
HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""
@pytest.fixture
def header_time_1d():
    # Fresh Header parsed from the 1D time-axis header string above.
    return Header.fromstring(HEADER_TIME_1D, sep="\n")
@pytest.fixture
def time_1d_wcs(header_time_1d):
    # Suppress the FITSFixedWarning emitted while fixing up the header.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", FITSFixedWarning)
        return WCS(header_time_1d)
def test_1d_sliced_low_level(time_1d_wcs):
    # Slicing a 1D WCS offsets the pixel origin by the slice start (10):
    # world = 5 + 2 * (pixel + 10), so pixels [1, 2] map to [27, 29].
    sliced = SlicedLowLevelWCS(time_1d_wcs, np.s_[10:20])
    world = sliced.pixel_to_world_values([1, 2])
    assert isinstance(world, np.ndarray)
    assert np.allclose(world, [27, 29])
def validate_info_dict(result, expected):
    """Assert that two ``dropped_world_dimensions``-style dicts are equivalent.

    The ``"value"`` entries are compared with ``assert_allclose`` because they
    hold floating-point world coordinates; every remaining entry must be
    exactly equal.

    Parameters
    ----------
    result, expected : dict
        Dictionaries containing a ``"value"`` key plus arbitrary other
        entries. Unlike the previous implementation, the inputs are NOT
        mutated: we pop from shallow copies so callers keep their dicts
        intact.
    """
    result = dict(result)
    expected = dict(expected)
    np.testing.assert_allclose(result.pop("value"), expected.pop("value"))
    assert result == expected
def test_dropped_dimensions():
    wcs = WCS_SPECTRAL_CUBE
    # Pure slices drop nothing.
    sub = SlicedLowLevelWCS(wcs, np.s_[:, :, :])
    assert sub.dropped_world_dimensions == {}
    sub = SlicedLowLevelWCS(wcs, np.s_[:, 2:5, :])
    assert sub.dropped_world_dimensions == {}
    # Int-indexing the spectral pixel axis records the frequency world axis.
    sub = SlicedLowLevelWCS(wcs, np.s_[:, 0])
    waocomp = sub.dropped_world_dimensions.pop("world_axis_object_components")
    assert len(waocomp) == 1 and waocomp[0][0] == "spectral" and waocomp[0][1] == 0
    waocls = sub.dropped_world_dimensions.pop("world_axis_object_classes")
    assert (
        len(waocls) == 1
        and "spectral" in waocls
        and waocls["spectral"][0] == u.Quantity
    )
    validate_info_dict(
        sub.dropped_world_dimensions,
        {
            "value": [0.5],
            "world_axis_physical_types": ["em.freq"],
            "world_axis_names": ["Frequency"],
            "world_axis_units": ["Hz"],
            "serialized_classes": False,
        },
    )
    # Also dropping one celestial pixel axis leaves the same spectral record
    # (celestial world axes survive via the other celestial pixel axis).
    sub = SlicedLowLevelWCS(wcs, np.s_[:, 0, 0])
    waocomp = sub.dropped_world_dimensions.pop("world_axis_object_components")
    assert len(waocomp) == 1 and waocomp[0][0] == "spectral" and waocomp[0][1] == 0
    waocls = sub.dropped_world_dimensions.pop("world_axis_object_classes")
    assert (
        len(waocls) == 1
        and "spectral" in waocls
        and waocls["spectral"][0] == u.Quantity
    )
    validate_info_dict(
        sub.dropped_world_dimensions,
        {
            "value": [0.5],
            "world_axis_physical_types": ["em.freq"],
            "world_axis_names": ["Frequency"],
            "world_axis_units": ["Hz"],
            "serialized_classes": False,
        },
    )
    # Dropping both celestial pixel axes records the coupled lat/lon pair.
    sub = SlicedLowLevelWCS(wcs, np.s_[0, :, 0])
    dwd = sub.dropped_world_dimensions
    wao_classes = dwd.pop("world_axis_object_classes")
    validate_info_dict(
        dwd,
        {
            "value": [12.86995801, 20.49217541],
            "world_axis_physical_types": ["pos.galactic.lat", "pos.galactic.lon"],
            "world_axis_names": ["Latitude", "Longitude"],
            "world_axis_units": ["deg", "deg"],
            "serialized_classes": False,
            "world_axis_object_components": [
                ("celestial", 1, "spherical.lat.degree"),
                ("celestial", 0, "spherical.lon.degree"),
            ],
        },
    )
    assert wao_classes["celestial"][0] is SkyCoord
    assert wao_classes["celestial"][1] == ()
    assert isinstance(wao_classes["celestial"][2]["frame"], Galactic)
    assert wao_classes["celestial"][2]["unit"] is u.deg
    # Same with non-zero indices: the recorded world values change.
    sub = SlicedLowLevelWCS(wcs, np.s_[5, :5, 12])
    dwd = sub.dropped_world_dimensions
    wao_classes = dwd.pop("world_axis_object_classes")
    validate_info_dict(
        dwd,
        {
            "value": [11.67648267, 21.01921192],
            "world_axis_physical_types": ["pos.galactic.lat", "pos.galactic.lon"],
            "world_axis_names": ["Latitude", "Longitude"],
            "world_axis_units": ["deg", "deg"],
            "serialized_classes": False,
            "world_axis_object_components": [
                ("celestial", 1, "spherical.lat.degree"),
                ("celestial", 0, "spherical.lon.degree"),
            ],
        },
    )
    assert wao_classes["celestial"][0] is SkyCoord
    assert wao_classes["celestial"][1] == ()
    assert isinstance(wao_classes["celestial"][2]["frame"], Galactic)
    assert wao_classes["celestial"][2]["unit"] is u.deg
def test_dropped_dimensions_4d(cube_4d_fitswcs):
    """Metadata reported for world axes dropped when slicing a 4-d FITS WCS."""
    # Keep only the first pixel axis; per the asserted physical types, the
    # RA, Dec and frequency world axes are dropped.
    sliced = SlicedLowLevelWCS(cube_4d_fitswcs, np.s_[:, 12, 5, 5])
    dropped = sliced.dropped_world_dimensions
    classes = dropped.pop("world_axis_object_classes")
    components = dropped.pop("world_axis_object_components")
    validate_info_dict(
        dropped,
        {
            "value": [4.0e00, -2.0e00, 1.0e10],
            "world_axis_physical_types": ["pos.eq.ra", "pos.eq.dec", "em.freq"],
            "world_axis_names": ["Right Ascension", "Declination", "Frequency"],
            "world_axis_units": ["deg", "deg", "Hz"],
            "serialized_classes": False,
        },
    )
    celestial = classes["celestial"]
    assert celestial[0] is SkyCoord
    assert celestial[1] == ()
    assert isinstance(celestial[2]["frame"], ICRS)
    assert celestial[2]["unit"] is u.deg
    assert classes["spectral"][0:3] == (u.Quantity, (), {})
    assert components[0] == ("celestial", 0, "spherical.lon.degree")
    assert components[1] == ("celestial", 1, "spherical.lat.degree")
    assert components[2][0:2] == ("spectral", 0)
    # Fix the first two pixel axes; now frequency and time world axes are
    # the dropped ones.
    sliced = SlicedLowLevelWCS(cube_4d_fitswcs, np.s_[12, 12])
    dropped = sliced.dropped_world_dimensions
    classes = dropped.pop("world_axis_object_classes")
    components = dropped.pop("world_axis_object_components")
    validate_info_dict(
        dropped,
        {
            "value": [1.0e10, 5.0e00],
            "world_axis_physical_types": ["em.freq", "time"],
            "world_axis_names": ["Frequency", "Time"],
            "world_axis_units": ["Hz", "s"],
            "serialized_classes": False,
        },
    )
    assert components[0][0:2] == ("spectral", 0)
    assert components[1][0] == "time"
    assert components[1][1] == 0
    assert classes["spectral"][0:3] == (u.Quantity, (), {})
    assert classes["time"][0:3] == (Time, (), {})
def test_pixel_to_world_values_different_int_types():
    """Slicing with a numpy integer index must behave like a builtin int."""
    sliced_int = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, np.s_[:, 0, :])
    sliced_np64 = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, np.s_[:, np.int64(0), :])
    pixels = ([0, 1], [0, 1])
    world_int = sliced_int.pixel_to_world_values(*pixels)
    world_np64 = sliced_np64.pixel_to_world_values(*pixels)
    for coord_int, coord_np64 in zip(world_int, world_np64):
        assert all(coord_int == coord_np64)
# FITS header for a 3-d WCS (HPLN-TAN / HPLT-TAN / WAVE) whose PC matrix has
# off-diagonal terms, coupling the world axes; used by
# test_coupled_world_slicing below.
COUPLED_WCS_HEADER = {
    "WCSAXES": 3,
    "CRPIX1": (100 + 1) / 2,
    "CRPIX2": (25 + 1) / 2,
    "CRPIX3": 1.0,
    "PC1_1": 0.0,
    "PC1_2": -1.0,
    "PC1_3": 0.0,
    "PC2_1": 1.0,
    "PC2_2": 0.0,
    "PC2_3": -1.0,
    "CDELT1": 5,
    "CDELT2": 5,
    "CDELT3": 0.055,
    "CUNIT1": "arcsec",
    "CUNIT2": "arcsec",
    "CUNIT3": "Angstrom",
    "CTYPE1": "HPLN-TAN",
    "CTYPE2": "HPLT-TAN",
    "CTYPE3": "WAVE",
    "CRVAL1": 0.0,
    "CRVAL2": 0.0,
    "CRVAL3": 1.05,
}
def test_coupled_world_slicing():
    """World->pixel round-trip on a slice of a WCS with coupled world axes."""
    fits_wcs = WCS(header=COUPLED_WCS_HEADER)
    sliced = SlicedLowLevelWCS(fits_wcs, 0)
    reference_world = fits_wcs.pixel_to_world_values(0, 0, 0)
    pix = sliced.world_to_pixel_values(reference_world[0], reference_world[1])
    assert np.allclose(pix[0], 0)
|
9aef02d314db7023b7a3f5c5fb8c8c73f0367089d9c0b24b9e72eb0f34aeee48 | """
Helpers for overriding numpy functions in
`~astropy.time.Time.__array_function__`.
"""
import numpy as np
from astropy.units.quantity_helper.function_helpers import FunctionAssigner
# TODO: Fill this in with functions that don't make sense for times
UNSUPPORTED_FUNCTIONS = {}
# Functions that return the final result of the numpy function
CUSTOM_FUNCTIONS = {}
# Registration decorator: used below as @custom_functions(helps={np.func})
# to register a helper for that numpy function in CUSTOM_FUNCTIONS.
custom_functions = FunctionAssigner(CUSTOM_FUNCTIONS)
@custom_functions(helps={np.linspace})
def linspace(tstart, tstop, *args, **kwargs):
    """Helper for np.linspace between two Time instances.

    Computes evenly spaced fractions between 0 and 1 with np.linspace and
    scales the (tstop - tstart) interval by them, so the actual epoch
    arithmetic is done with Time's high-precision two-float representation.
    """
    from astropy.time import Time

    if isinstance(tstart, Time) and not isinstance(tstop, Time):
        # Mixed Time / non-Time endpoints are not supported.
        return NotImplemented
    fractions = np.linspace(
        np.zeros(tstart.shape), np.ones(tstop.shape), *args, **kwargs
    )
    if kwargs.get("retstep"):
        # np.linspace returned (samples, step); scale both by the interval.
        offsets, step = fractions
        interval = tstop - tstart
        return tstart + interval * offsets, interval * step
    return tstart + (tstop - tstart) * fractions
|
7aefdf3310b067b05625addc95fe2a34d967112cf1a283de32efd4429e88bd45 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.time import Time, TimeDelta
from astropy.units.quantity_helper.function_helpers import ARRAY_FUNCTION_ENABLED
class TestFunctionsTime:
    """Numpy functions applied to Time via __array_function__."""

    def setup_class(cls):
        # A 4x2 array of MJDs so that axis-aware functions can be exercised.
        cls.t = Time(50000, np.arange(8).reshape(4, 2), format="mjd", scale="tai")

    def check(self, func, cls=None, scale=None, format=None, *args, **kwargs):
        """Apply ``func`` to ``self.t`` and, separately, to its jd1/jd2
        parts; the two results must compare equal.  ``cls``/``scale``/
        ``format`` override the expected output type, scale and format
        (defaulting to those of ``self.t``)."""
        expected_cls = self.t.__class__ if cls is None else cls
        expected_scale = self.t.scale if scale is None else scale
        expected_format = self.t.format if format is None else format
        result = func(self.t, *args, **kwargs)
        expected = expected_cls(
            func(self.t.jd1, *args, **kwargs),
            func(self.t.jd2, *args, **kwargs),
            format=expected_format,
            scale=expected_scale,
        )
        if isinstance(result, np.ndarray):
            # Plain-array output: compare against the array view instead.
            expected = np.array(expected)
        assert np.all(result == expected)

    @pytest.mark.parametrize("axis", (0, 1))
    def test_diff(self, axis):
        # Differences of Times are TimeDeltas in 'jd' format.
        self.check(np.diff, axis=axis, cls=TimeDelta, format="jd")
class TestFunctionsTimeDelta(TestFunctionsTime):
    # Reuses TestFunctionsTime.check with a TimeDelta instance, which also
    # supports reductions (sum/mean/median), tested below.
    def setup_class(cls):
        cls.t = TimeDelta(np.arange(8).reshape(4, 2), format="jd", scale="tai")
    @pytest.mark.parametrize("axis", (0, 1, None))
    @pytest.mark.parametrize("func", (np.sum, np.mean, np.median))
    def test_sum_like(self, func, axis):
        # Reductions along each axis and over the full array.
        self.check(func, axis=axis)
@pytest.mark.xfail(
    not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
@pytest.mark.parametrize("attribute", ["shape", "ndim", "size"])
@pytest.mark.parametrize(
    "t",
    [
        Time("2001-02-03T04:05:06"),
        Time(50000, np.arange(8).reshape(4, 2), format="mjd", scale="tai"),
        TimeDelta(100, format="jd"),
    ],
)
def test_shape_attribute_functions(t, attribute):
    # Regression test for
    # https://github.com/astropy/astropy/issues/8610#issuecomment-736855217
    # np.shape / np.ndim / np.size must agree with the corresponding
    # attribute for both scalar and array Time/TimeDelta instances.
    function = getattr(np, attribute)
    result = function(t)
    assert result == getattr(t, attribute)
|
205df2275eb0a54ae649971c299b124784221a4a57454ceeb2a665c8452e381b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import operator
from datetime import timedelta
from decimal import Decimal
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Table
from astropy.time import (
STANDARD_TIME_SCALES,
TIME_DELTA_SCALES,
TIME_SCALES,
OperandTypeError,
ScaleValueError,
Time,
TimeDelta,
TimeDeltaMissingUnitWarning,
)
from astropy.utils import iers
# Comparison helpers with tolerances at the double-precision rounding limit
# (2**-52 == machine epsilon for float64).
allclose_jd = functools.partial(np.allclose, rtol=2.0**-52, atol=0)
allclose_jd2 = functools.partial(
    np.allclose, rtol=2.0**-52, atol=2.0**-52
)  # 20 ps atol
allclose_sec = functools.partial(
    np.allclose, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600
)  # 20 ps atol
# Saved so teardown_module can restore the user's IERS download setting.
orig_auto_download = iers.conf.auto_download
def setup_module(module):
    """Use offline IERS table only."""
    # Disable network access for IERS data for the whole module.
    iers.conf.auto_download = False
def teardown_module(module):
    """Restore original setting."""
    # Put back the auto_download flag captured at import time.
    iers.conf.auto_download = orig_auto_download
class TestTimeDelta:
    """Test TimeDelta class"""
    def setup_method(self):
        # Reference data: two plain UTC times one day+one second apart, one
        # time with non-default precision/subformats and a location, one
        # 'local'-scale time, plus a scalar and an array TimeDelta.
        self.t = Time("2010-01-01", scale="utc")
        self.t2 = Time("2010-01-02 00:00:01", scale="utc")
        self.t3 = Time(
            "2010-01-03 01:02:03",
            scale="utc",
            precision=9,
            in_subfmt="date_hms",
            out_subfmt="date_hm",
            location=(-75.0 * u.degree, 30.0 * u.degree, 500 * u.m),
        )
        self.t4 = Time("2010-01-01", scale="local")
        self.dt = TimeDelta(100.0, format="sec")
        self.dt_array = TimeDelta(np.arange(100, 1000, 100), format="sec")
    def test_sub(self):
        """Subtraction in all supported Time/TimeDelta combinations."""
        # time - time
        dt = self.t2 - self.t
        assert repr(dt).startswith(
            "<TimeDelta object: scale='tai' format='jd' value=1.00001157407"
        )
        assert allclose_jd(dt.jd, 86401.0 / 86400.0)
        assert allclose_sec(dt.sec, 86401.0)
        # time - delta_time
        t = self.t2 - dt
        assert t.iso == self.t.iso
        # delta_time - delta_time
        dt2 = dt - self.dt
        assert allclose_sec(dt2.sec, 86301.0)
        # delta_time - time is not defined
        with pytest.raises(OperandTypeError):
            dt - self.t
    def test_add(self):
        """Addition in all supported Time/TimeDelta combinations."""
        # time + time is not defined
        with pytest.raises(OperandTypeError):
            self.t2 + self.t
        # time + delta_time
        dt = self.t2 - self.t
        t2 = self.t + dt
        assert t2.iso == self.t2.iso
        # delta_time + delta_time
        dt2 = dt + self.dt
        assert allclose_sec(dt2.sec, 86501.0)
        # delta_time + time
        dt = self.t2 - self.t
        t2 = dt + self.t
        assert t2.iso == self.t2.iso
    def test_add_vector(self):
        """Check time arithmetic as well as properly keeping track of whether
        a time is a scalar or a vector"""
        t = Time(0.0, format="mjd", scale="tai")
        t2 = Time([0.0, 1.0], format="mjd", scale="tai")
        dt = TimeDelta(100.0, format="jd")
        dt2 = TimeDelta([100.0, 200.0], format="jd")
        out = t + dt
        assert allclose_jd(out.mjd, 100.0)
        assert out.isscalar
        out = t + dt2
        assert allclose_jd(out.mjd, [100.0, 200.0])
        assert not out.isscalar
        out = t2 + dt
        assert allclose_jd(out.mjd, [100.0, 101.0])
        assert not out.isscalar
        out = dt + dt
        assert allclose_jd(out.jd, 200.0)
        assert out.isscalar
        out = dt + dt2
        assert allclose_jd(out.jd, [200.0, 300.0])
        assert not out.isscalar
        # Reverse the argument order
        out = dt + t
        assert allclose_jd(out.mjd, 100.0)
        assert out.isscalar
        out = dt2 + t
        assert allclose_jd(out.mjd, [100.0, 200.0])
        assert not out.isscalar
        out = dt + t2
        assert allclose_jd(out.mjd, [100.0, 101.0])
        assert not out.isscalar
        out = dt2 + dt
        assert allclose_jd(out.jd, [200.0, 300.0])
        assert not out.isscalar
    def test_sub_vector(self):
        """Check time arithmetic as well as properly keeping track of whether
        a time is a scalar or a vector"""
        t = Time(0.0, format="mjd", scale="tai")
        t2 = Time([0.0, 1.0], format="mjd", scale="tai")
        dt = TimeDelta(100.0, format="jd")
        dt2 = TimeDelta([100.0, 200.0], format="jd")
        out = t - dt
        assert allclose_jd(out.mjd, -100.0)
        assert out.isscalar
        out = t - dt2
        assert allclose_jd(out.mjd, [-100.0, -200.0])
        assert not out.isscalar
        out = t2 - dt
        assert allclose_jd(out.mjd, [-100.0, -99.0])
        assert not out.isscalar
        out = dt - dt
        assert allclose_jd(out.jd, 0.0)
        assert out.isscalar
        out = dt - dt2
        assert allclose_jd(out.jd, [0.0, -100.0])
        assert not out.isscalar
    @pytest.mark.parametrize(
        "values", [(2455197.5, 2455198.5), ([2455197.5], [2455198.5])]
    )
    def test_copy_timedelta(self, values):
        """Test copying the values of a TimeDelta object by passing it into the
        Time initializer.
        """
        val1, val2 = values
        t = Time(val1, format="jd", scale="utc")
        t2 = Time(val2, format="jd", scale="utc")
        dt = t2 - t
        # copy=False shares the internal jd1/jd2 arrays ...
        dt2 = TimeDelta(dt, copy=False)
        assert np.all(dt.jd == dt2.jd)
        assert dt._time.jd1 is dt2._time.jd1
        assert dt._time.jd2 is dt2._time.jd2
        # ... while copy=True must duplicate them.
        dt2 = TimeDelta(dt, copy=True)
        assert np.all(dt.jd == dt2.jd)
        assert dt._time.jd1 is not dt2._time.jd1
        assert dt._time.jd2 is not dt2._time.jd2
        # Include initializers
        dt2 = TimeDelta(dt, format="sec")
        assert allclose_sec(dt2.value, 86400.0)
    def test_neg_abs(self):
        """Negation and abs() of scalar and array TimeDeltas."""
        for dt in (self.dt, self.dt_array):
            dt2 = -dt
            assert np.all(dt2.jd == -dt.jd)
            dt3 = abs(dt)
            assert np.all(dt3.jd == dt.jd)
            dt4 = abs(dt2)
            assert np.all(dt4.jd == dt.jd)
    def test_mul_div(self):
        """Multiplication/division of TimeDeltas by numbers and arrays."""
        for dt in (self.dt, self.dt_array):
            dt2 = dt + dt + dt
            dt3 = 3.0 * dt
            assert allclose_jd(dt2.jd, dt3.jd)
            dt4 = dt3 / 3.0
            assert allclose_jd(dt4.jd, dt.jd)
        dt5 = self.dt * np.arange(3)
        assert dt5[0].jd == 0.0
        assert dt5[-1].jd == (self.dt + self.dt).jd
        dt6 = self.dt * [0, 1, 2]
        assert np.all(dt6.jd == dt5.jd)
        # Multiplying by a Time or an arbitrary object is not defined.
        with pytest.raises(OperandTypeError):
            self.dt * self.t
        with pytest.raises(TypeError):
            self.dt * object()
    def test_mean(self):
        # TimeDelta.mean() must agree with a mean computed from the two-float
        # (jd1, jd2) representation directly.
        def is_consistent(time_delta: TimeDelta):
            mean_expected = (
                np.sum(time_delta.jd1) + np.sum(time_delta.jd2)
            ) / time_delta.size
            mean_test = time_delta.mean().jd1 + time_delta.mean().jd2
            return mean_test == mean_expected
        assert is_consistent(self.dt)
        assert is_consistent(self.dt_array)
    def test_keep_properties(self):
        # closes #1924 (partially)
        # Arithmetic with a TimeDelta must preserve location, precision and
        # the in/out subformats of the Time operand.
        dt = TimeDelta(1000.0, format="sec")
        for t in (self.t, self.t3):
            ta = t + dt
            assert ta.location is t.location
            assert ta.precision == t.precision
            assert ta.in_subfmt == t.in_subfmt
            assert ta.out_subfmt == t.out_subfmt
            tr = dt + t
            assert tr.location is t.location
            assert tr.precision == t.precision
            assert tr.in_subfmt == t.in_subfmt
            assert tr.out_subfmt == t.out_subfmt
            ts = t - dt
            assert ts.location is t.location
            assert ts.precision == t.precision
            assert ts.in_subfmt == t.in_subfmt
            assert ts.out_subfmt == t.out_subfmt
        t_tdb = self.t.tdb
        assert hasattr(t_tdb, "_delta_tdb_tt")
        assert not hasattr(t_tdb, "_delta_ut1_utc")
        t_tdb_ut1 = t_tdb.ut1
        assert hasattr(t_tdb_ut1, "_delta_tdb_tt")
        assert hasattr(t_tdb_ut1, "_delta_ut1_utc")
        t_tdb_ut1_utc = t_tdb_ut1.utc
        assert hasattr(t_tdb_ut1_utc, "_delta_tdb_tt")
        assert hasattr(t_tdb_ut1_utc, "_delta_ut1_utc")
        # adding or subtracting some time should remove the delta's
        # since these are time-dependent and should be recalculated
        for op in (operator.add, operator.sub):
            t1 = op(t_tdb, dt)
            assert not hasattr(t1, "_delta_tdb_tt")
            assert not hasattr(t1, "_delta_ut1_utc")
            t2 = op(t_tdb_ut1, dt)
            assert not hasattr(t2, "_delta_tdb_tt")
            assert not hasattr(t2, "_delta_ut1_utc")
            t3 = op(t_tdb_ut1_utc, dt)
            assert not hasattr(t3, "_delta_tdb_tt")
            assert not hasattr(t3, "_delta_ut1_utc")
    def test_set_format(self):
        """
        Test basics of setting format attribute.
        """
        dt = TimeDelta(86400.0, format="sec")
        assert dt.value == 86400.0
        assert dt.format == "sec"
        dt.format = "jd"
        assert dt.value == 1.0
        assert dt.format == "jd"
        dt.format = "datetime"
        assert dt.value == timedelta(days=1)
        assert dt.format == "datetime"
    def test_from_non_float(self):
        """String and Decimal inputs keep precision beyond float64."""
        dt = TimeDelta("1.000000000000001", format="jd")
        assert dt != TimeDelta(1.000000000000001, format="jd")  # precision loss.
        assert dt == TimeDelta(1, 0.000000000000001, format="jd")
        dt2 = TimeDelta(Decimal("1.000000000000001"), format="jd")
        assert dt2 == dt
    def test_to_value(self):
        """to_value with format/unit arguments and its error cases."""
        dt = TimeDelta(86400.0, format="sec")
        assert dt.to_value("jd") == 1.0
        assert dt.to_value("jd", "str") == "1.0"
        assert dt.to_value("sec", subfmt="str") == "86400.0"
        with pytest.raises(
            ValueError,
            match="not one of the known formats.*failed to parse as a unit",
        ):
            dt.to_value("julian")
        with pytest.raises(TypeError, match="missing required format or unit"):
            dt.to_value()
class TestTimeDeltaScales:
    """Test scale conversion for Time Delta.
    Go through @taldcroft's list of expected behavior from #1932"""
    def setup_method(self):
        # pick a date that includes a leap second for better testing
        self.iso_times = [
            "2012-06-30 12:00:00",
            "2012-06-30 23:59:59",
            "2012-07-01 00:00:00",
            "2012-07-01 12:00:00",
        ]
        # One Time array per scale, and per-scale deltas relative to the
        # first element (so the first delta is always exactly zero).
        self.t = {
            scale: Time(self.iso_times, scale=scale, precision=9)
            for scale in TIME_SCALES
        }
        self.dt = {scale: self.t[scale] - self.t[scale][0] for scale in TIME_SCALES}
    def test_delta_scales_definition(self):
        # All delta scales (and None) are accepted; 'utc' is not a valid
        # TimeDelta scale.
        for scale in list(TIME_DELTA_SCALES) + [None]:
            TimeDelta([0.0, 1.0, 10.0], format="sec", scale=scale)
        with pytest.raises(ScaleValueError):
            TimeDelta([0.0, 1.0, 10.0], format="sec", scale="utc")
    @pytest.mark.parametrize(
        ("scale1", "scale2"),
        list(itertools.product(STANDARD_TIME_SCALES, STANDARD_TIME_SCALES)),
    )
    def test_standard_scales_for_time_minus_time(self, scale1, scale2):
        """T(X) - T2(Y) -- does T(X) - T2(Y).X and return dT(X)
        and T(X) +/- dT(Y) -- does (in essence) (T(X).Y +/- dT(Y)).X
        I.e., time differences of two times should have the scale of the
        first time. The one exception is UTC, which returns TAI.
        There are no standard timescales for which this does not work.
        """
        t1 = self.t[scale1]
        t2 = self.t[scale2]
        dt = t1 - t2
        if scale1 in TIME_DELTA_SCALES:
            assert dt.scale == scale1
        else:
            assert scale1 == "utc"
            assert dt.scale == "tai"
        # now check with delta time; also check reversibility
        t1_recover_t2_scale = t2 + dt
        assert t1_recover_t2_scale.scale == scale2
        t1_recover = getattr(t1_recover_t2_scale, scale1)
        assert allclose_jd(t1_recover.jd, t1.jd)
        t2_recover_t1_scale = t1 - dt
        assert t2_recover_t1_scale.scale == scale1
        t2_recover = getattr(t2_recover_t1_scale, scale2)
        assert allclose_jd(t2_recover.jd, t2.jd)
    def test_local_scales_for_time_minus_time(self):
        """T1(local) - T2(local) should return dT(local)
        T1(local) +/- dT(local) or T1(local) +/- Quantity(time-like) should
        also return T(local)
        I.e. Tests that time differences of two local scale times should
        return delta time with local timescale. Furthermore, checks that
        arithmetic of T(local) with dT(None) or time-like quantity does work.
        Also tests that subtracting two Time objects, one having local time
        scale and other having standard time scale should raise TypeError.
        """
        t1 = self.t["local"]
        t2 = Time("2010-01-01", scale="local")
        dt = t1 - t2
        assert dt.scale == "local"
        # now check with delta time
        t1_recover = t2 + dt
        assert t1_recover.scale == "local"
        assert allclose_jd(t1_recover.jd, t1.jd)
        # check that dT(None) can be subtracted from T(local)
        dt2 = TimeDelta([10.0], format="sec", scale=None)
        t3 = t2 - dt2
        assert t3.scale == t2.scale
        # check that time quantity can be subtracted from T(local)
        q = 10 * u.s
        assert (t2 - q).value == (t2 - dt2).value
        # Check that one cannot subtract/add times with a standard scale
        # from a local one (or vice versa)
        t1 = self.t["local"]
        for scale in STANDARD_TIME_SCALES:
            t2 = self.t[scale]
            with pytest.raises(TypeError):
                t1 - t2
            with pytest.raises(TypeError):
                t2 - t1
            with pytest.raises(TypeError):
                t2 - dt
            with pytest.raises(TypeError):
                t2 + dt
            with pytest.raises(TypeError):
                dt + t2
    def test_scales_for_delta_minus_delta(self):
        """dT(X) +/- dT2(Y) -- Add/substract JDs for dT(X) and dT(Y).X
        I.e. this will succeed if dT(Y) can be converted to scale X.
        Returns delta time in scale X
        """
        # geocentric timescales
        dt_tai = self.dt["tai"]
        dt_tt = self.dt["tt"]
        dt0 = dt_tai - dt_tt
        assert dt0.scale == "tai"
        # tai and tt have the same scale, so differences should be the same
        assert allclose_sec(dt0.sec, 0.0)
        dt_tcg = self.dt["tcg"]
        dt1 = dt_tai - dt_tcg
        assert dt1.scale == "tai"
        # tai and tcg do not have the same scale, so differences different
        assert not allclose_sec(dt1.sec, 0.0)
        t_tai_tcg = self.t["tai"].tcg
        dt_tai_tcg = t_tai_tcg - t_tai_tcg[0]
        dt2 = dt_tai - dt_tai_tcg
        assert dt2.scale == "tai"
        # but if tcg difference calculated from tai, it should roundtrip
        assert allclose_sec(dt2.sec, 0.0)
        # check that if we put TCG first, we get a TCG scale back
        dt3 = dt_tai_tcg - dt_tai
        assert dt3.scale == "tcg"
        assert allclose_sec(dt3.sec, 0.0)
        for scale in "tdb", "tcb", "ut1":
            with pytest.raises(TypeError):
                dt_tai - self.dt[scale]
        # barycentric timescales
        dt_tcb = self.dt["tcb"]
        dt_tdb = self.dt["tdb"]
        dt4 = dt_tcb - dt_tdb
        assert dt4.scale == "tcb"
        # tcb and tdb run at different rates, so the differences are nonzero.
        # (This used to re-check dt1 by mistake instead of dt4.)
        assert not allclose_sec(dt4.sec, 0.0)
        t_tcb_tdb = self.t["tcb"].tdb
        dt_tcb_tdb = t_tcb_tdb - t_tcb_tdb[0]
        dt5 = dt_tcb - dt_tcb_tdb
        assert dt5.scale == "tcb"
        assert allclose_sec(dt5.sec, 0.0)
        for scale in "utc", "tai", "tt", "tcg", "ut1":
            with pytest.raises(TypeError):
                dt_tcb - self.dt[scale]
        # rotational timescale (dt6/dt7 below: previously both were named
        # dt5, shadowing the barycentric result above)
        dt_ut1 = self.dt["ut1"]
        dt6 = dt_ut1 - dt_ut1[-1]
        assert dt6.scale == "ut1"
        assert dt6[-1].sec == 0.0
        for scale in "utc", "tai", "tt", "tcg", "tcb", "tdb":
            with pytest.raises(TypeError):
                dt_ut1 - self.dt[scale]
        # local time scale
        dt_local = self.dt["local"]
        dt7 = dt_local - dt_local[-1]
        assert dt7.scale == "local"
        assert dt7[-1].sec == 0.0
        for scale in "utc", "tai", "tt", "tcg", "tcb", "tdb", "ut1":
            with pytest.raises(TypeError):
                dt_local - self.dt[scale]
    @pytest.mark.parametrize(
        ("scale", "op"),
        list(itertools.product(TIME_SCALES, (operator.add, operator.sub))),
    )
    def test_scales_for_delta_scale_is_none(self, scale, op):
        """T(X) +/- dT(None) or T(X) +/- Quantity(time-like)
        This is always allowed and just adds JDs, i.e., the scale of
        the TimeDelta or time-like Quantity will be taken to be X.
        The one exception is again for X=UTC, where TAI is assumed instead,
        so that a day is always defined as 86400 seconds.
        """
        dt_none = TimeDelta([0.0, 1.0, -1.0, 1000.0], format="sec")
        assert dt_none.scale is None
        q_time = dt_none.to("s")
        dt = self.dt[scale]
        dt1 = op(dt, dt_none)
        assert dt1.scale == dt.scale
        assert allclose_jd(dt1.jd, op(dt.jd, dt_none.jd))
        dt2 = op(dt_none, dt)
        assert dt2.scale == dt.scale
        assert allclose_jd(dt2.jd, op(dt_none.jd, dt.jd))
        dt3 = op(q_time, dt)
        assert dt3.scale == dt.scale
        assert allclose_jd(dt3.jd, dt2.jd)
        t = self.t[scale]
        t1 = op(t, dt_none)
        assert t1.scale == t.scale
        assert allclose_jd(t1.jd, op(t.jd, dt_none.jd))
        if op is operator.add:
            # delta + time is only defined for addition.
            t2 = op(dt_none, t)
            assert t2.scale == t.scale
            assert allclose_jd(t2.jd, t1.jd)
        t3 = op(t, q_time)
        assert t3.scale == t.scale
        assert allclose_jd(t3.jd, t1.jd)
    @pytest.mark.parametrize("scale", TIME_SCALES)
    def test_delta_day_is_86400_seconds(self, scale):
        """TimeDelta or Quantity holding 1 day always means 24*60*60 seconds
        This holds true for all timescales but UTC, for which leap-second
        days are longer or shorter by one second.
        """
        t = self.t[scale]
        dt_day = TimeDelta(1.0, format="jd")
        q_day = dt_day.to("day")
        dt_day_leap = t[-1] - t[0]
        # ^ = exclusive or, so either equal and not UTC, or not equal and UTC
        assert allclose_jd(dt_day_leap.jd, dt_day.jd) ^ (scale == "utc")
        t1 = t[0] + dt_day
        assert allclose_jd(t1.jd, t[-1].jd) ^ (scale == "utc")
        t2 = q_day + t[0]
        assert allclose_jd(t2.jd, t[-1].jd) ^ (scale == "utc")
        t3 = t[-1] - dt_day
        assert allclose_jd(t3.jd, t[0].jd) ^ (scale == "utc")
        t4 = t[-1] - q_day
        assert allclose_jd(t4.jd, t[0].jd) ^ (scale == "utc")
def test_timedelta_setitem():
    """Item assignment on a jd-format TimeDelta accepts plain floats
    (interpreted in the current format), Quantities and TimeDeltas; a
    dimensionally incompatible Quantity is rejected."""
    delta = TimeDelta([1, 2, 3] * u.d, format="jd")
    delta[0] = 0.5
    assert allclose_jd(delta.value, [0.5, 2, 3])
    delta[1:] = 4.5
    assert allclose_jd(delta.value, [0.5, 4.5, 4.5])
    delta[:] = 86400 * u.s
    assert allclose_jd(delta.value, [1, 1, 1])
    delta[1] = TimeDelta(2, format="jd")
    assert allclose_jd(delta.value, [1, 2, 1])
    with pytest.raises(ValueError) as excinfo:
        delta[1] = 1 * u.m
    assert "cannot convert value to a compatible TimeDelta" in str(excinfo.value)
def test_timedelta_setitem_sec():
    """Same item-assignment checks as above, but with a sec-format TimeDelta,
    so bare floats are interpreted as seconds."""
    delta = TimeDelta([1, 2, 3], format="sec")
    delta[0] = 0.5
    assert allclose_jd(delta.value, [0.5, 2, 3])
    delta[1:] = 4.5
    assert allclose_jd(delta.value, [0.5, 4.5, 4.5])
    delta[:] = 1 * u.day
    assert allclose_jd(delta.value, [86400, 86400, 86400])
    delta[1] = TimeDelta(2, format="jd")
    assert allclose_jd(delta.value, [86400, 86400 * 2, 86400])
    with pytest.raises(ValueError) as excinfo:
        delta[1] = 1 * u.m
    assert "cannot convert value to a compatible TimeDelta" in str(excinfo.value)
def test_timedelta_mask():
    """Assigning np.ma.masked to an element masks it in .mask and .value."""
    delta = TimeDelta([1, 2] * u.d, format="jd")
    delta[1] = np.ma.masked
    assert np.all(delta.mask == [False, True])
    assert allclose_jd(delta[0].value, 1)
    assert delta.value[1] is np.ma.masked
def test_python_timedelta_scalar():
    """Round-trip a scalar datetime.timedelta through TimeDelta."""
    expected = timedelta(days=1, seconds=1)
    from_datetime = TimeDelta(expected, format="datetime")
    assert from_datetime.sec == 86401.0
    from_sec = TimeDelta(86401.0, format="sec")
    assert from_sec.datetime == expected
def test_python_timedelta_vector():
    """Round-trip a nested list of datetime.timedelta objects."""
    expected = [
        [timedelta(days=1), timedelta(days=2)],
        [timedelta(days=3), timedelta(days=4)],
    ]
    from_datetime = TimeDelta(expected, format="datetime")
    assert np.all(from_datetime.jd == [[1, 2], [3, 4]])
    from_jd = TimeDelta([[1, 2], [3, 4]], format="jd")
    assert np.all(from_jd.datetime == expected)
def test_timedelta_to_datetime():
    """to_datetime() converts scalar and array TimeDeltas to timedelta."""
    scalar = TimeDelta(1, format="jd")
    assert scalar.to_datetime() == timedelta(days=1)
    array = TimeDelta([[1, 2], [3, 4]], format="jd")
    expected = [
        [timedelta(days=1), timedelta(days=2)],
        [timedelta(days=3), timedelta(days=4)],
    ]
    assert np.all(array.to_datetime() == expected)
def test_insert_timedelta():
    """TimeDelta.insert splices another TimeDelta into an array."""
    base = TimeDelta([1, 2], format="sec")
    # Insert a two-element TimeDelta at position 1.
    result = base.insert(1, TimeDelta([10, 20], format="sec"))
    assert np.all(result == TimeDelta([1, 10, 20, 2], format="sec"))
def test_no_units_warning():
    """A bare number (or unit-less array/Column) passed to TimeDelta warns
    and is interpreted as days; an explicit unit or format silences it."""
    with pytest.warns(TimeDeltaMissingUnitWarning):
        delta = TimeDelta(1)
        assert delta.to_value(u.day) == 1
    with pytest.warns(TimeDeltaMissingUnitWarning):
        table = Table({"t": [1, 2, 3]})
        delta = TimeDelta(table["t"])
        assert np.all(delta.to_value(u.day) == [1, 2, 3])
    with pytest.warns(TimeDeltaMissingUnitWarning):
        delta = TimeDelta(np.array([1, 2, 3]))
        assert np.all(delta.to_value(u.day) == [1, 2, 3])
    with pytest.warns(TimeDeltaMissingUnitWarning):
        # Implicit TimeDelta created by Time + number also warns.
        t = Time("2012-01-01") + 1
        assert t.isot[:10] == "2012-01-02"
    with pytest.warns(TimeDeltaMissingUnitWarning):
        # Comparison against a bare number warns too.
        comp = TimeDelta([1, 2, 3], format="jd") >= 2
        assert np.all(comp == [False, True, True])
    with pytest.warns(TimeDeltaMissingUnitWarning):
        # 2 is also interpreted as days, not seconds
        assert (TimeDelta(5 * u.s) > 2) is False
    # with unit is ok
    assert TimeDelta(1 * u.s).to_value(u.s) == 1
    # with format is also ok
    assert TimeDelta(1, format="sec").to_value(u.s) == 1
    assert TimeDelta(1, format="jd").to_value(u.day) == 1
    # table column with units
    table = Table({"t": [1, 2, 3] * u.s})
    assert np.all(TimeDelta(table["t"]).to_value(u.s) == [1, 2, 3])
|
2e6723be5a30ba047e380824d2320c3ea180951032395cb21c5ffc6e706351f8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import numpy as np
import pytest
from astropy.time import Time, TimeYearDayTime, conf
# Sample timestamps covering a leap day, minute precision, second precision
# and fractional seconds, in iso / isot / yday flavors.
iso_times = [
    "2000-02-29",
    "1981-12-31 12:13",
    "1981-12-31 12:13:14",
    "2020-12-31 12:13:14.56",
]
isot_times = [re.sub(" ", "T", tm) for tm in iso_times]
yday_times = ["2000:060", "1981:365:12:13:14", "1981:365:12:13", "2020:366:12:13:14.56"]
# Transpose the array to check that strides are dealt with correctly.
yday_array = np.array(
    [["2000:060", "1981:365:12:13:14"], ["1981:365:12:13", "2020:366:12:13:14.56"]]
).T
def test_fast_conf():
    """Behavior of the use_fast_parser config item ('True'/'False'/'force')."""
    # Default is to try C parser and then Python parser. Both fail so we get the
    # Python message.
    assert conf.use_fast_parser == "True"  # default
    with pytest.raises(ValueError, match="Time 2000:0601 does not match yday format"):
        Time("2000:0601", format="yday")
    # This is one case where Python parser is different from C parser because the
    # Python parser has a bug and fails with a trailing ".", but C parser works.
    Time("2020:150:12:13:14.", format="yday")
    with conf.set_temp("use_fast_parser", "force"):
        Time("2020:150:12:13:14.", format="yday")
    with conf.set_temp("use_fast_parser", "False"):
        with pytest.raises(ValueError, match="could not convert string to float"):
            Time("2020:150:12:13:14.", format="yday")
    with conf.set_temp("use_fast_parser", "False"):
        assert conf.use_fast_parser == "False"
        # Make sure that this is really giving the Python parser
        with pytest.raises(
            ValueError, match="Time 2000:0601 does not match yday format"
        ):
            Time("2000:0601", format="yday")
    with conf.set_temp("use_fast_parser", "force"):
        assert conf.use_fast_parser == "force"
        # Make sure that this is really giving the C parser (its distinctive
        # error message, matched below, proves it).
        err = (
            "fast C time string parser failed: time string ends in middle of component"
        )
        with pytest.raises(ValueError, match=err):
            Time("2000:0601", format="yday")
@pytest.mark.parametrize(
    "times,format",
    [
        (iso_times, "iso"),
        (isot_times, "isot"),
        (yday_times, "yday"),
        (yday_array, "yday"),
    ],
)
@pytest.mark.parametrize("variant", [0, 1, 2])
def test_fast_matches_python(times, format, variant):
    """The C and Python string parsers must produce identical Times."""
    if variant == 0:
        pass  # list/array of different values (null terminated strings included)
    elif variant == 1:
        times = times[-1]  # scalar
    elif variant == 2:
        times = [times[-1]] * 2  # list/array of identical values (no null terminations)
    with conf.set_temp("use_fast_parser", "False"):
        tms_py = Time(times, format=format)
    with conf.set_temp("use_fast_parser", "force"):
        tms_c = Time(times, format=format)
    # Times are binary identical
    assert np.all(tms_py == tms_c)
def test_fast_yday_exceptions():
    """Each failure mode of the C yday parser has a specific message:
    truncated input (at a component boundary or mid-component), wrong
    delimiter, non-digit where a digit is required, and out-of-range
    day of year."""
    bad_inputs = [
        ("2020:150:12", "time string ends at beginning of component"),
        ("2020:150:1", "time string ends in middle of component"),
        ("2020:150*12:13:14", "required delimiter character"),
        ("2020:15*:12:13:14", "non-digit found where digit"),
        ("2020:999:12:13:14", "bad day of year"),
    ]
    with conf.set_temp("use_fast_parser", "force"):
        for value, message in bad_inputs:
            with pytest.raises(ValueError, match=message):
                Time(value, format="yday")
def test_fast_iso_exceptions():
    """Failure messages of the C iso parser for malformed inputs."""
    bad_inputs = [
        ("2020-10-10 12", "time string ends at beginning of component"),
        ("2020-10-10 1", "time string ends in middle of component"),
        ("2020*10-10 12:13:14", "required delimiter character"),
        ("2020-10-10 *2:13:14", "non-digit found where digit"),
    ]
    with conf.set_temp("use_fast_parser", "force"):
        for value, message in bad_inputs:
            with pytest.raises(ValueError, match=message):
                Time(value, format="iso")
def test_fast_non_ascii():
    """The forced C parser rejects time strings containing non-ASCII bytes."""
    with pytest.raises(ValueError, match="input is not pure ASCII"), conf.set_temp(
        "use_fast_parser", "force"
    ):
        Time("2020-01-01 1ᛦ:13:14.4324")
def test_fast_subclass():
    """Test subclass where use_fast_parser class attribute is not in __dict__"""
    class TimeYearDayTimeSubClass(TimeYearDayTime):
        name = "yday_subclass"
    # Inheritance works
    assert hasattr(TimeYearDayTimeSubClass, "fast_parser_pars")
    assert "fast_parser_pars" not in TimeYearDayTimeSubClass.__dict__
    try:
        # For YearDayTime, forcing the fast parser with a bad date will give
        # "fast C time string parser failed: time string ends in middle of component".
        # But since YearDayTimeSubClass does not have fast_parser_pars it will
        # use the Python parser.
        with pytest.raises(
            ValueError, match="Time 2000:0601 does not match yday_subclass format"
        ):
            with conf.set_temp("use_fast_parser", "force"):
                Time("2000:0601", format="yday_subclass")
    finally:
        # Defining the subclass registered it globally; remove it so later
        # tests are unaffected.
        del TimeYearDayTimeSubClass._registry["yday_subclass"]
|
abd564657a9d343f6f1ba9f3525c52e993877da1c4bc122a4bdf8fe1b8691e54 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import datetime
import functools
import os
from copy import deepcopy
from decimal import Decimal, localcontext
from io import StringIO
import erfa
import numpy as np
import pytest
from erfa import ErfaWarning
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.table import Column, Table
from astropy.time import (
STANDARD_TIME_SCALES,
TIME_FORMATS,
ScaleValueError,
Time,
TimeDelta,
TimeString,
TimezoneInfo,
conf,
)
from astropy.utils import iers, isiterable
from astropy.utils.compat.optional_deps import HAS_H5PY, HAS_PYTZ
from astropy.utils.exceptions import AstropyDeprecationWarning
# Comparison helpers with tolerances at the float64 machine-epsilon limit.
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(
    np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps
)  # 20 ps atol
allclose_sec = functools.partial(
    np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps * 24 * 3600
)
allclose_year = functools.partial(
    np.allclose, rtol=np.finfo(float).eps, atol=0.0
)  # 14 microsec at current epoch
def setup_function(func):
    # Snapshot the Time format registry so tests may mutate Time.FORMATS;
    # teardown_function restores it.
    func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
    # Restore the Time format registry saved in setup_function.
    Time.FORMATS.clear()
    Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"]
t = Time(times, format="iso", scale="utc")
assert (
repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2, np.array([-0.5 + 1.4288980208333335e-06, -0.50000000e00])
)
# Set scale to TAI
t = t.tai
assert (
repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>"
)
assert allclose_jd(t.jd1, np.array([2451180.0, 2455198.0]))
assert allclose_jd2(
t.jd2,
np.array([-0.5 + 0.00037179926839122024, -0.5 + 0.00039351851851851852]),
)
# Get a new ``Time`` object which is referenced to the TT scale
# (internal JD1 and JD1 are now with respect to TT scale)"""
assert (
repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>"
)
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
# array, depending on whether the input was a scalar or array"""
assert allclose_sec(
t.cxcsec, np.array([31536064.307456788, 378691266.18400002])
)
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format="jd")
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000.0, 2450010.0)
t2 = Time(val, format="jd")
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.0
t3 = Time(val, val2, format="jd")
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.0) / 10.0).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format="jd")
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize("format_", Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == "tai"
    @pytest.mark.parametrize("value", [2455197.5, [2455197.5]])
    def test_copy_time(self, value):
        """Test copying the values of a Time object by passing it into the
        Time initializer.
        """
        t = Time(value, format="jd", scale="utc")
        # copy=False must share the underlying jd1/jd2 arrays by identity.
        t2 = Time(t, copy=False)
        assert np.all(t.jd - t2.jd == 0)
        assert np.all((t - t2).jd == 0)
        assert t._time.jd1 is t2._time.jd1
        assert t._time.jd2 is t2._time.jd2
        # copy=True must give equal values but distinct array objects.
        t2 = Time(t, copy=True)
        assert np.all(t.jd - t2.jd == 0)
        assert np.all((t - t2).jd == 0)
        assert t._time.jd1 is not t2._time.jd1
        assert t._time.jd2 is not t2._time.jd2
        # Include initializers: format/scale/precision overrides on copy.
        t2 = Time(t, format="iso", scale="tai", precision=1)
        assert t2.value == "2010-01-01 00:00:34.0"
        t2 = Time(t, format="iso", scale="tai", out_subfmt="date")
        assert t2.value == "2010-01-01"
    def test_getitem(self):
        """Test that Time objects holding arrays are properly subscriptable,
        set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
        mjd = np.arange(50000, 50010)
        t = Time(mjd, format="mjd", scale="utc", location=("45d", "50d"))
        # Integer index gives a scalar Time sharing the (scalar) location.
        t1 = t[3]
        assert t1.isscalar is True
        assert t1._time.jd1 == t._time.jd1[3]
        assert t1.location is t.location
        t1a = Time(mjd[3], format="mjd", scale="utc")
        assert t1a.isscalar is True
        assert np.all(t1._time.jd1 == t1a._time.jd1)
        t1b = Time(t[3])
        assert t1b.isscalar is True
        assert np.all(t1._time.jd1 == t1b._time.jd1)
        # Slicing keeps array-ness and slices the internal jd1 the same way.
        t2 = t[4:6]
        assert t2.isscalar is False
        assert np.all(t2._time.jd1 == t._time.jd1[4:6])
        assert t2.location is t.location
        t2a = Time(t[4:6])
        assert t2a.isscalar is False
        assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
        # Lists and tuples of scalar Times concatenate equivalently.
        t2b = Time([t[4], t[5]])
        assert t2b.isscalar is False
        assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
        t2c = Time((t[4], t[5]))
        assert t2c.isscalar is False
        assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
        t.delta_tdb_tt = np.arange(len(t))  # Explicitly set (not testing .tdb)
        # An explicitly-set array attribute is sliced along with the times.
        t3 = t[4:6]
        assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
        # With an array-valued location, indexing slices the location too.
        t4 = Time(
            mjd,
            format="mjd",
            scale="utc",
            location=(np.arange(len(mjd)), np.arange(len(mjd))),
        )
        t5a = t4[3]
        assert t5a.location == t4.location[3]
        assert t5a.location.shape == ()
        t5b = t4[3:4]
        assert t5b.location.shape == (1,)
        # Check that indexing a size-1 array returns a scalar location as well;
        # see gh-10113.
        t5c = t5b[0]
        assert t5c.location.shape == ()
        t6 = t4[4:6]
        assert np.all(t6.location == t4.location[4:6])
        # check it is a view
        # (via ndarray, since quantity setter problematic for structured array)
        allzeros = np.array((0.0, 0.0, 0.0), dtype=t4.location.dtype)
        assert t6.location.view(np.ndarray)[-1] != allzeros
        assert t4.location.view(np.ndarray)[5] != allzeros
        # Writing through the slice must be visible in the parent (view, not copy).
        t6.location.view(np.ndarray)[-1] = allzeros
        assert t4.location.view(np.ndarray)[5] == allzeros
        # Test subscription also works for two-dimensional arrays.
        frac = np.arange(0.0, 0.999, 0.2)
        t7 = Time(
            mjd[:, np.newaxis] + frac,
            format="mjd",
            scale="utc",
            location=("45d", "50d"),
        )
        assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
        assert t7[0, 0].isscalar is True
        assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
        assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
        assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
        assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
        assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
        assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
        # Get tdb to check that delta_tdb_tt attribute is sliced properly.
        t7_tdb = t7.tdb
        assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
        assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
        assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
        # Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
        t7.delta_tdb_tt = 0.1
        t7_tdb2 = t7.tdb
        assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
        assert t7_tdb2[5].delta_tdb_tt == 0.1
        assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
        # Check broadcasting of location.
        t8 = Time(
            mjd[:, np.newaxis] + frac,
            format="mjd",
            scale="utc",
            location=(np.arange(len(frac)), np.arange(len(frac))),
        )
        assert t8[0, 0].location == t8.location[0, 0]
        assert np.all(t8[5].location == t8.location[5])
        assert np.all(t8[:, 2].location == t8.location[:, 2])
        # Finally check empty array.
        t9 = t[:0]
        assert t9.isscalar is False
        assert t9.shape == (0,)
        assert t9.size == 0
    def test_properties(self):
        """Use properties to convert scales and formats. Note that the UT1 to
        UTC transformation requires a supplementary value (``delta_ut1_utc``)
        that can be obtained by interpolating from a table supplied by IERS.
        This is tested separately."""
        t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
        t.delta_ut1_utc = 0.3341  # Explicitly set one part of the xform
        assert allclose_jd(t.jd, 2455197.5)
        assert t.iso == "2010-01-01 00:00:00.000"
        assert t.tt.iso == "2010-01-01 00:01:06.184"
        assert t.tai.fits == "2010-01-01T00:00:34.000"
        assert allclose_jd(t.utc.jd, 2455197.5)
        assert allclose_jd(t.ut1.jd, 2455197.500003867)
        assert t.tcg.isot == "2010-01-01T00:01:06.910"
        # Epoch-based formats and the stdlib datetime representation.
        assert allclose_sec(t.unix, 1262304000.0)
        assert allclose_sec(t.cxcsec, 378691266.184)
        assert allclose_sec(t.gps, 946339215.0)
        assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
# Uses initial class-defined precision=3
assert t.iso == "2010-01-01 00:00:00.000"
# Set instance precision to 9
t.precision = 9
assert t.iso == "2010-01-01 00:00:00.000000000"
assert t.tai.utc.iso == "2010-01-01 00:00:00.000000000"
def test_precision_input(self):
"""Verifies that precision can only be 0-9 (inclusive). Any other
value should raise a ValueError exception."""
err_message = "precision attribute must be an int"
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=10)
with pytest.raises(ValueError, match=err_message):
t = Time("2010-01-01 00:00:00", format="iso", scale="utc")
t.precision = -1
    def test_transforms(self):
        """Transform from UTC to all supported time scales (TAI, TCB, TCG,
        TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
        longitude)."""
        lat = 19.48125
        lon = -155.933222
        t = Time(
            "2006-01-15 21:24:37.5",
            format="iso",
            scale="utc",
            precision=7,
            location=(lon, lat),
        )
        t.delta_ut1_utc = 0.3341  # Explicitly set one part of the xform
        # Expected strings at 7-digit precision for every scale.
        assert t.utc.iso == "2006-01-15 21:24:37.5000000"
        assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
        assert t.tai.iso == "2006-01-15 21:25:10.5000000"
        assert t.tt.iso == "2006-01-15 21:25:42.6840000"
        assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
        assert t.tdb.iso == "2006-01-15 21:25:42.6843728"
        assert t.tcb.iso == "2006-01-15 21:25:56.8939523"
    def test_transforms_no_location(self):
        """Location should default to geocenter (relevant for TDB, TCB)."""
        t = Time("2006-01-15 21:24:37.5", format="iso", scale="utc", precision=7)
        t.delta_ut1_utc = 0.3341  # Explicitly set one part of the xform
        assert t.utc.iso == "2006-01-15 21:24:37.5000000"
        assert t.ut1.iso == "2006-01-15 21:24:37.8341000"
        assert t.tai.iso == "2006-01-15 21:25:10.5000000"
        assert t.tt.iso == "2006-01-15 21:25:42.6840000"
        assert t.tcg.iso == "2006-01-15 21:25:43.3226905"
        # The last TDB/TCB digits differ from test_transforms' values because
        # of the geocenter default instead of the explicit surface location.
        assert t.tdb.iso == "2006-01-15 21:25:42.6843725"
        assert t.tcb.iso == "2006-01-15 21:25:56.8939519"
        # Check we get the same result with an explicit geocenter location.
        t2 = Time(
            "2006-01-15 21:24:37.5",
            format="iso",
            scale="utc",
            location=(0 * u.m, 0 * u.m, 0 * u.m),
        )
        assert t == t2
        assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(lon, lat),
)
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=location,
)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(
["2006-01-15 21:24:37.5"],
format="iso",
scale="utc",
precision=6,
location=(location.x, location.y, location.z),
)
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
    def test_location_array(self):
        """Check that location arrays are checked for size and used
        for the corresponding times. Also checks that erfa
        can handle array-valued locations, and can broadcast these if needed.
        """
        lat = 19.48125
        lon = -155.933222
        # Scalar location applied to 2 times: both get the same TDB value.
        t = Time(
            ["2006-01-15 21:24:37.5"] * 2,
            format="iso",
            scale="utc",
            precision=6,
            location=(lon, lat),
        )
        assert np.all(t.utc.iso == "2006-01-15 21:24:37.500000")
        assert np.all(t.tdb.iso[0] == "2006-01-15 21:25:42.684373")
        # Two locations for two times: element-wise pairing, so TDB differs.
        t2 = Time(
            ["2006-01-15 21:24:37.5"] * 2,
            format="iso",
            scale="utc",
            precision=6,
            location=(np.array([lon, 0]), np.array([lat, 0])),
        )
        assert np.all(t2.utc.iso == "2006-01-15 21:24:37.500000")
        assert t2.tdb.iso[0] == "2006-01-15 21:25:42.684373"
        assert t2.tdb.iso[1] != "2006-01-15 21:25:42.684373"
        with pytest.raises(ValueError):  # 1 time, but two locations
            Time(
                "2006-01-15 21:24:37.5",
                format="iso",
                scale="utc",
                precision=6,
                location=(np.array([lon, 0]), np.array([lat, 0])),
            )
        with pytest.raises(ValueError):  # 3 times, but two locations
            Time(
                ["2006-01-15 21:24:37.5"] * 3,
                format="iso",
                scale="utc",
                precision=6,
                location=(np.array([lon, 0]), np.array([lat, 0])),
            )
        # multidimensional
        mjd = np.arange(50000.0, 50008.0).reshape(4, 2)
        # Scalar location with a (4, 2) time array: location stays scalar.
        t3 = Time(mjd, format="mjd", scale="utc", location=(lon, lat))
        assert t3.shape == (4, 2)
        assert t3.location.shape == ()
        assert t3.tdb.shape == t3.shape
        # Length-2 location broadcasts against the trailing axis to (4, 2).
        t4 = Time(
            mjd,
            format="mjd",
            scale="utc",
            location=(np.array([lon, 0]), np.array([lat, 0])),
        )
        assert t4.shape == (4, 2)
        assert t4.location.shape == t4.shape
        assert t4.tdb.shape == t4.shape
        # (4, 1) location broadcasts against the leading axis to (4, 2).
        t5 = Time(
            mjd,
            format="mjd",
            scale="utc",
            location=(
                np.array([[lon], [0], [0], [0]]),
                np.array([[lat], [0], [0], [0]]),
            ),
        )
        assert t5.shape == (4, 2)
        assert t5.location.shape == t5.shape
        assert t5.tdb.shape == t5.shape
    def test_all_scale_transforms(self):
        """Test that standard scale transforms work. Does not test correctness,
        except reversibility [#2074]. Also tests that standard scales can't be
        converted to local scales"""
        lat = 19.48125
        lon = -155.933222
        # Disable IERS auto-download so results don't depend on the network.
        with iers.conf.set_temp("auto_download", False):
            # Check every scale1 -> scale2 -> scale1 round trip is lossless.
            for scale1 in STANDARD_TIME_SCALES:
                t1 = Time(
                    "2006-01-15 21:24:37.5",
                    format="iso",
                    scale=scale1,
                    location=(lon, lat),
                )
                for scale2 in STANDARD_TIME_SCALES:
                    t2 = getattr(t1, scale2)
                    t21 = getattr(t2, scale1)
                    assert allclose_jd(t21.jd, t1.jd)
                # test for conversion to local scale
                scale3 = "local"
                with pytest.raises(ScaleValueError):
                    t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format="decimalyear")
Time(100.0, format="cxcsec")
Time(100.0, format="unix")
Time(100.0, format="gps")
Time(1950.0, format="byear", scale="tai")
Time(2000.0, format="jyear", scale="tai")
Time("B1950.0", format="byear_str", scale="tai")
Time("J2000.0", format="jyear_str", scale="tai")
Time("2000-01-01 12:23:34.0", format="iso", scale="tai")
Time("2000-01-01 12:23:34.0Z", format="iso", scale="utc")
Time("2000-01-01T12:23:34.0", format="isot", scale="tai")
Time("2000-01-01T12:23:34.0Z", format="isot", scale="utc")
Time("2000-01-01T12:23:34.0", format="fits")
Time("2000-01-01T12:23:34.0", format="fits", scale="tdb")
Time(2400000.5, 51544.0333981, format="jd", scale="tai")
Time(0.0, 51544.0333981, format="mjd", scale="tai")
Time("2000:001:12:23:34.0", format="yday", scale="tai")
Time("2000:001:12:23:34.0Z", format="yday", scale="utc")
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format="datetime", scale="tai")
Time([dt, dt], format="datetime", scale="tai")
dt64 = np.datetime64("2012-06-18T02:00:05.453000000")
Time(dt64, format="datetime64", scale="tai")
Time([dt64, dt64], format="datetime64", scale="tai")
    def test_local_format_transforms(self):
        """
        Test transformation of local time to different formats.
        Transformation to formats with a reference time should give
        ScaleValueError.
        """
        t = Time("2006-01-15 21:24:37.5", scale="local")
        assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
        assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600.0 / 24.0, rtol=0.0)
        assert_allclose(
            t.decimalyear,
            2006.0408002758752,
            atol=0.001 / 3600.0 / 24.0 / 365.0,
            rtol=0.0,
        )
        assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
        assert t.isot == "2006-01-15T21:24:37.500"
        assert t.yday == "2006:015:21:24:37.500"
        assert t.fits == "2006-01-15T21:24:37.500"
        assert_allclose(
            t.byear, 2006.04217888831, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
        )
        assert_allclose(
            t.jyear, 2006.0407723496082, atol=0.001 / 3600.0 / 24.0 / 365.0, rtol=0.0
        )
        assert t.byear_str == "B2006.042"
        assert t.jyear_str == "J2006.041"
        # Epoch-referenced formats are undefined for the local scale.
        with pytest.raises(ScaleValueError):
            t.gps
        with pytest.raises(ScaleValueError):
            t.unix
        with pytest.raises(ScaleValueError):
            t.cxcsec
        with pytest.raises(ScaleValueError):
            t.plot_date
    def test_datetime(self):
        """
        Test datetime format, including guessing the format from the input type
        by not providing the format keyword to Time.
        """
        dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
        dt2 = datetime.datetime(2001, 1, 1)
        t = Time(dt, scale="utc", precision=9)
        assert t.iso == "2000-01-02 03:04:05.123456000"
        assert t.datetime == dt
        assert t.value == dt
        t2 = Time(t.iso, scale="utc")
        assert t2.datetime == dt
        t = Time([dt, dt2], scale="utc")
        assert np.all(t.value == [dt, dt2])
        # Sub-microsecond input is rounded to datetime's microsecond precision.
        t = Time("2000-01-01 01:01:01.123456789", scale="tai")
        assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
        # broadcasting
        dt3 = (dt + (dt2 - dt) * np.arange(12)).reshape(4, 3)
        t3 = Time(dt3, scale="utc")
        assert t3.shape == (4, 3)
        assert t3[2, 1].value == dt3[2, 1]
        assert t3[2, 1] == Time(dt3[2, 1])
        assert np.all(t3.value == dt3)
        assert np.all(t3[1].value == dt3[1])
        assert np.all(t3[:, 2] == Time(dt3[:, 2]))
        assert Time(t3[2, 0]) == t3[2, 0]
    def test_datetime64(self):
        """Test the numpy datetime64 format, including nanosecond precision,
        arrays, and broadcasting to two dimensions."""
        dt64 = np.datetime64("2000-01-02T03:04:05.123456789")
        dt64_2 = np.datetime64("2000-01-02")
        t = Time(dt64, scale="utc", precision=9, format="datetime64")
        assert t.iso == "2000-01-02 03:04:05.123456789"
        assert t.datetime64 == dt64
        assert t.value == dt64
        t2 = Time(t.iso, scale="utc")
        assert t2.datetime64 == dt64
        # Date-only datetime64 input round-trips as midnight.
        t = Time(dt64_2, scale="utc", precision=3, format="datetime64")
        assert t.iso == "2000-01-02 00:00:00.000"
        assert t.datetime64 == dt64_2
        assert t.value == dt64_2
        t2 = Time(t.iso, scale="utc")
        assert t2.datetime64 == dt64_2
        t = Time([dt64, dt64_2], scale="utc", format="datetime64")
        assert np.all(t.value == [dt64, dt64_2])
        # Unlike datetime, datetime64 preserves nanoseconds without rounding.
        t = Time("2000-01-01 01:01:01.123456789", scale="tai")
        assert t.datetime64 == np.datetime64("2000-01-01T01:01:01.123456789")
        # broadcasting
        dt3 = (dt64 + (dt64_2 - dt64) * np.arange(12)).reshape(4, 3)
        t3 = Time(dt3, scale="utc", format="datetime64")
        assert t3.shape == (4, 3)
        assert t3[2, 1].value == dt3[2, 1]
        assert t3[2, 1] == Time(dt3[2, 1], format="datetime64")
        assert np.all(t3.value == dt3)
        assert np.all(t3[1].value == dt3[1])
        assert np.all(t3[:, 2] == Time(dt3[:, 2], format="datetime64"))
        assert Time(t3[2, 0], format="datetime64") == t3[2, 0]
    def test_epoch_transform(self):
        """Besselian and julian epoch transforms"""
        jd = 2457073.05631
        t = Time(jd, format="jd", scale="tai", precision=6)
        assert allclose_year(t.byear, 2015.1365941020817)
        assert allclose_year(t.jyear, 2015.1349933196439)
        assert t.byear_str == "B2015.136594"
        assert t.jyear_str == "J2015.134993"
        # Round trips through the numeric epoch formats recover the JD.
        t2 = Time(t.byear, format="byear", scale="tai")
        assert allclose_jd(t2.jd, jd)
        t2 = Time(t.jyear, format="jyear", scale="tai")
        assert allclose_jd(t2.jd, jd)
        # String epoch input is looser: only ~10 digits of precision survive.
        t = Time("J2015.134993", scale="tai", precision=6)
        assert np.allclose(
            t.jd, jd, rtol=1e-10, atol=0
        )  # J2015.134993 has 10 digit precision
        assert t.byear_str == "B2015.136594"
    def test_input_validation(self):
        """Wrong input type raises error"""
        times = [10, 20]
        with pytest.raises(ValueError):
            Time(times, format="iso", scale="utc")
        with pytest.raises(ValueError):
            Time("2000:001", format="jd", scale="utc")
        with pytest.raises(ValueError):  # unguessable
            Time([])
        # Mismatched / non-numeric val2 with numeric val.
        with pytest.raises(ValueError):
            Time([50000.0], ["bad"], format="mjd", scale="tai")
        with pytest.raises(ValueError):
            Time(50000.0, "bad", format="mjd", scale="tai")
        # 'Z' suffix implies UTC, conflicting with an explicit tai scale.
        with pytest.raises(ValueError):
            Time("2005-08-04T00:01:02.000Z", scale="tai")
        # regression test against #3396
        with pytest.raises(ValueError):
            Time(np.nan, format="jd", scale="utc")
        # Deprecated "(SCALE)" suffix conflicting with an explicit scale.
        with pytest.raises(ValueError):
            with pytest.warns(AstropyDeprecationWarning):
                Time("2000-01-02T03:04:05(TAI)", scale="utc")
        # Malformed "(SCALE)" suffixes (unbalanced parentheses).
        with pytest.raises(ValueError):
            Time("2000-01-02T03:04:05(TAI")
        with pytest.raises(ValueError):
            Time("2000-01-02T03:04:05(UT(NIST)")
    def test_utc_leap_sec(self):
        """Time behaves properly near or in UTC leap second. This
        uses the 2012-06-30 leap second for testing."""
        for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
            # Start with a day without a leap second and note rollover
            yyyy_mm = f"{year:04d}-{month:02d}"
            yyyy_mm_dd = f"{year:04d}-{month:02d}-{day:02d}"
            # 23:59:60 on a non-leap day warns and rolls over to the next day.
            with pytest.warns(ErfaWarning):
                t1 = Time(yyyy_mm + "-01 23:59:60.0", scale="utc")
                assert t1.iso == yyyy_mm + "-02 00:00:00.000"
            # Leap second is different
            t1 = Time(yyyy_mm_dd + " 23:59:59.900", scale="utc")
            assert t1.iso == yyyy_mm_dd + " 23:59:59.900"
            # Inside the leap second, second 60 is a valid UTC time.
            t1 = Time(yyyy_mm_dd + " 23:59:60.000", scale="utc")
            assert t1.iso == yyyy_mm_dd + " 23:59:60.000"
            t1 = Time(yyyy_mm_dd + " 23:59:60.999", scale="utc")
            assert t1.iso == yyyy_mm_dd + " 23:59:60.999"
            if month == 6:
                yyyy_mm_dd_plus1 = f"{year:04d}-07-01"
            else:
                yyyy_mm_dd_plus1 = f"{year + 1:04d}-01-01"
            # Second 61 does not exist even on a leap day: warn and roll over.
            with pytest.warns(ErfaWarning):
                t1 = Time(yyyy_mm_dd + " 23:59:61.0", scale="utc")
                assert t1.iso == yyyy_mm_dd_plus1 + " 00:00:00.000"
            # Delta time gives 2 seconds here as expected
            t0 = Time(yyyy_mm_dd + " 23:59:59", scale="utc")
            t1 = Time(yyyy_mm_dd_plus1 + " 00:00:00", scale="utc")
            assert allclose_sec((t1 - t0).sec, 2.0)
    def test_init_from_time_objects(self):
        """Initialize from one or more Time objects"""
        t1 = Time("2007:001", scale="tai")
        t2 = Time(["2007-01-02", "2007-01-03"], scale="utc")
        # Init from a list of Time objects without an explicit scale
        t3 = Time([t1, t2])
        # Test that init appropriately combines a scalar (t1) and list (t2)
        # and that scale and format are same as first element.
        assert len(t3) == 3
        assert t3.scale == t1.scale
        assert t3.format == t1.format  # t1 format is yday
        assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
        # Init from a single Time object without a scale
        t3 = Time(t1)
        assert t3.isscalar
        assert t3.scale == t1.scale
        assert t3.format == t1.format
        assert np.all(t3.value == t1.value)
        # Init from a single Time object with scale specified
        t3 = Time(t1, scale="utc")
        assert t3.scale == "utc"
        assert np.all(t3.value == t1.utc.value)
        # Init from a list of Time object with scale specified
        t3 = Time([t1, t2], scale="tt")
        assert t3.scale == "tt"
        assert t3.format == t1.format  # yday
        assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
        # OK, how likely is this... but might as well test.
        # Concatenating (2, 5)- and (1, 5)-shaped pieces gives shape (3, 5).
        mjd = np.arange(50000.0, 50006.0)
        frac = np.arange(0.0, 0.999, 0.2)
        t4 = Time(mjd[:, np.newaxis] + frac, format="mjd", scale="utc")
        t5 = Time([t4[:2], t4[4:5]])
        assert t5.shape == (3, 5)
        # throw error when deriving local scale time
        # from non local time scale
        with pytest.raises(ValueError):
            Time(t1, scale="local")
class TestVal2:
    """Tests related to val2, the optional second input value."""
    @pytest.mark.parametrize(
        "d",
        [
            dict(val="2001:001", val2="ignored", scale="utc"),
            dict(
                val={
                    "year": 2015,
                    "month": 2,
                    "day": 3,
                    "hour": 12,
                    "minute": 13,
                    "second": 14.567,
                },
                val2="ignored",
                scale="utc",
            ),
            dict(val=np.datetime64("2005-02-25"), val2="ignored", scale="utc"),
            dict(
                val=datetime.datetime(2000, 1, 2, 12, 0, 0), val2="ignored", scale="utc"
            ),
        ],
    )
    def test_unused_val2_raises(self, d):
        """Providing val2 with an input type that cannot use it (string,
        dict, datetime64, datetime) raises ValueError instead of silently
        ignoring it."""
        with pytest.raises(ValueError):
            Time(**d)
    def test_val2(self):
        """Various tests of the val2 input"""
        # val and val2 are interchangeable: (0, 50000) == (50000, 0).
        t = Time([0.0, 50000.0], [50000.0, 0.0], format="mjd", scale="tai")
        assert t.mjd[0] == t.mjd[1]
        assert t.jd[0] == t.jd[1]
    def test_val_broadcasts_against_val2(self):
        """val of shape (7, 1) against val2 of shape (5,) broadcasts to
        (7, 5); incompatible shapes raise ValueError."""
        mjd = np.arange(50000.0, 50007.0)
        frac = np.arange(0.0, 0.999, 0.2)
        t = Time(mjd[:, np.newaxis], frac, format="mjd", scale="utc")
        assert t.shape == (7, 5)
        with pytest.raises(ValueError):
            Time([0.0, 50000.0], [0.0, 1.0, 2.0], format="mjd", scale="tai")
    def test_broadcast_not_writable(self):
        """Item assignment must work even when jd1/jd2 came from broadcast
        (hence non-writable) arrays, matching a manually broadcast copy."""
        val = (2458000 + np.arange(3))[:, None]
        val2 = np.linspace(0, 1, 4, endpoint=False)
        t = Time(val=val, val2=val2, format="jd", scale="tai")
        # t_b is built from explicitly materialized (writable) arrays.
        t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
        t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
        t_b[1, 2] = t_i
        t[1, 2] = t_i
        assert t_b[1, 2] == t[1, 2], "writing worked"
        assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
        assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
        assert np.all(t_b == t), "behaved as expected"
    def test_broadcast_one_not_writable(self):
        """Same as above, but with only one of the two inputs broadcast."""
        val = 2458000 + np.arange(3)
        val2 = np.arange(1)
        t = Time(val=val, val2=val2, format="jd", scale="tai")
        t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
        t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
        t_b[1] = t_i
        t[1] = t_i
        assert t_b[1] == t[1], "writing worked"
        assert t_b[0] == t[0], "broadcasting didn't cause problems"
        assert np.all(t_b == t), "behaved as expected"
class TestSubFormat:
    """Test input and output subformat functionality"""
    def test_input_subformat(self):
        """Input subformat selection"""
        # Heterogeneous input formats with in_subfmt='*' (default)
        times = [
            "2000-01-01",
            "2000-01-01 01:01",
            "2000-01-01 01:01:01",
            "2000-01-01 01:01:01.123",
        ]
        t = Time(times, format="iso", scale="tai")
        assert np.all(
            t.iso
            == np.array(
                [
                    "2000-01-01 00:00:00.000",
                    "2000-01-01 01:01:00.000",
                    "2000-01-01 01:01:01.000",
                    "2000-01-01 01:01:01.123",
                ]
            )
        )
        # Heterogeneous input formats with in_subfmt='date_*'
        times = ["2000-01-01 01:01", "2000-01-01 01:01:01", "2000-01-01 01:01:01.123"]
        t = Time(times, format="iso", scale="tai", in_subfmt="date_*")
        assert np.all(
            t.iso
            == np.array(
                [
                    "2000-01-01 01:01:00.000",
                    "2000-01-01 01:01:01.000",
                    "2000-01-01 01:01:01.123",
                ]
            )
        )
    def test_input_subformat_fail(self):
        """Failed format matching"""
        # in_subfmt='date' accepts only a bare date; a time part must fail.
        with pytest.raises(ValueError):
            Time("2000-01-01 01:01", format="iso", scale="tai", in_subfmt="date")
    def test_bad_input_subformat(self):
        """Non-existent input subformat"""
        with pytest.raises(ValueError):
            Time(
                "2000-01-01 01:01", format="iso", scale="tai", in_subfmt="doesnt exist"
            )
    def test_output_subformat(self):
        """Output subformat selection"""
        # Heterogeneous input formats with in_subfmt='*' (default),
        # all rendered through the date_hm output subformat.
        times = [
            "2000-01-01",
            "2000-01-01 01:01",
            "2000-01-01 01:01:01",
            "2000-01-01 01:01:01.123",
        ]
        t = Time(times, format="iso", scale="tai", out_subfmt="date_hm")
        assert np.all(
            t.iso
            == np.array(
                [
                    "2000-01-01 00:00",
                    "2000-01-01 01:01",
                    "2000-01-01 01:01",
                    "2000-01-01 01:01",
                ]
            )
        )
    def test_fits_format(self):
        """FITS format includes bigger years."""
        # Heterogeneous input formats with in_subfmt='*' (default)
        times = ["2000-01-01", "2000-01-01T01:01:01", "2000-01-01T01:01:01.123"]
        t = Time(times, format="fits", scale="tai")
        assert np.all(
            t.fits
            == np.array(
                [
                    "2000-01-01T00:00:00.000",
                    "2000-01-01T01:01:01.000",
                    "2000-01-01T01:01:01.123",
                ]
            )
        )
        # Explicit long format for output, default scale is UTC.
        t2 = Time(times, format="fits", out_subfmt="long*")
        assert np.all(
            t2.fits
            == np.array(
                [
                    "+02000-01-01T00:00:00.000",
                    "+02000-01-01T01:01:01.000",
                    "+02000-01-01T01:01:01.123",
                ]
            )
        )
        # Implicit long format for output, because of negative year.
        times[2] = "-00594-01-01"
        t3 = Time(times, format="fits", scale="tai")
        assert np.all(
            t3.fits
            == np.array(
                [
                    "+02000-01-01T00:00:00.000",
                    "+02000-01-01T01:01:01.000",
                    "-00594-01-01T00:00:00.000",
                ]
            )
        )
        # Implicit long format for output, because of large positive year.
        times[2] = "+10594-01-01"
        t4 = Time(times, format="fits", scale="tai")
        assert np.all(
            t4.fits
            == np.array(
                [
                    "+02000-01-01T00:00:00.000",
                    "+02000-01-01T01:01:01.000",
                    "+10594-01-01T00:00:00.000",
                ]
            )
        )
    def test_yday_format(self):
        """Year:Day_of_year format"""
        # Heterogeneous input formats with in_subfmt='*' (default)
        times = ["2000-12-01", "2001-12-01 01:01:01.123"]
        t = Time(times, format="iso", scale="tai")
        # out_subfmt is mutable after construction and affects .yday output.
        t.out_subfmt = "date_hm"
        assert np.all(t.yday == np.array(["2000:336:00:00", "2001:335:01:01"]))
        t.out_subfmt = "*"
        assert np.all(
            t.yday == np.array(["2000:336:00:00:00.000", "2001:335:01:01:01.123"])
        )
    def test_scale_input(self):
        """Test for issues related to scale input"""
        # Check case where required scale is defined by the TimeFormat.
        # All three should work.
        t = Time(100.0, format="cxcsec", scale="utc")
        assert t.scale == "utc"
        t = Time(100.0, format="unix", scale="tai")
        assert t.scale == "tai"
        t = Time(100.0, format="gps", scale="utc")
        assert t.scale == "utc"
        # Check that bad scale is caught when format is specified
        with pytest.raises(ScaleValueError):
            Time(1950.0, format="byear", scale="bad scale")
        # Check that bad scale is caught when format is auto-determined
        with pytest.raises(ScaleValueError):
            Time("2000:001:00:00:00", scale="bad scale")
    def test_fits_scale(self):
        """Test that the previous FITS-string formatting can still be handled
        but with a DeprecationWarning."""
        for inputs in (
            ("2000-01-02(TAI)", "tai"),
            ("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
            ("2014-12-12T01:00:44.1(UTC)", "utc"),
        ):
            with pytest.warns(AstropyDeprecationWarning):
                t = Time(inputs[0])
            assert t.scale == inputs[1]
            # Create Time using normal ISOT syntax and compare with FITS
            t2 = Time(inputs[0][: inputs[0].index("(")], format="isot", scale=inputs[1])
            assert t == t2
        # Explicit check that conversions still work despite warning
        with pytest.warns(AstropyDeprecationWarning):
            t = Time("1999-01-01T00:00:00.123456789(UTC)")
        t = t.tai
        assert t.isot == "1999-01-01T00:00:32.123"
        with pytest.warns(AstropyDeprecationWarning):
            t = Time("1999-01-01T00:00:32.123456789(TAI)")
        t = t.utc
        assert t.isot == "1999-01-01T00:00:00.123"
        # Check scale consistency
        with pytest.warns(AstropyDeprecationWarning):
            t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="tai")
        assert t.scale == "tai"
        with pytest.warns(AstropyDeprecationWarning):
            t = Time("1999-01-01T00:00:32.123456789(ET)", scale="tt")
        assert t.scale == "tt"
        # A suffix scale conflicting with the explicit one must raise.
        with pytest.raises(ValueError), pytest.warns(AstropyDeprecationWarning):
            t = Time("1999-01-01T00:00:32.123456789(TAI)", scale="utc")
    def test_scale_default(self):
        """Test behavior when no scale is provided"""
        # These first three are TimeFromEpoch and have an intrinsic time scale
        t = Time(100.0, format="cxcsec")
        assert t.scale == "tt"
        t = Time(100.0, format="unix")
        assert t.scale == "utc"
        t = Time(100.0, format="gps")
        assert t.scale == "tai"
        # Date-like strings default to UTC.
        for date in ("2000:001", "2000-01-01T00:00:00"):
            t = Time(date)
            assert t.scale == "utc"
        # Epoch formats default to TT.
        t = Time(2000.1, format="byear")
        assert t.scale == "tt"
        t = Time("J2000")
        assert t.scale == "tt"
    def test_epoch_times(self):
        """Test time formats derived from EpochFromTime"""
        t = Time(0.0, format="cxcsec", scale="tai")
        assert t.tt.iso == "1998-01-01 00:00:00.000"
        # Create new time object from this one and change scale, format
        t2 = Time(t, scale="tt", format="iso")
        assert t2.value == "1998-01-01 00:00:00.000"
        # Value take from Chandra.Time.DateTime('2010:001:00:00:00').secs
        t_cxcsec = 378691266.184
        t = Time(t_cxcsec, format="cxcsec", scale="utc")
        assert allclose_sec(t.value, t_cxcsec)
        assert allclose_sec(t.cxcsec, t_cxcsec)
        assert allclose_sec(t.tt.value, t_cxcsec)
        assert allclose_sec(t.tt.cxcsec, t_cxcsec)
        assert t.yday == "2010:001:00:00:00.000"
        t = Time("2010:001:00:00:00.000", scale="utc")
        assert allclose_sec(t.cxcsec, t_cxcsec)
        assert allclose_sec(t.tt.cxcsec, t_cxcsec)
        # Round trip through epoch time
        for scale in ("utc", "tt"):
            t = Time("2000:001", scale=scale)
            t2 = Time(t.unix, scale=scale, format="unix")
            assert getattr(t2, scale).iso == "2000-01-01 00:00:00.000"
        # Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
        t = Time("2013-05-20 21:18:46", scale="utc")
        assert allclose_sec(t.unix, 1369084726.0)
        assert allclose_sec(t.tt.unix, 1369084726.0)
        # Values from issue #1118
        t = Time("2004-09-16T23:59:59", scale="utc")
        assert allclose_sec(t.unix, 1095379199.0)
    def test_plot_date(self):
        """Test the plot_date format.
        Depending on the situation with matplotlib, this can give different
        results because the plot date epoch time changed in matplotlib 3.3. This
        test tries to use the matplotlib date2num function to make the test
        independent of version, but if matplotlib isn't available then the code
        (and test) use the pre-3.3 epoch.
        """
        try:
            from matplotlib.dates import date2num
        except ImportError:
            # No matplotlib, in which case this uses the epoch 0000-12-31
            # as per matplotlib < 3.3.
            # Value from:
            # matplotlib.dates.set_epoch('0000-12-31')
            # val = matplotlib.dates.date2num('2000-01-01')
            val = 730120.0
        else:
            val = date2num(datetime.datetime(2000, 1, 1))
        t = Time("2000-01-01 00:00:00", scale="utc")
        assert np.allclose(t.plot_date, val, atol=1e-5, rtol=0)
class TestNumericalSubFormat:
    def test_explicit_example(self):
        """String MJD input preserves precision beyond a double; float output
        loses it, while 'str'/'bytes'/'long' output subformats retain it."""
        t = Time("54321.000000000001", format="mjd")
        assert t == Time(54321, 1e-12, format="mjd")
        assert t.mjd == 54321.0  # Lost precision!
        assert t.value == 54321.0  # Lost precision!
        assert t.to_value("mjd") == 54321.0  # Lost precision!
        assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
        assert t.to_value("mjd", "bytes") == b"54321.000000000001"
        expected_long = np.longdouble(54321.0) + np.longdouble(1e-12)
        # Check we're the same to within the double holding jd2
        # (which is less precise than longdouble on arm64).
        assert np.allclose(
            t.to_value("mjd", subfmt="long"),
            expected_long,
            rtol=0,
            atol=np.finfo(float).eps,
        )
        # With out_subfmt='str', .value and .mjd return strings ...
        t.out_subfmt = "str"
        assert t.value == "54321.000000000001"
        assert t.to_value("mjd") == 54321.0  # Lost precision!
        assert t.mjd == "54321.000000000001"
        assert t.to_value("mjd", subfmt="bytes") == b"54321.000000000001"
        assert t.to_value("mjd", subfmt="float") == 54321.0  # Lost precision!
        # ... and with out_subfmt='long' they return longdouble values.
        t.out_subfmt = "long"
        assert np.allclose(t.value, expected_long, rtol=0.0, atol=np.finfo(float).eps)
        assert np.allclose(
            t.to_value("mjd", subfmt=None),
            expected_long,
            rtol=0.0,
            atol=np.finfo(float).eps,
        )
        assert np.allclose(t.mjd, expected_long, rtol=0.0, atol=np.finfo(float).eps)
        assert t.to_value("mjd", subfmt="str") == "54321.000000000001"
        assert t.to_value("mjd", subfmt="float") == 54321.0  # Lost precision!
    @pytest.mark.skipif(
        np.finfo(np.longdouble).eps >= np.finfo(float).eps,
        reason="long double is the same as float",
    )
    def test_explicit_longdouble(self):
        """Longdouble MJD input keeps precision a plain float would lose,
        and round-trips through the 'long' output subformat."""
        i = 54321
        # Create a different long double (which will give a different jd2
        # even when long doubles are more precise than Time, as on arm64).
        f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
        mjd_long = np.longdouble(i) + np.longdouble(f)
        assert mjd_long != i, "longdouble failure!"
        t = Time(mjd_long, format="mjd")
        expected = Time(i, f, format="mjd")
        assert abs(t - expected) <= 20.0 * u.ps
        # The same value as a plain float collapses to the integer part.
        t_float = Time(i + f, format="mjd")
        assert t_float == Time(i, format="mjd")
        assert t_float != t
        assert t.value == 54321.0  # Lost precision!
        assert np.allclose(
            t.to_value("mjd", subfmt="long"),
            mjd_long,
            rtol=0.0,
            atol=np.finfo(float).eps,
        )
        # out_subfmt='long' set at construction also preserves the value.
        t2 = Time(mjd_long, format="mjd", out_subfmt="long")
        assert np.allclose(t2.value, mjd_long, rtol=0.0, atol=np.finfo(float).eps)
@pytest.mark.skipif(
np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float",
)
def test_explicit_longdouble_one_val(self):
"""Ensure either val1 or val2 being longdouble is possible.
Regression test for issue gh-10033.
"""
i = 54321
f = max(2.0 ** (-np.finfo(np.longdouble).nmant) * 65536, np.finfo(float).eps)
t1 = Time(i, f, format="mjd")
t2 = Time(np.longdouble(i), f, format="mjd")
t3 = Time(i, np.longdouble(f), format="mjd")
t4 = Time(np.longdouble(i), np.longdouble(f), format="mjd")
assert t1 == t2 == t3 == t4
    @pytest.mark.skipif(
        np.finfo(np.longdouble).eps >= np.finfo(float).eps,
        reason="long double is the same as float",
    )
    @pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
    def test_longdouble_for_other_types(self, fmt):
        """Longdouble precision survives the 'long' sub-format for day- and
        second-based epoch formats alike."""
        t_fmt = getattr(Time(58000, format="mjd"), fmt)  # Get regular float
        t_fmt_long = np.longdouble(t_fmt)
        # Create a different long double (ensuring it will give a different jd2
        # even when long doubles are more precise than Time, as on arm64).
        # Tolerance is in the format's own unit: days for mjd, seconds otherwise.
        atol = np.finfo(float).eps * (1.0 if fmt == "mjd" else 24.0 * 3600.0)
        t_fmt_long2 = t_fmt_long + max(
            t_fmt_long * np.finfo(np.longdouble).eps * 2, atol
        )
        assert t_fmt_long != t_fmt_long2, "longdouble weird!"
        tm = Time(t_fmt_long, format=fmt)
        tm2 = Time(t_fmt_long2, format=fmt)
        # The two inputs must remain distinguishable as Time objects ...
        assert tm != tm2
        # ... and the 'long' sub-format must reproduce the extra precision.
        tm_long2 = tm2.to_value(fmt, subfmt="long")
        assert np.allclose(tm_long2, t_fmt_long2, rtol=0.0, atol=atol)
def test_subformat_input(self):
s = "54321.01234567890123456789"
i, f = s.split(".") # Note, OK only for fraction < 0.5
t = Time(float(i), float("." + f), format="mjd")
t_str = Time(s, format="mjd")
t_bytes = Time(s.encode("ascii"), format="mjd")
t_decimal = Time(Decimal(s), format="mjd")
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize("out_subfmt", ("str", "bytes"))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0.0, 1e-9, 1e-12])
t = Time(i, f, format="mjd", out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(
["54321.0", "54321.000000001", "54321.000000000001"], dtype=out_subfmt
)
assert np.all(t_value == expected)
assert np.all(Time(expected, format="mjd") == t)
# Explicit sub-format.
t = Time(i, f, format="mjd")
t_mjd_subfmt = t.to_value("mjd", subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize(
"fmt,string,val1,val2",
[
("jd", "2451544.5333981", 2451544.5, 0.0333981),
("decimalyear", "2000.54321", 2000.0, 0.54321),
("cxcsec", "100.0123456", 100.0123456, None),
("unix", "100.0123456", 100.0123456, None),
("gps", "100.0123456", 100.0123456, None),
("byear", "1950.1", 1950.1, None),
("jyear", "2000.1", 2000.1, None),
],
)
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt="str") == string
def test_basic_subformat_setting(self):
t = Time("2001", format="jyear", scale="tai")
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time("2001", format="jyear", scale="tai")
t.to_value("mjd", subfmt="str")
assert ("mjd", "str") in t.cache["format"]
t.to_value("mjd", "str")
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time("2001", format="jyear", scale="tai")
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time("2001", format="jyear", scale="tai")
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
t2_s_40 = t.to_value(fmt, "str")
assert (
t_s_2 == t2_s_40
), "String representation should not depend on Decimal context"
    def test_decimal_context_caching(self):
        """Cached Decimal values are independent of the decimal context."""
        t = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
        with localcontext() as ctx:
            ctx.prec = 2
            t_s_2 = t.to_value("mjd", subfmt="decimal")
        t2 = Time(val=58000, val2=1e-14, format="mjd", scale="tai")
        with localcontext() as ctx:
            ctx.prec = 40
            # Same instance t: this value may come straight from the cache.
            t_s_40 = t.to_value("mjd", subfmt="decimal")
            # Fresh instance t2: computed under prec=40 with no cache.
            t2_s_40 = t2.to_value("mjd", subfmt="decimal")
        assert t_s_2 == t_s_40, "Should be the same but cache might make this automatic"
        assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize(
"f, s, t",
[
("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str),
],
)
def test_timedelta_basic(self, f, s, t):
dt = Time("58000", format="mjd", scale="tai") - Time(
"58001", format="mjd", scale="tai"
)
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time("J2000")
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match="format must be one of"):
t.to_value("julian")
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match="not among selected"):
Time("58000", format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(np.longdouble(58000), format="mjd", in_subfmt="float")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="str")
with pytest.raises(ValueError, match="not among selected"):
Time(58000.0, format="mjd", in_subfmt="long")
def test_wrong_subfmt(self):
t = Time(58000.0, format="mjd")
with pytest.raises(ValueError, match="must match one"):
t.to_value("mjd", subfmt="parrot")
with pytest.raises(ValueError, match="must match one"):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match="must match one"):
t.in_subfmt = "parrot"
def test_not_allowed_subfmt(self):
"""Test case where format has no defined subfmts"""
t = Time("J2000")
match = "subformat not allowed for format jyear_str"
with pytest.raises(ValueError, match=match):
t.to_value("jyear_str", subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.out_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", out_subfmt="parrot")
with pytest.raises(ValueError, match=match):
t.in_subfmt = "parrot"
with pytest.raises(ValueError, match=match):
Time("J2000", format="jyear_str", in_subfmt="parrot")
def test_switch_to_format_with_no_out_subfmt(self):
t = Time("2001-01-01", out_subfmt="date_hm")
assert t.out_subfmt == "date_hm"
# Now do an in-place switch to format 'jyear_str' that has no subfmts
# where out_subfmt is changed to '*'.
t.format = "jyear_str"
assert t.out_subfmt == "*"
assert t.value == "J2001.001"
class TestSofaErrors:
    """Test that erfa status return values are handled correctly"""

    def test_bad_time(self):
        """erfa.cal2jd raises on fatal input and only warns when recoverable."""
        year = np.array([2000], dtype=np.intc)
        month = np.array([2000], dtype=np.intc)  # bad month
        day = np.array([2000], dtype=np.intc)  # bad day

        with pytest.raises(ValueError):  # bad month, fatal error
            djm0, djm = erfa.cal2jd(year, month, day)

        year[0] = -5000
        month[0] = 2
        with pytest.raises(ValueError):  # bad year, fatal error
            djm0, djm = erfa.cal2jd(year, month, day)

        year[0] = 2000
        # A bad day is non-fatal: the JD is still computed, with a warning.
        with pytest.warns(ErfaWarning, match=r"bad day \(JD computed\)") as w:
            djm0, djm = erfa.cal2jd(year, month, day)
        assert len(w) == 1
        assert allclose_jd(djm0, [2400000.5])
        assert allclose_jd(djm, [53574.0])
class TestCopyReplicate:
    """Test issues related to copying and replicating data"""

    def test_immutable_input(self):
        """Internals are never mutable."""
        jds = np.array([2450000.5], dtype=np.double)
        t = Time(jds, format="jd", scale="tai")
        assert allclose_jd(t.jd, jds)
        # Mutating the input array after construction must not affect t.
        jds[0] = 2458654
        assert not allclose_jd(t.jd, jds)
        mjds = np.array([50000.0], dtype=np.double)
        t = Time(mjds, format="mjd", scale="tai")
        assert allclose_jd(t.jd, [2450000.5])
        mjds[0] = 0.0
        assert allclose_jd(t.jd, [2450000.5])

    def test_replicate(self):
        """Test replicate method: the copy SHARES internal data with the original."""
        t = Time(["2000:001"], format="yday", scale="tai", location=("45d", "45d"))
        t_yday = t.yday
        t_loc_x = t.location.x.copy()
        t2 = t.replicate()
        assert t.yday == t2.yday
        assert t.format == t2.format
        assert t.scale == t2.scale
        assert t.location == t2.location
        # This is not allowed publicly, but here we hack the internal time
        # and location values to show that t and t2 are sharing references.
        t2._time.jd1 += 100.0
        # Need to delete the cached yday attributes (only an issue because
        # of the internal _time hack).
        del t.cache
        del t2.cache
        assert t.yday == t2.yday
        assert t.yday != t_yday  # prove that it changed
        t2_loc_x_view = t2.location.x
        t2_loc_x_view[()] = 0  # use 0 to avoid having to give units
        assert t2.location.x == t2_loc_x_view
        assert t.location.x == t2.location.x
        assert t.location.x != t_loc_x  # prove that it changed

    def test_copy(self):
        """Test copy method: the copy is fully INDEPENDENT of the original."""
        t = Time("2000:001", format="yday", scale="tai", location=("45d", "45d"))
        t_yday = t.yday
        t_loc_x = t.location.x.copy()
        t2 = t.copy()
        assert t.yday == t2.yday
        # This is not allowed publicly, but here we hack the internal time
        # and location values to show that t and t2 are not sharing references.
        t2._time.jd1 += 100.0
        # Need to delete the cached yday attributes (only an issue because
        # of the internal _time hack).
        del t.cache
        del t2.cache
        assert t.yday != t2.yday
        assert t.yday == t_yday  # prove that it did not change
        t2_loc_x_view = t2.location.x
        t2_loc_x_view[()] = 0  # use 0 to avoid having to give units
        assert t2.location.x == t2_loc_x_view
        assert t.location.x != t2.location.x
        assert t.location.x == t_loc_x  # prove that t's location did not change
class TestStardate:
    """Sync chronometers with Starfleet Command"""

    def test_iso_to_stardate(self):
        """ISO dates convert to the expected stardate prefixes."""
        expectations = [
            ("2320-01-01", "1368.99"),
            ("2330-01-01", "10552.76"),
            ("2340-01-01", "19734.02"),
        ]
        for iso, prefix in expectations:
            stardate = str(Time(iso, scale="tai").stardate)
            assert stardate[: len(prefix)] == prefix

    @pytest.mark.parametrize(
        "dates",
        [
            (10000, "2329-05-26 03:02"),
            (20000, "2340-04-15 19:05"),
            (30000, "2351-03-07 11:08"),
        ],
    )
    def test_stardate_to_iso(self, dates):
        """Stardates convert back to the expected ISO date/time strings."""
        stardate, iso = dates
        t_star = Time(stardate, format="stardate")
        assert Time(t_star, format="iso", out_subfmt="date_hm").value == iso
def test_python_builtin_copy():
    """copy.copy and copy.deepcopy both preserve the time value."""
    original = Time("2000:001", format="yday", scale="tai")
    shallow = copy.copy(original)
    deep = copy.deepcopy(original)
    assert original.jd == shallow.jd
    assert original.jd == deep.jd
def test_now():
    """
    Tests creating a Time object with the `now` class method.
    """
    before = datetime.datetime.utcnow()

    t = Time.now()
    assert t.format == "datetime"
    assert t.scale == "utc"

    # Allow a generous 0.1 s between the `utcnow` call and the `Time`
    # initializer; typical differences are microseconds, but some platforms
    # may have slow clock calls.
    elapsed = t.datetime - before  # a datetime.timedelta object
    assert elapsed.total_seconds() < 0.1
def test_decimalyear():
    """decimalyear maps linearly between consecutive year boundaries."""
    t = Time("2001:001", format="yday")
    assert t.decimalyear == 2001.0

    t = Time(2000.0, [0.5, 0.75], format="decimalyear")
    assert np.all(t.value == [2000.5, 2000.75])

    # Fractions of a year correspond to fractions of the year's JD span.
    jd_start = Time("2000:001").jd
    jd_end = Time("2001:001").jd
    span = jd_end - jd_start
    assert np.all(t.jd == [jd_start + 0.5 * span, jd_start + 0.75 * span])
def test_fits_year0():
    """FITS strings use the expanded (+/-)00000 year notation before year 1."""
    # JD 1721425.5 is 0001-01-01; step back by year lengths (366, then 365).
    cases = [
        (1721425.5, "0001-01-01T00:00:00.000"),
        (1721425.5 - 366.0, "+00000-01-01T00:00:00.000"),
        (1721425.5 - 366.0 - 365.0, "-00001-01-01T00:00:00.000"),
    ]
    for jd, expected in cases:
        assert Time(jd, format="jd", scale="tai").fits == expected
def test_fits_year10000():
    """FITS strings switch to the +10000 notation at year 10000."""
    # JD 5373484.5 is +10000-01-01.
    assert Time(5373484.5, format="jd", scale="tai").fits == (
        "+10000-01-01T00:00:00.000"
    )
    # One year earlier still uses the plain 4-digit form.
    assert Time(5373484.5 - 365.0, format="jd", scale="tai").fits == (
        "9999-01-01T00:00:00.000"
    )
    # One second before year 10000 does too.
    t = Time(5373484.5, -1.0 / 24.0 / 3600.0, format="jd", scale="tai")
    assert t.fits == "9999-12-31T23:59:59.000"
def test_dir():
    """Scale attributes such as 'utc' appear in dir() of a Time object."""
    t = Time("2000:001", format="yday", scale="tai")
    assert "utc" in dir(t)
def test_time_from_epoch_jds():
    """Test that jd1/jd2 in a TimeFromEpoch format is always well-formed:
    jd1 is an integral value and abs(jd2) <= 0.5.
    """
    # From 1999:001 00:00 to 1999:002 12:00 by a non-round step. This will
    # catch jd2 == 0 and a case of abs(jd2) == 0.5.
    cxcsecs = np.linspace(0, 86400 * 1.5, 49)
    # Scalar construction, one value at a time.
    for cxcsec in cxcsecs:
        t = Time(cxcsec, format="cxcsec")
        assert np.round(t.jd1) == t.jd1
        assert np.abs(t.jd2) <= 0.5
    # Array construction over the same values at once.
    t = Time(cxcsecs, format="cxcsec")
    assert np.all(np.round(t.jd1) == t.jd1)
    assert np.all(np.abs(t.jd2) <= 0.5)
    assert np.any(np.abs(t.jd2) == 0.5)  # At least one exactly 0.5
def test_bool():
    """Any Time object should evaluate to True unless it is empty [#3520]."""
    t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
    assert bool(t) is True
    assert bool(t[0]) is True
    # Only a zero-length Time is falsy.
    assert bool(t[:0]) is False
def test_len_size():
    """Check length of Time objects and that scalar ones do not have one."""
    t = Time(np.arange(50000, 50010), format="mjd", scale="utc")
    assert len(t) == 10
    assert t.size == 10

    t2d = Time(np.arange(50000, 50010).reshape(2, 5), format="mjd", scale="utc")
    assert len(t2d) == 2
    assert t2d.size == 10

    # Length 1 and length 0 arrays are both fine.
    assert len(t[:1]) == 1 and t[:1].size == 1
    assert len(t[:0]) == 0 and t[:0].size == 0

    # A scalar Time has no length, and says so explicitly (not the generic
    # "object of type 'float' has no len()").
    scalar = t[0]
    with pytest.raises(TypeError) as err:
        len(scalar)
    assert "Time" in str(err.value)
def test_TimeFormat_scale():
    """Guard against recurrence of #1122, where the TimeFormat class lost
    user-set attributes (delta_ut1_utc here), preventing conversion to
    unix, cxc."""
    t = Time("1900-01-01", scale="ut1")
    t.delta_ut1_utc = 0.0
    # A dubious-year ERFA warning is expected for a 1900 UT1 epoch.
    with pytest.warns(ErfaWarning):
        t.unix
    # The explicitly-set delta_ut1_utc must still be in effect here.
    assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion(monkeypatch):
    # Check that if we have internet, and downloading is allowed, we
    # can get conversion to UT1 for the present, since we will download
    # IERS_A in IERS_Auto.
    monkeypatch.setattr("astropy.utils.iers.conf.auto_download", True)
    # Constructing with scale="ut1" forces the UT1 conversion at init time.
    Time(Time.now().cxcsec, format="cxcsec", scale="ut1")
def test_byteorder():
    """Ensure that bigendian and little-endian both work (closes #2942)"""
    mjd = np.array([53000.00, 54000.00])
    reference = Time(mjd, format="mjd")
    # Both explicit byte orders must parse identically to the native one.
    for byte_order in (">f8", "<f8"):
        swapped = Time(mjd.astype(byte_order), format="mjd")
        assert np.all(swapped == reference)
def test_datetime_tzinfo():
    """
    Test #3160 that time zone info in datetime objects is respected.
    """

    class UTCMinus6(datetime.tzinfo):
        def utcoffset(self, dt):
            return datetime.timedelta(hours=-6)

    local = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=UTCMinus6())
    # 10:03:04 at UTC-6 is 16:03:04 UTC.
    assert Time(local).value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
    """
    Test having a custom subfmts with a regular expression
    """

    # Note: defining this subclass registers the 'longyear' format with Time.
    class TimeLongYear(TimeString):
        name = "longyear"
        subfmts = (
            (
                "date",
                r"(?P<year>[+-]\d{5})-%m-%d",  # hybrid regex/strptime pattern
                "{year:+06d}-{mon:02d}-{day:02d}",
            ),
        )

    t = Time("+02000-02-03", format="longyear")
    assert t.value == "+02000-02-03"
    # The 5-digit year must parse to the same instant as the plain date.
    assert t.jd == Time("2000-02-03").jd
def test_set_format_basic():
    """
    Test basics of setting format attribute.
    """
    expectations = (
        ("jd", 2451577.5),
        ("mjd", 51577.0),
        ("cxcsec", 65923264.184),  # confirmed with Chandra.Time
        ("datetime", datetime.datetime(2000, 2, 3, 0, 0)),
        ("iso", "2000-02-03 00:00:00.000"),
    )
    for fmt, expected in expectations:
        t = Time("+02000-02-03", format="fits")
        t0 = t.replicate()
        t.format = fmt
        assert t.value == expected
        # Setting the format must not touch the internal jd1/jd2 arrays.
        assert t._time.jd1 is t0._time.jd1
        assert t._time.jd2 is t0._time.jd2
def test_unix_tai_format():
    """unix_tai differs from unix by the accumulated TAI-UTC offset."""
    # 37 leap seconds (TAI - UTC) apply as of 2020.
    t = Time("2020-01-01", scale="utc")
    assert allclose_sec(t.unix_tai - t.unix, 37.0)
    # At the 1970 epoch the offset was about 8.000082 s.
    t = Time("1970-01-01", scale="utc")
    assert allclose_sec(t.unix_tai - t.unix, 8 + 8.2e-05)
def test_set_format_shares_subfmt():
    """
    Set format and round trip through a format that shares out_subfmt
    """
    t = Time("+02000-02-03", format="fits", out_subfmt="date_hms", precision=5)
    original = t.copy()

    # 'isot' also offers a 'date_hms' sub-format, so both settings carry over.
    t.format = "isot"
    assert t.precision == 5
    assert t.out_subfmt == "date_hms"
    assert t.value == "2000-02-03T00:00:00.00000"

    t.format = "fits"
    assert t.value == original.value
    assert t.precision == 5
def test_set_format_does_not_share_subfmt():
    """
    Set format and round trip through a format that does not share out_subfmt
    """
    t = Time("+02000-02-03", format="fits", out_subfmt="longdate")

    # 'isot' has no 'longdate' sub-format, so fall back to the default '*'.
    t.format = "isot"
    assert t.out_subfmt == "*"
    assert t.value == "2000-02-03T00:00:00.000"

    # Going back to fits keeps the default, rendered as date_hms.
    t.format = "fits"
    assert t.out_subfmt == "*"
    assert t.value == "2000-02-03T00:00:00.000"
def test_replicate_value_error():
    """
    Passing a bad format to replicate should raise ValueError, not KeyError.
    PR #3857.
    """
    t = Time("2007:001", scale="tai")
    with pytest.raises(ValueError, match="format must be one of"):
        t.replicate(format="definitely_not_a_valid_format")
def test_remove_astropy_time():
    """
    Make sure that 'astropy_time' format is really gone after #3857. Kind of
    silly test but just to be sure.
    """
    t = Time("2007:001", scale="tai")
    assert "astropy_time" not in t.FORMATS
    with pytest.raises(ValueError, match="format must be one of"):
        Time(t, format="astropy_time")
def test_isiterable():
    """
    Ensure that scalar `Time` instances are not reported as iterable by the
    `isiterable` utility.

    Regression test for https://github.com/astropy/astropy/issues/4048
    """
    scalar = Time.now()
    assert not isiterable(scalar)

    vector = Time(
        ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"],
        format="iso",
        scale="utc",
    )
    assert isiterable(vector)
def test_to_datetime():
    """to_datetime() honours a custom datetime.tzinfo and round-trips."""
    tz = TimezoneInfo(utc_offset=-10 * u.hour, tzname="US/Hawaii")
    # The above lines produces a `datetime.tzinfo` object similar to:
    # tzinfo = pytz.timezone('US/Hawaii')
    time = Time("2010-09-03 00:00:00")
    tz_aware_datetime = time.to_datetime(tz)
    # 00:00 UTC is 14:00 (the previous day) at UTC-10.
    assert tz_aware_datetime.time() == datetime.time(14, 0)
    forced_to_astropy_time = Time(tz_aware_datetime)
    assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
    assert time == forced_to_astropy_time
    # Test non-scalar time inputs:
    time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
    tz_aware_datetime = time.to_datetime(tz)
    forced_to_astropy_time = Time(tz_aware_datetime)
    for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
        assert tz.tzname(dt) == tz_dt.tzname()
    assert np.all(time == forced_to_astropy_time)
    # Leap seconds cannot be represented as datetime objects.
    with pytest.raises(ValueError, match=r"does not support leap seconds"):
        Time("2015-06-30 23:59:60.000").to_datetime()
@pytest.mark.skipif(not HAS_PYTZ, reason="requires pytz")
def test_to_datetime_pytz():
    """Same round-trip checks as test_to_datetime, using a real pytz zone."""
    import pytz

    tz = pytz.timezone("US/Hawaii")
    time = Time("2010-09-03 00:00:00")
    tz_aware_datetime = time.to_datetime(tz)
    forced_to_astropy_time = Time(tz_aware_datetime)
    # 00:00 UTC is 14:00 (the previous day) in Hawaii (UTC-10).
    assert tz_aware_datetime.time() == datetime.time(14, 0)
    assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
    assert time == forced_to_astropy_time
    # Test non-scalar time inputs:
    time = Time(["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1990-09-03 06:00:00"])
    tz_aware_datetime = time.to_datetime(tz)
    forced_to_astropy_time = Time(tz_aware_datetime)
    for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
        assert tz.tzname(dt) == tz_dt.tzname()
    assert np.all(time == forced_to_astropy_time)
def test_cache():
    """Format and scale conversions are memoized per instance in `cache`."""
    t = Time("2010-09-03 00:00:00")
    t2 = Time("2010-09-03 00:00:00")
    # Time starts out without a cache
    assert "cache" not in t._time.__dict__
    # Access the iso format and confirm that the cached version is as expected
    t.iso
    assert t.cache["format"]["iso"] == t2.iso
    # Access the TAI scale and confirm that the cached version is as expected
    t.tai
    assert t.cache["scale"]["tai"] == t2.tai
    # New Time object after scale transform does not have a cache yet
    assert "cache" not in t.tt._time.__dict__
    # Clear the cache
    del t.cache
    assert "cache" not in t._time.__dict__
    # Check accessing the cache creates an empty dictionary
    assert not t.cache
    assert "cache" in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
    """
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction)
    convention (see #6638)
    """
    # Both an epoch string and the equivalent datetime must land exactly
    # on (2451545.0, 0.0).
    for t in (
        Time("J2000", scale="tdb"),
        Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb"),
    ):
        assert t.jd1 == 2451545.0
        assert t.jd2 == 0.0
def test_sum_is_equivalent():
    """
    Ensure that two equal dates defined in different ways behave equally (#6638)
    """
    t_epoch = Time("J2000", scale="tdb")
    t_iso = Time("2000-01-01 12:00:00", scale="tdb")
    assert t_epoch == t_iso
    # Equality must survive identical arithmetic on both.
    assert (t_epoch + 1 * u.second) == (t_iso + 1 * u.second)
def test_string_valued_columns():
    # Columns have a nice shim that translates bytes to string as needed.
    # Ensure Time can handle these. Use multi-d array just to be sure.
    times = [
        [[f"{y:04d}-{m:02d}-{d:02d}" for d in range(1, 3)] for m in range(5, 7)]
        for y in range(2012, 2014)
    ]
    col_str = Column(times)
    col_bytes = col_str.astype("S")
    assert np.all(Time(col_str) == Time(col_bytes))

    # Scalar epoch strings work too.
    assert Time(Column(["B1950"])) == Time(Column([b"B1950"]))

    # Regression tests for arrays with entries with unequal length. gh-6903.
    mixed = Column([b"2012-01-01", b"2012-01-01T00:00:00"])
    assert np.all(Time(mixed) == Time(["2012-01-01", "2012-01-01T00:00:00"]))
def test_bytes_input():
    """Scalar and array bytes inputs parse the same as str inputs."""
    tstring = "2011-01-02T03:04:05"
    tbytes = b"2011-01-02T03:04:05"
    assert tbytes.decode("ascii") == tstring

    reference = Time(tstring)
    assert Time(tbytes) == reference

    byte_array = np.array(tbytes)
    assert byte_array.dtype.kind == "S"
    assert Time(byte_array) == reference
def test_writeable_flag():
    """The writeable flag guards in-place item assignment."""
    t = Time([1, 2, 3], format="cxcsec")
    t[1] = 5.0
    assert allclose_sec(t[1].value, 5.0)
    # Once read-only, both item and slice assignment must fail.
    t.writeable = False
    with pytest.raises(ValueError) as err:
        t[1] = 5.0
    assert "Time object is read-only. Make a copy()" in str(err.value)
    with pytest.raises(ValueError) as err:
        t[:] = 5.0
    assert "Time object is read-only. Make a copy()" in str(err.value)
    # Re-enabling writeability restores normal assignment.
    t.writeable = True
    t[1] = 10.0
    assert allclose_sec(t[1].value, 10.0)
    # Scalar is writeable because it gets boxed into a zero-d array
    t = Time("2000:001", scale="utc")
    t[()] = "2000:002"
    assert t.value.startswith("2000:002")
    # Transformed attribute is not writeable
    t = Time(["2000:001", "2000:002"], scale="utc")
    t2 = t.tt  # t2 is read-only now because t.tt is cached
    with pytest.raises(ValueError) as err:
        t2[0] = "2005:001"
    assert "Time object is read-only. Make a copy()" in str(err.value)
def test_setitem_location():
    """Item assignment requires compatible (or unspecified) locations."""
    loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
    t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
    # Succeeds because the right hand side makes no implication about
    # location and just inherits t.location
    t[0, 0] = 0
    assert allclose_sec(t.value, [[0, 2], [3, 4]])
    # Fails because the right hand side has location=None
    with pytest.raises(ValueError) as err:
        t[0, 0] = Time(-1, format="cxcsec")
    assert (
        "cannot set to Time with different location: "
        "expected location={} and "
        "got location=None".format(loc[0]) in str(err.value)
    )
    # Succeeds because the right hand side correctly sets location
    t[0, 0] = Time(-2, format="cxcsec", location=loc[0])
    assert allclose_sec(t.value, [[-2, 2], [3, 4]])
    # Fails because the right hand side has different location
    with pytest.raises(ValueError) as err:
        t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
    assert (
        "cannot set to Time with different location: "
        "expected location={} and "
        "got location={}".format(loc[0], loc[1]) in str(err.value)
    )
    # Fails because the Time has None location and RHS has defined location
    t = Time([[1, 2], [3, 4]], format="cxcsec")
    with pytest.raises(ValueError) as err:
        t[0, 0] = Time(-2, format="cxcsec", location=loc[1])
    assert (
        "cannot set to Time with different location: "
        "expected location=None and "
        "got location={}".format(loc[1]) in str(err.value)
    )
    # Broadcasting works
    t = Time([[1, 2], [3, 4]], format="cxcsec", location=loc)
    t[0, :] = Time([-3, -4], format="cxcsec", location=loc)
    assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
    """Item assignment accepts plain Python values and clears the cache."""
    t = Time([[1, 2], [3, 4]], format="cxcsec")
    assert t.cache == {}
    t.iso
    assert "iso" in t.cache["format"]
    assert np.all(
        t.iso
        == [
            ["1998-01-01 00:00:01.000", "1998-01-01 00:00:02.000"],
            ["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
        ]
    )
    # Setting item clears cache
    t[0, 1] = 100
    assert t.cache == {}
    assert allclose_sec(t.value, [[1, 100], [3, 4]])
    assert np.all(
        t.iso
        == [
            ["1998-01-01 00:00:01.000", "1998-01-01 00:01:40.000"],
            ["1998-01-01 00:00:03.000", "1998-01-01 00:00:04.000"],
        ]
    )
    # Set with a float value
    t.iso
    t[1, :] = 200
    assert t.cache == {}
    assert allclose_sec(t.value, [[1, 100], [200, 200]])
    # Array of strings in yday format
    t[:, 1] = ["1998:002", "1998:003"]
    assert allclose_sec(t.value, [[1, 86400 * 1], [200, 86400 * 2]])
    # Incompatible numeric value: strings parse, but a bare number does not.
    t = Time(["2000:001", "2000:002"])
    t[0] = "2001:001"
    with pytest.raises(ValueError) as err:
        t[0] = 100
    assert "cannot convert value to a compatible Time object" in str(err.value)
def test_setitem_from_time_objects():
    """Set from existing Time object."""
    # Set from time object with different scale
    t = Time(["2000:001", "2000:002"], scale="utc")
    t2 = Time(["2000:010"], scale="tai")
    t[1] = t2[0]
    # The assigned value is converted into t's scale (utc).
    assert t.value[1] == t2.utc.value[0]
    # Time object with different scale and format
    t = Time(["2000:001", "2000:002"], scale="utc")
    t2.format = "jyear"
    t[1] = t2[0]
    assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
    """A non-index key in item assignment raises IndexError."""
    t = Time([1, 2], format="cxcsec")
    with pytest.raises(IndexError):
        t["asdf"] = 3
def test_setitem_deltas():
    """Setting invalidates any transform deltas"""
    t = Time([1, 2], format="cxcsec")
    t.delta_tdb_tt = [1, 2]
    t.delta_ut1_utc = [3, 4]
    # Assigning an item must drop both cached delta attributes.
    t[1] = 3
    for attr in ("_delta_tdb_tt", "_delta_ut1_utc"):
        assert not hasattr(t, attr)
def test_subclass():
    """Check that we can initialize subclasses with a Time instance."""
    # Ref: Issue gh-#7449 and PR gh-#7453.

    class _Time(Time):
        pass

    base = Time("1999-01-01T01:01:01")
    derived = _Time(base)

    # The subclass type is preserved and the value is unchanged.
    assert derived.__class__ == _Time
    assert base == derived
def test_strftime_scalar():
    """Test of Time.strftime"""
    time_string = "2010-09-03 06:00:00"
    t = Time(time_string)

    # The rendered string must be independent of the current format.
    for fmt in t.FORMATS:
        t.format = fmt
        assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strftime_array():
    """strftime on an array Time reproduces the inputs for every format."""
    tstrings = ["2010-09-03 00:00:00", "2005-09-03 06:00:00", "1995-12-31 23:59:60"]
    t = Time(tstrings)

    for fmt in t.FORMATS:
        t.format = fmt
        assert t.strftime("%Y-%m-%d %H:%M:%S").tolist() == tstrings
def test_strftime_array_2():
    """strftime preserves both values and shape for a 2-d array Time."""
    tstrings = np.array(
        [
            ["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
            ["1998-01-01 00:00:03", "1995-12-31 23:59:60"],
        ]
    )
    t = Time(tstrings)

    for fmt in t.FORMATS:
        t.format = fmt
        formatted = t.strftime("%Y-%m-%d %H:%M:%S")
        assert np.all(formatted == tstrings)
        assert formatted.shape == tstrings.shape
def test_strftime_leapsecond():
    """A leap second (23:59:60) survives strftime in every format."""
    time_string = "1995-12-31 23:59:60"
    t = Time(time_string)

    for fmt in t.FORMATS:
        t.format = fmt
        assert t.strftime("%Y-%m-%d %H:%M:%S") == time_string
def test_strptime_scalar():
    """Test of Time.strptime"""
    parsed = Time.strptime("2007-May-04 21:08:12", "%Y-%b-%d %H:%M:%S")
    assert parsed == Time("2007-05-04 21:08:12")
def test_strptime_array():
    """Test of Time.strptime"""
    tstrings = np.array(
        [
            ["1998-Jan-01 00:00:01", "1998-Jan-01 00:00:02"],
            ["1998-Jan-01 00:00:03", "1998-Jan-01 00:00:04"],
        ]
    )
    expected = Time(
        [
            ["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
            ["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
        ]
    )

    parsed = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
    assert np.all(parsed == expected)
    assert parsed.shape == tstrings.shape
def test_strptime_badinput():
    """Non-string inputs to strptime raise TypeError."""
    with pytest.raises(TypeError):
        Time.strptime([1, 2, 3], "%S")
def test_strptime_input_bytes_scalar():
    """A bytes time string parses like its str counterpart."""
    parsed = Time.strptime(b"2007-May-04 21:08:12", "%Y-%b-%d %H:%M:%S")
    assert parsed == Time("2007-05-04 21:08:12")
def test_strptime_input_bytes_array():
    """Bytes arrays parse like str arrays through strptime."""
    tstrings = np.array(
        [
            [b"1998-Jan-01 00:00:01", b"1998-Jan-01 00:00:02"],
            [b"1998-Jan-01 00:00:03", b"1998-Jan-01 00:00:04"],
        ]
    )
    expected = Time(
        [
            ["1998-01-01 00:00:01", "1998-01-01 00:00:02"],
            ["1998-01-01 00:00:03", "1998-01-01 00:00:04"],
        ]
    )

    parsed = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S")
    assert np.all(parsed == expected)
    assert parsed.shape == tstrings.shape
def test_strptime_leapsecond():
    """strptime accepts a leap-second time (23:59:60)."""
    from_isot = Time("1995-12-31T23:59:60", format="isot")
    from_strptime = Time.strptime("1995-Dec-31 23:59:60", "%Y-%b-%d %H:%M:%S")
    assert from_isot == from_strptime
def test_strptime_3_digit_year():
    """strptime handles years below 1000 correctly."""
    from_isot = Time("0995-12-31T00:00:00", format="isot", scale="tai")
    from_strptime = Time.strptime(
        "0995-Dec-31 00:00:00", "%Y-%b-%d %H:%M:%S", scale="tai"
    )
    assert from_isot == from_strptime
def test_strptime_fracsec_scalar():
    """%f parses fractional seconds for a scalar input."""
    parsed = Time.strptime("2007-May-04 21:08:12.123", "%Y-%b-%d %H:%M:%S.%f")
    assert parsed == Time("2007-05-04 21:08:12.123")
def test_strptime_fracsec_array():
    """Test of Time.strptime"""
    tstrings = np.array(
        [
            ["1998-Jan-01 00:00:01.123", "1998-Jan-01 00:00:02.000001"],
            ["1998-Jan-01 00:00:03.000900", "1998-Jan-01 00:00:04.123456"],
        ]
    )
    expected = Time(
        [
            ["1998-01-01 00:00:01.123", "1998-01-01 00:00:02.000001"],
            ["1998-01-01 00:00:03.000900", "1998-01-01 00:00:04.123456"],
        ]
    )

    parsed = Time.strptime(tstrings, "%Y-%b-%d %H:%M:%S.%f")
    assert np.all(parsed == expected)
    assert parsed.shape == tstrings.shape
def test_strftime_scalar_fracsec():
    """Test of Time.strftime"""
    time_string = "2010-09-03 06:00:00.123"
    t = Time(time_string)

    for fmt in t.FORMATS:
        t.format = fmt
        assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == time_string
def test_strftime_scalar_fracsec_precision():
    """%f output honours the Time precision attribute."""
    time_string = "2010-09-03 06:00:00.123123123"
    t = Time(time_string)
    # Default precision is 3 digits ...
    assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123"
    # ... and raising it exposes the full fraction.
    t.precision = 9
    assert t.strftime("%Y-%m-%d %H:%M:%S.%f") == "2010-09-03 06:00:00.123123123"
def test_strftime_array_fracsec():
    """Fractional seconds round-trip for array Times at precision=6."""
    tstrings = [
        "2010-09-03 00:00:00.123000",
        "2005-09-03 06:00:00.000001",
        "1995-12-31 23:59:60.000900",
    ]
    t = Time(tstrings)
    t.precision = 6

    for fmt in t.FORMATS:
        t.format = fmt
        assert t.strftime("%Y-%m-%d %H:%M:%S.%f").tolist() == tstrings
def test_insert_time():
    """Time.insert accepts strings, Time scalars, lists and arrays."""
    tm = Time([1, 2], format="unix")

    # Scalar insertions at index 1; all equivalent to unix time 60.
    expected = Time([1, 60, 2], format="unix")
    assert np.all(tm.insert(1, "1970-01-01 00:01:00") == expected)
    assert np.all(tm.insert(1, Time("1970-01-01 00:01:00")) == expected)
    assert np.all(tm.insert(1, [Time("1970-01-01 00:01:00")]) == expected)

    # Length=2 insertions with float values matching the unix format.
    # The first call also actually provides axis=0, unlike the others.
    mid = Time([1, 10, 20, 2], format="unix")
    assert np.all(tm.insert(1, [10, 20], axis=0) == mid)
    assert np.all(tm.insert(1, np.array([10, 20])) == mid)
    # At the end, and at the beginning via a negative index.
    assert np.all(
        tm.insert(2, np.array([10, 20])) == Time([1, 2, 10, 20], format="unix")
    )
    assert np.all(
        tm.insert(-2, np.array([10, 20])) == Time([10, 20, 1, 2], format="unix")
    )
def test_insert_time_out_subfmt():
    """insert() preserves out_subfmt on the result."""
    t = Time(["1999-01-01", "1999-01-02"], out_subfmt="date")

    # Inserting an existing element keeps the sub-format.
    t = t.insert(0, t[0])
    assert t.out_subfmt == "date"
    assert t[0] == t[1]

    # Inserting a parsed string keeps it too.
    t = t.insert(1, "1999-01-03")
    assert t.out_subfmt == "date"
    assert str(t[1]) == "1999-01-03"
def test_insert_exceptions():
    """insert() raises informative errors for invalid usage."""
    scalar = Time(1, format="unix")
    with pytest.raises(TypeError, match="cannot insert into scalar"):
        scalar.insert(0, 50)

    tm = Time([1, 2], format="unix")
    with pytest.raises(ValueError, match="axis must be 0"):
        tm.insert(0, 50, axis=1)
    with pytest.raises(TypeError, match="obj arg must be an integer"):
        tm.insert(slice(None), 50)
    with pytest.raises(
        IndexError, match="index -100 is out of bounds for axis 0 with size 2"
    ):
        tm.insert(-100, 50)
def test_datetime64_no_format():
    """A bare ``np.datetime64`` input is understood without an explicit format."""
    stamp = np.datetime64("2000-01-02T03:04:05.123456789")
    t = Time(stamp, scale="utc", precision=9)
    # Round-trips exactly through value, datetime64 and iso views.
    assert t.value == stamp
    assert t.datetime64 == stamp
    assert t.iso == "2000-01-02 03:04:05.123456789"
def test_hash_time():
    # Scalar Times hash by value; arrays and masked elements are unhashable;
    # location and scale both participate in the hash.
    loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
    for loc in None, loc1:
        t = Time([1, 1, 2, 3], format="cxcsec", location=loc)
        t[3] = np.ma.masked
        h1 = hash(t[0])
        h2 = hash(t[1])
        h3 = hash(t[2])
        assert h1 == h2
        assert h1 != h3

        with pytest.raises(TypeError) as exc:
            hash(t)
        assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"

        with pytest.raises(TypeError) as exc:
            hash(t[3])
        assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"

    # NOTE: ``loc`` here is the leftover loop variable from above (== loc1),
    # so the location differs from t2's (which has none).
    t = Time(1, format="cxcsec", location=loc)
    t2 = Time(1, format="cxcsec")
    assert hash(t) != hash(t2)

    # Equal times on different scales compare equal but hash differently.
    t = Time("2000:180", scale="utc")
    t2 = Time(t, scale="tai")
    assert t == t2
    assert hash(t) != hash(t2)
def test_hash_time_delta():
    # Scalar TimeDeltas hash by value; arrays and masked values are unhashable.
    t = TimeDelta([1, 1, 2, 3], format="sec")
    t[3] = np.ma.masked
    h1 = hash(t[0])
    h2 = hash(t[1])
    h3 = hash(t[2])
    assert h1 == h2
    assert h1 != h3

    with pytest.raises(TypeError) as exc:
        hash(t)
    assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"

    with pytest.raises(TypeError) as exc:
        hash(t[3])
    assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
    # Verify the error messages from format guessing and explicit formats.
    with pytest.raises(ValueError) as err:
        Time(10)  # numeric input with no format to disambiguate it
    assert "No time format was given, and the input is" in str(err.value)

    with pytest.raises(ValueError) as err:
        Time("2000:001", format="not-a-format")
    assert "Format 'not-a-format' is not one of the allowed" in str(err.value)

    with pytest.raises(ValueError) as err:
        Time("200")  # string matching no guessable format
    assert "Input values did not match any of the formats where" in str(err.value)

    # With an explicit format, the underlying parse error is chained in.
    with pytest.raises(ValueError) as err:
        Time("200", format="iso")
    assert (
        "Input values did not match the format class iso:"
        + os.linesep
        + "ValueError: Time 200 does not match iso format"
    ) == str(err.value)

    with pytest.raises(ValueError) as err:
        Time(200, format="iso")
    assert (
        "Input values did not match the format class iso:"
        + os.linesep
        + "TypeError: Input values for iso class must be strings"
    ) == str(err.value)
def test_ymdhms_defaults():
    """Omitted ymdhms keys must default to the start of the year."""
    parsed = Time({"year": 2001}, format="ymdhms")
    assert Time("2001-01-01") == parsed
# Shared fixtures for the ymdhms initialization tests below: the same two
# moments expressed as a dict of columns, a Table, a structured ndarray and
# a recarray view of that array.
times_dict_ns = {
    "year": [2001, 2002],
    "month": [2, 3],
    "day": [4, 5],
    "hour": [6, 7],
    "minute": [8, 9],
    "second": [10, 11],
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)

# Expected field names of the structured ymdhms value, in order.
ymdhms_names = ("year", "month", "day", "hour", "minute", "second")
@pytest.mark.parametrize("tm_input", [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
@pytest.mark.parametrize("as_row", [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
    # Initialize from each table-like input, both as a full table and as a
    # single row, with and without an explicit format.
    time_ns = Time(["2001-02-04 06:08:10", "2002-03-05 07:09:11"])
    if as_row:
        tm_input = tm_input[0]
        time_ns = time_ns[0]
    tm = Time(tm_input, **kwargs)
    assert np.all(tm == time_ns)
    assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
    """Dict values of differing shapes must broadcast against each other."""
    inputs = {"year": [[2001, 2002], [2003, 2004]], "month": [2, 3], "day": 4}
    expected = Time([["2001-02-04", "2002-03-04"], ["2003-02-04", "2004-03-04"]])
    result = Time(inputs, format="ymdhms")
    assert result.ymdhms.shape == expected.shape
    assert np.all(result == expected)
@pytest.mark.parametrize("kwargs", [{}, {"format": "ymdhms"}])
def test_ymdhms_init_from_dict_scalar(kwargs):
    """
    Test YMDHMS functionality for a dict input. This includes ensuring that
    key and attribute access work. For extra fun use a time within a leap
    second.
    """
    time_dict = {
        "year": 2016,
        "month": 12,
        "day": 31,
        "hour": 23,
        "minute": 59,
        "second": 60.123456789,
    }

    tm = Time(time_dict, **kwargs)
    assert tm == Time("2016-12-31T23:59:60.123456789")
    for attr in time_dict:
        # Check both item access and attribute access on the structured value.
        for value in (tm.value[attr], getattr(tm.value, attr)):
            if attr == "second":
                assert allclose_sec(time_dict[attr], value)
            else:
                assert time_dict[attr] == value

    # Now test initializing from a YMDHMS format time using the object
    tm_rt = Time(tm)
    assert tm_rt == tm
    assert tm_rt.format == "ymdhms"

    # Test initializing from a YMDHMS value (np.void, i.e. recarray row)
    # without specified format.
    tm_rt = Time(tm.ymdhms)
    assert tm_rt == tm
    assert tm_rt.format == "ymdhms"
def test_ymdhms_exceptions():
    # Bad inputs to the ymdhms format must raise informative errors.
    with pytest.raises(ValueError, match="input must be dict or table-like"):
        Time(10, format="ymdhms")

    match = "'wrong' not allowed as YMDHMS key name(s)"
    # NB: ``match`` in pytest.raises() is interpreted as a regex (re.search),
    # so the literal "(s)" parentheses would need escaping; check with ``in``
    # against the plain string instead.
    with pytest.raises(ValueError) as err:
        Time({"year": 2019, "wrong": 1}, format="ymdhms")
    assert match in str(err.value)

    match = "for 2 input key names you must supply 'year', 'month'"
    with pytest.raises(ValueError, match=match):
        Time({"year": 2019, "minute": 1}, format="ymdhms")
def test_ymdhms_masked():
    # Masked entries come back as masked void rows with every field masked.
    tm = Time({"year": [2000, 2001]}, format="ymdhms")
    tm[0] = np.ma.masked
    assert isinstance(tm.value[0], np.ma.core.mvoid)
    for name in ymdhms_names:
        assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
    # Attribute access on the structured ymdhms value returns the field.
    t = Time(
        {
            "year": 2015,
            "month": 2,
            "day": 3,
            "hour": 12,
            "minute": 13,
            "second": 14.567,
        },
        scale="utc",
    )
    # NOTE: actually comes back as np.void for some reason
    # NOTE: not necessarily a python int; might be an int32
    assert t.ymdhms.year == 2015
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_ecsv(fmt):
    """Test special-case serialization of certain Time formats"""
    # ECSV round trip: the format string survives; full jd precision does not.
    t = Table()
    # Use a time that tests the default serialization of the time format
    tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
    tm.format = fmt
    t["a"] = tm
    out = StringIO()
    t.write(out, format="ascii.ecsv")
    t2 = Table.read(out.getvalue(), format="ascii.ecsv")
    assert t["a"].format == t2["a"].format
    # Some loss of precision in the serialization
    assert not np.all(t["a"] == t2["a"])
    # But no loss in the format representation
    assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_fits(fmt, tmp_path):
    """Test special-case serialization of certain Time formats"""
    # FITS round trip: full precision survives but the format string is lost.
    t = Table()
    # Use a time that tests the default serialization of the time format
    tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
    tm.format = fmt
    t["a"] = tm
    out = tmp_path / "out.fits"
    t.write(out, format="fits")
    t2 = Table.read(out, format="fits", astropy_native=True)
    # Currently the format is lost in FITS so set it back
    t2["a"].format = fmt
    # No loss of precision in the serialization or representation
    assert np.all(t["a"] == t2["a"])
    assert np.all(t["a"].value == t2["a"].value)
@pytest.mark.skipif(not HAS_H5PY, reason="Needs h5py")
@pytest.mark.parametrize("fmt", TIME_FORMATS)
def test_write_every_format_to_hdf5(fmt, tmp_path):
    """Test special-case serialization of certain Time formats"""
    # HDF5 (with serialize_meta) round trip: both format and precision survive.
    t = Table()
    # Use a time that tests the default serialization of the time format
    tm = Time("2020-01-01") + [[1, 1 / 7], [3, 4.5]] * u.s
    tm.format = fmt
    t["a"] = tm
    out = tmp_path / "out.h5"
    t.write(str(out), format="hdf5", path="root", serialize_meta=True)
    t2 = Table.read(str(out), format="hdf5", path="root")
    assert t["a"].format == t2["a"].format
    # No loss of precision in the serialization or representation
    assert np.all(t["a"] == t2["a"])
    assert np.all(t["a"].value == t2["a"].value)
# There are two stages of validation now - one on input into a format, so that
# the format conversion code has tidy matched arrays to work with, and the
# other when object construction does not go through a format object. Or at
# least, the format object is constructed with "from_jd=True". In this case the
# normal input validation does not happen but the new input validation does,
# and can ensure that strange broadcasting anomalies can't happen.
# This form of construction uses from_jd=True.
def test_broadcasting_writeable():
    # Item assignment into a broadcast-constructed Time must work.
    t = Time("J2015") + np.linspace(-1, 1, 10) * u.day
    t[2] = Time(58000, format="mjd")
def test_format_subformat_compatibility():
    """Test that changing format with out_subfmt defined is not a problem.
    See #9812, #9810."""
    t = Time("2019-12-20", out_subfmt="date_??")
    assert t.mjd == 58837.0
    assert t.yday == "2019:354:00:00"  # Preserves out_subfmt

    # Changing to a numeric format resets out_subfmt to the default.
    t2 = t.replicate(format="mjd")
    assert t2.out_subfmt == "*"  # Changes to default

    t2 = t.copy(format="mjd")
    assert t2.out_subfmt == "*"

    t2 = Time(t, format="mjd")
    assert t2.out_subfmt == "*"

    # Changing to a compatible string format keeps out_subfmt.
    t2 = t.copy(format="yday")
    assert t2.out_subfmt == "date_??"
    assert t2.value == "2019:354:00:00"

    t.format = "yday"
    assert t.value == "2019:354:00:00"
    assert t.out_subfmt == "date_??"

    t = Time("2019-12-20", out_subfmt="date")
    assert t.mjd == 58837.0
    assert t.yday == "2019:354"
@pytest.mark.parametrize("use_fast_parser", ["force", "False"])
def test_format_fractional_string_parsing(use_fast_parser):
    """Test that string like "2022-08-01.123" does not parse as ISO.
    See #6476 and the fix."""
    # Exercise both the fast C parser and the pure-Python fallback.
    with pytest.raises(
        ValueError, match=r"Input values did not match the format class iso"
    ):
        with conf.set_temp("use_fast_parser", use_fast_parser):
            Time("2022-08-01.123", format="iso")
@pytest.mark.parametrize("fmt_name,fmt_class", TIME_FORMATS.items())
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class):
    """From a starting Time value, test that every valid combination of
    to_value(format, subfmt) works. See #9812, #9361.
    """
    t = Time("2000-01-01")
    # Every declared subfmt name, plus the None and wildcard sentinels.
    for candidate in [name for name, *_ in fmt_class.subfmts] + [None, "*"]:
        t.to_value(fmt_name, candidate)
@pytest.mark.parametrize("location", [None, (45, 45)])
def test_location_init(location):
    """Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance of list of
    Time instances.
    """
    tm = Time("J2010", location=location)

    # Init from a scalar Time
    tm2 = Time(tm)
    assert np.all(tm.location == tm2.location)
    assert type(tm.location) is type(tm2.location)

    # From a list of Times
    tm2 = Time([tm, tm])
    if location is None:
        assert tm2.location is None
    else:
        for loc in tm2.location:
            assert loc == tm.location
    assert type(tm.location) is type(tm2.location)

    # Effectively the same as a list of Times, but just to be sure that
    # Table mixin initialization is working as expected.
    tm2 = Table([[tm, tm]])["col0"]
    if location is None:
        assert tm2.location is None
    else:
        for loc in tm2.location:
            assert loc == tm.location
    assert type(tm.location) is type(tm2.location)
def test_location_init_fail():
    """Test fix in #9969 for issue #9962 where the location attribute is
    lost when initializing Time from an existing Time instance of list of
    Time instances. Make sure exception is correct.
    """
    with_loc = Time("J2010", location=(45, 45))
    without_loc = Time("J2010")
    # Mixing located and unlocated times in one list must raise.
    with pytest.raises(
        ValueError, match="cannot concatenate times unless all locations"
    ):
        Time([with_loc, without_loc])
def test_linspace():
    """Test `np.linspace` `__array_func__` implementation for scalar and arrays."""
    t1 = Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"])
    t2 = Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"])
    # Tolerance: two float64 ulps of the largest interval involved.
    atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()

    # Scalar endpoints.
    ts = np.linspace(t1[0], t2[0], 3)
    assert ts[0].isclose(Time("2021-01-01 00:00:00"), atol=atol)
    assert ts[1].isclose(Time("2021-01-01 00:30:00"), atol=atol)
    assert ts[2].isclose(Time("2021-01-01 01:00:00"), atol=atol)

    # Array start, scalar stop, endpoint excluded.
    ts = np.linspace(t1, t2[0], 2, endpoint=False)
    assert ts.shape == (2, 2)
    assert all(
        ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
    )
    assert all(
        ts[1].isclose(Time(["2021-01-01 00:30:00", "2021-01-01 12:30:00"]), atol=atol)
    )

    # Array start and stop.
    ts = np.linspace(t1, t2, 7)
    assert ts.shape == (7, 2)
    assert all(
        ts[0].isclose(Time(["2021-01-01 00:00:00", "2021-01-02 00:00:00"]), atol=atol)
    )
    assert all(
        ts[1].isclose(Time(["2021-01-01 00:10:00", "2021-03-03 00:00:00"]), atol=atol)
    )
    assert all(
        ts[5].isclose(Time(["2021-01-01 00:50:00", "2021-10-29 00:00:00"]), atol=atol)
    )
    assert all(
        ts[6].isclose(Time(["2021-01-01 01:00:00", "2021-12-28 00:00:00"]), atol=atol)
    )
def test_linspace_steps():
    """Test `np.linspace` `retstep` option."""
    t1 = Time(["2021-01-01 00:00:00", "2021-01-01 12:00:00"])
    t2 = Time("2021-01-02 00:00:00")
    atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()

    ts, st = np.linspace(t1, t2, 7, retstep=True)
    assert ts.shape == (7, 2)
    assert st.shape == (2,)
    # Consecutive samples must advance by exactly the returned step.
    assert all(ts[1].isclose(ts[0] + st, atol=atol))
    assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol))
    assert all(st.isclose(TimeDelta([14400, 7200], format="sec"), atol=atol))
def test_linspace_fmts():
    """Test `np.linspace` `__array_func__` implementation for start/endpoints
    from different formats/systems.
    """
    t1 = Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"])
    t2 = Time(2458850, format="jd")
    t3 = Time(1578009600, format="unix")
    atol = 2 * np.finfo(float).eps * abs(t1 - Time([t2, t3])).max()

    # iso start, jd stop.
    ts = np.linspace(t1, t2, 3)
    assert ts.shape == (3, 2)
    assert all(
        ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
    )
    assert all(
        ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-01 18:00:00"]), atol=atol)
    )
    assert all(
        ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-01 12:00:00"]), atol=atol)
    )

    # iso start, mixed jd/unix stop.
    ts = np.linspace(t1, Time([t2, t3]), 3)
    assert ts.shape == (3, 2)
    assert all(
        ts[0].isclose(Time(["2020-01-01 00:00:00", "2020-01-02 00:00:00"]), atol=atol)
    )
    assert all(
        ts[1].isclose(Time(["2020-01-01 06:00:00", "2020-01-02 12:00:00"]), atol=atol)
    )
    assert all(
        ts[2].isclose(Time(["2020-01-01 12:00:00", "2020-01-03 00:00:00"]), atol=atol)
    )
def test_to_string():
    # str/repr of a 3-d Time must honor numpy print options (summarization
    # with "..." for large arrays).
    dims = [8, 2, 8]
    dx = np.arange(np.prod(dims)).reshape(dims)
    tm = Time("2020-01-01", out_subfmt="date") + dx * u.day
    exp_lines = [
        "[[['2020-01-01' '2020-01-02' ... '2020-01-07' '2020-01-08']",
        "  ['2020-01-09' '2020-01-10' ... '2020-01-15' '2020-01-16']]",
        "",
        " [['2020-01-17' '2020-01-18' ... '2020-01-23' '2020-01-24']",
        "  ['2020-01-25' '2020-01-26' ... '2020-01-31' '2020-02-01']]",
        "",
        " ...",
        "",
        " [['2020-04-06' '2020-04-07' ... '2020-04-12' '2020-04-13']",
        "  ['2020-04-14' '2020-04-15' ... '2020-04-20' '2020-04-21']]",
        "",
        " [['2020-04-22' '2020-04-23' ... '2020-04-28' '2020-04-29']",
        "  ['2020-04-30' '2020-05-01' ... '2020-05-06' '2020-05-07']]]",
    ]
    exp_str = "\n".join(exp_lines)

    with np.printoptions(threshold=100, edgeitems=2, linewidth=75):
        out_str = str(tm)
        out_repr = repr(tm)
    assert out_str == exp_str

    exp_repr = f"<Time object: scale='utc' format='iso' value={exp_str}>"
    assert out_repr == exp_repr
|
69a6e4a1d4dfbf210a4ed7adb87996f102819ea5ea0d0d2399443af94dd1ecd7 | import contextlib
import decimal
import functools
import warnings
from datetime import datetime, timedelta
from decimal import Decimal
import erfa
import numpy as np
import pytest
from erfa import ErfaError, ErfaWarning
from hypothesis import assume, example, given, target
from hypothesis.extra.numpy import array_shapes, arrays
from hypothesis.strategies import (
composite,
datetimes,
floats,
integers,
one_of,
sampled_from,
timedeltas,
tuples,
)
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import STANDARD_TIME_SCALES, Time, TimeDelta
from astropy.time.utils import day_frac, two_sum
from astropy.utils import iers
# Comparison helpers tied to float64 resolution: jd1 at relative eps only,
# jd2 at absolute eps of a day (~20 ps), seconds at eps scaled to a day.
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(
    np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps
)  # 20 ps atol
allclose_sec = functools.partial(
    np.allclose, rtol=np.finfo(float).eps, atol=np.finfo(float).eps * 24 * 3600
)

# One float64 ulp, and the corresponding smallest representable TimeDelta
# step of one ulp of a day.
tiny = np.finfo(float).eps
dt_tiny = TimeDelta(tiny, format="jd")
def setup_module():
    """Module-level pytest setup hook."""
    # Pre-load leap seconds table to avoid flakiness in hypothesis runs.
    # See https://github.com/astropy/astropy/issues/11030
    Time("2020-01-01").ut1
@pytest.fixture(scope="module")
def iers_b():
    """This is an expensive operation, so we share it between tests using a
    module-scoped fixture instead of using the context manager form. This
    is particularly important for Hypothesis, which invokes the decorated
    test function many times (100 by default; see conftest.py for details).
    """
    # Yield a marker string; the IERS-B table stays set for the module.
    with iers.earth_orientation_table.set(iers.IERS_B.open(iers.IERS_B_FILE)):
        yield "<using IERS-B orientation table>"
@contextlib.contextmanager
def quiet_erfa():
    """Context manager that silences ErfaWarning within its scope."""
    with warnings.catch_warnings():
        # simplefilter("ignore", cls) is equivalent to filterwarnings with an
        # empty message pattern: suppress every ErfaWarning.
        warnings.simplefilter("ignore", ErfaWarning)
        yield
def assert_almost_equal(a, b, *, rtol=None, atol=None, label=""):
    """Assert numbers are almost equal.

    This version also lets hypothesis know how far apart the inputs are, so
    that it can work towards a failure and present the worst failure ever seen
    as well as the simplest, which often just barely exceeds the threshold.
    """
    # Hide this helper from pytest tracebacks.
    __tracebackhide__ = True
    # Combine rtol/atol into a single threshold against |a - b|.
    if rtol is None or rtol == 0:
        thresh = atol
    elif atol is None:
        thresh = rtol * (abs(a) + abs(b)) / 2
    else:
        thresh = atol + rtol * (abs(a) + abs(b)) / 2

    amb = a - b
    if isinstance(amb, TimeDelta):
        # Feed both signs to hypothesis' target() so it can maximize either.
        ambv = amb.to_value(u.s)
        target(ambv, label=label + " (a-b).to_value(u.s), from TimeDelta")
        target(-ambv, label=label + " (b-a).to_value(u.s), from TimeDelta")
        # Convert so the comparison below is unit-consistent with thresh.
        if isinstance(thresh, u.Quantity):
            amb = amb.to(thresh.unit)
    else:
        try:
            target_value = float(amb)
        except TypeError:
            # Not float-convertible (e.g. Decimal beyond float range); just
            # skip hypothesis targeting in that case.
            pass
        else:
            target(target_value, label=label + " float(a-b)")
            target(-target_value, label=label + " float(b-a)")

    assert abs(amb) < thresh
# Days that end with leap seconds
# Some time scales use a so-called "leap smear" to cope with these, others
# have times they can't represent or can represent two different ways.
# In any case these days are liable to cause trouble in time conversions.
# Note that from_erfa includes some weird non-integer steps before 1970.
leap_second_table = iers.LeapSeconds.from_iers_leap_seconds()
# Days that contain leap_seconds
leap_second_days = leap_second_table["mjd"] - 1
# Pairs of (MJD of the leap-second day, size of the TAI-UTC step in seconds).
leap_second_deltas = list(
    zip(leap_second_days[1:], np.diff(leap_second_table["tai_utc"]))
)

# Handy reference moments used by the jd strategies below.
today = Time.now()
mjd0 = Time(0, format="mjd")
def reasonable_ordinary_jd():
    """Strategy: JD pairs near the modern era with a well-formed fraction."""
    whole = floats(2440000, 2470000)
    frac = floats(-0.5, 0.5)
    return tuples(whole, frac)
@composite
def leap_second_tricky(draw):
    # Pick an MJD on, just after, or just before a leap-second day, then
    # return it as a JD pair with an arbitrary fraction of a day.
    mjd = draw(
        one_of(
            sampled_from(leap_second_days),
            sampled_from(leap_second_days + 1),
            sampled_from(leap_second_days - 1),
        )
    )
    return mjd + mjd0.jd1 + mjd0.jd2, draw(floats(0, 1))
def reasonable_jd():
    """Pick a reasonable JD.

    These should be not too far in the past or future (so that date conversion
    routines don't have to deal with anything too exotic), but they should
    include leap second days as a special case, and they should include several
    particularly simple cases (today, the beginning of the MJD scale, a
    reasonable date) so that hypothesis' example simplification produces
    obviously simple examples when they trigger problems.
    """
    # Fixed simple moments first, so shrinking lands on them.
    moments = [(2455000.0, 0.0), (mjd0.jd1, mjd0.jd2), (today.jd1, today.jd2)]
    return one_of(sampled_from(moments), reasonable_ordinary_jd(), leap_second_tricky())
def unreasonable_ordinary_jd():
    """Strategy for a JD pair that might be unordered or far away."""
    wild = floats(-1e7, 1e7)
    return tuples(wild, wild)
def ordered_jd():
    """Strategy for a JD pair that is ordered but not necessarily near now."""
    whole = floats(-1e7, 1e7)
    frac = floats(-0.5, 0.5)
    return tuples(whole, frac)
def unreasonable_jd():
    """Union of every JD strategy: reasonable, ordered, and wild pairs."""
    return one_of(
        reasonable_jd(),
        ordered_jd(),
        unreasonable_ordinary_jd(),
    )
@composite
def jd_arrays(draw, jd_values):
    # Build matching (jd1, jd2) float arrays of a random common shape, with
    # element pairs drawn from the given jd-pair strategy.
    s = draw(array_shapes())
    d = np.dtype([("jd1", float), ("jd2", float)])
    jdv = jd_values.map(lambda x: np.array(x, dtype=d))
    a = draw(arrays(d, s, elements=jdv))
    return a["jd1"], a["jd2"]
def unreasonable_delta():
    """Strategy for a possibly huge, unnormalized TimeDelta jd pair."""
    wide = floats(-1e7, 1e7)
    return tuples(wide, wide)
def reasonable_delta():
    """Strategy for a modest TimeDelta jd pair with a normalized fraction."""
    days = floats(-1e4, 1e4)
    frac = floats(-0.5, 0.5)
    return tuples(days, frac)
# redundant?
def test_abs_jd2_always_less_than_half():
    """Make jd2 approach +/-0.5, and check that it doesn't go over."""
    t1 = Time(2400000.5, [-tiny, +tiny], format="jd")
    assert np.all(t1.jd1 % 1 == 0)
    assert np.all(abs(t1.jd2) < 0.5)
    # Inputs straddling +/-0.5 must normalize back into the open interval.
    t2 = Time(
        2400000.0, [[0.5 - tiny, 0.5 + tiny], [-0.5 - tiny, -0.5 + tiny]], format="jd"
    )
    assert np.all(t2.jd1 % 1 == 0)
    assert np.all(abs(t2.jd2) < 0.5)
@given(jd_arrays(unreasonable_jd()))
def test_abs_jd2_always_less_than_half_on_construction(jds):
    # After construction jd1 is integral and |jd2| <= 0.5, with |jd2| == 0.5
    # only allowed when jd1 is even (round-half-to-even).
    jd1, jd2 = jds
    t = Time(jd1, jd2, format="jd")
    target(np.amax(np.abs(t.jd2)))
    assert np.all(t.jd1 % 1 == 0)
    assert np.all(abs(t.jd2) <= 0.5)
    assert np.all((abs(t.jd2) < 0.5) | (t.jd1 % 2 == 0))
@given(integers(-(10**8), 10**8), sampled_from([-0.5, 0.5]))
def test_round_to_even(jd1, jd2):
    # A fraction of exactly +/-0.5 must land on an even jd1.
    t = Time(jd1, jd2, format="jd")
    assert (abs(t.jd2) == 0.5) and (t.jd1 % 2 == 0)
def test_addition():
    """Check that an addition at the limit of precision (2^-52) is seen"""
    t = Time(2455555.0, 0.5, format="jd", scale="utc")
    t_dt = t + dt_tiny
    # The one-ulp step shows up in jd2 while jd1 stays integral.
    assert t_dt.jd1 == t.jd1 and t_dt.jd2 != t.jd2

    # Check that the addition is exactly reversed by the corresponding
    # subtraction
    t2 = t_dt - dt_tiny
    assert t2.jd1 == t.jd1 and t2.jd2 == t.jd2
def test_mult_div():
    """Test precision with multiply and divide"""
    dt_small = 6 * dt_tiny
    # pick a number that will leave remainder if divided by 6.
    dt_big = TimeDelta(20000.0, format="jd")
    dt_big_small_by_6 = (dt_big + dt_small) / 6.0
    # After removing the integer part, the sub-ulp contribution survives.
    dt_frac = dt_big_small_by_6 - TimeDelta(3333.0, format="jd")
    assert allclose_jd2(dt_frac.jd2, 0.33333333333333354)
def test_init_variations():
    """Check that 3 ways of specifying a time + small offset are equivalent.

    The same moment is built by (1) adding a tiny TimeDelta, (2) passing the
    tiny offset as ``val2``, and (3) swapping ``val`` and ``val2``; they must
    agree at full (jd1, jd2) precision.
    """
    dt_tiny_sec = dt_tiny.jd2 * 86400.0
    t1 = Time(1e11, format="cxcsec") + dt_tiny
    t2 = Time(1e11, dt_tiny_sec, format="cxcsec")
    t3 = Time(dt_tiny_sec, 1e11, format="cxcsec")
    # These two assertions were previously duplicated verbatim; stating each
    # once is sufficient.
    assert t1.jd1 == t2.jd1
    assert t1.jd2 == t3.jd2
def test_precision_exceeds_64bit():
    """
    Check that Time object really holds more precision than float64 by looking
    at the (naively) summed 64-bit result and asserting equality at the
    bit level.
    """
    t1 = Time(1.23456789e11, format="cxcsec")
    t2 = t1 + dt_tiny
    # The single-float .jd view cannot resolve dt_tiny, so it is unchanged.
    assert t1.jd == t2.jd
def test_through_scale_change():
    """Check that precision holds through scale change (cxcsec is TT)"""
    t0 = Time(1.0, format="cxcsec")
    t1 = Time(1.23456789e11, format="cxcsec")
    # The same interval computed in TT and in TAI must match to the ulp.
    dt_tt = t1 - t0
    dt_tai = t1.tai - t0.tai
    assert allclose_jd(dt_tt.jd1, dt_tai.jd1)
    assert allclose_jd2(dt_tt.jd2, dt_tai.jd2)
def test_iso_init():
    """Check when initializing from ISO date"""
    t1 = Time("2000:001:00:00:00.00000001", scale="tai")
    t2 = Time("3000:001:13:00:00.00000002", scale="tai")
    # The 10-ns difference between the sub-second parts must survive.
    dt = t2 - t1
    assert allclose_jd2(dt.jd2, 13.0 / 24.0 + 1e-8 / 86400.0 - 1.0)
def test_jd1_is_mult_of_one():
    """
    Check that jd1 is a multiple of 1.
    """
    t1 = Time("2000:001:00:00:00.00000001", scale="tai")
    assert np.round(t1.jd1) == t1.jd1
    # Also when constructed from an arbitrary (val, val2) float pair.
    t1 = Time(1.23456789, 12345678.90123456, format="jd", scale="tai")
    assert np.round(t1.jd1) == t1.jd1
def test_precision_neg():
    """
    Check precision when jd1 is negative. This used to fail because ERFA
    routines use a test like jd1 > jd2 to decide which component to update.
    It was updated to abs(jd1) > abs(jd2) in erfa 1.6 (sofa 20190722).
    """
    t1 = Time(-100000.123456, format="jd", scale="tt")
    assert np.round(t1.jd1) == t1.jd1
    # jd1 must stay integral after a scale conversion as well.
    t1_tai = t1.tai
    assert np.round(t1_tai.jd1) == t1_tai.jd1
def test_precision_epoch():
    """
    Check that input via epoch also has full precision, i.e., against
    regression on https://github.com/astropy/astropy/pull/366
    """
    t_utc = Time(range(1980, 2001), format="jyear", scale="utc")
    t_tai = Time(range(1980, 2001), format="jyear", scale="tai")
    # UTC-TAI offsets are whole seconds over this range.
    dt = t_utc - t_tai
    assert allclose_sec(dt.sec, np.round(dt.sec))
def test_leap_seconds_rounded_correctly():
    """Regression tests against #2083, where a leap second was rounded
    incorrectly by the underlying ERFA routine."""
    with iers.conf.set_temp("auto_download", False):
        t = Time(
            ["2012-06-30 23:59:59.413", "2012-07-01 00:00:00.413"],
            scale="ut1",
            precision=3,
        ).utc
        assert np.all(
            t.iso == np.array(["2012-06-30 23:59:60.000", "2012-07-01 00:00:00.000"])
        )
    # with the bug, both yielded '2012-06-30 23:59:60.000'
@given(integers(-(2**52) + 2, 2**52 - 2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_two_sum(i, f):
    # Compare two_sum against 40-digit Decimal arithmetic as ground truth.
    with decimal.localcontext(decimal.Context(prec=40)):
        a = Decimal(i) + Decimal(f)
        s, r = two_sum(i, f)
        b = Decimal(s) + Decimal(r)
        assert_almost_equal(a, b, atol=Decimal(tiny), rtol=Decimal(0))
# The bounds are here since we want to be sure the sum does not go to infinity,
# which does not have to be completely symmetric; e.g., this used to fail:
# @example(f1=-3.089785075544792e307, f2=1.7976931348623157e308)
# See https://github.com/astropy/astropy/issues/12955#issuecomment-1186293703
@given(
    floats(min_value=np.finfo(float).min / 2, max_value=np.finfo(float).max / 2),
    floats(min_value=np.finfo(float).min / 2, max_value=np.finfo(float).max / 2),
)
def test_two_sum_symmetric(f1, f2):
    # two_sum must be insensitive to argument order.
    np.testing.assert_equal(two_sum(f1, f2), two_sum(f2, f1))
@given(
    floats(allow_nan=False, allow_infinity=False),
    floats(allow_nan=False, allow_infinity=False),
)
@example(f1=8.988465674311579e307, f2=8.98846567431158e307)
@example(f1=8.988465674311579e307, f2=-8.98846567431158e307)
@example(f1=-8.988465674311579e307, f2=-8.98846567431158e307)
def test_two_sum_size(f1, f2):
    # The remainder must be at the eps-level of the sum, unless both parts
    # are zero or the true sum overflows to infinity.
    r1, r2 = two_sum(f1, f2)
    assert (
        abs(r1) > abs(r2) / np.finfo(float).eps
        or r1 == r2 == 0
        or not np.isfinite(f1 + f2)
    )
@given(integers(-(2**52) + 2, 2**52 - 2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_day_frac_harmless(i, f):
    # Normalizing with day_frac must not change the represented value,
    # checked against 40-digit Decimal arithmetic.
    with decimal.localcontext(decimal.Context(prec=40)):
        a = Decimal(i) + Decimal(f)
        i_d, f_d = day_frac(i, f)
        a_d = Decimal(i_d) + Decimal(f_d)
        assert_almost_equal(a, a_d, atol=Decimal(tiny), rtol=Decimal(0))
@given(integers(-(2**52) + 2, 2**52 - 2), floats(-0.5, 0.5))
@example(i=65536, f=3.637978807091714e-12)
@example(i=1, f=0.49999999999999994)
def test_day_frac_exact(i, f):
    # Already-normalized input must pass through day_frac unchanged.
    # (|f| == 0.5 is only canonical when i is even.)
    assume(abs(f) < 0.5 or i % 2 == 0)
    i_d, f_d = day_frac(i, f)
    assert i == i_d
    assert f == f_d
@given(integers(-(2**52) + 2, 2**52 - 2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_day_frac_idempotent(i, f):
    # Applying day_frac twice must give the same result as applying it once.
    i_d, f_d = day_frac(i, f)
    assert (i_d, f_d) == day_frac(i_d, f_d)
@given(integers(-(2**52) + 2, 2**52 - int(erfa.DJM0) - 3), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_mjd_initialization_precise(i, f):
    # Initializing from MJD must agree with a direct day_frac conversion
    # (including the MJD->JD offset) to within a nanosecond.
    t = Time(val=i, val2=f, format="mjd", scale="tai")
    jd1, jd2 = day_frac(i + erfa.DJM0, f)
    jd1_t, jd2_t = day_frac(t.jd1, t.jd2)
    assert (abs((jd1 - jd1_t) + (jd2 - jd2_t)) * u.day).to(u.ns) < 1 * u.ns
@given(jd_arrays(unreasonable_jd()))
def test_day_frac_always_less_than_half(jds):
    # day_frac output: integral day, |frac| <= 0.5, with |frac| == 0.5 only
    # on even days (round-half-to-even).
    jd1, jd2 = jds
    t_jd1, t_jd2 = day_frac(jd1, jd2)
    assert np.all(t_jd1 % 1 == 0)
    assert np.all(abs(t_jd2) <= 0.5)
    assert np.all((abs(t_jd2) < 0.5) | (t_jd1 % 2 == 0))
@given(integers(-(10**8), 10**8), sampled_from([-0.5, 0.5]))
def test_day_frac_round_to_even(jd1, jd2):
    # A fraction of exactly +/-0.5 must normalize to an even day.
    t_jd1, t_jd2 = day_frac(jd1, jd2)
    assert (abs(t_jd2) == 0.5) and (t_jd1 % 2 == 0)
@given(
    scale=sampled_from([sc for sc in STANDARD_TIME_SCALES if sc != "utc"]),
    jds=unreasonable_jd(),
)
@example(scale="tai", jds=(0.0, 0.0))
@example(scale="tai", jds=(0.0, -31738.500000000346))
def test_resolution_never_decreases(scale, jds):
    # Adding the smallest representable step must always change the time
    # (UTC is excluded here; see the dedicated test below).
    jd1, jd2 = jds
    t = Time(jd1, jd2, format="jd", scale=scale)
    with quiet_erfa():
        assert t != t + dt_tiny
@given(reasonable_jd())
@example(jds=(2442777.5, 0.9999999999999999))
def test_resolution_never_decreases_utc(jds):
    """UTC is very unhappy with unreasonable times,

    Unlike for the other timescales, in which addition is done
    directly, here the time is transformed to TAI before addition, and
    then back to UTC. Hence, some rounding errors can occur and only
    a change of 2*dt_tiny is guaranteed to give a different time.
    """
    jd1, jd2 = jds
    t = Time(jd1, jd2, format="jd", scale="utc")
    with quiet_erfa():
        assert t != t + 2 * dt_tiny
@given(
    scale1=sampled_from(STANDARD_TIME_SCALES),
    scale2=sampled_from(STANDARD_TIME_SCALES),
    jds=unreasonable_jd(),
)
@example(scale1="tcg", scale2="ut1", jds=(2445149.5, 0.47187700984387526))
@example(scale1="tai", scale2="tcb", jds=(2441316.5, 0.0))
@example(scale1="tai", scale2="tcb", jds=(0.0, 0.0))
def test_conversion_preserves_jd1_jd2_invariant(iers_b, scale1, scale2, jds):
    # Scale conversion must keep jd1 integral and jd2 within [-0.5, 0.5].
    jd1, jd2 = jds
    t = Time(jd1, jd2, scale=scale1, format="jd")
    try:
        with quiet_erfa():
            t2 = getattr(t, scale2)
    except iers.IERSRangeError:  # UT1 conversion needs IERS data
        assume(False)
    except ErfaError:
        # Dates ERFA cannot handle are not this test's concern.
        assume(False)
    assert t2.jd1 % 1 == 0
    assert abs(t2.jd2) <= 0.5
    # |jd2| == 0.5 is only allowed with an even jd1 (round-half-to-even).
    assert abs(t2.jd2) < 0.5 or t2.jd1 % 2 == 0
@given(
    scale1=sampled_from(STANDARD_TIME_SCALES),
    scale2=sampled_from(STANDARD_TIME_SCALES),
    jds=unreasonable_jd(),
)
@example(scale1="tai", scale2="utc", jds=(0.0, 0.0))
@example(scale1="utc", scale2="ut1", jds=(2441316.5, 0.9999999999999991))
@example(scale1="ut1", scale2="tai", jds=(2441498.5, 0.9999999999999999))
def test_conversion_never_loses_precision(iers_b, scale1, scale2, jds):
    """Check that time ordering remains if we convert to another scale.

    Here, since scale differences can involve multiplication, we allow
    for losing one ULP, i.e., we test that two times that differ by
    two ULP will keep the same order if changed to another scale.
    """
    jd1, jd2 = jds
    t = Time(jd1, jd2, scale=scale1, format="jd")
    # Near-zero UTC JDs degrade accuracy; not clear why,
    # but also not so relevant, so ignoring.
    if (scale1 == "utc" or scale2 == "utc") and abs(jd1 + jd2) < 1:
        tiny = 100 * u.us
    else:
        tiny = 2 * dt_tiny
    try:
        with quiet_erfa():
            t2 = t + tiny
            t_scale2 = getattr(t, scale2)
            t2_scale2 = getattr(t2, scale2)
            assert t_scale2 < t2_scale2
    except iers.IERSRangeError:  # UT1 conversion needs IERS data
        # Only dates within the IERS table range are valid examples.
        assume(scale1 != "ut1" or 2440000 < jd1 + jd2 < 2458000)
        assume(scale2 != "ut1" or 2440000 < jd1 + jd2 < 2458000)
        raise
    except ErfaError:
        # If the generated date is too early to compute a UTC julian date,
        # and we're not converting between scales which are known to be safe,
        # tell Hypothesis that this example is invalid and to try another.
        # See https://docs.astropy.org/en/latest/time/index.html#time-scale
        barycentric = {scale1, scale2}.issubset({"tcb", "tdb"})
        geocentric = {scale1, scale2}.issubset({"tai", "tt", "tcg"})
        assume(jd1 + jd2 >= -31738.5 or geocentric or barycentric)
        raise
    except AssertionError:
        # Before 1972, TAI-UTC changed smoothly but not always very
        # consistently; this can cause trouble on day boundaries for UTC to
        # UT1; it is not clear whether this will ever be resolved (and is
        # unlikely ever to matter).
        # Furthermore, exactly at leap-second boundaries, it is possible to
        # get the wrong leap-second correction due to rounding errors.
        # The latter is xfail'd for now, but should be fixed; see gh-13517.
        if "ut1" in (scale1, scale2):
            if abs(t_scale2 - t2_scale2 - 1 * u.s) < 1 * u.ms:
                pytest.xfail()
            assume(t.jd > 2441317.5 or t.jd2 < 0.4999999)
        raise
@given(sampled_from(leap_second_deltas), floats(0.1, 0.9))
def test_leap_stretch_mjd(d, f):
    # A fraction f through a leap-second day corresponds to the same
    # fraction of the stretched (86400 + delta s) day.
    mjd, delta = d
    t0 = Time(mjd, format="mjd", scale="utc")
    th = Time(mjd + f, format="mjd", scale="utc")
    t1 = Time(mjd + 1, format="mjd", scale="utc")
    assert_quantity_allclose((t1 - t0).to(u.s), (1 * u.day + delta * u.s))
    assert_quantity_allclose((th - t0).to(u.s), f * (1 * u.day + delta * u.s))
    assert_quantity_allclose((t1 - th).to(u.s), (1 - f) * (1 * u.day + delta * u.s))
@given(
    scale=sampled_from(STANDARD_TIME_SCALES),
    jds=unreasonable_jd(),
    delta=floats(-10000, 10000),
)
@example(scale="utc", jds=(0.0, 2.2204460492503136e-13), delta=6.661338147750941e-13)
@example(
    scale="utc", jds=(2441682.5, 2.2204460492503136e-16), delta=7.327471962526035e-12
)
@example(scale="utc", jds=(0.0, 5.787592627370942e-13), delta=0.0)
@example(scale="utc", jds=(1.0, 0.25000000023283064), delta=-1.0)
@example(scale="utc", jds=(0.0, 0.0), delta=2 * 2.220446049250313e-16)
@example(scale="utc", jds=(2442778.5, 0.0), delta=-2.220446049250313e-16)
def test_jd_add_subtract_round_trip(scale, jds, delta):
    # (t + delta) - delta must return t to within a couple of ulps, and a
    # large-enough delta must be visible at all.
    jd1, jd2 = jds
    minimum_for_change = np.finfo(float).eps
    thresh = 2 * dt_tiny
    if scale == "utc":
        if jd1 + jd2 < 1 or jd1 + jd2 + delta < 1:
            # Near-zero UTC JDs degrade accuracy; not clear why,
            # but also not so relevant, so ignoring.
            minimum_for_change = 1e-9
            thresh = minimum_for_change * u.day
        else:
            # UTC goes via TAI, so one can lose an extra bit.
            minimum_for_change *= 2

    t = Time(jd1, jd2, scale=scale, format="jd")
    try:
        with quiet_erfa():
            t2 = t + delta * u.day
            if abs(delta) >= minimum_for_change:
                assert t2 != t
            t3 = t2 - delta * u.day
            assert_almost_equal(t3, t, atol=thresh, rtol=0)
    except ErfaError:
        # Only dates ERFA can convert are valid UTC examples.
        assume(scale != "utc" or 2440000 < jd1 + jd2 < 2460000)
        raise
@given(
    scale=sampled_from(TimeDelta.SCALES),
    jds=reasonable_jd(),
    delta=floats(-3 * tiny, 3 * tiny),
)
# Pinned regression examples with deltas right at the precision limit.
@example(scale="tai", jds=(0.0, 3.5762786865234384), delta=2.220446049250313e-16)
@example(scale="tai", jds=(2441316.5, 0.0), delta=6.938893903907228e-17)
@example(scale="tai", jds=(2441317.5, 0.0), delta=-6.938893903907228e-17)
@example(scale="tai", jds=(2440001.0, 0.49999999999999994), delta=5.551115123125783e-17)
def test_time_argminmaxsort(scale, jds, delta):
    """argmin/argmax/argsort must honour sub-eps differences in two-part JDs."""
    jd1, jd2 = jds
    # Two-element Time whose items differ by exactly `delta` days.
    t = Time(jd1, jd2, scale=scale, format="jd") + TimeDelta(
        [0, delta], scale=scale, format="jd"
    )
    imin = t.argmin()
    imax = t.argmax()
    isort = t.argsort()
    # Be careful in constructing diff, for case that abs(jd2[1]-jd2[0]) ~ 1.
    # and that is compensated by jd1[1]-jd1[0] (see example above).
    diff, extra = two_sum(t.jd2[1], -t.jd2[0])
    diff += t.jd1[1] - t.jd1[0]
    diff += extra
    if diff < 0:  # item 1 smaller
        assert delta < 0
        assert imin == 1 and imax == 0 and np.all(isort == [1, 0])
    elif diff == 0:  # identical within precision
        assert abs(delta) <= tiny
        assert imin == 0 and imax == 0 and np.all(isort == [0, 1])
    else:
        assert delta > 0
        assert imin == 0 and imax == 1 and np.all(isort == [0, 1])
@given(sampled_from(STANDARD_TIME_SCALES), unreasonable_jd(), unreasonable_jd())
@example(scale="utc", jds_a=(2455000.0, 0.0), jds_b=(2443144.5, 0.5000462962962965))
@example(
    scale="utc",
    jds_a=(2459003.0, 0.267502885949074),
    jds_b=(2454657.001045462, 0.49895453779026877),
)
def test_timedelta_full_precision(scale, jds_a, jds_b):
    """Time subtraction and TimeDelta arithmetic must keep full (two-double)
    precision: a dt_tiny change in one endpoint must change the difference,
    and midpoint/extrapolation identities must hold to ~2*dt_tiny."""
    jd1_a, jd2_a = jds_a
    jd1_b, jd2_b = jds_b
    # UTC only works over the range where ERFA's leap-second tables apply.
    assume(
        scale != "utc"
        or (2440000 < jd1_a + jd2_a < 2460000 and 2440000 < jd1_b + jd2_b < 2460000)
    )
    if scale == "utc":
        # UTC subtraction implies a scale change, so possible rounding errors.
        tiny = 2 * dt_tiny
    else:
        tiny = dt_tiny
    t_a = Time(jd1_a, jd2_a, scale=scale, format="jd")
    t_b = Time(jd1_b, jd2_b, scale=scale, format="jd")
    dt = t_b - t_a
    assert dt != (t_b + tiny) - t_a
    with quiet_erfa():
        assert_almost_equal(
            t_b - dt / 2, t_a + dt / 2, atol=2 * dt_tiny, rtol=0, label="midpoint"
        )
        assert_almost_equal(
            t_b + dt, t_a + 2 * dt, atol=2 * dt_tiny, rtol=0, label="up"
        )
        assert_almost_equal(
            t_b - 2 * dt, t_a - dt, atol=2 * dt_tiny, rtol=0, label="down"
        )
@given(
    scale=sampled_from(STANDARD_TIME_SCALES),
    jds_a=unreasonable_jd(),
    jds_b=unreasonable_jd(),
    x=integers(1, 100),
    y=integers(1, 100),
)
def test_timedelta_full_precision_arithmetic(scale, jds_a, jds_b, x, y):
    """Splitting a TimeDelta into rational fractions x/(x+y) and y/(x+y)
    and re-adding them must reproduce the original to (x+y)*dt_tiny."""
    jd1_a, jd2_a = jds_a
    jd1_b, jd2_b = jds_b
    t_a = Time(jd1_a, jd2_a, scale=scale, format="jd")
    t_b = Time(jd1_b, jd2_b, scale=scale, format="jd")
    with quiet_erfa():
        try:
            dt = t_b - t_a
            dt_x = x * dt / (x + y)
            dt_y = y * dt / (x + y)
            assert_almost_equal(dt_x + dt_y, dt, atol=(x + y) * dt_tiny, rtol=0)
        except ErfaError:
            # ERFA failures are only acceptable for UTC outside the
            # leap-second table range; otherwise this is a real error.
            assume(
                scale != "utc"
                or (
                    2440000 < jd1_a + jd2_a < 2460000
                    and 2440000 < jd1_b + jd2_b < 2460000
                )
            )
            raise
@given(
    scale1=sampled_from(STANDARD_TIME_SCALES),
    scale2=sampled_from(STANDARD_TIME_SCALES),
    jds_a=reasonable_jd(),
    jds_b=reasonable_jd(),
)
def test_timedelta_conversion(scale1, scale2, jds_a, jds_b):
    """Converting a TimeDelta between scales must agree (to dt_tiny) with
    subtracting the two Times after converting them to the target scale."""
    jd1_a, jd2_a = jds_a
    jd1_b, jd2_b = jds_b
    # not translation invariant so can't convert TimeDelta
    assume("utc" not in [scale1, scale2])
    # Conversions a problem but within UT1 it should work
    assume(("ut1" not in [scale1, scale2]) or scale1 == scale2)
    t_a = Time(jd1_a, jd2_a, scale=scale1, format="jd")
    t_b = Time(jd1_b, jd2_b, scale=scale2, format="jd")
    with quiet_erfa():
        dt = t_b - t_a
        t_a_2 = getattr(t_a, scale2)
        t_b_2 = getattr(t_b, scale2)
        dt_2 = getattr(dt, scale2)
        assert_almost_equal(
            t_b_2 - t_a_2, dt_2, atol=dt_tiny, rtol=0, label="converted"
        )
        # Implicit conversion
        assert_almost_equal(
            t_b_2 - t_a_2, dt, atol=dt_tiny, rtol=0, label="not converted"
        )
# UTC disagrees when there are leap seconds
# Parametrization helper: every standard scale, but with "utc" marked xfail,
# since datetime/timedelta arithmetic cannot represent leap seconds.
_utc_bad = [
    (pytest.param(s, marks=pytest.mark.xfail) if s == "utc" else s)
    for s in STANDARD_TIME_SCALES
]
@given(datetimes(), datetimes())  # datetimes have microsecond resolution
@example(dt1=datetime(1235, 1, 1, 0, 0), dt2=datetime(9950, 1, 1, 0, 0, 0, 890773))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_difference_agrees_with_timedelta(scale, dt1, dt2):
    """Subtracting two Times built from datetimes must match the TimeDelta
    built from the corresponding datetime difference (to 2 microseconds)."""
    t1 = Time(dt1, scale=scale)
    t2 = Time(dt2, scale=scale)
    assert_almost_equal(
        t2 - t1,
        # UTC TimeDeltas are not well defined, so leave the scale unset there.
        TimeDelta(dt2 - dt1, scale=None if scale == "utc" else scale),
        atol=2 * u.us,
    )
@given(
    days=integers(-3000 * 365, 3000 * 365),
    microseconds=integers(0, 24 * 60 * 60 * 1000000),
)
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_to_timedelta(scale, days, microseconds):
    """A TimeDelta built from datetime.timedelta must equal the one built
    directly from the equivalent two-part JD value."""
    td = timedelta(days=days, microseconds=microseconds)
    assert TimeDelta(td, scale=scale) == TimeDelta(
        days, microseconds / (86400 * 1e6), scale=scale, format="jd"
    )
@given(
    days=integers(-3000 * 365, 3000 * 365),
    microseconds=integers(0, 24 * 60 * 60 * 1000000),
)
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_timedelta_roundtrip(scale, days, microseconds):
    """datetime.timedelta -> TimeDelta -> .value must round-trip exactly."""
    td = timedelta(days=days, microseconds=microseconds)
    assert td == TimeDelta(td, scale=scale).value
@given(days=integers(-3000 * 365, 3000 * 365), day_frac=floats(0, 1))
@example(days=262144, day_frac=2.314815006343452e-11)
@example(days=1048576, day_frac=1.157407503171726e-10)
@pytest.mark.parametrize("scale", _utc_bad)
def test_timedelta_datetime_roundtrip(scale, days, day_frac):
    """TimeDelta -> datetime format -> TimeDelta must round-trip to the
    microsecond resolution of datetime.timedelta (2 us tolerance)."""
    td = TimeDelta(days, day_frac, format="jd", scale=scale)
    td.format = "datetime"
    assert_almost_equal(td, TimeDelta(td.value, scale=scale), atol=2 * u.us)
@given(integers(-3000 * 365, 3000 * 365), floats(0, 1))
@example(days=262144, day_frac=2.314815006343452e-11)
@pytest.mark.parametrize("scale", _utc_bad)
def test_timedelta_from_parts(scale, days, day_frac):
    """A TimeDelta built from (days, day_frac) together must equal the sum
    of TimeDeltas built from each part separately."""
    kwargs = dict(format="jd", scale=scale)
    whole = TimeDelta(days, day_frac, **kwargs)
    from_parts = TimeDelta(days, **kwargs) + TimeDelta(day_frac, **kwargs)
    assert whole == from_parts
def test_datetime_difference_agrees_with_timedelta_no_hypothesis():
    """Plain (non-hypothesis) check that the difference of two Times built
    from datetimes matches the TimeDelta of the datetime difference."""
    early = datetime(1235, 1, 1, 0, 0)
    late = datetime(9950, 1, 1, 0, 0, 0, 890773)
    t_early = Time(early, scale="tai")
    t_late = Time(late, scale="tai")
    time_diff = t_late - t_early
    expected = TimeDelta(late - early, scale="tai")
    assert abs(time_diff - expected) < 1 * u.us
# datetimes have microsecond resolution
@given(datetimes(), timedeltas())
# Pinned regression examples near the datetime range limits.
@example(dt=datetime(2000, 1, 1, 0, 0), td=timedelta(days=-397683, microseconds=2))
@example(dt=datetime(2179, 1, 1, 0, 0), td=timedelta(days=-795365, microseconds=53))
@example(dt=datetime(2000, 1, 1, 0, 0), td=timedelta(days=1590729, microseconds=10))
@example(
    dt=datetime(4357, 1, 1, 0, 0), td=timedelta(days=-1590729, microseconds=107770)
)
@example(
    dt=datetime(4357, 1, 1, 0, 0, 0, 29),
    td=timedelta(days=-1590729, microseconds=746292),
)
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_timedelta_sum(scale, dt, td):
    """Time + TimeDelta must agree with datetime + timedelta (to 2 us)."""
    try:
        # Skip combinations whose sum overflows the datetime range.
        dt + td
    except OverflowError:
        assume(False)
    dt_a = Time(dt, scale=scale)
    td_a = TimeDelta(td, scale=None if scale == "utc" else scale)
    assert_almost_equal(dt_a + td_a, Time(dt + td, scale=scale), atol=2 * u.us)
@given(
    jds=reasonable_jd(),
    lat1=floats(-90, 90),
    lat2=floats(-90, 90),
    lon=floats(-180, 180),
)
@pytest.mark.parametrize("kind", ["apparent", "mean"])
def test_sidereal_lat_independent(iers_b, kind, jds, lat1, lat2, lon):
    """Sidereal time must not depend on the observer's latitude."""
    jd1, jd2 = jds
    t1 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat1))
    t2 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat2))
    try:
        assert_almost_equal(
            t1.sidereal_time(kind), t2.sidereal_time(kind), atol=1 * u.uas
        )
    except iers.IERSRangeError:
        # Dates outside the IERS table are not valid examples.
        assume(False)
@given(
    jds=reasonable_jd(),
    lat=floats(-90, 90),
    lon=floats(-180, 180),
    lon_delta=floats(-360, 360),
)
@pytest.mark.parametrize("kind", ["apparent", "mean"])
def test_sidereal_lon_independent(iers_b, kind, jds, lat, lon, lon_delta):
    """Shifting the observer's longitude by lon_delta must shift sidereal
    time by exactly lon_delta (mod 360 degrees)."""
    jd1, jd2 = jds
    t1 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat))
    t2 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon + lon_delta, lat))
    try:
        diff = t1.sidereal_time(kind) + lon_delta * u.degree - t2.sidereal_time(kind)
    except iers.IERSRangeError:
        # Dates outside the IERS table are not valid examples.
        assume(False)
    else:
        # diff should be a multiple of 360 deg; shift so the target is 180
        # to avoid the wrap-around point, then compare to 1 mas.
        expected_degrees = (diff.to_value(u.degree) + 180) % 360
        assert_almost_equal(expected_degrees, 180, atol=1 / (60 * 60 * 1000))
|
bf9512db3bf6eab77d4c09dfa1a22874d5b48e02b9eaa851090a196869de3a1a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
from astropy.coordinates import EarthLocation, SkyCoord, solar_system_ephemeris
from astropy.time import Time, TimeDelta
from astropy.utils import iers
from astropy.utils.compat.optional_deps import HAS_JPLEPHEM
class TestHelioBaryCentric:
    """
    Verify time offsets to the solar system barycentre and the heliocentre.
    Uses the WHT observing site.

    Tests are against values returned at time of initial creation of these
    routines.  They agree to an independent SLALIB based implementation
    to 20 microseconds.
    """

    @classmethod
    def setup_class(cls):
        # Disable IERS auto-download for reproducibility; restored below.
        cls.orig_auto_download = iers.conf.auto_download
        iers.conf.auto_download = False

    @classmethod
    def teardown_class(cls):
        iers.conf.auto_download = cls.orig_auto_download

    def setup_method(self):
        # William Herschel Telescope, La Palma (east longitude in degrees).
        wht = EarthLocation(342.12 * u.deg, 28.758333333333333 * u.deg, 2327 * u.m)
        self.obstime = Time("2013-02-02T23:00", location=wht)
        self.obstime2 = Time("2013-08-02T23:00", location=wht)
        self.obstimeArr = Time(["2013-02-02T23:00", "2013-08-02T23:00"], location=wht)
        self.star = SkyCoord(
            "08:08:08 +32:00:00", unit=(u.hour, u.degree), frame="icrs"
        )

    def test_heliocentric(self):
        """Light travel time to the heliocentre matches the reference value."""
        hval = self.obstime.light_travel_time(self.star, "heliocentric")
        assert isinstance(hval, TimeDelta)
        assert hval.scale == "tdb"
        assert abs(hval - 461.43037870502235 * u.s) < 1.0 * u.us

    def test_barycentric(self):
        """Light travel time to the barycentre matches the reference value."""
        bval = self.obstime.light_travel_time(self.star, "barycentric")
        assert isinstance(bval, TimeDelta)
        assert bval.scale == "tdb"
        assert abs(bval - 460.58538779827836 * u.s) < 1.0 * u.us

    def test_arrays(self):
        """Array-valued obstime must give the same answers as scalar calls."""
        bval1 = self.obstime.light_travel_time(self.star, "barycentric")
        bval2 = self.obstime2.light_travel_time(self.star, "barycentric")
        bval_arr = self.obstimeArr.light_travel_time(self.star, "barycentric")
        hval1 = self.obstime.light_travel_time(self.star, "heliocentric")
        hval2 = self.obstime2.light_travel_time(self.star, "heliocentric")
        hval_arr = self.obstimeArr.light_travel_time(self.star, "heliocentric")
        assert hval_arr[0] - hval1 < 1.0 * u.us
        assert hval_arr[1] - hval2 < 1.0 * u.us
        assert bval_arr[0] - bval1 < 1.0 * u.us
        assert bval_arr[1] - bval2 < 1.0 * u.us

    @pytest.mark.remote_data
    @pytest.mark.skipif(not HAS_JPLEPHEM, reason="requires jplephem")
    def test_ephemerides(self):
        """Built-in and JPL ephemerides must agree closely but not exactly."""
        bval1 = self.obstime.light_travel_time(self.star, "barycentric")
        with solar_system_ephemeris.set("jpl"):
            bval2 = self.obstime.light_travel_time(
                self.star, "barycentric", ephemeris="jpl"
            )
        # should differ by less than 0.1 ms, but not be the same
        assert abs(bval1 - bval2) < 1.0 * u.ms
        assert abs(bval1 - bval2) > 1.0 * u.us
|
a552736b504b7adf2ac8f469480cd98ef96627d147d12f7aa158030bac6797ca | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import numpy as np
from astropy.time import Time
class TestPickle:
    """Basic pickle test of time"""

    def test_pickle(self):
        """Round-trip Time objects (array and scalar) through every pickle
        protocol.

        Uses ``range(pickle.HIGHEST_PROTOCOL + 1)`` so the highest available
        protocol is itself exercised; ``range(pickle.HIGHEST_PROTOCOL)``
        would silently skip it (off-by-one).
        """
        times = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"]
        t1 = Time(times, scale="utc")
        for prot in range(pickle.HIGHEST_PROTOCOL + 1):
            t1d = pickle.dumps(t1, prot)
            t1l = pickle.loads(t1d)
            assert np.all(t1l == t1)

        t2 = Time("2012-06-30 12:00:00", scale="utc")
        for prot in range(pickle.HIGHEST_PROTOCOL + 1):
            t2d = pickle.dumps(t2, prot)
            t2l = pickle.loads(t2d)
            assert t2l == t2
|
ecfb2885caa2c6df961b9b756dfb513c9c614a2730cd412577252dd44e0424da | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
import erfa
import pytest
import astropy.time.core
from astropy.time import Time, update_leap_seconds
from astropy.utils import iers
from astropy.utils.exceptions import AstropyWarning
class TestUpdateLeapSeconds:
    """Tests for ``update_leap_seconds``, which refreshes the leap-second
    table that ERFA uses internally (global state, hence the careful
    setup/teardown)."""

    def setup_method(self):
        self.built_in = iers.LeapSeconds.from_iers_leap_seconds()
        self.erfa_ls = iers.LeapSeconds.from_erfa()
        now = datetime.now()
        # A table expiring at least ~150 days from now counts as current.
        self.good_enough = now + timedelta(150)

    def teardown_method(self):
        # Restore ERFA's original leap-second table, since tests mutate it.
        self.erfa_ls.update_erfa_leap_seconds(initialize_erfa=True)

    def test_auto_update_leap_seconds(self):
        # Sanity check.
        assert erfa.dat(2018, 1, 1, 0.0) == 37.0

        # Set expired leap seconds
        expired = self.erfa_ls[self.erfa_ls["year"] < 2017]
        expired.update_erfa_leap_seconds(initialize_erfa="empty")
        # Check the 2017 leap second is indeed missing.
        assert erfa.dat(2018, 1, 1, 0.0) == 36.0

        # Update with missing leap seconds.
        n_update = update_leap_seconds([iers.IERS_LEAP_SECOND_FILE])
        assert n_update >= 1
        assert erfa.leap_seconds.expires == self.built_in.expires
        assert erfa.dat(2018, 1, 1, 0.0) == 37.0

        # Doing it again does not change anything
        n_update2 = update_leap_seconds([iers.IERS_LEAP_SECOND_FILE])
        assert n_update2 == 0
        assert erfa.dat(2018, 1, 1, 0.0) == 37.0

    @pytest.mark.remote_data
    def test_never_expired_if_connected(self):
        assert self.erfa_ls.expires > datetime.now()
        assert self.erfa_ls.expires >= self.good_enough

    @pytest.mark.remote_data
    def test_auto_update_always_good(self):
        self.erfa_ls.update_erfa_leap_seconds(initialize_erfa="only")
        update_leap_seconds()
        assert not erfa.leap_seconds.expired
        assert erfa.leap_seconds.expires > self.good_enough

    def test_auto_update_bad_file(self):
        # A missing file should warn, not raise.
        with pytest.warns(AstropyWarning, match="FileNotFound"):
            update_leap_seconds(["nonsense"])

    def test_auto_update_corrupt_file(self, tmp_path):
        # A file stripped of its header (no expiration line) should warn.
        bad_file = str(tmp_path / "no_expiration")
        with open(iers.IERS_LEAP_SECOND_FILE) as fh:
            lines = fh.readlines()
        with open(bad_file, "w") as fh:
            fh.write("\n".join([line for line in lines if not line.startswith("#")]))
        with pytest.warns(AstropyWarning, match="ValueError.*did not find expiration"):
            update_leap_seconds([bad_file])

    def test_auto_update_expired_file(self, tmp_path):
        # Set up expired ERFA leap seconds.
        expired = self.erfa_ls[self.erfa_ls["year"] < 2017]
        expired.update_erfa_leap_seconds(initialize_erfa="empty")
        # Create similarly expired file.
        expired_file = str(tmp_path / "expired.dat")
        with open(expired_file, "w") as fh:
            fh.write(
                "\n".join(
                    ["# File expires on 28 June 2010"] + [str(item) for item in expired]
                )
            )
        # When every candidate source is stale, a stale warning is issued.
        with pytest.warns(iers.IERSStaleWarning):
            update_leap_seconds(["erfa", expired_file])

    def test_init_thread_safety(self, monkeypatch):
        # Set up expired ERFA leap seconds.
        expired = self.erfa_ls[self.erfa_ls["year"] < 2017]
        expired.update_erfa_leap_seconds(initialize_erfa="empty")
        # Force re-initialization, even if another test already did it
        monkeypatch.setattr(
            astropy.time.core,
            "_LEAP_SECONDS_CHECK",
            astropy.time.core._LeapSecondsCheck.NOT_STARTED,
        )
        # All threads racing through the one-time init must see consistent
        # leap-second state and produce identical conversions.
        workers = 4
        with ThreadPoolExecutor(max_workers=workers) as executor:
            futures = [
                executor.submit(lambda: str(Time("2019-01-01 00:00:00.000").tai))
                for i in range(workers)
            ]
            results = [future.result() for future in futures]
        assert results == ["2019-01-01 00:00:37.000"] * workers
|
568f8dc36f9adb5d16cd01cd55945cec0381d5e4dccbe2748c09f261e716633a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
import pytest
from astropy.time import Time
from astropy.utils.iers import conf as iers_conf
from astropy.utils.iers import iers # used in testing
# Comparison helpers: JDs to 1e-9 day; seconds to 0.1 ms absolute.
allclose_jd = functools.partial(np.allclose, rtol=0, atol=1e-9)
allclose_sec = functools.partial(np.allclose, rtol=1e-15, atol=1e-4)
# 0.1 ms atol; IERS-B files change at that level.

# Probe once whether a local IERS-A table is available; used by skipif below.
try:
    iers.IERS_A.open()  # check if IERS_A is available
except OSError:
    HAS_IERS_A = False
else:
    HAS_IERS_A = True
def do_ut1_prediction_tst(iers_type):
    """Shared check that UT1-UTC for the current time comes from the IERS-A
    prediction section of ``iers_type``, both when set explicitly and when
    installed via ``iers.earth_orientation_table``."""
    tnow = Time.now()
    iers_tab = iers_type.open()
    tnow.delta_ut1_utc, status = iers_tab.ut1_utc(tnow, return_status=True)
    assert status == iers.FROM_IERS_A_PREDICTION
    tnow_ut1_jd = tnow.ut1.jd
    assert tnow_ut1_jd != tnow.jd
    delta_ut1_utc = tnow.delta_ut1_utc

    with iers.earth_orientation_table.set(iers_type.open()):
        # The default table should now give the same prediction implicitly.
        delta2, status2 = tnow.get_delta_ut1_utc(return_status=True)
        assert status2 == status
        assert delta2.to_value("s") == delta_ut1_utc
        tnow_ut1 = tnow.ut1
        # NOTE(review): reaches into the private cache attribute to confirm
        # the same delta was applied.
        assert tnow_ut1._delta_ut1_utc == delta_ut1_utc
        assert tnow_ut1.jd != tnow.jd
@pytest.mark.remote_data
class TestTimeUT1Remote:
    """UT1 tests that require downloading IERS data."""

    # pytest invokes setup_class/teardown_class as classmethods even
    # without the decorator.
    def setup_class(cls):
        # Need auto_download so that IERS_B won't be loaded and cause tests to
        # fail.
        iers_conf.auto_download = True

    def teardown_class(cls):
        # This setting is to be consistent with astropy/conftest.py
        iers_conf.auto_download = False

    def test_utc_to_ut1(self):
        "Test conversion of UTC to UT1, making sure to include a leap second"
        t = Time(
            [
                "2012-06-30 12:00:00",
                "2012-06-30 23:59:59",
                "2012-06-30 23:59:60",
                "2012-07-01 00:00:00",
                "2012-07-01 12:00:00",
            ],
            scale="utc",
        )
        t_ut1_jd = t.ut1.jd
        # Reference JDs computed at the time this test was written.
        t_comp = np.array(
            [
                2456108.9999932079,
                2456109.4999816339,
                2456109.4999932083,
                2456109.5000047823,
                2456110.0000047833,
            ]
        )
        assert allclose_jd(t_ut1_jd, t_comp)

        # Test that conversion of ut1 to utc works
        t_back = t.ut1.utc
        assert allclose_jd(t.jd, t_back.jd)

        # Just check this does not raise for the current time.
        tnow = Time.now()
        tnow.ut1

    def test_ut1_iers_auto(self):
        do_ut1_prediction_tst(iers.IERS_Auto)
class TestTimeUT1:
    """Test Time.ut1 using IERS tables"""

    def test_ut1_to_utc(self):
        """Also test the reverse, around the leap second
        (round-trip test closes #2077)"""
        with iers_conf.set_temp("auto_download", False):
            t = Time(
                [
                    "2012-06-30 12:00:00",
                    "2012-06-30 23:59:59",
                    "2012-07-01 00:00:00",
                    "2012-07-01 00:00:01",
                    "2012-07-01 12:00:00",
                ],
                scale="ut1",
            )
            t_utc_jd = t.utc.jd
            # Reference values computed when this test was written.
            t_comp = np.array(
                [
                    2456109.0000010049,
                    2456109.4999836441,
                    2456109.4999952177,
                    2456109.5000067917,
                    2456109.9999952167,
                ]
            )
            assert allclose_jd(t_utc_jd, t_comp)
            # Round trip back to UT1.
            t_back = t.utc.ut1
            assert allclose_jd(t.jd, t_back.jd)

    def test_empty_ut1(self):
        """Testing for a zero-length Time object from UTC to UT1
        when an empty array is passed"""
        from astropy import units as u

        with iers_conf.set_temp("auto_download", False):
            t = Time(["2012-06-30 12:00:00"]) + np.arange(24) * u.hour
            t_empty = t[[]].ut1
            assert isinstance(t_empty, Time)
            assert t_empty.scale == "ut1"
            assert t_empty.size == 0

    def test_delta_ut1_utc(self):
        """Accessing delta_ut1_utc should try to get it from IERS
        (closes #1924 partially)"""
        with iers_conf.set_temp("auto_download", False):
            t = Time("2012-06-30 12:00:00", scale="utc")
            assert not hasattr(t, "_delta_ut1_utc")
            # accessing delta_ut1_utc calculates it
            assert allclose_sec(t.delta_ut1_utc, -0.58682110003124965)
            # and keeps it around
            assert allclose_sec(t._delta_ut1_utc, -0.58682110003124965)
class TestTimeUT1SpecificIERSTable:
    """UT1 conversions with an explicitly chosen IERS table."""

    @pytest.mark.skipif(not HAS_IERS_A, reason="requires IERS_A")
    def test_ut1_iers_A(self):
        do_ut1_prediction_tst(iers.IERS_A)

    def test_ut1_iers_B(self):
        # IERS-B has no predictions, so "now" is beyond its range and
        # converting to UT1 must fail with IERSRangeError.
        tnow = Time.now()
        iers_b = iers.IERS_B.open()
        delta1, status1 = tnow.get_delta_ut1_utc(iers_b, return_status=True)
        assert status1 == iers.TIME_BEYOND_IERS_RANGE

        with iers.earth_orientation_table.set(iers.IERS_B.open()):
            delta2, status2 = tnow.get_delta_ut1_utc(return_status=True)
            assert status2 == status1
            with pytest.raises(iers.IERSRangeError):
                tnow.ut1
|
1f0d380a5aa46c3bbf1bebd2d558f4a2171a80e4713b71cefb66a626a9356e6d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.time import Time
class TestGuess:
    """Tests for automatic inference of the time format from input values."""

    def test_guess1(self):
        """ISO-formatted strings are recognized as format='iso'."""
        values = ["1999-01-01 00:00:00.123456789", "2010-01-01 00:00:00"]
        parsed = Time(values, scale="utc")
        expected = (
            "<Time object: scale='utc' format='iso' "
            "value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>"
        )
        assert repr(parsed) == expected

    def test_guess2(self):
        """Mixing an ISO string with a non-matching string raises ValueError."""
        values = ["1999-01-01 00:00:00.123456789", "2010-01 00:00:00"]
        with pytest.raises(ValueError):
            Time(values, scale="utc")

    def test_guess3(self):
        """Year:day-of-year strings are recognized as format='yday'."""
        values = ["1999:001:00:00:00.123456789", "2010:001"]
        parsed = Time(values, scale="utc")
        expected = (
            "<Time object: scale='utc' format='yday' "
            "value=['1999:001:00:00:00.123' '2010:001:00:00:00.000']>"
        )
        assert repr(parsed) == expected

    def test_guess4(self):
        """Bare integers are ambiguous without a format and raise ValueError."""
        values = [10, 20]
        with pytest.raises(ValueError):
            Time(values, scale="utc")
|
190bfcdee3893a98d57beb981ef07698aae0360adeb8489e6b7f7f291d479913 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Column
from astropy.time import Time, TimeDelta
# Absolute tolerance of eps * 1 day, i.e. about 20 ps when expressed in seconds.
allclose_sec = functools.partial(
    np.allclose, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600
)  # 20 ps atol
class TestTimeQuantity:
    """Test Interaction of Time with Quantities"""

    def test_valid_quantity_input(self):
        """Test Time formats that are allowed to take quantity input."""
        q = 2450000.125 * u.day
        t1 = Time(q, format="jd", scale="utc")
        assert t1.value == q.value
        q2 = q.to(u.second)
        t2 = Time(q2, format="jd", scale="utc")
        assert t2.value == q.value == q2.to_value(u.day)
        q3 = q - 2400000.5 * u.day
        t3 = Time(q3, format="mjd", scale="utc")
        assert t3.value == q3.value
        # test we can deal with two quantity arguments, with different units
        qs = 24.0 * 36.0 * u.second
        t4 = Time(q3, qs, format="mjd", scale="utc")
        assert t4.value == (q3 + qs).to_value(u.day)

        qy = 1990.0 * u.yr
        ty1 = Time(qy, format="jyear", scale="utc")
        assert ty1.value == qy.value
        ty2 = Time(qy.to(u.day), format="jyear", scale="utc")
        assert ty2.value == qy.value
        qy2 = 10.0 * u.yr
        tcxc = Time(qy2, format="cxcsec")
        assert tcxc.value == qy2.to_value(u.second)
        tgps = Time(qy2, format="gps")
        assert tgps.value == qy2.to_value(u.second)
        tunix = Time(qy2, format="unix")
        assert tunix.value == qy2.to_value(u.second)
        qd = 2000.0 * 365.0 * u.day
        tplt = Time(qd, format="plot_date", scale="utc")
        assert tplt.value == qd.value

    def test_invalid_quantity_input(self):
        """Quantities with non-time (or dimensionless) units must be rejected."""
        with pytest.raises(u.UnitsError):
            Time(2450000.0 * u.m, format="jd", scale="utc")

        with pytest.raises(u.UnitsError):
            Time(2450000.0 * u.dimensionless_unscaled, format="jd", scale="utc")

    def test_column_with_and_without_units(self):
        """Ensure a Column without a unit is treated as an array [#3648]"""
        a = np.arange(50000.0, 50010.0)
        ta = Time(a, format="mjd")
        c1 = Column(np.arange(50000.0, 50010.0), name="mjd")
        tc1 = Time(c1, format="mjd")
        assert np.all(ta == tc1)
        c2 = Column(np.arange(50000.0, 50010.0), name="mjd", unit="day")
        tc2 = Time(c2, format="mjd")
        assert np.all(ta == tc2)
        c3 = Column(np.arange(50000.0, 50010.0), name="mjd", unit="m")
        with pytest.raises(u.UnitsError):
            Time(c3, format="mjd")

    def test_no_quantity_input_allowed(self):
        """Time formats that are not allowed to take Quantity input."""
        qy = 1990.0 * u.yr
        for fmt in ("iso", "yday", "datetime", "byear", "byear_str", "jyear_str"):
            with pytest.raises(ValueError):
                Time(qy, format=fmt, scale="utc")

    def test_valid_quantity_operations(self):
        """Check that adding a time-valued quantity to a Time gives a Time"""
        t0 = Time(100000.0, format="cxcsec")
        q1 = 10.0 * u.second
        t1 = t0 + q1
        assert isinstance(t1, Time)
        assert t1.value == t0.value + q1.to_value(u.second)
        q2 = 1.0 * u.day
        t2 = t0 - q2
        assert allclose_sec(t2.value, t0.value - q2.to_value(u.second))
        # check broadcasting
        q3 = np.arange(15.0).reshape(3, 5) * u.hour
        t3 = t0 - q3
        assert t3.shape == q3.shape
        assert allclose_sec(t3.value, t0.value - q3.to_value(u.second))

    def test_invalid_quantity_operations(self):
        """Check that comparisons of Time with quantities does not work
        (even for time-like, since we cannot compare Time to TimeDelta)"""
        with pytest.raises(TypeError):
            Time(100000.0, format="cxcsec") > 10.0 * u.m
        with pytest.raises(TypeError):
            Time(100000.0, format="cxcsec") > 10.0 * u.second
class TestTimeDeltaQuantity:
    """Test interaction of TimeDelta with Quantities"""

    def test_valid_quantity_input(self):
        """Test that TimeDelta can take quantity input."""
        q = 500.25 * u.day
        dt1 = TimeDelta(q, format="jd")
        assert dt1.value == q.value
        dt2 = TimeDelta(q, format="sec")
        assert dt2.value == q.to_value(u.second)
        dt3 = TimeDelta(q)
        assert dt3.value == q.value

    def test_invalid_quantity_input(self):
        """Non-time quantities must be rejected on input and in comparisons."""
        with pytest.raises(u.UnitsError):
            TimeDelta(2450000.0 * u.m, format="jd")

        with pytest.raises(u.UnitsError):
            Time(2450000.0 * u.dimensionless_unscaled, format="jd", scale="utc")

        with pytest.raises(TypeError):
            TimeDelta(100, format="sec") > 10.0 * u.m

    def test_quantity_output(self):
        """to()/to_value() must support units, unit strings, and formats."""
        q = 500.25 * u.day
        dt = TimeDelta(q)
        assert dt.to(u.day) == q
        assert dt.to_value(u.day) == q.value
        assert dt.to_value("day") == q.value
        assert dt.to(u.second).value == q.to_value(u.second)
        assert dt.to_value(u.second) == q.to_value(u.second)
        assert dt.to_value("s") == q.to_value(u.second)
        # Following goes through "format", but should be the same.
        assert dt.to_value("sec") == q.to_value(u.second)

    def test_quantity_output_errors(self):
        """Invalid units/format combinations must raise the right errors."""
        dt = TimeDelta(250.0, format="sec")
        with pytest.raises(u.UnitsError):
            dt.to(u.m)
        with pytest.raises(u.UnitsError):
            dt.to_value(u.m)
        with pytest.raises(u.UnitsError):
            dt.to_value(unit=u.m)
        with pytest.raises(
            ValueError,
            match="not one of the known formats.*failed to parse as a unit",
        ):
            dt.to_value("parrot")
        with pytest.raises(TypeError):
            dt.to_value("sec", unit=u.s)
        with pytest.raises(TypeError):
            # TODO: would be nice to make this work!
            dt.to_value(u.s, subfmt="str")

    def test_valid_quantity_operations1(self):
        """Check adding/subtracting/comparing a time-valued quantity works
        with a TimeDelta.  Addition/subtraction should give TimeDelta"""
        t0 = TimeDelta(106400.0, format="sec")
        q1 = 10.0 * u.second
        t1 = t0 + q1
        assert isinstance(t1, TimeDelta)
        assert t1.value == t0.value + q1.to_value(u.second)
        q2 = 1.0 * u.day
        t2 = t0 - q2
        assert isinstance(t2, TimeDelta)
        assert allclose_sec(t2.value, t0.value - q2.to_value(u.second))
        # now comparisons
        assert t0 > q1
        assert t0 < 1.0 * u.yr
        # and broadcasting
        q3 = np.arange(12.0).reshape(4, 3) * u.hour
        t3 = t0 + q3
        assert isinstance(t3, TimeDelta)
        assert t3.shape == q3.shape
        assert allclose_sec(t3.value, t0.value + q3.to_value(u.second))

    def test_valid_quantity_operations2(self):
        """Check that TimeDelta is treated as a quantity where possible."""
        t0 = TimeDelta(100000.0, format="sec")
        f = 1.0 / t0
        assert isinstance(f, u.Quantity)
        assert f.unit == 1.0 / u.day
        g = 10.0 * u.m / u.second**2
        v = t0 * g
        assert isinstance(v, u.Quantity)
        assert u.allclose(v, t0.sec * g.value * u.m / u.second)
        q = np.log10(t0 / u.second)
        assert isinstance(q, u.Quantity)
        assert q.value == np.log10(t0.sec)
        s = 1.0 * u.m
        v = s / t0
        assert isinstance(v, u.Quantity)
        assert u.allclose(v, 1.0 / t0.sec * u.m / u.s)
        t = 1.0 * u.s
        t2 = t0 * t
        assert isinstance(t2, u.Quantity)
        assert u.allclose(t2, t0.sec * u.s**2)
        t3 = [1] / t0
        assert isinstance(t3, u.Quantity)
        assert u.allclose(t3, 1 / (t0.sec * u.s))
        # broadcasting
        t1 = TimeDelta(np.arange(100000.0, 100012.0).reshape(6, 2), format="sec")
        f = np.array([1.0, 2.0]) * u.cycle * u.Hz
        phase = f * t1
        assert isinstance(phase, u.Quantity)
        assert phase.shape == t1.shape
        assert u.allclose(phase, t1.sec * f.value * u.cycle)
        q = t0 * t1
        assert isinstance(q, u.Quantity)
        assert np.all(q == t0.to(u.day) * t1.to(u.day))
        q = t1 / t0
        assert isinstance(q, u.Quantity)
        assert np.all(q == t1.to(u.day) / t0.to(u.day))

    def test_valid_quantity_operations3(self):
        """Test a TimeDelta remains one if possible."""
        t0 = TimeDelta(10.0, format="jd")
        q = 10.0 * u.one
        t1 = q * t0
        assert isinstance(t1, TimeDelta)
        assert t1 == TimeDelta(100.0, format="jd")
        t2 = t0 * q
        assert isinstance(t2, TimeDelta)
        assert t2 == TimeDelta(100.0, format="jd")
        t3 = t0 / q
        assert isinstance(t3, TimeDelta)
        assert t3 == TimeDelta(1.0, format="jd")
        q2 = 1.0 * u.percent
        t4 = t0 * q2
        assert isinstance(t4, TimeDelta)
        assert abs(t4 - TimeDelta(0.1, format="jd")) < 1.0 * u.ns
        q3 = 1.0 * u.hr / (36.0 * u.s)
        t5 = q3 * t0
        # Fixed copy-paste bug: this previously re-checked t4 instead of t5.
        assert isinstance(t5, TimeDelta)
        assert abs(t5 - TimeDelta(1000.0, format="jd")) < 1.0 * u.ns
        # Test multiplication with a unit.
        t6 = t0 * u.one
        assert isinstance(t6, TimeDelta)
        assert t6 == TimeDelta(10.0, format="jd")
        t7 = u.one * t0
        assert isinstance(t7, TimeDelta)
        assert t7 == TimeDelta(10.0, format="jd")
        t8 = t0 * ""
        assert isinstance(t8, TimeDelta)
        assert t8 == TimeDelta(10.0, format="jd")
        t9 = "" * t0
        assert isinstance(t9, TimeDelta)
        assert t9 == TimeDelta(10.0, format="jd")
        t10 = t0 / u.one
        assert isinstance(t10, TimeDelta)
        # Fixed copy-paste bug: this previously re-checked t6 instead of t10.
        assert t10 == TimeDelta(10.0, format="jd")
        t11 = t0 / ""
        assert isinstance(t11, TimeDelta)
        assert t11 == TimeDelta(10.0, format="jd")
        t12 = t0 / [1]
        assert isinstance(t12, TimeDelta)
        assert t12 == TimeDelta(10.0, format="jd")
        t13 = [1] * t0
        assert isinstance(t13, TimeDelta)
        assert t13 == TimeDelta(10.0, format="jd")

    def test_invalid_quantity_operations(self):
        """Check comparisons of TimeDelta with non-time quantities fails."""
        with pytest.raises(TypeError):
            TimeDelta(100000.0, format="sec") > 10.0 * u.m

    def test_invalid_quantity_operations2(self):
        """Check that operations with non-time/quantity fail."""
        td = TimeDelta(100000.0, format="sec")
        with pytest.raises(TypeError):
            td * object()
        with pytest.raises(TypeError):
            td / object()

    def test_invalid_quantity_broadcast(self):
        """Check broadcasting rules in interactions with Quantity."""
        t0 = TimeDelta(np.arange(12.0).reshape(4, 3), format="sec")
        with pytest.raises(ValueError):
            t0 + np.arange(4.0) * u.s
class TestDeltaAttributes:
    """Setting delta_ut1_utc / delta_tdb_tt must accept Quantities and
    TimeDeltas, and reject quantities with non-time units."""

    def test_delta_ut1_utc(self):
        t = Time("2010-01-01 00:00:00", format="iso", scale="utc", precision=6)
        t.delta_ut1_utc = 0.3 * u.s
        assert t.ut1.iso == "2010-01-01 00:00:00.300000"
        t.delta_ut1_utc = 0.4 / 60.0 * u.minute
        assert t.ut1.iso == "2010-01-01 00:00:00.400000"
        with pytest.raises(u.UnitsError):
            t.delta_ut1_utc = 0.4 * u.m
        # Also check that a TimeDelta works.
        t.delta_ut1_utc = TimeDelta(0.3, format="sec")
        assert t.ut1.iso == "2010-01-01 00:00:00.300000"
        t.delta_ut1_utc = TimeDelta(0.5 / 24.0 / 3600.0, format="jd")
        assert t.ut1.iso == "2010-01-01 00:00:00.500000"

    def test_delta_tdb_tt(self):
        t = Time("2010-01-01 00:00:00", format="iso", scale="tt", precision=6)
        t.delta_tdb_tt = 20.0 * u.second
        assert t.tdb.iso == "2010-01-01 00:00:20.000000"
        t.delta_tdb_tt = 30.0 / 60.0 * u.minute
        assert t.tdb.iso == "2010-01-01 00:00:30.000000"
        with pytest.raises(u.UnitsError):
            t.delta_tdb_tt = 0.4 * u.m
        # Also check that a TimeDelta works.
        t.delta_tdb_tt = TimeDelta(40.0, format="sec")
        assert t.tdb.iso == "2010-01-01 00:00:40.000000"
        t.delta_tdb_tt = TimeDelta(50.0 / 24.0 / 3600.0, format="jd")
        assert t.tdb.iso == "2010-01-01 00:00:50.000000"
# Each case sums to the same ~5e8 s interval, built from different units.
@pytest.mark.parametrize(
    "q1, q2",
    (
        (5e8 * u.s, None),
        (5e17 * u.ns, None),
        (4e8 * u.s, 1e17 * u.ns),
        (4e14 * u.us, 1e17 * u.ns),
    ),
)
def test_quantity_conversion_rounding(q1, q2):
    """Check that no rounding errors are incurred by unit conversion.

    This occurred before as quantities in seconds were converted to days
    before trying to split them into two-part doubles.  See gh-7622.
    """
    t = Time("2001-01-01T00:00:00.", scale="tai")
    expected = Time("2016-11-05T00:53:20.", scale="tai")
    if q2 is None:
        t0 = t + q1
    else:
        t0 = t + q1 + q2
    assert abs(t0 - expected) < 20 * u.ps

    dt1 = TimeDelta(q1, q2)
    t1 = t + dt1
    assert abs(t1 - expected) < 20 * u.ps

    dt2 = TimeDelta(q1, q2, format="sec")
    t2 = t + dt2
    assert abs(t2 - expected) < 20 * u.ps
|
ac5ad987afe33ed179f48e493ae59e517071d82d49ebb48f2e258fe57d7745eb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import numpy as np
import pytest
import astropy.units as u
from astropy.time import Time, TimeDelta
class TestTimeComparisons:
    """Test Comparisons of Time and TimeDelta classes"""

    def setup_method(self):
        # t1 and t2 overlap: element 5 of each is MJD 50000.
        self.t1 = Time(np.arange(49995, 50005), format="mjd", scale="utc")
        self.t2 = Time(np.arange(49000, 51000, 200), format="mjd", scale="utc")

    def test_miscompares(self):
        """
        If an incompatible object is compared to a Time object, == should
        return False and != should return True. All other comparison
        operators should raise a TypeError.
        """
        t1 = Time("J2000", scale="utc")
        for op, op_str in (
            (operator.ge, ">="),
            (operator.gt, ">"),
            (operator.le, "<="),
            (operator.lt, "<"),
        ):
            with pytest.raises(TypeError):
                op(t1, None)
        # Keep == and != as they are specifically meant to test Time.__eq__
        # and Time.__ne__
        assert (t1 == None) is False
        assert (t1 != None) is True

    def test_time(self):
        """Elementwise comparisons of two Time arrays behave like numpy."""
        t1_lt_t2 = self.t1 < self.t2
        assert np.all(
            t1_lt_t2
            == np.array(
                [False, False, False, False, False, False, True, True, True, True]
            )
        )
        t1_ge_t2 = self.t1 >= self.t2
        assert np.all(t1_ge_t2 != t1_lt_t2)

        t1_le_t2 = self.t1 <= self.t2
        assert np.all(
            t1_le_t2
            == np.array(
                [False, False, False, False, False, True, True, True, True, True]
            )
        )
        t1_gt_t2 = self.t1 > self.t2
        assert np.all(t1_gt_t2 != t1_le_t2)

        t1_eq_t2 = self.t1 == self.t2
        assert np.all(
            t1_eq_t2
            == np.array(
                [False, False, False, False, False, True, False, False, False, False]
            )
        )
        t1_ne_t2 = self.t1 != self.t2
        assert np.all(t1_ne_t2 != t1_eq_t2)

        # Scalar comparison should give the Python bool singleton
        # (identity check is deliberate).
        t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
        assert t1_0_gt_t2_0 is True
        # Scalar vs array broadcasts.
        t1_0_gt_t2 = self.t1[0] > self.t2
        assert np.all(
            t1_0_gt_t2
            == np.array(
                [True, True, True, True, True, False, False, False, False, False]
            )
        )
        t1_gt_t2_0 = self.t1 > self.t2[0]
        assert np.all(
            t1_gt_t2_0
            == np.array([True, True, True, True, True, True, True, True, True, True])
        )

    def test_time_boolean(self):
        # Scalar comparison must return the actual Python bool True.
        t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
        assert t1_0_gt_t2_0 is True

    def test_timedelta(self):
        """Time cannot be compared to TimeDelta; TimeDelta compares elementwise."""
        dt = self.t2 - self.t1
        with pytest.raises(TypeError):
            self.t1 > dt
        dt_gt_td0 = dt > TimeDelta(0.0, format="sec")
        assert np.all(
            dt_gt_td0
            == np.array(
                [False, False, False, False, False, False, True, True, True, True]
            )
        )
@pytest.mark.parametrize("swap", [True, False])
@pytest.mark.parametrize("time_delta", [True, False])
def test_isclose_time(swap, time_delta):
    """Test functionality of Time.isclose() method.

    Run every test with 2 args in original order and swapped, and using
    Quantity or TimeDelta for atol (when provided)."""

    def check(left, right, **kwargs):
        # Exercise both argument orders and both atol flavours so every
        # assertion below covers all calling conventions.
        if "atol" in kwargs and time_delta:
            kwargs["atol"] = TimeDelta(kwargs["atol"])
        if swap:
            left, right = right, left
        return left.isclose(right, **kwargs)

    # Start with original demonstration from #8742. In this issue both t2 == t1
    # and t3 == t1 give False, but this may change with a newer ERFA.
    t1 = Time("2018-07-24T10:41:56.807015240")
    assert check(t1, t1 + 0.0 * u.s)
    assert check(t1, t1 + TimeDelta(0.0 * u.s))

    shifted = t1 + 1 * u.s
    assert check(t1, shifted, atol=1.5 / 86400 * u.day)  # Test different unit
    assert not check(t1, shifted, atol=0.5 / 86400 * u.day)

    spread = t1 + [-1, 0, 2] * u.s
    assert np.all(check(t1, spread, atol=1.5 * u.s) == [True, True, False])

    # Just beyond the default tolerance (2 eps in days).
    barely_off = t1 + 3 * np.finfo(float).eps * u.day
    assert not check(t1, barely_off)
def test_isclose_time_exceptions():
    """Time.isclose() must raise TypeError for unusable arguments."""
    reference = Time("2020:001")
    nearby = reference + 1 * u.s
    # 'other' must support subtraction with Time.
    with pytest.raises(
        TypeError, match="'other' argument must support subtraction with Time"
    ):
        reference.isclose(1.5)
    # 'atol' must be a Quantity or TimeDelta, not a bare float.
    with pytest.raises(
        TypeError,
        match="'atol' argument must be a Quantity or TimeDelta instance, got float instead",
    ):
        reference.isclose(nearby, 1.5)
@pytest.mark.parametrize("swap", [True, False])
@pytest.mark.parametrize("time_delta", [True, False])
@pytest.mark.parametrize("other_quantity", [True, False])
def test_isclose_timedelta(swap, time_delta, other_quantity):
    """Test functionality of TimeDelta.isclose() method.

    Run every test with 2 args in original order and swapped, and using
    Quantity or TimeDelta for atol (when provided), and using Quantity or
    TimeDelta for the other argument."""

    def isclose_swap(t1, t2, **kwargs):
        # Exercise both argument orders, and optionally a TimeDelta atol.
        if swap:
            t1, t2 = t2, t1
        if "atol" in kwargs and time_delta:
            kwargs["atol"] = TimeDelta(kwargs["atol"])
        return t1.isclose(t2, **kwargs)

    def isclose_other_quantity(t1, t2, **kwargs):
        # Exercise a plain Quantity (converted to days) as 'other'.
        if other_quantity:
            t2 = t2.to(u.day)
        if "atol" in kwargs and time_delta:
            kwargs["atol"] = TimeDelta(kwargs["atol"])
        return t1.isclose(t2, **kwargs)

    # Identical values constructed via Quantity and via TimeDelta.
    t1 = TimeDelta(1.0 * u.s)
    t2 = t1 + 0.0 * u.s
    t3 = t1 + TimeDelta(0.0 * u.s)
    assert isclose_swap(t1, t2)
    assert isclose_swap(t1, t3)
    assert isclose_other_quantity(t1, t2)
    assert isclose_other_quantity(t1, t3)

    # One-second offset: closeness depends purely on atol.
    t2 = t1 + 1 * u.s
    assert isclose_swap(t1, t2, atol=1.5 / 86400 * u.day)
    assert not isclose_swap(t1, t2, atol=0.5 / 86400 * u.day)
    assert isclose_other_quantity(t1, t2, atol=1.5 / 86400 * u.day)
    assert not isclose_other_quantity(t1, t2, atol=0.5 / 86400 * u.day)

    # Array-valued 'other': element-wise result.
    t1 = TimeDelta(0 * u.s)
    t2 = t1 + [-1, 0, 2] * u.s
    assert np.all(isclose_swap(t1, t2, atol=1.5 * u.s) == [True, True, False])
    assert np.all(isclose_other_quantity(t1, t2, atol=1.5 * u.s) == [True, True, False])
    # Check with rtol
    # 1 * 0.6 + 0.5 = 1.1 --> 1 <= 1.1 --> True
    # 0 * 0.6 + 0.5 = 0.5 --> 0 <= 0.5 --> True
    # 2 * 0.6 + 0.5 = 1.7 --> 2 <= 1.7 --> False
    assert np.all(t1.isclose(t2, atol=0.5 * u.s, rtol=0.6) == [True, True, False])

    # Just beyond the default tolerance (2 eps in days).
    t2 = t1 + 2 * np.finfo(float).eps * u.day
    assert not isclose_swap(t1, t2)
    assert not isclose_other_quantity(t1, t2)
def test_isclose_timedelta_exceptions():
    """TimeDelta.isclose() must raise TypeError for unusable arguments."""
    dt = TimeDelta(1 * u.s)
    other = dt + 1 * u.s
    # 'other' must be convertible to days.
    with pytest.raises(
        TypeError, match="other' argument must support conversion to days"
    ):
        dt.isclose(1.5)
    # 'atol' must be a Quantity or TimeDelta, not a bare float.
    with pytest.raises(
        TypeError,
        match="'atol' argument must be a Quantity or TimeDelta instance, got float instead",
    ):
        dt.isclose(other, 1.5)
|
71b3927ba37b5af1430198d7c702bf5727dc934d81f1fec3475152579c9f67f7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import itertools
import warnings
import numpy as np
import pytest
import astropy.units as u
from astropy.time import Time
from astropy.time.utils import day_frac
from astropy.units.quantity_helper.function_helpers import ARRAY_FUNCTION_ENABLED
from astropy.utils import iers
# Marker for tests that rely on numpy's __array_function__ protocol; such
# tests are expected to fail when that support is disabled in astropy's
# quantity function helpers.
needs_array_function = pytest.mark.xfail(
    not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
def assert_time_all_equal(t1, t2):
    """Assert that *t1* and *t2* agree in shape and element-wise content."""
    assert t1.shape == t2.shape
    elementwise_equal = t1 == t2
    assert np.all(elementwise_equal)
class ShapeSetup:
    """Shared fixtures for the shape-manipulation tests.

    Builds (10, 5) Time arrays, each in a masked and an unmasked variant:
    ``t0`` has no location, ``t1`` a scalar location, and ``t2`` a location
    array that broadcasts against the times.
    """

    def setup_class(cls):
        mjd = np.arange(50000, 50010)
        frac = np.arange(0.0, 0.999, 0.2)
        # Masked variant differs only in having the second fraction masked.
        frac_masked = np.ma.array(frac)
        frac_masked[1] = np.ma.masked

        # No location.
        cls.t0 = {
            "not_masked": Time(mjd[:, np.newaxis] + frac, format="mjd", scale="utc"),
            "masked": Time(mjd[:, np.newaxis] + frac_masked, format="mjd", scale="utc"),
        }
        # Scalar location (shared by all elements).
        cls.t1 = {
            "not_masked": Time(
                mjd[:, np.newaxis] + frac,
                format="mjd",
                scale="utc",
                location=("45d", "50d"),
            ),
            "masked": Time(
                mjd[:, np.newaxis] + frac_masked,
                format="mjd",
                scale="utc",
                location=("45d", "50d"),
            ),
        }
        # Location array that broadcasts along the last axis.
        cls.t2 = {
            "not_masked": Time(
                mjd[:, np.newaxis] + frac,
                format="mjd",
                scale="utc",
                location=(np.arange(len(frac)), np.arange(len(frac))),
            ),
            "masked": Time(
                mjd[:, np.newaxis] + frac_masked,
                format="mjd",
                scale="utc",
                location=(np.arange(len(frac_masked)), np.arange(len(frac_masked))),
            ),
        }

    def create_data(self, use_mask):
        # Select the masked or unmasked variant for this parametrized run.
        self.t0 = self.__class__.t0[use_mask]
        self.t1 = self.__class__.t1[use_mask]
        self.t2 = self.__class__.t2[use_mask]
@pytest.mark.parametrize("use_mask", ("masked", "not_masked"))
class TestManipulation(ShapeSetup):
    """Manipulation of Time objects, ensuring attributes are done correctly.

    Each test checks three cases from ShapeSetup: ``t0`` (no location),
    ``t1`` (scalar location) and ``t2`` (broadcast location array), and
    verifies shapes, values, and whether memory is shared with the source.
    """

    def test_ravel(self, use_mask):
        self.create_data(use_mask)
        t0_ravel = self.t0.ravel()
        assert t0_ravel.shape == (self.t0.size,)
        assert np.all(t0_ravel.jd1 == self.t0.jd1.ravel())
        assert np.may_share_memory(t0_ravel.jd1, self.t0.jd1)
        assert t0_ravel.location is None
        t1_ravel = self.t1.ravel()
        assert t1_ravel.shape == (self.t1.size,)
        assert np.all(t1_ravel.jd1 == self.t1.jd1.ravel())
        assert np.may_share_memory(t1_ravel.jd1, self.t1.jd1)
        assert t1_ravel.location is self.t1.location
        t2_ravel = self.t2.ravel()
        assert t2_ravel.shape == (self.t2.size,)
        assert np.all(t2_ravel.jd1 == self.t2.jd1.ravel())
        assert np.may_share_memory(t2_ravel.jd1, self.t2.jd1)
        assert t2_ravel.location.shape == t2_ravel.shape
        # Broadcasting and ravelling cannot be done without a copy.
        assert not np.may_share_memory(t2_ravel.location, self.t2.location)

    def test_flatten(self, use_mask):
        self.create_data(use_mask)
        t0_flatten = self.t0.flatten()
        assert t0_flatten.shape == (self.t0.size,)
        assert t0_flatten.location is None
        # Flatten always makes a copy.
        assert not np.may_share_memory(t0_flatten.jd1, self.t0.jd1)
        t1_flatten = self.t1.flatten()
        assert t1_flatten.shape == (self.t1.size,)
        assert not np.may_share_memory(t1_flatten.jd1, self.t1.jd1)
        assert t1_flatten.location is not self.t1.location
        assert t1_flatten.location == self.t1.location
        t2_flatten = self.t2.flatten()
        assert t2_flatten.shape == (self.t2.size,)
        assert not np.may_share_memory(t2_flatten.jd1, self.t2.jd1)
        assert t2_flatten.location.shape == t2_flatten.shape
        assert not np.may_share_memory(t2_flatten.location, self.t2.location)

    def test_transpose(self, use_mask):
        self.create_data(use_mask)
        t0_transpose = self.t0.transpose()
        assert t0_transpose.shape == (5, 10)
        assert np.all(t0_transpose.jd1 == self.t0.jd1.transpose())
        assert np.may_share_memory(t0_transpose.jd1, self.t0.jd1)
        assert t0_transpose.location is None
        t1_transpose = self.t1.transpose()
        assert t1_transpose.shape == (5, 10)
        assert np.all(t1_transpose.jd1 == self.t1.jd1.transpose())
        assert np.may_share_memory(t1_transpose.jd1, self.t1.jd1)
        assert t1_transpose.location is self.t1.location
        t2_transpose = self.t2.transpose()
        assert t2_transpose.shape == (5, 10)
        assert np.all(t2_transpose.jd1 == self.t2.jd1.transpose())
        assert np.may_share_memory(t2_transpose.jd1, self.t2.jd1)
        assert t2_transpose.location.shape == t2_transpose.shape
        assert np.may_share_memory(t2_transpose.location, self.t2.location)
        # Only one check on T, since it just calls transpose anyway.
        t2_T = self.t2.T
        assert t2_T.shape == (5, 10)
        assert np.all(t2_T.jd1 == self.t2.jd1.T)
        assert np.may_share_memory(t2_T.jd1, self.t2.jd1)
        # Fixed: the original compared t2_T.location.shape with itself, a
        # tautology that could never fail; compare against the transposed
        # Time's shape, consistent with every other check in this class.
        assert t2_T.location.shape == t2_T.shape
        assert np.may_share_memory(t2_T.location, self.t2.location)

    def test_diagonal(self, use_mask):
        self.create_data(use_mask)
        t0_diagonal = self.t0.diagonal()
        assert t0_diagonal.shape == (5,)
        assert np.all(t0_diagonal.jd1 == self.t0.jd1.diagonal())
        assert t0_diagonal.location is None
        assert np.may_share_memory(t0_diagonal.jd1, self.t0.jd1)
        t1_diagonal = self.t1.diagonal()
        assert t1_diagonal.shape == (5,)
        assert np.all(t1_diagonal.jd1 == self.t1.jd1.diagonal())
        assert t1_diagonal.location is self.t1.location
        assert np.may_share_memory(t1_diagonal.jd1, self.t1.jd1)
        t2_diagonal = self.t2.diagonal()
        assert t2_diagonal.shape == (5,)
        assert np.all(t2_diagonal.jd1 == self.t2.jd1.diagonal())
        assert t2_diagonal.location.shape == t2_diagonal.shape
        assert np.may_share_memory(t2_diagonal.jd1, self.t2.jd1)
        assert np.may_share_memory(t2_diagonal.location, self.t2.location)

    def test_swapaxes(self, use_mask):
        self.create_data(use_mask)
        t0_swapaxes = self.t0.swapaxes(0, 1)
        assert t0_swapaxes.shape == (5, 10)
        assert np.all(t0_swapaxes.jd1 == self.t0.jd1.swapaxes(0, 1))
        assert np.may_share_memory(t0_swapaxes.jd1, self.t0.jd1)
        assert t0_swapaxes.location is None
        t1_swapaxes = self.t1.swapaxes(0, 1)
        assert t1_swapaxes.shape == (5, 10)
        assert np.all(t1_swapaxes.jd1 == self.t1.jd1.swapaxes(0, 1))
        assert np.may_share_memory(t1_swapaxes.jd1, self.t1.jd1)
        assert t1_swapaxes.location is self.t1.location
        t2_swapaxes = self.t2.swapaxes(0, 1)
        assert t2_swapaxes.shape == (5, 10)
        assert np.all(t2_swapaxes.jd1 == self.t2.jd1.swapaxes(0, 1))
        assert np.may_share_memory(t2_swapaxes.jd1, self.t2.jd1)
        assert t2_swapaxes.location.shape == t2_swapaxes.shape
        assert np.may_share_memory(t2_swapaxes.location, self.t2.location)

    def test_reshape(self, use_mask):
        self.create_data(use_mask)
        t0_reshape = self.t0.reshape(5, 2, 5)
        assert t0_reshape.shape == (5, 2, 5)
        assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
        assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
        assert np.may_share_memory(t0_reshape.jd1, self.t0.jd1)
        assert np.may_share_memory(t0_reshape.jd2, self.t0.jd2)
        assert t0_reshape.location is None
        t1_reshape = self.t1.reshape(2, 5, 5)
        assert t1_reshape.shape == (2, 5, 5)
        assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
        assert np.may_share_memory(t1_reshape.jd1, self.t1.jd1)
        assert t1_reshape.location is self.t1.location
        # For reshape(5, 2, 5), the location array can remain the same.
        t2_reshape = self.t2.reshape(5, 2, 5)
        assert t2_reshape.shape == (5, 2, 5)
        assert np.all(t2_reshape.jd1 == self.t2.jd1.reshape(5, 2, 5))
        assert np.may_share_memory(t2_reshape.jd1, self.t2.jd1)
        assert t2_reshape.location.shape == t2_reshape.shape
        assert np.may_share_memory(t2_reshape.location, self.t2.location)
        # But for reshape(5, 5, 2), location has to be broadcast and copied.
        t2_reshape2 = self.t2.reshape(5, 5, 2)
        assert t2_reshape2.shape == (5, 5, 2)
        assert np.all(t2_reshape2.jd1 == self.t2.jd1.reshape(5, 5, 2))
        assert np.may_share_memory(t2_reshape2.jd1, self.t2.jd1)
        assert t2_reshape2.location.shape == t2_reshape2.shape
        assert not np.may_share_memory(t2_reshape2.location, self.t2.location)
        t2_reshape_t = self.t2.reshape(10, 5).T
        assert t2_reshape_t.shape == (5, 10)
        assert np.may_share_memory(t2_reshape_t.jd1, self.t2.jd1)
        assert t2_reshape_t.location.shape == t2_reshape_t.shape
        assert np.may_share_memory(t2_reshape_t.location, self.t2.location)
        # Finally, reshape in a way that cannot be a view.
        t2_reshape_t_reshape = t2_reshape_t.reshape(10, 5)
        assert t2_reshape_t_reshape.shape == (10, 5)
        assert not np.may_share_memory(t2_reshape_t_reshape.jd1, self.t2.jd1)
        assert t2_reshape_t_reshape.location.shape == t2_reshape_t_reshape.shape
        assert not np.may_share_memory(
            t2_reshape_t_reshape.location, t2_reshape_t.location
        )

    def test_squeeze(self, use_mask):
        self.create_data(use_mask)
        t0_squeeze = self.t0.reshape(5, 1, 2, 1, 5).squeeze()
        assert t0_squeeze.shape == (5, 2, 5)
        assert np.all(t0_squeeze.jd1 == self.t0.jd1.reshape(5, 2, 5))
        assert np.may_share_memory(t0_squeeze.jd1, self.t0.jd1)
        assert t0_squeeze.location is None
        t1_squeeze = self.t1.reshape(1, 5, 1, 2, 5).squeeze()
        assert t1_squeeze.shape == (5, 2, 5)
        assert np.all(t1_squeeze.jd1 == self.t1.jd1.reshape(5, 2, 5))
        assert np.may_share_memory(t1_squeeze.jd1, self.t1.jd1)
        assert t1_squeeze.location is self.t1.location
        t2_squeeze = self.t2.reshape(1, 1, 5, 2, 5, 1, 1).squeeze()
        assert t2_squeeze.shape == (5, 2, 5)
        assert np.all(t2_squeeze.jd1 == self.t2.jd1.reshape(5, 2, 5))
        assert np.may_share_memory(t2_squeeze.jd1, self.t2.jd1)
        assert t2_squeeze.location.shape == t2_squeeze.shape
        assert np.may_share_memory(t2_squeeze.location, self.t2.location)

    def test_add_dimension(self, use_mask):
        self.create_data(use_mask)
        t0_adddim = self.t0[:, np.newaxis, :]
        assert t0_adddim.shape == (10, 1, 5)
        assert np.all(t0_adddim.jd1 == self.t0.jd1[:, np.newaxis, :])
        assert np.may_share_memory(t0_adddim.jd1, self.t0.jd1)
        assert t0_adddim.location is None
        t1_adddim = self.t1[:, :, np.newaxis]
        assert t1_adddim.shape == (10, 5, 1)
        assert np.all(t1_adddim.jd1 == self.t1.jd1[:, :, np.newaxis])
        assert np.may_share_memory(t1_adddim.jd1, self.t1.jd1)
        assert t1_adddim.location is self.t1.location
        t2_adddim = self.t2[:, :, np.newaxis]
        assert t2_adddim.shape == (10, 5, 1)
        assert np.all(t2_adddim.jd1 == self.t2.jd1[:, :, np.newaxis])
        assert np.may_share_memory(t2_adddim.jd1, self.t2.jd1)
        assert t2_adddim.location.shape == t2_adddim.shape
        assert np.may_share_memory(t2_adddim.location, self.t2.location)

    def test_take(self, use_mask):
        self.create_data(use_mask)
        t0_take = self.t0.take((5, 2))
        assert t0_take.shape == (2,)
        assert np.all(t0_take.jd1 == self.t0._time.jd1.take((5, 2)))
        assert t0_take.location is None
        t1_take = self.t1.take((2, 4), axis=1)
        assert t1_take.shape == (10, 2)
        assert np.all(t1_take.jd1 == self.t1.jd1.take((2, 4), axis=1))
        assert t1_take.location is self.t1.location
        t2_take = self.t2.take((1, 3, 7), axis=0)
        assert t2_take.shape == (3, 5)
        assert np.all(t2_take.jd1 == self.t2.jd1.take((1, 3, 7), axis=0))
        assert t2_take.location.shape == t2_take.shape
        t2_take2 = self.t2.take((5, 15))
        assert t2_take2.shape == (2,)
        assert np.all(t2_take2.jd1 == self.t2.jd1.take((5, 15)))
        assert t2_take2.location.shape == t2_take2.shape

    def test_broadcast_via_apply(self, use_mask):
        """Test using a callable method."""
        self.create_data(use_mask)
        t0_broadcast = self.t0._apply(np.broadcast_to, shape=(3, 10, 5))
        assert t0_broadcast.shape == (3, 10, 5)
        assert np.all(t0_broadcast.jd1 == self.t0.jd1)
        assert np.may_share_memory(t0_broadcast.jd1, self.t0.jd1)
        assert t0_broadcast.location is None
        t1_broadcast = self.t1._apply(np.broadcast_to, shape=(3, 10, 5))
        assert t1_broadcast.shape == (3, 10, 5)
        assert np.all(t1_broadcast.jd1 == self.t1.jd1)
        assert np.may_share_memory(t1_broadcast.jd1, self.t1.jd1)
        assert t1_broadcast.location is self.t1.location
        t2_broadcast = self.t2._apply(np.broadcast_to, shape=(3, 10, 5))
        assert t2_broadcast.shape == (3, 10, 5)
        assert np.all(t2_broadcast.jd1 == self.t2.jd1)
        assert np.may_share_memory(t2_broadcast.jd1, self.t2.jd1)
        assert t2_broadcast.location.shape == t2_broadcast.shape
        assert np.may_share_memory(t2_broadcast.location, self.t2.location)
@pytest.mark.parametrize("use_mask", ("masked", "not_masked"))
class TestSetShape(ShapeSetup):
    """In-place assignment to Time.shape (as opposed to reshape())."""

    def test_shape_setting(self, use_mask):
        # Shape-setting should be on the object itself, since copying removes
        # zero-strides due to broadcasting. Hence, this should be the only
        # test in this class.
        self.create_data(use_mask)
        t0_reshape = self.t0.copy()
        mjd = t0_reshape.mjd  # Creates a cache of the mjd attribute
        t0_reshape.shape = (5, 2, 5)
        assert t0_reshape.shape == (5, 2, 5)
        assert mjd.shape != t0_reshape.mjd.shape  # Cache got cleared
        assert np.all(t0_reshape.jd1 == self.t0._time.jd1.reshape(5, 2, 5))
        assert np.all(t0_reshape.jd2 == self.t0._time.jd2.reshape(5, 2, 5))
        assert t0_reshape.location is None
        # But if the shape doesn't work, one should get an error.
        t0_reshape_t = t0_reshape.T
        with pytest.raises(ValueError):
            t0_reshape_t.shape = (12,)  # Wrong number of elements.
        with pytest.raises(AttributeError):
            t0_reshape_t.shape = (10, 5)  # Cannot be done without copy.
        # check no shape was changed.
        assert t0_reshape_t.shape == t0_reshape.T.shape
        assert t0_reshape_t.jd1.shape == t0_reshape.T.shape
        assert t0_reshape_t.jd2.shape == t0_reshape.T.shape
        t1_reshape = self.t1.copy()
        t1_reshape.shape = (2, 5, 5)
        assert t1_reshape.shape == (2, 5, 5)
        assert np.all(t1_reshape.jd1 == self.t1.jd1.reshape(2, 5, 5))
        # location is a single element, so its shape should not change.
        assert t1_reshape.location.shape == ()
        # For reshape(5, 2, 5), the location array can remain the same.
        # Note that we need to work directly on self.t2 here, since any
        # copy would cause location to have the full shape.
        self.t2.shape = (5, 2, 5)
        assert self.t2.shape == (5, 2, 5)
        assert self.t2.jd1.shape == (5, 2, 5)
        assert self.t2.jd2.shape == (5, 2, 5)
        assert self.t2.location.shape == (5, 2, 5)
        # Zero strides confirm the location is still broadcast, not copied.
        assert self.t2.location.strides == (0, 0, 24)
        # But for reshape(50), location would need to be copied, so this
        # should fail.
        oldshape = self.t2.shape
        with pytest.raises(AttributeError):
            self.t2.shape = (50,)
        # check no shape was changed.
        assert self.t2.jd1.shape == oldshape
        assert self.t2.jd2.shape == oldshape
        assert self.t2.location.shape == oldshape
@pytest.mark.parametrize("use_mask", ("masked", "not_masked"))
class TestShapeFunctions(ShapeSetup):
    """Numpy shape functions applied to Time via __array_function__."""

    @needs_array_function
    def test_broadcast(self, use_mask):
        """Test as supported numpy function."""
        self.create_data(use_mask)
        t0_broadcast = np.broadcast_to(self.t0, shape=(3, 10, 5))
        assert t0_broadcast.shape == (3, 10, 5)
        assert np.all(t0_broadcast.jd1 == self.t0.jd1)
        assert np.may_share_memory(t0_broadcast.jd1, self.t0.jd1)
        assert t0_broadcast.location is None
        t1_broadcast = np.broadcast_to(self.t1, shape=(3, 10, 5))
        assert t1_broadcast.shape == (3, 10, 5)
        assert np.all(t1_broadcast.jd1 == self.t1.jd1)
        assert np.may_share_memory(t1_broadcast.jd1, self.t1.jd1)
        assert t1_broadcast.location is self.t1.location
        t2_broadcast = np.broadcast_to(self.t2, shape=(3, 10, 5))
        assert t2_broadcast.shape == (3, 10, 5)
        assert np.all(t2_broadcast.jd1 == self.t2.jd1)
        assert np.may_share_memory(t2_broadcast.jd1, self.t2.jd1)
        assert t2_broadcast.location.shape == t2_broadcast.shape
        assert np.may_share_memory(t2_broadcast.location, self.t2.location)

    @needs_array_function
    def test_atleast_1d(self, use_mask):
        self.create_data(use_mask)
        t00 = self.t0.ravel()[0]
        assert t00.ndim == 0
        t00_1d = np.atleast_1d(t00)
        assert t00_1d.ndim == 1
        assert_time_all_equal(t00[np.newaxis], t00_1d)
        # Actual jd1 will not share memory, as cast to scalar.
        assert np.may_share_memory(t00_1d._time.jd1, t00._time.jd1)

    @needs_array_function
    def test_atleast_2d(self, use_mask):
        self.create_data(use_mask)
        t0r = self.t0.ravel()
        assert t0r.ndim == 1
        t0r_2d = np.atleast_2d(t0r)
        assert t0r_2d.ndim == 2
        assert_time_all_equal(t0r[np.newaxis], t0r_2d)
        assert np.may_share_memory(t0r_2d.jd1, t0r.jd1)

    @needs_array_function
    def test_atleast_3d(self, use_mask):
        self.create_data(use_mask)
        assert self.t0.ndim == 2
        # atleast_3d accepts multiple arguments and returns a tuple.
        t0_3d, t1_3d = np.atleast_3d(self.t0, self.t1)
        assert t0_3d.ndim == t1_3d.ndim == 3
        assert_time_all_equal(self.t0[:, :, np.newaxis], t0_3d)
        assert_time_all_equal(self.t1[:, :, np.newaxis], t1_3d)
        assert np.may_share_memory(t0_3d.jd2, self.t0.jd2)

    def test_move_axis(self, use_mask):
        # Goes via transpose so works without __array_function__ as well.
        self.create_data(use_mask)
        t0_10 = np.moveaxis(self.t0, 0, 1)
        assert t0_10.shape == (self.t0.shape[1], self.t0.shape[0])
        assert_time_all_equal(self.t0.T, t0_10)
        assert np.may_share_memory(t0_10.jd1, self.t0.jd1)

    def test_roll_axis(self, use_mask):
        # Goes via transpose so works without __array_function__ as well.
        self.create_data(use_mask)
        t0_10 = np.rollaxis(self.t0, 1)
        assert t0_10.shape == (self.t0.shape[1], self.t0.shape[0])
        assert_time_all_equal(self.t0.T, t0_10)
        assert np.may_share_memory(t0_10.jd1, self.t0.jd1)

    @needs_array_function
    def test_fliplr(self, use_mask):
        self.create_data(use_mask)
        t0_lr = np.fliplr(self.t0)
        assert_time_all_equal(self.t0[:, ::-1], t0_lr)
        assert np.may_share_memory(t0_lr.jd2, self.t0.jd2)

    @needs_array_function
    def test_rot90(self, use_mask):
        self.create_data(use_mask)
        # Three quarter-turns equal a transpose plus column reversal.
        t0_270 = np.rot90(self.t0, 3)
        assert_time_all_equal(self.t0.T[:, ::-1], t0_270)
        assert np.may_share_memory(t0_270.jd2, self.t0.jd2)

    @needs_array_function
    def test_roll(self, use_mask):
        self.create_data(use_mask)
        t0r = np.roll(self.t0, 1, axis=0)
        assert_time_all_equal(t0r[1:], self.t0[:-1])
        assert_time_all_equal(t0r[0], self.t0[-1])

    @needs_array_function
    def test_delete(self, use_mask):
        self.create_data(use_mask)
        t0d = np.delete(self.t0, [2, 3], axis=0)
        assert_time_all_equal(t0d[:2], self.t0[:2])
        assert_time_all_equal(t0d[2:], self.t0[4:])
@pytest.mark.parametrize("use_mask", ("masked", "not_masked"))
class TestArithmetic:
    """Arithmetic on Time objects, using both doubles."""

    # Reduction keyword sets and reduction names shared by the
    # parametrized tests below.
    kwargs = ({}, {"axis": None}, {"axis": 0}, {"axis": 1}, {"axis": 2})
    functions = ("min", "max", "sort")

    def setup_class(cls):
        # (2, 5, 1) grid of integer MJDs, with fractions chosen to differ
        # only at the double-precision level along the last axis.
        mjd = np.arange(50000, 50100, 10).reshape(2, 5, 1)
        frac = np.array([0.1, 0.1 + 1.0e-15, 0.1 - 1.0e-15, 0.9 + 2.0e-16, 0.9])
        frac_masked = np.ma.array(frac)
        frac_masked[1] = np.ma.masked
        cls.t0 = {
            "not_masked": Time(mjd, frac, format="mjd", scale="utc"),
            "masked": Time(mjd, frac_masked, format="mjd", scale="utc"),
        }

        # Define arrays with same ordinal properties
        frac = np.array([1, 2, 0, 4, 3])
        frac_masked = np.ma.array(frac)
        frac_masked[1] = np.ma.masked
        cls.t1 = {
            "not_masked": Time(mjd + frac, format="mjd", scale="utc"),
            "masked": Time(mjd + frac_masked, format="mjd", scale="utc"),
        }
        # Plain arrays used as the reference for arg-function results.
        cls.jd = {"not_masked": mjd + frac, "masked": mjd + frac_masked}
        # Same times but with a per-element location.
        cls.t2 = {
            "not_masked": Time(
                mjd + frac,
                format="mjd",
                scale="utc",
                location=(np.arange(len(frac)), np.arange(len(frac))),
            ),
            "masked": Time(
                mjd + frac_masked,
                format="mjd",
                scale="utc",
                location=(np.arange(len(frac_masked)), np.arange(len(frac_masked))),
            ),
        }

    def create_data(self, use_mask):
        # Select the masked or unmasked variants for this parametrized run.
        self.t0 = self.__class__.t0[use_mask]
        self.t1 = self.__class__.t1[use_mask]
        self.t2 = self.__class__.t2[use_mask]
        self.jd = self.__class__.jd[use_mask]

    @pytest.mark.parametrize("kw, func", itertools.product(kwargs, functions))
    def test_argfuncs(self, kw, func, use_mask):
        """
        Test that ``np.argfunc(jd, **kw)`` is the same as ``t0.argfunc(**kw)``
        where ``jd`` is a similarly shaped array with the same ordinal properties
        but all integer values. Also test the same for t1 which has the same
        integral values as jd.
        """
        self.create_data(use_mask)
        t0v = getattr(self.t0, "arg" + func)(**kw)
        t1v = getattr(self.t1, "arg" + func)(**kw)
        jdv = getattr(np, "arg" + func)(self.jd, **kw)

        if self.t0.masked and kw == {"axis": None} and func == "sort":
            # Masked entries sort to the end; re-apply the mask through the
            # returned index arrays so the comparison below is meaningful.
            t0v = np.ma.array(t0v, mask=self.t0.mask.reshape(t0v.shape)[t0v])
            t1v = np.ma.array(t1v, mask=self.t1.mask.reshape(t1v.shape)[t1v])
            jdv = np.ma.array(jdv, mask=self.jd.mask.reshape(jdv.shape)[jdv])

        assert np.all(t0v == jdv)
        assert np.all(t1v == jdv)
        assert t0v.shape == jdv.shape
        assert t1v.shape == jdv.shape

    @pytest.mark.parametrize("kw, func", itertools.product(kwargs, functions))
    def test_funcs(self, kw, func, use_mask):
        """
        Test that ``np.func(jd, **kw)`` is the same as ``t1.func(**kw)`` where
        ``jd`` is a similarly shaped array and the same integral values.
        """
        self.create_data(use_mask)
        t1v = getattr(self.t1, func)(**kw)
        jdv = getattr(np, func)(self.jd, **kw)
        assert np.all(t1v.value == jdv)
        assert t1v.shape == jdv.shape

    def test_argmin(self, use_mask):
        self.create_data(use_mask)
        # Index 2 holds 0.1 - 1e-15, the smallest fraction.
        assert self.t0.argmin() == 2
        assert np.all(self.t0.argmin(axis=0) == 0)
        assert np.all(self.t0.argmin(axis=1) == 0)
        assert np.all(self.t0.argmin(axis=2) == 2)

    def test_argmax(self, use_mask):
        self.create_data(use_mask)
        # Index 3 of the last group holds 0.9 + 2e-16, the largest fraction.
        assert self.t0.argmax() == self.t0.size - 2
        if use_mask == "masked":
            # The 0 is where all entries are masked in that axis
            assert np.all(self.t0.argmax(axis=0) == [1, 0, 1, 1, 1])
            assert np.all(self.t0.argmax(axis=1) == [4, 0, 4, 4, 4])
        else:
            assert np.all(self.t0.argmax(axis=0) == 1)
            assert np.all(self.t0.argmax(axis=1) == 4)
        assert np.all(self.t0.argmax(axis=2) == 3)

    def test_argsort(self, use_mask):
        self.create_data(use_mask)
        # Masked elements sort to the end, changing the expected order.
        order = [2, 0, 4, 3, 1] if use_mask == "masked" else [2, 0, 1, 4, 3]
        assert np.all(self.t0.argsort() == np.array(order))
        assert np.all(self.t0.argsort(axis=0) == np.arange(2).reshape(2, 1, 1))
        assert np.all(self.t0.argsort(axis=1) == np.arange(5).reshape(5, 1))
        assert np.all(self.t0.argsort(axis=2) == np.array(order))
        ravel = np.arange(50).reshape(-1, 5)[:, order].ravel()
        if use_mask == "masked":
            t0v = self.t0.argsort(axis=None)
            # Manually remove elements in ravel that correspond to masked
            # entries in self.t0. This removes the 10 entries that are masked
            # which show up at the end of the list.
            mask = self.t0.mask.ravel()[ravel]
            ravel = ravel[~mask]
            assert np.all(t0v[:-10] == ravel)
        else:
            assert np.all(self.t0.argsort(axis=None) == ravel)

    @pytest.mark.parametrize("scale", Time.SCALES)
    def test_argsort_warning(self, use_mask, scale):
        # Sorting UTC times is expected to warn (leap seconds); other
        # scales must be warning-free.
        self.create_data(use_mask)
        if scale == "utc":
            pytest.xfail()
        with warnings.catch_warnings(record=True) as wlist:
            Time([1, 2, 3], format="jd", scale=scale).argsort()
        assert len(wlist) == 0

    def test_min(self, use_mask):
        self.create_data(use_mask)
        assert self.t0.min() == self.t0[0, 0, 2]
        assert np.all(self.t0.min(0) == self.t0[0])
        assert np.all(self.t0.min(1) == self.t0[:, 0])
        assert np.all(self.t0.min(2) == self.t0[:, :, 2])
        # Shapes with and without keepdims.
        assert self.t0.min(0).shape == (5, 5)
        assert self.t0.min(0, keepdims=True).shape == (1, 5, 5)
        assert self.t0.min(1).shape == (2, 5)
        assert self.t0.min(1, keepdims=True).shape == (2, 1, 5)
        assert self.t0.min(2).shape == (2, 5)
        assert self.t0.min(2, keepdims=True).shape == (2, 5, 1)

    def test_max(self, use_mask):
        self.create_data(use_mask)
        assert self.t0.max() == self.t0[-1, -1, -2]
        assert np.all(self.t0.max(0) == self.t0[1])
        assert np.all(self.t0.max(1) == self.t0[:, 4])
        assert np.all(self.t0.max(2) == self.t0[:, :, 3])
        assert self.t0.max(0).shape == (5, 5)
        assert self.t0.max(0, keepdims=True).shape == (1, 5, 5)

    def test_ptp(self, use_mask):
        self.create_data(use_mask)
        # Peak-to-peak must equal max - min, overall and per axis.
        assert self.t0.ptp() == self.t0.max() - self.t0.min()
        assert np.all(self.t0.ptp(0) == self.t0.max(0) - self.t0.min(0))
        assert self.t0.ptp(0).shape == (5, 5)
        assert self.t0.ptp(0, keepdims=True).shape == (1, 5, 5)

    def test_sort(self, use_mask):
        self.create_data(use_mask)
        order = [2, 0, 4, 3, 1] if use_mask == "masked" else [2, 0, 1, 4, 3]
        assert np.all(self.t0.sort() == self.t0[:, :, order])
        # Along axes 0 and 1 the data are already sorted.
        assert np.all(self.t0.sort(0) == self.t0)
        assert np.all(self.t0.sort(1) == self.t0)
        assert np.all(self.t0.sort(2) == self.t0[:, :, order])
        if use_mask == "not_masked":
            assert np.all(self.t0.sort(None) == self.t0[:, :, order].ravel())
            # Bit superfluous, but good to check.
            assert np.all(self.t0.sort(-1)[:, :, 0] == self.t0.min(-1))
            assert np.all(self.t0.sort(-1)[:, :, -1] == self.t0.max(-1))

    @pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 1)])
    @pytest.mark.parametrize(
        "where", [True, np.array([True, False, True, True, False])[..., np.newaxis]]
    )
    @pytest.mark.parametrize("keepdims", [False, True])
    def test_mean(self, use_mask, axis, where, keepdims):
        self.create_data(use_mask)
        kwargs = dict(axis=axis, where=where, keepdims=keepdims)

        def is_consistent(time):
            # Expected mean: sum jd1/jd2 in TAI over the unmasked, selected
            # entries, divide via day_frac, and convert back to the scale.
            where_expected = where & ~time.mask
            where_expected = np.broadcast_to(where_expected, time.shape)
            kw = kwargs.copy()
            kw["where"] = where_expected
            divisor = where_expected.sum(axis=axis, keepdims=keepdims)
            if np.any(divisor == 0):
                # A mean over zero elements must raise.
                with pytest.raises(ValueError):
                    time.mean(**kwargs)
            else:
                time_mean = time.mean(**kwargs)
                time_expected = Time(
                    *day_frac(
                        val1=np.ma.getdata(time.tai.jd1).sum(**kw),
                        val2=np.ma.getdata(time.tai.jd2).sum(**kw),
                        divisor=divisor,
                    ),
                    format="jd",
                    scale="tai",
                )
                time_expected._set_scale(time.scale)
                assert np.all(time_mean == time_expected)

        is_consistent(self.t0)
        is_consistent(self.t1)

        # t2 has a per-element location, so averaging over axes along which
        # the location varies is ambiguous and must raise.
        axes_location_not_constant = [None, 2]
        if axis in axes_location_not_constant:
            with pytest.raises(ValueError):
                self.t2.mean(**kwargs)
        else:
            is_consistent(self.t2)

    def test_mean_precision(self, use_mask):
        # A 1 ns offset must survive averaging over a one-year baseline.
        scale = "tai"
        epsilon = 1 * u.ns
        t0 = Time("2021-07-27T00:00:00", scale=scale)
        t1 = Time("2022-07-27T00:00:00", scale=scale)
        t2 = Time("2023-07-27T00:00:00", scale=scale)
        t = Time([t0, t2 + epsilon])
        if use_mask == "masked":
            t[0] = np.ma.masked
            assert t.mean() == (t2 + epsilon)
        else:
            assert t.mean() == (t1 + epsilon / 2)

    def test_mean_dtype(self, use_mask):
        # dtype is meaningless for Time means and must be rejected.
        self.create_data(use_mask)
        with pytest.raises(ValueError):
            self.t0.mean(dtype=int)

    def test_mean_out(self, use_mask):
        # An 'out' argument is not supported and must be rejected.
        self.create_data(use_mask)
        with pytest.raises(ValueError):
            self.t0.mean(out=Time(np.zeros_like(self.t0.jd1), format="jd"))

    def test_mean_leap_second(self, use_mask):
        # Check that leap second is dealt with correctly: for UTC, across a leap
        # second bounday, one cannot just average jd, but has to go through TAI.
        if use_mask == "not_masked":
            t = Time(["2012-06-30 23:59:60.000", "2012-07-01 00:00:01.000"])
            mean_expected = t[0] + (t[1] - t[0]) / 2
            mean_expected_explicit = Time("2012-07-01 00:00:00")
            mean_test = t.mean()
            assert mean_expected == mean_expected_explicit
            assert mean_expected == mean_test
            # Naive jd averaging gives the wrong answer across the leap second.
            assert mean_test != Time(
                *day_frac(t.jd1.sum(), t.jd2.sum(), divisor=2), format="jd"
            )
def test_regression():
    """Regression test for gh-5225.

    A time with a single-element delta_ut1_utc could not be copied,
    flattened, or ravelled. (For copy, it is in test_basic.)
    """
    with iers.conf.set_temp("auto_download", False):
        t_ut1 = Time(49580.0, scale="tai", format="mjd").ut1
        duplicate = copy.deepcopy(t_ut1)
        # delta_ut1_utc must come through as a plain ndarray in every case.
        assert type(duplicate.delta_ut1_utc) is np.ndarray
        assert type(t_ut1.flatten().delta_ut1_utc) is np.ndarray
        assert type(t_ut1.ravel().delta_ut1_utc) is np.ndarray
        assert duplicate.delta_ut1_utc == t_ut1.delta_ut1_utc
|
1bcfb3888e930f5c384d7188db1692e4e2ccf59b2cb4c8381b63e57b2087dd3e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import date
from itertools import count
import numpy as np
import pytest
from erfa import DJM0
from astropy.time import Time, TimeFormat
from astropy.time.utils import day_frac
class SpecificException(ValueError):
    """Sentinel exception used to verify exception chaining in these tests."""
    pass
@pytest.fixture
def custom_format_name():
    """Yield a format name not yet in Time.FORMATS; deregister it afterwards."""
    base = "custom_format_name"
    for i in count():
        candidate = base if not i else f"{base}_{i}"
        if candidate not in Time.FORMATS:
            break
    yield candidate
    # Clean up whatever format the test registered under this name.
    Time.FORMATS.pop(candidate, None)
def test_custom_time_format_set_jds_exception(custom_format_name):
    """A failure inside ``set_jds`` must surface as a ValueError whose
    ``__cause__`` is the original exception.

    The original ``try``/``except`` only asserted inside the ``except``
    branch, so the test silently passed if no exception was raised at all;
    ``pytest.raises`` makes the expectation explicit.
    """

    class Custom(TimeFormat):
        name = custom_format_name

        def set_jds(self, val, val2):
            raise SpecificException

    with pytest.raises(ValueError) as excinfo:
        Time(7.0, format=custom_format_name)
    assert isinstance(excinfo.value.__cause__, SpecificException)
def test_custom_time_format_val_type_exception(custom_format_name):
    """A failure inside ``_check_val_type`` must surface as a ValueError
    chained to the original exception.

    The original ``try``/``except`` passed vacuously when no exception was
    raised; ``pytest.raises`` makes the expectation explicit.
    """

    class Custom(TimeFormat):
        name = custom_format_name

        def _check_val_type(self, val, val2):
            raise SpecificException

    with pytest.raises(ValueError) as excinfo:
        Time(7.0, format=custom_format_name)
    assert isinstance(excinfo.value.__cause__, SpecificException)
def test_custom_time_format_value_exception(custom_format_name):
    """An exception raised by the ``value`` property propagates unchanged."""

    class Custom(TimeFormat):
        name = custom_format_name

        def set_jds(self, val, val2):
            self.jd1, self.jd2 = val, val2

        @property
        def value(self):
            raise SpecificException

    now = Time.now()
    with pytest.raises(SpecificException):
        getattr(now, custom_format_name)
def test_custom_time_format_fine(custom_format_name):
    """A well-formed custom format supports both conversion and construction."""

    class Custom(TimeFormat):
        name = custom_format_name

        def set_jds(self, val, val2):
            self.jd1, self.jd2 = val, val2

        @property
        def value(self):
            return self.jd1 + self.jd2

    # Converting an existing Time and building one from jd1/jd2 both work.
    for t in (Time.now(), Time(7, 9, format=custom_format_name)):
        getattr(t, custom_format_name)
def test_custom_time_format_forgot_property(custom_format_name):
    """A format whose ``value`` is a plain method (not a property) is
    rejected at class-definition time."""
    with pytest.raises(ValueError):

        class Custom(TimeFormat):
            name = custom_format_name

            def set_jds(self, val, val2):
                self.jd1, self.jd2 = val, val2

            # Deliberately missing @property: this must trigger the error.
            def value(self):
                return self.jd1, self.jd2
def test_custom_time_format_problematic_name():
    """A format named after an existing Time method ("sort") must not
    clobber that method on Time instances."""
    assert "sort" not in Time.FORMATS, "problematic name in default FORMATS!"
    assert hasattr(Time, "sort")
    try:

        class Custom(TimeFormat):
            name = "sort"
            _dtype = np.dtype([("jd1", "f8"), ("jd2", "f8")])

            def set_jds(self, val, val2):
                self.jd1, self.jd2 = val, val2

            @property
            def value(self):
                result = np.empty(self.jd1.shape, self._dtype)
                result["jd1"] = self.jd1
                result["jd2"] = self.jd2
                return result

        t = Time.now()
        # The method must still win over the format attribute.
        assert t.sort() == t, "bogus time format clobbers everyone's Time objects"
        # Explicitly selecting the format must still work.
        t.format = "sort"
        assert t.value.dtype == Custom._dtype
        t2 = Time(7, 9, format="sort")
        assert t2.value == np.array((7, 9), Custom._dtype)
    finally:
        # Always deregister so other tests see a clean FORMATS registry.
        Time.FORMATS.pop("sort", None)
def test_mjd_longdouble_preserves_precision(custom_format_name):
    """A custom format can accept/return longdouble without losing precision."""

    class CustomMJD(TimeFormat):
        name = custom_format_name

        def _check_val_type(self, val, val2):
            # Accept a single value only, widened to long double.
            val = np.longdouble(val)
            if val2 is not None:
                raise ValueError("Only one value permitted")
            return val, 0

        def set_jds(self, val, val2):
            # Split into integer and fractional MJD before converting to JD.
            mjd1 = np.float64(np.floor(val))
            mjd2 = np.float64(val - mjd1)
            self.jd1, self.jd2 = day_frac(mjd1 + DJM0, mjd2)

        @property
        def value(self):
            mjd1, mjd2 = day_frac(self.jd1 - DJM0, self.jd2)
            return np.longdouble(mjd1) + np.longdouble(mjd2)

    m = 58000.0
    t = Time(m, format=custom_format_name)
    # Pick a different long double (ensuring it will give a different jd2
    # even when long doubles are more precise than Time, as on arm64).
    m2 = np.longdouble(m) + max(
        2.0 * m * np.finfo(np.longdouble).eps, np.finfo(float).eps
    )
    assert m2 != m, "long double is weird!"
    t2 = Time(m2, format=custom_format_name)
    assert t != t2
    assert isinstance(getattr(t, custom_format_name), np.longdouble)
    assert getattr(t, custom_format_name) != getattr(t2, custom_format_name)
@pytest.mark.parametrize(
    "jd1, jd2",
    [
        ("foo", None),
        (np.arange(3), np.arange(4)),
        ("foo", "bar"),
        (1j, 2j),
        pytest.param(
            np.longdouble(3),
            np.longdouble(5),
            marks=pytest.mark.skipif(
                np.longdouble().itemsize == np.dtype(float).itemsize,
                reason="long double == double on this platform",
            ),
        ),
        ({1: 2}, {3: 4}),
        ({1, 2}, {3, 4}),
        ([1, 2], [3, 4]),
        (lambda: 4, lambda: 7),
        (np.arange(3), np.arange(4)),
    ],
)
def test_custom_format_cannot_make_bogus_jd1(custom_format_name, jd1, jd2):
    """Installing non-float64 or shape-mismatched jd1/jd2 must be rejected."""

    class Custom(TimeFormat):
        name = custom_format_name

        def set_jds(self, val, val2):
            # Deliberately ignore the input and install bogus jd1/jd2.
            self.jd1, self.jd2 = jd1, jd2

        @property
        def value(self):
            return self.jd1 + self.jd2

    with pytest.raises((ValueError, TypeError)):
        Time(5, format=custom_format_name)
def test_custom_format_scalar_jd1_jd2_okay(custom_format_name):
    """``set_jds`` may produce scalar jd1/jd2; attribute access still works."""

    class Custom(TimeFormat):
        name = custom_format_name

        def set_jds(self, val, val2):
            # Scalars rather than arrays -- this must be accepted.
            self.jd1, self.jd2 = 7.0, 3.0

        @property
        def value(self):
            return self.jd1 + self.jd2

    t = Time(5, format=custom_format_name)
    getattr(t, custom_format_name)
@pytest.mark.parametrize(
    "thing",
    [
        1,
        1.0,
        np.longdouble(1),
        1.0j,
        "foo",
        b"foo",
        Time(5, format="mjd"),
        lambda: 7,
        np.datetime64("2005-02-25"),
        date(2006, 2, 25),
    ],
)
def test_custom_format_can_return_any_scalar(custom_format_name, thing):
    """``value`` may return any scalar type; it round-trips unchanged."""

    class Custom(TimeFormat):
        name = custom_format_name

        def set_jds(self, val, val2):
            self.jd1, self.jd2 = 2.0, 0.0

        @property
        def value(self):
            # Wrap in a 0-d array; Time must unwrap it back to the scalar.
            return np.array(thing)

    assert type(
        getattr(Time(5, format=custom_format_name), custom_format_name)
    ) == type(thing)
    assert np.all(
        getattr(Time(5, format=custom_format_name), custom_format_name) == thing
    )
@pytest.mark.parametrize(
    "thing",
    [
        (1, 2),
        [1, 2],
        np.array([2, 3]),
        np.array([2, 3, 5, 7]),
        {6: 7},
        {1, 2},
    ],
)
def test_custom_format_can_return_any_iterable(custom_format_name, thing):
    """``value`` may return any iterable type; it round-trips unchanged."""

    class Custom(TimeFormat):
        name = custom_format_name

        def set_jds(self, val, val2):
            self.jd1, self.jd2 = 2.0, 0.0

        @property
        def value(self):
            return thing

    assert type(
        getattr(Time(5, format=custom_format_name), custom_format_name)
    ) == type(thing)
    assert np.all(
        getattr(Time(5, format=custom_format_name), custom_format_name) == thing
    )
|
5d9e055339cea191264d2f55aa3e25f0bda4cd36830f8646ccb53ff089fd5a3c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Table
from astropy.time import Time
from astropy.utils import iers
from astropy.utils.compat import PYTHON_LT_3_11
from astropy.utils.compat.optional_deps import HAS_H5PY
# Seconds comparison at double precision relative to a 1-day scale.
allclose_sec = functools.partial(
    np.allclose, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600
)  # 20 ps atol
is_masked = np.ma.is_masked
# The first form is expanded to r"can't set attribute '{0}'" in Python 3.10, and replaced
# with the more informative second form as of 3.11 (python/cpython#31311).
no_setter_err = (
    r"can't set attribute"
    if PYTHON_LT_3_11
    else r"property '{0}' of '{1}' object has no setter"
)
def test_simple():
    """Masking an element flips Time.masked and makes outputs MaskedArrays."""
    t = Time([1, 2, 3], format="cxcsec")
    assert t.masked is False
    assert np.all(t.mask == [False, False, False])
    # Before masking, format output is not a masked array (it is an ndarray
    # like always)
    assert not isinstance(t.value, np.ma.MaskedArray)
    assert not isinstance(t.unix, np.ma.MaskedArray)
    t[2] = np.ma.masked
    assert t.masked is True
    assert np.all(t.mask == [False, False, True])
    assert allclose_sec(t.value[:2], [1, 2])
    assert is_masked(t.value[2])
    assert is_masked(t[2].value)
    # After masking format output is a masked array
    assert isinstance(t.value, np.ma.MaskedArray)
    assert isinstance(t.unix, np.ma.MaskedArray)
    # Todo : test all formats
def test_scalar_init():
    """A scalar Time starts unmasked with a 0-d False mask."""
    t = Time("2000:001")
    assert t.masked is False
    assert t.mask == np.array(False)
def test_mask_not_writeable():
    """The mask attribute is read-only, both as property and as array."""
    t = Time("2000:001")
    with pytest.raises(
        AttributeError, match=no_setter_err.format("mask", t.__class__.__name__)
    ):
        t.mask = True
    # The returned mask array itself is also not writeable in place.
    t = Time(["2000:001"])
    with pytest.raises(ValueError) as err:
        t.mask[0] = True
    assert "assignment destination is read-only" in str(err.value)
def test_str():
    """str/repr show masked entries as '--'; assignment unmasks."""
    t = Time(["2000:001", "2000:002"])
    t[1] = np.ma.masked
    assert str(t) == "['2000:001:00:00:00.000' --]"
    assert (
        repr(t)
        == "<Time object: scale='utc' format='yday' value=['2000:001:00:00:00.000' --]>"
    )
    expected = [
        "masked_array(data=['2000-01-01 00:00:00.000', --],",
        "             mask=[False,  True],",
        "       fill_value='N/A',",
        "            dtype='<U23')",
    ]
    # Note that we need to take care to allow for big-endian platforms,
    # for which the dtype will be >U23 instead of <U23, which we do with
    # the call to replace().
    assert repr(t.iso).replace(">U23", "<U23").splitlines() == expected
    # Assign value to unmask
    t[1] = "2000:111"
    assert str(t) == "['2000:001:00:00:00.000' '2000:111:00:00:00.000']"
    assert t.masked is False
def test_transform():
    """The mask survives scale and format conversions."""
    with iers.conf.set_temp("auto_download", False):
        t = Time(["2000:001", "2000:002"])
        t[1] = np.ma.masked
        # Change scale (this tests the ERFA machinery with masking as well)
        t_ut1 = t.ut1
        assert is_masked(t_ut1.value[1])
        assert not is_masked(t_ut1.value[0])
        assert np.all(t_ut1.mask == [False, True])
        # Change format
        t_unix = t.unix
        assert is_masked(t_unix[1])
        assert not is_masked(t_unix[0])
        assert np.all(t_unix.mask == [False, True])
def test_masked_input():
    """Init from masked arrays: val and val2 masks are combined (OR-ed)."""
    v0 = np.ma.MaskedArray([[1, 2], [3, 4]])  # No masked elements
    v1 = np.ma.MaskedArray([[1, 2], [3, 4]], mask=[[True, False], [False, False]])
    v2 = np.ma.MaskedArray([[10, 20], [30, 40]], mask=[[False, False], [False, True]])
    # Init from various combinations of masked arrays
    t = Time(v0, format="cxcsec")
    assert np.ma.allclose(t.value, v0)
    assert np.all(t.mask == [[False, False], [False, False]])
    assert t.masked is False
    t = Time(v1, format="cxcsec")
    assert np.ma.allclose(t.value, v1)
    assert np.all(t.mask == v1.mask)
    assert np.all(t.value.mask == v1.mask)
    assert t.masked is True
    t = Time(v1, v2, format="cxcsec")
    assert np.ma.allclose(t.value, v1 + v2)
    assert np.all(t.mask == (v1 + v2).mask)
    assert t.masked is True
    t = Time(v0, v1, format="cxcsec")
    assert np.ma.allclose(t.value, v0 + v1)
    assert np.all(t.mask == (v0 + v1).mask)
    assert t.masked is True
    t = Time(0, v2, format="cxcsec")
    assert np.ma.allclose(t.value, v2)
    assert np.all(t.mask == v2.mask)
    assert t.masked is True
    # Init from a string masked array
    t_iso = t.iso
    t2 = Time(t_iso)
    assert np.all(t2.value == t_iso)
    assert np.all(t2.mask == v2.mask)
    assert t2.masked is True
def test_all_masked_input():
    """Fix for #9612"""
    # Test with jd=0 and jd=np.nan. Both triggered an exception prior to #9624
    # due to astropy.utils.exceptions.ErfaError.
    for val in (0, np.nan):
        t = Time(np.ma.masked_array([val], mask=[True]), format="jd")
        assert str(t.iso) == "[--]"
def test_serialize_fits_masked(tmp_path):
    """Masked Time columns round-trip through FITS (except the format)."""
    tm = Time([1, 2, 3], format="cxcsec")
    tm[1] = np.ma.masked
    fn = tmp_path / "tempfile.fits"
    t = Table([tm])
    t.write(fn)
    t2 = Table.read(fn, astropy_native=True)
    # Time FITS handling does not currently round-trip format in FITS
    t2["col0"].format = tm.format
    assert t2["col0"].masked
    assert np.all(t2["col0"].mask == [False, True, False])
    assert np.all(t2["col0"].value == t["col0"].value)
@pytest.mark.skipif(not HAS_H5PY, reason="Needs h5py")
def test_serialize_hdf5_masked(tmp_path):
    """Masked Time columns round-trip through HDF5 with serialized meta."""
    tm = Time([1, 2, 3], format="cxcsec")
    tm[1] = np.ma.masked
    fn = tmp_path / "tempfile.hdf5"
    t = Table([tm])
    t.write(fn, path="root", serialize_meta=True)
    t2 = Table.read(fn)
    assert t2["col0"].masked
    assert np.all(t2["col0"].mask == [False, True, False])
    assert np.all(t2["col0"].value == t["col0"].value)
# Ignore warning in MIPS https://github.com/astropy/astropy/issues/9750
@pytest.mark.filterwarnings("ignore:invalid value encountered")
@pytest.mark.parametrize("serialize_method", ["jd1_jd2", "formatted_value"])
def test_serialize_ecsv_masked(serialize_method, tmp_path):
    """Masked Time columns round-trip through ECSV for both serializations."""
    tm = Time([1, 2, 3], format="cxcsec")
    tm[1] = np.ma.masked
    tm.info.serialize_method["ecsv"] = serialize_method
    fn = tmp_path / "tempfile.ecsv"
    t = Table([tm])
    t.write(fn)
    t2 = Table.read(fn)
    assert t2["col0"].masked
    assert np.all(t2["col0"].mask == [False, True, False])
    # Serializing formatted_value loses some precision.
    atol = 0.1 * u.us if serialize_method == "formatted_value" else 1 * u.ps
    assert np.all(abs(t2["col0"] - t["col0"]) <= atol)
|
3d938ab3b8091be0abda3c73b0cb191d9e22955ff59386a3cbe3c4dbe02acd42 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import erfa
import numpy as np
import pytest
from astropy import units as u
from astropy.time import Time
from astropy.time.core import SIDEREAL_TIME_MODELS
from astropy.utils import iers
# Comparison helpers, all operating on values expressed in hours.
allclose_hours = functools.partial(np.allclose, rtol=1e-15, atol=3e-8)
# 0.1 ms atol; IERS-B files change at that level.
within_1_second = functools.partial(np.allclose, rtol=1.0, atol=1.0 / 3600.0)
within_2_seconds = functools.partial(np.allclose, rtol=1.0, atol=2.0 / 3600.0)
def test_doc_string_contains_models():
    """The doc string is formatted; this ensures this remains working."""
    doc = Time.sidereal_time.__doc__
    for kind in ("mean", "apparent"):
        assert all(model in doc for model in SIDEREAL_TIME_MODELS[kind])
class TestERFATestCases:
    """Test that we reproduce the test cases given in erfa/src/t_erfa_c.c"""

    # NOTE(review): not decorated with @classmethod, unlike the other test
    # classes in this file; pytest passes the class as ``cls`` either way —
    # confirm whether the decorator was omitted intentionally.
    def setup_class(cls):
        # Sidereal time tests use the following JD inputs.
        cls.time_ut1 = Time(2400000.5, 53736.0, scale="ut1", format="jd")
        cls.time_tt = Time(2400000.5, 53736.0, scale="tt", format="jd")
        # but tt!=ut1 at these dates, unlike what is assumed, so we cannot
        # reproduce this exactly. Now it does not really matter,
        # but may as well fake this (and avoid IERS table lookup here)
        cls.time_ut1.delta_ut1_utc = 0.0
        cls.time_ut1.delta_ut1_utc = (
            24
            * 3600
            * (
                (cls.time_ut1.tt.jd1 - cls.time_tt.jd1)
                + (cls.time_ut1.tt.jd2 - cls.time_tt.jd2)
            )
        )

    def test_setup(self):
        # After the delta_ut1_utc fudge above, TT via UT1 must match TT.
        assert np.allclose(
            (self.time_ut1.tt.jd1 - self.time_tt.jd1)
            + (self.time_ut1.tt.jd2 - self.time_tt.jd2),
            0.0,
            atol=1.0e-14,
        )

    @pytest.mark.parametrize(
        "erfa_test_input",
        (
            (1.754174972210740592, 1e-12, "eraGmst00"),
            (1.754174971870091203, 1e-12, "eraGmst06"),
            (1.754174981860675096, 1e-12, "eraGmst82"),
            (1.754166138018281369, 1e-12, "eraGst00a"),
            (1.754166136510680589, 1e-12, "eraGst00b"),
            (1.754166137675019159, 1e-12, "eraGst06a"),
            (1.754166136020645203, 1e-12, "eraGst94"),
        ),
    )
    def test_iau_models(self, erfa_test_input):
        """Each ERFA reference value matches the corresponding astropy model."""
        result, precision, name = erfa_test_input
        # Derive the astropy model name ("IAU2000", "IAU1982", ...) from the
        # ERFA function name; an 'm' marks the mean (GMST) functions.
        if name[4] == "m":
            kind = "mean"
            model_name = f"IAU{20 if name[7] == '0' else 19:2d}{name[7:]:s}"
        else:
            kind = "apparent"
            model_name = f"IAU{20 if name[6] == '0' else 19:2d}{name[6:].upper():s}"
        assert kind in SIDEREAL_TIME_MODELS.keys()
        assert model_name in SIDEREAL_TIME_MODELS[kind]
        gst = self.time_ut1.sidereal_time(kind, "greenwich", model_name)
        assert np.allclose(gst.to_value("radian"), result, rtol=1.0, atol=precision)

    def test_era(self):
        # Separate since it does not use the same time.
        time_ut1 = Time(2400000.5, 54388.0, format="jd", scale="ut1")
        era = time_ut1.earth_rotation_angle("tio")
        expected = 0.4022837240028158102
        assert np.abs(era.to_value(u.radian) - expected) < 1e-12
class TestST:
    """Test Greenwich Sidereal Time. Unlike above, this is relative to
    what was found earlier, so checks changes in implementation, including
    leap seconds, rather than correctness"""

    @classmethod
    def setup_class(cls):
        # Disable IERS auto-download so the results are reproducible offline.
        cls.orig_auto_download = iers.conf.auto_download
        iers.conf.auto_download = False
        # Times straddling the 2012-06-30 leap second.
        cls.t1 = Time(
            [
                "2012-06-30 12:00:00",
                "2012-06-30 23:59:59",
                "2012-06-30 23:59:60",
                "2012-07-01 00:00:00",
                "2012-07-01 12:00:00",
            ],
            scale="utc",
        )
        # Same instants, but with a location attached (for local times).
        cls.t2 = Time(cls.t1, location=("120d", "10d"))

    @classmethod
    def teardown_class(cls):
        iers.conf.auto_download = cls.orig_auto_download

    def test_gmst(self):
        """Compare Greenwich Mean Sidereal Time with what was found earlier"""
        gmst_compare = np.array(
            [
                6.5968497894730564,
                18.629426164144697,
                18.629704702452862,
                18.629983240761003,
                6.6628381828899643,
            ]
        )
        gmst = self.t1.sidereal_time("mean", "greenwich")
        assert allclose_hours(gmst.value, gmst_compare)

    def test_gst(self):
        """Compare Greenwich Apparent Sidereal Time with what was found earlier"""
        gst_compare = np.array(
            [
                6.5971168570494854,
                18.629694220878296,
                18.62997275921186,
                18.630251297545389,
                6.6631074284018244,
            ]
        )
        gst = self.t1.sidereal_time("apparent", "greenwich")
        assert allclose_hours(gst.value, gst_compare)

    def test_era(self):
        """Compare ERA relative to erfa.era00 test case."""
        t = Time(2400000.5, 54388.0, format="jd", location=(0, 0), scale="ut1")
        era = t.earth_rotation_angle()
        expected = 0.4022837240028158102 * u.radian
        # Without the TIO locator/polar motion, this should be close already.
        assert np.abs(era - expected) < 1e-10 * u.radian
        # And with it, one should reach full precision.
        sp = erfa.sp00(t.tt.jd1, t.tt.jd2)
        iers_table = iers.earth_orientation_table.get()
        xp, yp = (c.to_value(u.rad) for c in iers_table.pm_xy(t))
        r = erfa.rx(-yp, erfa.ry(-xp, erfa.rz(sp, np.eye(3))))
        expected1 = expected + (np.arctan2(r[0, 1], r[0, 0]) << u.radian)
        assert np.abs(era - expected1) < 1e-12 * u.radian
        # Now try at a longitude different from 0.
        t2 = Time(2400000.5, 54388.0, format="jd", location=(45, 0), scale="ut1")
        era2 = t2.earth_rotation_angle()
        r2 = erfa.rz(np.deg2rad(45), r)
        expected2 = expected + (np.arctan2(r2[0, 1], r2[0, 0]) << u.radian)
        assert np.abs(era2 - expected2) < 1e-12 * u.radian

    def test_gmst_gst_close(self):
        """Check that Mean and Apparent are within a few seconds."""
        gmst = self.t1.sidereal_time("mean", "greenwich")
        gst = self.t1.sidereal_time("apparent", "greenwich")
        assert within_2_seconds(gst.value, gmst.value)

    def test_gmst_era_close(self):
        """Check that mean sidereal time and earth rotation angle are close."""
        gmst = self.t1.sidereal_time("mean", "greenwich")
        era = self.t1.earth_rotation_angle("tio")
        assert within_2_seconds(era.value, gmst.value)

    def test_gmst_independent_of_self_location(self):
        """Check that Greenwich time does not depend on self.location"""
        gmst1 = self.t1.sidereal_time("mean", "greenwich")
        gmst2 = self.t2.sidereal_time("mean", "greenwich")
        assert allclose_hours(gmst1.value, gmst2.value)

    def test_gmst_vs_lmst(self):
        """Check that Greenwich and local sidereal time differ."""
        gmst = self.t1.sidereal_time("mean", "greenwich")
        lmst = self.t1.sidereal_time("mean", 0)
        assert allclose_hours(lmst.value, gmst.value)
        # Close at longitude 0, but not bit-identical.
        assert np.all(np.abs(lmst - gmst) > 1e-10 * u.hourangle)

    @pytest.mark.parametrize("kind", ("mean", "apparent"))
    def test_lst(self, kind):
        """Compare Local Sidereal Time with what was found earlier,
        as well as with what is expected from GMST
        """
        lst_compare = {
            "mean": np.array(
                [
                    14.596849789473058,
                    2.629426164144693,
                    2.6297047024528588,
                    2.6299832407610033,
                    14.662838182889967,
                ]
            ),
            "apparent": np.array(
                [
                    14.597116857049487,
                    2.6296942208782959,
                    2.6299727592118565,
                    2.6302512975453887,
                    14.663107428401826,
                ]
            ),
        }
        gmst2 = self.t2.sidereal_time(kind, "greenwich")
        lmst2 = self.t2.sidereal_time(kind)
        assert allclose_hours(lmst2.value, lst_compare[kind])
        # LST - GST must equal the site longitude expressed in hourangle.
        assert allclose_hours(
            (lmst2 - gmst2).wrap_at("12h").value,
            self.t2.location.lon.to_value("hourangle"),
        )
        # check it also works when one gives longitude explicitly
        lmst1 = self.t1.sidereal_time(kind, self.t2.location.lon)
        assert allclose_hours(lmst1.value, lst_compare[kind])

    def test_lst_string_longitude(self):
        # A string longitude is accepted and matches the location-based LST.
        lmst1 = self.t1.sidereal_time("mean", longitude="120d")
        lmst2 = self.t2.sidereal_time("mean")
        assert allclose_hours(lmst1.value, lmst2.value)

    def test_lst_needs_location(self):
        # Local sidereal time requires a longitude or a location on the Time.
        with pytest.raises(ValueError):
            self.t1.sidereal_time("mean")
        with pytest.raises(ValueError):
            self.t1.sidereal_time("mean", None)

    def test_lera(self):
        # Local Earth rotation angle, compared with previously found values.
        lera_compare = np.array(
            [
                14.586176631122177,
                2.618751847545134,
                2.6190303858265067,
                2.619308924107852,
                14.652162695594276,
            ]
        )
        gera2 = self.t2.earth_rotation_angle("tio")
        lera2 = self.t2.earth_rotation_angle()
        assert allclose_hours(lera2.value, lera_compare)
        # Local ERA - Greenwich ERA equals the site longitude.
        assert allclose_hours(
            (lera2 - gera2).wrap_at("12h").value,
            self.t2.location.lon.to_value("hourangle"),
        )
        # Check it also works when one gives location explicitly.
        # This also verifies input of a location generally.
        lera1 = self.t1.earth_rotation_angle(self.t2.location)
        assert allclose_hours(lera1.value, lera_compare)
class TestModelInterpretation:
    """Check that models are different, and that wrong models are recognized"""

    @classmethod
    def setup_class(cls):
        # Disable IERS auto-download so the results are reproducible offline.
        cls.orig_auto_download = iers.conf.auto_download
        iers.conf.auto_download = False
        cls.t = Time(["2012-06-30 12:00:00"], scale="utc", location=("120d", "10d"))

    @classmethod
    def teardown_class(cls):
        iers.conf.auto_download = cls.orig_auto_download

    @pytest.mark.parametrize("kind", ("mean", "apparent"))
    def test_model_uniqueness(self, kind):
        """Check models give different answers, yet are close."""
        for model1, model2 in itertools.combinations(
            SIDEREAL_TIME_MODELS[kind].keys(), 2
        ):
            # Greenwich sidereal time: distinct but within a second.
            gst1 = self.t.sidereal_time(kind, "greenwich", model1)
            gst2 = self.t.sidereal_time(kind, "greenwich", model2)
            assert np.all(gst1.value != gst2.value)
            assert within_1_second(gst1.value, gst2.value)
            # Local sidereal time: same expectation.
            lst1 = self.t.sidereal_time(kind, None, model1)
            lst2 = self.t.sidereal_time(kind, None, model2)
            assert np.all(lst1.value != lst2.value)
            assert within_1_second(lst1.value, lst2.value)

    @pytest.mark.parametrize(
        ("kind", "other"), (("mean", "apparent"), ("apparent", "mean"))
    )
    def test_wrong_models_raise_exceptions(self, kind, other):
        # Unknown names and models of the other kind must both be rejected.
        with pytest.raises(ValueError):
            self.t.sidereal_time(kind, "greenwich", "nonsense")
        for model in set(SIDEREAL_TIME_MODELS[other].keys()) - set(
            SIDEREAL_TIME_MODELS[kind].keys()
        ):
            with pytest.raises(ValueError):
                self.t.sidereal_time(kind, "greenwich", model)
            with pytest.raises(ValueError):
                self.t.sidereal_time(kind, None, model)
|
ea38f7b0211eae93568c3edee2772e583780631c0c3414b9a4dad013919d4b83 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""Handles the CDS string format for units."""
import operator
import re
from astropy.units.utils import is_effectively_unity
from astropy.utils import classproperty, parsing
from astropy.utils.misc import did_you_mean
from . import core, utils
from .base import Base
class CDS(Base):
    """
    Support the `Centre de Données astronomiques de Strasbourg
    <http://cds.u-strasbg.fr/>`_ `Standards for Astronomical
    Catalogues 2.0 <http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_
    format, and the `complete set of supported units
    <https://vizier.u-strasbg.fr/viz-bin/Unit>`_. This format is used
    by VOTable up to version 1.2.
    """

    _tokens = (
        "PRODUCT",
        "DIVISION",
        "OPEN_PAREN",
        "CLOSE_PAREN",
        "OPEN_BRACKET",
        "CLOSE_BRACKET",
        "X",
        "SIGN",
        "UINT",
        "UFLOAT",
        "UNIT",
        "DIMENSIONLESS",
    )

    @classproperty(lazy=True)
    def _units(cls):
        # Mapping of unit name -> unit, built once on first access.
        return cls._generate_unit_names()

    @classproperty(lazy=True)
    def _parser(cls):
        return cls._make_parser()

    @classproperty(lazy=True)
    def _lexer(cls):
        return cls._make_lexer()

    @staticmethod
    def _generate_unit_names():
        """Collect every UnitBase instance defined in astropy.units.cds."""
        from astropy import units as u
        from astropy.units import cds

        names = {}
        for key, val in cds.__dict__.items():
            if isinstance(val, u.UnitBase):
                names[key] = val
        return names

    @classmethod
    def _make_lexer(cls):
        # NOTE: in the token functions below the docstring IS the regular
        # expression (PLY convention) -- do not edit them as documentation.
        tokens = cls._tokens

        t_PRODUCT = r"\."
        t_DIVISION = r"/"
        t_OPEN_PAREN = r"\("
        t_CLOSE_PAREN = r"\)"
        t_OPEN_BRACKET = r"\["
        t_CLOSE_BRACKET = r"\]"

        # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
        # Regular expression rules for simple tokens

        def t_UFLOAT(t):
            r"((\d+\.?\d+)|(\.\d+))([eE][+-]?\d+)?"
            if not re.search(r"[eE\.]", t.value):
                t.type = "UINT"
                t.value = int(t.value)
            else:
                t.value = float(t.value)
            return t

        def t_UINT(t):
            r"\d+"
            t.value = int(t.value)
            return t

        def t_SIGN(t):
            r"[+-](?=\d)"
            t.value = float(t.value + "1")
            return t

        def t_X(t):  # multiplication for factor in front of unit
            r"[x×]"
            return t

        def t_UNIT(t):
            r"\%|°|\\h|((?!\d)\w)+"
            t.value = cls._get_unit(t)
            return t

        def t_DIMENSIONLESS(t):
            r"---|-"
            # These are separate from t_UNIT since they cannot have a prefactor.
            t.value = cls._get_unit(t)
            return t

        t_ignore = ""

        # Error handling rule
        def t_error(t):
            raise ValueError(f"Invalid character at col {t.lexpos}")

        return parsing.lex(
            lextab="cds_lextab", package="astropy/units", reflags=int(re.UNICODE)
        )

    @classmethod
    def _make_parser(cls):
        """
        The grammar here is based on the description in the `Standards
        for Astronomical Catalogues 2.0
        <http://vizier.u-strasbg.fr/vizier/doc/catstd-3.2.htx>`_, which is not
        terribly precise. The exact grammar is here is based on the
        YACC grammar in the `unity library
        <https://bitbucket.org/nxg/unity/>`_.
        """
        # NOTE: in the rule functions below the docstring IS the grammar
        # production (PLY convention) -- do not edit them as documentation.
        tokens = cls._tokens

        def p_main(p):
            """
            main : factor combined_units
                 | combined_units
                 | DIMENSIONLESS
                 | OPEN_BRACKET combined_units CLOSE_BRACKET
                 | OPEN_BRACKET DIMENSIONLESS CLOSE_BRACKET
                 | factor
            """
            from astropy.units import dex
            from astropy.units.core import Unit

            if len(p) == 3:
                p[0] = Unit(p[1] * p[2])
            elif len(p) == 4:
                p[0] = dex(p[2])
            else:
                p[0] = Unit(p[1])

        def p_combined_units(p):
            """
            combined_units : product_of_units
                           | division_of_units
            """
            p[0] = p[1]

        def p_product_of_units(p):
            """
            product_of_units : unit_expression PRODUCT combined_units
                             | unit_expression
            """
            if len(p) == 4:
                p[0] = p[1] * p[3]
            else:
                p[0] = p[1]

        def p_division_of_units(p):
            """
            division_of_units : DIVISION unit_expression
                              | unit_expression DIVISION combined_units
            """
            if len(p) == 3:
                p[0] = p[2] ** -1
            else:
                p[0] = p[1] / p[3]

        def p_unit_expression(p):
            """
            unit_expression : unit_with_power
                            | OPEN_PAREN combined_units CLOSE_PAREN
            """
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = p[2]

        def p_factor(p):
            """
            factor : signed_float X UINT signed_int
                   | UINT X UINT signed_int
                   | UINT signed_int
                   | UINT
                   | signed_float
            """
            if len(p) == 5:
                if p[3] != 10:
                    raise ValueError("Only base ten exponents are allowed in CDS")
                p[0] = p[1] * 10.0 ** p[4]
            elif len(p) == 3:
                if p[1] != 10:
                    raise ValueError("Only base ten exponents are allowed in CDS")
                p[0] = 10.0 ** p[2]
            elif len(p) == 2:
                p[0] = p[1]

        def p_unit_with_power(p):
            """
            unit_with_power : UNIT numeric_power
                            | UNIT
            """
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = p[1] ** p[2]

        def p_numeric_power(p):
            """
            numeric_power : sign UINT
            """
            p[0] = p[1] * p[2]

        def p_sign(p):
            """
            sign : SIGN
                 |
            """
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = 1.0

        def p_signed_int(p):
            """
            signed_int : SIGN UINT
            """
            p[0] = p[1] * p[2]

        def p_signed_float(p):
            """
            signed_float : sign UINT
                         | sign UFLOAT
            """
            p[0] = p[1] * p[2]

        def p_error(p):
            raise ValueError()

        return parsing.yacc(tabmodule="cds_parsetab", package="astropy/units")

    @classmethod
    def _get_unit(cls, t):
        """Resolve a lexed unit token, falling back to registry aliases."""
        try:
            return cls._parse_unit(t.value)
        except ValueError as e:
            registry = core.get_current_unit_registry()
            if t.value in registry.aliases:
                return registry.aliases[t.value]
            raise ValueError(f"At col {t.lexpos}, {str(e)}")

    @classmethod
    def _parse_unit(cls, unit, detailed_exception=True):
        """Look up a single unit name; raise ValueError if unknown."""
        if unit not in cls._units:
            if detailed_exception:
                raise ValueError(
                    "Unit '{}' not supported by the CDS SAC standard. {}".format(
                        unit, did_you_mean(unit, cls._units)
                    )
                )
            else:
                raise ValueError()
        return cls._units[unit]

    @classmethod
    def parse(cls, s, debug=False):
        """Parse a CDS unit string (str or ASCII bytes) into a unit."""
        # Bug fix: decode bytes *before* the whitespace check.  Previously
        # ``" " in s`` ran first, so bytes input raised TypeError (str
        # membership test on bytes) instead of the intended ValueError.
        if not isinstance(s, str):
            s = s.decode("ascii")
        if " " in s:
            raise ValueError("CDS unit must not contain whitespace")

        # This is a short circuit for the case where the string
        # is just a single unit name
        try:
            return cls._parse_unit(s, detailed_exception=False)
        except ValueError:
            try:
                return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
            except ValueError as e:
                if str(e):
                    raise ValueError(str(e))
                else:
                    raise ValueError("Syntax error")

    @staticmethod
    def _get_unit_name(unit):
        return unit.get_format_name("cds")

    @classmethod
    def _format_unit_list(cls, units):
        """Join (base, power) pairs with '.'; powers of 1 are implicit."""
        out = []
        for base, power in units:
            if power == 1:
                out.append(cls._get_unit_name(base))
            else:
                out.append(f"{cls._get_unit_name(base)}{int(power)}")
        return ".".join(out)

    @classmethod
    def to_string(cls, unit):
        """Convert a unit to its CDS string representation."""
        # Remove units that aren't known to the format
        unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
        if isinstance(unit, core.CompositeUnit):
            if unit == core.dimensionless_unscaled:
                return "---"
            elif is_effectively_unity(unit.scale * 100.0):
                return "%"
            if unit.scale == 1:
                s = ""
            else:
                m, e = utils.split_mantissa_exponent(unit.scale)
                parts = []
                if m not in ("", "1"):
                    parts.append(m)
                if e:
                    if not e.startswith("-"):
                        e = "+" + e
                    parts.append(f"10{e}")
                s = "x".join(parts)
            pairs = list(zip(unit.bases, unit.powers))
            if len(pairs) > 0:
                # Positive powers first, per the CDS convention.
                pairs.sort(key=operator.itemgetter(1), reverse=True)
                s += cls._format_unit_list(pairs)
        elif isinstance(unit, core.NamedUnit):
            s = cls._get_unit_name(unit)
        return s
|
1f7bf061a8f842931fce212f2cbe1ded9c3fe4775b746f082756e5dba61c5b73 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "Console" unit format.
"""
from . import base, core, utils
class Console(base.Base):
    """
    Output-only format for to display pretty formatting at the
    console.
    For example::
        >>> import astropy.units as u
        >>> print(u.Ry.decompose().to_string('console'))  # doctest: +FLOAT_CMP
                         m^2 kg
        2.1798721*10^-18 ------
                          s^2
    """

    # Multiplication symbol and fraction-bar character for this renderer.
    _times = "*"
    _line = "-"

    @classmethod
    def _get_unit_name(cls, unit):
        return unit.get_format_name("console")

    @classmethod
    def _format_superscript(cls, number):
        return f"^{number}"

    @classmethod
    def _format_unit_list(cls, units):
        """Join (base, power) pairs with spaces; powers of 1 are implicit."""
        parts = []
        for unit_base, power in units:
            name = cls._get_unit_name(unit_base)
            if power != 1:
                name += cls._format_superscript(utils.format_power(power))
            parts.append(name)
        return " ".join(parts)

    @classmethod
    def format_exponential_notation(cls, val):
        """Render a scale factor as ``mantissa*10^exponent``."""
        mantissa, exponent = utils.split_mantissa_exponent(val)
        pieces = []
        if mantissa:
            pieces.append(mantissa)
        if exponent:
            pieces.append(f"10{cls._format_superscript(exponent)}")
        return cls._times.join(pieces)

    @classmethod
    def to_string(cls, unit):
        """Render *unit* as console text, drawing a fraction bar whenever
        negative powers are present."""
        if isinstance(unit, core.CompositeUnit):
            s = "" if unit.scale == 1 else cls.format_exponential_notation(unit.scale)
            if len(unit.bases):
                numer_units, denom_units = utils.get_grouped_by_powers(
                    unit.bases, unit.powers
                )
                if len(denom_units):
                    numerator = (
                        cls._format_unit_list(numer_units)
                        if len(numer_units)
                        else "1"
                    )
                    denominator = cls._format_unit_list(denom_units)
                    scale_width = len(s)
                    bar_width = max(len(numerator), len(denominator))
                    # Two centered columns: the scale, then the fraction.
                    row = f"{{0:^{scale_width}s}} {{1:^{bar_width}s}}"
                    s = "\n".join(
                        (
                            row.format("", numerator),
                            row.format(s, cls._line * bar_width),
                            row.format("", denominator),
                        )
                    )
                else:
                    # No negative powers: everything goes on one line.
                    s += cls._format_unit_list(numer_units)
        elif isinstance(unit, core.NamedUnit):
            s = cls._get_unit_name(unit)
        return s
|
8388d9b9dc42a86ba8f4a409ca6aea393d39230d0a98f028138ec5a62920ca53 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A collection of different unit formats.
"""
# This is pretty atrocious, but it will prevent a circular import for those
# formatters that need access to the units.core module An entry for it should
# exist in sys.modules since astropy.units.core imports this module
import sys
core = sys.modules["astropy.units.core"]
from .base import Base
from .cds import CDS
from .console import Console
from .fits import Fits
from .generic import Generic, Unscaled
from .latex import Latex, LatexInline
from .ogip import OGIP
from .unicode_format import Unicode
from .vounit import VOUnit
__all__ = [
"Base",
"Generic",
"CDS",
"Console",
"Fits",
"Latex",
"LatexInline",
"OGIP",
"Unicode",
"Unscaled",
"VOUnit",
"get_format",
]
def _known_formats():
    """Summarize registered formatter names, split by I/O capability."""
    inout = []
    out_only = []
    # A formatter that still uses Base.parse cannot read, only write.
    for name, cls in Base.registry.items():
        if cls.parse.__func__ is Base.parse.__func__:
            out_only.append(name)
        else:
            inout.append(name)
    return (
        f"Valid formatter names are: {inout} for input and output, "
        f"and {out_only} for output only."
    )
def get_format(format=None):
    """
    Get a formatter by name.

    Parameters
    ----------
    format : str or `astropy.units.format.Base` instance or subclass
        The name of the format, or the format instance or subclass
        itself.

    Returns
    -------
    format : `astropy.units.format.Base` instance
        The requested formatter.

    Raises
    ------
    TypeError
        If ``format`` is neither a string nor a `Base` subclass.
    ValueError
        If the name does not match any registered formatter.
    """
    if format is None:
        return Generic
    if isinstance(format, type) and issubclass(format, Base):
        return format
    if not isinstance(format, str):
        # The previous condition also tested ``format is None``, which is
        # unreachable here (handled by the early return above).  The message
        # grammar is also fixed ("must a" -> "must be a").
        raise TypeError(
            f"Formatter must be a subclass or instance of a subclass of {Base!r} "
            f"or a string giving the name of the formatter. {_known_formats()}."
        )
    format_lower = format.lower()
    if format_lower in Base.registry:
        return Base.registry[format_lower]
    raise ValueError(f"Unknown format {format!r}. {_known_formats()}")
|
9b9453cba7a92422fd6a61a2b7cff6269f1a29aff39a922771c011c587bbb583 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "VOUnit" unit format.
"""
import copy
import keyword
import operator
import re
import warnings
from . import core, generic, utils
class VOUnit(generic.Generic):
    """
    The IVOA standard for units used by the VO.

    This is an implementation of `Units in the VO 1.0
    <http://www.ivoa.net/documents/VOUnits/>`_.
    """

    # An explicitly-quoted custom unit, e.g. "'foo'", optionally preceded by
    # a single SI prefix character, e.g. "k'foo'".
    _explicit_custom_unit_regex = re.compile(r"^[YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+'$")
    # A bare word (not starting with a digit) usable as an implicit custom unit.
    _custom_unit_regex = re.compile(r"^((?!\d)\w)+$")
    # Namespace shared by all custom units defined on the fly via
    # ``_def_custom_unit``; acts as a cache across parses.
    _custom_units = {}

    @staticmethod
    def _generate_unit_names():
        """Build the VOUnit name -> unit mapping.

        Returns the ``(names, deprecated_names, functions)`` triple used by
        the `generic.Generic` machinery; VOUnit defines no function units.
        """
        from astropy import units as u
        from astropy.units import required_by_vounit as uvo

        names = {}
        deprecated_names = set()

        # Units that accept SI prefixes.
        bases = [
            "A", "C", "D", "F", "G", "H", "Hz", "J", "Jy", "K", "N",
            "Ohm", "Pa", "R", "Ry", "S", "T", "V", "W", "Wb", "a",
            "adu", "arcmin", "arcsec", "barn", "beam", "bin", "cd",
            "chan", "count", "ct", "d", "deg", "eV", "erg", "g", "h",
            "lm", "lx", "lyr", "m", "mag", "min", "mol", "pc", "ph",
            "photon", "pix", "pixel", "rad", "rad", "s", "solLum",
            "solMass", "solRad", "sr", "u", "voxel", "yr",
        ]  # fmt: skip
        # Units that additionally accept the binary (Ki, Mi, ...) prefixes.
        binary_bases = ["bit", "byte", "B"]
        # Units accepted only without any prefix.
        simple_units = ["Angstrom", "angstrom", "AU", "au", "Ba", "dB", "mas"]
        si_prefixes = [
            "y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
            "", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y"
        ]  # fmt: skip
        binary_prefixes = ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei"]
        # Base names deprecated by the VOUnit standard (prefixed forms of
        # these are deprecated too).
        deprecated_units = {
            "a", "angstrom", "Angstrom", "au", "Ba", "barn", "ct",
            "erg", "G", "ph", "pix",
        }  # fmt: skip

        def do_defines(bases, prefixes, skips=[]):
            # NOTE: the default ``skips`` list is never mutated, so the
            # mutable default argument is harmless here.
            for base in bases:
                for prefix in prefixes:
                    key = prefix + base
                    if key in skips:
                        continue
                    if keyword.iskeyword(key):
                        # e.g. "as" (attosecond) would shadow a Python keyword.
                        continue
                    # Prefer the main astropy.units namespace; fall back to
                    # the VOUnit-only units in ``required_by_vounit``.
                    names[key] = getattr(u if hasattr(u, key) else uvo, key)
                    if base in deprecated_units:
                        deprecated_names.add(key)

        do_defines(bases, si_prefixes, ["pct", "pcount", "yd"])
        do_defines(binary_bases, si_prefixes + binary_prefixes, ["dB", "dbyte"])
        do_defines(simple_units, [""])

        return names, deprecated_names, []

    @classmethod
    def parse(cls, s, debug=False):
        """Parse a VOUnit string into a unit object.

        ``"unknown"``/``"UNKNOWN"`` map to `None` and the empty string maps
        to the dimensionless unit; function units are rejected.
        """
        if s in ("unknown", "UNKNOWN"):
            return None
        if s == "":
            return core.dimensionless_unscaled
        # Check for excess solidi, but exclude fractional exponents (allowed)
        if s.count("/") > 1 and s.count("/") - len(re.findall(r"\(\d+/\d+\)", s)) > 1:
            raise core.UnitsError(
                f"'{s}' contains multiple slashes, which is "
                "disallowed by the VOUnit standard."
            )
        result = cls._do_parse(s, debug=debug)
        if hasattr(result, "function_unit"):
            raise ValueError("Function units are not yet supported in VOUnit.")
        return result

    @classmethod
    def _get_unit(cls, t):
        """Resolve lexer token *t* to a unit, defining a custom unit when
        the name is not part of the standard."""
        try:
            return super()._get_unit(t)
        except ValueError:
            if cls._explicit_custom_unit_regex.match(t.value):
                # Quoted custom units are accepted silently.
                return cls._def_custom_unit(t.value)

            if cls._custom_unit_regex.match(t.value):
                # Bare non-standard names are accepted with a warning.
                warnings.warn(
                    f"Unit {t.value!r} not supported by the VOUnit standard. "
                    + utils.did_you_mean_units(
                        t.value,
                        cls._units,
                        cls._deprecated_units,
                        cls._to_decomposed_alternative,
                    ),
                    core.UnitsWarning,
                )

                return cls._def_custom_unit(t.value)

            raise

    @classmethod
    def _parse_unit(cls, unit, detailed_exception=True):
        # Membership check plus deprecation warning; the caller handles the
        # bare ValueError (e.g. by attempting a custom-unit definition).
        if unit not in cls._units:
            raise ValueError()

        if unit in cls._deprecated_units:
            utils.unit_deprecation_warning(
                unit, cls._units[unit], "VOUnit", cls._to_decomposed_alternative
            )

        return cls._units[unit]

    @classmethod
    def _get_unit_name(cls, unit):
        """Return the VOUnit name for *unit*, raising for names outside the
        standard and warning for deprecated ones."""
        # The da- and d- prefixes are discouraged.  This has the
        # effect of adding a scale to value in the result.
        if isinstance(unit, core.PrefixUnit):
            if unit._represents.scale == 10.0:
                raise ValueError(
                    f"In '{unit}': VOUnit can not represent units with the 'da' "
                    "(deka) prefix"
                )
            elif unit._represents.scale == 0.1:
                raise ValueError(
                    f"In '{unit}': VOUnit can not represent units with the 'd' "
                    "(deci) prefix"
                )

        name = unit.get_format_name("vounit")

        # Custom units bypass the standard-name validation below.
        if unit in cls._custom_units.values():
            return name

        if name not in cls._units:
            raise ValueError(f"Unit {name!r} is not part of the VOUnit standard")

        if name in cls._deprecated_units:
            utils.unit_deprecation_warning(
                name, unit, "VOUnit", cls._to_decomposed_alternative
            )

        return name

    @classmethod
    def _def_custom_unit(cls, unit):
        """Define (or fetch from cache) the custom unit named *unit*,
        peeling off a recognized SI prefix when present."""

        def def_base(name):
            # Create the prefixless base custom unit, cached in
            # ``_custom_units``.
            if name in cls._custom_units:
                return cls._custom_units[name]

            if name.startswith("'"):
                # Quoted form: register both the unquoted and quoted
                # spellings, formatting back to the quoted one.
                return core.def_unit(
                    [name[1:-1], name],
                    format={"vounit": name},
                    namespace=cls._custom_units,
                )
            else:
                return core.def_unit(name, namespace=cls._custom_units)

        if unit in cls._custom_units:
            return cls._custom_units[unit]

        # If the name starts with an SI prefix, build a PrefixUnit scaled
        # relative to the bare custom base unit.
        for short, full, factor in core.si_prefixes:
            for prefix in short:
                if unit.startswith(prefix):
                    base_name = unit[len(prefix) :]
                    base_unit = def_base(base_name)
                    return core.PrefixUnit(
                        [prefix + x for x in base_unit.names],
                        core.CompositeUnit(
                            factor, [base_unit], [1], _error_check=False
                        ),
                        format={"vounit": prefix + base_unit.names[-1]},
                        namespace=cls._custom_units,
                    )

        return def_base(unit)

    @classmethod
    def _format_unit_list(cls, units):
        # Factors are sorted case-insensitively by name and joined with "."
        # as VOUnit prescribes.
        out = []
        units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())

        for base, power in units:
            if power == 1:
                out.append(cls._get_unit_name(base))
            else:
                power = utils.format_power(power)
                if "/" in power or "." in power:
                    # Fractional / floating-point exponents need the
                    # parenthesized form.
                    out.append(f"{cls._get_unit_name(base)}({power})")
                else:
                    out.append(f"{cls._get_unit_name(base)}**{power}")

        return ".".join(out)

    @classmethod
    def to_string(cls, unit):
        """Convert *unit* to a VOUnit string.

        Raises
        ------
        `~astropy.units.core.UnitScaleError`
            If a dimensionless unit carries a scale, which VOUnit cannot
            express.
        """
        from astropy.units import core

        # Remove units that aren't known to the format
        unit = utils.decompose_to_known_units(unit, cls._get_unit_name)

        if isinstance(unit, core.CompositeUnit):
            if unit.physical_type == "dimensionless" and unit.scale != 1:
                raise core.UnitScaleError(
                    "The VOUnit format is not able to "
                    "represent scale for dimensionless units. "
                    f"Multiply your data by {unit.scale:e}."
                )
            s = ""
            if unit.scale != 1:
                s += f"{unit.scale:.8g}"
            # Positive powers first, for readability.
            pairs = list(zip(unit.bases, unit.powers))
            pairs.sort(key=operator.itemgetter(1), reverse=True)
            s += cls._format_unit_list(pairs)
        elif isinstance(unit, core.NamedUnit):
            s = cls._get_unit_name(unit)

        return s

    @classmethod
    def _to_decomposed_alternative(cls, unit):
        """Like ``to_string``, but when the scale cannot be represented,
        return a scale-free string plus a note about the multiplier."""
        from astropy.units import core

        try:
            s = cls.to_string(unit)
        except core.UnitScaleError:
            scale = unit.scale
            unit = copy.copy(unit)
            unit._scale = 1.0
            return f"{cls.to_string(unit)} (with data multiplied by {scale})"
        return s
|
18c87406b743387c71e1cb80a556e1dddebaa32327b19a8191be4bb41ca5568b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "LaTeX" unit format.
"""
import re
import numpy as np
from . import base, core, utils
class Latex(base.Base):
    """
    Output LaTeX to display the unit based on IAU style guidelines.

    Attempts to follow the `IAU Style Manual
    <https://www.iau.org/static/publications/stylemanual1989.pdf>`_.
    """

    @classmethod
    def _latex_escape(cls, name):
        # Unit names are restricted to word characters plus "_", so escaping
        # the underscore is all the LaTeX protection required here.
        return name.replace("_", r"\_")

    @classmethod
    def _get_unit_name(cls, unit):
        display = unit.get_format_name("latex")
        # An explicit LaTeX format name is trusted to be valid LaTeX; only
        # escape when we fell back to the plain unit name.
        return cls._latex_escape(display) if display == unit.name else display

    @classmethod
    def _format_unit_list(cls, units):
        rendered_factors = []
        for factor, exponent in units:
            rendered = cls._get_unit_name(factor)
            if exponent != 1:
                # If the LaTeX representation of the base unit already ends
                # with a superscript, spell out the unit instead to avoid a
                # double superscript: u.deg**2 renders as deg^{2}, not
                # {}^{\circ}^{2}.
                if re.match(r".*\^{[^}]*}$", rendered):
                    rendered = factor.short_names[0]
                rendered = f"{rendered}^{{{utils.format_power(exponent)}}}"
            rendered_factors.append(rendered)
        return r"\,".join(rendered_factors)

    @classmethod
    def _format_bases(cls, unit):
        numerator, denominator = utils.get_grouped_by_powers(unit.bases, unit.powers)
        if not len(denominator):
            return cls._format_unit_list(numerator)
        # With negative powers present, render as a fraction; an empty
        # numerator becomes the literal "1".
        top = cls._format_unit_list(numerator) if len(numerator) else "1"
        bottom = cls._format_unit_list(denominator)
        return rf"\frac{{{top}}}{{{bottom}}}"

    @classmethod
    def to_string(cls, unit):
        # A unit-level LaTeX override, when present, wins outright.
        latex_name = unit._format.get("latex") if hasattr(unit, "_format") else None

        if latex_name is not None:
            body = latex_name
        elif isinstance(unit, core.CompositeUnit):
            if unit.scale == 1:
                body = ""
            else:
                body = cls.format_exponential_notation(unit.scale) + r"\,"
            if len(unit.bases):
                body += cls._format_bases(unit)
        elif isinstance(unit, core.NamedUnit):
            body = cls._latex_escape(unit.name)

        return rf"$\mathrm{{{body}}}$"

    @classmethod
    def format_exponential_notation(cls, val, format_spec=".8g"):
        """
        Formats a value in exponential notation for LaTeX.

        Parameters
        ----------
        val : number
            The value to be formatted

        format_spec : str, optional
            Format used to split up mantissa and exponent

        Returns
        -------
        latex_string : str
            The value in exponential notation in a format suitable for LaTeX.
        """
        if not np.isfinite(val):
            if np.isnan(val):
                return r"{\rm NaN}"
            # Signed infinities.
            return r"\infty" if val > 0 else r"-\infty"

        mantissa, exponent = utils.split_mantissa_exponent(val, format_spec)
        pieces = filter(
            None, (mantissa, f"10^{{{exponent}}}" if exponent else None)
        )
        return r" \times ".join(pieces)
class LatexInline(Latex):
    """
    Output LaTeX to display the unit based on IAU style guidelines with negative
    powers.

    Attempts to follow the `IAU Style Manual
    <https://www.iau.org/static/publications/stylemanual1989.pdf>`_ and the
    `ApJ and AJ style guide
    <https://journals.aas.org/manuscript-preparation/>`_.
    """

    name = "latex_inline"

    @classmethod
    def _format_bases(cls, unit):
        # Unlike the parent class, never build a \frac: every factor stays
        # inline, with negative exponents shown explicitly.
        base_power_pairs = zip(unit.bases, unit.powers)
        return cls._format_unit_list(base_power_pairs)
|
5720a9f86e8c1036121b172fee671c35786d6c9c1e9a61d1b9ccd97352e45a20 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "Unicode" unit format.
"""
from . import console, utils
class Unicode(console.Console):
    """
    Output-only format to display pretty formatting at the console
    using Unicode characters.

    For example::

        >>> import astropy.units as u
        >>> print(u.bar.decompose().to_string('unicode'))
                kg
        100000 ────
               m s²
    """

    _times = "×"
    _line = "─"

    @classmethod
    def _get_unit_name(cls, unit):
        # Look the unit up under the "unicode" key of its format registry.
        return unit.get_format_name("unicode")

    @classmethod
    def format_exponential_notation(cls, val):
        mantissa, exponent = utils.split_mantissa_exponent(val)

        chunks = []
        if mantissa:
            # Swap the ASCII hyphen for a true Unicode minus sign.
            chunks.append(mantissa.replace("-", "−"))
        if exponent:
            chunks.append("10" + cls._format_superscript(exponent))

        return cls._times.join(chunks)

    @classmethod
    def _format_superscript(cls, number):
        translation = {
            "0": "⁰", "1": "¹", "2": "²", "3": "³", "4": "⁴",
            "5": "⁵", "6": "⁶", "7": "⁷", "8": "⁸", "9": "⁹",
            "-": "⁻", "−": "⁻",
            # This is actually a "raised omission bracket", but it's
            # the closest thing I could find to a superscript solidus.
            "/": "⸍",
        }
        return "".join(translation[digit] for digit in number)
|
7087647036635782ed07b6590662c166b71ccd41cb12b8ed37e80f348ded07ff | # Licensed under a 3-clause BSD style license - see LICENSE.rst
class Base:
    """
    The abstract base class of all unit formats.
    """

    # Maps lower-cased format names to formatter classes; filled in
    # automatically by ``__init_subclass__``.
    registry = {}

    def __new__(cls, *args, **kwargs):
        # Formatters are stateless: "instantiating" one simply hands back
        # the class itself, so e.g. ``Fits() is Fits``.
        return cls

    def __init_subclass__(cls, **kwargs):
        # Register each subclass under its lower-cased class name, unless
        # the subclass sets ``name`` itself (a ``name`` merely inherited
        # from a parent class does not count).
        if "name" not in vars(cls):
            cls.name = cls.__name__.lower()

        Base.registry[cls.name] = cls
        super().__init_subclass__(**kwargs)

    @classmethod
    def parse(cls, s):
        """
        Convert a string to a unit object.
        """
        raise NotImplementedError(f"Can not parse with {cls.__name__} format")

    @classmethod
    def to_string(cls, u):
        """
        Convert a unit object to a string.
        """
        raise NotImplementedError(f"Can not output in {cls.__name__} format")
|
e93235d1c56bb73a1b2d1e967387c66c4f3f9852aca97e156eedcaf738177980 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
import copy
import keyword
import math
import warnings
from fractions import Fraction
from astropy.utils import parsing
from . import core, generic, utils
class OGIP(generic.Generic):
    """
    Support the units in `Office of Guest Investigator Programs (OGIP)
    FITS files
    <https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
    """

    # Token names shared by the lexer and the parser below.
    _tokens = (
        "DIVISION",
        "OPEN_PAREN",
        "CLOSE_PAREN",
        "WHITESPACE",
        "STARSTAR",
        "STAR",
        "SIGN",
        "UFLOAT",
        "LIT10",
        "UINT",
        "UNKNOWN",
        "UNIT",
    )

    @staticmethod
    def _generate_unit_names():
        """Build the OGIP name -> unit mapping.

        Returns
        -------
        names, deprecated_names, functions : dict, set, list
            The unit registry, the names deprecated by the standard, and
            the function names the grammar recognizes.
        """
        from astropy import units as u

        names = {}
        deprecated_names = set()

        # Units that accept the SI prefixes below.
        bases = [
            "A", "C", "cd", "eV", "F", "g", "H", "Hz", "J",
            "Jy", "K", "lm", "lx", "m", "mol", "N", "ohm", "Pa",
            "pc", "rad", "s", "S", "sr", "T", "V", "W", "Wb",
        ]  # fmt: skip
        deprecated_bases = []
        prefixes = [
            "y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
            "", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y",
        ]  # fmt: skip

        for base in bases + deprecated_bases:
            for prefix in prefixes:
                key = prefix + base
                if keyword.iskeyword(key):
                    # e.g. "as" (attosecond) would shadow a Python keyword.
                    continue
                names[key] = getattr(u, key)
        for base in deprecated_bases:
            for prefix in prefixes:
                deprecated_names.add(prefix + base)

        # Units accepted only without a prefix.
        simple_units = [
            "angstrom", "arcmin", "arcsec", "AU", "barn", "bin",
            "byte", "chan", "count", "day", "deg", "erg", "G",
            "h", "lyr", "mag", "min", "photon", "pixel",
            "voxel", "yr",
        ]  # fmt: skip
        for unit in simple_units:
            names[unit] = getattr(u, unit)

        # Create a separate, disconnected unit for the special case of
        # Crab and mCrab, since OGIP doesn't define their quantities.
        Crab = u.def_unit(["Crab"], prefixes=False, doc="Crab (X-ray flux)")
        mCrab = u.Unit(10**-3 * Crab)
        names["Crab"] = Crab
        names["mCrab"] = mCrab

        deprecated_units = ["Crab", "mCrab"]
        for unit in deprecated_units:
            deprecated_names.add(unit)

        # Functions are recognized by the grammar, but only "sqrt" is
        # translatable to astropy units; see p_unit_expression below.
        functions = [
            "log", "ln", "exp", "sqrt", "sin", "cos", "tan", "asin",
            "acos", "atan", "sinh", "cosh", "tanh",
        ]  # fmt: skip
        for name in functions:
            names[name] = name

        return names, deprecated_names, functions

    @classmethod
    def _make_lexer(cls):
        """Build the ply lexer for OGIP unit strings."""
        # ply discovers token rules through this local binding.
        tokens = cls._tokens

        t_DIVISION = r"/"
        t_OPEN_PAREN = r"\("
        t_CLOSE_PAREN = r"\)"
        t_WHITESPACE = "[ \t]+"
        t_STARSTAR = r"\*\*"
        t_STAR = r"\*"

        # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
        # Regular expression rules for simple tokens
        def t_UFLOAT(t):
            r"(((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+))|(((\d+\.\d*)|(\.\d+))([eE][+-]?\d+)?)"
            t.value = float(t.value)
            return t

        def t_UINT(t):
            r"\d+"
            t.value = int(t.value)
            return t

        def t_SIGN(t):
            r"[+-](?=\d)"
            t.value = float(t.value + "1")
            return t

        def t_X(t):  # multiplication for factor in front of unit
            r"[x×]"
            return t

        def t_LIT10(t):
            r"10"
            return 10

        def t_UNKNOWN(t):
            r"[Uu][Nn][Kk][Nn][Oo][Ww][Nn]"
            return None

        def t_UNIT(t):
            r"[a-zA-Z][a-zA-Z_]*"
            t.value = cls._get_unit(t)
            return t

        # Don't ignore whitespace
        t_ignore = ""

        # Error handling rule
        def t_error(t):
            raise ValueError(f"Invalid character at col {t.lexpos}")

        return parsing.lex(lextab="ogip_lextab", package="astropy/units")

    @classmethod
    def _make_parser(cls):
        """
        The grammar here is based on the description in the
        `Specification of Physical Units within OGIP FITS files
        <https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__,
        which is not terribly precise. The exact grammar here is
        based on the YACC grammar in the `unity library
        <https://bitbucket.org/nxg/unity/>`_.
        """
        # ply resolves grammar tokens through this local binding.
        tokens = cls._tokens

        def p_main(p):
            """
            main : UNKNOWN
                 | complete_expression
                 | scale_factor complete_expression
                 | scale_factor WHITESPACE complete_expression
            """
            if len(p) == 4:
                p[0] = p[1] * p[3]
            elif len(p) == 3:
                p[0] = p[1] * p[2]
            else:
                p[0] = p[1]

        def p_complete_expression(p):
            """
            complete_expression : product_of_units
            """
            p[0] = p[1]

        def p_product_of_units(p):
            """
            product_of_units : unit_expression
                             | division unit_expression
                             | product_of_units product unit_expression
                             | product_of_units division unit_expression
            """
            if len(p) == 4:
                if p[2] == "DIVISION":
                    p[0] = p[1] / p[3]
                else:
                    p[0] = p[1] * p[3]
            elif len(p) == 3:
                # Leading division: reciprocal of the expression.
                p[0] = p[2] ** -1
            else:
                p[0] = p[1]

        def p_unit_expression(p):
            """
            unit_expression : unit
                            | UNIT OPEN_PAREN complete_expression CLOSE_PAREN
                            | OPEN_PAREN complete_expression CLOSE_PAREN
                            | UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
                            | OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
            """
            # If we run p[1] in cls._functions, it will try and parse each
            # item in the list into a unit, which is slow. Since we know that
            # all the items in the list are strings, we can simply convert
            # p[1] to a string instead.
            p1_str = str(p[1])

            if p1_str in cls._functions and p1_str != "sqrt":
                raise ValueError(
                    f"The function '{p[1]}' is valid in OGIP, but not understood "
                    "by astropy.units."
                )

            if len(p) == 7:
                if p1_str == "sqrt":
                    p[0] = p[1] * p[3] ** (0.5 * p[6])
                else:
                    p[0] = p[1] * p[3] ** p[6]
            elif len(p) == 6:
                p[0] = p[2] ** p[5]
            elif len(p) == 5:
                if p1_str == "sqrt":
                    p[0] = p[3] ** 0.5
                else:
                    p[0] = p[1] * p[3]
            elif len(p) == 4:
                p[0] = p[2]
            else:
                p[0] = p[1]

        def p_scale_factor(p):
            """
            scale_factor : LIT10 power numeric_power
                         | LIT10
                         | signed_float
                         | signed_float power numeric_power
                         | signed_int power numeric_power
            """
            if len(p) == 4:
                p[0] = 10 ** p[3]
            else:
                p[0] = p[1]
            # Can't use np.log10 here, because p[0] may be a Python long.
            if math.log10(p[0]) % 1.0 != 0.0:
                from astropy.units.core import UnitsWarning

                warnings.warn(
                    f"'{p[0]}' scale should be a power of 10 in OGIP format",
                    UnitsWarning,
                )

        def p_division(p):
            """
            division : DIVISION
                     | WHITESPACE DIVISION
                     | WHITESPACE DIVISION WHITESPACE
                     | DIVISION WHITESPACE
            """
            p[0] = "DIVISION"

        def p_product(p):
            """
            product : WHITESPACE
                    | STAR
                    | WHITESPACE STAR
                    | WHITESPACE STAR WHITESPACE
                    | STAR WHITESPACE
            """
            p[0] = "PRODUCT"

        def p_power(p):
            """
            power : STARSTAR
            """
            p[0] = "POWER"

        def p_unit(p):
            """
            unit : UNIT
                 | UNIT power numeric_power
            """
            if len(p) == 4:
                p[0] = p[1] ** p[3]
            else:
                p[0] = p[1]

        def p_numeric_power(p):
            """
            numeric_power : UINT
                          | signed_float
                          | OPEN_PAREN signed_int CLOSE_PAREN
                          | OPEN_PAREN signed_float CLOSE_PAREN
                          | OPEN_PAREN signed_float division UINT CLOSE_PAREN
            """
            if len(p) == 6:
                # Exact rational exponent, e.g. "(1/2)".
                p[0] = Fraction(int(p[2]), int(p[4]))
            elif len(p) == 4:
                p[0] = p[2]
            else:
                p[0] = p[1]

        def p_sign(p):
            """
            sign : SIGN
                 |
            """
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = 1.0

        def p_signed_int(p):
            """
            signed_int : SIGN UINT
            """
            p[0] = p[1] * p[2]

        def p_signed_float(p):
            """
            signed_float : sign UINT
                         | sign UFLOAT
            """
            p[0] = p[1] * p[2]

        def p_error(p):
            raise ValueError()

        return parsing.yacc(tabmodule="ogip_parsetab", package="astropy/units")

    @classmethod
    def _validate_unit(cls, unit, detailed_exception=True):
        """Raise ValueError if *unit* is not an OGIP-standard name.

        Also emits a deprecation warning for names the standard has
        deprecated.  When ``detailed_exception`` is False, the ValueError
        carries no message (cheaper for speculative lookups).
        """
        if unit not in cls._units:
            if detailed_exception:
                raise ValueError(
                    f"Unit '{unit}' not supported by the OGIP standard. "
                    + utils.did_you_mean_units(
                        unit,
                        cls._units,
                        cls._deprecated_units,
                        cls._to_decomposed_alternative,
                    ),
                )
            else:
                raise ValueError()

        if unit in cls._deprecated_units:
            utils.unit_deprecation_warning(
                unit, cls._units[unit], "OGIP", cls._to_decomposed_alternative
            )

    @classmethod
    def _parse_unit(cls, unit, detailed_exception=True):
        # Validation either raises or warns; on success return the unit.
        cls._validate_unit(unit, detailed_exception=detailed_exception)
        return cls._units[unit]

    @classmethod
    def parse(cls, s, debug=False):
        """Parse an OGIP unit string into a unit object."""
        s = s.strip()
        try:
            # This is a short circuit for the case where the string is
            # just a single unit name
            return cls._parse_unit(s, detailed_exception=False)
        except ValueError:
            try:
                return core.Unit(cls._parser.parse(s, lexer=cls._lexer, debug=debug))
            except ValueError as e:
                if str(e):
                    raise
                else:
                    raise ValueError(f"Syntax error parsing unit '{s}'")

    @classmethod
    def _get_unit_name(cls, unit):
        # Map the unit object back to its OGIP name, enforcing that the
        # name is actually part of the standard.
        name = unit.get_format_name("ogip")
        cls._validate_unit(name)
        return name

    @classmethod
    def _format_unit_list(cls, units):
        # Factors sorted case-insensitively by name, separated by spaces
        # as OGIP prescribes.
        out = []
        units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())

        for base, power in units:
            if power == 1:
                out.append(cls._get_unit_name(base))
            else:
                power = utils.format_power(power)
                if "/" in power:
                    # Fractional exponents need the parenthesized form.
                    out.append(f"{cls._get_unit_name(base)}**({power})")
                else:
                    out.append(f"{cls._get_unit_name(base)}**{power}")

        return " ".join(out)

    @classmethod
    def to_string(cls, unit):
        """Convert *unit* to an OGIP-format string, warning when the scale
        is not a power of 10."""
        # Remove units that aren't known to the format
        unit = utils.decompose_to_known_units(unit, cls._get_unit_name)

        if isinstance(unit, core.CompositeUnit):
            # Can't use np.log10 here, because p[0] may be a Python long.
            if math.log10(unit.scale) % 1.0 != 0.0:
                warnings.warn(
                    f"'{unit.scale}' scale should be a power of 10 in OGIP format",
                    core.UnitsWarning,
                )

        return generic._to_string(cls, unit)

    @classmethod
    def _to_decomposed_alternative(cls, unit):
        """Like ``to_string``, but when the scale is unrepresentable, return
        a scale-free string plus a note about the multiplier."""
        # Remove units that aren't known to the format
        unit = utils.decompose_to_known_units(unit, cls._get_unit_name)

        if isinstance(unit, core.CompositeUnit):
            # Can't use np.log10 here, because p[0] may be a Python long.
            if math.log10(unit.scale) % 1.0 != 0.0:
                scale = unit.scale
                unit = copy.copy(unit)
                unit._scale = 1.0
                return (
                    f"{generic._to_string(cls, unit)} (with data multiplied by {scale})"
                )

        # BUGFIX: generic._to_string takes the formatter class as its first
        # argument; it was previously called here without it, which passed
        # the unit in place of ``cls``.
        return generic._to_string(cls, unit)
|
610427d85a1ad6644080967c1f39321fa5cd74c8732ade51093967a55d8e3e74 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities shared by the different formats.
"""
import warnings
from astropy.units.utils import maybe_simple_fraction
from astropy.utils.misc import did_you_mean
def get_grouped_by_powers(bases, powers):
    """
    Groups the powers and bases in the given
    `~astropy.units.CompositeUnit` into positive powers and
    negative powers for easy display on either side of a solidus.

    Parameters
    ----------
    bases : list of `astropy.units.UnitBase` instances

    powers : list of int

    Returns
    -------
    positives, negatives : tuple of lists
        Each element in each list is tuple of the form (*base*,
        *power*). The negatives have the sign of their power reversed
        (i.e. the powers are all positive).

    Raises
    ------
    ValueError
        If any power is zero.
    """
    numerator = []
    denominator = []
    for base, power in zip(bases, powers):
        if power > 0:
            numerator.append((base, power))
        elif power < 0:
            # Flip the sign so the denominator holds positive exponents.
            denominator.append((base, -power))
        else:
            raise ValueError("Unit with 0 power")
    return numerator, denominator
def split_mantissa_exponent(v, format_spec=".8g"):
    """
    Given a number, split it into its mantissa and base 10 exponent
    parts, each as strings. If the exponent is too small, it may be
    returned as the empty string.

    Parameters
    ----------
    v : float

    format_spec : str, optional
        Number representation formatting string

    Returns
    -------
    mantissa, exponent : tuple of strings
    """
    text = format(v, format_spec)
    mantissa_part, _, exponent_part = text.partition("e")

    # A mantissa that is exactly 1 ("1.000...") is suppressed, since
    # "10^n" alone reads better than "1 x 10^n".
    if mantissa_part == "1." + "0" * (len(mantissa_part) - 2):
        m = ""
    else:
        m = mantissa_part

    if exponent_part:
        # Drop the "+" sign and any leading zeros from the exponent,
        # keeping a leading "-" when present.
        ex = exponent_part.lstrip("0+")
        if ex.startswith("-"):
            ex = "-" + ex[1:].lstrip("0")
    else:
        ex = ""

    return m, ex
def decompose_to_known_units(unit, func):
    """
    Partially decomposes a unit so it is only composed of units that
    are "known" to a given format.

    Parameters
    ----------
    unit : `~astropy.units.UnitBase` instance

    func : callable
        This function will be called to determine if a given unit is
        "known". If the unit is not known, this function should raise a
        `ValueError`.

    Returns
    -------
    unit : `~astropy.units.UnitBase` instance
        A flattened unit.
    """
    from astropy.units import core

    if isinstance(unit, core.CompositeUnit):
        # Rebuild the composite from its recursively-decomposed factors.
        result = core.Unit(unit.scale)
        for factor, exponent in zip(unit.bases, unit.powers):
            result = result * decompose_to_known_units(factor, func) ** exponent
        return result

    if isinstance(unit, core.NamedUnit):
        try:
            func(unit)
        except ValueError:
            # Unknown to the target format: recurse into the unit's
            # definition when one exists; otherwise propagate.
            if isinstance(unit, core.Unit):
                return decompose_to_known_units(unit._represents, func)
            raise
        return unit

    raise TypeError(
        f"unit argument must be a 'NamedUnit' or 'CompositeUnit', not {type(unit)}"
    )
def format_power(power):
    """
    Converts a value for a power (which may be floating point or a
    `fractions.Fraction` object), into a string looking like either
    an integer or a fraction, if the power is close to that.
    """
    if not hasattr(power, "denominator"):
        power = maybe_simple_fraction(power)
        # BUGFIX: this previously probed the misspelled attribute
        # "denonimator" (and read the non-existent "nominator"), so the
        # whole-number simplification never actually ran.
        if getattr(power, "denominator", None) == 1:
            power = power.numerator

    return str(power)
def _try_decomposed(unit, format_decomposed):
represents = getattr(unit, "_represents", None)
if represents is not None:
try:
represents_string = format_decomposed(represents)
except ValueError:
pass
else:
return represents_string
decomposed = unit.decompose()
if decomposed is not unit:
try:
decompose_string = format_decomposed(decomposed)
except ValueError:
pass
else:
return decompose_string
return None
def did_you_mean_units(s, all_units, deprecated_units, format_decomposed):
    """
    A wrapper around `astropy.utils.misc.did_you_mean` that deals with
    the display of deprecated units.

    Parameters
    ----------
    s : str
        The invalid unit string

    all_units : dict
        A mapping from valid unit names to unit objects.

    deprecated_units : sequence
        The deprecated unit names

    format_decomposed : callable
        A function to turn a decomposed version of the unit into a
        string. Should return `None` if not possible

    Returns
    -------
    msg : str
        A string message with a list of alternatives, or the empty
        string.
    """

    def annotate(candidate):
        # Non-deprecated candidates pass through untouched; deprecated
        # ones get flagged, plus a decomposed alternative when one can
        # be produced.
        if candidate not in deprecated_units:
            return (candidate,)
        suggestions = [candidate + " (deprecated)"]
        alternative = _try_decomposed(all_units[candidate], format_decomposed)
        if alternative is not None:
            suggestions.append(alternative)
        return suggestions

    return did_you_mean(s, all_units, fix=annotate)
def unit_deprecation_warning(s, unit, standard_name, format_decomposed):
    """
    Raises a UnitsWarning about a deprecated unit in a given format.
    Suggests a decomposed alternative if one is available.

    Parameters
    ----------
    s : str
        The deprecated unit name.

    unit : astropy.units.core.UnitBase
        The unit object.

    standard_name : str
        The name of the format for which the unit is deprecated.

    format_decomposed : callable
        A function to turn a decomposed version of the unit into a
        string. Should return `None` if not possible
    """
    from astropy.units.core import UnitsWarning

    alternative = _try_decomposed(unit, format_decomposed)
    message = f"The unit '{s}' has been deprecated in the {standard_name} standard."
    if alternative is not None:
        message += f" Suggested: {alternative}."
    warnings.warn(message, UnitsWarning)
|
77c24ea6ee22f660ef62ef12409c953ccfe6859f50ff79b5ce92e971c7cbae9c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "FITS" unit format.
"""
import copy
import keyword
import operator
import numpy as np
from . import core, generic, utils
class Fits(generic.Generic):
    """
    The FITS standard unit format.

    This supports the format defined in the Units section of the `FITS
    Standard <https://fits.gsfc.nasa.gov/fits_standard.html>`_.
    """

    name = "fits"

    @staticmethod
    def _generate_unit_names():
        """Build the FITS name -> unit mapping.

        Returns the ``(names, deprecated_names, functions)`` triple used
        by the `generic.Generic` machinery; FITS defines no function units.
        """
        from astropy import units as u

        # add some units up-front for which we don't want to use prefixes
        # and that have different names from the astropy default.
        names = {
            "Celsius": u.deg_C,
            "deg C": u.deg_C,
        }
        deprecated_names = set()

        # Units that accept the SI prefixes below.
        bases = [
            "m", "g", "s", "rad", "sr", "K", "A", "mol", "cd",
            "Hz", "J", "W", "V", "N", "Pa", "C", "Ohm", "S",
            "F", "Wb", "T", "H", "lm", "lx", "a", "yr", "eV",
            "pc", "Jy", "mag", "R", "bit", "byte", "G", "barn",
        ]  # fmt: skip
        deprecated_bases = []
        prefixes = [
            "y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
            "", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y",
        ]  # fmt: skip

        # "dbyte" (deci-byte) has no counterpart in the astropy namespace,
        # so it gets a purpose-built definition here.
        special_cases = {"dbyte": u.Unit("dbyte", 0.1 * u.byte)}

        for base in bases + deprecated_bases:
            for prefix in prefixes:
                key = prefix + base
                if keyword.iskeyword(key):
                    # e.g. "as" (attosecond) would shadow a Python keyword.
                    continue
                elif key in special_cases:
                    names[key] = special_cases[key]
                else:
                    names[key] = getattr(u, key)
        for base in deprecated_bases:
            for prefix in prefixes:
                deprecated_names.add(prefix + base)

        # Units accepted only without a prefix.
        simple_units = [
            "deg", "arcmin", "arcsec", "mas", "min", "h", "d", "Ry",
            "solMass", "u", "solLum", "solRad", "AU", "lyr", "count",
            "ct", "photon", "ph", "pixel", "pix", "D", "Sun", "chan",
            "bin", "voxel", "adu", "beam", "erg", "Angstrom", "angstrom",
        ]  # fmt: skip
        deprecated_units = []

        for unit in simple_units + deprecated_units:
            names[unit] = getattr(u, unit)
        for unit in deprecated_units:
            deprecated_names.add(unit)

        return names, deprecated_names, []

    @classmethod
    def _validate_unit(cls, unit, detailed_exception=True):
        """Raise ValueError if *unit* is not a FITS-standard name.

        Also emits a deprecation warning for names the standard has
        deprecated.  When ``detailed_exception`` is False, the ValueError
        carries no message (cheaper for speculative lookups).
        """
        if unit not in cls._units:
            if detailed_exception:
                raise ValueError(
                    f"Unit '{unit}' not supported by the FITS standard. "
                    + utils.did_you_mean_units(
                        unit,
                        cls._units,
                        cls._deprecated_units,
                        cls._to_decomposed_alternative,
                    ),
                )
            else:
                raise ValueError()

        if unit in cls._deprecated_units:
            utils.unit_deprecation_warning(
                unit, cls._units[unit], "FITS", cls._to_decomposed_alternative
            )

    @classmethod
    def _parse_unit(cls, unit, detailed_exception=True):
        # Validation either raises or warns; on success return the unit.
        cls._validate_unit(unit, detailed_exception=detailed_exception)
        return cls._units[unit]

    @classmethod
    def _get_unit_name(cls, unit):
        # Map the unit object back to its FITS name, enforcing that the
        # name is actually part of the standard.
        name = unit.get_format_name("fits")
        cls._validate_unit(name)
        return name

    @classmethod
    def to_string(cls, unit):
        """Convert *unit* to a FITS-format string.

        Raises
        ------
        `~astropy.units.core.UnitScaleError`
            If the unit's scale is not a power of 10, which FITS cannot
            represent.
        """
        # Remove units that aren't known to the format
        unit = utils.decompose_to_known_units(unit, cls._get_unit_name)

        parts = []

        if isinstance(unit, core.CompositeUnit):
            base = np.log10(unit.scale)

            if base % 1.0 != 0.0:
                raise core.UnitScaleError(
                    "The FITS unit format is not able to represent scales "
                    "that are not powers of 10. Multiply your data by "
                    f"{unit.scale:e}."
                )
            elif unit.scale != 1.0:
                # The scale is an exact power of ten: emit it as "10**n".
                parts.append(f"10**{int(base)}")

            pairs = list(zip(unit.bases, unit.powers))
            if len(pairs):
                # Positive powers first, for readability.
                pairs.sort(key=operator.itemgetter(1), reverse=True)
                parts.append(cls._format_unit_list(pairs))

            s = " ".join(parts)
        elif isinstance(unit, core.NamedUnit):
            s = cls._get_unit_name(unit)

        return s

    @classmethod
    def _to_decomposed_alternative(cls, unit):
        """Like ``to_string``, but when the scale cannot be represented,
        return a scale-free string plus a note about the multiplier."""
        try:
            s = cls.to_string(unit)
        except core.UnitScaleError:
            scale = unit.scale
            unit = copy.copy(unit)
            unit._scale = 1.0
            return f"{cls.to_string(unit)} (with data multiplied by {scale})"
        return s

    @classmethod
    def parse(cls, s, debug=False):
        # Defer to the shared generic parser, then reject function units,
        # for which FITS has no syntax.
        result = super().parse(s, debug)
        if hasattr(result, "function_unit"):
            raise ValueError("Function units are not yet supported for FITS units.")
        return result
|
960d8bb9dbf76129bb3c91d9bd92884c9e6b87530bcd6e7af0f5e750620bb4b2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles a "generic" string format for units
"""
import re
import unicodedata
import warnings
from fractions import Fraction
from astropy.utils import classproperty, parsing
from astropy.utils.misc import did_you_mean
from . import core, utils
from .base import Base
def _to_string(cls, unit):
    """Render *unit* as a generic-format string on behalf of formatter *cls*.

    Shared by the generic-style formatters: the scale (when shown), then
    the positive-power factors, then "/" and the negative-power factors
    (parenthesized when there is more than one of them).
    """
    if isinstance(unit, core.CompositeUnit):
        tokens = []
        if cls._show_scale and unit.scale != 1:
            tokens.append(f"{unit.scale:g}")

        if len(unit.bases):
            numerator, denominator = utils.get_grouped_by_powers(
                unit.bases, unit.powers
            )
            if len(numerator):
                tokens.append(cls._format_unit_list(numerator))
            elif len(tokens) == 0:
                # Nothing before the solidus yet: lead with a literal "1"
                # so the division has something to divide.
                tokens.append("1")

            if len(denominator):
                tokens.append("/")
                rendered = cls._format_unit_list(denominator)
                if len(denominator) == 1:
                    tokens.append(f"{rendered}")
                else:
                    tokens.append(f"({rendered})")

        return " ".join(tokens)
    elif isinstance(unit, core.NamedUnit):
        return cls._get_unit_name(unit)
class Generic(Base):
    """
    A "generic" format.
    The syntax of the format is based directly on the FITS standard,
    but instead of only supporting the units that FITS knows about, it
    supports any unit available in the `astropy.units` namespace.
    """

    # Whether to_string() should include a leading numerical scale factor.
    _show_scale = True

    # Token names shared by the ply lexer and parser built below.
    _tokens = (
        "COMMA",
        "DOUBLE_STAR",
        "STAR",
        "PERIOD",
        "SOLIDUS",
        "CARET",
        "OPEN_PAREN",
        "CLOSE_PAREN",
        "FUNCNAME",
        "UNIT",
        "SIGN",
        "UINT",
        "UFLOAT",
    )

    # The lazy classproperties below are computed once per class and cached.
    @classproperty(lazy=True)
    def _all_units(cls):
        # Expected to yield (units, deprecated units, functions); the three
        # properties below index into this triple.
        return cls._generate_unit_names()

    @classproperty(lazy=True)
    def _units(cls):
        return cls._all_units[0]

    @classproperty(lazy=True)
    def _deprecated_units(cls):
        return cls._all_units[1]

    @classproperty(lazy=True)
    def _functions(cls):
        return cls._all_units[2]

    @classproperty(lazy=True)
    def _parser(cls):
        return cls._make_parser()

    @classproperty(lazy=True)
    def _lexer(cls):
        return cls._make_lexer()

    @classmethod
    def _make_lexer(cls):
        # NOTE: ply builds the lexer by introspecting this local namespace:
        # the ``tokens`` tuple, the ``t_*`` string rules, and the docstrings
        # of the ``t_*`` functions (which are the token regexes).  Those
        # docstrings are functional -- do not edit them as documentation.
        tokens = cls._tokens

        t_COMMA = r"\,"
        t_STAR = r"\*"
        t_PERIOD = r"\."
        t_SOLIDUS = r"/"
        t_DOUBLE_STAR = r"\*\*"
        t_CARET = r"\^"
        t_OPEN_PAREN = r"\("
        t_CLOSE_PAREN = r"\)"

        # NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
        # Regular expression rules for simple tokens
        def t_UFLOAT(t):
            r"((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?"
            # Demote to UINT when the text is really an integer (no
            # exponent and no fractional digits).
            if not re.search(r"[eE\.]", t.value):
                t.type = "UINT"
                t.value = int(t.value)
            elif t.value.endswith("."):
                t.type = "UINT"
                t.value = int(t.value[:-1])
            else:
                t.value = float(t.value)
            return t

        def t_UINT(t):
            r"\d+"
            t.value = int(t.value)
            return t

        def t_SIGN(t):
            r"[+-](?=\d)"
            # Store the sign as +1 or -1.
            t.value = int(t.value + "1")
            return t

        # This needs to be a function so we can force it to happen
        # before t_UNIT
        def t_FUNCNAME(t):
            r"((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()"
            return t

        def t_UNIT(t):
            "%|([YZEPTGMkhdcmu\N{MICRO SIGN}npfazy]?'((?!\\d)\\w)+')|((?!\\d)\\w)+"
            t.value = cls._get_unit(t)
            return t

        t_ignore = " "

        # Error handling rule
        def t_error(t):
            raise ValueError(f"Invalid character at col {t.lexpos}")

        return parsing.lex(
            lextab="generic_lextab", package="astropy/units", reflags=int(re.UNICODE)
        )

    @classmethod
    def _make_parser(cls):
        """
        The grammar here is based on the description in the `FITS
        standard
        <http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
        Section 4.3, which is not terribly precise. The exact grammar
        here is based on the YACC grammar in the `unity library
        <https://bitbucket.org/nxg/unity/>`_.

        This same grammar is used by the `"fits"` and `"vounit"`
        formats, the only difference being the set of available unit
        strings.
        """
        tokens = cls._tokens

        # NOTE: the docstrings of the ``p_*`` functions below are the ply
        # grammar rules -- they are functional, not documentation.

        def p_main(p):
            """
            main : unit
                 | structured_unit
                 | structured_subunit
            """
            if isinstance(p[1], tuple):
                # Unpack possible StructuredUnit inside a tuple, ie.,
                # ignore any set of very outer parentheses.
                p[0] = p[1][0]
            else:
                p[0] = p[1]

        def p_structured_subunit(p):
            """
            structured_subunit : OPEN_PAREN structured_unit CLOSE_PAREN
            """
            # We hide a structured unit enclosed by parentheses inside
            # a tuple, so that we can easily distinguish units like
            # "(au, au/day), yr" from "au, au/day, yr".
            p[0] = (p[2],)

        def p_structured_unit(p):
            """
            structured_unit : subunit COMMA
                            | subunit COMMA subunit
            """
            from astropy.units.structured import StructuredUnit

            inputs = (p[1],) if len(p) == 3 else (p[1], p[3])
            units = ()
            for subunit in inputs:
                if isinstance(subunit, tuple):
                    # Structured unit that should be its own entry in the
                    # new StructuredUnit (was enclosed in parentheses).
                    units += subunit
                elif isinstance(subunit, StructuredUnit):
                    # Structured unit whose entries should be
                    # individually added to the new StructuredUnit.
                    units += subunit.values()
                else:
                    # Regular unit to be added to the StructuredUnit.
                    units += (subunit,)
            p[0] = StructuredUnit(units)

        def p_subunit(p):
            """
            subunit : unit
                    | structured_unit
                    | structured_subunit
            """
            p[0] = p[1]

        def p_unit(p):
            """
            unit : product_of_units
                 | factor product_of_units
                 | factor product product_of_units
                 | division_product_of_units
                 | factor division_product_of_units
                 | factor product division_product_of_units
                 | inverse_unit
                 | factor inverse_unit
                 | factor product inverse_unit
                 | factor
            """
            from astropy.units.core import Unit

            if len(p) == 2:
                p[0] = Unit(p[1])
            elif len(p) == 3:
                p[0] = Unit(p[1] * p[2])
            elif len(p) == 4:
                p[0] = Unit(p[1] * p[3])

        def p_division_product_of_units(p):
            """
            division_product_of_units : division_product_of_units division product_of_units
                                      | product_of_units
            """
            from astropy.units.core import Unit

            if len(p) == 4:
                p[0] = Unit(p[1] / p[3])
            else:
                p[0] = p[1]

        def p_inverse_unit(p):
            """
            inverse_unit : division unit_expression
            """
            p[0] = p[2] ** -1

        def p_factor(p):
            """
            factor : factor_fits
                   | factor_float
                   | factor_int
            """
            p[0] = p[1]

        def p_factor_float(p):
            """
            factor_float : signed_float
                         | signed_float UINT signed_int
                         | signed_float UINT power numeric_power
            """
            if cls.name == "fits":
                raise ValueError("Numeric factor not supported by FITS")
            if len(p) == 4:
                p[0] = p[1] * p[2] ** float(p[3])
            elif len(p) == 5:
                p[0] = p[1] * p[2] ** float(p[4])
            elif len(p) == 2:
                p[0] = p[1]

        def p_factor_int(p):
            """
            factor_int : UINT
                       | UINT signed_int
                       | UINT power numeric_power
                       | UINT UINT signed_int
                       | UINT UINT power numeric_power
            """
            if cls.name == "fits":
                raise ValueError("Numeric factor not supported by FITS")
            if len(p) == 2:
                p[0] = p[1]
            elif len(p) == 3:
                p[0] = p[1] ** float(p[2])
            elif len(p) == 4:
                if isinstance(p[2], int):
                    p[0] = p[1] * p[2] ** float(p[3])
                else:
                    p[0] = p[1] ** float(p[3])
            elif len(p) == 5:
                p[0] = p[1] * p[2] ** p[4]

        def p_factor_fits(p):
            """
            factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN
                        | UINT power OPEN_PAREN UINT CLOSE_PAREN
                        | UINT power signed_int
                        | UINT power UINT
                        | UINT SIGN UINT
                        | UINT OPEN_PAREN signed_int CLOSE_PAREN
            """
            # Only powers of ten are accepted as FITS-style factors.
            if p[1] != 10:
                if cls.name == "fits":
                    raise ValueError("Base must be 10")
                else:
                    return
            if len(p) == 4:
                if p[2] in ("**", "^"):
                    p[0] = 10 ** p[3]
                else:
                    p[0] = 10 ** (p[2] * p[3])
            elif len(p) == 5:
                p[0] = 10 ** p[3]
            elif len(p) == 6:
                p[0] = 10 ** p[4]

        def p_product_of_units(p):
            """
            product_of_units : unit_expression product product_of_units
                             | unit_expression product_of_units
                             | unit_expression
            """
            if len(p) == 2:
                p[0] = p[1]
            elif len(p) == 3:
                p[0] = p[1] * p[2]
            else:
                p[0] = p[1] * p[3]

        def p_unit_expression(p):
            """
            unit_expression : function
                            | unit_with_power
                            | OPEN_PAREN product_of_units CLOSE_PAREN
            """
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = p[2]

        def p_unit_with_power(p):
            """
            unit_with_power : UNIT power numeric_power
                            | UNIT numeric_power
                            | UNIT
            """
            if len(p) == 2:
                p[0] = p[1]
            elif len(p) == 3:
                p[0] = p[1] ** p[2]
            else:
                p[0] = p[1] ** p[3]

        def p_numeric_power(p):
            """
            numeric_power : sign UINT
                          | OPEN_PAREN paren_expr CLOSE_PAREN
            """
            if len(p) == 3:
                p[0] = p[1] * p[2]
            elif len(p) == 4:
                p[0] = p[2]

        def p_paren_expr(p):
            """
            paren_expr : sign UINT
                       | signed_float
                       | frac
            """
            if len(p) == 3:
                p[0] = p[1] * p[2]
            else:
                p[0] = p[1]

        def p_frac(p):
            """
            frac : sign UINT division sign UINT
            """
            p[0] = Fraction(p[1] * p[2], p[4] * p[5])

        def p_sign(p):
            """
            sign : SIGN
                 |
            """
            # An absent sign is treated as +1.
            if len(p) == 2:
                p[0] = p[1]
            else:
                p[0] = 1

        def p_product(p):
            """
            product : STAR
                    | PERIOD
            """
            pass

        def p_division(p):
            """
            division : SOLIDUS
            """
            pass

        def p_power(p):
            """
            power : DOUBLE_STAR
                  | CARET
            """
            p[0] = p[1]

        def p_signed_int(p):
            """
            signed_int : SIGN UINT
            """
            p[0] = p[1] * p[2]

        def p_signed_float(p):
            """
            signed_float : sign UINT
                         | sign UFLOAT
            """
            p[0] = p[1] * p[2]

        def p_function_name(p):
            """
            function_name : FUNCNAME
            """
            p[0] = p[1]

        def p_function(p):
            """
            function : function_name OPEN_PAREN main CLOSE_PAREN
            """
            if p[1] == "sqrt":
                p[0] = p[3] ** 0.5
                return
            elif p[1] in ("mag", "dB", "dex"):
                function_unit = cls._parse_unit(p[1])
                # In Generic, this is callable, but that does not have to
                # be the case in subclasses (e.g., in VOUnit it is not).
                if callable(function_unit):
                    p[0] = function_unit(p[3])
                    return
            raise ValueError(f"'{p[1]}' is not a recognized function")

        def p_error(p):
            raise ValueError()

        return parsing.yacc(tabmodule="generic_parsetab", package="astropy/units")

    @classmethod
    def _get_unit(cls, t):
        # Resolve a UNIT token to a unit object, falling back to aliases
        # registered in the current unit registry.
        try:
            return cls._parse_unit(t.value)
        except ValueError as e:
            registry = core.get_current_unit_registry()
            if t.value in registry.aliases:
                return registry.aliases[t.value]
            raise ValueError(f"At col {t.lexpos}, {str(e)}")

    @classmethod
    def _parse_unit(cls, s, detailed_exception=True):
        registry = core.get_current_unit_registry().registry
        # Normalize common unicode spellings to the names the registry uses.
        if s in cls._unit_symbols:
            s = cls._unit_symbols[s]
        elif not s.isascii():
            if s[0] == "\N{MICRO SIGN}":
                s = "u" + s[1:]
            if s[-1] in cls._prefixable_unit_symbols:
                s = s[:-1] + cls._prefixable_unit_symbols[s[-1]]
            elif len(s) > 1 and s[-1] in cls._unit_suffix_symbols:
                s = s[:-1] + cls._unit_suffix_symbols[s[-1]]
            elif s.endswith("R\N{INFINITY}"):
                s = s[:-2] + "Ry"
        if s in registry:
            return registry[s]
        # detailed_exception=False gives a cheap, message-less error for
        # callers that only probe (see _do_parse).
        if detailed_exception:
            raise ValueError(f"{s} is not a valid unit. {did_you_mean(s, registry)}")
        else:
            raise ValueError()

    # Whole-token unicode replacements.
    _unit_symbols = {
        "%": "percent",
        "\N{PRIME}": "arcmin",
        "\N{DOUBLE PRIME}": "arcsec",
        "\N{MODIFIER LETTER SMALL H}": "hourangle",
        "e\N{SUPERSCRIPT MINUS}": "electron",
    }

    # Final-character replacements for units that may carry an SI prefix.
    _prefixable_unit_symbols = {
        "\N{GREEK CAPITAL LETTER OMEGA}": "Ohm",
        "\N{LATIN CAPITAL LETTER A WITH RING ABOVE}": "Angstrom",
        "\N{SCRIPT SMALL L}": "l",
    }

    # Final-character replacements that become name suffixes (e.g. M_sun).
    _unit_suffix_symbols = {
        "\N{CIRCLED DOT OPERATOR}": "sun",
        "\N{SUN}": "sun",
        "\N{CIRCLED PLUS}": "earth",
        "\N{EARTH}": "earth",
        "\N{JUPITER}": "jupiter",
        "\N{LATIN SUBSCRIPT SMALL LETTER E}": "_e",
        "\N{LATIN SUBSCRIPT SMALL LETTER P}": "_p",
    }

    _translations = str.maketrans(
        {
            "\N{GREEK SMALL LETTER MU}": "\N{MICRO SIGN}",
            "\N{MINUS SIGN}": "-",
        }
    )
    """Character translations that should be applied before parsing a string.

    Note that this does explicitly *not* generally translate MICRO SIGN to u,
    since then a string like 'µ' would be interpreted as unit mass.
    """

    _superscripts = (
        "\N{SUPERSCRIPT MINUS}"
        "\N{SUPERSCRIPT PLUS SIGN}"
        "\N{SUPERSCRIPT ZERO}"
        "\N{SUPERSCRIPT ONE}"
        "\N{SUPERSCRIPT TWO}"
        "\N{SUPERSCRIPT THREE}"
        "\N{SUPERSCRIPT FOUR}"
        "\N{SUPERSCRIPT FIVE}"
        "\N{SUPERSCRIPT SIX}"
        "\N{SUPERSCRIPT SEVEN}"
        "\N{SUPERSCRIPT EIGHT}"
        "\N{SUPERSCRIPT NINE}"
    )

    _superscript_translations = str.maketrans(_superscripts, "-+0123456789")
    # Optional leading superscript sign, then superscript digits only
    # (_superscripts[2:] skips the sign characters).
    _regex_superscript = re.compile(f"[{_superscripts}]?[{_superscripts[2:]}]+")
    _regex_deg = re.compile("°([CF])?")

    @classmethod
    def _convert_superscript(cls, m):
        # Superscript run -> parenthesized ASCII number, e.g. "²" -> "(2)".
        return f"({m.group().translate(cls._superscript_translations)})"

    @classmethod
    def _convert_deg(cls, m):
        # NOTE(review): this uses ``m.string`` (the *entire* input string),
        # not ``m.group()`` (just the matched text).  For inputs where the
        # degree sign is not the whole string this looks suspect -- confirm.
        if len(m.string) == 1:
            return "deg"
        return m.string.replace("°", "deg_")

    @classmethod
    def parse(cls, s, debug=False):
        if not isinstance(s, str):
            s = s.decode("ascii")
        elif not s.isascii():
            # common normalization of unicode strings to avoid
            # having to deal with multiple representations of
            # the same character. This normalizes to "composed" form
            # and will e.g. convert OHM SIGN to GREEK CAPITAL LETTER OMEGA
            s = unicodedata.normalize("NFC", s)

        # Translate some basic unicode items that we'd like to support on
        # input but are not standard.
        s = s.translate(cls._translations)

        # TODO: might the below be better done in the parser/lexer?
        # Translate superscripts to parenthesized numbers; this ensures
        # that mixes of superscripts and regular numbers fail.
        s = cls._regex_superscript.sub(cls._convert_superscript, s)
        # Translate possible degrees.
        s = cls._regex_deg.sub(cls._convert_deg, s)

        result = cls._do_parse(s, debug=debug)
        # Check for excess solidi, but exclude fractional exponents (accepted)
        n_slashes = s.count("/")
        if n_slashes > 1 and (n_slashes - len(re.findall(r"\(\d+/\d+\)", s))) > 1:
            warnings.warn(
                "'{}' contains multiple slashes, which is "
                "discouraged by the FITS standard".format(s),
                core.UnitsWarning,
            )
        return result

    @classmethod
    def _do_parse(cls, s, debug=False):
        try:
            # This is a short circuit for the case where the string
            # is just a single unit name
            return cls._parse_unit(s, detailed_exception=False)
        except ValueError as e:
            # Fall back to the full ply parser; the outer ``e`` is unused.
            try:
                return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
            except ValueError as e:
                if str(e):
                    raise
                else:
                    raise ValueError(f"Syntax error parsing unit '{s}'")

    @classmethod
    def _get_unit_name(cls, unit):
        return unit.get_format_name("generic")

    @classmethod
    def _format_unit_list(cls, units):
        out = []
        # Sorts the (base, power) pairs in place, case-insensitively by name.
        units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
        for base, power in units:
            if power == 1:
                out.append(cls._get_unit_name(base))
            else:
                power = utils.format_power(power)
                # Fractional or decimal powers need parentheses.
                if "/" in power or "." in power:
                    out.append(f"{cls._get_unit_name(base)}({power})")
                else:
                    out.append(f"{cls._get_unit_name(base)}{power}")
        return " ".join(out)

    @classmethod
    def to_string(cls, unit):
        return _to_string(cls, unit)
class Unscaled(Generic):
    """
    A format that doesn't display the scale part of the unit, other
    than that, it is identical to the `Generic` format.

    This is used in some error messages where the scale is irrelevant.
    """

    # Suppress the leading scale factor in _to_string().
    _show_scale = False
|
82e1e992999f5ee771671850371f293fad57f3793879e55a4211354a89f3df62 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from astropy.units.core import (
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
unit_scale_converter,
)
from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
    """Like Unit._get_converter, except returns None if no scaling is needed,
    i.e., if the inferred scale is unity."""
    converter = from_unit._get_converter(to_unit)
    if converter is unit_scale_converter:
        # Identity conversion: signal "nothing to do" with None.
        return None
    return converter
def get_converters_and_unit(f, unit1, unit2):
    """Determine the converters and result unit for two-input ufunc *f*.

    Returns a two-element list of converters (one per input; ``None`` means
    no conversion needed, ``False`` means the corresponding input is only
    acceptable if its bare value is 0, inf, or nan) and the unit the result
    will have.
    """
    converters = [None, None]
    # By default, we try adjusting unit2 to unit1, so that the result will
    # be unit1 as well. But if there is no second unit, we have to try
    # adjusting unit1 (to dimensionless, see below).
    if unit2 is None:
        if unit1 is None:
            # No units for any input -- e.g., np.add(a1, a2, out=q)
            return converters, dimensionless_unscaled
        changeable = 0
        # swap units.
        unit2 = unit1
        unit1 = None
    elif unit2 is unit1:
        # ensure identical units is fast ("==" is slow, so avoid that).
        return converters, unit1
    else:
        changeable = 1

    # Try to get a converter from unit2 to unit1.
    if unit1 is None:
        try:
            converters[changeable] = get_converter(unit2, dimensionless_unscaled)
        except UnitsError:
            # special case: would be OK if unitless number is zero, inf, nan
            converters[1 - changeable] = False
            return converters, unit2
        else:
            return converters, dimensionless_unscaled
    else:
        try:
            converters[changeable] = get_converter(unit2, unit1)
        except UnitsError:
            raise UnitConversionError(
                f"Can only apply '{f.__name__}' function to quantities "
                "with compatible dimensions"
            )
        return converters, unit1
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is being passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
def helper_onearg_test(f, unit):
    """Helper for one-input ufuncs that ignore the unit and return a plain
    array (e.g., np.isfinite, np.sign): no conversion, no result unit."""
    converters = [None]
    return (converters, None)
def helper_invariant(f, unit):
    """Helper for ufuncs whose output keeps the unit of their input."""
    result_unit = _d(unit)
    return ([None], result_unit)
def helper_square(f, unit):
    """Helper for np.square: the result unit is the input unit squared."""
    if unit is None:
        return ([None], dimensionless_unscaled)
    return ([None], unit**2)
def helper_reciprocal(f, unit):
    """Helper for np.reciprocal: the result unit is the inverse input unit."""
    if unit is None:
        return ([None], dimensionless_unscaled)
    return ([None], unit**-1)
# Exponents used by the sqrt/cbrt helpers below.
one_half = 0.5  # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
    """Helper for np.sqrt: the result unit is the input unit to the 1/2."""
    if unit is None:
        return ([None], dimensionless_unscaled)
    return ([None], unit**one_half)
def helper_cbrt(f, unit):
    """Helper for np.cbrt: the result unit is the input unit to the 1/3."""
    if unit is None:
        return ([None], dimensionless_unscaled)
    return ([None], unit**one_third)
def helper_modf(f, unit):
    """Helper for np.modf: requires dimensionless input; both outputs
    (fractional and integral parts) are dimensionless."""
    if unit is None:
        return [None], (dimensionless_unscaled, dimensionless_unscaled)
    try:
        return (
            [get_converter(unit, dimensionless_unscaled)],
            (dimensionless_unscaled, dimensionless_unscaled),
        )
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to dimensionless quantities"
        )
def helper__ones_like(f, unit):
    """Helper for np.core.umath._ones_like: output is always dimensionless."""
    return ([None], dimensionless_unscaled)
def helper_dimensionless_to_dimensionless(f, unit):
    """Helper for ufuncs that require dimensionless input and produce
    dimensionless output (e.g., np.exp, np.log)."""
    if unit is None:
        return [None], dimensionless_unscaled
    try:
        converter = get_converter(unit, dimensionless_unscaled)
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to dimensionless quantities"
        )
    return [converter], dimensionless_unscaled
def helper_dimensionless_to_radian(f, unit):
    """Helper for inverse-trig ufuncs: dimensionless input, radian output."""
    from astropy.units.si import radian

    if unit is None:
        return [None], radian
    try:
        converter = get_converter(unit, dimensionless_unscaled)
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to dimensionless quantities"
        )
    return [converter], radian
def helper_degree_to_radian(f, unit):
    """Helper for np.radians / np.deg2rad: degree input, radian output."""
    from astropy.units.si import degree, radian

    try:
        converter = get_converter(unit, degree)
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to quantities with angle units"
        )
    return [converter], radian
def helper_radian_to_degree(f, unit):
    """Helper for np.degrees / np.rad2deg: radian input, degree output."""
    from astropy.units.si import degree, radian

    try:
        converter = get_converter(unit, radian)
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to quantities with angle units"
        )
    return [converter], degree
def helper_radian_to_dimensionless(f, unit):
    """Helper for trig ufuncs: radian input, dimensionless output."""
    from astropy.units.si import radian

    try:
        converter = get_converter(unit, radian)
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to quantities with angle units"
        )
    return [converter], dimensionless_unscaled
def helper_frexp(f, unit):
    """Helper for np.frexp, which only works on unscaled dimensionless input;
    both outputs are plain arrays."""
    if unit.is_unity():
        return [None], (None, None)
    raise UnitTypeError(
        f"Can only apply '{f.__name__}' function to unscaled dimensionless"
        " quantities"
    )
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take a two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
def helper_multiplication(f, unit1, unit2):
    """Helper for np.multiply / np.matmul: result unit is the product."""
    return ([None, None], _d(unit1) * _d(unit2))
def helper_division(f, unit1, unit2):
    """Helper for np.divide / np.true_divide: result unit is the quotient."""
    return ([None, None], _d(unit1) / _d(unit2))
def helper_power(f, unit1, unit2):
    """Helper for np.power / np.float_power.

    The result unit depends on the *value* of the exponent, so it is
    returned as False to signal that the main code still has to raise
    unit1 to the power itself.
    """
    # TODO: find a better way to do this, currently need to signal that one
    # still needs to raise power of unit1 in main code
    if unit2 is None:
        return [None, None], False
    try:
        converter2 = get_converter(unit2, dimensionless_unscaled)
    except UnitsError:
        raise UnitTypeError("Can only raise something to a dimensionless quantity")
    return [None, converter2], False
def helper_ldexp(f, unit1, unit2):
    """Helper for np.ldexp: the exponent (second argument) must be a plain
    number; the result keeps the unit of the first argument."""
    if unit2 is None:
        return [None, None], _d(unit1)
    raise TypeError("Cannot use ldexp with a quantity as second argument.")
def helper_copysign(f, unit1, unit2):
    """Helper for np.copysign: the result takes the unit of the first
    argument, or is a plain array if the first argument had no unit."""
    # The second argument contributes only its sign, never its unit.
    return [None, None], (None if unit1 is None else unit1)
def helper_heaviside(f, unit1, unit2):
    """Helper for np.heaviside: the second argument must be dimensionless;
    the output is dimensionless (the first argument's unit is irrelevant)."""
    if unit2 is None:
        converter2 = None
    else:
        try:
            converter2 = get_converter(unit2, dimensionless_unscaled)
        except UnitsError:
            raise UnitTypeError(
                "Can only apply 'heaviside' function with a dimensionless second argument."
            )
    return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
    """Helper for two-input ufuncs that require dimensionless inputs and
    produce dimensionless output (e.g., np.logaddexp)."""
    try:
        converter1 = (
            None if unit1 is None else get_converter(unit1, dimensionless_unscaled)
        )
        converter2 = (
            None if unit2 is None else get_converter(unit2, dimensionless_unscaled)
        )
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to dimensionless quantities"
        )
    return ([converter1, converter2], dimensionless_unscaled)
# Helper for two-input ufuncs whose result keeps the (common) input unit.
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few us; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
    """Helper for comparison ufuncs: inputs are made consistent, output is a
    plain boolean array (no unit)."""
    converters = get_converters_and_unit(f, unit1, unit2)[0]
    return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
    """Helper for np.arctan2 (and np.core.umath._arg): inputs are made
    consistent, output is in radians."""
    from astropy.units.si import radian

    converters = get_converters_and_unit(f, unit1, unit2)[0]
    return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
    """Helper for np.floor_divide: inputs are made consistent, so the
    quotient is dimensionless."""
    converters = get_converters_and_unit(f, unit1, unit2)[0]
    return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
    """Helper for np.divmod: the quotient is dimensionless while the
    remainder keeps the common unit."""
    converters, result_unit = get_converters_and_unit(f, unit1, unit2)
    return converters, (dimensionless_unscaled, result_unit)
def helper_clip(f, unit1, unit2, unit3):
    """Helper for the np.core.umath.clip ufunc (array, min, max).

    The array being clipped (first argument) determines the result unit;
    the bounds are converted to it where possible.
    """
    # Treat the array being clipped as primary.
    converters = [None]
    if unit1 is None:
        result_unit = dimensionless_unscaled
        try:
            converters += [
                (None if unit is None else get_converter(unit, dimensionless_unscaled))
                for unit in (unit2, unit3)
            ]
        except UnitsError:
            raise UnitConversionError(
                f"Can only apply '{f.__name__}' function to quantities with "
                "compatible dimensions"
            )
    else:
        result_unit = unit1
        for unit in unit2, unit3:
            try:
                converter = get_converter(_d(unit), result_unit)
            except UnitsError:
                if unit is None:
                    # special case: OK if unitless number is zero, inf, nan
                    converters.append(False)
                else:
                    raise UnitConversionError(
                        f"Can only apply '{f.__name__}' function to quantities with "
                        "compatible dimensions"
                    )
            else:
                converters.append(converter)

    return converters, result_unit
# list of ufuncs:
# https://numpy.org/doc/stable/reference/ufuncs.html#available-ufuncs

# Bitwise/logical ufuncs have no sensible meaning for quantities.
UNSUPPORTED_UFUNCS |= {
    np.bitwise_and,
    np.bitwise_or,
    np.bitwise_xor,
    np.invert,
    np.left_shift,
    np.right_shift,
    np.logical_and,
    np.logical_or,
    np.logical_xor,
    np.logical_not,
    np.isnat,
    np.gcd,
    np.lcm,
}

# SINGLE ARGUMENT UFUNCS

# ufuncs that do not care about the unit and do not return a Quantity
# (but rather a boolean, or -1, 0, or +1 for np.sign).
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_onearg_test

# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (
    np.absolute,
    np.fabs,
    np.conj,
    np.conjugate,
    np.negative,
    np.spacing,
    np.rint,
    np.floor,
    np.ceil,
    np.trunc,
    np.positive,
)
for ufunc in invariant_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_invariant

# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (
    np.exp,
    np.expm1,
    np.exp2,
    np.log,
    np.log10,
    np.log2,
    np.log1p,
)
# Default numpy does not ship an "erf" ufunc, but some versions hacked by
# intel do. This is bad, since it means code written for that numpy will
# not run on non-hacked numpy. But still, we might as well support it.
if isinstance(getattr(np.core.umath, "erf", None), np.ufunc):
    dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)

for ufunc in dimensionless_to_dimensionless_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless

# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (
    np.arccos,
    np.arcsin,
    np.arctan,
    np.arccosh,
    np.arcsinh,
    np.arctanh,
)
for ufunc in dimensionless_to_radian_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian

# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_degree_to_radian

# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_radian_to_degree

# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh, np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless

# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp

# TWO ARGUMENT UFUNCS

# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless

# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (
    np.add,
    np.subtract,
    np.hypot,
    np.maximum,
    np.minimum,
    np.fmin,
    np.fmax,
    np.nextafter,
    np.remainder,
    np.mod,
    np.fmod,
)
for ufunc in twoarg_invariant_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_twoarg_invariant

# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (
    np.greater,
    np.greater_equal,
    np.less,
    np.less_equal,
    np.not_equal,
    np.equal,
)
for ufunc in twoarg_comparison_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_twoarg_comparison

# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, "_arg", None), np.ufunc):
    twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig

# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
if isinstance(getattr(np, "matmul", None), np.ufunc):
    UFUNC_HELPERS[np.matmul] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod

# Check for clip ufunc; note that np.clip is a wrapper function, not the ufunc.
if isinstance(getattr(np.core.umath, "clip", None), np.ufunc):
    UFUNC_HELPERS[np.core.umath.clip] = helper_clip

# Do not leak the loop variable into the module namespace.
del ufunc
|
6acf2644f1b33cf20ff93962ebc59d4013687cc179d9621d6aba1ec398d04b5f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Converters for Quantity."""
import threading
import numpy as np
from astropy.units.core import (
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
__all__ = [
"can_have_arbitrary_unit",
"converters_and_unit",
"check_output",
"UFUNC_HELPERS",
"UNSUPPORTED_UFUNCS",
]
class UfuncHelpers(dict):
    """Registry of unit conversion functions to help ufunc evaluation.

    Based on dict for quick access, but with a missing method to load
    helpers for additional modules such as scipy.special and erfa.
    Such modules should be registered using ``register_module``.
    """

    def __init__(self, *args, **kwargs):
        # Modules registered (but not yet imported) via register_module.
        self.modules = {}
        self.UNSUPPORTED = set()  # Upper-case for backwards compatibility
        # Re-entrant lock: __missing__ holds it while calling
        # import_module, which acquires it again.
        self._lock = threading.RLock()
        super().__init__(*args, **kwargs)

    def register_module(self, module, names, importer):
        """Register (but do not import) a set of ufunc helpers.

        Parameters
        ----------
        module : str
            Name of the module with the ufuncs (e.g., 'scipy.special').
        names : iterable of str
            Names of the module ufuncs for which helpers are available.
        importer : callable
            Function that imports the ufuncs and returns a dict of helpers
            keyed by those ufuncs. If the value is `None`, the ufunc is
            explicitly *not* supported.
        """
        with self._lock:
            self.modules[module] = {"names": names, "importer": importer}

    def import_module(self, module):
        """Import the helpers from the given module using its helper function.

        Parameters
        ----------
        module : str
            Name of the module. Has to have been registered beforehand.
        """
        with self._lock:
            module_info = self.modules.pop(module)
            self.update(module_info["importer"]())

    def __missing__(self, ufunc):
        """Called if a ufunc is not found.

        Check if the ufunc is in any of the available modules, and, if so,
        import the helpers for that module.
        """
        with self._lock:
            # Check if it was loaded while we waited for the lock
            if ufunc in self:
                return self[ufunc]

            if ufunc in self.UNSUPPORTED:
                raise TypeError(f"Cannot use ufunc '{ufunc.__name__}' with quantities")

            for module, module_info in list(self.modules.items()):
                if ufunc.__name__ in module_info["names"]:
                    # A ufunc with the same name is supported by this module.
                    # Of course, this doesn't necessarily mean it is the
                    # right module. So, we try let the importer do its work.
                    # If it fails (e.g., for `scipy.special`), then that's
                    # fine, just raise the TypeError. If it succeeds, but
                    # the ufunc is not found, that is also fine: we will
                    # enter __missing__ again and either find another
                    # module or get the TypeError there.
                    try:
                        self.import_module(module)
                    except ImportError:  # pragma: no cover
                        pass
                    else:
                        return self[ufunc]

        raise TypeError(
            f"unknown ufunc {ufunc.__name__}. If you believe this ufunc "
            "should be supported, please raise an issue on "
            "https://github.com/astropy/astropy"
        )

    def __setitem__(self, key, value):
        # Implementation note: in principle, we could just let `None`
        # mean that something is not implemented, but this means an
        # extra if clause for the output, slowing down the common
        # path where a ufunc is supported.
        with self._lock:
            if value is None:
                # Setting None marks the ufunc as explicitly unsupported.
                self.UNSUPPORTED |= {key}
                self.pop(key, None)
            else:
                super().__setitem__(key, value)
                self.UNSUPPORTED -= {key}
# The single global registry consulted when evaluating ufuncs on Quantities.
UFUNC_HELPERS = UfuncHelpers()
# Module-level alias to the registry's set of explicitly unsupported ufuncs.
UNSUPPORTED_UFUNCS = UFUNC_HELPERS.UNSUPPORTED
def can_have_arbitrary_unit(value):
    """Test whether the items in value can have arbitrary units

    Numbers whose value does not change upon a unit change, i.e.,
    zero, infinity, or not-a-number

    Parameters
    ----------
    value : number or array

    Returns
    -------
    bool
        `True` if each member is either zero or not finite, `False` otherwise
    """
    # A value is unit-agnostic when it is exactly zero or non-finite
    # (inf or nan); every element must satisfy one of the two.
    arbitrary = np.logical_or(np.equal(value, 0.0), ~np.isfinite(value))
    return np.all(arbitrary)
def converters_and_unit(function, method, *args):
    """Determine the required converters and the unit of the ufunc result.

    Converters are functions required to convert to a ufunc's expected unit,
    e.g., radian for np.sin; or to ensure units of two inputs are consistent,
    e.g., for np.add. In these examples, the unit of the result would be
    dimensionless_unscaled for np.sin, and the same consistent unit for np.add.

    Parameters
    ----------
    function : `~numpy.ufunc`
        Numpy universal function
    method : str
        Method with which the function is evaluated, e.g.,
        '__call__', 'reduce', etc.
    *args : `~astropy.units.Quantity` or ndarray subclass
        Input arguments to the function

    Returns
    -------
    converters : list of callable or None
        One entry per ufunc input; a callable converts that input to the
        unit the ufunc needs, `None` means no conversion is required.
    result_unit : unit-like, tuple of unit-like, or None
        Unit(s) of the ufunc output(s); `None` means a plain array result.

    Raises
    ------
    TypeError : when the specified function cannot be used with Quantities
        (e.g., np.logical_or), or when the routine does not know how to handle
        the specified function (in which case an issue should be raised on
        https://github.com/astropy/astropy).
    UnitTypeError : when the conversion to the required (or consistent) units
        is not possible.
    """
    # Check whether we support this ufunc, by getting the helper function
    # (defined in helpers) which returns a list of function(s) that convert the
    # input(s) to the unit required for the ufunc, as well as the unit the
    # result will have (a tuple of units if there are multiple outputs).
    ufunc_helper = UFUNC_HELPERS[function]

    if method == "__call__" or (method == "outer" and function.nin == 2):
        # Find out the units of the arguments passed to the ufunc; usually,
        # at least one is a quantity, but for two-argument ufuncs, the second
        # could also be a Numpy array, etc. These are given unit=None.
        units = [getattr(arg, "unit", None) for arg in args]

        # Determine possible conversion functions, and the result unit.
        converters, result_unit = ufunc_helper(function, *units)

        if any(converter is False for converter in converters):
            # for multi-argument ufuncs with a quantity and a non-quantity,
            # the quantity normally needs to be dimensionless, *except*
            # if the non-quantity can have arbitrary unit, i.e., when it
            # is all zero, infinity or NaN. In that case, the non-quantity
            # can just have the unit of the quantity
            # (this allows, e.g., `q > 0.` independent of unit)
            try:
                # Don't fold this loop in the test above: this rare case
                # should not make the common case slower.
                for i, converter in enumerate(converters):
                    if converter is not False:
                        continue
                    if can_have_arbitrary_unit(args[i]):
                        converters[i] = None
                    else:
                        raise UnitConversionError(
                            f"Can only apply '{function.__name__}' function to "
                            "dimensionless quantities when other argument is not "
                            "a quantity (unless the latter is all zero/infinity/nan)."
                        )
            except TypeError:
                # _can_have_arbitrary_unit failed: arg could not be compared
                # with zero or checked to be finite. Then, ufunc will fail too.
                raise TypeError(
                    "Unsupported operand type(s) for ufunc {}: '{}'".format(
                        function.__name__,
                        ",".join([arg.__class__.__name__ for arg in args]),
                    )
                )

        # In the case of np.power and np.float_power, the unit itself needs to
        # be modified by an amount that depends on one of the input values,
        # so we need to treat this as a special case.
        # TODO: find a better way to deal with this.
        if result_unit is False:
            if units[0] is None or units[0] == dimensionless_unscaled:
                result_unit = dimensionless_unscaled
            else:
                # The exponent must be dimensionless; a bare array is taken
                # as-is, a quantity is converted (raising if not possible).
                if units[1] is None:
                    p = args[1]
                else:
                    p = args[1].to(dimensionless_unscaled).value

                try:
                    result_unit = units[0] ** p
                except ValueError as exc:
                    # Changing the unit does not work for, e.g., array-shaped
                    # power, but this is OK if we're (scaled) dimensionless.
                    try:
                        converters[0] = units[0]._get_converter(dimensionless_unscaled)
                    except UnitConversionError:
                        raise exc
                    else:
                        result_unit = dimensionless_unscaled

    else:  # methods for which the unit should stay the same
        nin = function.nin
        unit = getattr(args[0], "unit", None)
        if method == "at" and nin <= 2:
            if nin == 1:
                units = [unit]
            else:
                # For binary 'at', args are (array, indices, values).
                units = [unit, getattr(args[2], "unit", None)]

            converters, result_unit = ufunc_helper(function, *units)

            # ensure there is no 'converter' for indices (2nd argument)
            converters.insert(1, None)

        elif method in {"reduce", "accumulate", "reduceat"} and nin == 2:
            converters, result_unit = ufunc_helper(function, unit, unit)
            converters = converters[:1]
            if method == "reduceat":
                # add 'scale' for indices (2nd argument)
                converters += [None]

        else:
            if method in {"reduce", "accumulate", "reduceat", "outer"} and nin != 2:
                raise ValueError(f"{method} only supported for binary functions")

            raise TypeError(
                f"Unexpected ufunc method {method}. If this should work, please "
                "raise an issue on https://github.com/astropy/astropy"
            )

        # for all but __call__ method, scaling is not allowed
        if unit is not None and result_unit is None:
            raise TypeError(
                f"Cannot use '{method}' method on ufunc {function.__name__} with a "
                "Quantity instance as the result is not a Quantity."
            )

        if converters[0] is not None or (
            unit is not None
            and unit is not result_unit
            and (not result_unit.is_equivalent(unit) or result_unit.to(unit) != 1.0)
        ):
            # NOTE: this cannot be the more logical UnitTypeError, since
            # then things like np.cumprod will not longer fail (they check
            # for TypeError).
            raise UnitsError(
                f"Cannot use '{method}' method on ufunc {function.__name__} with a "
                "Quantity instance as it would change the unit."
            )

    return converters, result_unit
def check_output(output, unit, inputs, function=None):
    """Check that function output can be stored in the output array given.

    Parameters
    ----------
    output : array or `~astropy.units.Quantity` or tuple
        Array that should hold the function output (or tuple of such arrays).
    unit : `~astropy.units.Unit` or None, or tuple
        Unit that the output will have, or `None` for pure numbers (should be
        tuple of same if output is a tuple of outputs).
    inputs : tuple
        Any input arguments. These should be castable to the output.
    function : callable
        The function that will be producing the output. If given, used to
        give a more informative error message.

    Returns
    -------
    arrays : ndarray view or tuple thereof
        The view(s) is of ``output``.

    Raises
    ------
    UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
    TypeError : If the ``inputs`` cannot be cast safely to ``output``.
    """
    if isinstance(output, tuple):
        # Multiple outputs: recurse, pairing each output with its own unit.
        return tuple(
            check_output(output_, unit_, inputs, function)
            for output_, unit_ in zip(output, unit)
        )

    # ``None`` indicates no actual array is needed. This can happen, e.g.,
    # with np.modf(a, out=(None, b)).
    if output is None:
        return None

    if hasattr(output, "__quantity_subclass__"):
        # Check that we're not trying to store a plain Numpy array or a
        # Quantity with an inconsistent unit (e.g., not angular for Angle).
        if unit is None:
            raise TypeError(
                "Cannot store non-quantity output{} in {} instance".format(
                    (
                        f" from {function.__name__} function"
                        if function is not None
                        else ""
                    ),
                    type(output),
                )
            )

        # Ask the output class which Quantity subclass fits this unit, and
        # whether a subclass of that would also be acceptable.
        q_cls, subok = output.__quantity_subclass__(unit)
        if not (subok or q_cls is type(output)):
            raise UnitTypeError(
                "Cannot store output with unit '{}'{} "
                "in {} instance. Use {} instance instead.".format(
                    unit,
                    (
                        f" from {function.__name__} function"
                        if function is not None
                        else ""
                    ),
                    type(output),
                    q_cls,
                )
            )

        # check we can handle the dtype (e.g., that we are not int
        # when float is required). Note that we only do this for Quantity
        # output; for array output, we defer to numpy's default handling.
        # Also, any structured dtype are ignored (likely erfa ufuncs).
        # TODO: make more logical; is this necessary at all?
        if inputs and not output.dtype.names:
            result_type = np.result_type(*inputs)
            if not (
                result_type.names
                or np.can_cast(result_type, output.dtype, casting="same_kind")
            ):
                raise TypeError(
                    "Arguments cannot be cast safely to inplace "
                    f"output with dtype={output.dtype}"
                )
        # Turn into ndarray, so we do not loop into array_wrap/array_ufunc
        # if the output is used to store results of a function.
        return output.view(np.ndarray)

    else:
        # output is not a Quantity, so cannot obtain a unit.
        if not (unit is None or unit is dimensionless_unscaled):
            raise UnitTypeError(
                "Cannot store quantity with dimension "
                "{}in a non-Quantity instance.".format(
                    f"resulting from {function.__name__} function "
                    if function is not None
                    else ""
                )
            )

        return output
|
758a57f5f75eeeee5f958b49a74d567f44955951f8f27bd2ebfd6187135e115a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the ERFA ufuncs."""
# Tests for these are in coordinates, not in units.
from erfa import dt_eraASTROM, dt_eraLDBODY, dt_pv
from erfa import ufunc as erfa_ufunc
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from astropy.units.structured import StructuredUnit
from . import UFUNC_HELPERS
from .helpers import (
_d,
get_converter,
helper_invariant,
helper_multiplication,
helper_twoarg_invariant,
)
# Names of the erfa ufuncs for which unit helpers are registered below
# (via UFUNC_HELPERS.register_module at the bottom of this module).
erfa_ufuncs = (
    "s2c", "s2p", "c2s", "p2s", "pm", "pdp", "pxp", "rxp", "cpv", "p2pv", "pv2p",
    "pv2s", "pvdpv", "pvm", "pvmpv", "pvppv", "pvstar", "pvtob", "pvu", "pvup",
    "pvxpv", "rxpv", "s2pv", "s2xpv", "starpv", "sxpv", "trxpv", "gd2gc", "gc2gd",
    "ldn", "aper", "apio", "atciq", "atciqn", "atciqz", "aticq", "atioq", "atoiq",
)  # fmt: skip
def has_matching_structure(unit, dtype):
    """Recursively check that a unit's structure matches a structured dtype.

    A plain (non-structured) dtype requires a plain unit; a structured dtype
    requires a `StructuredUnit` with the same number of fields, each of which
    must in turn match the corresponding dtype field.
    """
    fields = dtype.fields
    if not fields:
        # Plain dtype: only a non-structured unit matches.
        return not isinstance(unit, StructuredUnit)

    if not (isinstance(unit, StructuredUnit) and len(unit) == len(fields)):
        return False

    return all(
        has_matching_structure(subunit, field_info[0])
        for subunit, field_info in zip(unit.values(), fields.values())
    )
def check_structured_unit(unit, dtype):
    """Raise `UnitTypeError` unless ``unit`` structurally matches ``dtype``."""
    if has_matching_structure(unit, dtype):
        return
    # Name the offending input after the known erfa struct dtypes, if any.
    kind = {dt_pv: "pv", dt_eraLDBODY: "ldbody", dt_eraASTROM: "astrom"}.get(
        dtype, "function"
    )
    raise UnitTypeError(f"{kind} input needs unit matching dtype={dtype}.")
def helper_s2c(f, unit1, unit2):
    """Spherical to unit vector: both angles to radian; result dimensionless."""
    from astropy.units.si import radian

    try:
        return [
            get_converter(unit1, radian),
            get_converter(unit2, radian),
        ], dimensionless_unscaled
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to quantities with angle units"
        )
def helper_s2p(f, unit1, unit2, unit3):
    """Spherical to p-vector: angles to radian; result keeps the distance unit."""
    from astropy.units.si import radian

    try:
        return [get_converter(unit1, radian), get_converter(unit2, radian), None], unit3
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to quantities with angle units"
        )
def helper_c2s(f, unit1):
    """Unit vector to spherical: input passed through; outputs are angles."""
    from astropy.units.si import radian

    return [None], (radian, radian)
def helper_p2s(f, unit1):
    """P-vector to spherical: two angles plus a distance in the input unit."""
    from astropy.units.si import radian

    return [None], (radian, radian, unit1)
def helper_gc2gd(f, nounit, unit1):
    """Geocentric to geodetic: the ellipsoid identifier must not be a quantity."""
    from astropy.units.si import m, radian

    if nounit is not None:
        raise UnitTypeError("ellipsoid cannot be a quantity.")
    try:
        # Last output has no unit (presumably a status value - not a quantity).
        return [None, get_converter(unit1, m)], (radian, radian, m, None)
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to quantities with length units"
        )
def helper_gd2gc(f, nounit, unit1, unit2, unit3):
    """Geodetic to geocentric: lon/lat to radian, height to metres."""
    from astropy.units.si import m, radian

    if nounit is not None:
        raise UnitTypeError("ellipsoid cannot be a quantity.")
    try:
        # Second output has no unit (presumably a status value).
        return [
            None,
            get_converter(unit1, radian),
            get_converter(unit2, radian),
            get_converter(unit3, m),
        ], (m, None)
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to lon, lat "
            "with angle and height with length units"
        )
def helper_p2pv(f, unit1):
    """P-vector to pv-vector: result pairs the input unit with unit1 / s."""
    from astropy.units.si import s

    if isinstance(unit1, StructuredUnit):
        raise UnitTypeError("p vector unit cannot be a structured unit.")
    return [None], StructuredUnit((unit1, unit1 / s))
def helper_pv2p(f, unit1):
    """Extract p from pv-vector: result takes the position (first) unit."""
    check_structured_unit(unit1, dt_pv)
    return [None], unit1[0]
def helper_pv2s(f, unit_pv):
    """Pv-vector to spherical: angles, distance, and their rates of change."""
    from astropy.units.si import radian

    check_structured_unit(unit_pv, dt_pv)
    # Angular rates carry radian times the velocity-to-position unit ratio.
    ang_unit = radian * unit_pv[1] / unit_pv[0]
    return [None], (radian, radian, unit_pv[0], ang_unit, ang_unit, unit_pv[1])
def helper_s2pv(f, unit_theta, unit_phi, unit_r, unit_td, unit_pd, unit_rd):
    """Spherical position/velocity to pv-vector."""
    from astropy.units.si import radian

    # The time unit is implied by the distance and radial-velocity units;
    # angular rates are then converted to radian per that time unit.
    time_unit = unit_r / unit_rd
    return [
        get_converter(unit_theta, radian),
        get_converter(unit_phi, radian),
        None,
        get_converter(unit_td, radian / time_unit),
        get_converter(unit_pd, radian / time_unit),
        None,
    ], StructuredUnit((unit_r, unit_rd))
def helper_pv_multiplication(f, unit1, unit2):
    """Helper for pvdpv/pvxpv: products of two pv-vectors."""
    check_structured_unit(unit1, dt_pv)
    check_structured_unit(unit2, dt_pv)
    result_unit = StructuredUnit((unit1[0] * unit2[0], unit1[1] * unit2[0]))
    # Rescale the second pv so both inputs share the same position/velocity
    # unit ratio, which the result unit above assumes.
    converter = get_converter(
        unit2, StructuredUnit((unit2[0], unit1[1] * unit2[0] / unit1[0]))
    )
    return [None, converter], result_unit
def helper_pvm(f, unit1):
    """Pv-vector modulus: outputs keep the position and velocity units."""
    check_structured_unit(unit1, dt_pv)
    return [None], (unit1[0], unit1[1])
def helper_pvstar(f, unit1):
    """Pv-vector to catalog coordinates; input converted to (AU, AU/day).

    Output units mirror starpv's inputs (ra, dec, pmr, pmd, px, rv); the
    final output has no unit (presumably a status value).
    """
    from astropy.units.astrophys import AU
    from astropy.units.si import arcsec, day, km, radian, s, year

    return [get_converter(unit1, StructuredUnit((AU, AU / day)))], (
        radian,
        radian,
        radian / year,
        radian / year,
        arcsec,
        km / s,
        None,
    )
def helper_starpv(f, unit_ra, unit_dec, unit_pmr, unit_pmd, unit_px, unit_rv):
    """Catalog coordinates to pv-vector in (AU, AU/day).

    The second output has no unit (presumably a status value).
    """
    from astropy.units.astrophys import AU
    from astropy.units.si import arcsec, day, km, radian, s, year

    return [
        get_converter(unit_ra, radian),
        get_converter(unit_dec, radian),
        get_converter(unit_pmr, radian / year),
        get_converter(unit_pmd, radian / year),
        get_converter(unit_px, arcsec),
        get_converter(unit_rv, km / s),
    ], (StructuredUnit((AU, AU / day)), None)
def helper_pvtob(
    f, unit_elong, unit_phi, unit_hm, unit_xp, unit_yp, unit_sp, unit_theta
):
    """Observer position/velocity: angles to radian, height to metres; result (m, m/s)."""
    from astropy.units.si import m, radian, s

    return [
        get_converter(unit_elong, radian),
        get_converter(unit_phi, radian),
        get_converter(unit_hm, m),
        get_converter(unit_xp, radian),
        get_converter(unit_yp, radian),
        get_converter(unit_sp, radian),
        get_converter(unit_theta, radian),
    ], StructuredUnit((m, m / s))
def helper_pvu(f, unit_t, unit_pv):
    """Update pv-vector: time converted to the position/velocity unit ratio."""
    check_structured_unit(unit_pv, dt_pv)
    return [get_converter(unit_t, unit_pv[0] / unit_pv[1]), None], unit_pv
def helper_pvup(f, unit_t, unit_pv):
    """Update pv-vector, position only: result takes the position unit."""
    check_structured_unit(unit_pv, dt_pv)
    return [get_converter(unit_t, unit_pv[0] / unit_pv[1]), None], unit_pv[0]
def helper_s2xpv(f, unit1, unit2, unit_pv):
    """Scale pv-vector: position and velocity parts scaled by two factors.

    ``_d`` turns a missing (None) unit into dimensionless for the scalars.
    """
    check_structured_unit(unit_pv, dt_pv)
    return [None, None, None], StructuredUnit(
        (_d(unit1) * unit_pv[0], _d(unit2) * unit_pv[1])
    )
def ldbody_unit():
    """Return the structured unit matching the eraLDBODY dtype."""
    from astropy.units.astrophys import AU, Msun
    from astropy.units.si import day, radian

    return StructuredUnit((Msun, radian, (AU, AU / day)), erfa_ufunc.dt_eraLDBODY)
def astrom_unit():
    """Return the structured unit matching the eraASTROM dtype.

    Index 7 is the 'along' field and index 14 the 'eral' field (both rad);
    helper_aper relies on those positions.
    """
    from astropy.units.astrophys import AU
    from astropy.units.si import rad, year

    one = rel2c = dimensionless_unscaled
    return StructuredUnit(
        (
            year,
            AU,
            one,
            AU,
            rel2c,
            one,
            one,
            rad,
            rad,
            rad,
            rad,
            one,
            one,
            rel2c,
            rad,
            rad,
            rad,
        ),
        erfa_ufunc.dt_eraASTROM,
    )
def helper_ldn(f, unit_b, unit_ob, unit_sc):
    """Light deflection by n bodies: bodies to ldbody units, observer to AU,
    direction dimensionless; the deflected direction is dimensionless too."""
    from astropy.units.astrophys import AU

    return [
        get_converter(unit_b, ldbody_unit()),
        get_converter(unit_ob, AU),
        get_converter(_d(unit_sc), dimensionless_unscaled),
    ], dimensionless_unscaled
def helper_aper(f, unit_theta, unit_astrom):
    """Update Earth rotation angle in astrom: theta converted to the 'along'
    unit, and the output 'eral' field (index 14) given that same unit."""
    check_structured_unit(unit_astrom, dt_eraASTROM)
    unit_along = unit_astrom[7]  # along

    if unit_astrom[14] is unit_along:  # eral
        result_unit = unit_astrom
    else:
        # Rebuild the structured unit with only the eral field replaced.
        result_units = tuple(
            (unit_along if i == 14 else v) for i, v in enumerate(unit_astrom.values())
        )
        result_unit = unit_astrom.__class__(result_units, names=unit_astrom)
    return [get_converter(unit_theta, unit_along), None], result_unit
def helper_apio(
    f,
    unit_sp,
    unit_theta,
    unit_elong,
    unit_phi,
    unit_hm,
    unit_xp,
    unit_yp,
    unit_refa,
    unit_refb,
):
    """Astrometry parameters for CIRS<->observed: angular inputs to radian,
    height to metres; result is the structured astrom unit.

    Bug fix: the converters for ``unit_yp``, ``unit_refa`` and ``unit_refb``
    were previously all built from ``unit_xp``, so those inputs were
    mis-scaled whenever given in angular units other than ``unit_xp``'s.
    """
    from astropy.units.si import m, radian

    return [
        get_converter(unit_sp, radian),
        get_converter(unit_theta, radian),
        get_converter(unit_elong, radian),
        get_converter(unit_phi, radian),
        get_converter(unit_hm, m),
        get_converter(unit_xp, radian),
        get_converter(unit_yp, radian),
        get_converter(unit_refa, radian),
        get_converter(unit_refb, radian),
    ], astrom_unit()
def helper_atciq(f, unit_rc, unit_dc, unit_pr, unit_pd, unit_px, unit_rv, unit_astrom):
    """Quick catalog transform: angles to radian, proper motions to rad/yr,
    parallax to arcsec, radial velocity to km/s; returns two angles."""
    from astropy.units.si import arcsec, km, radian, s, year

    return [
        get_converter(unit_rc, radian),
        get_converter(unit_dc, radian),
        get_converter(unit_pr, radian / year),
        get_converter(unit_pd, radian / year),
        get_converter(unit_px, arcsec),
        get_converter(unit_rv, km / s),
        get_converter(unit_astrom, astrom_unit()),
    ], (radian, radian)
def helper_atciqn(
    f, unit_rc, unit_dc, unit_pr, unit_pd, unit_px, unit_rv, unit_astrom, unit_b
):
    """As helper_atciq, with an extra light-deflecting bodies argument."""
    from astropy.units.si import arcsec, km, radian, s, year

    return [
        get_converter(unit_rc, radian),
        get_converter(unit_dc, radian),
        get_converter(unit_pr, radian / year),
        get_converter(unit_pd, radian / year),
        get_converter(unit_px, arcsec),
        get_converter(unit_rv, km / s),
        get_converter(unit_astrom, astrom_unit()),
        get_converter(unit_b, ldbody_unit()),
    ], (radian, radian)
def helper_atciqz_aticq(f, unit_rc, unit_dc, unit_astrom):
    """Shared helper for atciqz/aticq: two angles plus astrom in; two angles out."""
    from astropy.units.si import radian

    return [
        get_converter(unit_rc, radian),
        get_converter(unit_dc, radian),
        get_converter(unit_astrom, astrom_unit()),
    ], (radian, radian)
def helper_aticqn(f, unit_rc, unit_dc, unit_astrom, unit_b):
    """As helper_atciqz_aticq, with an extra light-deflecting bodies argument."""
    from astropy.units.si import radian

    return [
        get_converter(unit_rc, radian),
        get_converter(unit_dc, radian),
        get_converter(unit_astrom, astrom_unit()),
        get_converter(unit_b, ldbody_unit()),
    ], (radian, radian)
def helper_atioq(f, unit_rc, unit_dc, unit_astrom):
    """Two angles plus astrom in; five angular outputs."""
    from astropy.units.si import radian

    return [
        get_converter(unit_rc, radian),
        get_converter(unit_dc, radian),
        get_converter(unit_astrom, astrom_unit()),
    ], (radian,) * 5
def helper_atoiq(f, unit_type, unit_ri, unit_di, unit_astrom):
    """Observed-place transform: the 'type' code must be unitless; two angles
    plus astrom in, two angles out."""
    from astropy.units.si import radian

    if unit_type is not None:
        raise UnitTypeError("argument 'type' should not have a unit")
    return [
        None,
        get_converter(unit_ri, radian),
        get_converter(unit_di, radian),
        get_converter(unit_astrom, astrom_unit()),
    ], (radian, radian)
def get_erfa_helpers():
    """Return the mapping of erfa ufuncs to their unit-conversion helpers."""
    return {
        erfa_ufunc.s2c: helper_s2c,
        erfa_ufunc.s2p: helper_s2p,
        erfa_ufunc.c2s: helper_c2s,
        erfa_ufunc.p2s: helper_p2s,
        erfa_ufunc.pm: helper_invariant,
        erfa_ufunc.cpv: helper_invariant,
        erfa_ufunc.p2pv: helper_p2pv,
        erfa_ufunc.pv2p: helper_pv2p,
        erfa_ufunc.pv2s: helper_pv2s,
        erfa_ufunc.pvdpv: helper_pv_multiplication,
        erfa_ufunc.pvxpv: helper_pv_multiplication,
        erfa_ufunc.pvm: helper_pvm,
        erfa_ufunc.pvmpv: helper_twoarg_invariant,
        erfa_ufunc.pvppv: helper_twoarg_invariant,
        erfa_ufunc.pvstar: helper_pvstar,
        erfa_ufunc.pvtob: helper_pvtob,
        erfa_ufunc.pvu: helper_pvu,
        erfa_ufunc.pvup: helper_pvup,
        erfa_ufunc.pdp: helper_multiplication,
        erfa_ufunc.pxp: helper_multiplication,
        erfa_ufunc.rxp: helper_multiplication,
        erfa_ufunc.rxpv: helper_multiplication,
        erfa_ufunc.s2pv: helper_s2pv,
        erfa_ufunc.s2xpv: helper_s2xpv,
        erfa_ufunc.starpv: helper_starpv,
        erfa_ufunc.sxpv: helper_multiplication,
        erfa_ufunc.trxpv: helper_multiplication,
        erfa_ufunc.gc2gd: helper_gc2gd,
        erfa_ufunc.gd2gc: helper_gd2gc,
        erfa_ufunc.ldn: helper_ldn,
        erfa_ufunc.aper: helper_aper,
        erfa_ufunc.apio: helper_apio,
        erfa_ufunc.atciq: helper_atciq,
        erfa_ufunc.atciqn: helper_atciqn,
        erfa_ufunc.atciqz: helper_atciqz_aticq,
        erfa_ufunc.aticq: helper_atciqz_aticq,
        erfa_ufunc.aticqn: helper_aticqn,
        erfa_ufunc.atioq: helper_atioq,
        erfa_ufunc.atoiq: helper_atoiq,
    }
UFUNC_HELPERS.register_module("erfa.ufunc", erfa_ufuncs, get_erfa_helpers)
|
05fb594ac46b9074075eff0ae795f88e7c211cf66b0063d08e638ec1e1dc5030 | # Licensed under a 3-clause BSD style license. See LICENSE.rst except
# for parts explicitly labelled as being (largely) copies of numpy
# implementations; for those, see licenses/NUMPY_LICENSE.rst.
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
"""
import functools
import operator
import numpy as np
from numpy.lib import recfunctions as rfn
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from astropy.utils import isiterable
from astropy.utils.compat import NUMPY_LT_1_23
# In 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides, "ENABLE_ARRAY_FUNCTION", True)

# The four registries consulted for __array_function__ support; see the
# module docstring for how each is used.
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""

FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""

DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""

UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
# Functions whose numpy implementations already work for ndarray subclasses
# such as Quantity, so they can simply be passed on.
SUBCLASS_SAFE_FUNCTIONS |= {
    np.shape, np.size, np.ndim,
    np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
    np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
    np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
    np.flip, np.fliplr, np.flipud, np.rot90,
    np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
    np.nonzero, np.argwhere, np.flatnonzero,
    np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
    np.real, np.imag, np.diagonal, np.diagflat, np.empty_like,
    np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
    np.put, np.fill_diagonal, np.tile, np.repeat,
    np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
    np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
    np.amax, np.amin, np.ptp, np.sum, np.cumsum,
    np.prod, np.product, np.cumprod, np.cumproduct,
    np.round, np.around,
    np.fix, np.angle, np.i0, np.clip,
    np.isposinf, np.isneginf, np.isreal, np.iscomplex,
    np.average, np.mean, np.std, np.var, np.median, np.trace,
    np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
    np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
    np.nanprod, np.nancumprod,
    np.einsum_path, np.trapz, np.linspace,
    np.sort, np.msort, np.partition, np.meshgrid,
    np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
    np.iscomplexobj, np.isrealobj,
    np.shares_memory, np.may_share_memory,
    np.apply_along_axis, np.take_along_axis, np.put_along_axis,
    np.linalg.cond, np.linalg.multi_dot,
}  # fmt: skip

# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {np.ediff1d}
# Functions that make no sense for quantities (bit manipulation, index
# bookkeeping, business days, boolean reductions, ...).
UNSUPPORTED_FUNCTIONS |= {
    np.packbits, np.unpackbits, np.unravel_index,
    np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
    np.busday_count, np.busday_offset, np.datetime_as_string,
    np.is_busday, np.all, np.any, np.sometrue, np.alltrue,
}  # fmt: skip

# Could be supported if we had a natural logarithm unit.
UNSUPPORTED_FUNCTIONS |= {np.linalg.slogdet}

# Structured-array helpers whose support has not been decided yet; treated
# as unsupported for now.
TBD_FUNCTIONS = {
    rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
    rfn.apply_along_fields, rfn.assign_fields_by_name,
    rfn.find_duplicates, rfn.recursive_fill_fields, rfn.require_fields,
    rfn.repack_fields, rfn.stack_arrays,
}  # fmt: skip
UNSUPPORTED_FUNCTIONS |= TBD_FUNCTIONS

IGNORED_FUNCTIONS = {
    # I/O - useless for Quantity, since no way to store the unit.
    np.save, np.savez, np.savetxt, np.savez_compressed,
    # Polynomials
    np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
    np.polymul, np.polysub, np.polyval, np.roots, np.vander,
    # functions taking record arrays (which are deprecated)
    rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
}  # fmt: skip

if NUMPY_LT_1_23:
    IGNORED_FUNCTIONS |= {
        # Deprecated, removed in numpy 1.23
        np.asscalar,
        np.alen,
    }
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
    """Callable that registers helper functions in a given assignments dict."""

    def __init__(self, assignments):
        # The registry (e.g. FUNCTION_HELPERS) that __call__ fills in.
        self.assignments = assignments

    def __call__(self, f=None, helps=None, module=np):
        """Add a helper to a numpy function.

        Normally used as a decorator.

        If ``helps`` is given, it should be the numpy function helped (or an
        iterable of numpy functions helped).

        If ``helps`` is not given, it is assumed the function helped is the
        numpy function with the same name as the decorated function.
        """
        if f is None:
            # Called with keyword arguments only: return a decorator that
            # will later be applied to the helper function itself.
            if helps is None and module is np:  # pragma: no cover
                raise ValueError("function_helper requires at least one argument.")
            return functools.partial(self.__call__, helps=helps, module=module)

        if helps is None:
            helps = getattr(module, f.__name__)
        if not isiterable(helps):
            helps = (helps,)
        for helped in helps:
            self.assignments[helped] = f
        return f
# Decorators used below to populate the two registries.
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
# fmt: off
@function_helper(
    helps={
        np.copy, np.asfarray, np.real_if_close, np.sort_complex, np.resize,
        np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft,
        np.fft.fft2, np.fft.ifft2, np.fft.rfft2, np.fft.irfft2,
        np.fft.fftn, np.fft.ifftn, np.fft.rfftn, np.fft.irfftn,
        np.fft.hfft, np.fft.ihfft,
        np.linalg.eigvals, np.linalg.eigvalsh,
    }
)
# fmt: on
def invariant_a_helper(a, *args, **kwargs):
    """Pass the bare array through; the result keeps ``a``'s unit unchanged."""
    return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
    """Pass the bare matrix through; the result keeps ``m``'s unit unchanged."""
    return (m.view(np.ndarray),) + args, kwargs, m.unit, None
@function_helper(helps={np.fft.fftshift, np.fft.ifftshift})
def invariant_x_helper(x, *args, **kwargs):
    """Pass the bare array through; the result keeps ``x``'s unit unchanged."""
    return (x.view(np.ndarray),) + args, kwargs, x.unit, None
# Note that ones_like does *not* work by default since if one creates an empty
# array with a unit, one cannot just fill it with unity. Indeed, in this
# respect, it is a bit of an odd function for Quantity. On the other hand, it
# matches the idea that a unit is the same as the quantity with that unit and
# value of 1. Also, it used to work without __array_function__.
# zeros_like does work by default for regular quantities, because numpy first
# creates an empty array with the unit and then fills it with 0 (which can have
# any unit), but for structured dtype this fails (0 cannot have an arbitrary
# structured unit), so we include it here too.
@function_helper(helps={np.ones_like, np.zeros_like})
def like_helper(a, *args, **kwargs):
    # subok may be given either as the third positional argument or by
    # keyword; only when it is True does the result keep a's unit.
    subok = args[2] if len(args) > 2 else kwargs.pop("subok", True)
    unit = a.unit if subok else None
    return (a.view(np.ndarray),) + args, kwargs, unit, None
@function_helper
def sinc(x):
    """The argument must be an angle; it is passed on as its value in radian,
    and the result is dimensionless."""
    from astropy.units.si import radian

    try:
        x = x.to_value(radian)
    except UnitsError:
        raise UnitTypeError(
            "Can only apply 'sinc' function to quantities with angle units"
        )
    return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
    """Unwrap by computing in radian and converting the result back to the
    unit of ``p``; the default discontinuity is pi radian."""
    from astropy.units.si import radian

    if discont is None:
        discont = np.pi << radian

    p, discont = _as_quantities(p, discont)
    # Call the undecorated numpy implementation on plain radian values.
    result = np.unwrap.__wrapped__(
        p.to_value(radian), discont.to_value(radian), axis=axis
    )
    result = radian.to(p.unit, result)
    return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
    """Indices are returned, so the result carries no unit."""
    return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
    """The fill value is converted to ``a``'s own unit; the result keeps that
    unit unless ``subok=False`` is given by keyword."""
    unit = a.unit if kwargs.get("subok", True) else None
    return (a.view(np.ndarray), a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
    """Either the target or the values may be a Quantity; the other side is
    converted to match (values to a's unit, or a quantity values to plain
    dimensionless numbers)."""
    from astropy.units import Quantity

    if isinstance(a, Quantity):
        return (a.view(np.ndarray), mask, a._to_own_unit(values)), {}, a.unit, None
    elif isinstance(values, Quantity):
        return (a, mask, values.to_value(dimensionless_unscaled)), {}, None, None
    else:
        raise NotImplementedError
@function_helper
def place(arr, mask, vals):
    """Same pattern as putmask: convert vals to arr's unit, or a quantity
    vals to plain dimensionless numbers."""
    from astropy.units import Quantity

    if isinstance(arr, Quantity):
        return (arr.view(np.ndarray), mask, arr._to_own_unit(vals)), {}, arr.unit, None
    elif isinstance(vals, Quantity):
        return (arr, mask, vals.to_value(dimensionless_unscaled)), {}, None, None
    else:
        raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
    """Copy src into dst in dst's own unit (or, for a quantity src and plain
    dst, as dimensionless values); copyto itself returns nothing."""
    from astropy.units import Quantity

    if isinstance(dst, Quantity):
        return (dst.view(np.ndarray), dst._to_own_unit(src)) + args, kwargs, None, None
    elif isinstance(src, Quantity):
        return (dst, src.to_value(dimensionless_unscaled)) + args, kwargs, None, None
    else:
        raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
    """Replace nan/inf with the given values, converted to ``x``'s own unit.

    Bug fix: ``copy`` was previously hard-coded to True in the forwarded
    keyword arguments, so ``np.nan_to_num(q, copy=False)`` silently returned
    a copy instead of operating in place.
    """
    nan = x._to_own_unit(nan)
    if posinf is not None:
        posinf = x._to_own_unit(posinf)
    if neginf is not None:
        neginf = x._to_own_unit(neginf)
    return (
        (x.view(np.ndarray),),
        dict(copy=copy, nan=nan, posinf=posinf, neginf=neginf),
        x.unit,
        None,
    )
def _as_quantity(a):
    """Convert argument to a Quantity (or raise NotImplementedError)."""
    from astropy.units import Quantity

    try:
        return Quantity(a, copy=False, subok=True)
    except Exception:
        # If we cannot convert to Quantity, we should just bail.
        raise NotImplementedError
def _as_quantities(*args):
    """Convert arguments to Quantity (or raise NotImplementedError)."""
    from astropy.units import Quantity

    try:
        # Note: this should keep the dtype the same
        return tuple(Quantity(a, copy=False, subok=True, dtype=None) for a in args)
    except Exception:
        # If we cannot convert to Quantity, we should just bail.
        raise NotImplementedError
def _quantities2arrays(*args, unit_from_first=False):
    """Convert to arrays in units of the first argument that has a unit.

    If unit_from_first, take the unit of the first argument regardless
    whether it actually defined a unit (e.g., dimensionless for arrays).

    Returns
    -------
    arrays : tuple of ndarray
        The inputs, expressed in the common unit.
    unit : unit-like
        The unit the arrays are given in.
    """
    # Turn first argument into a quantity.
    q = _as_quantity(args[0])
    if len(args) == 1:
        return (q.value,), q.unit

    # If we care about the unit being explicit, then check whether this
    # argument actually had a unit, or was likely inferred.
    if not unit_from_first and (
        q.unit is q._default_unit and not hasattr(args[0], "unit")
    ):
        # Here, the argument could still be things like [10*u.one, 11.*u.one]),
        # i.e., properly dimensionless. So, we only override with anything
        # that has a unit not equivalent to dimensionless (fine to ignore other
        # dimensionless units pass, even if explicitly given).
        for arg in args[1:]:
            trial = _as_quantity(arg)
            if not trial.unit.is_equivalent(q.unit):
                # Use any explicit unit not equivalent to dimensionless.
                q = trial
                break

    # We use the private _to_own_unit method here instead of just
    # converting everything to quantity and then do .to_value(qs0.unit)
    # as we want to allow arbitrary unit for 0, inf, and nan.
    try:
        arrays = tuple((q._to_own_unit(arg)) for arg in args)
    except TypeError:
        raise NotImplementedError

    return arrays, q.unit
def _iterable_helper(*args, out=None, **kwargs):
    """Convert arguments to Quantity, and treat possible 'out'.

    Returns the tuple ``(arrays, kwargs, unit, out)`` expected from a
    function helper, with any Quantity ``out`` unwrapped to ndarray in
    ``kwargs``.
    """
    from astropy.units import Quantity

    if out is not None:
        if isinstance(out, Quantity):
            kwargs["out"] = out.view(np.ndarray)
        else:
            # TODO: for an ndarray output, we could in principle
            # try converting all Quantity to dimensionless.
            raise NotImplementedError

    arrays, unit = _quantities2arrays(*args)
    return arrays, kwargs, unit, out
@function_helper
def concatenate(arrays, axis=0, out=None, **kwargs):
    """Support np.concatenate: convert all inputs to a common unit."""
    # TODO: make this smarter by creating an appropriately shaped
    # empty output array and just filling it.
    arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis, **kwargs)
    return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
    """Full implementation of np.block for Quantity input."""
    # We need to override block since the numpy implementation can take two
    # different paths, one for concatenation, one for creating a large empty
    # result array in which parts are set.  Each assumes array input and
    # cannot be used directly.  Since it would be very costly to inspect all
    # arrays and then turn them back into a nested list, we just copy here the
    # second implementation, np.core.shape_base._block_slicing, since it is
    # shortest and easiest.
    (arrays, list_ndim, result_ndim, final_size) = np.core.shape_base._block_setup(
        arrays
    )
    shape, slices, arrays = np.core.shape_base._block_info_recursion(
        arrays, list_ndim, result_ndim
    )
    # Here, one line of difference!
    arrays, unit = _quantities2arrays(*arrays)
    # Back to _block_slicing
    dtype = np.result_type(*[arr.dtype for arr in arrays])
    F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
    C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
    order = "F" if F_order and not C_order else "C"
    result = np.empty(shape=shape, dtype=dtype, order=order)
    for the_slice, arr in zip(slices, arrays):
        result[(Ellipsis,) + the_slice] = arr
    return result, unit, None
@function_helper
def choose(a, choices, out=None, **kwargs):
    """Support np.choose: the choices share one unit; ``a`` is plain indices."""
    choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
    return (a, choices), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
    """Support np.select: choices share a unit; convert ``default`` too."""
    choices, kwargs, unit, out = _iterable_helper(*choicelist)
    # A zero default is unit-agnostic; any other default has to be
    # expressed in the common unit of the choices.
    if default != 0:
        default = (1 * unit)._to_own_unit(default)
    return (condlist, choices, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
    """Full implementation of np.piecewise, preserving units of outputs."""
    from astropy.units import Quantity

    # Copied implementation from numpy.lib.function_base.piecewise,
    # taking care of units of function outputs.
    n2 = len(funclist)
    # undocumented: single condition is promoted to a list of one condition
    if np.isscalar(condlist) or (
        not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
    ):
        condlist = [condlist]

    # Conditions must be boolean; a Quantity condition makes no sense.
    if any(isinstance(c, Quantity) for c in condlist):
        raise NotImplementedError

    condlist = np.array(condlist, dtype=bool)
    n = len(condlist)

    if n == n2 - 1:  # compute the "otherwise" condition.
        condelse = ~np.any(condlist, axis=0, keepdims=True)
        condlist = np.concatenate([condlist, condelse], axis=0)
        n += 1
    elif n != n2:
        raise ValueError(
            f"with {n} condition(s), either {n} or {n + 1} functions are expected"
        )

    y = np.zeros(x.shape, x.dtype)
    # Collect where each piece applies and what value it produces, so the
    # units of all pieces can be reconciled in one go afterwards.
    where = []
    what = []
    for k in range(n):
        item = funclist[k]
        if not callable(item):
            where.append(condlist[k])
            what.append(item)
        else:
            vals = x[condlist[k]]
            if vals.size > 0:
                where.append(condlist[k])
                what.append(item(vals, *args, **kw))

    what, unit = _quantities2arrays(*what)
    for item, value in zip(where, what):
        y[item] = value

    return y, unit, None
@function_helper
def append(arr, values, *args, **kwargs):
    """Support np.append: values are converted to the unit of ``arr``."""
    arrays, unit = _quantities2arrays(arr, values, unit_from_first=True)
    return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
    """Support np.insert: values are converted to the unit of ``arr``."""
    from astropy.units import Quantity

    # The insertion positions must be plain indices, not a Quantity.
    if isinstance(obj, Quantity):
        raise NotImplementedError

    (arr, values), unit = _quantities2arrays(arr, values, unit_from_first=True)
    return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode="constant", **kwargs):
    """Support np.pad, converting fill values to the unit of ``array``."""
    # pad dispatches only on array, so that must be a Quantity.
    for key in ("constant_values", "end_values"):
        value = kwargs.pop(key, None)
        if value is None:
            continue
        if not isinstance(value, tuple):
            value = (value,)
        # Each entry may itself be a (before, after) tuple; convert all
        # scalars to the array's own unit.
        kwargs[key] = [
            tuple(array._to_own_unit(item) for item in entry)
            if isinstance(entry, tuple)
            else array._to_own_unit(entry)
            for entry in value
        ]
    return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
@function_helper
def where(condition, *args):
    """Support three-argument np.where; a Quantity condition is rejected."""
    from astropy.units import Quantity

    if len(args) != 2 or isinstance(condition, Quantity):
        raise NotImplementedError
    (x, y), unit = _quantities2arrays(*args)
    return (condition, x, y), {}, unit, None
@function_helper(helps=({np.quantile, np.nanquantile}))
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
    """Support np.quantile/np.nanquantile; ``q`` may be a Quantity in
    ``_q_unit`` (percent for the percentile helper below)."""
    # ``out`` may arrive positionally (after axis) or as a keyword.
    if len(args) >= 2:
        out = args[1]
        args = args[:1] + args[2:]
    else:
        out = kwargs.pop("out", None)

    from astropy.units import Quantity

    if isinstance(q, Quantity):
        q = q.to_value(_q_unit)

    (a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)

    return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
    """Support np.percentile/np.nanpercentile via the quantile helper,
    with ``q`` interpreted in percent."""
    from astropy.units import percent

    return quantile(a, q, *args, _q_unit=percent, **kwargs)
@function_helper
def count_nonzero(a, *args, **kwargs):
    """Support np.count_nonzero: the count is a plain number (unit None)."""
    return (a.value,) + args, kwargs, None, None
@function_helper(helps={np.isclose, np.allclose})
def close(a, b, rtol=1e-05, atol=1e-08, *args, **kwargs):
    """Support np.isclose/np.allclose: compare in the unit of ``a``."""
    from astropy.units import Quantity

    (a, b), unit = _quantities2arrays(a, b, unit_from_first=True)
    # Allow number without a unit as having the unit.
    atol = Quantity(atol, unit).value

    # Result is boolean, so unit is None.
    return (a, b, rtol, atol) + args, kwargs, None, None
@function_helper
def array_equal(a1, a2, equal_nan=False):
    """Support np.array_equal: compare in a common unit; result is bool."""
    args, unit = _quantities2arrays(a1, a2)
    return args, dict(equal_nan=equal_nan), None, None
@function_helper
def array_equiv(a1, a2):
    """Support np.array_equiv: compare in a common unit; result is bool."""
    (arr1, arr2), _ = _quantities2arrays(a1, a2)
    return (arr1, arr2), {}, None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
    """Support np.dot and np.outer: the result unit is the unit product."""
    from astropy.units import Quantity

    a, b = _as_quantities(a, b)
    unit = a.unit * b.unit
    views = (a.view(np.ndarray), b.view(np.ndarray))
    if out is None:
        return views, {}, unit, None
    if not isinstance(out, Quantity):
        raise NotImplementedError
    return views + (out.view(np.ndarray),), {}, unit, out
@function_helper(
    helps={
        np.cross,
        np.inner,
        np.vdot,
        np.tensordot,
        np.kron,
        np.correlate,
        np.convolve,
    }
)
def cross_like(a, b, *args, **kwargs):
    """Support binary products whose result unit is the product of units."""
    a, b = _as_quantities(a, b)
    unit = a.unit * b.unit
    return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
    """Support np.einsum: the result unit is the product of operand units."""
    from astropy.units import Quantity

    if not isinstance(subscripts, str):
        raise ValueError('only "subscripts" string mode supported for einsum.')

    if out is not None:
        if not isinstance(out, Quantity):
            raise NotImplementedError
        else:
            kwargs["out"] = out.view(np.ndarray)

    qs = _as_quantities(*operands)
    unit = functools.reduce(operator.mul, (q.unit for q in qs), dimensionless_unscaled)
    arrays = tuple(q.view(np.ndarray) for q in qs)
    return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
    """Support np.bincount when only ``weights`` is a Quantity."""
    from astropy.units import Quantity

    # The bin indices themselves cannot carry a unit.  NOTE(review): this
    # helper is only reached via __array_function__ dispatch, i.e. when at
    # least one argument is a Quantity, so ``weights`` is presumed to be one
    # whenever we get past this check.
    if isinstance(x, Quantity):
        raise NotImplementedError
    return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
    """Support np.digitize: bins converted to the unit of ``x``; indices out."""
    arrays, unit = _quantities2arrays(x, bins, unit_from_first=True)
    return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
    """Return array-like ``bins`` as plain values in ``unit``.

    A scalar passes through unchanged (it is a bin count), but a scalar
    Quantity makes no sense as a count and is rejected.
    """
    from astropy.units import Quantity

    check = _as_quantity(bins)
    if check.ndim == 0:
        if isinstance(bins, Quantity):
            # bins should be an integer (or at least definitely not a Quantity).
            raise NotImplementedError
        return bins
    return check.to_value(unit)
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
    """Support np.histogram: returns (hist unit, bin-edge unit) pair."""
    if weights is not None:
        weights = _as_quantity(weights)
        unit = weights.unit
        weights = weights.value
    else:
        unit = None

    a = _as_quantity(a)
    # A string selects an automatic bin-width estimator; otherwise the
    # edges must be convertible to the unit of ``a``.
    if not isinstance(bins, str):
        bins = _check_bins(bins, a.unit)

    if density:
        # Normalized histogram carries 1/(unit of a) (times weight unit).
        unit = (unit or 1) / a.unit

    return (
        (a.value, bins, range),
        {"weights": weights, "density": density},
        (unit, a.unit),
        None,
    )
@function_helper(helps=np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
    """Support np.histogram_bin_edges: edges come out in the unit of ``a``."""
    # weights is currently unused
    a = _as_quantity(a)
    if not isinstance(bins, str):
        bins = _check_bins(bins, a.unit)

    return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
    """Support np.histogram2d: returns (hist unit, x-edge unit, y-edge unit)."""
    from astropy.units import Quantity

    if weights is not None:
        weights = _as_quantity(weights)
        unit = weights.unit
        weights = weights.value
    else:
        unit = None

    x, y = _as_quantities(x, y)
    try:
        n = len(bins)
    except TypeError:
        # bins should be an integer (or at least definitely not a Quantity).
        if isinstance(bins, Quantity):
            raise NotImplementedError
    else:
        if n == 1:
            raise NotImplementedError
        elif n == 2 and not isinstance(bins, Quantity):
            # Separate edges per axis, each in the corresponding unit.
            bins = [_check_bins(b, unit) for (b, unit) in zip(bins, (x.unit, y.unit))]
        else:
            # Shared edges for both axes: express everything in x's unit.
            bins = _check_bins(bins, x.unit)
            y = y.to(x.unit)

    if density:
        unit = (unit or 1) / x.unit / y.unit

    return (
        (x.value, y.value, bins, range),
        {"weights": weights, "density": density},
        (unit, x.unit, y.unit),
        None,
    )
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
    """Support np.histogramdd: returns (hist unit, per-axis edge units)."""
    if weights is not None:
        weights = _as_quantity(weights)
        unit = weights.unit
        weights = weights.value
    else:
        unit = None

    try:
        # Sample is an ND-array.
        _, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = _as_quantities(*sample)
        sample_units = [s.unit for s in sample]
        sample = [s.value for s in sample]
        D = len(sample)
    else:
        sample = _as_quantity(sample)
        sample_units = [sample.unit] * D

    try:
        M = len(bins)
    except TypeError:
        # bins should be an integer
        from astropy.units import Quantity

        if isinstance(bins, Quantity):
            raise NotImplementedError
    else:
        if M != D:
            raise ValueError(
                "The dimension of bins must be equal to the dimension of the sample x."
            )
        bins = [_check_bins(b, unit) for (b, unit) in zip(bins, sample_units)]

    if density:
        # Normalization divides by the product of all axis units.
        unit = functools.reduce(operator.truediv, sample_units, (unit or 1))

    return (
        (sample, bins, range),
        {"weights": weights, "density": density},
        (unit, sample_units),
        None,
    )
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
    """Support np.diff: prepend/append are converted to the unit of ``a``."""
    a = _as_quantity(a)
    if prepend is not np._NoValue:
        prepend = _as_quantity(prepend).to_value(a.unit)
    if append is not np._NoValue:
        append = _as_quantity(append).to_value(a.unit)
    return (a.value, n, axis, prepend, append), {}, a.unit, None
@function_helper
def gradient(f, *varargs, **kwargs):
    """Support np.gradient: per-axis units are f.unit / spacing unit."""
    f = _as_quantity(f)
    axis = kwargs.get("axis", None)
    # Number of axes along which gradients are taken.
    if axis is None:
        n_axis = f.ndim
    elif isinstance(axis, tuple):
        n_axis = len(axis)
    else:
        n_axis = 1

    if varargs:
        varargs = _as_quantities(*varargs)
        if len(varargs) == 1 and n_axis > 1:
            # A single spacing applies to every axis.
            varargs = varargs * n_axis

    if varargs:
        units = [f.unit / q.unit for q in varargs]
        varargs = tuple(q.value for q in varargs)
    else:
        units = [f.unit] * n_axis

    if len(units) == 1:
        units = units[0]

    return (f.value,) + varargs, kwargs, units, None
@function_helper
def logspace(start, stop, *args, **kwargs):
    """Support np.logspace for LogQuantity endpoints (e.g., dex values)."""
    from astropy.units import LogQuantity, dex

    if not isinstance(start, LogQuantity) or not isinstance(stop, LogQuantity):
        raise NotImplementedError

    # Get unit from end point as for linspace.
    stop = stop.to(dex(stop.unit.physical_unit))
    start = start.to(stop.unit)
    unit = stop.unit.physical_unit
    return (start.value, stop.value) + args, kwargs, unit, None
@function_helper
def geomspace(start, stop, *args, **kwargs):
    """Support np.geomspace, taking the unit from the end point (as linspace)."""
    (stop_value, start_value), unit = _quantities2arrays(stop, start)
    return (start_value, stop_value) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
    """Support np.interp: x and xp share a unit; result carries fp's unit."""
    from astropy.units import Quantity

    (x, xp), _ = _quantities2arrays(x, xp)
    if isinstance(fp, Quantity):
        unit = fp.unit
        fp = fp.value
    else:
        unit = None

    return (x, xp, fp) + args, kwargs, unit, None
@function_helper
def unique(
    ar, return_index=False, return_inverse=False, return_counts=False, axis=None
):
    """Support np.unique: extra index/inverse/count outputs are unitless."""
    unit = ar.unit
    n_index = sum(bool(i) for i in (return_index, return_inverse, return_counts))
    if n_index:
        unit = [unit] + n_index * [None]

    return (ar.value, return_index, return_inverse, return_counts, axis), {}, unit, None
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
    """Support np.intersect1d: optional index outputs are unitless."""
    (ar1, ar2), unit = _quantities2arrays(ar1, ar2)
    if return_indices:
        unit = [unit, None, None]
    return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
    """Binary set operations: operands in a common unit, kept on output."""
    arrays, unit = _quantities2arrays(ar1, ar2)
    return arrays + args, kwargs, unit, None
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
    """Support np.isin/np.in1d: the boolean result carries no unit."""
    # This tests whether ar1 is in ar2, so we should change the unit of
    # a1 to that of a2.
    (ar2, ar1), unit = _quantities2arrays(ar2, ar1)
    return (ar1, ar2) + args, kwargs, None, None
@dispatched_function
def apply_over_axes(func, a, axes):
    """Full implementation of np.apply_over_axes for Quantity input."""
    # Copied straight from numpy/lib/shape_base, just to omit its
    # val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
    # Which is what we do here - we can only get here if it is a Quantity.
    val = a
    N = a.ndim
    if np.array(axes).ndim == 0:
        axes = (axes,)
    for axis in axes:
        if axis < 0:
            axis = N + axis
        args = (val, axis)
        res = func(*args)
        if res.ndim == val.ndim:
            val = res
        else:
            res = np.expand_dims(res, axis)
            if res.ndim == val.ndim:
                val = res
            else:
                raise ValueError(
                    "function is not returning an array of the correct shape"
                )

    # Returning unit is None to signal nothing should happen to
    # the output.
    return val, None, None
@dispatched_function
def array_repr(arr, *args, **kwargs):
    """Full implementation of np.array_repr, adding a unit='...' entry."""
    # TODO: The addition of "unit='...'" doesn't worry about line
    # length.  Could copy & adapt _array_repr_implementation from
    # numpy.core.arrayprint.py
    cls_name = arr.__class__.__name__
    # Use a throwaway ndarray subclass whose name has the same length, so
    # numpy's formatting/indentation stays correct after the name swap.
    fake_name = "_" * len(cls_name)
    fake_cls = type(fake_name, (np.ndarray,), {})
    no_unit = np.array_repr(arr.view(fake_cls), *args, **kwargs).replace(
        fake_name, cls_name
    )
    unit_part = f"unit='{arr.unit}'"
    pre, dtype, post = no_unit.rpartition("dtype")
    if dtype:
        # Insert the unit just before the dtype entry.
        return f"{pre}{unit_part}, {dtype}{post}", None, None
    else:
        # No dtype shown: append the unit before the closing parenthesis.
        return f"{no_unit[:-1]}, {unit_part})", None, None
@dispatched_function
def array_str(arr, *args, **kwargs):
    """Full implementation of np.array_str: numpy's string for the values,
    with the quantity's unit string appended.

    TODO: the addition of the unit does not worry about line length.
    Could copy & adapt _array_repr_implementation from
    numpy.core.arrayprint.py.
    """
    value_str = np.array_str(arr.value, *args, **kwargs)
    return value_str + arr._unitstr, None, None
@function_helper
def array2string(a, *args, **kwargs):
    """Support np.array2string, working around its float() conversion."""
    # array2string breaks on quantities as it tries to turn individual
    # items into float, which works only for dimensionless.  Since the
    # defaults would not keep any unit anyway, this is rather pointless -
    # we're better off just passing on the array view.  However, one can
    # also work around this by passing on a formatter (as is done in Angle).
    # So, we do nothing if the formatter argument is present and has the
    # relevant formatter for our dtype.
    formatter = args[6] if len(args) >= 7 else kwargs.get("formatter", None)

    if formatter is None:
        a = a.value
    else:
        # See whether it covers our dtype.
        from numpy.core.arrayprint import _get_format_function

        with np.printoptions(formatter=formatter) as options:
            try:
                ff = _get_format_function(a.value, **options)
            except Exception:
                # Shouldn't happen, but possibly we're just not being smart
                # enough, so let's pass things on as is.
                pass
            else:
                # If the selected format function is that of numpy, we know
                # things will fail
                if "numpy" in ff.__module__:
                    a = a.value

    return (a,) + args, kwargs, None, None
@function_helper
def diag(v, *args, **kwargs):
    """Support np.diag: the diagonal keeps the unit of ``v``."""
    # Function works for *getting* the diagonal, but not *setting*.
    # So, override always.
    return (v.value,) + args, kwargs, v.unit, None
@function_helper(module=np.linalg)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
    """Support np.linalg.svd: only the singular values carry the unit."""
    unit = a.unit
    if compute_uv:
        # (U, s, Vh): the unitary factors are dimensionless.
        unit = (None, unit, None)

    return ((a.view(np.ndarray), full_matrices, compute_uv, hermitian), {}, unit, None)
def _interpret_tol(tol, unit):
    """Express a tolerance (plain number or convertible Quantity) in ``unit``."""
    from astropy.units import Quantity

    tol_quantity = Quantity(tol, unit)
    return tol_quantity.value
@function_helper(module=np.linalg)
def matrix_rank(M, tol=None, *args, **kwargs):
    """Support np.linalg.matrix_rank: the rank itself is unitless."""
    if tol is not None:
        tol = _interpret_tol(tol, M.unit)

    return (M.view(np.ndarray), tol) + args, kwargs, None, None
@function_helper(helps={np.linalg.inv, np.linalg.tensorinv})
def inv(a, *args, **kwargs):
    """Support matrix/tensor inversion: the unit is inverted too."""
    return (a.view(np.ndarray),) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def pinv(a, rcond=1e-15, *args, **kwargs):
    """Support np.linalg.pinv: pseudo-inverse inverts the unit."""
    rcond = _interpret_tol(rcond, a.unit)

    return (a.view(np.ndarray), rcond) + args, kwargs, 1 / a.unit, None
@function_helper(module=np.linalg)
def det(a):
    """Support np.linalg.det: an n×n determinant carries unit**n."""
    return (a.view(np.ndarray),), {}, a.unit ** a.shape[-1], None
@function_helper(helps={np.linalg.solve, np.linalg.tensorsolve})
def solve(a, b, *args, **kwargs):
    """Support linear solvers: x in a @ x = b has unit b.unit / a.unit."""
    a, b = _as_quantities(a, b)

    return (
        (a.view(np.ndarray), b.view(np.ndarray)) + args,
        kwargs,
        b.unit / a.unit,
        None,
    )
@function_helper(module=np.linalg)
def lstsq(a, b, rcond="warn"):
    """Support np.linalg.lstsq.

    Output units: solution b.unit/a.unit, residuals b.unit**2, rank
    unitless, singular values a.unit.
    """
    a, b = _as_quantities(a, b)
    if rcond not in (None, "warn", -1):
        rcond = _interpret_tol(rcond, a.unit)

    return (
        (a.view(np.ndarray), b.view(np.ndarray), rcond),
        {},
        (b.unit / a.unit, b.unit**2, None, a.unit),
        None,
    )
@function_helper(module=np.linalg)
def norm(x, ord=None, *args, **kwargs):
    """Support np.linalg.norm: the norm keeps x's unit, except ord=0."""
    if ord == 0:
        from astropy.units import dimensionless_unscaled

        # The 0-"norm" counts non-zero entries, so is dimensionless.
        unit = dimensionless_unscaled
    else:
        unit = x.unit
    return (x.view(np.ndarray), ord) + args, kwargs, unit, None
@function_helper(module=np.linalg)
def matrix_power(a, n):
    """Support np.linalg.matrix_power: the unit is raised to the same power."""
    return (a.value, n), {}, a.unit**n, None
@function_helper(module=np.linalg)
def cholesky(a):
    """Support np.linalg.cholesky: L @ L.T == a, so L has unit**0.5."""
    return (a.value,), {}, a.unit**0.5, None
@function_helper(module=np.linalg)
def qr(a, mode="reduced"):
    """Support np.linalg.qr: Q is dimensionless, R carries a's unit."""
    if mode.startswith("e"):
        # "economic" mode: raw output, leave unit handling to the caller.
        units = None
    elif mode == "r":
        units = a.unit
    else:
        from astropy.units import dimensionless_unscaled

        units = (dimensionless_unscaled, a.unit)

    return (a.value, mode), {}, units, None
@function_helper(helps={np.linalg.eig, np.linalg.eigh})
def eig(a, *args, **kwargs):
    """Support eig/eigh: eigenvalues keep the unit, eigenvectors do not."""
    from astropy.units import dimensionless_unscaled

    return (a.value,) + args, kwargs, (a.unit, dimensionless_unscaled), None
# ======================= np.lib.recfunctions =======================
@function_helper(module=np.lib.recfunctions)
def structured_to_unstructured(arr, *args, **kwargs):
    """
    Convert a structured quantity to an unstructured one.

    This only works if all the units are compatible.
    """
    from astropy.units import StructuredUnit

    # Use the first field's unit as the common unit for all fields.
    target_unit = arr.unit.values()[0]

    def replace_unit(x):
        # Recursively replace every leaf unit by the target unit.
        if isinstance(x, StructuredUnit):
            return x._recursively_apply(replace_unit)
        else:
            return target_unit

    to_unit = arr.unit._recursively_apply(replace_unit)
    return (arr.to_value(to_unit),) + args, kwargs, target_unit, None
def _build_structured_unit(dtype, unit):
"""Build structured unit from dtype
Parameters
----------
dtype : `numpy.dtype`
unit : `astropy.units.Unit`
Returns
-------
`astropy.units.Unit` or tuple
"""
if dtype.fields is None:
return unit
return tuple(_build_structured_unit(v[0], unit) for v in dtype.fields.values())
@function_helper(module=np.lib.recfunctions)
def unstructured_to_structured(arr, dtype, *args, **kwargs):
    """Convert an unstructured quantity to a structured one, replicating
    ``arr``'s unit over every field of ``dtype``."""
    from astropy.units import StructuredUnit

    target_unit = StructuredUnit(_build_structured_unit(dtype, arr.unit))

    return (arr.to_value(arr.unit), dtype) + args, kwargs, target_unit, None
def _izip_units_flat(iterable):
    """Iterate over the leaf units, collapsing any nested unit structure.

    Parameters
    ----------
    iterable : Iterable[StructuredUnit | Unit] or StructuredUnit
        A structured unit or iterable thereof.

    Yields
    ------
    unit
        The individual (non-structured) units, in order.
    """
    from astropy.units import StructuredUnit

    # Make Structured unit (pass-through if it is already).
    units = StructuredUnit(iterable)

    # Yield from structured unit.
    for v in units.values():
        if isinstance(v, StructuredUnit):
            yield from _izip_units_flat(v)
        else:
            yield v
@function_helper(helps=rfn.merge_arrays)
def merge_arrays(
    seqarrays,
    fill_value=-1,
    flatten=False,
    usemask=False,
    asrecarray=False,
):
    """Merge structured Quantities field by field.

    Like :func:`numpy.lib.recfunctions.merge_arrays`. Note that ``usemask`` and
    ``asrecarray`` are not supported at this time and will raise a ValueError if
    not `False`.
    """
    from astropy.units import Quantity, StructuredUnit

    if asrecarray:
        # TODO? implement if Quantity ever supports rec.array
        raise ValueError("asrecarray=True is not supported.")
    if usemask:
        # TODO: use MaskedQuantity for this case
        raise ValueError("usemask=True is not supported.")

    # Do we have a single Quantity as input?
    if isinstance(seqarrays, Quantity):
        seqarrays = (seqarrays,)

    # Note: this also converts ndarray -> Quantity[dimensionless]
    seqarrays = _as_quantities(*seqarrays)
    arrays = tuple(q.value for q in seqarrays)
    units = tuple(q.unit for q in seqarrays)

    # Build the result's structured unit to match the merged dtype.
    if flatten:
        unit = StructuredUnit(tuple(_izip_units_flat(units)))
    elif len(arrays) == 1:
        unit = StructuredUnit(units[0])
    else:
        unit = StructuredUnit(units)

    return (
        (arrays,),
        dict(
            fill_value=fill_value,
            flatten=flatten,
            usemask=usemask,
            asrecarray=asrecarray,
        ),
        unit,
        None,
    )
|
77f6e5f513be7b243f67ea4d298f29fadeadd314be2df8f67964d90558f11f98 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Quantity helpers for the scipy.special ufuncs.
Available ufuncs in this module are at
https://docs.scipy.org/doc/scipy/reference/special.html
"""
import numpy as np
from astropy.units.core import UnitsError, UnitTypeError, dimensionless_unscaled
from . import UFUNC_HELPERS
from .helpers import (
get_converter,
helper_cbrt,
helper_dimensionless_to_dimensionless,
helper_two_arg_dimensionless,
)
# Ufuncs that take a single dimensionless input and return dimensionless output.
dimensionless_to_dimensionless_sps_ufuncs = (
    "erf", "erfc", "erfcx", "erfi", "erfinv", "erfcinv",
    "gamma", "gammaln", "loggamma", "gammasgn", "psi", "rgamma", "digamma",
    "wofz", "dawsn", "entr", "exprel", "expm1", "log1p", "exp2", "exp10",
    "j0", "j1", "y0", "y1", "i0", "i0e", "i1", "i1e",
    "k0", "k0e", "k1", "k1e", "itj0y0", "it2j0y0", "iti0k0", "it2i0k0",
    "ndtr", "ndtri",
)  # fmt: skip
scipy_special_ufuncs = dimensionless_to_dimensionless_sps_ufuncs

# ufuncs that require input in degrees and give dimensionless output.
degree_to_dimensionless_sps_ufuncs = ("cosdg", "sindg", "tandg", "cotdg")
scipy_special_ufuncs += degree_to_dimensionless_sps_ufuncs

# Ufuncs taking two dimensionless inputs (order + argument, e.g. Bessel).
two_arg_dimensionless_sps_ufuncs = (
    "jv", "jn", "jve", "yn", "yv", "yve", "kn", "kv", "kve", "iv", "ive",
    "hankel1", "hankel1e", "hankel2", "hankel2e",
)  # fmt: skip
scipy_special_ufuncs += two_arg_dimensionless_sps_ufuncs

# ufuncs handled as special cases
scipy_special_ufuncs += ("cbrt", "radian")
def helper_degree_to_dimensionless(f, unit):
    """Return a converter of ``unit`` to degrees plus the dimensionless
    result unit, raising UnitTypeError for non-angle input units."""
    from astropy.units.si import degree

    try:
        converter = get_converter(unit, degree)
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to quantities with angle units"
        )
    return [converter], dimensionless_unscaled
def helper_degree_minute_second_to_radian(f, unit1, unit2, unit3):
    """Return converters of the three inputs to (degree, arcmin, arcsec)
    plus the radian result unit, for scipy.special.radian."""
    from astropy.units.si import arcmin, arcsec, degree, radian

    try:
        return [
            get_converter(unit1, degree),
            get_converter(unit2, arcmin),
            get_converter(unit3, arcsec),
        ], radian
    except UnitsError:
        raise UnitTypeError(
            f"Can only apply '{f.__name__}' function to quantities with angle units"
        )
def get_scipy_special_helpers():
    """Build the mapping of scipy.special ufuncs to their unit helpers.

    Imported lazily so that scipy is only required once one of its
    ufuncs is actually applied to a Quantity.
    """
    import scipy.special as sps

    SCIPY_HELPERS = {}
    for name in dimensionless_to_dimensionless_sps_ufuncs:
        # In SCIPY_LT_1_5, erfinv and erfcinv are not ufuncs.
        ufunc = getattr(sps, name, None)
        if isinstance(ufunc, np.ufunc):
            SCIPY_HELPERS[ufunc] = helper_dimensionless_to_dimensionless

    for ufunc in degree_to_dimensionless_sps_ufuncs:
        SCIPY_HELPERS[getattr(sps, ufunc)] = helper_degree_to_dimensionless

    for ufunc in two_arg_dimensionless_sps_ufuncs:
        SCIPY_HELPERS[getattr(sps, ufunc)] = helper_two_arg_dimensionless

    # ufuncs handled as special cases
    SCIPY_HELPERS[sps.cbrt] = helper_cbrt
    SCIPY_HELPERS[sps.radian] = helper_degree_minute_second_to_radian
    return SCIPY_HELPERS
# Register lazily: the helper factory above runs only when a scipy.special
# ufunc is first applied to a Quantity.
UFUNC_HELPERS.register_module(
    "scipy.special", scipy_special_ufuncs, get_scipy_special_helpers
)
|
4675ce5e3e8d4568cc5808034e665aa00b8029baf7eb5ca7e4816293a294ffa8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Separate tests specifically for equivalencies."""
import numpy as np
# THIRD-PARTY
import pytest
from numpy.testing import assert_allclose
# LOCAL
from astropy import constants
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units.equivalencies import Equivalency
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_dimensionless_angles():
    """Angles drop to dimensionless (any power of radian) with the
    dimensionless_angles equivalency."""
    # test that the angles_dimensionless option allows one to change
    # by any order in radian in the unit (#1161)
    rad1 = u.dimensionless_angles()
    assert u.radian.to(1, equivalencies=rad1) == 1.0
    assert u.deg.to(1, equivalencies=rad1) == u.deg.to(u.rad)
    assert u.steradian.to(1, equivalencies=rad1) == 1.0
    assert u.dimensionless_unscaled.to(u.steradian, equivalencies=rad1) == 1.0
    # now quantities
    assert (1.0 * u.radian).to_value(1, equivalencies=rad1) == 1.0
    assert (1.0 * u.deg).to_value(1, equivalencies=rad1) == u.deg.to(u.rad)
    assert (1.0 * u.steradian).to_value(1, equivalencies=rad1) == 1.0
    # more complicated example
    I = 1.0e45 * u.g * u.cm**2
    Omega = u.cycle / (1.0 * u.s)
    Erot = 0.5 * I * Omega**2
    # check that equivalency makes this work
    Erot_in_erg1 = Erot.to(u.erg, equivalencies=rad1)
    # and check that value is correct
    assert_allclose(Erot_in_erg1.value, (Erot / u.radian**2).to_value(u.erg))

    # test built-in equivalency in subclass
    class MyRad1(u.Quantity):
        _equivalencies = rad1

    phase = MyRad1(1.0, u.cycle)
    assert phase.to_value(1) == u.cycle.to(u.radian)
@pytest.mark.parametrize("log_unit", (u.mag, u.dex, u.dB))
def test_logarithmic(log_unit):
    """Logarithmic units convert to/from dimensionless only with the
    logarithmic equivalency."""
    # check conversion of mag, dB, and dex to dimensionless and vice versa
    with pytest.raises(u.UnitsError):
        log_unit.to(1, 0.0)
    with pytest.raises(u.UnitsError):
        u.dimensionless_unscaled.to(log_unit)

    assert log_unit.to(1, 0.0, equivalencies=u.logarithmic()) == 1.0
    assert u.dimensionless_unscaled.to(log_unit, equivalencies=u.logarithmic()) == 0.0

    # also try with quantities
    q_dex = np.array([0.0, -1.0, 1.0, 2.0]) * u.dex
    q_expected = 10.0**q_dex.value * u.dimensionless_unscaled
    q_log_unit = q_dex.to(log_unit)
    assert np.all(q_log_unit.to(1, equivalencies=u.logarithmic()) == q_expected)
    assert np.all(q_expected.to(log_unit, equivalencies=u.logarithmic()) == q_log_unit)
    with u.set_enabled_equivalencies(u.logarithmic()):
        assert np.all(np.abs(q_log_unit - q_expected.to(log_unit)) < 1.0e-10 * log_unit)
# The three doppler conventions, shared by the parametrized tests below.
doppler_functions = [u.doppler_optical, u.doppler_radio, u.doppler_relativistic]


@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_frequency_0(function):
    """The rest frequency itself corresponds to exactly zero velocity."""
    rest = 105.01 * u.GHz
    velocity = rest.to(u.km / u.s, equivalencies=function(rest))
    assert velocity.value == 0
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_wavelength_0(function):
    """A wavelength matching the rest frequency maps to ~zero velocity."""
    rest = 105.01 * u.GHz
    rest_wavelength = 0.00285489437196 * u.m
    velocity = rest_wavelength.to(u.km / u.s, equivalencies=function(rest))
    np.testing.assert_almost_equal(velocity.value, 0, decimal=6)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_energy_0(function):
    """An energy matching the rest frequency maps to ~zero velocity."""
    rest = 105.01 * u.GHz
    q1 = 0.0004342864648539744 * u.eV
    velo0 = q1.to(u.km / u.s, equivalencies=function(rest))
    np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_frequency_circle(function):
    """Frequency -> velocity -> frequency round-trips."""
    rest = 105.01 * u.GHz
    shifted = 105.03 * u.GHz
    velo = shifted.to(u.km / u.s, equivalencies=function(rest))
    freq = velo.to(u.GHz, equivalencies=function(rest))
    np.testing.assert_almost_equal(freq.value, shifted.value, decimal=7)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_wavelength_circle(function):
    """Wavelength -> velocity -> wavelength round-trips."""
    rest = 105.01 * u.nm
    shifted = 105.03 * u.nm
    velo = shifted.to(u.km / u.s, equivalencies=function(rest))
    wav = velo.to(u.nm, equivalencies=function(rest))
    np.testing.assert_almost_equal(wav.value, shifted.value, decimal=7)
@pytest.mark.parametrize("function", doppler_functions)
def test_doppler_energy_circle(function):
    """Energy -> velocity -> energy round-trips."""
    rest = 1.0501 * u.eV
    shifted = 1.0503 * u.eV
    velo = shifted.to(u.km / u.s, equivalencies=function(rest))
    en = velo.to(u.eV, equivalencies=function(rest))
    np.testing.assert_almost_equal(en.value, shifted.value, decimal=7)
# Expected shifted frequencies at 30 km/s for optical, radio, relativistic.
values_ghz = (999.899940784289, 999.8999307714406, 999.8999357778647)


@pytest.mark.parametrize(
    ("function", "value"), list(zip(doppler_functions, values_ghz))
)
def test_30kms(function, value):
    """Each doppler convention gives its own shifted frequency at 30 km/s."""
    rest = 1000 * u.GHz
    velo = 30 * u.km / u.s
    shifted = velo.to(u.GHz, equivalencies=function(rest))
    np.testing.assert_almost_equal(shifted.value, value, decimal=7)
# Invalid rest values: unitless number, wrong unit, and None.
bad_values = (5, 5 * u.Jy, None)


@pytest.mark.parametrize(
    ("function", "value"), list(zip(doppler_functions, bad_values))
)
def test_bad_restfreqs(function, value):
    """A rest value without a spectral unit must raise UnitsError."""
    with pytest.raises(u.UnitsError):
        function(value)
@pytest.mark.parametrize(
    ("z", "rv_ans"),
    [
        (0, 0 * (u.km / u.s)),
        (0.001, 299642.56184583 * (u.m / u.s)),
        (-1, -2.99792458e8 * (u.m / u.s)),
    ],
)
def test_doppler_redshift(z, rv_ans):
    """Redshift <-> radial velocity conversion and its round trip."""
    z_in = z * u.dimensionless_unscaled
    rv_out = z_in.to(u.km / u.s, u.doppler_redshift())
    z_out = rv_out.to(u.dimensionless_unscaled, u.doppler_redshift())
    assert_quantity_allclose(rv_out, rv_ans)
    assert_quantity_allclose(z_out, z_in)  # Check roundtrip
def test_doppler_redshift_no_cosmology():
    """The cosmological redshift unit must not convert via doppler_redshift."""
    from astropy.cosmology.units import redshift

    with pytest.raises(u.UnitConversionError, match="not convertible"):
        (0 * (u.km / u.s)).to(redshift, u.doppler_redshift())
def test_massenergy():
    """Mass <-> energy equivalency, including derived surface density,
    density, and power conversions."""
    # The relative tolerance of these tests is set by the uncertainties
    # in the charge of the electron, which is known to about
    # 3e-9 (relative tolerance).  Therefore, we limit the
    # precision of the tests to 1e-7 to be safe.  The masses are
    # (loosely) known to ~ 5e-8 rel tolerance, so we couldn't test to
    # 1e-7 if we used the values from astropy.constants; that is,
    # they might change by more than 1e-7 in some future update, so instead
    # they are hardwired here.

    # Electron, proton, neutron, muon, 1g
    mass_eV = u.Quantity(
        [510.998928e3, 938.272046e6, 939.565378e6, 105.6583715e6, 5.60958884539e32],
        u.eV,
    )
    mass_g = u.Quantity(
        [9.10938291e-28, 1.672621777e-24, 1.674927351e-24, 1.88353147e-25, 1], u.g
    )
    # Test both ways
    assert np.allclose(
        mass_eV.to_value(u.g, equivalencies=u.mass_energy()), mass_g.value, rtol=1e-7
    )
    assert np.allclose(
        mass_g.to_value(u.eV, equivalencies=u.mass_energy()), mass_eV.value, rtol=1e-7
    )

    # Basic tests of 'derived' equivalencies
    # Surface density
    sdens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**2)
    sdens_g = u.Quantity(1e-4, u.g / u.cm**2)
    assert np.allclose(
        sdens_eV.to_value(u.g / u.cm**2, equivalencies=u.mass_energy()),
        sdens_g.value,
        rtol=1e-7,
    )
    assert np.allclose(
        sdens_g.to_value(u.eV / u.m**2, equivalencies=u.mass_energy()),
        sdens_eV.value,
        rtol=1e-7,
    )

    # Density
    dens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**3)
    dens_g = u.Quantity(1e-6, u.g / u.cm**3)
    assert np.allclose(
        dens_eV.to_value(u.g / u.cm**3, equivalencies=u.mass_energy()),
        dens_g.value,
        rtol=1e-7,
    )
    assert np.allclose(
        dens_g.to_value(u.eV / u.m**3, equivalencies=u.mass_energy()),
        dens_eV.value,
        rtol=1e-7,
    )

    # Power
    pow_eV = u.Quantity(5.60958884539e32, u.eV / u.s)
    pow_g = u.Quantity(1, u.g / u.s)
    assert np.allclose(
        pow_eV.to_value(u.g / u.s, equivalencies=u.mass_energy()),
        pow_g.value,
        rtol=1e-7,
    )
    assert np.allclose(
        pow_g.to_value(u.eV / u.s, equivalencies=u.mass_energy()),
        pow_eV.value,
        rtol=1e-7,
    )
def test_is_equivalent():
    """Unit.is_equivalent, with and without equivalencies, and with tuples."""
    assert u.m.is_equivalent(u.pc)
    assert u.cycle.is_equivalent(u.mas)
    assert not u.cycle.is_equivalent(u.dimensionless_unscaled)
    assert u.cycle.is_equivalent(u.dimensionless_unscaled, u.dimensionless_angles())
    assert not (u.Hz.is_equivalent(u.J))
    assert u.Hz.is_equivalent(u.J, u.spectral())
    assert u.J.is_equivalent(u.Hz, u.spectral())
    assert u.pc.is_equivalent(u.arcsecond, u.parallax())
    assert u.arcminute.is_equivalent(u.au, u.parallax())

    # Pass a tuple for multiple possibilities
    assert u.cm.is_equivalent((u.m, u.s, u.kg))
    assert u.ms.is_equivalent((u.m, u.s, u.kg))
    assert u.g.is_equivalent((u.m, u.s, u.kg))
    assert not u.L.is_equivalent((u.m, u.s, u.kg))
    assert not (u.km / u.s).is_equivalent((u.m, u.s, u.kg))
def test_parallax():
    """Round-trip angle <-> distance conversions via the parallax equivalency."""
    dist = u.arcsecond.to(u.pc, 10, u.parallax())
    assert_allclose(dist, 0.10, rtol=1.0e-12)
    angle = u.pc.to(u.arcsecond, dist, u.parallax())
    assert_allclose(angle, 10, rtol=1.0e-12)

    dist = u.arcminute.to(u.au, 1, u.parallax())
    assert_allclose(dist, 3437.746770785, rtol=1.0e-12)
    angle = u.au.to(u.arcminute, dist, u.parallax())
    assert_allclose(angle, 1, rtol=1.0e-12)

    # A negative parallax has no physical distance: the result is NaN,
    # both for .to() and .to_value().
    assert np.isnan((-1 * u.mas).to(u.pc, u.parallax()).value)
    assert np.isnan((-1 * u.mas).to_value(u.pc, u.parallax()))
def test_parallax2():
    """The parallax equivalency converts array inputs element-wise."""
    distances = u.arcsecond.to(u.pc, [0.1, 2.5], u.parallax())
    assert_allclose(distances, [10, 0.4], rtol=1.0e-12)
def test_spectral():
    """Wavelength <-> frequency round trips via u.spectral()."""
    # Angstrom to two different frequency units, and back.
    for freq_unit, expected in [
        (u.Hz, 2.9979245799999995e18),
        (u.MHz, 2.9979245799999995e12),
    ]:
        freq = u.AA.to(freq_unit, 1, u.spectral())
        assert_allclose(freq, expected)
        assert_allclose(freq_unit.to(u.AA, freq, u.spectral()), 1)
    # Metre <-> Hz round trip.
    freq = u.m.to(u.Hz, 1, u.spectral())
    assert_allclose(freq, 2.9979245799999995e8)
    assert_allclose(u.Hz.to(u.m, freq, u.spectral()), 1)
def test_spectral2():
    """Energy conversions through u.spectral(), including chained paths."""
    energy = u.nm.to(u.J, 500, u.spectral())
    assert_allclose(energy, 3.972891366538605e-19)
    assert_allclose(u.J.to(u.nm, energy, u.spectral()), 500)

    # Converting AA -> Hz -> J must agree with the direct AA -> J path,
    # and J -> Hz must invert Hz -> J.
    freq = u.AA.to(u.Hz, 1, u.spectral())
    via_freq = u.Hz.to(u.J, freq, u.spectral())
    direct = u.AA.to(u.J, 1, u.spectral())
    assert_allclose(via_freq, direct)
    assert_allclose(freq, u.J.to(u.Hz, via_freq, u.spectral()))
def test_spectral3():
    """u.spectral() converts array inputs element-wise."""
    freqs = u.nm.to(u.Hz, [1000, 2000], u.spectral())
    assert_allclose(freqs, [2.99792458e14, 1.49896229e14])
@pytest.mark.parametrize(
    ("in_val", "in_unit"),
    [
        # The same three spectral points expressed as wavelength, wavenumber,
        # frequency and energy; all four must convert to the same answers.
        ([0.1, 5000.0, 10000.0], u.AA),
        ([1e5, 2.0, 1.0], u.micron**-1),
        ([2.99792458e19, 5.99584916e14, 2.99792458e14], u.Hz),
        ([1.98644568e-14, 3.97289137e-19, 1.98644568e-19], u.J),
    ],
)
def test_spectral4(in_val, in_unit):
    """Wave number conversion w.r.t. wavelength, freq, and energy."""
    # Spectroscopic and angular
    out_units = [u.micron**-1, u.radian / u.micron]
    answers = [[1e5, 2.0, 1.0], [6.28318531e05, 12.5663706, 6.28318531]]
    for out_unit, ans in zip(out_units, answers):
        # Forward
        a = in_unit.to(out_unit, in_val, u.spectral())
        assert_allclose(a, ans)
        # Backward
        b = out_unit.to(in_unit, ans, u.spectral())
        assert_allclose(b, in_val)
@pytest.mark.parametrize(
    # The same spectral point given as wavelength, frequency, wavenumber
    # and energy; the conversions below must not depend on the form used.
    "wav", (3500 * u.AA, 8.5654988e14 * u.Hz, 1 / (3500 * u.AA), 5.67555959e-19 * u.J)
)
def test_spectraldensity2(wav):
    """Spectral flux/luminosity density conversions at a reference point."""
    # flux density
    flambda = u.erg / u.angstrom / u.cm**2 / u.s
    fnu = u.erg / u.Hz / u.cm**2 / u.s
    a = flambda.to(fnu, 1, u.spectral_density(wav))
    assert_allclose(a, 4.086160166177361e-12)
    # integrated flux
    f_int = u.erg / u.cm**2 / u.s
    phot_int = u.ph / u.cm**2 / u.s
    a = f_int.to(phot_int, 1, u.spectral_density(wav))
    assert_allclose(a, 1.7619408e11)
    a = phot_int.to(f_int, 1, u.spectral_density(wav))
    assert_allclose(a, 5.67555959e-12)
    # luminosity density
    llambda = u.erg / u.angstrom / u.s
    lnu = u.erg / u.Hz / u.s
    a = llambda.to(lnu, 1, u.spectral_density(wav))
    assert_allclose(a, 4.086160166177361e-12)
    a = lnu.to(llambda, 1, u.spectral_density(wav))
    assert_allclose(a, 2.44728537142857e11)
def test_spectraldensity3():
    """F_nu <-> F_lambda conversions via u.spectral_density()."""
    # Define F_nu in Jy
    f_nu = u.Jy
    # Define F_lambda in ergs / cm^2 / s / micron
    f_lambda = u.erg / u.cm**2 / u.s / u.micron
    # 1 GHz
    one_ghz = u.Quantity(1, u.GHz)
    # Convert to ergs / cm^2 / s / Hz.  1 Jy is exactly 1e-23 erg/cm^2/s/Hz,
    # so the default tolerance suffices.  (A stray third positional argument
    # ``10`` used to be passed here; numpy's assert_allclose interpreted it
    # as rtol=10 — a 1000% relative tolerance that made the check vacuous.)
    assert_allclose(f_nu.to(u.erg / u.cm**2 / u.s / u.Hz, 1.0), 1.0e-23)
    # Convert to ergs / cm^2 / s at 10 Ghz
    assert_allclose(
        f_nu.to(
            u.erg / u.cm**2 / u.s, 1.0, equivalencies=u.spectral_density(one_ghz * 10)
        ),
        1.0e-13,
    )
    # Convert to F_lambda at 1 Ghz
    assert_allclose(
        f_nu.to(f_lambda, 1.0, equivalencies=u.spectral_density(one_ghz)),
        3.335640951981521e-20,
    )
    # Convert to Jy at 1 Ghz (inverse of the previous conversion)
    assert_allclose(
        f_lambda.to(u.Jy, 1.0, equivalencies=u.spectral_density(one_ghz)),
        1.0 / 3.335640951981521e-20,
    )
    # Convert to ergs / cm^2 / s at 10 microns
    assert_allclose(
        f_lambda.to(
            u.erg / u.cm**2 / u.s,
            1.0,
            equivalencies=u.spectral_density(u.Quantity(10, u.micron)),
        ),
        10.0,
    )
def test_spectraldensity4():
    """PHOTLAM and PHOTNU conversions."""
    flam = u.erg / (u.cm**2 * u.s * u.AA)
    fnu = u.erg / (u.cm**2 * u.s * u.Hz)
    photlam = u.photon / (u.cm**2 * u.s * u.AA)
    photnu = u.photon / (u.cm**2 * u.s * u.Hz)
    wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
    # Reference fluxes at the wavelengths above, one list per unit.
    flux_photlam = [9.7654e-3, 1.003896e-2, 9.78473e-3]
    flux_photnu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
    flux_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
    flux_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
    flux_jy = [3.20735792e-2, 3.29903646e-2, 3.21727226e-2]
    flux_stmag = [12.41858665, 12.38919182, 12.41764379]
    flux_abmag = [12.63463143, 12.60403221, 12.63128047]
    # Each (unit_a, flux_a, unit_b, flux_b) pair is checked in both directions.
    pairs = [
        (photlam, flux_photlam, flam, flux_flam),
        (photlam, flux_photlam, fnu, flux_fnu),
        (photlam, flux_photlam, u.Jy, flux_jy),
        (photlam, flux_photlam, photnu, flux_photnu),
        (photnu, flux_photnu, fnu, flux_fnu),
        (photnu, flux_photnu, flam, flux_flam),
        (photlam, flux_photlam, u.STmag, flux_stmag),
        (photlam, flux_photlam, u.ABmag, flux_abmag),
    ]
    equiv = u.spectral_density(wave)
    for unit_a, flux_a, unit_b, flux_b in pairs:
        assert_allclose(unit_a.to(unit_b, flux_a, equiv), flux_b, rtol=1e-6)
        assert_allclose(unit_b.to(unit_a, flux_b, equiv), flux_a, rtol=1e-6)
def test_spectraldensity5():
    """Test photon luminosity density conversions."""
    L_la = u.erg / (u.s * u.AA)
    L_nu = u.erg / (u.s * u.Hz)
    phot_L_la = u.photon / (u.s * u.AA)
    phot_L_nu = u.photon / (u.s * u.Hz)
    wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
    # Reference luminosity densities at the wavelengths above.
    flux_phot_L_la = [9.7654e-3, 1.003896e-2, 9.78473e-3]
    flux_phot_L_nu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
    flux_L_la = [3.9135e-14, 4.0209e-14, 3.9169e-14]
    flux_L_nu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
    # Each (unit_a, flux_a, unit_b, flux_b) pair is checked in both directions.
    pairs = [
        (phot_L_la, flux_phot_L_la, L_la, flux_L_la),
        (phot_L_la, flux_phot_L_la, L_nu, flux_L_nu),
        (phot_L_la, flux_phot_L_la, phot_L_nu, flux_phot_L_nu),
        (phot_L_nu, flux_phot_L_nu, L_nu, flux_L_nu),
        (phot_L_nu, flux_phot_L_nu, L_la, flux_L_la),
    ]
    equiv = u.spectral_density(wave)
    for unit_a, flux_a, unit_b, flux_b in pairs:
        assert_allclose(unit_a.to(unit_b, flux_a, equiv), flux_b, rtol=1e-6)
        assert_allclose(unit_b.to(unit_a, flux_b, equiv), flux_a, rtol=1e-6)
def test_spectraldensity6():
    """Test surface brightness conversions."""
    slam = u.erg / (u.cm**2 * u.s * u.AA * u.sr)
    snu = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
    wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
    sb_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
    sb_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
    # S(nu) <--> S(lambda), checked in both directions.
    equiv = u.spectral_density(wave)
    assert_allclose(snu.to(slam, sb_fnu, equiv), sb_flam, rtol=1e-6)
    assert_allclose(slam.to(snu, sb_flam, equiv), sb_fnu, rtol=1e-6)
@pytest.mark.parametrize(
    ("from_unit", "to_unit"),
    [
        # Mixed flux/count-rate conversions that spectral_density must reject.
        (u.ph / u.cm**2 / u.s, (u.cm * u.cm * u.s) ** -1),
        (u.ph / u.cm**2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV)),
        (u.erg / u.cm**2 / u.s, (u.cm * u.cm * u.s) ** -1),
        (u.erg / u.cm**2 / u.s, u.erg / (u.cm * u.cm * u.s * u.keV)),
    ],
)
def test_spectraldensity_not_allowed(from_unit, to_unit):
    """Not allowed to succeed as
    per https://github.com/astropy/astropy/pull/10015
    """
    with pytest.raises(u.UnitConversionError, match="not convertible"):
        from_unit.to(to_unit, 1, u.spectral_density(1 * u.AA))
    # The other way
    with pytest.raises(u.UnitConversionError, match="not convertible"):
        to_unit.to(from_unit, 1, u.spectral_density(1 * u.AA))
def test_equivalent_units():
    """find_equivalent_units() for mass, with imperial units enabled."""
    from astropy.units import imperial

    with u.add_enabled_units(imperial):
        units = u.g.find_equivalent_units()
        units_set = set(units)
        # Exact expected set of mass units (core + imperial).
        match = {
            u.M_e, u.M_p, u.g, u.kg, u.solMass, u.t, u.u, u.M_earth,
            u.M_jup, imperial.oz, imperial.lb, imperial.st, imperial.ton,
            imperial.slug,
        } # fmt: skip
        assert units_set == match
    r = repr(units)
    # One line per unit, plus the header row and the closing bracket.
    assert r.count("\n") == len(units) + 2
def test_equivalent_units2():
    """find_equivalent_units() with the spectral equivalency.

    Checks the result before, inside, and after an add_enabled_units(imperial)
    scope: the imperial units must only appear while they are enabled.
    """
    units = set(u.Hz.find_equivalent_units(u.spectral()))
    match = {
        u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
        u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
        u.jupiterRad,
    } # fmt: skip
    assert units == match
    from astropy.units import imperial

    with u.add_enabled_units(imperial):
        units = set(u.Hz.find_equivalent_units(u.spectral()))
        # Same set, extended with the imperial length/energy units.
        match = {
            u.AU, u.Angstrom, imperial.BTU, u.Hz, u.J, u.Ry,
            imperial.cal, u.cm, u.eV, u.erg, imperial.ft, imperial.fur,
            imperial.inch, imperial.kcal, u.lyr, u.m, imperial.mi, u.lsec,
            imperial.mil, u.micron, u.pc, u.solRad, imperial.yd, u.Bq, u.Ci,
            imperial.nmi, u.k, u.earthRad, u.jupiterRad,
        } # fmt: skip
        assert units == match
    # Outside the scope, the original (non-imperial) set is restored.
    units = set(u.Hz.find_equivalent_units(u.spectral()))
    match = {
        u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr, u.lsec,
        u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
        u.jupiterRad,
    } # fmt: skip
    assert units == match
def test_trivial_equivalency():
    """A bare (from_unit, to_unit) pair acts as a one-to-one equivalency."""
    trivial = [(u.m, u.kg)]
    assert u.m.to(u.kg, equivalencies=trivial) == 1.0
def test_invalid_equivalency():
    """Malformed equivalency entries raise ValueError."""
    # Too short, and second element not a unit.
    for bad in ([(u.m,)], [(u.m, 5.0)]):
        with pytest.raises(ValueError):
            u.m.to(u.kg, equivalencies=bad)
def test_irrelevant_equivalency():
    """An equivalency that does not bridge the two units does not help."""
    with pytest.raises(u.UnitsError):
        u.m.to(u.kg, equivalencies=[(u.m, u.l)])
def test_brightness_temperature():
    """Jy <-> K round trip using a beam-area brightness temperature."""
    beam_area = np.pi * (50 * u.arcsec) ** 2
    freq = u.GHz * 5
    expected_tb = 7.052587837212582 * u.K
    equiv = u.brightness_temperature(freq, beam_area=beam_area)
    np.testing.assert_almost_equal(
        expected_tb.value, (1 * u.Jy).to_value(u.K, equivalencies=equiv)
    )
    np.testing.assert_almost_equal(
        1.0, expected_tb.to_value(u.Jy, equivalencies=equiv)
    )
def test_swapped_args_brightness_temperature():
"""
#5173 changes the order of arguments but accepts the old (deprecated) args
"""
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052587837212582 * u.K
with pytest.warns(AstropyDeprecationWarning) as w:
result = (1 * u.Jy).to(u.K, equivalencies=u.brightness_temperature(omega_B, nu))
roundtrip = result.to(u.Jy, equivalencies=u.brightness_temperature(omega_B, nu))
assert len(w) == 2
np.testing.assert_almost_equal(tb.value, result.value)
np.testing.assert_almost_equal(roundtrip.value, 1)
def test_surfacebrightness():
    """MJy/sr converts to a brightness temperature in K."""
    surface_brightness = 50 * u.MJy / u.sr
    temp = surface_brightness.to(u.K, u.brightness_temperature(50 * u.GHz))
    np.testing.assert_almost_equal(temp.value, 0.650965, 5)
    assert temp.unit.is_equivalent(u.K)
def test_beam():
    """beam <-> solid-angle conversions via beam_angular_area."""
    # pick a beam area: 2 pi r^2 = area of a Gaussian with sigma=50 arcsec
    omega_B = 2 * np.pi * (50 * u.arcsec) ** 2
    new_beam = (5 * u.beam).to(u.sr, u.equivalencies.beam_angular_area(omega_B))
    np.testing.assert_almost_equal(omega_B.to(u.sr).value * 5, new_beam.value)
    assert new_beam.unit.is_equivalent(u.sr)
    # make sure that it's still consistent with 5 beams
    nbeams = new_beam.to(u.beam, u.equivalencies.beam_angular_area(omega_B))
    np.testing.assert_almost_equal(nbeams.value, 5)
    # test inverse beam equivalency
    # (this is just a sanity check that the equivalency is defined;
    # it's not for testing numerical consistency)
    (5 / u.beam).to(1 / u.sr, u.equivalencies.beam_angular_area(omega_B))
    # test practical case
    # (this is by far the most important one)
    flux_density = (5 * u.Jy / u.beam).to(
        u.MJy / u.sr, u.equivalencies.beam_angular_area(omega_B)
    )
    np.testing.assert_almost_equal(flux_density.value, 13.5425483146382)
def test_thermodynamic_temperature():
    """MJy/sr <-> K round trip for CMB thermodynamic temperature."""
    freq = 143 * u.GHz
    expected = 0.0026320501262630277 * u.K
    equiv = u.thermodynamic_temperature(freq, T_cmb=2.7255 * u.K)
    np.testing.assert_almost_equal(
        expected.value, (1 * (u.MJy / u.sr)).to_value(u.K, equivalencies=equiv)
    )
    np.testing.assert_almost_equal(
        1.0, expected.to_value(u.MJy / u.sr, equivalencies=equiv)
    )
def test_equivalency_context():
    """set_enabled_equivalencies applies inside its scope and is undone after."""
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        phase = u.Quantity(1.0, u.cycle)
        assert_allclose(np.exp(1j * phase), 1.0)
        Omega = u.cycle / (1.0 * u.minute)
        assert_allclose(np.exp(1j * Omega * 60.0 * u.second), 1.0)
        # ensure we can turn off equivalencies even within the scope
        with pytest.raises(u.UnitsError):
            phase.to(1, equivalencies=None)
        # test the manager also works in the Quantity constructor.
        q1 = u.Quantity(phase, u.dimensionless_unscaled)
        assert_allclose(q1.value, u.cycle.to(u.radian))
        # and also if we use a class that happens to have a unit attribute.
        class MyQuantityLookalike(np.ndarray):
            pass
        mylookalike = np.array(1.0).view(MyQuantityLookalike)
        mylookalike.unit = "cycle"
        # test the manager also works in the Quantity constructor.
        q2 = u.Quantity(mylookalike, u.dimensionless_unscaled)
        assert_allclose(q2.value, u.cycle.to(u.radian))
    with u.set_enabled_equivalencies(u.spectral()):
        # Inside the scope, GHz converts to cm ...
        u.GHz.to(u.cm)
        eq_on = u.GHz.find_equivalent_units()
        # ... unless the enabled equivalencies are explicitly disabled.
        with pytest.raises(u.UnitsError):
            u.GHz.to(u.cm, equivalencies=None)
    # without equivalencies, we should find a smaller (sub)set
    eq_off = u.GHz.find_equivalent_units()
    assert all(eq in set(eq_on) for eq in eq_off)
    assert set(eq_off) < set(eq_on)
    # Check the equivalency manager also works in ufunc evaluations,
    # not just using (wrong) scaling. [#2496]
    l2v = u.doppler_optical(6000 * u.angstrom)
    l1 = 6010 * u.angstrom
    assert l1.to(u.km / u.s, equivalencies=l2v) > 100.0 * u.km / u.s
    with u.set_enabled_equivalencies(l2v):
        assert l1 > 100.0 * u.km / u.s
        assert abs((l1 - 500.0 * u.km / u.s).to(u.angstrom)) < 1.0 * u.km / u.s
def test_equivalency_context_manager():
    """Nested set/add_enabled_equivalencies scopes update the unit registry."""
    base_registry = u.get_current_unit_registry()

    def just_to_from_units(equivalencies):
        # Strip the converter functions; keep only the (from, to) unit pairs
        # so equivalency lists can be compared as sets.
        return [(equiv[0], equiv[1]) for equiv in equivalencies]

    tf_dimensionless_angles = just_to_from_units(u.dimensionless_angles())
    tf_spectral = just_to_from_units(u.spectral())
    # <=1 b/c might have the dimensionless_redshift equivalency enabled.
    assert len(base_registry.equivalencies) <= 1
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        new_registry = u.get_current_unit_registry()
        assert set(just_to_from_units(new_registry.equivalencies)) == set(
            tf_dimensionless_angles
        )
        assert set(new_registry.all_units) == set(base_registry.all_units)
        # An inner "set" scope REPLACES the outer equivalencies ...
        with u.set_enabled_equivalencies(u.spectral()):
            newer_registry = u.get_current_unit_registry()
            assert set(just_to_from_units(newer_registry.equivalencies)) == set(
                tf_spectral
            )
            assert set(newer_registry.all_units) == set(base_registry.all_units)
        # ... and the outer scope is restored when it exits.
        assert set(just_to_from_units(new_registry.equivalencies)) == set(
            tf_dimensionless_angles
        )
        assert set(new_registry.all_units) == set(base_registry.all_units)
        # An inner "add" scope EXTENDS the outer equivalencies instead.
        with u.add_enabled_equivalencies(u.spectral()):
            newer_registry = u.get_current_unit_registry()
            assert set(just_to_from_units(newer_registry.equivalencies)) == set(
                tf_dimensionless_angles
            ) | set(tf_spectral)
            assert set(newer_registry.all_units) == set(base_registry.all_units)
    # After all scopes exit, the original registry is back in effect.
    assert base_registry is u.get_current_unit_registry()
def test_temperature():
    """Temperature-scale conversions among K, deg_C, deg_F and deg_R."""
    from astropy.units.imperial import deg_F, deg_R

    temp = 0 * u.K
    assert_allclose(temp.to_value(u.deg_C, u.temperature()), -273.15)
    assert_allclose(temp.to_value(deg_F, u.temperature()), -459.67)
    temp = 20 * u.K
    assert_allclose(temp.to_value(deg_R, u.temperature()), 36.0)
    temp = 20 * deg_R
    assert_allclose(temp.to_value(u.K, u.temperature()), 11.11, atol=0.01)
    temp = 20 * deg_F
    assert_allclose(temp.to_value(deg_R, u.temperature()), 479.67)
    temp = 20 * deg_R
    assert_allclose(temp.to_value(deg_F, u.temperature()), -439.67)
    temp = 20 * u.deg_C
    assert_allclose(temp.to_value(deg_R, u.temperature()), 527.67)
    temp = 20 * deg_R
    assert_allclose(temp.to_value(u.deg_C, u.temperature()), -262.039, atol=0.01)
def test_temperature_energy():
    """K <-> keV round trip, consistent with E = k_B * T."""
    temp = 1000 * u.K
    energy = (temp * constants.k_B).to(u.keV)
    assert_allclose(temp.to_value(u.keV, u.temperature_energy()), energy.value)
    assert_allclose(energy.to_value(u.K, u.temperature_energy()), temp.value)
def test_molar_mass_amu():
    """g/mol <-> amu works only with the molar_mass_amu equivalency."""
    molar = 1 * (u.g / u.mol)
    amu = 1 * u.u
    assert_allclose(molar.to_value(u.u, u.molar_mass_amu()), amu.value)
    assert_allclose(amu.to_value(u.g / u.mol, u.molar_mass_amu()), molar.value)
    # Without the equivalency the conversion must fail.
    with pytest.raises(u.UnitsError):
        molar.to(u.u)
def test_compose_equivalencies():
    """Unit.compose() honors the equivalencies passed to it."""
    # Parallax: arcsec composes into pc (and the scale factor inverts).
    x = u.Unit("arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
    assert x[0] == u.pc
    x = u.Unit("2 arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
    assert x[0] == u.Unit(0.5 * u.pc)
    # dimensionless_angles: a degree composes into a plain scale factor.
    x = u.degree.compose(equivalencies=u.dimensionless_angles())
    assert u.Unit(u.degree.to(u.radian)) in x
    # Doppler: nm composes into a velocity (m/s) with the expected scale.
    x = (u.nm).compose(
        units=(u.m, u.s), equivalencies=u.doppler_optical(0.55 * u.micron)
    )
    for y in x:
        if y.bases == [u.m, u.s]:
            assert y.powers == [1, -1]
            assert_allclose(
                y.scale,
                u.nm.to(u.m / u.s, equivalencies=u.doppler_optical(0.55 * u.micron)),
            )
            break
    else:
        # for/else: reached only if no composed unit had m/s bases.
        assert False, "Didn't find speed in compose results"
def test_pixel_scale():
    """pix <-> angle conversions; the scale may be given either way up."""
    pix = 75 * u.pix
    asec = 30 * u.arcsec
    # Equivalent scales: arcsec-per-pix and its reciprocal pix-per-arcsec.
    for scale in (0.4 * u.arcsec / u.pix, 2.5 * u.pix / u.arcsec):
        equiv = u.pixel_scale(scale)
        assert_quantity_allclose(pix.to(u.arcsec, equiv), asec)
        assert_quantity_allclose(pix.to(u.arcmin, equiv), asec)
        assert_quantity_allclose(asec.to(u.pix, equiv), pix)
def test_pixel_scale_invalid_scale_unit():
    """Scales without exactly one power of pixel are rejected."""
    # No pixel dimension at all, and pixel to the wrong power.
    for bad_scale in (0.4 * u.arcsec, 0.4 * u.arcsec / u.pix**2):
        with pytest.raises(u.UnitsError, match="pixel dimension"):
            u.pixel_scale(bad_scale)
def test_pixel_scale_acceptable_scale_unit():
    """pixel_scale also accepts non-angular scales (here a velocity)."""
    pix = 75 * u.pix
    vel = 3000 * (u.cm / u.s)
    # Velocity-per-pix and its reciprocal must behave the same.
    for scale in (0.4 * (u.m / u.s / u.pix), 2.5 * (u.pix / (u.m / u.s))):
        equiv = u.pixel_scale(scale)
        assert_quantity_allclose(pix.to(u.m / u.s, equiv), vel)
        assert_quantity_allclose(pix.to(u.km / u.s, equiv), vel)
        assert_quantity_allclose(vel.to(u.pix, equiv), pix)
def test_plate_scale():
    """mm <-> angle conversions; the plate scale may be given either way up."""
    mm = 1.5 * u.mm
    asec = 30 * u.arcsec
    for scale in (20 * u.arcsec / u.mm, 0.05 * u.mm / u.arcsec):
        equiv = u.plate_scale(scale)
        assert_quantity_allclose(mm.to(u.arcsec, equiv), asec)
        assert_quantity_allclose(mm.to(u.arcmin, equiv), asec)
        assert_quantity_allclose(asec.to(u.mm, equiv), mm)
def test_equivelency():
    """Equivalency objects expose their name and kwargs.

    NOTE(review): the function name misspells "equivalency"; kept as-is so
    existing ``pytest -k`` selection expressions keep matching.
    """
    ps = u.pixel_scale(10 * u.arcsec / u.pix)
    assert isinstance(ps, Equivalency)
    assert isinstance(ps.name, list)
    assert len(ps.name) == 1
    assert ps.name[0] == "pixel_scale"
    assert isinstance(ps.kwargs, list)
    assert len(ps.kwargs) == 1
    assert ps.kwargs[0] == dict({"pixscale": 10 * u.arcsec / u.pix})
def test_add_equivelencies():
    """Adding Equivalency objects concatenates their names and kwargs.

    NOTE(review): the function name misspells "equivalencies"; kept as-is so
    existing ``pytest -k`` selection expressions keep matching.
    """
    # Equivalency + Equivalency -> a combined Equivalency.
    e1 = u.pixel_scale(10 * u.arcsec / u.pixel) + u.temperature_energy()
    assert isinstance(e1, Equivalency)
    assert e1.name == ["pixel_scale", "temperature_energy"]
    assert isinstance(e1.kwargs, list)
    assert e1.kwargs == [dict({"pixscale": 10 * u.arcsec / u.pix}), dict()]
    # Equivalency + plain list degrades to a plain list.
    e2 = u.pixel_scale(10 * u.arcsec / u.pixel) + [1, 2, 3]
    assert isinstance(e2, list)
def test_pprint():
    """Plain-text and HTML rendering of an EquivalentUnitsList."""
    pprint_class = u.UnitBase.EquivalentUnitsList
    equiv_units_to_Hz = u.Hz.find_equivalent_units()
    # Exact expected text table: header, bracket rows, one row per unit.
    assert pprint_class.__repr__(equiv_units_to_Hz).splitlines() == [
        "  Primary name | Unit definition | Aliases     ",
        "[",
        "  Bq           | 1 / s           | becquerel    ,",
        "  Ci           | 3.7e+10 / s     | curie        ,",
        "  Hz           | 1 / s           | Hertz, hertz ,",
        "]",
    ]
    # Exact expected HTML table for the same list.
    assert (
        pprint_class._repr_html_(equiv_units_to_Hz) == '<table style="width:50%">'
        "<tr><th>Primary name</th><th>Unit definition</th>"
        "<th>Aliases</th></tr>"
        "<tr><td>Bq</td><td>1 / s</td><td>becquerel</td></tr>"
        "<tr><td>Ci</td><td>3.7e+10 / s</td><td>curie</td></tr>"
        "<tr><td>Hz</td><td>1 / s</td><td>Hertz, hertz</td></tr></table>"
    )
|
2fa9ab6883619d30201ca0b4d120dea68ae3935e0696f4e3fdb26d77b04566ec | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test setting and adding unit aliases."""
import pytest
import astropy.units as u
# Each trial is a 3-tuple:
#   (aliases to enable, a unit string parseable only with them, expected unit).
trials = [
    ({"Angstroms": u.AA}, "Angstroms", u.AA),
    ({"counts": u.count}, "counts/s", u.count / u.s),
    (
        {"ergs": u.erg, "Angstroms": u.AA},
        "ergs/(s cm**2 Angstroms)",
        u.erg / (u.s * u.cm**2 * u.AA),
    ),
]
class TestAliases:
    """Exercise set_enabled_aliases / add_enabled_aliases and their scoping."""

    def teardown_method(self):
        # Reset after every test so alias state cannot leak between tests.
        u.set_enabled_aliases({})

    def teardown_class(self):
        # Sanity check: no test left aliases enabled behind the teardowns.
        assert u.get_current_unit_registry().aliases == {}

    @pytest.mark.parametrize("format_", [None, "fits", "ogip", "vounit", "cds"])
    @pytest.mark.parametrize("aliases,bad,unit", trials)
    def test_set_enabled_aliases_context_manager(self, aliases, bad, unit, format_):
        """Aliases set via the context manager work inside and are undone on exit."""
        if format_ == "cds":
            # CDS strings use '.' as separator and no '**' for powers.
            # NOTE(review): format_ is otherwise unused here (u.Unit is called
            # without a format argument) — confirm this is intentional.
            bad = bad.replace(" ", ".").replace("**", "")
        with u.set_enabled_aliases(aliases):
            assert u.get_current_unit_registry().aliases == aliases
            assert u.Unit(bad) == unit
        # Outside the scope, the alias is gone and parsing fails again.
        assert u.get_current_unit_registry().aliases == {}
        with pytest.raises(ValueError):
            u.Unit(bad)

    @pytest.mark.parametrize("aliases,bad,unit", trials)
    def test_add_enabled_aliases_context_manager(self, aliases, bad, unit):
        """Aliases added via the context manager work inside and are undone on exit."""
        with u.add_enabled_aliases(aliases):
            assert u.get_current_unit_registry().aliases == aliases
            assert u.Unit(bad) == unit
        assert u.get_current_unit_registry().aliases == {}
        with pytest.raises(ValueError):
            u.Unit(bad)

    def test_set_enabled_aliases(self):
        """set_enabled_aliases REPLACES the alias set on each call."""
        for i, (aliases, bad, unit) in enumerate(trials):
            u.set_enabled_aliases(aliases)
            assert u.get_current_unit_registry().aliases == aliases
            assert u.Unit(bad) == unit
            # Only strings covered by the currently-set aliases parse.
            for _, bad2, unit2 in trials:
                if bad2 == bad or bad2 in aliases:
                    assert u.Unit(bad2) == unit2
                else:
                    with pytest.raises(ValueError):
                        u.Unit(bad2)

    def test_add_enabled_aliases(self):
        """add_enabled_aliases ACCUMULATES aliases across calls."""
        expected_aliases = {}
        for i, (aliases, bad, unit) in enumerate(trials):
            u.add_enabled_aliases(aliases)
            expected_aliases.update(aliases)
            assert u.get_current_unit_registry().aliases == expected_aliases
            assert u.Unit(bad) == unit
            # All trials added so far parse; later ones still fail.
            for j, (_, bad2, unit2) in enumerate(trials):
                if j <= i:
                    assert u.Unit(bad2) == unit2
                else:
                    with pytest.raises(ValueError):
                        u.Unit(bad2)

    def test_cannot_alias_existing_unit(self):
        # "pct" already names an existing unit, so it cannot become an alias.
        with pytest.raises(ValueError, match="already means"):
            u.set_enabled_aliases({"pct": u.Unit(1e-12 * u.count)})

    def test_cannot_alias_existing_alias_to_another_unit(self):
        # An alias cannot later be re-pointed at a different unit.
        u.set_enabled_aliases({"counts": u.count})
        with pytest.raises(ValueError, match="already is an alias"):
            u.add_enabled_aliases({"counts": u.adu})
|
fffd6a335b1d12b58ad4ba4ad86c619636dca7c4edcd7dc24d2852b0c5af5431 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Regression tests for deprecated units or those that are "soft" deprecated
because they are required for VOUnit support but are not in common use."""
import pytest
from astropy import units as u
from astropy.units import deprecated, required_by_vounit
def test_emu():
    """Deprecated units are hidden from the main namespace but still usable."""
    with pytest.raises(AttributeError):
        u.emu
    assert u.Bi.to(deprecated.emu, 1) == 1
    # Inside the context, composition prefers the deprecated unit ...
    with deprecated.enable():
        assert u.Bi.compose()[0] == deprecated.emu
    # ... and outside it falls back to the non-deprecated unit itself.
    assert u.Bi.compose()[0] == u.Bi
    # test that the earth/jupiter mass/rad are also in the deprecated bunch
    for body in ("earth", "jupiter"):
        for phystype in ("Mass", "Rad"):
            # only test a couple prefixes to save time
            for prefix in ("n", "y"):
                namewoprefix = body + phystype
                unitname = prefix + namewoprefix
                # Prefixed form is absent from the main namespace ...
                with pytest.raises(AttributeError):
                    getattr(u, unitname)
                # ... but exists in `deprecated` and is based on the bare unit.
                assert getattr(deprecated, unitname).represents.bases[0] == getattr(
                    u, namewoprefix
                )
def test_required_by_vounit():
    """Prefixed solar units needed by VOUnit parse but stay out of `u`.

    The tests below could be replicated with all the various prefixes, but it
    seems unnecessary because they all come as a set. So we only use nano for
    the purposes of this test.
    """
    # nano-solar mass/rad/lum shouldn't be in the base unit namespace.
    # Each access gets its own ``raises`` block: previously all three accesses
    # shared one block, so execution stopped at the first raise and
    # ``u.nsolRad`` / ``u.nsolLum`` were never actually checked.
    for name in ("nsolMass", "nsolRad", "nsolLum"):
        with pytest.raises(AttributeError):
            getattr(u, name)
    # but they should be enabled by default via required_by_vounit, to allow
    # the Unit constructor to accept them
    assert u.Unit("nsolMass") == required_by_vounit.nsolMass
    assert u.Unit("nsolRad") == required_by_vounit.nsolRad
    assert u.Unit("nsolLum") == required_by_vounit.nsolLum
    # but because they are prefixes, they shouldn't be in find_equivalent_units
    assert required_by_vounit.nsolMass not in u.solMass.find_equivalent_units()
    assert required_by_vounit.nsolRad not in u.solRad.find_equivalent_units()
    assert required_by_vounit.nsolLum not in u.solLum.find_equivalent_units()
|
0e1b954e61e1a2bdbe65c96716c06d74fc8398271f58b30ba4dde8115d995c74 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import typing as T
# THIRD PARTY
import pytest
# LOCAL
from astropy import units as u
from astropy.units import Quantity
from astropy.units._typing import HAS_ANNOTATED
def test_ignore_generic_type_annotations():
    """Annotations that are not unit related are ignored.

    This test passes if the function works.
    """
    # one unit annotation, one generic annotation (should be ignored)
    @u.quantity_input
    def func(x: u.m, y: str):
        return x, y

    in_q, in_str = 2 * u.m, "cool string"
    out_q, out_str = func(in_q, in_str)  # if this doesn't fail, it worked.
    assert out_q == in_q
    assert out_str == in_str
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
class TestQuantityUnitAnnotations:
    """Test Quantity[Unit] type annotation."""

    def test_simple_annotation(self):
        """Quantity[unit] on one parameter validates it; others are untouched."""
        @u.quantity_input
        def func(x: Quantity[u.m], y: str):
            return x, y

        i_q, i_str = 2 * u.m, "cool string"
        o_q, o_str = func(i_q, i_str)
        assert i_q == o_q
        assert i_str == o_str
        # checks the input on the 1st arg
        with pytest.raises(u.UnitsError):
            func(1 * u.s, i_str)
        # but not the second
        o_q, o_str = func(i_q, {"not": "a string"})
        assert i_q == o_q
        assert i_str != o_str

    def test_multiple_annotation(self):
        """A Quantity[unit] return annotation converts the returned value."""
        @u.quantity_input
        def multi_func(a: Quantity[u.km]) -> Quantity[u.m]:
            return a

        i_q = 2 * u.km
        o_q = multi_func(i_q)
        assert o_q == i_q
        # Value is equal but the return annotation forced conversion to m.
        assert o_q.unit == u.m

    @pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
    def test_optional_and_annotated(self):
        """Optional[Quantity[unit]] accepts None and validated quantities."""
        @u.quantity_input
        def opt_func(x: T.Optional[Quantity[u.m]] = None) -> Quantity[u.km]:
            if x is None:
                return 1 * u.km
            return x

        i_q = 250 * u.m
        o_q = opt_func(i_q)
        # Return annotation converts the result to km.
        assert o_q.unit == u.km
        assert o_q == i_q
        i_q = None
        o_q = opt_func(i_q)
        assert o_q == 1 * u.km

    @pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
    def test_union_and_annotated(self):
        """Union of several Quantity[unit] members plus None is accepted."""
        # Union and Annotated
        @u.quantity_input
        def union_func(x: T.Union[Quantity[u.m], Quantity[u.s], None]):
            if x is None:
                return None
            else:
                return 2 * x

        i_q = 1 * u.m
        o_q = union_func(i_q)
        assert o_q == 2 * i_q
        i_q = 1 * u.s
        o_q = union_func(i_q)
        assert o_q == 2 * i_q
        i_q = None
        o_q = union_func(i_q)
        assert o_q is None

    def test_not_unit_or_ptype(self):
        # Subscripting Quantity with a non-unit must raise immediately.
        with pytest.raises(TypeError, match="unit annotation is not"):
            Quantity["definitely not a unit"]
@pytest.mark.skipif(HAS_ANNOTATED, reason="requires py3.8 behavior")
def test_not_unit_or_ptype():
    """
    Same as above test, but different behavior for python 3.8 b/c it passes
    Quantity right through.
    """
    # Without Annotated support, subscripting only warns and yields the unit.
    with pytest.warns(Warning):
        annot = Quantity[u.km]
    assert annot == u.km
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args3(solarx_unit, solary_unit):
    """Positional args annotated with a unit or a physical type validate."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary

    out_x, out_y = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
    # Both results come back as Quantities in their original unit.
    for result in (out_x, out_y):
        assert isinstance(result, Quantity)
        assert result.unit == u.arcsec
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.arcsec), ("angle", "angle")]
)
def test_args_noconvert3(solarx_unit, solary_unit):
    """quantity_input validates units but does not convert the inputs."""
    @u.quantity_input()
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary

    out_x, out_y = myfunc_args(1 * u.deg, 1 * u.arcmin)
    assert isinstance(out_x, Quantity)
    assert isinstance(out_y, Quantity)
    # The original units survive untouched.
    assert out_x.unit == u.deg
    assert out_y.unit == u.arcmin
@pytest.mark.parametrize("solarx_unit", [u.arcsec, "angle"])
def test_args_nonquantity3(solarx_unit):
    """Unannotated parameters accept plain (non-Quantity) values."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary):
        return solarx, solary

    out_x, out_y = myfunc_args(1 * u.arcsec, 100)
    assert isinstance(out_x, Quantity)
    # The unannotated argument passes through unchanged.
    assert isinstance(out_y, int)
    assert out_x.unit == u.arcsec
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.eV), ("angle", "energy")]
)
def test_arg_equivalencies3(solarx_unit, solary_unit):
    """Equivalencies passed to quantity_input apply during validation."""
    @u.quantity_input(equivalencies=u.mass_energy())
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary + (10 * u.J)  # Add an energy to check equiv is working

    out_x, out_y = myfunc_args(1 * u.arcsec, 100 * u.gram)
    assert isinstance(out_x, Quantity)
    assert isinstance(out_y, Quantity)
    assert out_x.unit == u.arcsec
    # The gram input passed validation via mass_energy and kept its unit.
    assert out_y.unit == u.gram
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_wrong_unit3(solarx_unit, solary_unit):
    """An inconvertible Quantity raises UnitsError with the exact message."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary

    with pytest.raises(
        u.UnitsError,
        match=(
            "Argument 'solary' to function 'myfunc_args' must be in units "
            f"convertible to '{str(solary_unit)}'."
        ),
    ):
        solarx, solary = myfunc_args(1 * u.arcsec, 100 * u.km)
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_not_quantity3(solarx_unit, solary_unit):
    """A plain value for an annotated parameter raises TypeError."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit):
        return solarx, solary

    with pytest.raises(
        TypeError,
        match=(
            "Argument 'solary' to function 'myfunc_args' has no 'unit' "
            "attribute. You should pass in an astropy Quantity instead."
        ),
    ):
        solarx, solary = myfunc_args(1 * u.arcsec, 100)
def test_decorator_override():
    """A unit given to quantity_input by keyword overrides the annotation."""
    @u.quantity_input(solarx=u.arcsec)
    def myfunc_args(solarx: u.km, solary: u.arcsec):
        return solarx, solary

    # arcsec is accepted for solarx despite the u.km annotation.
    out_x, out_y = myfunc_args(1 * u.arcsec, 1 * u.arcsec)
    for result in (out_x, out_y):
        assert isinstance(result, Quantity)
        assert result.unit == u.arcsec
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwargs3(solarx_unit, solary_unit):
    """Annotated keyword args are validated; unannotated args pass through."""
    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec):
        return solarx, solary, myk

    out_x, out_y, out_k = myfunc_args(1 * u.arcsec, 100, myk=100 * u.deg)
    assert isinstance(out_x, Quantity)
    assert isinstance(out_y, int)
    assert isinstance(out_k, Quantity)
    assert out_k.unit == u.deg
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_unused_kwargs3(solarx_unit, solary_unit):
    """Extra, unannotated keyword arguments are left completely alone."""

    @u.quantity_input
    def myfunc_args(
        solarx: solarx_unit, solary, myk: solary_unit = 1 * u.arcsec, myk2=1000
    ):
        return solarx, solary, myk, myk2

    out_x, out_y, out_k, out_k2 = myfunc_args(
        1 * u.arcsec, 100, myk=100 * u.deg, myk2=10
    )
    assert isinstance(out_x, Quantity)
    assert isinstance(out_y, int)
    assert isinstance(out_k, Quantity)
    assert isinstance(out_k2, int)
    assert out_k.unit == u.deg
    assert out_k2 == 10
@pytest.mark.parametrize("solarx_unit,energy", [(u.arcsec, u.eV), ("angle", "energy")])
def test_kwarg_equivalencies3(solarx_unit, energy):
    """Equivalencies given to the decorator also cover keyword arguments."""

    @u.quantity_input(equivalencies=u.mass_energy())
    def myfunc_args(solarx: solarx_unit, energy: energy = 10 * u.eV):
        # Adding an energy verifies the mass-energy equivalence is active.
        return solarx, energy + (10 * u.J)

    out_x, out_e = myfunc_args(1 * u.arcsec, 100 * u.gram)
    assert isinstance(out_x, Quantity)
    assert isinstance(out_e, Quantity)
    assert out_x.unit == u.arcsec
    assert out_e.unit == u.gram
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_wrong_unit3(solarx_unit, solary_unit):
    """A keyword argument with a non-convertible unit raises UnitsError."""

    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
        return solarx, solary

    expected_msg = (
        "Argument 'solary' to function 'myfunc_args' must be in "
        f"units convertible to '{str(solary_unit)}'."
    )
    with pytest.raises(u.UnitsError, match=expected_msg):
        myfunc_args(1 * u.arcsec, solary=100 * u.km)
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_not_quantity3(solarx_unit, solary_unit):
    """A plain number for an annotated keyword argument raises TypeError."""

    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
        return solarx, solary

    expected_msg = (
        "Argument 'solary' to function 'myfunc_args' has no 'unit' attribute. "
        "You should pass in an astropy Quantity instead."
    )
    with pytest.raises(TypeError, match=expected_msg):
        myfunc_args(1 * u.arcsec, solary=100)
@pytest.mark.parametrize(
    "solarx_unit,solary_unit", [(u.arcsec, u.deg), ("angle", "angle")]
)
def test_kwarg_default3(solarx_unit, solary_unit):
    """An annotated keyword default is accepted when the argument is omitted.

    The original version only called the function without asserting anything;
    also check that both values come back unchanged.
    """

    @u.quantity_input
    def myfunc_args(solarx: solarx_unit, solary: solary_unit = 10 * u.deg):
        return solarx, solary

    solarx, solary = myfunc_args(1 * u.arcsec)
    assert solarx == 1 * u.arcsec
    assert solary == 10 * u.deg
def test_return_annotation():
    """The return annotation converts the result to the annotated unit."""

    @u.quantity_input
    def myfunc_args(solarx: u.arcsec) -> u.deg:
        return solarx

    converted = myfunc_args(1 * u.arcsec)
    assert converted.unit is u.deg
def test_return_annotation_none():
    """A ``-> None`` return annotation is ignored, not treated as a unit."""

    @u.quantity_input
    def myfunc_args(solarx: u.arcsec) -> None:
        pass

    assert myfunc_args(1 * u.arcsec) is None
def test_return_annotation_notUnit():
    """A non-unit return annotation (e.g. ``int``) leaves the result alone."""

    @u.quantity_input
    def myfunc_args(solarx: u.arcsec) -> int:
        return 0

    assert myfunc_args(1 * u.arcsec) == 0
def test_enum_annotation():
    # Regression test for gh-9932: a non-unit annotation such as an Enum
    # subclass must be ignored by quantity_input rather than raise.
    from enum import Enum, auto

    class BasicEnum(Enum):
        AnOption = auto()

    @u.quantity_input
    def myfunc_args(a: BasicEnum, b: u.arcsec) -> None:
        pass

    # Should simply run without error.
    myfunc_args(BasicEnum.AnOption, 1 * u.arcsec)
|
805d12ee438ef2f88cb5355fbcb5948fde2f86b8219320913faa12034d3ea532 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the photometric module. Note that this is shorter than
might be expected because a lot of the relevant tests that deal
with magnitudes are in `test_logarithmic.py`
"""
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import (
AA,
ABflux,
Jy,
Magnitude,
STflux,
cm,
erg,
mgy,
nmgy,
s,
zero_point_flux,
)
def test_maggies():
    """One nanomaggy is 1e-9 maggy and corresponds to magnitude 22.5."""
    assert_quantity_allclose(Magnitude((1 * nmgy).to(mgy)).value, 22.5)
    assert_quantity_allclose(1 * nmgy, 1e-9 * mgy)
def test_maggies_zpts():
    """Maggies convert to flux units once a zero point is supplied."""
    assert_quantity_allclose(
        (1 * nmgy).to(ABflux, zero_point_flux(1 * ABflux)), 3631e-9 * Jy, rtol=1e-3
    )

    ST_base_unit = erg * cm**-2 / s / AA

    assert_quantity_allclose(
        (10 * mgy).to(STflux, zero_point_flux(1 * ST_base_unit)), 10 * ST_base_unit
    )
    assert_quantity_allclose(
        (2 * ST_base_unit).to(mgy, zero_point_flux(0.5 * ST_base_unit)), 4 * mgy
    )
    assert_quantity_allclose(
        (5.0e-10 * ST_base_unit).to(mgy, zero_point_flux(0.5 * ST_base_unit)),
        1 * nmgy,
    )
|
6f10fd859f1845711be9af073e34f4c919a34af36c4c9aa40e60616b23ea3461 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for the units.format package
"""
import warnings
from contextlib import nullcontext
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.constants import si
from astropy.units import PrefixUnit, Unit, UnitBase, UnitsWarning, dex
from astropy.units import format as u_format
from astropy.units.utils import is_effectively_unity
@pytest.mark.parametrize(
    "strings, unit",
    [
        (["m s", "m*s", "m.s"], u.m * u.s),
        (["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s),
        (["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m**2),
        (["m**-3", "m-3", "m^(-3)", "/m3"], u.m**-3),
        (["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m**1.5),
        (["2.54 cm"], u.Unit(u.cm * 2.54)),
        (["10+8m"], u.Unit(u.m * 1e8)),
        # This is the VOUnits documentation, but doesn't seem to follow the
        # unity grammar (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy)
        (["sqrt(m)"], u.m**0.5),
        (["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)),
        (["mag"], u.mag),
        (["mag(ct/s)"], u.MagUnit(u.ct / u.s)),
        (["dex"], u.dex),
        (["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2)),
    ],
)
def test_unit_grammar(strings, unit):
    """Every spelling in ``strings`` parses (generic grammar) to ``unit``."""
    for spelling in strings:
        print(spelling)
        assert u_format.Generic.parse(spelling) == unit
@pytest.mark.parametrize(
    "string", ["sin( /pixel /s)", "mag(mag)", "dB(dB(mW))", "dex()"]
)
def test_unit_grammar_fail(string):
    """Malformed generic-format strings are rejected with ValueError."""
    print(string)
    with pytest.raises(ValueError):
        u_format.Generic.parse(string)
@pytest.mark.parametrize(
    "strings, unit",
    [
        (["0.1nm"], u.AA),
        (["mW/m2"], u.Unit(u.erg / u.cm**2 / u.s)),
        (["mW/(m2)"], u.Unit(u.erg / u.cm**2 / u.s)),
        (["km/s", "km.s-1"], u.km / u.s),
        (["10pix/nm"], u.Unit(10 * u.pix / u.nm)),
        (["1.5x10+11m"], u.Unit(1.5e11 * u.m)),
        (["1.5×10+11m"], u.Unit(1.5e11 * u.m)),
        (["m2"], u.m**2),
        (["10+21m"], u.Unit(u.m * 1e21)),
        (["2.54cm"], u.Unit(u.cm * 2.54)),
        (["20%"], 0.20 * u.dimensionless_unscaled),
        (["10+9"], 1.0e9 * u.dimensionless_unscaled),
        (["2x10-9"], 2.0e-9 * u.dimensionless_unscaled),
        (["---"], u.dimensionless_unscaled),
        (["ma"], u.ma),
        (["mAU"], u.mAU),
        (["uarcmin"], u.uarcmin),
        (["uarcsec"], u.uarcsec),
        (["kbarn"], u.kbarn),
        (["Gbit"], u.Gbit),
        (["Gibit"], 2**30 * u.bit),
        (["kbyte"], u.kbyte),
        (["mRy"], 0.001 * u.Ry),
        (["mmag"], u.mmag),
        (["Mpc"], u.Mpc),
        (["Gyr"], u.Gyr),
        (["°"], u.degree),
        (["°/s"], u.degree / u.s),
        (["Å"], u.AA),
        (["Å/s"], u.AA / u.s),
        (["\\h"], si.h),
        (["[cm/s2]"], dex(u.cm / u.s**2)),
        (["[K]"], dex(u.K)),
        (["[-]"], dex(u.dimensionless_unscaled)),
    ],
)
def test_cds_grammar(strings, unit):
    """Every spelling in ``strings`` parses (CDS grammar) to ``unit``."""
    for spelling in strings:
        print(spelling)
        assert u_format.CDS.parse(spelling) == unit
@pytest.mark.parametrize(
    "string",
    [
        "0.1 nm",
        "solMass(3/2)",
        "km / s",
        "km s-1",
        "pix0.1nm",
        "pix/(0.1nm)",
        "km*s",
        "km**2",
        "5x8+3m",
        "0.1---",
        "---m",
        "m---",
        "--",
        "0.1-",
        "-m",
        "m-",
        "mag(s-1)",
        "dB(mW)",
        "dex(cm s-2)",
        "[--]",
    ],
)
def test_cds_grammar_fail(string):
    """Strings that violate the CDS grammar raise ValueError."""
    print(string)
    with pytest.raises(ValueError):
        u_format.CDS.parse(string)
def test_cds_dimensionless():
    """CDS spells the dimensionless unit '---', in both directions."""
    assert u.dimensionless_unscaled.to_string(format="cds") == "---"
    assert u.Unit("---", format="cds") == u.dimensionless_unscaled
def test_cds_log10_dimensionless():
    """CDS spells the dimensionless dex unit '[-]', in both directions."""
    assert u.dex(u.dimensionless_unscaled).to_string(format="cds") == "[-]"
    assert u.Unit("[-]", format="cds") == u.dex(u.dimensionless_unscaled)
# These examples are taken from the EXAMPLES section of
# https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/
@pytest.mark.parametrize(
    "strings, unit",
    [
        (
            ["count /s", "count/s", "count s**(-1)", "count / s", "count /s "],
            u.count / u.s,
        ),
        (
            ["/pixel /s", "/(pixel * s)"],
            (u.pixel * u.s) ** -1,
        ),
        (
            [
                "count /m**2 /s /eV",
                "count m**(-2) * s**(-1) * eV**(-1)",
                "count /(m**2 * s * eV)",
            ],
            u.count * u.m**-2 * u.s**-1 * u.eV**-1,
        ),
        (
            ["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"],
            u.erg / (u.s * u.GHz * u.pixel),
        ),
        (
            ["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"],
            # Though this is given as an example, it seems to violate the rules
            # of not raising scales to powers, so I'm just excluding it
            # "(10**2 MeV)**2 /yr /m"
            u.keV**2 / (u.yr * u.angstrom),
        ),
        (
            [
                "10**(46) erg /s",
                "10**46 erg /s",
                "10**(39) J /s",
                "10**(39) W",
                "10**(15) YW",
                "YJ /fs",
            ],
            10**46 * u.erg / u.s,
        ),
        (
            [
                "10**(-7) J /cm**2 /MeV",
                "10**(-9) J m**(-2) eV**(-1)",
                "nJ m**(-2) eV**(-1)",
                "nJ /m**2 /eV",
            ],
            10**-7 * u.J * u.cm**-2 * u.MeV**-1,
        ),
        (
            [
                "sqrt(erg /pixel /s /GHz)",
                "(erg /pixel /s /GHz)**(0.5)",
                "(erg /pixel /s /GHz)**(1/2)",
                "erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)",
            ],
            (u.erg * u.pixel**-1 * u.s**-1 * u.GHz**-1) ** 0.5,
        ),
        (
            [
                "(count /s) (/pixel /s)",
                "(count /s) * (/pixel /s)",
                "count /pixel /s**2",
            ],
            (u.count / u.s) * (1.0 / (u.pixel * u.s)),
        ),
    ],
)
def test_ogip_grammar(strings, unit):
    """Every spelling in ``strings`` parses (OGIP grammar) to ``unit``."""
    for spelling in strings:
        print(spelling)
        assert u_format.OGIP.parse(spelling) == unit
@pytest.mark.parametrize(
    "string",
    [
        "log(photon /m**2 /s /Hz)",
        "sin( /pixel /s)",
        "log(photon /cm**2 /s /Hz) /(sin( /pixel /s))",
        "log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)",
        "dB(mW)",
        "dex(cm/s**2)",
    ],
)
def test_ogip_grammar_fail(string):
    """Function units and other invalid OGIP strings raise ValueError."""
    print(string)
    with pytest.raises(ValueError):
        u_format.OGIP.parse(string)
class RoundtripBase:
    """Shared machinery for unit-string round-trip tests.

    Subclasses set ``format_`` (the name of the format to exercise) and may
    override ``deprecated_units`` with strings expected to warn on re-parse.
    """

    deprecated_units = set()

    def check_roundtrip(self, unit, output_format=None):
        # Serialize ``unit`` and parse it back, checking the overall scale
        # factor survives the trip.
        if output_format is None:
            output_format = self.format_
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")  # Same warning shows up multiple times
            s = unit.to_string(output_format)
        # Deprecated spellings must warn exactly once on re-parse.
        if s in self.deprecated_units:
            with pytest.warns(UnitsWarning, match="deprecated") as w:
                a = Unit(s, format=self.format_)
            assert len(w) == 1
        else:
            a = Unit(s, format=self.format_)  # No warning
        assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)

    def check_roundtrip_decompose(self, unit):
        # Same check on the decomposed (irreducible-base) form; decomposed
        # strings must also be free of spaces in this format.
        ud = unit.decompose()
        s = ud.to_string(self.format_)
        assert " " not in s
        a = Unit(s, format=self.format_)
        assert_allclose(a.decompose().scale, ud.scale, rtol=1e-5)
class TestRoundtripGeneric(RoundtripBase):
    """Round-trip every unit in the main namespace through generic/unicode."""

    format_ = "generic"

    @pytest.mark.parametrize(
        "unit",
        [
            unit
            for unit in u.__dict__.values()
            if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
        ],
    )
    def test_roundtrip(self, unit):
        self.check_roundtrip(unit)
        self.check_roundtrip(unit, output_format="unicode")
        self.check_roundtrip_decompose(unit)
class TestRoundtripVOUnit(RoundtripBase):
    """Round-trip every unit known to the VOUnit format."""

    format_ = "vounit"
    deprecated_units = u_format.VOUnit._deprecated_units

    @pytest.mark.parametrize(
        "unit",
        [
            unit
            for unit in u_format.VOUnit._units.values()
            if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
        ],
    )
    def test_roundtrip(self, unit):
        self.check_roundtrip(unit)
        # mag and dB decompose into function units VOUnit cannot express.
        if unit not in (u.mag, u.dB):
            self.check_roundtrip_decompose(unit)
class TestRoundtripFITS(RoundtripBase):
    """Round-trip every unit known to the FITS format."""

    format_ = "fits"
    deprecated_units = u_format.Fits._deprecated_units

    @pytest.mark.parametrize(
        "unit",
        [
            unit
            for unit in u_format.Fits._units.values()
            if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
        ],
    )
    def test_roundtrip(self, unit):
        self.check_roundtrip(unit)
class TestRoundtripCDS(RoundtripBase):
    """Round-trip every unit known to the CDS format, plus dex units."""

    format_ = "cds"

    @pytest.mark.parametrize(
        "unit",
        [
            unit
            for unit in u_format.CDS._units.values()
            if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
        ],
    )
    def test_roundtrip(self, unit):
        self.check_roundtrip(unit)
        if unit == u.mag:
            # Skip mag: decomposes into dex, which is unknown to CDS.
            return
        self.check_roundtrip_decompose(unit)

    @pytest.mark.parametrize(
        "unit", [u.dex(unit) for unit in (u.cm / u.s**2, u.K, u.Lsun)]
    )
    def test_roundtrip_dex(self, unit):
        # Dex (bracketed) units must survive a CDS write/parse cycle exactly.
        string = unit.to_string(format="cds")
        recovered = u.Unit(string, format="cds")
        assert recovered == unit
class TestRoundtripOGIP(RoundtripBase):
    """Round-trip every unit known to the OGIP format.

    OGIP needs several special cases: some units warn when written, and
    decomposed scales that are not powers of ten also warn.
    """

    format_ = "ogip"
    deprecated_units = u_format.OGIP._deprecated_units | {"d"}

    @pytest.mark.parametrize(
        "unit",
        [
            unit
            for unit in u_format.OGIP._units.values()
            if (isinstance(unit, UnitBase) and not isinstance(unit, PrefixUnit))
        ],
    )
    def test_roundtrip(self, unit):
        if str(unit) in ("d", "0.001 Crab"):
            # Special-case day, which gets auto-converted to hours, and mCrab,
            # which the default check does not recognize as a deprecated unit.
            with pytest.warns(UnitsWarning):
                s = unit.to_string(self.format_)
                a = Unit(s, format=self.format_)
            assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-9)
        else:
            self.check_roundtrip(unit)
        if str(unit) in ("mag", "byte", "Crab"):
            # Skip mag and byte, which decompose into dex and bit, resp.,
            # both of which are unknown to OGIP, as well as Crab, which does
            # not decompose, and thus gives a deprecated unit warning.
            return
        # Choose the warning context the decomposed round-trip should emit.
        power_of_ten = np.log10(unit.decompose().scale)
        if abs(power_of_ten - round(power_of_ten)) > 1e-3:
            ctx = pytest.warns(UnitsWarning, match="power of 10")
        elif str(unit) == "0.001 Crab":
            ctx = pytest.warns(UnitsWarning, match="deprecated")
        else:
            ctx = nullcontext()
        with ctx:
            self.check_roundtrip_decompose(unit)
def test_fits_units_available():
    """The FITS unit registry builds without error and is non-empty.

    The original test only evaluated the attribute and discarded the result;
    asserting truthiness also guards against an empty registry.
    """
    assert u_format.Fits._units
def test_vo_units_available():
    """The VOUnit unit registry builds without error and is non-empty.

    The original test only evaluated the attribute and discarded the result;
    asserting truthiness also guards against an empty registry.
    """
    assert u_format.VOUnit._units
def test_cds_units_available():
    """The CDS unit registry builds without error and is non-empty.

    The original test only evaluated the attribute and discarded the result;
    asserting truthiness also guards against an empty registry.
    """
    assert u_format.CDS._units
def test_cds_non_ascii_unit():
    """Regression test for #5350. This failed with a decoding error as
    μas could not be represented in ascii."""
    from astropy.units import cds

    # Merely running the equivalent-unit search with CDS units enabled used
    # to raise; no assertion is needed beyond "does not crash".
    with cds.enable():
        u.radian.find_equivalent_units(include_prefix_units=True)
def test_latex():
    """A composite flux unit renders as a LaTeX fraction."""
    expected = r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
    assert (u.erg / (u.cm**2 * u.s)).to_string("latex") == expected
def test_new_style_latex():
    """The 'latex' format works through the format() mini-language."""
    fluxunit = u.erg / (u.cm**2 * u.s)
    assert format(fluxunit, "latex") == r"$\mathrm{\frac{erg}{s\,cm^{2}}}$"
def test_latex_scale():
    """A scale factor renders as ``N \\times 10^{k}`` in LaTeX output."""
    scaled = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
    assert (
        scaled.to_string("latex")
        == r"$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$"
    )
def test_latex_inline_scale():
    """latex_inline keeps everything on one line with negative exponents."""
    scaled = u.Unit(1.0e-24 * u.erg / (u.cm**2 * u.s * u.Hz))
    expected = r"$\mathrm{1 \times 10^{-24}\,erg\,Hz^{-1}\,s^{-1}\,cm^{-2}}$"
    assert scaled.to_string("latex_inline") == expected
@pytest.mark.parametrize(
    "format_spec, string",
    [
        ("generic", "erg / (cm2 s)"),
        ("s", "erg / (cm2 s)"),
        ("console", " erg \n ------\n s cm^2"),
        ("latex", "$\\mathrm{\\frac{erg}{s\\,cm^{2}}}$"),
        ("latex_inline", "$\\mathrm{erg\\,s^{-1}\\,cm^{-2}}$"),
        (">20s", " erg / (cm2 s)"),
    ],
)
def test_format_styles(format_spec, string):
    """format() dispatches to named unit formats as well as width specs."""
    fluxunit = u.erg / (u.cm**2 * u.s)
    assert format(fluxunit, format_spec) == string
def test_flatten_to_known():
    """A custom unit defined in terms of known ones serializes via those."""
    myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz)
    assert myunit.to_string("fits") == "erg Hz-1"
    assert (myunit * u.bit**3).to_string("fits") == "bit3 erg Hz-1"
def test_flatten_impossible():
    """A unit with no known representation cannot be written as FITS."""
    myunit = u.def_unit("FOOBAR_Two")
    with u.add_enabled_units(myunit):
        with pytest.raises(ValueError):
            myunit.to_string("fits")
def test_console_out():
    """Smoke test: console output of a decomposed unit must not raise.

    Regression test for issue #436.
    """
    u.Jy.decompose().to_string("console")
def test_flexible_float():
    """Integral scale factors print without a trailing decimal point."""
    rendered = u.min._represents.to_string("latex")
    assert rendered == r"$\mathrm{60\,s}$"
def test_fits_to_string_function_error():
    """Fits.to_string raises TypeError on non-unit input (gh-11825),
    instead of silently returning None."""
    with pytest.raises(TypeError, match="unit argument must be"):
        u_format.Fits.to_string(None)
def test_fraction_repr():
    """Powers render as integers or fractions, never as decimal floats."""
    assert "." not in (u.cm**2.0).to_string("latex")
    fractional = u.cm**2.5
    assert "5/2" in fractional.to_string("latex")
    assert fractional.to_string("unicode") == "cm⁵⸍²"
def test_scale_effectively_unity():
    """Scale just off unity at machine precision level is OK.

    Ensures #748 does not recur.
    """
    converted = (3.0 * u.N).cgs
    assert is_effectively_unity(converted.unit.scale)
    # "<value> <unit...>" repr: no stray scale factor token appears.
    assert len(repr(converted).split()) == 3
def test_percent():
    """Test that the % unit is properly recognized. Since % is a special
    symbol, this goes slightly beyond the round-tripping tested above."""
    assert u.Unit("%") == u.percent == u.Unit(0.01)
    assert u.Unit("%", format="cds") == u.Unit(0.01)
    assert u.Unit(0.01).to_string("cds") == "%"
    # Neither FITS nor VOUnit has a percent symbol.
    for bad_format in ("fits", "vounit"):
        with pytest.raises(ValueError):
            u.Unit("%", format=bad_format)
def test_scaled_dimensionless():
    """Test that scaled dimensionless units are properly recognized in generic
    and CDS, but not in fits and vounit."""
    assert u.Unit("0.1") == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled
    assert u.Unit("1.e-4") == u.Unit(1.0e-4)
    assert u.Unit("10-4", format="cds") == u.Unit(1.0e-4)
    assert u.Unit("10+8").to_string("cds") == "10+8"
    # FITS accepts only pure powers of ten as scale factors ...
    with pytest.raises(ValueError):
        u.Unit(0.15).to_string("fits")
    assert u.Unit(0.1).to_string("fits") == "10**-1"
    # ... and VOUnit rejects a bare scaled dimensionless unit entirely.
    with pytest.raises(ValueError):
        u.Unit(0.1).to_string("vounit")
def test_deprecated_did_you_mean_units():
    """Unknown or deprecated spellings suggest close valid alternatives."""
    with pytest.raises(ValueError) as exc_info:
        u.Unit("ANGSTROM", format="fits")
    assert "Did you mean Angstrom or angstrom?" in str(exc_info.value)
    with pytest.raises(ValueError) as exc_info:
        u.Unit("crab", format="ogip")
    assert "Crab (deprecated)" in str(exc_info.value)
    assert "mCrab (deprecated)" in str(exc_info.value)
    # VOUnit parses deprecated spellings but warns with suggestions.
    with pytest.warns(
        UnitsWarning,
        match=r".* Did you mean 0\.1nm, Angstrom "
        r"\(deprecated\) or angstrom \(deprecated\)\?",
    ) as w:
        u.Unit("ANGSTROM", format="vounit")
    assert len(w) == 1
    # The preferred replacement should be suggested exactly once.
    assert str(w[0].message).count("0.1nm") == 1
    with pytest.warns(UnitsWarning, match=r".* 0\.1nm\.") as w:
        u.Unit("angstrom", format="vounit")
    assert len(w) == 1
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_fits_function(string):
    # Function units cannot be written, so ensure they're not parsed either.
    print(string)
    with pytest.raises(ValueError):
        u_format.Fits().parse(string)
@pytest.mark.parametrize("string", ["mag(ct/s)", "dB(mW)", "dex(cm s**-2)"])
def test_vounit_function(string):
    # Function units cannot be written, so ensure they're not parsed either.
    with warnings.catch_warnings():
        # ct, dex also raise warnings - irrelevant here.
        warnings.simplefilter("ignore")
        with pytest.raises(ValueError):
            u_format.VOUnit().parse(string)
def test_vounit_binary_prefix():
    """Binary (IEC) prefixes are accepted by the VOUnit parser.

    The original comparisons were bare ``==`` expressions whose results were
    discarded, so the test asserted nothing; the ``Kibit`` line also compared
    kibibits against bytes.  Assert the parsed values explicitly.
    """
    assert u.Unit("KiB", format="vounit") == u.Unit(1024 * u.byte)
    assert u.Unit("Kibyte", format="vounit") == u.Unit(1024 * u.byte)
    assert u.Unit("Kibit", format="vounit") == u.Unit(1024 * u.bit)
    # SI-style long spellings of binary prefixes are deprecated in VOUnit.
    with pytest.warns(UnitsWarning) as w:
        u.Unit("kibibyte", format="vounit")
    assert len(w) == 1
def test_vounit_unknown():
    """'unknown'/'UNKNOWN' parse to None; the empty string is dimensionless."""
    for spelling in ("unknown", "UNKNOWN"):
        assert u.Unit(spelling, format="vounit") is None
    assert u.Unit("", format="vounit") is u.dimensionless_unscaled
def test_vounit_details():
    """Assorted VOUnit corner cases: deprecated Pa, d(a)- prefixes, flam."""
    with pytest.warns(UnitsWarning, match="deprecated") as w:
        assert u.Unit("Pa", format="vounit") is u.Pascal
    assert len(w) == 1
    # The da- prefix is not allowed, and the d- prefix is discouraged
    assert u.dam.to_string("vounit") == "10m"
    assert u.Unit("dam dag").to_string("vounit") == "100g.m"
    # Parse round-trip
    with pytest.warns(UnitsWarning, match="deprecated"):
        flam = u.erg / u.cm / u.cm / u.s / u.AA
        x = u.format.VOUnit.to_string(flam)
        assert x == "Angstrom**-1.cm**-2.erg.s**-1"
        new_flam = u.format.VOUnit.parse(x)
        assert new_flam == flam
@pytest.mark.parametrize(
    "unit, vounit, number, scale, voscale",
    [
        ("nm", "nm", 0.1, "10^-1", "0.1"),
        ("fm", "fm", 100.0, "10+2", "100"),
        ("m^2", "m**2", 100.0, "100.0", "100"),
        ("cm", "cm", 2.54, "2.54", "2.54"),
        ("kg", "kg", 1.898124597e27, "1.898124597E27", "1.8981246e+27"),
        ("m/s", "m.s**-1", 299792458.0, "299792458", "2.9979246e+08"),
        ("cm2", "cm**2", 1.0e-20, "10^(-20)", "1e-20"),
    ],
)
def test_vounit_scale_factor(unit, vounit, number, scale, voscale):
    """Scaled units parse correctly and serialize with a normalized scale."""
    parsed = u.Unit(f"{scale} {unit}")
    assert parsed == number * u.Unit(unit)
    assert parsed.to_string(format="vounit") == voscale + vounit
def test_vounit_custom():
    """Quoted custom units round-trip, including with SI prefixes."""
    x = u.Unit("'foo' m", format="vounit")
    x_vounit = x.to_string("vounit")
    assert x_vounit == "'foo'.m"
    x_string = x.to_string()
    assert x_string == "foo m"
    # A prefixed custom unit ("m'foo'" = milli-foo).
    x = u.Unit("m'foo' m", format="vounit")
    assert x.bases[1]._represents.scale == 0.001
    x_vounit = x.to_string("vounit")
    assert x_vounit == "m.m'foo'"
    x_string = x.to_string()
    assert x_string == "m mfoo"
def test_vounit_implicit_custom():
    # Yikes, this becomes "femto-urlong"... But at least there's a warning.
    with pytest.warns(UnitsWarning) as w:
        x = u.Unit("furlong/week", format="vounit")
    # "f" is interpreted as the femto prefix on an unknown "urlong" unit.
    assert x.bases[0]._represents.scale == 1e-15
    assert x.bases[0]._represents.bases[0].name == "urlong"
    # One warning per unrecognized unit string.
    assert len(w) == 2
    assert "furlong" in str(w[0].message)
    assert "week" in str(w[1].message)
@pytest.mark.parametrize(
    "scale, number, string",
    [
        ("10+2", 100, "10**2"),
        ("10(+2)", 100, "10**2"),
        ("10**+2", 100, "10**2"),
        ("10**(+2)", 100, "10**2"),
        ("10^+2", 100, "10**2"),
        ("10^(+2)", 100, "10**2"),
        ("10**2", 100, "10**2"),
        ("10**(2)", 100, "10**2"),
        ("10^2", 100, "10**2"),
        ("10^(2)", 100, "10**2"),
        ("10-20", 10 ** (-20), "10**-20"),
        ("10(-20)", 10 ** (-20), "10**-20"),
        ("10**-20", 10 ** (-20), "10**-20"),
        ("10**(-20)", 10 ** (-20), "10**-20"),
        ("10^-20", 10 ** (-20), "10**-20"),
        ("10^(-20)", 10 ** (-20), "10**-20"),
    ],
)
def test_fits_scale_factor(scale, number, string):
    """Power-of-ten prefixes parse (with space or '*') and serialize canonically."""
    base = u.erg / u.s / u.cm**2 / u.Angstrom
    for separator in (" ", "*"):
        parsed = u.Unit(f"{scale}{separator}erg/(s cm**2 Angstrom)", format="fits")
        assert parsed == number * base
        assert parsed.to_string(format="fits") == string + " Angstrom-1 cm-2 erg s-1"
def test_fits_scale_factor_errors():
    """FITS accepts only pure powers of ten as scale factors.

    Also drops the original's unused ``x =`` assignments inside the
    ``pytest.raises`` blocks (the value can never be bound anyway).
    """
    # Non-power-of-ten scale factors cannot be parsed ...
    with pytest.raises(ValueError):
        u.Unit("1000 erg/(s cm**2 Angstrom)", format="fits")
    with pytest.raises(ValueError):
        u.Unit("12 erg/(s cm**2 Angstrom)", format="fits")
    # ... nor written.
    with pytest.raises(ValueError):
        u.Unit(1.2 * u.erg).to_string(format="fits")
    assert u.Unit(100.0 * u.erg).to_string(format="fits") == "10**2 erg"
def test_double_superscript():
    """Regression test for #5870, #8699, #9218; avoid double superscripts."""
    # Units whose symbol is itself a superscript (deg, arcmin, arcsec,
    # hourangle, electron) must fall back to their name when raised to a
    # power, so LaTeX never nests superscripts.
    assert (u.deg).to_string("latex") == r"$\mathrm{{}^{\circ}}$"
    assert (u.deg**2).to_string("latex") == r"$\mathrm{deg^{2}}$"
    assert (u.arcmin).to_string("latex") == r"$\mathrm{{}^{\prime}}$"
    assert (u.arcmin**2).to_string("latex") == r"$\mathrm{arcmin^{2}}$"
    assert (u.arcsec).to_string("latex") == r"$\mathrm{{}^{\prime\prime}}$"
    assert (u.arcsec**2).to_string("latex") == r"$\mathrm{arcsec^{2}}$"
    assert (u.hourangle).to_string("latex") == r"$\mathrm{{}^{h}}$"
    assert (u.hourangle**2).to_string("latex") == r"$\mathrm{hourangle^{2}}$"
    assert (u.electron).to_string("latex") == r"$\mathrm{e^{-}}$"
    assert (u.electron**2).to_string("latex") == r"$\mathrm{electron^{2}}$"
@pytest.mark.parametrize(
    "power,expected",
    (
        (1.0, "m"),
        (2.0, "m2"),
        (-10, "1 / m10"),
        (1.5, "m(3/2)"),
        (2 / 3, "m(2/3)"),
        (7 / 11, "m(7/11)"),
        (-1 / 64, "1 / m(1/64)"),
        (1 / 100, "m(1/100)"),
        # A float that is not recognized as a simple fraction stays a float;
        # an explicit Fraction is preserved exactly.
        (2 / 101, "m(0.019801980198019802)"),
        (Fraction(2, 101), "m(2/101)"),
    ),
)
def test_powers(power, expected):
    """Regression test for #9279 - powers should not be oversimplified."""
    unit = u.m**power
    s = unit.to_string()
    assert s == expected
    # The rendered string must parse back to the same unit.
    assert unit == s
@pytest.mark.parametrize(
    "string,unit",
    [
        ("\N{MICRO SIGN}g", u.microgram),
        ("\N{GREEK SMALL LETTER MU}g", u.microgram),
        ("g\N{MINUS SIGN}1", u.g ** (-1)),
        ("m\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", 1 / u.m),
        ("m s\N{SUPERSCRIPT MINUS}\N{SUPERSCRIPT ONE}", u.m / u.s),
        ("m\N{SUPERSCRIPT TWO}", u.m**2),
        ("m\N{SUPERSCRIPT PLUS SIGN}\N{SUPERSCRIPT TWO}", u.m**2),
        ("m\N{SUPERSCRIPT THREE}", u.m**3),
        ("m\N{SUPERSCRIPT ONE}\N{SUPERSCRIPT ZERO}", u.m**10),
        ("\N{GREEK CAPITAL LETTER OMEGA}", u.ohm),
        ("\N{OHM SIGN}", u.ohm),  # deprecated but for compatibility
        ("\N{MICRO SIGN}\N{GREEK CAPITAL LETTER OMEGA}", u.microOhm),
        ("\N{ANGSTROM SIGN}", u.Angstrom),
        ("\N{ANGSTROM SIGN} \N{OHM SIGN}", u.Angstrom * u.Ohm),
        ("\N{LATIN CAPITAL LETTER A WITH RING ABOVE}", u.Angstrom),
        ("\N{LATIN CAPITAL LETTER A}\N{COMBINING RING ABOVE}", u.Angstrom),
        ("m\N{ANGSTROM SIGN}", u.milliAngstrom),
        ("°C", u.deg_C),
        ("°", u.deg),
        ("M⊙", u.Msun),  # \N{CIRCLED DOT OPERATOR}
        ("L☉", u.Lsun),  # \N{SUN}
        ("M⊕", u.Mearth),  # normal earth symbol = \N{CIRCLED PLUS}
        ("M♁", u.Mearth),  # be generous with \N{EARTH}
        ("R♃", u.Rjup),  # \N{JUPITER}
        ("′", u.arcmin),  # \N{PRIME}
        ("R∞", u.Ry),
        ("Mₚ", u.M_p),
    ],
)
def test_unicode(string, unit):
    """Unicode spellings parse identically via Generic.parse and u.Unit."""
    assert u_format.Generic.parse(string) == unit
    assert u.Unit(string) == unit
@pytest.mark.parametrize(
    "string",
    [
        "g\N{MICRO SIGN}",
        "g\N{MINUS SIGN}",
        "m\N{SUPERSCRIPT MINUS}1",
        "m+\N{SUPERSCRIPT ONE}",
        "m\N{MINUS SIGN}\N{SUPERSCRIPT ONE}",
        "k\N{ANGSTROM SIGN}",
    ],
)
def test_unicode_failures(string):
    """Ill-formed unicode spellings (mixed scripts, trailing signs) must fail."""
    with pytest.raises(ValueError):
        u.Unit(string)
@pytest.mark.parametrize("format_", ("unicode", "latex", "latex_inline"))
def test_parse_error_message_for_output_only_format(format_):
    """Output-only formats refuse to parse, with a message saying so."""
    with pytest.raises(NotImplementedError, match="not parse"):
        u.Unit("m", format=format_)
def test_unknown_parser():
    """An unrecognized format name lists the known (output-only) formats."""
    with pytest.raises(ValueError, match=r"Unknown.*unicode'\] for output only"):
        u.Unit("m", format="foo")
def test_celsius_fits():
    """FITS spellings of degrees Celsius parse and serialize correctly."""
    assert u.Unit("Celsius", format="fits") == u.deg_C
    assert u.Unit("deg C", format="fits") == u.deg_C

    # check that compounds do what we expect: what do we expect?
    # NOTE(review): "deg C kg-1" parsing as coulomb*degree/kg looks
    # surprising but is the documented current behavior being pinned here.
    assert u.Unit("deg C kg-1", format="fits") == u.C * u.deg / u.kg
    assert u.Unit("Celsius kg-1", format="fits") == u.deg_C / u.kg
    assert u.deg_C.to_string("fits") == "Celsius"
|
bad1456ae711dc9326d2e6abc84ac15af857b0e238e08164aafc594f13ec9851 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Regression tests for the units package."""
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.units import utils
def test_initialisation():
    """Unit() passes units through and composes scale factors correctly."""
    assert u.Unit(u.m) is u.m

    ten_m = u.Unit(10.0 * u.m)
    assert ten_m == u.CompositeUnit(10.0, [u.m], [1])
    assert u.Unit(ten_m) is ten_m
    assert u.Unit(10.0 * ten_m) == u.CompositeUnit(100.0, [u.m], [1])

    foo = u.Unit("foo", (10.0 * ten_m) ** 2, namespace=locals())
    assert foo == u.CompositeUnit(10000.0, [u.m], [2])

    assert u.Unit("m") == u.m
    assert u.Unit("") == u.dimensionless_unscaled
    assert u.one == u.dimensionless_unscaled
    assert u.Unit("10 m") == ten_m
    assert u.Unit(10.0) == u.CompositeUnit(10.0, [], [])
    assert u.Unit() == u.dimensionless_unscaled
def test_invalid_power():
    """Fractional powers stay Fractions unless they denote an exact root."""
    cube_root = u.m ** Fraction(1, 3)
    assert isinstance(cube_root.powers[0], Fraction)

    # 1/2 is a square root and is stored as a plain float.
    square_root = u.m ** Fraction(1, 2)
    assert isinstance(square_root.powers[0], float)

    # Test the automatic conversion of a close float to a Fraction.
    auto_fraction = u.m ** (1.0 / 3.0)
    assert isinstance(auto_fraction.powers[0], Fraction)
def test_invalid_compare():
    # Units of different physical types compare unequal (no exception).
    assert not (u.m == u.s)
def test_convert():
    # The internal converter maps 1 hour to 3600 seconds.
    assert u.h._get_converter(u.s)(1) == 3600
def test_convert_fail():
    # Conversions between incompatible physical types raise UnitsError.
    with pytest.raises(u.UnitsError):
        u.cm.to(u.s, 1)
    with pytest.raises(u.UnitsError):
        (u.cm / u.s).to(u.m, 1)
def test_composite():
    """Composite units multiply, raise to powers, and convert correctly."""
    speed_converter = (u.cm / u.s * u.h)._get_converter(u.m)
    assert speed_converter(1) == 36
    assert u.cm * u.cm == u.cm**2
    assert u.cm * u.cm * u.cm == u.cm**3
    # Converting to a scaled target unit divides by its scale.
    assert u.Hz.to(1000 * u.Hz, 1) == 0.001
def test_str():
    # str() gives the bare unit name.
    assert str(u.cm) == "cm"
def test_repr():
    # repr() gives the evaluable Unit(...) form.
    assert repr(u.cm) == 'Unit("cm")'
def test_represents():
    """``represents`` exposes what a (possibly derived) unit stands for."""
    # Irreducible units represent themselves.
    assert u.m.represents is u.m
    assert u.km.represents.scale == 1000.0
    assert u.km.represents.bases == [u.m]
    # Ry is its own base unit but represents a scaled eV.
    assert u.Ry.scale == 1.0 and u.Ry.bases == [u.Ry]
    assert_allclose(u.Ry.represents.scale, 13.605692518464949)
    assert u.Ry.represents.bases == [u.eV]
    bla = u.def_unit("bla", namespace=locals())
    assert bla.represents is bla
    blabla = u.def_unit("blabla", 10 * u.hr, namespace=locals())
    assert blabla.represents.scale == 10.0
    assert blabla.represents.bases == [u.hr]
    # decompose() reduces all the way to SI bases.
    assert blabla.decompose().scale == 10 * 3600
    assert blabla.decompose().bases == [u.s]
def test_units_conversion():
    """Spot-check scale factors for common unit conversions."""
    cases = [
        (u.kpc, u.Mpc, 0.001),
        (u.Mpc, u.kpc, 1000),
        (u.yr, u.Myr, 1.0e-6),
        (u.AU, u.pc, 4.84813681e-6),
        (u.cycle, u.rad, 6.283185307179586),
        (u.spat, u.sr, 12.56637061435917),
    ]
    for source, target, factor in cases:
        assert_allclose(source.to(target), factor)
def test_units_manipulation():
    # Just do some manipulation and check it's happy (no exception raised).
    (u.kpc * u.yr) ** Fraction(1, 3) / u.Myr
    (u.AA * u.erg) ** 9
def test_decompose():
    # Decomposing a unit yields an equivalent unit (equality holds).
    assert u.Ry == u.Ry.decompose()
def test_dimensionless_to_si():
    """
    Issue #1150: Test for conversion of dimensionless quantities
    to the SI system
    """
    ratio = (1.0 * u.kpc) / (1.0 * u.Mpc)
    assert ratio.unit.physical_type == "dimensionless"
    assert_allclose(ratio.si, 0.001)
def test_dimensionless_to_cgs():
    """
    Issue #1150: Test for conversion of dimensionless quantities
    to the CGS system
    """
    ratio = (1.0 * u.m) / (1.0 * u.km)
    assert ratio.unit.physical_type == "dimensionless"
    assert_allclose(ratio.cgs, 0.001)
def test_unknown_unit():
    # With parse_strict="warn", an unknown unit warns instead of raising.
    with pytest.warns(u.UnitsWarning, match="FOO"):
        u.Unit("FOO", parse_strict="warn")
def test_multiple_solidus():
    """Multiple slashes in one unit string warn (generic) or fail (vounit)."""
    with pytest.warns(
        u.UnitsWarning,
        match="'m/s/kg' contains multiple slashes, which is discouraged",
    ):
        assert u.Unit("m/s/kg").to_string() == "m / (kg s)"
    with pytest.raises(ValueError):
        u.Unit("m/s/kg", format="vounit")
    # Regression test for #9000: solidi in exponents do not count towards this.
    x = u.Unit("kg(3/10) * m(5/2) / s", format="vounit")
    assert x.to_string() == "kg(3/10) m(5/2) / s"
def test_unknown_unit3():
    """UnrecognizedUnit behavior with parse_strict='silent' vs 'strict'."""
    unit = u.Unit("FOO", parse_strict="silent")
    assert isinstance(unit, u.UnrecognizedUnit)
    assert unit.name == "FOO"
    # Two unrecognized units with the same name compare equal/equivalent.
    unit2 = u.Unit("FOO", parse_strict="silent")
    assert unit == unit2
    assert unit.is_equivalent(unit2)
    unit3 = u.Unit("BAR", parse_strict="silent")
    assert unit != unit3
    assert not unit.is_equivalent(unit3)
    # Also test basic (in)equalities.
    assert unit == "FOO"
    assert unit != u.m
    # next two from gh-7603.
    assert unit != None
    assert unit not in (None, u.m)
    # Unrecognized units cannot be converted ...
    with pytest.raises(ValueError):
        unit._get_converter(unit3)
    # ... but can still be rendered in any output format.
    _ = unit.to_string("latex")
    _ = unit2.to_string("cgs")
    with pytest.raises(ValueError):
        u.Unit("BAR", parse_strict="strict")
    with pytest.raises(TypeError):
        u.Unit(None)
def test_invalid_scale():
    # A non-numeric "scale" (here a list of strings) cannot multiply a unit.
    with pytest.raises(TypeError):
        ["a", "b", "c"] * u.m
def test_cds_power():
    """CDS-format powers of ten ('10+22') set the unit's scale factor."""
    parsed = u.Unit("10+22/cm2", format="cds", parse_strict="silent")
    assert parsed.scale == 1e22
def test_register():
    # def_unit() injects the unit into the given namespace ...
    foo = u.def_unit("foo", u.m**3, namespace=locals())
    assert "foo" in locals()
    # ... and add_enabled_units() registers it only within the context.
    with u.add_enabled_units(foo):
        assert "foo" in u.get_current_unit_registry().registry
    assert "foo" not in u.get_current_unit_registry().registry
def test_in_units():
    # Smoke test for the in_units() conversion API (value is discarded).
    speed_unit = u.cm / u.s
    _ = speed_unit.in_units(u.pc / u.hour, 1)
def test_null_unit():
    # A unit divided by itself is the dimensionless unit.
    assert (u.m / u.m) == u.Unit(1)
def test_unrecognized_equivalency():
    """is_equivalent tolerates unparseable strings, returning False."""
    assert u.m.is_equivalent("pc") is True
    assert u.m.is_equivalent("foo") is False
def test_convertible_exception():
    # The error message names the incompatible physical types.
    with pytest.raises(u.UnitsError, match=r"length.+ are not convertible"):
        u.AA.to(u.h * u.s**2)
def test_convertible_exception2():
    # Both physical types (length, time) appear in the message.
    with pytest.raises(u.UnitsError, match=r"length. and .+time.+ are not convertible"):
        u.m.to(u.s)
def test_invalid_type():
    # Arbitrary objects cannot be turned into units.
    class A:
        pass

    with pytest.raises(TypeError):
        u.Unit(A())
def test_steradian():
    """
    Issue #599
    """
    assert u.sr.is_equivalent(u.rad * u.rad)
    # Composing against the CGS bases yields rad; composing against the
    # full cgs namespace finds sr itself.
    results = u.sr.compose(units=u.cgs.bases)
    assert results[0].bases[0] is u.rad
    results = u.sr.compose(units=u.cgs.__dict__)
    assert results[0].bases[0] is u.sr
def test_decompose_bases():
    """
    From issue #576
    """
    from astropy.constants import e
    from astropy.units import cgs

    # The esu charge unit decomposes onto the CGS bases with fractional powers.
    d = e.esu.unit.decompose(bases=cgs.bases)
    assert d._bases == [u.cm, u.g, u.s]
    assert d._powers == [Fraction(3, 2), 0.5, -1]
    assert d._scale == 1.0
def test_complex_compose():
    """A product unit composes into named units (lumen and weber).

    The local variable previously shadowed the builtin ``complex``; it has
    been renamed (no behavior change).
    """
    product = u.cd * u.sr * u.Wb
    composed = product.compose()
    assert set(composed[0]._bases) == {u.lm, u.Wb}
def test_equiv_compose():
    """compose() honors equivalencies (spectral: metre <-> hertz)."""
    candidates = u.m.compose(equivalencies=u.spectral())
    assert any(candidate.bases == [u.Hz] for candidate in candidates)
def test_empty_compose():
    # Composing against an empty unit list cannot succeed.
    with pytest.raises(u.UnitsError):
        u.m.compose(units=[])
def _unit_as_str(unit):
    # Doubles as the sort key giving a stable test order and as the
    # pytest id shown in [] for each parametrized unit.
    return str(unit)


# A set guards against parametrizing the same unit twice.
COMPOSE_ROUNDTRIP = {
    val
    for val in u.__dict__.values()
    if isinstance(val, u.UnitBase) and not isinstance(val, u.PrefixUnit)
}


@pytest.mark.parametrize(
    "unit", sorted(COMPOSE_ROUNDTRIP, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_roundtrip(unit):
    def roundtrips(composed):
        # A non-empty composition must lead with the original unit; an
        # empty one only matches if the original had no bases either.
        if composed.bases:
            return composed.bases[0] is unit
        return len(unit.bases) == 0

    assert any(roundtrips(composed) for composed in unit.decompose().compose())
# A set guards against parametrizing the same unit twice.
COMPOSE_CGS_TO_SI = {
    val
    for val in u.cgs.__dict__.values()
    # deg_C cannot be decomposed, so leave it out.
    if isinstance(val, u.UnitBase)
    and not isinstance(val, u.PrefixUnit)
    and val != u.cgs.deg_C
}


@pytest.mark.parametrize(
    "unit", sorted(COMPOSE_CGS_TO_SI, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_cgs_to_si(unit):
    si = unit.to_system(u.si)
    assert [option.is_equivalent(unit) for option in si]
    assert si[0] == unit.si


# A set guards against parametrizing the same unit twice.
COMPOSE_SI_TO_CGS = {
    val
    for val in u.si.__dict__.values()
    # deg_C cannot be decomposed, so leave it out.
    if isinstance(val, u.UnitBase)
    and not isinstance(val, u.PrefixUnit)
    and val != u.si.deg_C
}


@pytest.mark.parametrize(
    "unit", sorted(COMPOSE_SI_TO_CGS, key=_unit_as_str), ids=_unit_as_str
)
def test_compose_si_to_cgs(unit):
    try:
        cgs = unit.to_system(u.cgs)
    except u.UnitsError:
        # Units involving Ampere cannot be converted to CGS without more
        # context; any other failure is a genuine error.
        if u.A not in unit.decompose().bases:
            raise
    else:
        assert [option.is_equivalent(unit) for option in cgs]
        assert cgs[0] == unit.cgs
def test_to_si():
    """Check units that are not official derived units.

    Should not appear on its own or as part of a composite unit.
    """
    # TODO: extend to all units not listed in Tables 1--6 of
    # https://physics.nist.gov/cuu/Units/units.html
    # See gh-10585.
    # This was always the case
    assert u.bar.si is not u.bar
    # But this used to fail.
    assert u.bar not in (u.kg / (u.s**2 * u.sr * u.nm)).si._bases


def test_to_cgs():
    pa_in_cgs = u.Pa.to_system(u.cgs)
    assert pa_in_cgs[1]._bases[0] is u.Ba
    assert pa_in_cgs[1]._scale == 10.0


def test_decompose_to_cgs():
    from astropy.units import cgs

    assert u.m.decompose(bases=cgs.bases)._bases[0] is cgs.cm


def test_compose_issue_579():
    unit = u.kg * u.s**2 / u.m
    result = unit.compose(units=[u.N, u.s, u.m])

    assert len(result) == 1
    assert result[0]._bases == [u.s, u.N, u.m]
    assert result[0]._powers == [4, 1, -2]


def test_compose_prefix_unit():
    x = u.m.compose(units=(u.m,))
    assert x[0].bases[0] is u.m
    assert x[0].scale == 1.0

    # A prefix unit named explicitly in `units` is honored whether or not
    # include_prefix_units is requested.
    x = u.m.compose(units=[u.km], include_prefix_units=True)
    assert x[0].bases[0] is u.km
    assert x[0].scale == 0.001

    x = u.m.compose(units=[u.km])
    assert x[0].bases[0] is u.km
    assert x[0].scale == 0.001

    x = (u.km / u.s).compose(units=(u.pc, u.Myr))
    assert x[0].bases == [u.pc, u.Myr]
    assert_allclose(x[0].scale, 1.0227121650537077)

    with pytest.raises(u.UnitsError):
        (u.km / u.s).compose(units=(u.pc, u.Myr), include_prefix_units=False)


def test_self_compose():
    unit = u.kg * u.s
    assert len(unit.compose(units=[u.g, u.s])) == 1
def test_compose_failed():
    with pytest.raises(u.UnitsError):
        u.kg.compose(units=[u.N])


def test_compose_fractional_powers():
    # Warning: with a complicated unit, this test becomes very slow;
    # e.g., x = (u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2)
    # takes 3 s
    x = u.m**0.5 / u.yr**1.5

    # Every composition option, in any unit system, must decompose back
    # to the same thing as the original unit.
    for factored in (x.compose(), x.compose(units=u.cgs), x.compose(units=u.si)):
        for option in factored:
            assert x.decompose() == option.decompose()


def test_compose_best_unit_first():
    results = u.l.compose()
    assert len(results[0].bases) == 1
    assert results[0].bases[0] is u.l

    results = (u.s**-1).compose()
    assert results[0].bases[0] in (u.Hz, u.Bq)

    results = (u.Ry.decompose()).compose()
    assert results[0].bases[0] is u.Ry


def test_compose_no_duplicates():
    new = u.kg / u.s**3 * u.au**2.5 / u.yr**0.5 / u.sr**2
    composed = new.compose(units=u.cgs.bases)
    assert len(composed) == 1
def test_long_int():
    """Regression test for issue #672."""
    sigma = 10**21 * u.M_p / u.cm**2
    sigma.to(u.M_sun / u.pc**2)


def test_endian_independence():
    """
    Regression test for #744

    A logic issue in the units code meant that big endian arrays could not be
    converted because the dtype is '>f4', not 'float32', and the code was
    looking for the strings 'float' or 'int'.
    """
    dtypes = [
        endian + kind + size
        for endian in "<>"
        for kind in "if"
        for size in ("4", "8")
    ]
    for dtype in dtypes:
        x = np.array([1, 2, 3], dtype=dtype)
        u.m.to(u.cm, x)


def test_radian_base():
    """Regression test for issue #863."""
    assert (1 * u.degree).si.unit == u.rad


def test_no_as():
    # We don't define 'as', since it is a keyword, but we
    # do want to define the long form (`attosecond`).
    assert not hasattr(u, "as")
    assert hasattr(u, "attosecond")


def test_no_duplicates_in_names():
    # Regression test for #5036
    assert u.ct.names == ["ct", "count"]
    assert u.ct.short_names == ["ct", "count"]
    assert u.ct.long_names == ["count"]
    assert set(u.ph.names) == set(u.ph.short_names) | set(u.ph.long_names)
def test_pickling():
    def registry():
        # Re-fetch each time: the active registry changes inside contexts.
        return u.get_current_unit_registry().registry

    pickled = pickle.dumps(u.m)
    assert pickle.loads(pickled) is u.m

    new_unit = u.IrreducibleUnit(["foo"], format={"baz": "bar"})
    # This is local, so the unit should not be registered.
    assert "foo" not in registry()

    # Test pickling of this unregistered unit.
    pickled = pickle.dumps(new_unit)
    restored = pickle.loads(pickled)
    assert restored is not new_unit
    assert restored.names == ["foo"]
    assert restored.get_format_name("baz") == "bar"
    # It should still not be registered.
    assert "foo" not in registry()

    # Now try the same with a registered unit.
    with u.add_enabled_units([new_unit]):
        pickled = pickle.dumps(new_unit)
        assert "foo" in registry()
        # While registered, unpickling yields the very same object.
        assert pickle.loads(pickled) is new_unit

    # Check that a registered unit can be loaded and that it gets re-enabled.
    with u.add_enabled_units([]):
        assert "foo" not in registry()
        restored = pickle.loads(pickled)
        assert restored is not new_unit
        assert restored.names == ["foo"]
        assert restored.get_format_name("baz") == "bar"
        assert "foo" in registry()

    # And just to be sure, that it gets removed outside of the context.
    assert "foo" not in registry()


def test_pickle_between_sessions():
    """We cannot really test between sessions easily, so fake it.

    This test can be changed if the pickle protocol or the code
    changes enough that it no longer works.
    """
    hash_m = hash(u.m)
    unit = pickle.loads(
        b"\x80\x04\x95\xd6\x00\x00\x00\x00\x00\x00\x00\x8c\x12"
        b"astropy.units.core\x94\x8c\x1a_recreate_irreducible_unit"
        b"\x94\x93\x94h\x00\x8c\x0fIrreducibleUnit\x94\x93\x94]\x94"
        b"(\x8c\x01m\x94\x8c\x05meter\x94e\x88\x87\x94R\x94}\x94(\x8c\x06"
        b"_names\x94]\x94(h\x06h\x07e\x8c\x0c_short_names"
        b"\x94]\x94h\x06a\x8c\x0b_long_names\x94]\x94h\x07a\x8c\x07"
        b"_format\x94}\x94\x8c\x07__doc__\x94\x8c "
        b"meter: base unit of length in SI\x94ub."
    )
    assert unit is u.m
    assert hash(u.m) == hash_m
@pytest.mark.parametrize(
    "unit",
    [u.IrreducibleUnit(["foo"], format={"baz": "bar"}), u.Unit("m_per_s", u.m / u.s)],
)
def test_pickle_does_not_keep_memoized_hash(unit):
    """
    Tests private attribute since the problem with _hash being pickled
    and restored only appeared if the unpickling was done in another
    session, for which the hash no longer was valid, and it is difficult
    to mimic separate sessions in a simple test. See gh-11872.
    """
    unit_hash = hash(unit)
    assert unit._hash is not None

    duplicate = pickle.loads(pickle.dumps(unit))
    # unit is not registered so we get a copy.
    assert duplicate is not unit
    # The memoized hash must not survive the round-trip ...
    assert duplicate._hash is None
    # ... but recomputing it gives the same value.
    assert hash(duplicate) == unit_hash

    with u.add_enabled_units([unit]):
        # unit is registered, so we get a reference.
        reloaded = pickle.loads(pickle.dumps(unit))
        if isinstance(unit, u.IrreducibleUnit):
            assert reloaded is unit
        else:
            assert reloaded is not unit

    # pickle.load used to override the hash, although in this case
    # it would be the same anyway, so not clear this tests much.
    assert hash(unit) == unit_hash


def test_pickle_unrecognized_unit():
    """Regression test for issue #2047."""
    unknown = u.Unit("asdf", parse_strict="silent")
    pickle.loads(pickle.dumps(unknown))


def test_duplicate_define():
    with pytest.raises(ValueError):
        u.def_unit("m", namespace=u.__dict__)


def test_all_units():
    from astropy.units.core import get_current_unit_registry

    registry = get_current_unit_registry()
    assert len(registry.all_units) > len(registry.non_prefix_units)
def test_repr_latex():
    assert u.m._repr_latex_() == u.m.to_string("latex")


def test_operations_with_strings():
    assert u.m / "5s" == (u.m / (5.0 * u.s))
    assert u.m * "5s" == (5.0 * u.m * u.s)


def test_comparison():
    assert u.m > u.cm
    assert u.m >= u.cm
    assert u.cm < u.m
    assert u.cm <= u.m

    # Incommensurable units cannot be ordered.
    with pytest.raises(u.UnitsError):
        u.m > u.kg


def test_compose_into_arbitrary_units():
    # Issue #1438
    from astropy.constants import G

    G.decompose([u.kg, u.km, u.Unit("15 s")])


def test_unit_multiplication_with_string():
    """Check that multiplication with strings produces the correct unit."""
    base = u.cm
    other = "kg"
    assert other * base == u.Unit(other) * base
    assert base * other == base * u.Unit(other)


def test_unit_division_by_string():
    """Check that division with strings produces the correct unit."""
    base = u.cm
    other = "kg"
    assert other / base == u.Unit(other) / base
    assert base / other == base / u.Unit(other)


def test_sorted_bases():
    """See #1616."""
    assert (u.m * u.Jy).bases == (u.Jy * u.m).bases


def test_megabit():
    """See #1543"""
    assert u.Mbit is u.Mb
    assert u.megabit is u.Mb

    assert u.Mbyte is u.MB
    assert u.megabyte is u.MB


def test_composite_unit_get_format_name():
    """See #1576"""
    unit1 = u.Unit("nrad/s")
    unit2 = u.Unit("Hz(1/2)")
    assert str(u.CompositeUnit(1, [unit1, unit2], [1, -1])) == "nrad / (Hz(1/2) s)"


def test_unicode_policy():
    from astropy.tests.helper import assert_follows_unicode_guidelines

    assert_follows_unicode_guidelines(u.degree, roundtrip=u.__dict__)
def test_suggestions():
    # Each misspelling should produce the listed "Did you mean" hint.
    cases = [
        ("microns", "micron"),
        ("s/microns", "micron"),
        ("M", "m"),
        ("metre", "meter"),
        ("angstroms", "Angstrom or angstrom"),
        ("milimeter", "millimeter"),
        ("ångström", "Angstrom, angstrom, mAngstrom or mangstrom"),
        ("kev", "EV, eV, kV or keV"),
    ]
    for search, matches in cases:
        with pytest.raises(ValueError, match=f"Did you mean {matches}"):
            u.Unit(search)


def test_fits_hst_unit():
    """See #1911."""
    with pytest.warns(u.UnitsWarning, match="multiple slashes") as w:
        x = u.Unit("erg /s /cm**2 /angstrom")
    assert x == u.erg * u.s**-1 * u.cm**-2 * u.angstrom**-1
    assert len(w) == 1


def test_barn_prefixes():
    """Regression test for https://github.com/astropy/astropy/issues/3753"""
    assert u.fbarn is u.femtobarn
    assert u.pbarn is u.picobarn
def test_fractional_powers():
    """See #2069"""
    m = 1e9 * u.Msun
    tH = 1.0 / (70.0 * u.km / u.s / u.Mpc)
    vc = 200 * u.km / u.s

    # All four combinations of Fraction/float powers and cgs/default
    # units must agree once converted to parsec.
    x = (c.G**2 * m**2 * tH.cgs) ** Fraction(1, 3) / vc
    v1 = x.to("pc")

    x = (c.G**2 * m**2 * tH) ** Fraction(1, 3) / vc
    v2 = x.to("pc")

    x = (c.G**2 * m**2 * tH.cgs) ** (1.0 / 3.0) / vc
    v3 = x.to("pc")

    x = (c.G**2 * m**2 * tH) ** (1.0 / 3.0) / vc
    v4 = x.to("pc")

    assert_allclose(v1, v2)
    assert_allclose(v2, v3)
    assert_allclose(v3, v4)

    # A power with a large denominator stays a plain float ...
    x = u.m ** (1.0 / 101.0)
    assert isinstance(x.powers[0], float)

    # ... but one close to a simple ratio is recognized as a Fraction.
    x = u.m ** (3.0 / 7.0)
    assert isinstance(x.powers[0], Fraction)
    assert x.powers[0].numerator == 3
    assert x.powers[0].denominator == 7

    x = u.cm ** Fraction(1, 2) * u.cm ** Fraction(2, 3)
    assert isinstance(x.powers[0], Fraction)
    assert x.powers[0] == Fraction(7, 6)

    # Regression test for #9258.
    x = (u.TeV ** (-2.2)) ** (1 / -2.2)
    assert isinstance(x.powers[0], Fraction)
    assert x.powers[0] == Fraction(1, 1)


def test_sqrt_mag():
    sqrt_mag = u.mag**0.5
    # The decomposed scale is complex, and squaring recovers mag.
    assert hasattr(sqrt_mag.decompose().scale, "imag")
    assert (sqrt_mag.decompose()) ** 2 == u.mag


def test_composite_compose():
    # Issue #2382
    composite_unit = u.s.compose(units=[u.Unit("s")])[0]
    u.s.compose(units=[composite_unit])


def test_data_quantities():
    assert u.byte.is_equivalent(u.bit)


def test_compare_with_none():
    # Ensure that equality comparisons with `None` work, and don't
    # raise exceptions. We are deliberately not using `is None` here
    # because that doesn't trigger the bug. See #3108.
    assert not (u.m == None)  # noqa: E711
    assert u.m != None  # noqa: E711


def test_validate_power_detect_fraction():
    frac = utils.validate_power(1.1666666666666665)
    assert isinstance(frac, Fraction)
    assert frac.numerator == 7
    assert frac.denominator == 6
def test_complex_fractional_rounding_errors():
    # See #3788
    kappa = 0.34 * u.cm**2 / u.g
    r_0 = 886221439924.7849 * u.cm
    q = 1.75
    rho_0 = 5e-10 * u.solMass / u.solRad**3
    y = 0.5
    beta = 0.19047619047619049
    a = 0.47619047619047628
    m_h = 1e6 * u.solMass

    t1 = 2 * c.c / (kappa * np.sqrt(np.pi))
    t2 = (r_0**-q) / (rho_0 * y * beta * (a * c.G * m_h) ** 0.5)

    result = (t1 * t2) ** -0.8

    assert result.unit.physical_type == "length"
    result.to(u.solRad)


def test_fractional_rounding_errors_simple():
    x = (u.m**1.5) ** Fraction(4, 5)
    assert isinstance(x.powers[0], Fraction)
    assert x.powers[0].numerator == 6
    assert x.powers[0].denominator == 5


def test_enable_unit_groupings():
    from astropy.units import cds

    with cds.enable():
        assert cds.geoMass in u.kg.find_equivalent_units()

    from astropy.units import imperial

    with imperial.enable():
        assert imperial.inch in u.m.find_equivalent_units()


def test_unit_summary_prefixes():
    """
    Test for a few units that the unit summary table correctly reports
    whether or not that unit supports prefixes.

    Regression test for https://github.com/astropy/astropy/issues/3835
    """
    from astropy.units import astrophys

    for unit, _, _, _, prefixes in utils._iter_unit_summary(astrophys.__dict__):
        if unit.name in ("lyr", "pc", "barn"):
            assert prefixes
        elif unit.name in ("cycle", "spat"):
            assert prefixes == "No"
        elif unit.name == "vox":
            assert prefixes == "Yes"


def test_raise_to_negative_power():
    """Test that order of bases is changed when raising to negative power.

    Regression test for https://github.com/astropy/astropy/issues/8260
    """
    m2s2 = u.m**2 / u.s**2
    spm = m2s2 ** (-1 / 2)
    assert spm.bases == [u.s, u.m]
    assert spm.powers == [1, -1]
    assert spm == u.s / u.m
@pytest.mark.parametrize(
    "name, symbol, multiplying_factor",
    [
        ("quetta", "Q", 1e30),
        ("ronna", "R", 1e27),
        ("yotta", "Y", 1e24),
        ("zetta", "Z", 1e21),
        ("exa", "E", 1e18),
        ("peta", "P", 1e15),
        ("tera", "T", 1e12),
        ("giga", "G", 1e9),
        ("mega", "M", 1e6),
        ("kilo", "k", 1e3),
        ("deca", "da", 1e1),
        ("deci", "d", 1e-1),
        ("centi", "c", 1e-2),
        ("milli", "m", 1e-3),
        ("micro", "u", 1e-6),
        ("nano", "n", 1e-9),
        ("pico", "p", 1e-12),
        ("femto", "f", 1e-15),
        ("atto", "a", 1e-18),
        ("zepto", "z", 1e-21),
        ("yocto", "y", 1e-24),
        ("ronto", "r", 1e-27),
        ("quecto", "q", 1e-30),
    ],
)
def test_si_prefixes(name, symbol, multiplying_factor):
    # One gram expressed in the prefixed unit, via symbol and via full name.
    base = 1 * u.g
    via_symbol = base.to(f"{symbol}g")
    via_name = base.to(f"{name}gram")

    # Both spellings represent the same physical quantity ...
    assert u.isclose(via_name, base)
    assert u.isclose(via_symbol, base)

    # ... and the numeric value scales by the prefix factor.
    assert u.isclose(base.value / via_symbol.value, multiplying_factor)
|
f2281654e76a5c814e451ce4b698238fe02822cee649031a91a0b3ec2741dd60 | # The purpose of these tests is to ensure that calling quantities using
# array methods returns quantities with the right units, or raises exceptions.
import sys
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.utils.compat import NUMPY_LT_1_21_1, NUMPY_LT_1_22
class TestQuantityArrayCopy:
    """
    Test whether arrays are properly copied/used in place
    """

    def test_copy_on_creation(self):
        values = np.arange(1000.0)
        q_nocopy = u.Quantity(values, "km/s", copy=False)
        q_copy = u.Quantity(values, "km/s", copy=True)
        values[0] = -1.0
        # The no-copy Quantity shares memory with `values`; the copy does not.
        assert q_nocopy[0].value == values[0]
        assert q_copy[0].value != values[0]

    def test_to_copies(self):
        q = u.Quantity(np.arange(1.0, 100.0), "km/s")
        q2 = q.to(u.m / u.s)
        assert np.all(q.value != q2.value)
        q3 = q.to(u.km / u.s)
        assert np.all(q.value == q3.value)
        # Even an identity conversion copies: mutating q leaves q3 alone.
        q[0] = -1.0 * u.km / u.s
        assert q[0].value != q3[0].value

    def test_si_copies(self):
        q = u.Quantity(np.arange(100.0), "m/s")
        q_si = q.si
        assert np.all(q.value == q_si.value)
        q[0] = -1.0 * u.m / u.s
        assert q[0].value != q_si[0].value

    def test_getitem_is_view(self):
        """Check that [keys] work, and that, like ndarray, it returns
        a view, so that changing one changes the other.

        Also test that one can add axes (closes #1422)
        """
        q = u.Quantity(np.arange(100.0), "m/s")
        sub = q[10:20]
        sub[0] = -1.0 * u.m / u.s
        assert sub[0] == q[10]
        # also check that getitem can do new axes
        expanded = q[:, np.newaxis]
        expanded[10, 0] = -9 * u.m / u.s
        assert np.all(expanded.flatten() == q)

    def test_flat(self):
        q = u.Quantity(np.arange(9.0).reshape(3, 3), "m/s")
        q_flat = q.flat
        # check that a single item is a quantity (with the right value)
        assert q_flat[8] == 8.0 * u.m / u.s
        # and that getting a range works as well
        assert np.all(q_flat[0:2] == np.arange(2.0) * u.m / u.s)
        # as well as getting items via iteration
        items = list(q.flat)
        assert np.all(u.Quantity(items) == u.Quantity(list(q.value.flat), q.unit))
        # check that flat works like a view of the real array
        q_flat[8] = -1.0 * u.km / u.s
        assert q_flat[8] == -1.0 * u.km / u.s
        assert q[2, 2] == -1.0 * u.km / u.s
        # while if one goes by an iterated item, a copy is made
        items[8] = -2 * u.km / u.s
        assert items[8] == -2.0 * u.km / u.s
        assert q_flat[8] == -1.0 * u.km / u.s
        assert q[2, 2] == -1.0 * u.km / u.s
class TestQuantityReshapeFuncs:
    """Test different ndarray methods that alter the array shape

    tests: reshape, squeeze, ravel, flatten, transpose, swapaxes
    """

    def _check(self, result, expected_value, unit):
        # Shared assertions: class, unit, and values all survive reshaping.
        assert isinstance(result, u.Quantity)
        assert result.unit == unit
        assert np.all(result.value == expected_value)

    def test_reshape(self):
        q = np.arange(6.0) * u.m
        self._check(q.reshape(3, 2), q.value.reshape(3, 2), q.unit)

    def test_squeeze(self):
        q = np.arange(6.0).reshape(6, 1) * u.m
        self._check(q.squeeze(), q.value.squeeze(), q.unit)

    def test_ravel(self):
        q = np.arange(6.0).reshape(3, 2) * u.m
        self._check(q.ravel(), q.value.ravel(), q.unit)

    def test_flatten(self):
        q = np.arange(6.0).reshape(3, 2) * u.m
        self._check(q.flatten(), q.value.flatten(), q.unit)

    def test_transpose(self):
        q = np.arange(6.0).reshape(3, 2) * u.m
        self._check(q.transpose(), q.value.transpose(), q.unit)

    def test_swapaxes(self):
        q = np.arange(6.0).reshape(3, 1, 2) * u.m
        self._check(q.swapaxes(0, 2), q.value.swapaxes(0, 2), q.unit)

    @pytest.mark.xfail(
        sys.byteorder == "big" and NUMPY_LT_1_21_1, reason="Numpy GitHub Issue 19153"
    )
    def test_flat_attributes(self):
        """While ``flat`` doesn't make a copy, it changes the shape."""
        q = np.arange(6.0).reshape(3, 1, 2) * u.m
        qf = q.flat
        # flat shape is same as before reshaping
        assert len(qf) == 6
        # see TestQuantityArrayCopy.test_flat for tests of iteration
        # and slicing and setting. Here we test the properties and methods to
        # match `numpy.ndarray.flatiter`
        assert qf.base is q
        # testing the indices -- flat and full -- into the array
        assert qf.coords == (0, 0, 0)  # to start
        assert qf.index == 0
        # now consume the iterator
        endindices = [(qf.index, qf.coords) for _ in qf][-2]  # next() oversteps
        assert endindices[0] == 5
        assert endindices[1] == (2, 0, 1)  # shape of q - 1

        # also check the flat iterator copies properly
        flat_copy = qf.copy()
        assert all(flat_copy == q.flatten())
        assert isinstance(flat_copy, u.Quantity)
        assert not np.may_share_memory(flat_copy, q)
class TestQuantityStatsFuncs:
    """
    Test statistical functions.

    The ``*_inplace`` tests deliberately pass an ``out`` quantity with a
    *different* unit to check that it is overwritten, unit included.
    """

    def test_mean(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        assert_array_equal(np.mean(q1), 3.6 * u.m)
        assert_array_equal(np.mean(q1, keepdims=True), [3.6] * u.m)

    def test_mean_inplace(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        qi = 1.5 * u.s
        qi2 = np.mean(q1, out=qi)
        assert qi2 is qi
        assert qi == 3.6 * u.m

    def test_mean_where(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0, 7.0]) * u.m
        assert_array_equal(np.mean(q1, where=q1 < 7 * u.m), 3.6 * u.m)

    def test_std(self):
        q1 = np.array([1.0, 2.0]) * u.m
        assert_array_equal(np.std(q1), 0.5 * u.m)
        assert_array_equal(q1.std(axis=-1, keepdims=True), [0.5] * u.m)

    def test_std_inplace(self):
        q1 = np.array([1.0, 2.0]) * u.m
        qi = 1.5 * u.s
        np.std(q1, out=qi)
        assert qi == 0.5 * u.m

    def test_std_where(self):
        q1 = np.array([1.0, 2.0, 3.0]) * u.m
        assert_array_equal(np.std(q1, where=q1 < 3 * u.m), 0.5 * u.m)

    def test_var(self):
        q1 = np.array([1.0, 2.0]) * u.m
        assert_array_equal(np.var(q1), 0.25 * u.m**2)
        assert_array_equal(q1.var(axis=0, keepdims=True), [0.25] * u.m**2)

    def test_var_inplace(self):
        q1 = np.array([1.0, 2.0]) * u.m
        qi = 1.5 * u.s
        np.var(q1, out=qi)
        assert qi == 0.25 * u.m**2

    def test_var_where(self):
        q1 = np.array([1.0, 2.0, 3.0]) * u.m
        assert_array_equal(np.var(q1, where=q1 < 3 * u.m), 0.25 * u.m**2)

    def test_median(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        assert np.median(q1) == 4.0 * u.m

    def test_median_inplace(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        qi = 1.5 * u.s
        np.median(q1, out=qi)
        assert qi == 4 * u.m

    def test_min(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        assert np.min(q1) == 1.0 * u.m

    def test_min_inplace(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        qi = 1.5 * u.s
        np.min(q1, out=qi)
        assert qi == 1.0 * u.m

    def test_min_where(self):
        q1 = np.array([0.0, 1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        assert np.min(q1, initial=10 * u.m, where=q1 > 0 * u.m) == 1.0 * u.m

    def test_argmin(self):
        q1 = np.array([6.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        assert np.argmin(q1) == 1

    @pytest.mark.skipif(NUMPY_LT_1_22, reason="keepdims only introduced in numpy 1.22")
    def test_argmin_keepdims(self):
        q1 = np.array([[6.0, 2.0], [4.0, 5.0]]) * u.m
        assert_array_equal(q1.argmin(axis=0, keepdims=True), np.array([[1, 0]]))

    def test_max(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        assert np.max(q1) == 6.0 * u.m

    def test_max_inplace(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        qi = 1.5 * u.s
        np.max(q1, out=qi)
        assert qi == 6.0 * u.m

    def test_max_where(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0, 7.0]) * u.m
        assert np.max(q1, initial=0 * u.m, where=q1 < 7 * u.m) == 6.0 * u.m

    def test_argmax(self):
        q1 = np.array([5.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        assert np.argmax(q1) == 4

    @pytest.mark.skipif(NUMPY_LT_1_22, reason="keepdims only introduced in numpy 1.22")
    def test_argmax_keepdims(self):
        q1 = np.array([[6.0, 2.0], [4.0, 5.0]]) * u.m
        assert_array_equal(q1.argmax(axis=0, keepdims=True), np.array([[0, 1]]))

    def test_clip(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m
        # Plain numbers are interpreted in q1's scaled unit; quantities
        # are converted.
        c1 = q1.clip(1500, 5.5 * u.Mm / u.km)
        assert np.all(c1 == np.array([1.5, 2.0, 4.0, 5.0, 5.5]) * u.km / u.m)

    def test_clip_inplace(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m
        c1 = q1.clip(1500, 5.5 * u.Mm / u.km, out=q1)
        assert np.all(q1 == np.array([1.5, 2.0, 4.0, 5.0, 5.5]) * u.km / u.m)
        # The returned array shares memory with the `out` argument.
        c1[0] = 10 * u.Mm / u.mm
        assert np.all(c1.value == q1.value)

    def test_conj(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.km / u.m
        assert np.all(q1.conj() == q1)

    def test_ptp(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        assert np.ptp(q1) == 5.0 * u.m

    def test_ptp_inplace(self):
        q1 = np.array([1.0, 2.0, 4.0, 5.0, 6.0]) * u.m
        qi = 1.5 * u.s
        np.ptp(q1, out=qi)
        assert qi == 5.0 * u.m

    def test_round(self):
        q1 = np.array([1.253, 2.253, 3.253]) * u.kg
        assert np.all(np.round(q1) == np.array([1, 2, 3]) * u.kg)
        assert np.all(np.round(q1, decimals=2) == np.round(q1.value, decimals=2) * u.kg)
        assert np.all(q1.round(decimals=2) == q1.value.round(decimals=2) * u.kg)

    def test_round_inplace(self):
        q1 = np.array([1.253, 2.253, 3.253]) * u.kg
        qi = np.zeros(3) * u.s
        a = q1.round(decimals=2, out=qi)
        assert a is qi
        assert np.all(q1.round(decimals=2) == qi)

    def test_sum(self):
        q1 = np.array([1.0, 2.0, 6.0]) * u.m
        assert np.all(q1.sum() == 9.0 * u.m)
        assert np.all(np.sum(q1) == 9.0 * u.m)

        q2 = np.array([[4.0, 5.0, 9.0], [1.0, 1.0, 1.0]]) * u.s
        assert np.all(q2.sum(0) == np.array([5.0, 6.0, 10.0]) * u.s)
        assert np.all(np.sum(q2, 0) == np.array([5.0, 6.0, 10.0]) * u.s)

    def test_sum_inplace(self):
        q1 = np.array([1.0, 2.0, 6.0]) * u.m
        qi = 1.5 * u.s
        np.sum(q1, out=qi)
        assert qi == 9.0 * u.m

    def test_sum_where(self):
        q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m
        where = q1 < 7 * u.m
        assert np.all(q1.sum(where=where) == 9.0 * u.m)
        assert np.all(np.sum(q1, where=where) == 9.0 * u.m)

    @pytest.mark.parametrize("initial", [0, 0 * u.m, 1 * u.km])
    def test_sum_initial(self, initial):
        q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m
        expected = 16 * u.m + initial
        assert q1.sum(initial=initial) == expected
        assert np.sum(q1, initial=initial) == expected

    def test_sum_dimensionless_initial(self):
        q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.one
        assert q1.sum(initial=1000) == 1016 * u.one

    @pytest.mark.parametrize("initial", [10, 1 * u.s])
    def test_sum_initial_exception(self, initial):
        # A unitless or incompatible `initial` must be rejected.
        q1 = np.array([1.0, 2.0, 6.0, 7.0]) * u.m
        with pytest.raises(u.UnitsError):
            q1.sum(initial=initial)

    def test_cumsum(self):
        q1 = np.array([1, 2, 6]) * u.m
        assert np.all(q1.cumsum() == np.array([1, 3, 9]) * u.m)
        assert np.all(np.cumsum(q1) == np.array([1, 3, 9]) * u.m)

        q2 = np.array([4, 5, 9]) * u.s
        assert np.all(q2.cumsum() == np.array([4, 9, 18]) * u.s)
        assert np.all(np.cumsum(q2) == np.array([4, 9, 18]) * u.s)

    def test_cumsum_inplace(self):
        q1 = np.array([1, 2, 6]) * u.m
        qi = np.ones(3) * u.s
        np.cumsum(q1, out=qi)
        assert np.all(qi == np.array([1, 3, 9]) * u.m)

        q2 = q1
        q1.cumsum(out=q1)
        assert np.all(q2 == qi)

    def test_nansum(self):
        q1 = np.array([1.0, 2.0, np.nan]) * u.m
        assert np.all(q1.nansum() == 3.0 * u.m)
        assert np.all(np.nansum(q1) == 3.0 * u.m)

        q2 = np.array([[np.nan, 5.0, 9.0], [1.0, np.nan, 1.0]]) * u.s
        assert np.all(q2.nansum(0) == np.array([1.0, 5.0, 10.0]) * u.s)
        assert np.all(np.nansum(q2, 0) == np.array([1.0, 5.0, 10.0]) * u.s)

    def test_nansum_inplace(self):
        q1 = np.array([1.0, 2.0, np.nan]) * u.m
        qi = 1.5 * u.s
        qout = q1.nansum(out=qi)
        assert qout is qi
        assert qi == np.nansum(q1.value) * q1.unit

        qi2 = 1.5 * u.s
        qout2 = np.nansum(q1, out=qi2)
        assert qout2 is qi2
        assert qi2 == np.nansum(q1.value) * q1.unit

    @pytest.mark.xfail(
        NUMPY_LT_1_22, reason="'where' keyword argument not supported for numpy < 1.22"
    )
    def test_nansum_where(self):
        q1 = np.array([1.0, 2.0, np.nan, 4.0]) * u.m
        initial = 0 * u.m
        where = q1 < 4 * u.m
        assert np.all(q1.nansum(initial=initial, where=where) == 3.0 * u.m)
        assert np.all(np.nansum(q1, initial=initial, where=where) == 3.0 * u.m)

    def test_prod(self):
        # Products of dimensional quantities are not supported; only
        # dimensionless ones work.  (The unused `as exc` bindings that
        # used to be here were dropped.)
        q1 = np.array([1, 2, 6]) * u.m
        with pytest.raises(u.UnitsError):
            q1.prod()
        with pytest.raises(u.UnitsError):
            np.prod(q1)

        q2 = np.array([3.0, 4.0, 5.0]) * u.Unit(1)
        assert q2.prod() == 60.0 * u.Unit(1)
        assert np.prod(q2) == 60.0 * u.Unit(1)

    def test_cumprod(self):
        # Same restriction as test_prod, for the cumulative product.
        q1 = np.array([1, 2, 6]) * u.m
        with pytest.raises(u.UnitsError):
            q1.cumprod()
        with pytest.raises(u.UnitsError):
            np.cumprod(q1)

        q2 = np.array([3, 4, 5]) * u.Unit(1)
        assert np.all(q2.cumprod() == np.array([3, 12, 60]) * u.Unit(1))
        assert np.all(np.cumprod(q2) == np.array([3, 12, 60]) * u.Unit(1))

    def test_diff(self):
        q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m
        assert np.all(q1.diff() == np.array([1.0, 2.0, 6.0]) * u.m)
        assert np.all(np.diff(q1) == np.array([1.0, 2.0, 6.0]) * u.m)

    def test_ediff1d(self):
        q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m
        assert np.all(q1.ediff1d() == np.array([1.0, 2.0, 6.0]) * u.m)
        assert np.all(np.ediff1d(q1) == np.array([1.0, 2.0, 6.0]) * u.m)

    def test_dot_meth(self):
        q1 = np.array([1.0, 2.0, 4.0, 10.0]) * u.m
        q2 = np.array([3.0, 4.0, 5.0, 6.0]) * u.s
        q3 = q1.dot(q2)
        assert q3.value == np.dot(q1.value, q2.value)
        assert q3.unit == u.m * u.s

    def test_trace_func(self):
        q = np.array([[1.0, 2.0], [3.0, 4.0]]) * u.m
        assert np.trace(q) == 5.0 * u.m

    def test_trace_meth(self):
        q1 = np.array([[1.0, 2.0], [3.0, 4.0]]) * u.m
        assert q1.trace() == 5.0 * u.m

        cont = u.Quantity(4.0, u.s)
        q2 = np.array([[3.0, 4.0], [5.0, 6.0]]) * u.m
        q2.trace(out=cont)
        assert cont == 9.0 * u.m

    def test_clip_func(self):
        q = np.arange(10) * u.m
        assert np.all(
            np.clip(q, 3 * u.m, 6 * u.m)
            == np.array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0]) * u.m
        )

    def test_clip_meth(self):
        expected = np.array([3.0, 3.0, 3.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0]) * u.m

        q1 = np.arange(10) * u.m
        # (The unused intermediate `q3` binding was removed.)
        assert np.all(q1.clip(3 * u.m, 6 * u.m) == expected)

        cont = np.zeros(10) * u.s
        q1.clip(3 * u.m, 6 * u.m, out=cont)
        assert np.all(cont == expected)
class TestArrayConversion:
"""
Test array conversion methods
"""
def test_item(self):
q1 = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
assert q1.item(1) == 2 * q1.unit
q1.itemset(1, 1)
assert q1.item(1) == 1000 * u.m / u.km
q1.itemset(1, 100 * u.cm / u.km)
assert q1.item(1) == 1 * u.m / u.km
with pytest.raises(TypeError):
q1.itemset(1, 1.5 * u.m / u.km)
with pytest.raises(ValueError):
q1.itemset()
q1[1] = 1
assert q1[1] == 1000 * u.m / u.km
q1[1] = 100 * u.cm / u.km
assert q1[1] == 1 * u.m / u.km
with pytest.raises(TypeError):
q1[1] = 1.5 * u.m / u.km
def test_take_put(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
assert q1.take(1) == 2 * u.m / u.km
assert all(q1.take((0, 2)) == np.array([1, 3]) * u.m / u.km)
q1.put((1, 2), (3, 4))
assert np.all(q1.take((1, 2)) == np.array([3000, 4000]) * q1.unit)
q1.put(0, 500 * u.cm / u.km)
assert q1.item(0) == 5 * u.m / u.km
def test_slice(self):
"""Test that setitem changes the unit if needed (or ignores it for
values where that is allowed; viz., #2695)"""
q2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) * u.km / u.m
q1 = q2.copy()
q2[0, 0] = 10000.0
assert q2.unit == q1.unit
assert q2[0, 0].value == 10.0
q2[0] = 9.0 * u.Mm / u.km
assert all(q2.flatten()[:3].value == np.array([9.0, 9.0, 9.0]))
q2[0, :-1] = 8000.0
assert all(q2.flatten()[:3].value == np.array([8.0, 8.0, 9.0]))
with pytest.raises(u.UnitsError):
q2[1, 1] = 10 * u.s
# just to be sure, repeat with a dimensionfull unit
q3 = u.Quantity(np.arange(10.0), "m/s")
q3[5] = 100.0 * u.cm / u.s
assert q3[5].value == 1.0
# and check unit is ignored for 0, inf, nan, where that is reasonable
q3[5] = 0.0
assert q3[5] == 0.0
q3[5] = np.inf
assert np.isinf(q3[5])
q3[5] = np.nan
assert np.isnan(q3[5])
def test_fill(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q1.fill(2)
assert np.all(q1 == 2000 * u.m / u.km)
def test_repeat_compress_diagonal(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
q2 = q1.repeat(2)
assert q2.unit == q1.unit
assert all(q2.value == q1.value.repeat(2))
q2.sort()
assert q2.unit == q1.unit
q2 = q1.compress(np.array([True, True, False, False]))
assert q2.unit == q1.unit
assert all(q2.value == q1.value.compress(np.array([True, True, False, False])))
q1 = np.array([[1, 2], [3, 4]]) * u.m / u.km
q2 = q1.diagonal()
assert q2.unit == q1.unit
assert all(q2.value == q1.value.diagonal())
def test_view(self):
q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
q2 = q1.view(np.ndarray)
assert not hasattr(q2, "unit")
q3 = q2.view(u.Quantity)
assert q3._unit is None
# MaskedArray copies and properties assigned in __dict__
q4 = np.ma.MaskedArray(q1)
assert q4._unit is q1._unit
q5 = q4.view(u.Quantity)
assert q5.unit is q1.unit
def test_slice_to_quantity(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2003
"""
a = np.random.uniform(size=(10, 8))
x, y, z = a[:, 1:4].T * u.km / u.s
total = np.sum(a[:, 1] * u.km / u.s - x)
assert isinstance(total, u.Quantity)
assert total == (0.0 * u.km / u.s)
    def test_byte_type_view_field_changes(self):
        """byteswap/astype/getfield keep the unit while changing the memory view."""
        q1 = np.array([1, 2, 3], dtype=np.int64) * u.m / u.km
        q2 = q1.byteswap()
        assert q2.unit == q1.unit
        assert all(q2.value == q1.value.byteswap())
        q2 = q1.astype(np.float64)
        assert all(q2 == q1)
        assert q2.dtype == np.float64
        # getfield extracts 32-bit halves of the 64-bit integers; offsets 0
        # and 4 pick complementary halves depending on byte order.
        q2a = q1.getfield(np.int32, offset=0)
        q2b = q1.byteswap().getfield(np.int32, offset=4)
        assert q2a.unit == q1.unit
        assert all(q2b.byteswap() == q2a)
    def test_sort(self):
        """argsort/searchsorted return plain (unit-less) index arrays."""
        q1 = np.array([1.0, 5.0, 2.0, 4.0]) * u.km / u.m
        i = q1.argsort()
        assert not hasattr(i, "unit")
        q1.sort()
        # km/m is dimensionless-equivalent, so plain numbers can be searched.
        i = q1.searchsorted([1500, 2500])
        assert not hasattr(i, "unit")
        assert all(
            i == q1.to(u.dimensionless_unscaled).value.searchsorted([1500, 2500])
        )
def test_not_implemented(self):
q1 = np.array([1, 2, 3]) * u.m / u.km
with pytest.raises(NotImplementedError):
q1.choose([0, 0, 1])
with pytest.raises(NotImplementedError):
q1.tolist()
with pytest.raises(NotImplementedError):
q1.tostring()
with pytest.raises(NotImplementedError):
q1.tobytes()
with pytest.raises(NotImplementedError):
q1.tofile(0)
with pytest.raises(NotImplementedError):
q1.dump("a.a")
with pytest.raises(NotImplementedError):
q1.dumps()
class TestRecArray:
    """Record arrays are not specifically supported, but we should not
    prevent their use unnecessarily"""

    def setup_method(self):
        # A squeezed structured (three-float) view of a 4x3 float array.
        base = np.array(np.arange(12.0).reshape(4, 3))
        self.ra = base.view(dtype="f8,f8,f8").squeeze()

    def test_creation(self):
        """A structured array can be wrapped in a Quantity."""
        qra = u.Quantity(self.ra, u.m)
        assert np.all(qra[:2].value == self.ra[:2])

    def test_equality(self):
        """Structured elements can be assigned and compared."""
        qra = u.Quantity(self.ra, u.m)
        qra[1] = qra[2]
        assert qra[1] == qra[2]
|
4c5047dd68085a38c025e7ef58963c8255dacf86a7c1fa3091468a83ba3718ff | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
import itertools
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import constants as c
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
# The logarithmic unit factories, their unit classes, and their quantity
# classes, in matching order so parametrized tests can zip over them.
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
# A sample of physical units used to parametrize tests.
pu_sample = (u.dimensionless_unscaled, u.m, u.g / u.s**2, u.Jy)
class TestLogUnitCreation:
    """Creation of logarithmic units (dex, mag, dB) and their subclasses."""

    def test_logarithmic_units(self):
        """Check logarithmic units are set up correctly."""
        assert u.dB.to(u.dex) == 0.1
        assert u.dex.to(u.mag) == -2.5
        assert u.mag.to(u.dB) == -4

    @pytest.mark.parametrize("lu_unit, lu_cls", zip(lu_units, lu_subclasses))
    def test_callable_units(self, lu_unit, lu_cls):
        # Each factory is itself a unit and callable to produce its subclass.
        assert isinstance(lu_unit, u.UnitBase)
        assert callable(lu_unit)
        assert lu_unit._function_unit_class is lu_cls

    @pytest.mark.parametrize("lu_unit", lu_units)
    def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
        lu = lu_unit()
        assert lu == lu._default_function_unit  # eg, MagUnit() == u.mag
        assert lu._default_function_unit == lu  # and u.mag == MagUnit()

    @pytest.mark.parametrize(
        "lu_unit, physical_unit", itertools.product(lu_units, pu_sample)
    )
    def test_call_units(self, lu_unit, physical_unit):
        """Create a LogUnit subclass using the callable unit and physical unit,
        and do basic check that output is right."""
        lu1 = lu_unit(physical_unit)
        assert lu1.physical_unit == physical_unit
        assert lu1.function_unit == lu1._default_function_unit

    def test_call_invalid_unit(self):
        # Invalid physical-unit arguments should raise.
        with pytest.raises(TypeError):
            u.mag([])
        with pytest.raises(ValueError):
            u.mag(u.mag())

    @pytest.mark.parametrize(
        "lu_cls, physical_unit",
        itertools.product(lu_subclasses + [u.LogUnit], pu_sample),
    )
    def test_subclass_creation(self, lu_cls, physical_unit):
        """Create a LogUnit subclass object for given physical unit,
        and do basic check that output is right."""
        lu1 = lu_cls(physical_unit)
        assert lu1.physical_unit == physical_unit
        assert lu1.function_unit == lu1._default_function_unit
        # A scaled function unit is allowed ...
        lu2 = lu_cls(physical_unit, function_unit=2 * lu1._default_function_unit)
        assert lu2.physical_unit == physical_unit
        assert lu2.function_unit == u.Unit(2 * lu2._default_function_unit)
        # ... but an incompatible one is not.
        with pytest.raises(ValueError):
            lu_cls(physical_unit, u.m)

    def test_lshift_magnitude(self):
        # `value << unit` creates the corresponding logarithmic quantity.
        mag = 1.0 << u.ABmag
        assert isinstance(mag, u.Magnitude)
        assert mag.unit == u.ABmag
        assert mag.value == 1.0
        # same test for an array, which should produce a view
        a2 = np.arange(10.0)
        q2 = a2 << u.ABmag
        assert isinstance(q2, u.Magnitude)
        assert q2.unit == u.ABmag
        assert np.all(q2.value == a2)
        a2[9] = 0.0
        assert np.all(q2.value == a2)
        # a different magnitude unit
        mag = 10.0 << u.STmag
        assert isinstance(mag, u.Magnitude)
        assert mag.unit == u.STmag
        assert mag.value == 10.0

    def test_ilshift_magnitude(self):
        # test in-place operation and conversion
        mag_fnu_cgs = u.mag(u.erg / u.s / u.cm**2 / u.Hz)
        m = np.arange(10.0) * u.mag(u.Jy)
        jy = m.physical
        m2 = m << mag_fnu_cgs
        assert np.all(m2 == m.to(mag_fnu_cgs))
        m2 = m
        m <<= mag_fnu_cgs
        assert m is m2  # Check it was done in-place!
        assert np.all(m.value == m2.value)
        assert m.unit == mag_fnu_cgs
        # Check it works if equivalencies are in-place.
        with u.add_enabled_equivalencies(u.spectral_density(5500 * u.AA)):
            st = jy.to(u.ST)
            m <<= u.STmag
        assert m is m2
        assert_quantity_allclose(m.physical, st)
        assert m.unit == u.STmag

    def test_lshift_errors(self):
        # Conversions requiring missing equivalencies must raise.
        m = np.arange(10.0) * u.mag(u.Jy)
        with pytest.raises(u.UnitsError):
            m << u.STmag
        with pytest.raises(u.UnitsError):
            m << u.Jy
        with pytest.raises(u.UnitsError):
            m <<= u.STmag
        with pytest.raises(u.UnitsError):
            m <<= u.Jy
def test_predefined_magnitudes():
    """The predefined magnitude systems have their documented zero points."""
    cases = [
        (-21.1 * u.STmag, 1.0 * u.erg / u.cm**2 / u.s / u.AA),
        (-48.6 * u.ABmag, 1.0 * u.erg / u.cm**2 / u.s / u.Hz),
        (0 * u.M_bol, c.L_bol0),
        # Apparent bolometric zero point: L_bol0 spread over a 10 pc sphere.
        (0 * u.m_bol, c.L_bol0 / (4.0 * np.pi * (10.0 * c.pc) ** 2)),
    ]
    for magnitude, physical in cases:
        assert_quantity_allclose(magnitude.physical, physical)
def test_predefined_reinitialisation():
    """u.mag(name) reproduces the predefined magnitude units."""
    expected = {
        "STflux": u.STmag,
        "ABflux": u.ABmag,
        "Bol": u.M_bol,
        "bol": u.m_bol,
        # required for backwards-compatibility, at least unless deprecated
        "ST": u.STmag,
        "AB": u.ABmag,
    }
    for name, unit in expected.items():
        assert u.mag(name) == unit
def test_predefined_string_roundtrip():
    """Ensure round-tripping; see #5015"""
    for unit in (u.STmag, u.ABmag, u.M_bol, u.m_bol):
        assert u.Unit(unit.to_string()) == unit
def test_inequality():
    """Check __ne__ works (regression for #5342)."""
    mag_jy = u.mag(u.Jy)
    dex_jy = u.dex(u.Jy)
    mag_jy2 = u.mag(u.Jy**2)
    # Subtracting mag(Jy) from mag(Jy**2) leaves a unit equal to mag(Jy).
    difference = mag_jy2 - mag_jy
    assert mag_jy != dex_jy
    assert mag_jy != mag_jy2
    assert mag_jy == difference
class TestLogUnitStrings:
    """String representations (str/repr/to_string) of logarithmic units."""

    def test_str(self):
        """Do some spot checks that str, repr, etc. work as expected."""
        lu1 = u.mag(u.Jy)
        assert str(lu1) == "mag(Jy)"
        assert repr(lu1) == 'Unit("mag(Jy)")'
        assert lu1.to_string("generic") == "mag(Jy)"
        # Formats that cannot represent function units must raise.
        with pytest.raises(ValueError):
            lu1.to_string("fits")
        with pytest.raises(ValueError):
            lu1.to_string(format="cds")
        lu2 = u.dex()
        assert str(lu2) == "dex"
        assert repr(lu2) == 'Unit("dex(1)")'
        assert lu2.to_string() == "dex(1)"
        # A scaled function unit shows its prefactor.
        lu3 = u.MagUnit(u.Jy, function_unit=2 * u.mag)
        assert str(lu3) == "2 mag(Jy)"
        assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
        assert lu3.to_string() == "2 mag(Jy)"
        lu4 = u.mag(u.ct)
        assert lu4.to_string("generic") == "mag(ct)"
        latex_str = r"$\mathrm{mag}$$\mathrm{\left( \mathrm{ct} \right)}$"
        assert lu4.to_string("latex") == latex_str
        assert lu4.to_string("latex_inline") == latex_str
        assert lu4._repr_latex_() == latex_str
        # latex vs latex_inline differ in how the fraction is rendered.
        lu5 = u.mag(u.ct / u.s)
        assert lu5.to_string("latex") == (
            r"$\mathrm{mag}$$\mathrm{\left( " r"\mathrm{\frac{ct}{s}} \right)}$"
        )
        latex_str = r"$\mathrm{mag}$$\mathrm{\left( \mathrm{ct\,s^{-1}} " r"\right)}$"
        assert lu5.to_string("latex_inline") == latex_str
class TestLogUnitConversion:
    """Conversion between logarithmic units and physical/function units."""

    @pytest.mark.parametrize(
        "lu_unit, physical_unit", itertools.product(lu_units, pu_sample)
    )
    def test_physical_unit_conversion(self, lu_unit, physical_unit):
        """Check various LogUnit subclasses are equivalent and convertible
        to their non-log counterparts."""
        lu1 = lu_unit(physical_unit)
        assert lu1.is_equivalent(physical_unit)
        # 0 in the log unit corresponds to 1 in the physical unit.
        assert lu1.to(physical_unit, 0.0) == 1.0
        assert physical_unit.is_equivalent(lu1)
        assert physical_unit.to(lu1, 1.0) == 0.0
        # A scaled physical unit converts with the inverse scale factor.
        pu = u.Unit(8.0 * physical_unit)
        assert lu1.is_equivalent(physical_unit)
        assert lu1.to(pu, 0.0) == 0.125
        assert pu.is_equivalent(lu1)
        assert_allclose(pu.to(lu1, 0.125), 0.0, atol=1.0e-15)
        # Check we round-trip.
        value = np.linspace(0.0, 10.0, 6)
        assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.0e-15)
        # And that we're not just returning True all the time.
        pu2 = u.g
        assert not lu1.is_equivalent(pu2)
        with pytest.raises(u.UnitsError):
            lu1.to(pu2)
        assert not pu2.is_equivalent(lu1)
        with pytest.raises(u.UnitsError):
            pu2.to(lu1)

    @pytest.mark.parametrize("lu_unit", lu_units)
    def test_container_unit_conversion(self, lu_unit):
        """Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
        is only possible when the physical unit is dimensionless."""
        values = np.linspace(0.0, 10.0, 6)
        lu1 = lu_unit(u.dimensionless_unscaled)
        assert lu1.is_equivalent(lu1.function_unit)
        assert_allclose(lu1.to(lu1.function_unit, values), values)
        lu2 = lu_unit(u.Jy)
        assert not lu2.is_equivalent(lu2.function_unit)
        with pytest.raises(u.UnitsError):
            lu2.to(lu2.function_unit, values)

    @pytest.mark.parametrize(
        "flu_unit, tlu_unit, physical_unit",
        itertools.product(lu_units, lu_units, pu_sample),
    )
    def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
        """Check various LogUnit subclasses are equivalent and convertible
        to each other if they correspond to equivalent physical units."""
        values = np.linspace(0.0, 10.0, 6)
        flu = flu_unit(physical_unit)
        tlu = tlu_unit(physical_unit)
        assert flu.is_equivalent(tlu)
        assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
        assert_allclose(
            flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)
        )
        # Also with a scaled physical unit ...
        tlu2 = tlu_unit(u.Unit(100.0 * physical_unit))
        assert flu.is_equivalent(tlu2)
        # Check that we round-trip.
        assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.0e-15)
        # ... and with an SI-decomposed one.
        tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
        assert flu.is_equivalent(tlu3)
        assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.0e-15)
        # An inequivalent physical unit must fail.
        tlu4 = tlu_unit(u.g)
        assert not flu.is_equivalent(tlu4)
        with pytest.raises(u.UnitsError):
            flu.to(tlu4, values)

    def test_unit_decomposition(self):
        # decompose/si/cgs act on the physical unit, keeping the log wrapper.
        lu = u.mag(u.Jy)
        assert lu.decompose() == u.mag(u.Jy.decompose())
        assert lu.decompose().physical_unit.bases == [u.kg, u.s]
        assert lu.si == u.mag(u.Jy.si)
        assert lu.si.physical_unit.bases == [u.kg, u.s]
        assert lu.cgs == u.mag(u.Jy.cgs)
        assert lu.cgs.physical_unit.bases == [u.g, u.s]

    def test_unit_multiple_possible_equivalencies(self):
        lu = u.mag(u.Jy)
        assert lu.is_equivalent(pu_sample)

    def test_magnitude_conversion_fails_message(self):
        """Check that "dimensionless" magnitude units include a message in their
        exception text suggesting a possible cause of the problem.
        """
        with pytest.raises(
            u.UnitConversionError,
            match="Did you perhaps subtract magnitudes so the unit got lost?",
        ):
            (10 * u.ABmag - 2 * u.ABmag).to(u.nJy)
class TestLogUnitArithmetic:
    """Arithmetic on logarithmic units: products, powers, sums/differences."""

    def test_multiplication_division(self):
        """Check that multiplication/division with other units is only
        possible when the physical unit is dimensionless, and that this
        turns the unit into a normal one."""
        lu1 = u.mag(u.Jy)
        with pytest.raises(u.UnitsError):
            lu1 * u.m
        with pytest.raises(u.UnitsError):
            u.m * lu1
        with pytest.raises(u.UnitsError):
            lu1 / lu1
        for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
            with pytest.raises(u.UnitsError):
                lu1 / unit
        lu2 = u.mag(u.dimensionless_unscaled)
        with pytest.raises(u.UnitsError):
            lu2 * lu1
        with pytest.raises(u.UnitsError):
            lu2 / lu1
        # But dimensionless_unscaled can be cancelled.
        assert lu2 / lu2 == u.dimensionless_unscaled
        # With dimensionless, normal units are OK, but we return a plain unit.
        tf = lu2 * u.m
        tr = u.m * lu2
        for t in (tf, tr):
            assert not isinstance(t, type(lu2))
            assert t == lu2.function_unit * u.m
            with u.set_enabled_equivalencies(u.logarithmic()):
                with pytest.raises(u.UnitsError):
                    t.to(lu2.physical_unit)
        # Now we essentially have a LogUnit with a prefactor of 100,
        # so should be equivalent again.
        t = tf / u.cm
        with u.set_enabled_equivalencies(u.logarithmic()):
            assert t.is_equivalent(lu2.function_unit)
            assert_allclose(
                t.to(u.dimensionless_unscaled, np.arange(3.0) / 100.0),
                lu2.to(lu2.physical_unit, np.arange(3.0)),
            )
        # If we effectively remove lu1, a normal unit should be returned.
        t2 = tf / lu2
        assert not isinstance(t2, type(lu2))
        assert t2 == u.m
        t3 = tf / lu2.function_unit
        assert not isinstance(t3, type(lu2))
        assert t3 == u.m
        # For completeness, also ensure non-sensical operations fail
        with pytest.raises(TypeError):
            lu1 * object()
        with pytest.raises(TypeError):
            slice(None) * lu1
        with pytest.raises(TypeError):
            lu1 / []
        with pytest.raises(TypeError):
            1 / lu1

    @pytest.mark.parametrize("power", (2, 0.5, 1, 0))
    def test_raise_to_power(self, power):
        """Check that raising LogUnits to some power is only possible when the
        physical unit is dimensionless, and that conversion is turned off when
        the resulting logarithmic unit (such as mag**2) is incompatible."""
        lu1 = u.mag(u.Jy)
        if power == 0:
            assert lu1**power == u.dimensionless_unscaled
        elif power == 1:
            assert lu1**power == lu1
        else:
            with pytest.raises(u.UnitsError):
                lu1**power
        # With dimensionless, though, it works, but returns a normal unit.
        lu2 = u.mag(u.dimensionless_unscaled)
        t = lu2**power
        if power == 0:
            assert t == u.dimensionless_unscaled
        elif power == 1:
            assert t == lu2
        else:
            assert not isinstance(t, type(lu2))
            assert t == lu2.function_unit**power
            # also check we roundtrip
            t2 = t ** (1.0 / power)
            assert t2 == lu2.function_unit
            with u.set_enabled_equivalencies(u.logarithmic()):
                assert_allclose(
                    t2.to(u.dimensionless_unscaled, np.arange(3.0)),
                    lu2.to(lu2.physical_unit, np.arange(3.0)),
                )

    @pytest.mark.parametrize("other", pu_sample)
    def test_addition_subtraction_to_normal_units_fails(self, other):
        lu1 = u.mag(u.Jy)
        with pytest.raises(u.UnitsError):
            lu1 + other
        with pytest.raises(u.UnitsError):
            lu1 - other
        with pytest.raises(u.UnitsError):
            other - lu1

    def test_addition_subtraction_to_non_units_fails(self):
        lu1 = u.mag(u.Jy)
        with pytest.raises(TypeError):
            lu1 + 1.0
        with pytest.raises(TypeError):
            lu1 - [1.0, 2.0, 3.0]

    @pytest.mark.parametrize(
        "other",
        (
            u.mag,
            u.mag(),
            u.mag(u.Jy),
            u.mag(u.m),
            u.Unit(2 * u.mag),
            u.MagUnit("", 2.0 * u.mag),
        ),
    )
    def test_addition_subtraction(self, other):
        """Check physical units are changed appropriately"""
        lu1 = u.mag(u.Jy)
        # Units without a physical_unit count as dimensionless.
        other_pu = getattr(other, "physical_unit", u.dimensionless_unscaled)
        lu_sf = lu1 + other
        assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
        lu_sr = other + lu1
        assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
        lu_df = lu1 - other
        assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
        lu_dr = other - lu1
        assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)

    def test_complicated_addition_subtraction(self):
        """for fun, a more complicated example of addition and subtraction"""
        # DM is the distance-modulus flux-scaling unit for 10 pc.
        dm0 = u.Unit("DM", 1.0 / (4.0 * np.pi * (10.0 * u.pc) ** 2))
        lu_dm = u.mag(dm0)
        lu_absST = u.STmag - lu_dm
        assert lu_absST.is_equivalent(u.erg / u.s / u.AA)

    def test_neg_pos(self):
        # Negation inverts the physical unit; unary plus copies.
        lu1 = u.mag(u.Jy)
        neg_lu = -lu1
        assert neg_lu != lu1
        assert neg_lu.physical_unit == u.Jy**-1
        assert -neg_lu == lu1
        pos_lu = +lu1
        assert pos_lu is not lu1
        assert pos_lu == lu1
def test_pickle():
    """Logarithmic units survive a pickle round-trip."""
    original = u.dex(u.cm / u.s**2)
    restored = pickle.loads(pickle.dumps(original))
    assert original == restored
def test_hashable():
    """Equal log units hash equal; distinct physical units hash distinct."""
    db_mw = u.dB(u.mW)
    db_m = u.dB(u.m)
    db_mw_again = u.dB(u.mW)
    assert hash(db_mw) != hash(db_m)
    assert hash(db_mw) == hash(db_mw_again)
    # In a set, the duplicate collapses to a single entry.
    assert len({db_mw, db_m, db_mw_again}) == 2
class TestLogQuantityCreation:
    """Creation of LogQuantity subclasses (Dex, Magnitude, Decibel)."""

    @pytest.mark.parametrize(
        "lq, lu", zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])
    )
    def test_logarithmic_quantities(self, lq, lu):
        """Check logarithmic quantities are all set up correctly"""
        assert lq._unit_class == lu
        assert type(lu()._quantity_class(1.0)) is lq

    @pytest.mark.parametrize(
        "lq_cls, physical_unit", itertools.product(lq_subclasses, pu_sample)
    )
    def test_subclass_creation(self, lq_cls, physical_unit):
        """Create LogQuantity subclass objects for some physical units,
        and basic check on transformations"""
        value = np.arange(1.0, 10.0)
        log_q = lq_cls(value * physical_unit)
        assert log_q.unit.physical_unit == physical_unit
        assert log_q.unit.function_unit == log_q.unit._default_function_unit
        assert_allclose(log_q.physical.value, value)
        # A plain physical unit is not a valid unit argument.
        with pytest.raises(ValueError):
            lq_cls(value, physical_unit)

    @pytest.mark.parametrize(
        "unit",
        (
            u.mag,
            u.mag(),
            u.mag(u.Jy),
            u.mag(u.m),
            u.Unit(2 * u.mag),
            u.MagUnit("", 2.0 * u.mag),
            u.MagUnit(u.Jy, -1 * u.mag),
            u.MagUnit(u.m, -2.0 * u.mag),
        ),
    )
    def test_different_units(self, unit):
        q = u.Magnitude(1.23, unit)
        assert q.unit.function_unit == getattr(unit, "function_unit", unit)
        assert q.unit.physical_unit is getattr(
            unit, "physical_unit", u.dimensionless_unscaled
        )

    @pytest.mark.parametrize(
        "value, unit",
        (
            (1.0 * u.mag(u.Jy), None),
            (1.0 * u.dex(u.Jy), None),
            (1.0 * u.mag(u.W / u.m**2 / u.Hz), u.mag(u.Jy)),
            (1.0 * u.dex(u.W / u.m**2 / u.Hz), u.mag(u.Jy)),
        ),
    )
    def test_function_values(self, value, unit):
        # Creating from an existing log quantity (optionally converting).
        lq = u.Magnitude(value, unit)
        assert lq == value
        assert lq.unit.function_unit == u.mag
        assert lq.unit.physical_unit == getattr(
            unit, "physical_unit", value.unit.physical_unit
        )

    @pytest.mark.parametrize(
        "unit",
        (
            u.mag(),
            u.mag(u.Jy),
            u.mag(u.m),
            u.MagUnit("", 2.0 * u.mag),
            u.MagUnit(u.Jy, -1 * u.mag),
            u.MagUnit(u.m, -2.0 * u.mag),
        ),
    )
    def test_indirect_creation(self, unit):
        # value * unit and unit * value both create a Magnitude.
        q1 = 2.5 * unit
        assert isinstance(q1, u.Magnitude)
        assert q1.value == 2.5
        assert q1.unit == unit
        pv = 100.0 * unit.physical_unit
        q2 = unit * pv
        assert q2.unit == unit
        assert q2.unit.physical_unit == pv.unit
        assert q2.to_value(unit.physical_unit) == 100.0
        assert (q2._function_view / u.mag).to_value(1) == -5.0
        q3 = unit / 0.4
        assert q3 == q1

    def test_from_view(self):
        # Cannot view a physical quantity as a function quantity, since the
        # values would change.
        q = [100.0, 1000.0] * u.cm / u.s**2
        with pytest.raises(TypeError):
            q.view(u.Dex)
        # But fine if we have the right magnitude.
        q = [2.0, 3.0] * u.dex
        lq = q.view(u.Dex)
        assert isinstance(lq, u.Dex)
        assert lq.unit.physical_unit == u.dimensionless_unscaled
        assert np.all(q == lq)

    def test_using_quantity_class(self):
        """Check that we can use Quantity if we have subok=True"""
        # following issue #5851
        lu = u.dex(u.AA)
        with pytest.raises(u.UnitTypeError):
            u.Quantity(1.0, lu)
        q = u.Quantity(1.0, lu, subok=True)
        assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
    """Ensures we can convert from regular quantities."""
    mst = [10.0, 12.0, 14.0] * u.STmag
    flux_lambda = mst.physical
    mst_roundtrip = flux_lambda.to(u.STmag)
    # check we return a logquantity; see #5178.
    assert isinstance(mst_roundtrip, u.Magnitude)
    assert mst_roundtrip.unit == mst.unit
    assert_allclose(mst_roundtrip.value, mst.value)
    # Same round-trip via frequency flux, using spectral-density equivalency.
    wave = [4956.8, 4959.55, 4962.3] * u.AA
    flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
    mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
    assert isinstance(mst_roundtrip2, u.Magnitude)
    assert mst_roundtrip2.unit == mst.unit
    assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
    """decompose/si/cgs leave a LogQuantity equal to itself but with
    decomposed physical-unit bases."""
    lq = 10.0 * u.mag(u.Jy)
    assert lq.decompose() == lq
    assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
    assert lq.si == lq
    assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
    assert lq.cgs == lq
    assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews:
    """Views of LogQuantity data: value, function view, and Quantity view."""

    def setup_method(self):
        # One quantity with a physical unit, one dimensionless.
        self.lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
        self.lq2 = u.Magnitude(np.arange(1.0, 5.0))

    def test_value_view(self):
        # .value is a writable ndarray view of the underlying data.
        lq_value = self.lq.value
        assert type(lq_value) is np.ndarray
        lq_value[2] = -1.0
        assert np.all(self.lq.value == lq_value)

    def test_function_view(self):
        # ._function_view is a writable Quantity view in the function unit.
        lq_fv = self.lq._function_view
        assert type(lq_fv) is u.Quantity
        assert lq_fv.unit is self.lq.unit.function_unit
        lq_fv[3] = -2.0 * lq_fv.unit
        assert np.all(self.lq.value == lq_fv.value)

    def test_quantity_view(self):
        # Cannot view as Quantity, since the unit cannot be represented.
        with pytest.raises(TypeError):
            self.lq.view(u.Quantity)
        # But a dimensionless one is fine.
        q2 = self.lq2.view(u.Quantity)
        assert q2.unit is u.mag
        assert np.all(q2.value == self.lq2.value)
        lq3 = q2.view(u.Magnitude)
        assert type(lq3.unit) is u.MagUnit
        assert lq3.unit.physical_unit == u.dimensionless_unscaled
        assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing:
    """Item and slice assignment on LogQuantity, with unit checking."""

    def test_item_get_and_set(self):
        lq1 = u.Magnitude(np.arange(1.0, 11.0) * u.Jy)
        assert lq1[9] == u.Magnitude(10.0 * u.Jy)
        # Assigning a physical quantity converts it to the log unit.
        lq1[2] = 100.0 * u.Jy
        assert lq1[2] == u.Magnitude(100.0 * u.Jy)
        # Incompatible physical or function units must raise ...
        with pytest.raises(u.UnitsError):
            lq1[2] = 100.0 * u.m
        with pytest.raises(u.UnitsError):
            lq1[2] = 100.0 * u.mag
        with pytest.raises(u.UnitsError):
            lq1[2] = u.Magnitude(100.0 * u.m)
        # ... leaving the element untouched.
        assert lq1[2] == u.Magnitude(100.0 * u.Jy)

    def test_slice_get_and_set(self):
        lq1 = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
        lq1[2:4] = 100.0 * u.Jy
        assert np.all(lq1[2:4] == u.Magnitude(100.0 * u.Jy))
        with pytest.raises(u.UnitsError):
            lq1[2:4] = 100.0 * u.m
        with pytest.raises(u.UnitsError):
            lq1[2:4] = 100.0 * u.mag
        with pytest.raises(u.UnitsError):
            lq1[2:4] = u.Magnitude(100.0 * u.m)
        assert np.all(lq1[2] == u.Magnitude(100.0 * u.Jy))
class TestLogQuantityArithmetic:
    """Arithmetic on LogQuantity: scaling, powers, addition/subtraction."""

    @pytest.mark.parametrize(
        "other",
        [
            2.4 * u.mag(),
            12.34 * u.ABmag,
            u.Magnitude(3.45 * u.Jy),
            u.Dex(3.0),
            u.Dex(np.linspace(3000, 5000, 10) * u.Angstrom),
            u.Magnitude(6.78, 2.0 * u.mag),
        ],
    )
    @pytest.mark.parametrize("fac", [1.0, 2, 0.4])
    def test_multiplication_division(self, other, fac):
        """Check that multiplication and division works as expected"""
        # Scaling a log quantity raises its physical value to that power.
        lq_sf = fac * other
        assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
        assert_allclose(lq_sf.physical, other.physical**fac)
        lq_sf = other * fac
        assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
        assert_allclose(lq_sf.physical, other.physical**fac)
        lq_sf = other / fac
        assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
        assert_allclose(lq_sf.physical**fac, other.physical)
        # Same for the in-place operators.
        lq_sf = other.copy()
        lq_sf *= fac
        assert lq_sf.unit.physical_unit == other.unit.physical_unit**fac
        assert_allclose(lq_sf.physical, other.physical**fac)
        lq_sf = other.copy()
        lq_sf /= fac
        assert lq_sf.unit.physical_unit**fac == other.unit.physical_unit
        assert_allclose(lq_sf.physical**fac, other.physical)

    def test_more_multiplication_division(self):
        """Check that multiplication/division with other quantities is only
        possible when the physical unit is dimensionless, and that this keeps
        the result as a LogQuantity if possible."""
        lq = u.Magnitude(np.arange(1.0, 11.0) * u.Jy)
        with pytest.raises(u.UnitsError):
            lq * (1.0 * u.m)
        with pytest.raises(u.UnitsError):
            (1.0 * u.m) * lq
        with pytest.raises(u.UnitsError):
            lq / lq
        for unit in (u.m, u.mag, u.dex):
            with pytest.raises(u.UnitsError):
                lq / unit
        lq2 = u.Magnitude(np.arange(1, 11.0))
        with pytest.raises(u.UnitsError):
            lq2 * lq
        with pytest.raises(u.UnitsError):
            lq2 / lq
        with pytest.raises(u.UnitsError):
            lq / lq2
        lq_sf = lq.copy()
        with pytest.raises(u.UnitsError):
            lq_sf *= lq2
        # ensure that nothing changed inside
        assert (lq_sf == lq).all()
        with pytest.raises(u.UnitsError):
            lq_sf /= lq2
        # ensure that nothing changed inside
        assert (lq_sf == lq).all()
        # but dimensionless_unscaled can be cancelled
        r = lq2 / u.Magnitude(2.0)
        assert r.unit == u.dimensionless_unscaled
        assert np.all(r.value == lq2.value / 2.0)
        # And multiplying with a dimensionless array is also OK.
        r2 = lq2 * np.arange(10.0)
        assert isinstance(r2, u.Magnitude)
        assert np.all(r2 == lq2._function_view * np.arange(10.0))
        # with dimensionless, normal units OK, but return normal quantities
        # if the unit no longer is consistent with the logarithmic unit.
        tf = lq2 * u.m
        tr = u.m * lq2
        for t in (tf, tr):
            assert not isinstance(t, type(lq2))
            assert t.unit == lq2.unit.function_unit * u.m
            with u.set_enabled_equivalencies(u.logarithmic()):
                with pytest.raises(u.UnitsError):
                    t.to(lq2.unit.physical_unit)
        t = tf / (50.0 * u.cm)
        # now we essentially have the same quantity but with a prefactor of 2
        assert t.unit.is_equivalent(lq2.unit.function_unit)
        assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view * 2)

    @pytest.mark.parametrize("power", (2, 0.5, 1, 0))
    def test_raise_to_power(self, power):
        """Check that raising LogQuantities to some power is only possible when
        the physical unit is dimensionless, and that conversion is turned off
        when the resulting logarithmic unit (say, mag**2) is incompatible."""
        lq = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
        if power == 0:
            assert np.all(lq**power == 1.0)
        elif power == 1:
            assert np.all(lq**power == lq)
        else:
            with pytest.raises(u.UnitsError):
                lq**power
        # with dimensionless, it works, but falls back to normal quantity
        # (except for power=1)
        lq2 = u.Magnitude(np.arange(10.0))
        t = lq2**power
        if power == 0:
            assert t.unit is u.dimensionless_unscaled
            assert np.all(t.value == 1.0)
        elif power == 1:
            assert np.all(t == lq2)
        else:
            assert not isinstance(t, type(lq2))
            assert t.unit == lq2.unit.function_unit**power
            with u.set_enabled_equivalencies(u.logarithmic()):
                with pytest.raises(u.UnitsError):
                    t.to(u.dimensionless_unscaled)

    def test_error_on_lq_as_power(self):
        # A log quantity cannot itself serve as an exponent.
        lq = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
        with pytest.raises(TypeError):
            lq**lq

    @pytest.mark.parametrize("other", pu_sample)
    def test_addition_subtraction_to_normal_units_fails(self, other):
        lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
        q = 1.23 * other
        with pytest.raises(u.UnitsError):
            lq + q
        with pytest.raises(u.UnitsError):
            lq - q
        with pytest.raises(u.UnitsError):
            q - lq

    @pytest.mark.parametrize(
        "other",
        (
            1.23 * u.mag,
            2.34 * u.mag(),
            u.Magnitude(3.45 * u.Jy),
            u.Magnitude(4.56 * u.m),
            5.67 * u.Unit(2 * u.mag),
            u.Magnitude(6.78, 2.0 * u.mag),
        ),
    )
    def test_addition_subtraction(self, other):
        """Check that addition/subtraction with quantities with magnitude or
        MagUnit units works, and that it changes the physical units
        appropriately."""
        lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
        other_physical = other.to(
            getattr(other.unit, "physical_unit", u.dimensionless_unscaled),
            equivalencies=u.logarithmic(),
        )
        # Adding magnitudes multiplies physical values; subtracting divides.
        lq_sf = lq + other
        assert_allclose(lq_sf.physical, lq.physical * other_physical)
        lq_sr = other + lq
        assert_allclose(lq_sr.physical, lq.physical * other_physical)
        lq_df = lq - other
        assert_allclose(lq_df.physical, lq.physical / other_physical)
        lq_dr = other - lq
        assert_allclose(lq_dr.physical, other_physical / lq.physical)

    @pytest.mark.parametrize("other", pu_sample)
    def test_inplace_addition_subtraction_unit_checks(self, other):
        lu1 = u.mag(u.Jy)
        lq1 = u.Magnitude(np.arange(1.0, 10.0), lu1)
        # Failed in-place operations must leave the quantity unchanged.
        with pytest.raises(u.UnitsError):
            lq1 += other
        assert np.all(lq1.value == np.arange(1.0, 10.0))
        assert lq1.unit == lu1
        with pytest.raises(u.UnitsError):
            lq1 -= other
        assert np.all(lq1.value == np.arange(1.0, 10.0))
        assert lq1.unit == lu1

    @pytest.mark.parametrize(
        "other",
        (
            1.23 * u.mag,
            2.34 * u.mag(),
            u.Magnitude(3.45 * u.Jy),
            u.Magnitude(4.56 * u.m),
            5.67 * u.Unit(2 * u.mag),
            u.Magnitude(6.78, 2.0 * u.mag),
        ),
    )
    def test_inplace_addition_subtraction(self, other):
        """Check that inplace addition/subtraction with quantities with
        magnitude or MagUnit units works, and that it changes the physical
        units appropriately."""
        lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
        other_physical = other.to(
            getattr(other.unit, "physical_unit", u.dimensionless_unscaled),
            equivalencies=u.logarithmic(),
        )
        lq_sf = lq.copy()
        lq_sf += other
        assert_allclose(lq_sf.physical, lq.physical * other_physical)
        lq_df = lq.copy()
        lq_df -= other
        assert_allclose(lq_df.physical, lq.physical / other_physical)

    def test_complicated_addition_subtraction(self):
        """For fun, a more complicated example of addition and subtraction."""
        # DM is the distance-modulus flux-scaling unit for 10 pc.
        dm0 = u.Unit("DM", 1.0 / (4.0 * np.pi * (10.0 * u.pc) ** 2))
        DMmag = u.mag(dm0)
        m_st = 10.0 * u.STmag
        dm = 5.0 * DMmag
        M_st = m_st - dm
        assert M_st.unit.is_equivalent(u.erg / u.s / u.AA)
        ratio = M_st.physical / (m_st.physical * 4.0 * np.pi * (100.0 * u.pc) ** 2)
        assert np.abs(ratio - 1.0) < 1.0e-15
class TestLogQuantityComparisons:
    """Comparison operators between LogQuantity and other quantities."""

    def test_comparison_to_non_quantities_fails(self):
        lq = u.Magnitude(np.arange(1.0, 10.0) * u.Jy)
        with pytest.raises(TypeError):
            lq > "a"
        # Equality with a non-quantity is simply False, not an error.
        assert not (lq == "a")
        assert lq != "a"

    def test_comparison(self):
        lq1 = u.Magnitude(np.arange(1.0, 4.0) * u.Jy)
        lq2 = u.Magnitude(2.0 * u.Jy)
        # NOTE: larger magnitude means smaller flux, so "greater" here is in
        # magnitude (function-unit) value.
        assert np.all((lq1 > lq2) == np.array([True, False, False]))
        assert np.all((lq1 == lq2) == np.array([False, True, False]))
        lq3 = u.Dex(2.0 * u.Jy)
        assert np.all((lq1 > lq3) == np.array([True, False, False]))
        assert np.all((lq1 == lq3) == np.array([False, True, False]))
        lq4 = u.Magnitude(2.0 * u.m)
        assert not (lq1 == lq4)
        assert lq1 != lq4
        with pytest.raises(u.UnitsError):
            lq1 < lq4
        # Comparison with a physical quantity compares physical values.
        q5 = 1.5 * u.Jy
        assert np.all((lq1 > q5) == np.array([True, False, False]))
        assert np.all((q5 < lq1) == np.array([True, False, False]))
        with pytest.raises(u.UnitsError):
            lq1 >= 2.0 * u.m
        with pytest.raises(u.UnitsError):
            lq1 <= lq1.value * u.mag
        # For physically dimensionless, we can compare with the function unit.
        lq6 = u.Magnitude(np.arange(1.0, 4.0))
        fv6 = lq6.value * u.mag
        assert np.all(lq6 == fv6)
        # but not some arbitrary unit, of course.
        with pytest.raises(u.UnitsError):
            lq6 < 2.0 * u.m
class TestLogQuantityMethods:
    """ndarray-style methods on LogQuantity, grouped by when they are valid."""

    def setup_method(self):
        # One quantity with a physical unit, one dimensionless.
        self.mJy = np.arange(1.0, 5.0).reshape(2, 2) * u.mag(u.Jy)
        self.m1 = np.arange(1.0, 5.5, 0.5).reshape(3, 3) * u.mag()
        self.mags = (self.mJy, self.m1)

    @pytest.mark.parametrize(
        "method",
        (
            "mean",
            "min",
            "max",
            "round",
            "trace",
            "std",
            "var",
            "ptp",
            "diff",
            "ediff1d",
        ),
    )
    def test_always_ok(self, method):
        for mag in self.mags:
            res = getattr(mag, method)()
            assert np.all(res.value == getattr(mag._function_view, method)().value)
            # Difference-like methods drop the physical unit; var squares it.
            if method in ("std", "ptp", "diff", "ediff1d"):
                assert res.unit == u.mag()
            elif method == "var":
                assert res.unit == u.mag**2
            else:
                assert res.unit == mag.unit

    def test_clip(self):
        for mag in self.mags:
            assert np.all(
                mag.clip(2.0 * mag.unit, 4.0 * mag.unit).value
                == mag.value.clip(2.0, 4.0)
            )

    @pytest.mark.parametrize("method", ("sum", "cumsum", "nansum"))
    def test_only_ok_if_dimensionless(self, method):
        res = getattr(self.m1, method)()
        assert np.all(res.value == getattr(self.m1._function_view, method)().value)
        assert res.unit == self.m1.unit
        # Summing magnitudes with a physical unit is meaningless.
        with pytest.raises(TypeError):
            getattr(self.mJy, method)()

    def test_dot(self):
        assert np.all(self.m1.dot(self.m1).value == self.m1.value.dot(self.m1.value))

    @pytest.mark.parametrize("method", ("prod", "cumprod"))
    def test_never_ok(self, method):
        # Products of logarithmic quantities are never defined.
        with pytest.raises(TypeError):
            getattr(self.mJy, method)()
        with pytest.raises(TypeError):
            getattr(self.m1, method)()
|
c5299b881191b52f5ad1b6c91f1d3d0c25793b0c72a016319b653f7a2fd46f18 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the Quantity class and related."""
import sys
import typing as T
import numpy as np
import pytest
from astropy import units as u
from astropy.units._typing import Annotated
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires py3.9+")
class TestQuantityTyping:
    """Test Quantity Typing Annotations."""

    def test_quantity_typing(self):
        """Test type hint creation from Quantity."""
        # Quantity[unit] builds an Annotated[Quantity, unit] alias.
        annot = u.Quantity[u.m]
        assert T.get_origin(annot) is Annotated
        assert T.get_args(annot) == (u.Quantity, u.m)
        # test usage
        def func(x: annot, y: str) -> u.Quantity[u.s]:
            return x, y

        annots = T.get_type_hints(func, include_extras=True)
        assert annots["x"] is annot
        assert annots["return"].__metadata__[0] == u.s

    def test_metadata_in_annotation(self):
        """Test Quantity annotation with added metadata."""
        multi_annot = u.Quantity[u.m, T.Any, np.dtype]

        def multi_func(x: multi_annot, y: str):
            return x, y

        annots = T.get_type_hints(multi_func, include_extras=True)
        assert annots["x"] == multi_annot

    def test_optional_and_annotated(self):
        """Test Quantity annotation in an Optional."""
        opt_annot = T.Optional[u.Quantity[u.m]]

        def opt_func(x: opt_annot, y: str):
            return x, y

        annots = T.get_type_hints(opt_func, include_extras=True)
        assert annots["x"] == opt_annot

    def test_union_and_annotated(self):
        """Test Quantity annotation in a Union."""
        # double Quantity[]
        union_annot1 = T.Union[u.Quantity[u.m], u.Quantity[u.s]]
        # one Quantity, one physical-type
        union_annot2 = T.Union[u.Quantity[u.m], u.Quantity["time"]]
        # one Quantity, one general type
        union_annot3 = T.Union[u.Quantity[u.m / u.s], float]

        def union_func(x: union_annot1, y: union_annot2) -> union_annot3:
            if isinstance(y, str):  # value = time
                return x.value  # returns <float>
            else:
                return x / y  # returns Quantity[m / s]

        annots = T.get_type_hints(union_func, include_extras=True)
        assert annots["x"] == union_annot1
        assert annots["y"] == union_annot2
        assert annots["return"] == union_annot3

    def test_quantity_subclass_typing(self):
        """Test type hint creation from a Quantity subclasses."""
        # Subclasses keep their own class in the Annotated alias.
        class Length(u.SpecificTypeQuantity):
            _equivalent_unit = u.m

        annot = Length[u.km]
        assert T.get_origin(annot) is Annotated
        assert T.get_args(annot) == (Length, u.km)
|
4ee91c8c3d469bb323b4fdb91265ed9c5895558dc13fbf79b726cc0b16e4091c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test Structured units and quantities.
"""
import copy
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.tests.helper import check_pickling_recovery, pickle_protocol # noqa: F401
from astropy.units import Quantity, StructuredUnit, Unit, UnitBase
from astropy.units.quantity import _structured_unit_like_dtype
from astropy.utils.compat import NUMPY_LT_1_21_1
from astropy.utils.masked import Masked
class StructuredTestBase:
    """Shared fixtures: structured dtypes, matching units, and sample data."""

    @classmethod
    def setup_class(self):
        # Structured dtypes for (p, v) and the nested ((p, v), t) layout.
        # (These were previously assigned twice; the duplicate assignments
        # have been removed.)
        self.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
        self.pv_t_dtype = np.dtype([("pv", self.pv_dtype), ("t", "f8")])
        # Units corresponding to the fields above.
        self.p_unit = u.km
        self.v_unit = u.km / u.s
        self.t_unit = u.s
        # Sample structured arrays used throughout the tests.
        self.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], self.pv_dtype)
        self.pv_t = np.array(
            [
                ((4.0, 2.5), 0.0),
                ((5.0, 5.0), 1.0),
                ((6.0, 7.5), 2.0),
            ],
            self.pv_t_dtype,
        )
class StructuredTestBaseWithUnits(StructuredTestBase):
    """Extends the base fixtures with matching StructuredUnit instances."""

    @classmethod
    def setup_class(self):
        super().setup_class()
        # Structured units mirroring pv_dtype and pv_t_dtype.
        self.pv_unit = StructuredUnit((self.p_unit, self.v_unit), ("p", "v"))
        self.pv_t_unit = StructuredUnit((self.pv_unit, self.t_unit), ("pv", "t"))
class TestStructuredUnitBasics(StructuredTestBase):
    """Construction, parsing, and string representation of StructuredUnit."""

    def test_initialization_and_keying(self):
        # Units are keyed by field name, including nested structures.
        su = StructuredUnit((self.p_unit, self.v_unit), ("p", "v"))
        assert su["p"] is self.p_unit
        assert su["v"] is self.v_unit
        su2 = StructuredUnit((su, self.t_unit), ("pv", "t"))
        assert isinstance(su2["pv"], StructuredUnit)
        assert su2["pv"]["p"] is self.p_unit
        assert su2["pv"]["v"] is self.v_unit
        assert su2["t"] is self.t_unit
        assert su2["pv"] == su
        # Unit strings are parsed into regular units.
        su3 = StructuredUnit(("AU", "AU/day"), ("p", "v"))
        assert isinstance(su3["p"], UnitBase)
        assert isinstance(su3["v"], UnitBase)
        su4 = StructuredUnit("AU, AU/day", ("p", "v"))
        assert su4["p"] == u.AU
        assert su4["v"] == u.AU / u.day
        # Without names, numpy-style defaults "f0", "f1", ... are used.
        su5 = StructuredUnit(("AU", "AU/day"))
        assert su5.field_names == ("f0", "f1")
        assert su5["f0"] == u.AU
        assert su5["f1"] == u.AU / u.day

    def test_recursive_initialization(self):
        # Nested tuples of units and names produce nested StructuredUnits.
        su = StructuredUnit(
            ((self.p_unit, self.v_unit), self.t_unit), (("p", "v"), "t")
        )
        assert isinstance(su["pv"], StructuredUnit)
        assert su["pv"]["p"] is self.p_unit
        assert su["pv"]["v"] is self.v_unit
        assert su["t"] is self.t_unit
        # A list entry gives the nested structure an explicit name.
        su2 = StructuredUnit(
            ((self.p_unit, self.v_unit), self.t_unit), (["p_v", ("p", "v")], "t")
        )
        assert isinstance(su2["p_v"], StructuredUnit)
        assert su2["p_v"]["p"] is self.p_unit
        assert su2["p_v"]["v"] is self.v_unit
        assert su2["t"] is self.t_unit
        su3 = StructuredUnit((("AU", "AU/day"), "yr"), (["p_v", ("p", "v")], "t"))
        assert isinstance(su3["p_v"], StructuredUnit)
        assert su3["p_v"]["p"] == u.AU
        assert su3["p_v"]["v"] == u.AU / u.day
        assert su3["t"] == u.yr
        su4 = StructuredUnit("(AU, AU/day), yr", (("p", "v"), "t"))
        assert isinstance(su4["pv"], StructuredUnit)
        assert su4["pv"]["p"] == u.AU
        assert su4["pv"]["v"] == u.AU / u.day
        assert su4["t"] == u.yr

    def test_extreme_recursive_initialization(self):
        # Deep nesting generates concatenated default names for sub-structures.
        su = StructuredUnit(
            "(yr,(AU,AU/day,(km,(day,day))),m)",
            ("t", ("p", "v", ("h", ("d1", "d2"))), "l"),
        )
        assert su.field_names == (
            't', ['pvhd1d2',
            ('p', 'v',
            ['hd1d2',
            ('h',
            ['d1d2',
            ('d1', 'd2')])])],
            'l',
        )  # fmt: skip

    @pytest.mark.parametrize(
        "names, invalid",
        [
            [("t", ["p", "v"]), "['p', 'v']"],
            [("t", ["pv", "p", "v"]), "['pv', 'p', 'v']"],
            [("t", ["pv", ["p", "v"]]), "['pv', ['p', 'v']"],
            [("t", ()), "()"],
            [("t", ("p", None)), "None"],
            [("t", ["pv", ("p", "")]), "''"],
        ],
    )
    def test_initialization_names_invalid_list_errors(self, names, invalid):
        # Malformed name specifications are rejected, with the error
        # message pointing at the offending entry.
        with pytest.raises(ValueError) as exc:
            StructuredUnit("(yr,(AU,AU/day)", names)
        assert f"invalid entry {invalid}" in str(exc)

    def test_looks_like_unit(self):
        # Unit() passes an existing StructuredUnit through unchanged.
        su = StructuredUnit((self.p_unit, self.v_unit), ("p", "v"))
        assert Unit(su) is su

    def test_initialize_with_float_dtype(self):
        # A structured dtype can supply the field names.
        su = StructuredUnit(("AU", "AU/d"), self.pv_dtype)
        assert isinstance(su["p"], UnitBase)
        assert isinstance(su["v"], UnitBase)
        assert su["p"] == u.AU
        assert su["v"] == u.AU / u.day
        su = StructuredUnit((("km", "km/s"), "yr"), self.pv_t_dtype)
        assert isinstance(su["pv"], StructuredUnit)
        assert isinstance(su["pv"]["p"], UnitBase)
        assert isinstance(su["t"], UnitBase)
        assert su["pv"]["v"] == u.km / u.s
        su = StructuredUnit("(km, km/s), yr", self.pv_t_dtype)
        assert isinstance(su["pv"], StructuredUnit)
        assert isinstance(su["pv"]["p"], UnitBase)
        assert isinstance(su["t"], UnitBase)
        assert su["pv"]["v"] == u.km / u.s

    def test_initialize_with_structured_unit_for_names(self):
        # An existing StructuredUnit can donate its field names.
        su = StructuredUnit(("AU", "AU/d"), names=("p", "v"))
        su2 = StructuredUnit(("km", "km/s"), names=su)
        assert su2.field_names == ("p", "v")
        assert su2["p"] == u.km
        assert su2["v"] == u.km / u.s

    def test_initialize_single_field(self):
        su = StructuredUnit("AU", "p")
        assert isinstance(su, StructuredUnit)
        assert isinstance(su["p"], UnitBase)
        assert su["p"] == u.AU
        # Without a name, a single field defaults to "f0".
        su = StructuredUnit("AU")
        assert isinstance(su, StructuredUnit)
        assert isinstance(su["f0"], UnitBase)
        assert su["f0"] == u.AU

    def test_equality(self):
        su = StructuredUnit(("AU", "AU/d"), self.pv_dtype)
        assert su == StructuredUnit(("AU", "AU/d"), self.pv_dtype)
        assert su != StructuredUnit(("m", "AU/d"), self.pv_dtype)
        # Names should be ignored.
        assert su == StructuredUnit(("AU", "AU/d"))
        assert su == StructuredUnit(("AU", "AU/d"), names=("q", "w"))
        assert su != StructuredUnit(("m", "m/s"))

    def test_parsing(self):
        # Unit() parses comma-separated strings into StructuredUnits.
        su = Unit("AU, AU/d")
        assert isinstance(su, StructuredUnit)
        assert isinstance(su["f0"], UnitBase)
        assert isinstance(su["f1"], UnitBase)
        assert su["f0"] == u.AU
        assert su["f1"] == u.AU / u.day
        su2 = Unit("AU, AU/d, yr")
        assert isinstance(su2, StructuredUnit)
        assert su2 == StructuredUnit(("AU", "AU/d", "yr"))
        su2a = Unit("(AU, AU/d, yr)")
        assert isinstance(su2a, StructuredUnit)
        assert su2a == su2
        su3 = Unit("(km, km/s), yr")
        assert isinstance(su3, StructuredUnit)
        assert su3 == StructuredUnit((("km", "km/s"), "yr"))
        # A trailing comma yields a single-field structured unit.
        su4 = Unit("km,")
        assert isinstance(su4, StructuredUnit)
        assert su4 == StructuredUnit((u.km,))
        su5 = Unit("(m,s),")
        assert isinstance(su5, StructuredUnit)
        assert su5 == StructuredUnit(((u.m, u.s),))
        ldbody_unit = Unit("Msun, 0.5rad^2, (au, au/day)")
        assert ldbody_unit == StructuredUnit(
            (u.Msun, Unit(u.rad**2 / 2), (u.AU, u.AU / u.day))
        )

    def test_to_string(self):
        su = StructuredUnit((u.km, u.km / u.s))
        latex_str = r"$(\mathrm{km}, \mathrm{\frac{km}{s}})$"
        assert su.to_string(format="latex") == latex_str
        latex_str = r"$(\mathrm{km}, \mathrm{km\,s^{-1}})$"
        assert su.to_string(format="latex_inline") == latex_str

    def test_str(self):
        su = StructuredUnit(((u.km, u.km / u.s), u.yr))
        assert str(su) == "((km, km / s), yr)"
        # The string form round-trips through Unit().
        assert Unit(str(su)) == su

    def test_repr(self):
        su = StructuredUnit(((u.km, u.km / u.s), u.yr))
        assert repr(su) == 'Unit("((km, km / s), yr)")'
        # The repr round-trips through eval().
        assert eval(repr(su)) == su
class TestStructuredUnitsCopyPickle(StructuredTestBaseWithUnits):
    """Copy, deepcopy, and pickle round-trips of StructuredUnit."""

    def test_copy(self):
        su_copy = copy.copy(self.pv_t_unit)
        assert su_copy is not self.pv_t_unit
        assert su_copy == self.pv_t_unit
        # A shallow copy shares the underlying units container.
        assert su_copy._units is self.pv_t_unit._units

    def test_deepcopy(self):
        su_copy = copy.deepcopy(self.pv_t_unit)
        assert su_copy is not self.pv_t_unit
        assert su_copy == self.pv_t_unit
        # A deep copy duplicates the underlying units container.
        assert su_copy._units is not self.pv_t_unit._units

    @pytest.mark.skipif(NUMPY_LT_1_21_1, reason="https://stackoverflow.com/q/69571643")
    def test_pickle(self, pickle_protocol):  # noqa: F811
        check_pickling_recovery(self.pv_t_unit, pickle_protocol)
class TestStructuredUnitAsMapping(StructuredTestBaseWithUnits):
    """StructuredUnit supports a read-only Mapping-like interface."""

    def test_len(self):
        assert len(self.pv_unit) == 2
        assert len(self.pv_t_unit) == 2

    def test_keys(self):
        slv = list(self.pv_t_unit.keys())
        assert slv == ["pv", "t"]

    def test_values(self):
        values = self.pv_t_unit.values()
        assert values == (self.pv_unit, self.t_unit)

    def test_field_names(self):
        # field_names is a nested tuple mirroring the dtype structure.
        field_names = self.pv_t_unit.field_names
        assert isinstance(field_names, tuple)
        assert field_names == (["pv", ("p", "v")], "t")

    @pytest.mark.parametrize("iterable", [list, set])
    def test_as_iterable(self, iterable):
        # Iterating a StructuredUnit yields its top-level field names.
        sl = iterable(self.pv_unit)
        assert isinstance(sl, iterable)
        assert sl == iterable(["p", "v"])

    def test_as_dict(self):
        sd = dict(self.pv_t_unit)
        assert sd == {"pv": self.pv_unit, "t": self.t_unit}

    def test_contains(self):
        assert "p" in self.pv_unit
        assert "v" in self.pv_unit
        assert "t" not in self.pv_unit

    def test_setitem_fails(self):
        # The mapping is read-only.
        with pytest.raises(TypeError, match="item assignment"):
            self.pv_t_unit["t"] = u.Gyr
class TestStructuredUnitMethods(StructuredTestBaseWithUnits):
    """Unit-like methods: physical type, si/cgs, equivalence, conversion."""

    def test_physical_type_id(self):
        pv_ptid = self.pv_unit._get_physical_type_id()
        assert len(pv_ptid) == 2
        assert pv_ptid.dtype.names == ("p", "v")
        p_ptid = self.pv_unit["p"]._get_physical_type_id()
        v_ptid = self.pv_unit["v"]._get_physical_type_id()
        # Expected should be (subclass of) void, with structured object dtype.
        expected = np.array((p_ptid, v_ptid), [("p", "O"), ("v", "O")])[()]
        assert pv_ptid == expected
        # Names should be ignored in comparison.
        assert pv_ptid == np.array((p_ptid, v_ptid), "O,O")[()]
        # Should be possible to address by field and by number.
        assert pv_ptid["p"] == p_ptid
        assert pv_ptid["v"] == v_ptid
        assert pv_ptid[0] == p_ptid
        assert pv_ptid[1] == v_ptid
        # More complicated version.
        pv_t_ptid = self.pv_t_unit._get_physical_type_id()
        t_ptid = self.t_unit._get_physical_type_id()
        assert pv_t_ptid == np.array((pv_ptid, t_ptid), "O,O")[()]
        assert pv_t_ptid["pv"] == pv_ptid
        assert pv_t_ptid["t"] == t_ptid
        assert pv_t_ptid["pv"][1] == v_ptid

    def test_physical_type(self):
        # physical_type is itself structured, matching the fields.
        pv_pt = self.pv_unit.physical_type
        assert pv_pt == np.array(("length", "speed"), "O,O")[()]
        pv_t_pt = self.pv_t_unit.physical_type
        assert pv_t_pt == np.array((pv_pt, "time"), "O,O")[()]

    def test_si(self):
        pv_t_si = self.pv_t_unit.si
        assert pv_t_si == self.pv_t_unit
        # km/s -> m/s carries a scale factor of 1000.
        assert pv_t_si["pv"]["v"].scale == 1000

    def test_cgs(self):
        pv_t_cgs = self.pv_t_unit.cgs
        assert pv_t_cgs == self.pv_t_unit
        # km/s -> cm/s carries a scale factor of 100000.
        assert pv_t_cgs["pv"]["v"].scale == 100000

    def test_decompose(self):
        pv_t_decompose = self.pv_t_unit.decompose()
        assert pv_t_decompose["pv"]["v"].scale == 1000

    def test_is_equivalent(self):
        assert self.pv_unit.is_equivalent(("AU", "AU/day"))
        assert not self.pv_unit.is_equivalent("m")
        assert not self.pv_unit.is_equivalent(("AU", "AU"))
        # Names should be ignored.
        pv_alt = StructuredUnit("m,m/s", names=("q", "w"))
        assert pv_alt.field_names != self.pv_unit.field_names
        assert self.pv_unit.is_equivalent(pv_alt)
        # Regular units should work too.
        assert not u.m.is_equivalent(self.pv_unit)

    def test_conversion(self):
        pv1 = self.pv_unit.to(("AU", "AU/day"), self.pv)
        assert isinstance(pv1, np.ndarray)
        assert pv1.dtype == self.pv.dtype
        assert np.all(pv1["p"] * u.AU == self.pv["p"] * self.p_unit)
        assert np.all(pv1["v"] * u.AU / u.day == self.pv["v"] * self.v_unit)
        # Names should be from value.
        su2 = StructuredUnit((self.p_unit, self.v_unit), ("position", "velocity"))
        pv2 = su2.to(("Mm", "mm/s"), self.pv)
        assert pv2.dtype.names == ("p", "v")
        assert pv2.dtype == self.pv.dtype
        # Check recursion.
        pv_t1 = self.pv_t_unit.to((("AU", "AU/day"), "Myr"), self.pv_t)
        assert isinstance(pv_t1, np.ndarray)
        assert pv_t1.dtype == self.pv_t.dtype
        assert np.all(pv_t1["pv"]["p"] * u.AU == self.pv_t["pv"]["p"] * self.p_unit)
        assert np.all(
            pv_t1["pv"]["v"] * u.AU / u.day == self.pv_t["pv"]["v"] * self.v_unit
        )
        assert np.all(pv_t1["t"] * u.Myr == self.pv_t["t"] * self.t_unit)
        # Passing in tuples should work.
        pv_t2 = self.pv_t_unit.to((("AU", "AU/day"), "Myr"), ((1.0, 0.1), 10.0))
        assert pv_t2["pv"]["p"] == self.p_unit.to("AU", 1.0)
        assert pv_t2["pv"]["v"] == self.v_unit.to("AU/day", 0.1)
        assert pv_t2["t"] == self.t_unit.to("Myr", 10.0)
        pv_t3 = self.pv_t_unit.to(
            (("AU", "AU/day"), "Myr"), [((1.0, 0.1), 10.0), ((2.0, 0.2), 20.0)]
        )
        assert np.all(pv_t3["pv"]["p"] == self.p_unit.to("AU", [1.0, 2.0]))
        assert np.all(pv_t3["pv"]["v"] == self.v_unit.to("AU/day", [0.1, 0.2]))
        assert np.all(pv_t3["t"] == self.t_unit.to("Myr", [10.0, 20.0]))
class TestStructuredUnitArithmatic(StructuredTestBaseWithUnits):
    """Multiplication and division of structured units by plain units.

    NOTE(review): the class name misspells "Arithmetic"; it is kept as-is
    because renaming would change the collected test identifiers.
    """

    def test_multiplication(self):
        # Multiplying by a plain unit (object or string) applies it fieldwise.
        pv_times_au = self.pv_unit * u.au
        assert isinstance(pv_times_au, StructuredUnit)
        assert pv_times_au.field_names == ("p", "v")
        assert pv_times_au["p"] == self.p_unit * u.AU
        assert pv_times_au["v"] == self.v_unit * u.AU
        au_times_pv = u.au * self.pv_unit
        assert au_times_pv == pv_times_au
        pv_times_au2 = self.pv_unit * "au"
        assert pv_times_au2 == pv_times_au
        au_times_pv2 = "AU" * self.pv_unit
        assert au_times_pv2 == pv_times_au
        # Structured * structured is not defined.
        with pytest.raises(TypeError):
            self.pv_unit * self.pv_unit
        with pytest.raises(TypeError):
            "s,s" * self.pv_unit

    def test_division(self):
        # Division by a plain unit (object or string) applies it fieldwise.
        pv_by_s = self.pv_unit / u.s
        assert isinstance(pv_by_s, StructuredUnit)
        assert pv_by_s.field_names == ("p", "v")
        assert pv_by_s["p"] == self.p_unit / u.s
        assert pv_by_s["v"] == self.v_unit / u.s
        pv_by_s2 = self.pv_unit / "s"
        assert pv_by_s2 == pv_by_s
        # Dividing *by* a structured unit is not defined.
        with pytest.raises(TypeError):
            1.0 / self.pv_unit
        with pytest.raises(TypeError):
            u.s / self.pv_unit
class TestStructuredQuantity(StructuredTestBaseWithUnits):
    """Construction, conversion, comparison, and assignment of structured
    Quantity objects."""

    def test_initialization_and_keying(self):
        q_pv = Quantity(self.pv, self.pv_unit)
        q_p = q_pv["p"]
        assert isinstance(q_p, Quantity)
        assert isinstance(q_p.unit, UnitBase)
        assert np.all(q_p == self.pv["p"] * self.pv_unit["p"])
        q_v = q_pv["v"]
        assert isinstance(q_v, Quantity)
        assert isinstance(q_v.unit, UnitBase)
        assert np.all(q_v == self.pv["v"] * self.pv_unit["v"])
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        q_t = q_pv_t["t"]
        assert np.all(q_t == self.pv_t["t"] * self.pv_t_unit["t"])
        q_pv2 = q_pv_t["pv"]
        assert isinstance(q_pv2, Quantity)
        assert q_pv2.unit == self.pv_unit
        # A unit whose structure does not match the dtype is rejected.
        with pytest.raises(ValueError):
            Quantity(self.pv, self.pv_t_unit)
        with pytest.raises(ValueError):
            Quantity(self.pv_t, self.pv_unit)

    def test_initialization_with_unit_tuples(self):
        q_pv_t = Quantity(self.pv_t, (("km", "km/s"), "s"))
        assert isinstance(q_pv_t.unit, StructuredUnit)
        assert q_pv_t.unit == self.pv_t_unit

    def test_initialization_with_string(self):
        q_pv_t = Quantity(self.pv_t, "(km, km/s), s")
        assert isinstance(q_pv_t.unit, StructuredUnit)
        assert q_pv_t.unit == self.pv_t_unit

    def test_initialization_by_multiplication_with_unit(self):
        q_pv_t = self.pv_t * self.pv_t_unit
        assert q_pv_t.unit is self.pv_t_unit
        assert np.all(q_pv_t.value == self.pv_t)
        assert not np.may_share_memory(q_pv_t, self.pv_t)
        q_pv_t2 = self.pv_t_unit * self.pv_t
        # Fixed: check the unit of the reversed product (this previously
        # re-asserted ``q_pv_t.unit``, leaving q_pv_t2's unit untested).
        assert q_pv_t2.unit is self.pv_t_unit
        # Not testing equality of structured Quantity here.
        assert np.all(q_pv_t2.value == q_pv_t.value)

    def test_initialization_by_shifting_to_unit(self):
        # << attaches the unit without copying the data.
        q_pv_t = self.pv_t << self.pv_t_unit
        assert q_pv_t.unit is self.pv_t_unit
        assert np.all(q_pv_t.value == self.pv_t)
        assert np.may_share_memory(q_pv_t, self.pv_t)

    def test_initialization_without_unit(self):
        q_pv_t = u.Quantity(self.pv_t, unit=None)
        assert np.all(q_pv_t.value == self.pv_t)
        # Test that unit is a structured unit like the dtype
        expected_unit = _structured_unit_like_dtype(
            u.Quantity._default_unit, self.pv_t.dtype
        )
        assert q_pv_t.unit == expected_unit
        # A more explicit test
        assert q_pv_t.unit == u.StructuredUnit(((u.one, u.one), u.one))

    def test_getitem(self):
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        # Slicing keeps class and unit.
        q_pv_t01 = q_pv_t[:2]
        assert isinstance(q_pv_t01, Quantity)
        assert q_pv_t01.unit == q_pv_t.unit
        assert np.all(q_pv_t01["t"] == q_pv_t["t"][:2])
        # Scalar indexing yields a shape-() Quantity.
        q_pv_t1 = q_pv_t[1]
        assert isinstance(q_pv_t1, Quantity)
        assert q_pv_t1.unit == q_pv_t.unit
        assert q_pv_t1.shape == ()
        assert q_pv_t1["t"] == q_pv_t["t"][1]

    def test_value(self):
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        value = q_pv_t.value
        assert type(value) is np.ndarray
        assert np.all(value == self.pv_t)
        # A scalar item comes out as np.void.
        value1 = q_pv_t[1].value
        assert type(value1) is np.void
        assert np.all(value1 == self.pv_t[1])

    def test_conversion(self):
        q_pv = Quantity(self.pv, self.pv_unit)
        q1 = q_pv.to(("AU", "AU/day"))
        assert isinstance(q1, Quantity)
        assert q1["p"].unit == u.AU
        assert q1["v"].unit == u.AU / u.day
        assert np.all(q1["p"] == q_pv["p"].to(u.AU))
        assert np.all(q1["v"] == q_pv["v"].to(u.AU / u.day))
        # Converting to the same unit still copies.
        q2 = q_pv.to(self.pv_unit)
        assert q2["p"].unit == self.p_unit
        assert q2["v"].unit == self.v_unit
        assert np.all(q2["p"].value == self.pv["p"])
        assert np.all(q2["v"].value == self.pv["v"])
        assert not np.may_share_memory(q2, q_pv)
        pv1 = q_pv.to_value(("AU", "AU/day"))
        assert type(pv1) is np.ndarray
        assert np.all(pv1["p"] == q_pv["p"].to_value(u.AU))
        assert np.all(pv1["v"] == q_pv["v"].to_value(u.AU / u.day))
        pv11 = q_pv[1].to_value(("AU", "AU/day"))
        assert type(pv11) is np.void
        assert pv11 == pv1[1]
        # Check recursion into the nested structure.
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        q2 = q_pv_t.to((("kpc", "kpc/Myr"), "Myr"))
        assert q2["pv"]["p"].unit == u.kpc
        assert q2["pv"]["v"].unit == u.kpc / u.Myr
        assert q2["t"].unit == u.Myr
        assert np.all(q2["pv"]["p"] == q_pv_t["pv"]["p"].to(u.kpc))
        assert np.all(q2["pv"]["v"] == q_pv_t["pv"]["v"].to(u.kpc / u.Myr))
        assert np.all(q2["t"] == q_pv_t["t"].to(u.Myr))

    def test_conversion_via_lshift(self):
        q_pv = Quantity(self.pv, self.pv_unit)
        q1 = q_pv << StructuredUnit(("AU", "AU/day"))
        assert isinstance(q1, Quantity)
        assert q1["p"].unit == u.AU
        assert q1["v"].unit == u.AU / u.day
        assert np.all(q1["p"] == q_pv["p"].to(u.AU))
        assert np.all(q1["v"] == q_pv["v"].to(u.AU / u.day))
        # Shifting to the same unit shares memory.
        q2 = q_pv << self.pv_unit
        assert q2["p"].unit == self.p_unit
        assert q2["v"].unit == self.v_unit
        assert np.all(q2["p"].value == self.pv["p"])
        assert np.all(q2["v"].value == self.pv["v"])
        assert np.may_share_memory(q2, q_pv)
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        q2 = q_pv_t << "(kpc,kpc/Myr),Myr"
        assert q2["pv"]["p"].unit == u.kpc
        assert q2["pv"]["v"].unit == u.kpc / u.Myr
        assert q2["t"].unit == u.Myr
        assert np.all(q2["pv"]["p"] == q_pv_t["pv"]["p"].to(u.kpc))
        assert np.all(q2["pv"]["v"] == q_pv_t["pv"]["v"].to(u.kpc / u.Myr))
        assert np.all(q2["t"] == q_pv_t["t"].to(u.Myr))

    def test_inplace_conversion(self):
        # In principle, in-place might be possible, in which case this should be
        # changed -- ie ``q1 is q_link``.
        q_pv = Quantity(self.pv, self.pv_unit)
        q1 = q_pv.copy()
        q_link = q1
        q1 <<= StructuredUnit(("AU", "AU/day"))
        assert q1 is not q_link
        assert q1["p"].unit == u.AU
        assert q1["v"].unit == u.AU / u.day
        assert np.all(q1["p"] == q_pv["p"].to(u.AU))
        assert np.all(q1["v"] == q_pv["v"].to(u.AU / u.day))
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        q2 = q_pv_t.copy()
        q_link = q2
        q2 <<= "(kpc,kpc/Myr),Myr"
        assert q2 is not q_link
        assert q2["pv"]["p"].unit == u.kpc
        assert q2["pv"]["v"].unit == u.kpc / u.Myr
        assert q2["t"].unit == u.Myr
        assert np.all(q2["pv"]["p"] == q_pv_t["pv"]["p"].to(u.kpc))
        assert np.all(q2["pv"]["v"] == q_pv_t["pv"]["v"].to(u.kpc / u.Myr))
        assert np.all(q2["t"] == q_pv_t["t"].to(u.Myr))

    def test_si(self):
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        q_pv_t_si = q_pv_t.si
        assert_array_equal(q_pv_t_si, q_pv_t.to("(m,m/s),s"))

    def test_cgs(self):
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        q_pv_t_cgs = q_pv_t.cgs
        assert_array_equal(q_pv_t_cgs, q_pv_t.to("(cm,cm/s),s"))

    def test_equality(self):
        q_pv = Quantity(self.pv, self.pv_unit)
        equal = q_pv == q_pv
        not_equal = q_pv != q_pv
        assert np.all(equal)
        assert not np.any(not_equal)
        equal2 = q_pv == q_pv[1]
        not_equal2 = q_pv != q_pv[1]
        assert np.all(equal2 == [False, True, False])
        assert np.all(not_equal2 != equal2)
        q1 = q_pv.to(("AU", "AU/day"))
        # Ensure same conversion is done, by placing q1 first.
        assert np.all(q1 == q_pv)
        assert not np.any(q1 != q_pv)
        # Check different names in dtype.
        assert np.all(q1.value * u.Unit("AU, AU/day") == q_pv)
        assert not np.any(q1.value * u.Unit("AU, AU/day") != q_pv)
        assert (q_pv == "b") is False
        assert ("b" != q_pv) is True
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        assert np.all((q_pv_t[2] == q_pv_t) == [False, False, True])
        assert np.all((q_pv_t[2] != q_pv_t) != [False, False, True])
        # Comparing structures of different layouts just returns False/True.
        assert (q_pv == q_pv_t) is False
        assert (q_pv_t != q_pv) is True

    def test_setitem(self):
        q_pv = Quantity(self.pv, self.pv_unit)
        q_pv[1] = (2.0, 2.0) * self.pv_unit
        assert q_pv[1].value == np.array((2.0, 2.0), self.pv_dtype)
        q_pv[1:2] = (1.0, 0.5) * u.Unit("AU, AU/day")
        assert q_pv["p"][1] == 1.0 * u.AU
        assert q_pv["v"][1] == 0.5 * u.AU / u.day
        q_pv["v"] = 1.0 * u.km / u.s
        assert np.all(q_pv["v"] == 1.0 * u.km / u.s)
        # Assignments with non-equivalent units must fail.
        with pytest.raises(u.UnitsError):
            q_pv[1] = (1.0, 1.0) * u.Unit("AU, AU")
        with pytest.raises(u.UnitsError):
            q_pv["v"] = 1.0 * u.km
        q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
        q_pv_t[1] = ((2.0, 2.0), 3.0) * self.pv_t_unit
        assert q_pv_t[1].value == np.array(((2.0, 2.0), 3.0), self.pv_t_dtype)
        q_pv_t[1:2] = ((1.0, 0.5), 5.0) * u.Unit("(AU, AU/day), yr")
        assert q_pv_t["pv"][1] == (1.0, 0.5) * u.Unit("AU, AU/day")
        assert q_pv_t["t"][1] == 5.0 * u.yr
        q_pv_t["pv"] = (1.0, 0.5) * self.pv_unit
        assert np.all(q_pv_t["pv"] == (1.0, 0.5) * self.pv_unit)
class TestStructuredQuantityFunctions(StructuredTestBaseWithUnits):
    """numpy functions and recfunctions applied to structured Quantities."""

    @classmethod
    def setup_class(self):
        super().setup_class()
        self.q_pv = self.pv << self.pv_unit
        self.q_pv_t = self.pv_t << self.pv_t_unit

    def test_empty_like(self):
        z = np.empty_like(self.q_pv)
        assert z.dtype == self.pv_dtype
        assert z.unit == self.pv_unit
        assert z.shape == self.pv.shape

    @pytest.mark.parametrize("func", [np.zeros_like, np.ones_like])
    def test_zeros_ones_like(self, func):
        z = func(self.q_pv)
        assert z.dtype == self.pv_dtype
        assert z.unit == self.pv_unit
        assert z.shape == self.pv.shape
        assert_array_equal(z, func(self.pv) << self.pv_unit)

    def test_structured_to_unstructured(self):
        # can't unstructure something with incompatible units
        with pytest.raises(u.UnitConversionError, match="'km / s'"):
            rfn.structured_to_unstructured(self.q_pv)
        # For the other tests of ``structured_to_unstructured``, see
        # ``test_quantity_non_ufuncs.TestRecFunctions.test_structured_to_unstructured``

    def test_unstructured_to_structured(self):
        # can't structure something that's already structured
        dtype = np.dtype([("f1", float), ("f2", float)])
        with pytest.raises(ValueError, match="The length of the last dimension"):
            # Fixed: pass the locally built target dtype; it was previously
            # created but unused (the call re-passed ``self.q_pv.dtype``).
            rfn.unstructured_to_structured(self.q_pv, dtype=dtype)
        # For the other tests of ``structured_to_unstructured``, see
        # ``test_quantity_non_ufuncs.TestRecFunctions.test_unstructured_to_structured``
class TestStructuredSpecificTypeQuantity(StructuredTestBaseWithUnits):
    """SpecificTypeQuantity restricted to a structured equivalent unit."""

    def setup_class(self):
        super().setup_class()

        # Subclass that only accepts units equivalent to (km, km/s).
        class PositionVelocity(u.SpecificTypeQuantity):
            _equivalent_unit = self.pv_unit

        self.PositionVelocity = PositionVelocity

    def test_init(self):
        pv = self.PositionVelocity(self.pv, self.pv_unit)
        assert isinstance(pv, self.PositionVelocity)
        # Field access drops back to a plain Quantity.
        assert type(pv["p"]) is u.Quantity
        assert_array_equal(pv["p"], self.pv["p"] << self.pv_unit["p"])
        pv2 = self.PositionVelocity(self.pv, "AU,AU/day")
        assert_array_equal(pv2["p"], self.pv["p"] << u.AU)

    def test_error_on_non_equivalent_unit(self):
        # Units not equivalent to the declared structure are rejected.
        with pytest.raises(u.UnitsError):
            self.PositionVelocity(self.pv, "AU")
        with pytest.raises(u.UnitsError):
            self.PositionVelocity(self.pv, "AU,yr")
class TestStructuredLogUnit:
    """Structured units containing logarithmic (magnitude) members."""

    def setup_class(self):
        self.mag_time_dtype = np.dtype([("mag", "f8"), ("t", "f8")])
        self.mag_time = np.array([(20.0, 10.0), (25.0, 100.0)], self.mag_time_dtype)

    def test_unit_initialization(self):
        mag_time_unit = StructuredUnit((u.STmag, u.s), self.mag_time_dtype)
        assert mag_time_unit["mag"] == u.STmag
        assert mag_time_unit["t"] == u.s
        # The same unit can also be parsed from its string form.
        mag_time_unit2 = u.Unit("mag(ST),s")
        assert mag_time_unit2 == mag_time_unit

    def test_quantity_initialization(self):
        su = u.Unit("mag(ST),s")
        mag_time = self.mag_time << su
        # The magnitude field comes out as a Magnitude instance.
        assert isinstance(mag_time["mag"], u.Magnitude)
        assert isinstance(mag_time["t"], u.Quantity)
        assert mag_time.unit == su
        assert_array_equal(mag_time["mag"], self.mag_time["mag"] << u.STmag)
        assert_array_equal(mag_time["t"], self.mag_time["t"] << u.s)

    def test_quantity_si(self):
        mag_time = self.mag_time << u.Unit("mag(ST),yr")
        mag_time_si = mag_time.si
        # .si is applied fieldwise.
        assert_array_equal(mag_time_si["mag"], mag_time["mag"].si)
        assert_array_equal(mag_time_si["t"], mag_time["t"].si)
class TestStructuredMaskedQuantity(StructuredTestBaseWithUnits):
    """Somewhat minimal tests. Conversion is most stringent."""

    def setup_class(self):
        super().setup_class()
        self.qpv = self.pv << self.pv_unit
        # Structured boolean mask matching the (p, v) fields.
        self.pv_mask = np.array(
            [
                (True, False),
                (False, False),
                (False, True),
            ],
            [("p", bool), ("v", bool)],
        )
        self.mpv = Masked(self.qpv, mask=self.pv_mask)

    def test_init(self):
        assert isinstance(self.mpv, Masked)
        assert isinstance(self.mpv, Quantity)
        assert_array_equal(self.mpv.unmasked, self.qpv)
        assert_array_equal(self.mpv.mask, self.pv_mask)

    def test_slicing(self):
        # Field access keeps both the Masked and Quantity nature.
        mp = self.mpv["p"]
        assert isinstance(mp, Masked)
        assert isinstance(mp, Quantity)
        assert_array_equal(mp.unmasked, self.qpv["p"])
        assert_array_equal(mp.mask, self.pv_mask["p"])

    def test_conversion(self):
        # Unit conversion preserves the mask unchanged.
        mpv = self.mpv.to("AU,AU/day")
        assert isinstance(mpv, Masked)
        assert isinstance(mpv, Quantity)
        assert_array_equal(mpv.unmasked, self.qpv.to("AU,AU/day"))
        assert_array_equal(mpv.mask, self.pv_mask)
        assert np.all(mpv == self.mpv)

    def test_si(self):
        mpv = self.mpv.si
        assert isinstance(mpv, Masked)
        assert isinstance(mpv, Quantity)
        assert_array_equal(mpv.unmasked, self.qpv.si)
        assert_array_equal(mpv.mask, self.pv_mask)
        assert np.all(mpv == self.mpv)
|
3207ff3e70e65a16fb75eccc9a07120279ee69d00f9f883032f01e095cba43fe | """
Test ``allclose`` and ``isclose``.
``allclose`` was ``quantity_allclose`` in ``astropy.tests.helper``.
"""
import numpy as np
import pytest
from astropy import units as u
@pytest.mark.parametrize(
    ("a", "b"),
    [
        ([1, 2], [1, 2]),
        ([1, 2] * u.m, [100, 200] * u.cm),
        (1 * u.s, 1000 * u.ms),
    ],
)
def test_allclose_isclose_default(a, b):
    """Equivalent values compare as close under the default tolerances."""
    assert np.all(u.isclose(a, b))
    assert u.allclose(a, b)
def test_allclose_isclose():
    """An ``atol`` in different-but-compatible units is honored, and
    values outside the tolerance are not close."""
    a = [1, 2] * u.m
    within = [101, 201] * u.cm
    tol = 2 * u.cm
    assert u.allclose(a, within, atol=tol)
    assert np.all(u.isclose(a, within, atol=tol))
    outside = [90, 200] * u.cm
    assert not u.allclose(a, outside)
    assert not np.all(u.isclose(a, outside))
|
7a36e1b72961adbfac49f31d1629b3f79c790ca1a5d20f3cd4f487999a605dcd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the Quantity class and related."""
import copy
import decimal
import numbers
import pickle
from fractions import Fraction
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
from astropy import units as u
from astropy.units.quantity import _UNIT_NOT_INITIALISED
from astropy.utils import isiterable, minversion
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
""" The Quantity class will represent a number + unit + uncertainty """
class TestQuantityCreation:
def test_1(self):
# create objects through operations with Unit objects:
quantity = 11.42 * u.meter # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = u.meter * 11.42 # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = 11.42 / u.meter
assert isinstance(quantity, u.Quantity)
quantity = u.meter / 11.42
assert isinstance(quantity, u.Quantity)
quantity = 11.42 * u.meter / u.second
assert isinstance(quantity, u.Quantity)
with pytest.raises(TypeError):
quantity = 182.234 + u.meter
with pytest.raises(TypeError):
quantity = 182.234 - u.meter
with pytest.raises(TypeError):
quantity = 182.234 % u.meter
def test_2(self):
# create objects using the Quantity constructor:
_ = u.Quantity(11.412, unit=u.meter)
_ = u.Quantity(21.52, "cm")
q3 = u.Quantity(11.412)
# By default quantities that don't specify a unit are unscaled
# dimensionless
assert q3.unit == u.Unit(1)
with pytest.raises(TypeError):
u.Quantity(object(), unit=u.m)
    def test_3(self):
        """An unparseable unit string raises an error."""
        # with pytest.raises(u.UnitsError):
        with pytest.raises(ValueError):  # Until @mdboom fixes the errors in units
            u.Quantity(11.412, unit="testingggg")
    def test_nan_inf(self):
        """NaN and infinity parse from strings, with or without a unit."""
        # Not-a-number
        q = u.Quantity("nan", unit="cm")
        assert np.isnan(q.value)
        q = u.Quantity("NaN", unit="cm")
        assert np.isnan(q.value)
        q = u.Quantity("-nan", unit="cm")  # float() allows this
        assert np.isnan(q.value)
        # The unit may be embedded in the string itself.
        q = u.Quantity("nan cm")
        assert np.isnan(q.value)
        assert q.unit == u.cm
        # Infinity
        q = u.Quantity("inf", unit="cm")
        assert np.isinf(q.value)
        q = u.Quantity("-inf", unit="cm")
        assert np.isinf(q.value)
        q = u.Quantity("inf cm")
        assert np.isinf(q.value)
        assert q.unit == u.cm
        q = u.Quantity("Infinity", unit="cm")  # float() allows this
        assert np.isinf(q.value)
        # make sure these strings don't parse...
        with pytest.raises(TypeError):
            q = u.Quantity("", unit="cm")
        with pytest.raises(TypeError):
            q = u.Quantity("spam", unit="cm")
def test_unit_property(self):
# test getting and setting 'unit' attribute
q1 = u.Quantity(11.4, unit=u.meter)
with pytest.raises(AttributeError):
q1.unit = u.cm
    def test_preserve_dtype(self):
        """Test that if an explicit dtype is given, it is used, while if not,
        numbers are converted to float (including decimal.Decimal, which
        numpy converts to an object; closes #1419)
        """
        # If dtype is specified, use it, but if not, convert int, bool to float
        q1 = u.Quantity(12, unit=u.m / u.s, dtype=int)
        assert q1.dtype == int
        # Re-wrapping without an explicit dtype converts to float.
        q2 = u.Quantity(q1)
        assert q2.dtype == float
        assert q2.value == float(q1.value)
        assert q2.unit == q1.unit
        # but we should preserve any float32 or even float16
        a3_32 = np.array([1.0, 2.0], dtype=np.float32)
        q3_32 = u.Quantity(a3_32, u.yr)
        assert q3_32.dtype == a3_32.dtype
        a3_16 = np.array([1.0, 2.0], dtype=np.float16)
        q3_16 = u.Quantity(a3_16, u.yr)
        assert q3_16.dtype == a3_16.dtype
        # items stored as objects by numpy should be converted to float
        # by default
        q4 = u.Quantity(decimal.Decimal("10.25"), u.m)
        assert q4.dtype == float
        # ... unless an object dtype is explicitly requested.
        q5 = u.Quantity(decimal.Decimal("10.25"), u.m, dtype=object)
        assert q5.dtype == object
def test_numpy_style_dtype_inspect(self):
"""Test that if ``dtype=None``, NumPy's dtype inspection is used."""
q2 = u.Quantity(12, dtype=None)
assert np.issubdtype(q2.dtype, np.integer)
    def test_float_dtype_promotion(self):
        """Test that if ``dtype=numpy.inexact``, the minimum precision is float64."""
        # Integers are promoted to float64, not merely "some float".
        q1 = u.Quantity(12, dtype=np.inexact)
        assert not np.issubdtype(q1.dtype, np.integer)
        assert q1.dtype == np.float64
        q2 = u.Quantity(np.float64(12), dtype=np.inexact)
        assert q2.dtype == np.float64
        # Existing lower-precision floats are kept as-is.
        q3 = u.Quantity(np.float32(12), dtype=np.inexact)
        assert q3.dtype == np.float32
        if hasattr(np, "float16"):
            q3 = u.Quantity(np.float16(12), dtype=np.inexact)
            assert q3.dtype == np.float16
        if hasattr(np, "float128"):
            q4 = u.Quantity(np.float128(12), dtype=np.inexact)
            assert q4.dtype == np.float128
    def test_copy(self):
        # By default, a new quantity is constructed, but not if copy=False
        a = np.arange(10.0)
        q0 = u.Quantity(a, unit=u.m / u.s)
        assert q0.base is not a
        q1 = u.Quantity(a, unit=u.m / u.s, copy=False)
        assert q1.base is a
        q2 = u.Quantity(q0)
        assert q2 is not q0
        assert q2.base is not q0.base
        q2 = u.Quantity(q0, copy=False)
        assert q2 is q0
        assert q2.base is q0.base
        # copy=False with the same unit also avoids a copy.
        q3 = u.Quantity(q0, q0.unit, copy=False)
        assert q3 is q0
        assert q3.base is q0.base
        # ... but a different unit forces a conversion, hence a copy.
        q4 = u.Quantity(q0, u.cm / u.s, copy=False)
        assert q4 is not q0
        assert q4.base is not q0.base
def test_subok(self):
    """Test subok can be used to keep class, or to insist on Quantity"""
    class MyQuantitySubclass(u.Quantity):
        pass
    myq = MyQuantitySubclass(np.arange(10.0), u.m)
    # try both with and without changing the unit
    # subok defaults to False, so the subclass is downgraded to Quantity
    # unless subok=True is passed explicitly.
    assert type(u.Quantity(myq)) is u.Quantity
    assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass
    assert type(u.Quantity(myq, u.km)) is u.Quantity
    assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass
def test_order(self):
    """Test that order is correctly propagated to np.array"""
    # C-ordered input, converted to each requested order.
    ac = np.array(np.arange(10.0), order="C")
    qcc = u.Quantity(ac, u.m, order="C")
    assert qcc.flags["C_CONTIGUOUS"]
    qcf = u.Quantity(ac, u.m, order="F")
    assert qcf.flags["F_CONTIGUOUS"]
    qca = u.Quantity(ac, u.m, order="A")
    assert qca.flags["C_CONTIGUOUS"]
    # check it works also when passing in a quantity
    assert u.Quantity(qcc, order="C").flags["C_CONTIGUOUS"]
    assert u.Quantity(qcc, order="A").flags["C_CONTIGUOUS"]
    assert u.Quantity(qcc, order="F").flags["F_CONTIGUOUS"]
    # Fortran-ordered input, converted to each requested order.
    af = np.array(np.arange(10.0), order="F")
    qfc = u.Quantity(af, u.m, order="C")
    assert qfc.flags["C_CONTIGUOUS"]
    # Use the Fortran-ordered array here: the original accidentally
    # reused ``ac``.  For 1-d arrays both contiguity flags are set, so
    # the assertion was unaffected, but ``af`` is what was meant.
    qff = u.Quantity(af, u.m, order="F")
    assert qff.flags["F_CONTIGUOUS"]
    qfa = u.Quantity(af, u.m, order="A")
    assert qfa.flags["F_CONTIGUOUS"]
    assert u.Quantity(qff, order="C").flags["C_CONTIGUOUS"]
    assert u.Quantity(qff, order="A").flags["F_CONTIGUOUS"]
    assert u.Quantity(qff, order="F").flags["F_CONTIGUOUS"]
def test_ndmin(self):
    """Test that ndmin is correctly propagated to np.array"""
    a = np.arange(10.0)
    q1 = u.Quantity(a, u.m, ndmin=1)
    assert q1.ndim == 1 and q1.shape == (10,)
    # ndmin pads leading axes of length 1, as np.array does.
    q2 = u.Quantity(a, u.m, ndmin=2)
    assert q2.ndim == 2 and q2.shape == (1, 10)
    # check it works also when passing in a quantity
    q3 = u.Quantity(q1, u.m, ndmin=3)
    assert q3.ndim == 3 and q3.shape == (1, 1, 10)
    # see github issue #10063
    # ndmin must apply even for scalar Quantity input, with or
    # without a unit conversion.
    assert u.Quantity(u.Quantity(1, "m"), "m", ndmin=1).ndim == 1
    assert u.Quantity(u.Quantity(1, "cm"), "m", ndmin=1).ndim == 1
def test_non_quantity_with_unit(self):
    """Test that unit attributes in objects get recognized."""
    class MyQuantityLookalike(np.ndarray):
        pass
    a = np.arange(3.0)
    mylookalike = a.copy().view(MyQuantityLookalike)
    mylookalike.unit = "m"
    # The ``unit`` attribute is picked up as the quantity's unit.
    q1 = u.Quantity(mylookalike)
    assert isinstance(q1, u.Quantity)
    assert q1.unit is u.m
    assert np.all(q1.value == a)
    # An explicit unit converts from the attribute's unit.
    q2 = u.Quantity(mylookalike, u.mm)
    assert q2.unit is u.mm
    assert np.all(q2.value == 1000.0 * a)
    # copy=False with the same unit shares the underlying data.
    q3 = u.Quantity(mylookalike, copy=False)
    assert np.all(q3.value == mylookalike)
    q3[2] = 0
    assert q3[2] == 0.0
    assert mylookalike[2] == 0.0
    mylookalike = a.copy().view(MyQuantityLookalike)
    mylookalike.unit = u.m
    # copy=False with a *different* unit still has to convert, so the
    # original data stays untouched.
    q4 = u.Quantity(mylookalike, u.mm, copy=False)
    q4[2] = 0
    assert q4[2] == 0.0
    assert mylookalike[2] == 2.0
    # An unparseable unit attribute raises.
    mylookalike.unit = "nonsense"
    with pytest.raises(TypeError):
        u.Quantity(mylookalike)
def test_creation_via_view(self):
    """Creation and conversion with the ``<<`` (view/convert) operator."""
    # This works but is no better than 1. * u.m
    q1 = 1.0 << u.m
    assert isinstance(q1, u.Quantity)
    assert q1.unit == u.m
    assert q1.value == 1.0
    # With an array, we get an actual view.
    a2 = np.arange(10.0)
    q2 = a2 << u.m / u.s
    assert isinstance(q2, u.Quantity)
    assert q2.unit == u.m / u.s
    assert np.all(q2.value == a2)
    a2[9] = 0.0
    assert np.all(q2.value == a2)
    # But with a unit change we get a copy.
    q3 = q2 << u.mm / u.s
    assert isinstance(q3, u.Quantity)
    assert q3.unit == u.mm / u.s
    assert np.all(q3.value == a2 * 1000.0)
    a2[8] = 0.0
    assert q3[8].value == 8000.0
    # Without a unit change, we do get a view.
    q4 = q2 << q2.unit
    a2[7] = 0.0
    assert np.all(q4.value == a2)
    with pytest.raises(u.UnitsError):
        q2 << u.s
    # But one can do an in-place unit change.
    a2_copy = a2.copy()
    q2 <<= u.mm / u.s
    assert q2.unit == u.mm / u.s
    # Of course, this changes a2 as well.
    assert np.all(q2.value == a2)
    # Sanity check on the values.
    assert np.all(q2.value == a2_copy * 1000.0)
    a2[8] = -1.0
    # Using quantities, one can also work with strings.
    q5 = q2 << "km/hr"
    assert q5.unit == u.km / u.hr
    assert np.all(q5 == q2)
    # Finally, we can use scalar quantities as units.
    not_quite_a_foot = 30.0 * u.cm
    a6 = np.arange(5.0)
    q6 = a6 << not_quite_a_foot
    assert q6.unit == u.Unit(not_quite_a_foot)
    assert np.all(q6.to_value(u.cm) == 30.0 * a6)
def test_rshift_warns(self):
    """``>>`` with units is not implemented: it raises and warns once."""
    # plain number >> unit
    with pytest.raises(TypeError), pytest.warns(
        AstropyWarning, match="is not implemented"
    ) as warning_lines:
        1 >> u.m
    assert len(warning_lines) == 1
    q = 1.0 * u.km
    # quantity >> unit
    with pytest.raises(TypeError), pytest.warns(
        AstropyWarning, match="is not implemented"
    ) as warning_lines:
        q >> u.m
    assert len(warning_lines) == 1
    # in-place quantity >>= unit
    with pytest.raises(TypeError), pytest.warns(
        AstropyWarning, match="is not implemented"
    ) as warning_lines:
        q >>= u.m
    assert len(warning_lines) == 1
    # plain number >> quantity
    with pytest.raises(TypeError), pytest.warns(
        AstropyWarning, match="is not implemented"
    ) as warning_lines:
        1.0 >> q
    assert len(warning_lines) == 1
class TestQuantityOperations:
    """Arithmetic, unary and conversion operations on Quantity."""
    # Shared fixtures: two lengths in different but compatible units.
    q1 = u.Quantity(11.42, u.meter)
    q2 = u.Quantity(8.0, u.centimeter)
    def test_addition(self):
        """Addition converts to the unit of the left operand."""
        # Take units from left object, q1
        new_quantity = self.q1 + self.q2
        assert new_quantity.value == 11.5
        assert new_quantity.unit == u.meter
        # Take units from left object, q2
        new_quantity = self.q2 + self.q1
        assert new_quantity.value == 1150.0
        assert new_quantity.unit == u.centimeter
        new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)
        assert new_q.unit == u.m
        assert new_q.value == 15000.1
    def test_subtraction(self):
        """Subtraction converts to the unit of the left operand."""
        # Take units from left object, q1
        new_quantity = self.q1 - self.q2
        assert new_quantity.value == 11.34
        assert new_quantity.unit == u.meter
        # Take units from left object, q2
        new_quantity = self.q2 - self.q1
        assert new_quantity.value == -1134.0
        assert new_quantity.unit == u.centimeter
    def test_multiplication(self):
        """Multiplication composes units; plain numbers keep the unit."""
        # Take units from left object, q1
        new_quantity = self.q1 * self.q2
        assert new_quantity.value == 91.36
        assert new_quantity.unit == (u.meter * u.centimeter)
        # Take units from left object, q2
        new_quantity = self.q2 * self.q1
        assert new_quantity.value == 91.36
        assert new_quantity.unit == (u.centimeter * u.meter)
        # Multiply with a number
        new_quantity = 15.0 * self.q1
        assert new_quantity.value == 171.3
        assert new_quantity.unit == u.meter
        # Multiply with a number
        new_quantity = self.q1 * 15.0
        assert new_quantity.value == 171.3
        assert new_quantity.unit == u.meter
    def test_division(self):
        """Division composes units; plain numbers keep or invert the unit."""
        # Take units from left object, q1
        new_quantity = self.q1 / self.q2
        assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)
        assert new_quantity.unit == (u.meter / u.centimeter)
        # Take units from left object, q2
        new_quantity = self.q2 / self.q1
        assert_array_almost_equal(new_quantity.value, 0.70052539404553416, decimal=16)
        assert new_quantity.unit == (u.centimeter / u.meter)
        q1 = u.Quantity(11.4, unit=u.meter)
        q2 = u.Quantity(10.0, unit=u.second)
        new_quantity = q1 / q2
        assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)
        assert new_quantity.unit == (u.meter / u.second)
        # divide with a number
        new_quantity = self.q1 / 10.0
        assert new_quantity.value == 1.142
        assert new_quantity.unit == u.meter
        # divide with a number
        new_quantity = 11.42 / self.q1
        assert new_quantity.value == 1.0
        assert new_quantity.unit == u.Unit("1/m")
    def test_commutativity(self):
        """Regression test for issue #587."""
        new_q = u.Quantity(11.42, "m*s")
        assert self.q1 * u.s == u.s * self.q1 == new_q
        assert self.q1 / u.s == u.Quantity(11.42, "m/s")
        assert u.s / self.q1 == u.Quantity(1 / 11.42, "s/m")
    def test_power(self):
        """Powers raise both the value and the unit."""
        # raise quantity to a power
        new_quantity = self.q1**2
        assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)
        assert new_quantity.unit == u.Unit("m^2")
        new_quantity = self.q1**3
        assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)
        assert new_quantity.unit == u.Unit("m^3")
    def test_matrix_multiplication(self):
        """``@`` works with quantities on either or both sides."""
        a = np.eye(3)
        q = a * u.m
        result1 = q @ a
        assert np.all(result1 == q)
        result2 = a @ q
        assert np.all(result2 == q)
        result3 = q @ q
        assert np.all(result3 == a * u.m**2)
        # Stack of permutation matrices with a unit of its own.
        q2 = np.array(
            [[[1., 0., 0.],
              [0., 1., 0.],
              [0., 0., 1.]],
             [[0., 1., 0.],
              [0., 0., 1.],
              [1., 0., 0.]],
             [[0., 0., 1.],
              [1., 0., 0.],
              [0., 1., 0.]]]
        ) / u.s  # fmt: skip
        result4 = q @ q2
        assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit)
    def test_unary(self):
        """Unary minus/plus act on the value and keep the unit."""
        # Test the minus unary operator
        new_quantity = -self.q1
        assert new_quantity.value == -self.q1.value
        assert new_quantity.unit == self.q1.unit
        new_quantity = -(-self.q1)
        assert new_quantity.value == self.q1.value
        assert new_quantity.unit == self.q1.unit
        # Test the plus unary operator
        new_quantity = +self.q1
        assert new_quantity.value == self.q1.value
        assert new_quantity.unit == self.q1.unit
    def test_abs(self):
        """abs() acts on the value and keeps the unit."""
        q = 1.0 * u.m / u.s
        new_quantity = abs(q)
        assert new_quantity.value == q.value
        assert new_quantity.unit == q.unit
        q = -1.0 * u.m / u.s
        new_quantity = abs(q)
        assert new_quantity.value == -q.value
        assert new_quantity.unit == q.unit
    def test_incompatible_units(self):
        """When trying to add or subtract units that aren't compatible, throw an error"""
        q1 = u.Quantity(11.412, unit=u.meter)
        q2 = u.Quantity(21.52, unit=u.second)
        with pytest.raises(u.UnitsError):
            q1 + q2
    def test_non_number_type(self):
        """Adding non-numeric objects (or bare units) raises TypeError."""
        q1 = u.Quantity(11.412, unit=u.meter)
        with pytest.raises(TypeError) as exc:
            q1 + {"a": 1}
        assert exc.value.args[0].startswith(
            "Unsupported operand type(s) for ufunc add:"
        )
        with pytest.raises(TypeError):
            q1 + u.meter
    def test_dimensionless_operations(self):
        """Arithmetic mixing dimensionless quantities and plain numbers."""
        # test conversion to dimensionless
        dq = 3.0 * u.m / u.km
        dq1 = dq + 1.0 * u.mm / u.km
        assert dq1.value == 3.001
        assert dq1.unit == dq.unit
        # Adding a plain number converts to dimensionless_unscaled first.
        dq2 = dq + 1.0
        assert dq2.value == 1.003
        assert dq2.unit == u.dimensionless_unscaled
        # this test will check that operations with dimensionless Quantities
        # don't work
        with pytest.raises(u.UnitsError):
            self.q1 + u.Quantity(0.1, unit=u.Unit(""))
        with pytest.raises(u.UnitsError):
            self.q1 - u.Quantity(0.1, unit=u.Unit(""))
        # and test that scaling of integers works
        q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
        q2 = q + np.array([4, 5, 6])
        assert q2.unit == u.dimensionless_unscaled
        assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
        # but not if doing it inplace
        with pytest.raises(TypeError):
            q += np.array([1, 2, 3])
        # except if it is actually possible
        q = np.array([1, 2, 3]) * u.km / u.m
        q += np.array([4, 5, 6])
        assert q.unit == u.dimensionless_unscaled
        assert np.all(q.value == np.array([1004, 2005, 3006]))
    def test_complicated_operation(self):
        """Perform a more complicated test"""
        from astropy.units import imperial
        # Multiple units
        distance = u.Quantity(15.0, u.meter)
        time = u.Quantity(11.0, u.second)
        velocity = (distance / time).to(imperial.mile / u.hour)
        assert_array_almost_equal(velocity.value, 3.05037, decimal=5)
        G = u.Quantity(6.673e-11, u.m**3 / u.kg / u.s**2)
        _ = (1.0 / (4.0 * np.pi * G)).to(u.pc**-3 / u.s**-2 * u.kg)
        # Area
        side1 = u.Quantity(11.0, u.centimeter)
        side2 = u.Quantity(7.0, u.centimeter)
        area = side1 * side2
        assert_array_almost_equal(area.value, 77.0, decimal=15)
        assert area.unit == u.cm * u.cm
    def test_comparison(self):
        """Equality across units, with bare units, and truthiness."""
        # equality/ non-equality is straightforward for quantity objects
        assert (1 / (u.cm * u.cm)) == 1 * u.cm**-2
        assert 1 * u.m == 100 * u.cm
        assert 1 * u.m != 1 * u.cm
        # when one is a unit, Quantity does not know what to do,
        # but unit is fine with it, so it still works
        unit = u.cm**3
        q = 1.0 * unit
        assert q.__eq__(unit) is NotImplemented
        assert unit.__eq__(q) is True
        assert q == unit
        q = 1000.0 * u.mm**3
        assert q == unit
        # mismatched types should never work
        assert not 1.0 * u.cm == 1.0
        assert 1.0 * u.cm != 1.0
        # comparison with zero should raise a deprecation warning
        for quantity in (1.0 * u.cm, 1.0 * u.dimensionless_unscaled):
            with pytest.warns(
                AstropyDeprecationWarning,
                match=(
                    "The truth value of a Quantity is ambiguous. "
                    "In the future this will raise a ValueError."
                ),
            ):
                bool(quantity)
    def test_numeric_converters(self):
        # float, int, long, and __index__ should only work for single
        # quantities, of appropriate type, and only if they are dimensionless.
        # for index, this should be unscaled as well
        # (Check on __index__ is also a regression test for #1557)
        # quantities with units should never convert, or be usable as an index
        q1 = u.Quantity(1, u.m)
        converter_err_msg = (
            "only dimensionless scalar quantities can be converted to Python scalars"
        )
        index_err_msg = (
            "only integer dimensionless scalar quantities "
            "can be converted to a Python index"
        )
        with pytest.raises(TypeError) as exc:
            float(q1)
        assert exc.value.args[0] == converter_err_msg
        with pytest.raises(TypeError) as exc:
            int(q1)
        assert exc.value.args[0] == converter_err_msg
        # We used to test `q1 * ['a', 'b', 'c']` here, but that it worked
        # at all was a really odd confluence of bugs. Since it doesn't work
        # in numpy >=1.10 any more, just go directly for `__index__` (which
        # makes the test more similar to the `int`, `long`, etc., tests).
        with pytest.raises(TypeError) as exc:
            q1.__index__()
        assert exc.value.args[0] == index_err_msg
        # dimensionless but scaled is OK, however
        q2 = u.Quantity(1.23, u.m / u.km)
        assert float(q2) == float(q2.to_value(u.dimensionless_unscaled))
        assert int(q2) == int(q2.to_value(u.dimensionless_unscaled))
        with pytest.raises(TypeError) as exc:
            q2.__index__()
        assert exc.value.args[0] == index_err_msg
        # dimensionless unscaled is OK, though for index needs to be int
        q3 = u.Quantity(1.23, u.dimensionless_unscaled)
        assert float(q3) == 1.23
        assert int(q3) == 1
        with pytest.raises(TypeError) as exc:
            q3.__index__()
        assert exc.value.args[0] == index_err_msg
        # integer dimensionless unscaled is good for all
        q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
        assert float(q4) == 2.0
        assert int(q4) == 2
        assert q4.__index__() == 2
        # but arrays are not OK
        q5 = u.Quantity([1, 2], u.m)
        with pytest.raises(TypeError) as exc:
            float(q5)
        assert exc.value.args[0] == converter_err_msg
        with pytest.raises(TypeError) as exc:
            int(q5)
        assert exc.value.args[0] == converter_err_msg
        with pytest.raises(TypeError) as exc:
            q5.__index__()
        assert exc.value.args[0] == index_err_msg
    # See https://github.com/numpy/numpy/issues/5074
    # It seems unlikely this will be resolved, so xfail'ing it.
    @pytest.mark.xfail(reason="list multiplication only works for numpy <=1.10")
    def test_numeric_converter_to_index_in_practice(self):
        """Test that use of __index__ actually works."""
        q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
        assert q4 * ["a", "b", "c"] == ["a", "b", "c", "a", "b", "c"]
    def test_array_converters(self):
        """np.array() on a Quantity yields the bare values."""
        # Scalar quantity
        q = u.Quantity(1.23, u.m)
        assert np.all(np.array(q) == np.array([1.23]))
        # Array quantity
        q = u.Quantity([1.0, 2.0, 3.0], u.m)
        assert np.all(np.array(q) == np.array([1.0, 2.0, 3.0]))
def test_quantity_conversion():
    """Conversion via ``to()``/``to_value()`` and failure for bad units."""
    q = u.Quantity(0.1, unit=u.meter)
    assert q.value == 0.1
    # to_value() returns a bare number in the requested unit.
    assert q.to_value(u.kilometer) == 0.0001
    # to() returns a new Quantity in the requested unit.
    converted = q.to(u.kilometer)
    assert converted.value == 0.0001
    # Incompatible units raise UnitsError from both conversion paths.
    for convert in (q.to, q.to_value):
        with pytest.raises(u.UnitsError):
            convert(u.zettastokes)
def test_quantity_ilshift():  # in-place conversion
    """In-place ``<<=`` conversion respects enabled equivalencies."""
    q = u.Quantity(10, unit=u.one)
    # Incompatible units. This goes through ilshift and hits a
    # UnitConversionError first in ilshift, then in the unit's rlshift.
    with pytest.raises(u.UnitConversionError):
        q <<= u.rad
    # unless the equivalency is enabled
    with u.add_enabled_equivalencies(u.dimensionless_angles()):
        q <<= u.rad
    assert np.isclose(q, 10 * u.rad)
def test_regression_12964():
    """In-place ``<<=`` on an int quantity must promote the dtype."""
    # This will fail if the fix to
    # https://github.com/astropy/astropy/issues/12964 doesn't work.
    x = u.Quantity(10, u.km, dtype=int)
    x <<= u.pc
    # We add a test that this worked.
    assert x.unit is u.pc
    assert x.dtype == np.float64
def test_quantity_value_views():
    """value/to_value return views for the same unit, copies otherwise."""
    q1 = u.Quantity([1.0, 2.0], unit=u.meter)
    # views if the unit is the same.
    v1 = q1.value
    v1[0] = 0.0
    assert np.all(q1 == [0.0, 2.0] * u.meter)
    v2 = q1.to_value()
    v2[1] = 3.0
    assert np.all(q1 == [0.0, 3.0] * u.meter)
    v3 = q1.to_value("m")
    v3[0] = 1.0
    assert np.all(q1 == [1.0, 3.0] * u.meter)
    # to() with copy=False and the same unit shares data as well.
    q2 = q1.to("m", copy=False)
    q2[0] = 2 * u.meter
    assert np.all(q1 == [2.0, 3.0] * u.meter)
    v4 = q1.to_value("cm")
    v4[0] = 0.0
    # copy if different unit.
    assert np.all(q1 == [2.0, 3.0] * u.meter)
def test_quantity_conversion_with_equiv():
    """to()/to_value() accept equivalencies for otherwise-incompatible units."""
    # spectral: wavelength <-> frequency
    q1 = u.Quantity(0.1, unit=u.meter)
    v2 = q1.to_value(u.Hz, equivalencies=u.spectral())
    assert_allclose(v2, 2997924580.0)
    q2 = q1.to(u.Hz, equivalencies=u.spectral())
    assert_allclose(q2.value, v2)
    # parallax: angle <-> distance, applied in both directions
    q1 = u.Quantity(0.4, unit=u.arcsecond)
    v2 = q1.to_value(u.au, equivalencies=u.parallax())
    q2 = q1.to(u.au, equivalencies=u.parallax())
    v3 = q2.to_value(u.arcminute, equivalencies=u.parallax())
    q3 = q2.to(u.arcminute, equivalencies=u.parallax())
    assert_allclose(v2, 515662.015)
    assert_allclose(q2.value, v2)
    assert q2.unit == u.au
    assert_allclose(v3, 0.0066666667)
    assert_allclose(q3.value, v3)
    assert q3.unit == u.arcminute
def test_quantity_conversion_equivalency_passed_on():
    """A subclass's default ``_equivalencies`` is used by ``to()``."""
    class MySpectral(u.Quantity):
        # Class-level default equivalencies picked up by conversions.
        _equivalencies = u.spectral()
        def __quantity_view__(self, obj, unit):
            return obj.view(MySpectral)
        def __quantity_instance__(self, *args, **kwargs):
            return MySpectral(*args, **kwargs)
    # Frequency <-> wavelength round trip without passing equivalencies.
    q1 = MySpectral([1000, 2000], unit=u.Hz)
    q2 = q1.to(u.nm)
    assert q2.unit == u.nm
    q3 = q2.to(u.Hz)
    assert q3.unit == u.Hz
    assert_allclose(q3.value, q1.value)
    q4 = MySpectral([1000, 2000], unit=u.nm)
    q5 = q4.to(u.Hz).to(u.nm)
    assert q5.unit == u.nm
    assert_allclose(q4.value, q5.value)
def test_self_equivalency():
    """Regression test for issue #2315: divide-by-zero error when
    examining 0*unit."""
    for factor in (0, 1):
        assert u.deg.is_equivalent(factor * u.radian)
def test_si():
    """The ``.si`` property decomposes into SI units."""
    compound = 10.0 * u.m * u.s**2 / (200.0 * u.ms) ** 2  # 250 meters
    assert compound.si.value == 250
    assert compound.si.unit == u.m
    plain = 10.0 * u.m  # 10 meters
    assert plain.si.value == 10
    assert plain.si.unit == u.m
    inverse = 10.0 / u.m  # 10 1 / meters
    assert inverse.si.value == 10
    assert inverse.si.unit == (1 / u.m)
def test_cgs():
    """The ``.cgs`` property converts to CGS units."""
    q1 = 10.0 * u.cm * u.s**2 / (200.0 * u.ms) ** 2  # 250 centimeters
    assert q1.cgs.value == 250
    assert q1.cgs.unit == u.cm
    q = 10.0 * u.m  # 10 meters
    assert q.cgs.value == 1000
    assert q.cgs.unit == u.cm
    q = 10.0 / u.cm  # 10 1 / centimeters
    assert q.cgs.value == 10
    assert q.cgs.unit == (1 / u.cm)
    q = 10.0 * u.Pa  # 10 pascals
    assert q.cgs.value == 100
    assert q.cgs.unit == u.barye
class TestQuantityComparison:
    """Equality and ordering comparisons between quantities."""
    def test_quantity_equality(self):
        """Scalar ==/!= convert units; incompatible units give False/True."""
        assert u.Quantity(1000, unit="m") == u.Quantity(1, unit="km")
        assert not (u.Quantity(1, unit="m") == u.Quantity(1, unit="km"))
        # for ==, !=, return False, True if units do not match
        assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True
        assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False
        assert (u.Quantity(0, unit=u.m) == u.Quantity(0, unit=u.s)) is False
        # But allow comparison with 0, +/-inf if latter unitless
        assert u.Quantity(0, u.m) == 0.0
        assert u.Quantity(1, u.m) != 0.0
        assert u.Quantity(1, u.m) != np.inf
        assert u.Quantity(np.inf, u.m) == np.inf
    def test_quantity_equality_array(self):
        """Array ==/!= are elementwise when comparable, scalar otherwise."""
        a = u.Quantity([0.0, 1.0, 1000.0], u.m)
        b = u.Quantity(1.0, u.km)
        eq = a == b
        ne = a != b
        assert np.all(eq == [False, False, True])
        assert np.all(eq != ne)
        # For mismatched units, we should just get True, False
        c = u.Quantity(1.0, u.s)
        eq = a == c
        ne = a != c
        assert eq is False
        assert ne is True
        # Constants are treated as dimensionless, so False too.
        eq = a == 1.0
        ne = a != 1.0
        assert eq is False
        assert ne is True
        # But 0 can have any units, so we can compare.
        eq = a == 0
        ne = a != 0
        assert np.all(eq == [True, False, False])
        assert np.all(eq != ne)
        # But we do not extend that to arrays; they should have the same unit.
        d = np.array([0, 1.0, 1000.0])
        eq = a == d
        ne = a != d
        assert eq is False
        assert ne is True
    def test_quantity_comparison(self):
        """Ordering converts units; incompatible units raise UnitsError."""
        assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer)
        assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer)
        with pytest.raises(u.UnitsError):
            assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second)
        with pytest.raises(u.UnitsError):
            assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second)
        assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
        assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
        assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
        assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
        with pytest.raises(u.UnitsError):
            assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.second)
        with pytest.raises(u.UnitsError):
            assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second)
        assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer)
class TestQuantityDisplay:
    """str/repr/format/LaTeX rendering of quantities."""
    # Shared fixtures covering int, float, array and complex values.
    scalarintq = u.Quantity(1, unit="m", dtype=int)
    scalarfloatq = u.Quantity(1.3, unit="m")
    arrq = u.Quantity([1, 2.3, 8.9], unit="m")
    scalar_complex_q = u.Quantity(complex(1.0, 2.0))
    scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25)
    scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36)
    arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36))
    big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36))
    def test_dimensionless_quantity_repr(self):
        """repr of dimensionless results omits the unit."""
        q2 = u.Quantity(1.0, unit="m-1")
        q3 = u.Quantity(1, unit="m-1", dtype=int)
        assert repr(self.scalarintq * q2) == "<Quantity 1.>"
        assert repr(self.arrq * q2) == "<Quantity [1. , 2.3, 8.9]>"
        assert repr(self.scalarintq * q3) == "<Quantity 1>"
    def test_dimensionless_quantity_str(self):
        """str of dimensionless results shows only the value."""
        q2 = u.Quantity(1.0, unit="m-1")
        q3 = u.Quantity(1, unit="m-1", dtype=int)
        assert str(self.scalarintq * q2) == "1.0"
        assert str(self.scalarintq * q3) == "1"
        assert str(self.arrq * q2) == "[1. 2.3 8.9]"
    def test_dimensionless_quantity_format(self):
        """format() on a dimensionless quantity formats the bare value."""
        q1 = u.Quantity(3.14)
        assert format(q1, ".2f") == "3.14"
        assert f"{q1:cds}" == "3.14"
    def test_scalar_quantity_str(self):
        assert str(self.scalarintq) == "1 m"
        assert str(self.scalarfloatq) == "1.3 m"
    def test_scalar_quantity_repr(self):
        assert repr(self.scalarintq) == "<Quantity 1 m>"
        assert repr(self.scalarfloatq) == "<Quantity 1.3 m>"
    def test_array_quantity_str(self):
        assert str(self.arrq) == "[1. 2.3 8.9] m"
    def test_array_quantity_repr(self):
        assert repr(self.arrq) == "<Quantity [1. , 2.3, 8.9] m>"
    def test_scalar_quantity_format(self):
        """Format specs apply to the value; the unit is appended."""
        assert format(self.scalarintq, "02d") == "01 m"
        assert format(self.scalarfloatq, ".1f") == "1.3 m"
        assert format(self.scalarfloatq, ".0f") == "1 m"
        assert f"{self.scalarintq:cds}" == "1 m"
        assert f"{self.scalarfloatq:cds}" == "1.3 m"
    def test_uninitialized_unit_format(self):
        """A view-created quantity without a unit shows a placeholder."""
        bad_quantity = np.arange(10.0).view(u.Quantity)
        assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)
        assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + ">")
    def test_to_string(self):
        """to_string() supports unit, precision, format and subfmt options."""
        qscalar = u.Quantity(1.5e14, "m/s")
        # __str__ is the default `format`
        assert str(qscalar) == qscalar.to_string()
        res = "Quantity as KMS: 150000000000.0 km / s"
        assert f"Quantity as KMS: {qscalar.to_string(unit=u.km / u.s)}" == res
        # With precision set
        res = "Quantity as KMS: 1.500e+11 km / s"
        assert (
            f"Quantity as KMS: {qscalar.to_string(precision=3, unit=u.km / u.s)}" == res
        )
        res = r"$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
        assert qscalar.to_string(format="latex") == res
        assert qscalar.to_string(format="latex", subfmt="inline") == res
        res = r"$\displaystyle 1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
        assert qscalar.to_string(format="latex", subfmt="display") == res
        res = r"$1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$"
        assert qscalar.to_string(format="latex_inline") == res
        assert qscalar.to_string(format="latex_inline", subfmt="inline") == res
        res = r"$\displaystyle 1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$"
        assert qscalar.to_string(format="latex_inline", subfmt="display") == res
        res = "[0 1 2] (Unit not initialised)"
        assert np.arange(3).view(u.Quantity).to_string() == res
    def test_repr_latex(self):
        """_repr_latex_ output, including precision and array thresholding."""
        from astropy.units.quantity import conf
        q2scalar = u.Quantity(1.5e14, "m/s")
        assert self.scalarintq._repr_latex_() == r"$1 \; \mathrm{m}$"
        assert self.scalarfloatq._repr_latex_() == r"$1.3 \; \mathrm{m}$"
        assert (
            q2scalar._repr_latex_() == r"$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
        )
        assert self.arrq._repr_latex_() == r"$[1,~2.3,~8.9] \; \mathrm{m}$"
        # Complex quantities
        assert self.scalar_complex_q._repr_latex_() == r"$(1+2i) \; \mathrm{}$"
        assert (
            self.scalar_big_complex_q._repr_latex_()
            == r"$(1 \times 10^{25}+2 \times 10^{52}i) \; \mathrm{}$"
        )
        assert (
            self.scalar_big_neg_complex_q._repr_latex_()
            == r"$(-1 \times 10^{36}-2 \times 10^{63}i) \; \mathrm{}$"
        )
        assert self.arr_complex_q._repr_latex_() == (
            r"$[(0-0i),~(-1 \times 10^{36}-2 \times 10^{63}i),"
            r"~(-2 \times 10^{36}-4 \times 10^{63}i)] \; \mathrm{}$"
        )
        assert r"\dots" in self.big_arr_complex_q._repr_latex_()
        qmed = np.arange(100) * u.m
        qbig = np.arange(1000) * u.m
        qvbig = np.arange(10000) * 1e9 * u.m
        # Save global state so we can restore it in the finally block.
        pops = np.get_printoptions()
        oldlat = conf.latex_array_threshold
        try:
            # check precision behavior
            q = u.Quantity(987654321.123456789, "m/s")
            qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm
            np.set_printoptions(precision=8)
            assert (
                q._repr_latex_() == r"$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$"
            )
            assert (
                qa._repr_latex_()
                == r"$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$"
            )
            np.set_printoptions(precision=2)
            assert q._repr_latex_() == r"$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$"
            assert qa._repr_latex_() == r"$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$"
            # check thresholding behavior
            conf.latex_array_threshold = 100  # should be default
            lsmed = qmed._repr_latex_()
            assert r"\dots" not in lsmed
            lsbig = qbig._repr_latex_()
            assert r"\dots" in lsbig
            lsvbig = qvbig._repr_latex_()
            assert r"\dots" in lsvbig
            conf.latex_array_threshold = 1001
            lsmed = qmed._repr_latex_()
            assert r"\dots" not in lsmed
            lsbig = qbig._repr_latex_()
            assert r"\dots" not in lsbig
            lsvbig = qvbig._repr_latex_()
            assert r"\dots" in lsvbig
            conf.latex_array_threshold = -1  # means use the numpy threshold
            np.set_printoptions(threshold=99)
            lsmed = qmed._repr_latex_()
            assert r"\dots" in lsmed
            lsbig = qbig._repr_latex_()
            assert r"\dots" in lsbig
            lsvbig = qvbig._repr_latex_()
            assert r"\dots" in lsvbig
            assert lsvbig.endswith(",~1 \\times 10^{13}] \\; \\mathrm{m}$")
        finally:
            # prevent side-effects from influencing other tests
            np.set_printoptions(**pops)
            conf.latex_array_threshold = oldlat
        qinfnan = [np.inf, -np.inf, np.nan] * u.m
        assert qinfnan._repr_latex_() == r"$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$"
def test_decompose():
    """Decomposing a newton yields kg m / s2 with the value scaled."""
    force = 5 * u.N
    assert force.decompose() == (5 * u.kg * u.m * u.s**-2)
def test_decompose_regression():
    """
    Regression test for bug #1163
    If decompose was called multiple times on a Quantity with an array and a
    scale != 1, the result changed every time. This is because the value was
    being referenced not copied, then modified, which changed the original
    value.
    """
    q = np.array([1, 2, 3]) * u.m / (2.0 * u.km)
    assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
    # The original must be unchanged, and a second decompose must agree.
    assert np.all(q == np.array([1, 2, 3]) * u.m / (2.0 * u.km))
    assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
def test_arrays():
    """
    Test using quantities with array values
    """
    qsec = u.Quantity(np.arange(10), u.second)
    assert isinstance(qsec.value, np.ndarray)
    assert not qsec.isscalar
    # len and indexing should work for arrays
    assert len(qsec) == len(qsec.value)
    qsecsub25 = qsec[2:5]
    assert qsecsub25.unit == qsec.unit
    assert isinstance(qsecsub25, u.Quantity)
    assert len(qsecsub25) == 3
    # make sure isscalar, len, and indexing behave correctly for non-arrays.
    qsecnotarray = u.Quantity(10.0, u.second)
    assert qsecnotarray.isscalar
    with pytest.raises(TypeError):
        len(qsecnotarray)
    with pytest.raises(TypeError):
        qsecnotarray[0]
    qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)
    # 0d numpy array should act basically like a scalar
    assert qseclen0array.isscalar
    with pytest.raises(TypeError):
        len(qseclen0array)
    with pytest.raises(TypeError):
        qseclen0array[0]
    assert isinstance(qseclen0array.value, numbers.Integral)
    # structured arrays: field access and indexing keep the unit
    a = np.array(
        [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)],
        dtype=[("x", float), ("y", float), ("z", float)],
    )
    qkpc = u.Quantity(a, u.kpc)
    assert not qkpc.isscalar
    qkpc0 = qkpc[0]
    assert qkpc0.value == a[0]
    assert qkpc0.unit == qkpc.unit
    assert isinstance(qkpc0, u.Quantity)
    assert qkpc0.isscalar
    qkpcx = qkpc["x"]
    assert np.all(qkpcx.value == a["x"])
    assert qkpcx.unit == qkpc.unit
    assert isinstance(qkpcx, u.Quantity)
    assert not qkpcx.isscalar
    qkpcx1 = qkpc["x"][1]
    assert qkpcx1.unit == qkpc.unit
    assert isinstance(qkpcx1, u.Quantity)
    assert qkpcx1.isscalar
    qkpc1x = qkpc[1]["x"]
    assert qkpc1x.isscalar
    assert qkpc1x == qkpcx1
    # can also create from lists, will auto-convert to arrays
    qsec = u.Quantity(list(range(10)), u.second)
    assert isinstance(qsec.value, np.ndarray)
    # quantity math should work with arrays
    assert_array_equal((qsec * 2).value, (np.arange(10) * 2))
    assert_array_equal((qsec / 2).value, (np.arange(10) / 2))
    # quantity addition/subtraction should *not* work with arrays b/c unit
    # ambiguous
    with pytest.raises(u.UnitsError):
        assert_array_equal((qsec + 2).value, (np.arange(10) + 2))
    with pytest.raises(u.UnitsError):
        assert_array_equal((qsec - 2).value, (np.arange(10) + 2))
    # should create by unit multiplication, too
    qsec2 = np.arange(10) * u.second
    qsec3 = u.second * np.arange(10)
    assert np.all(qsec == qsec2)
    assert np.all(qsec2 == qsec3)
    # make sure numerical-converters fail when arrays are present
    with pytest.raises(TypeError):
        float(qsec)
    with pytest.raises(TypeError):
        int(qsec)
def test_array_indexing_slicing():
    """Indexing and slicing an array Quantity preserve the unit."""
    arr_q = np.array([1.0, 2.0, 3.0]) * u.m
    assert arr_q[0] == 1.0 * u.m
    assert np.all(arr_q[0:2] == u.Quantity([1.0, 2.0], u.m))
def test_array_setslice():
    """Slice assignment converts the assigned value to the array's unit."""
    arr_q = np.array([1.0, 2.0, 3.0]) * u.m
    arr_q[1:2] = np.array([400.0]) * u.cm  # 400 cm -> 4 m
    assert np.all(arr_q == np.array([1.0, 4.0, 3.0]) * u.m)
def test_inverse_quantity():
    """
    Regression test from issue #679
    """
    q = u.Quantity(4.0, u.meter / u.second)
    # Dividing a quantity by a plain number or 0-d array keeps the unit ...
    half = q / 2
    assert half.value == 2.0
    assert half.unit == (u.meter / u.second)
    np_half = q / np.array(2)
    assert np_half.value == 2.0
    assert np_half.unit == (u.meter / u.second)
    # ... while dividing a number *by* a quantity inverts the unit.
    inverse = 2 / q
    assert inverse.value == 0.5
    assert inverse.unit == (u.second / u.meter)
def test_quantity_mutability():
    """``value`` and ``unit`` are read-only attributes."""
    q = u.Quantity(9.8, u.meter / u.second / u.second)
    for attribute, new_value in (("value", 3), ("unit", u.kg)):
        with pytest.raises(AttributeError):
            setattr(q, attribute, new_value)
def test_quantity_initialized_with_quantity():
    """Quantity input (scalar or in a list) is converted to the new unit."""
    q1 = u.Quantity(60, u.second)
    q2 = u.Quantity(q1, u.minute)
    assert q2.value == 1
    # Lists of quantities convert each element to the requested unit.
    q3 = u.Quantity([q1, q2], u.second)
    assert q3[0].value == 60
    assert q3[1].value == 60
    # Without an explicit unit, the first element's unit is adopted.
    q4 = u.Quantity([q2, q1])
    assert q4.unit == q2.unit
    assert q4[0].value == 1
    assert q4[1].value == 1
def test_quantity_string_unit():
    """Strings on either side of * or / are parsed as units."""
    speed = 1.0 * u.m / "s"
    assert speed.value == 1
    assert speed.unit == (u.m / u.s)
    scaled = speed * "m"
    assert scaled.unit == ((u.m * u.m) / u.s)
def test_quantity_invalid_unit_string():
    """An unparseable unit string raises ValueError."""
    with pytest.raises(ValueError):
        "foo" * u.m
def test_implicit_conversion():
    """Unit-named attributes convert when easy-conversion is enabled."""
    q = u.Quantity(1.0, u.meter)
    # Manually turn this on to simulate what might happen in a subclass
    q._include_easy_conversion_members = True
    assert_allclose(q.centimeter, 100)
    assert_allclose(q.cm, 100)
    assert_allclose(q.parsec, 3.240779289469756e-17)
def test_implicit_conversion_autocomplete():
    """dir() lists easy-conversion unit names alongside normal attributes."""
    q = u.Quantity(1.0, u.meter)
    # Manually turn this on to simulate what might happen in a subclass
    q._include_easy_conversion_members = True
    q.foo = 42
    attrs = dir(q)
    assert "centimeter" in attrs
    assert "cm" in attrs
    assert "parsec" in attrs
    assert "foo" in attrs
    assert "to" in attrs
    assert "value" in attrs
    # Something from the base class, object
    assert "__setattr__" in attrs
    # Unknown names still raise AttributeError.
    with pytest.raises(AttributeError):
        q.l
def test_quantity_iterability():
    """Regression test for issue #878.
    Scalar quantities should not be iterable and should raise a type error on
    iteration.
    """
    q1 = [15.0, 17.0] * u.m
    assert isiterable(q1)
    # Elements obtained by iteration are scalar quantities ...
    q2 = next(iter(q1))
    assert q2 == 15.0 * u.m
    # ... which themselves must not be iterable.
    assert not isiterable(q2)
    pytest.raises(TypeError, iter, q2)
def test_copy():
    """Quantity.copy preserves value, unit and dtype, and honours ``order``."""
    q1 = u.Quantity(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), unit=u.m)

    def check_independent_copy(copied):
        # Equal content, same unit/dtype, but backed by new memory.
        assert np.all(q1.value == copied.value)
        assert q1.unit == copied.unit
        assert q1.dtype == copied.dtype
        assert q1.value is not copied.value

    check_independent_copy(q1.copy())

    fortran_copy = q1.copy(order="F")
    assert fortran_copy.flags["F_CONTIGUOUS"]
    check_independent_copy(fortran_copy)

    c_copy = q1.copy(order="C")
    assert c_copy.flags["C_CONTIGUOUS"]
    check_independent_copy(c_copy)
def test_deepcopy():
    """copy.deepcopy returns an equal but fully independent Quantity."""
    original = u.Quantity(np.array([1.0, 2.0, 3.0]), unit=u.m)
    duplicate = copy.deepcopy(original)
    assert isinstance(duplicate, u.Quantity)
    assert np.all(original.value == duplicate.value)
    assert original.unit == duplicate.unit
    assert original.dtype == duplicate.dtype
    assert original.value is not duplicate.value
def test_equality_numpy_scalar():
    """
    A regression test to ensure that numpy scalars are correctly compared
    (which originally failed due to the lack of ``__array_priority__``).
    """
    # Plain numbers and numpy scalars must never compare equal to a
    # dimensional Quantity, regardless of which operand comes first.
    assert 10 != 10.0 * u.m
    assert np.int64(10) != 10 * u.m
    assert 10 * u.m != np.int64(10)
def test_quantity_pickelability():
    """
    Testing pickleability of quantity
    """
    original = np.arange(10) * u.m
    restored = pickle.loads(pickle.dumps(original))
    # The round trip must preserve both the values and the exact unit.
    assert np.all(original.value == restored.value)
    assert original.unit.is_equivalent(restored.unit)
    assert original.unit == restored.unit
def test_quantity_initialisation_from_string():
    """Strings containing a number and optional unit initialise Quantities."""
    q = u.Quantity("1")
    assert q.unit == u.dimensionless_unscaled
    assert q.value == 1.0

    q = u.Quantity("1.5 m/s")
    assert q.unit == u.m / u.s
    assert q.value == 1.5
    assert u.Unit(q) == u.Unit("1.5 m/s")

    # Signs, bare decimal points, exponents, and SI prefixes all parse.
    for string, expected in [
        (".5 m", u.Quantity(0.5, u.m)),
        ("-1e1km", u.Quantity(-10, u.km)),
        ("-1e+1km", u.Quantity(-10, u.km)),
        ("+.5km", u.Quantity(0.5, u.km)),
        ("+5e-1km", u.Quantity(0.5, u.km)),
        ("5Em", u.Quantity(5.0, u.Em)),
    ]:
        assert u.Quantity(string) == expected

    # An explicit unit applies to a bare number, and converts a united one.
    assert u.Quantity("5", u.m) == u.Quantity(5.0, u.m)
    q = u.Quantity("5 km", u.m)
    assert q.value == 5000.0
    assert q.unit == u.m

    # Strings with no number, malformed numbers, or string arrays raise
    # TypeError; a number followed by an unknown unit raises ValueError.
    for bad in ["", "m", "1.2.3 deg", "1+deg", "1-2deg", "1.2e-13.3m"]:
        with pytest.raises(TypeError):
            u.Quantity(bad)
    with pytest.raises(TypeError):
        u.Quantity(["5"])
    with pytest.raises(TypeError):
        u.Quantity(np.array(["5"]))
    for bad in ["5E", "5 foo"]:
        with pytest.raises(ValueError):
            u.Quantity(bad)
def test_unsupported():
    # Ufuncs that make no sense for dimensional data (bitwise ops) must
    # refuse Quantity input rather than silently operate on the values.
    q1 = np.arange(10) * u.m
    with pytest.raises(TypeError):
        np.bitwise_and(q1, q1)
def test_unit_identity():
    # The unit attached to a Quantity is the very same object, not a copy.
    q = 1.0 * u.hour
    assert q.unit is u.hour
def test_quantity_to_view():
    """``to`` returns a converted copy; the original stays untouched."""
    q_in_m = np.array([1000, 2000]) * u.m
    q_in_km = q_in_m.to(u.km)
    assert q_in_m.value[0] == 1000
    assert q_in_km.value[0] == 1
def test_quantity_tuple_power():
    # A tuple is not a valid exponent for a Quantity.
    with pytest.raises(ValueError):
        (5.0 * u.m) ** (1, 2)
def test_quantity_fraction_power():
    """Raising to a Fraction takes the root of both value and unit."""
    root = (25.0 * u.m**2) ** Fraction(1, 2)
    assert root.value == 5.0
    assert root.unit == u.m
    # Regression check to ensure we didn't create an object type by raising
    # the value of the quantity to a Fraction. [#3922]
    assert root.dtype.kind == "f"
def test_quantity_from_table():
    """
    Checks that units from tables are respected when converted to a Quantity.
    This also generically checks the use of *anything* with a `unit` attribute
    passed into Quantity
    """
    from astropy.table import Table

    t = Table(data=[np.arange(5), np.arange(5)], names=["a", "b"])
    t["a"].unit = u.kpc
    # A column with a unit yields a Quantity with that unit ...
    qa = u.Quantity(t["a"])
    assert qa.unit == u.kpc
    assert_array_equal(qa.value, t["a"])
    # ... and one without a unit is treated as dimensionless.
    qb = u.Quantity(t["b"])
    assert qb.unit == u.dimensionless_unscaled
    assert_array_equal(qb.value, t["b"])
    # This does *not* auto-convert, because it's not necessarily obvious that's
    # desired. Instead we revert to standard `Quantity` behavior
    qap = u.Quantity(t["a"], u.pc)
    assert qap.unit == u.pc
    assert_array_equal(qap.value, t["a"] * 1000)
    qbp = u.Quantity(t["b"], u.pc)
    assert qbp.unit == u.pc
    assert_array_equal(qbp.value, t["b"])
    # Also check with a function unit (regression test for gh-8430)
    t["a"].unit = u.dex(u.cm / u.s**2)
    fq = u.Dex(t["a"])
    assert fq.unit == u.dex(u.cm / u.s**2)
    assert_array_equal(fq.value, t["a"])
    # subok=True lets Quantity return the appropriate subclass (Dex) ...
    fq2 = u.Quantity(t["a"], subok=True)
    assert isinstance(fq2, u.Dex)
    assert fq2.unit == u.dex(u.cm / u.s**2)
    assert_array_equal(fq2.value, t["a"])
    # ... whereas plain Quantity cannot represent a function unit.
    with pytest.raises(u.UnitTypeError):
        u.Quantity(t["a"])
def test_assign_slice_with_quantity_like():
    # Regression tests for gh-5961
    from astropy.table import Column, Table

    # first check directly that we can use a Column to assign to a slice.
    c = Column(np.arange(10.0), unit=u.mm)
    q = u.Quantity(c)
    q[:2] = c[:2]
    # next check that we do not fail the original problem.
    t = Table()
    t["x"] = np.arange(10) * u.mm
    t["y"] = np.ones(10) * u.mm
    assert type(t["x"]) is Column
    xy = np.vstack([t["x"], t["y"]]).T * u.mm
    ii = [0, 2, 4]
    assert xy[ii, 0].unit == t["x"][ii].unit
    # should not raise anything
    xy[ii, 0] = t["x"][ii]
def test_insert():
    """
    Test Quantity.insert method. This does not test the full capabilities
    of the underlying np.insert, but hits the key functionality for
    Quantity.
    """
    q = [1, 2] * u.m
    # Insert a compatible float with different units
    q2 = q.insert(0, 1 * u.km)
    assert np.all(q2.value == [1000, 1, 2])
    assert q2.unit is u.m
    assert q2.dtype.kind == "f"
    # Inserting an array Quantity converts it to the target's unit.
    # (The ``minversion(np, "1.8.0")`` guard that used to protect this
    # was dead code: every supported numpy is far newer than 1.8.)
    q2 = q.insert(1, [1, 2] * u.km)
    assert np.all(q2.value == [1, 1000, 2000, 2])
    assert q2.unit is u.m
    # Cannot convert 1.5 * u.s to m
    with pytest.raises(u.UnitsError):
        q.insert(1, 1.5 * u.s)
    # Tests with multi-dim quantity
    q = [[1, 2], [3, 4]] * u.m
    q2 = q.insert(1, [10, 20] * u.m, axis=0)
    assert np.all(q2.value == [[1, 2], [10, 20], [3, 4]])
    q2 = q.insert(1, [10, 20] * u.m, axis=1)
    assert np.all(q2.value == [[1, 10, 2], [3, 20, 4]])
    # A scalar is broadcast along the insertion axis.
    q2 = q.insert(1, 10 * u.m, axis=1)
    assert np.all(q2.value == [[1, 10, 2], [3, 10, 4]])
def test_repr_array_of_quantity():
    """
    Test print/repr of object arrays of Quantity objects with different
    units.
    Regression test for the issue first reported in
    https://github.com/astropy/astropy/issues/3777
    """
    # Mixed units force an object-dtype array; each element keeps its own
    # scalar-Quantity repr.
    a = np.array([1 * u.m, 2 * u.s], dtype=object)
    assert repr(a) == "array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)"
    assert str(a) == "[<Quantity 1. m> <Quantity 2. s>]"
class TestSpecificTypeQuantity:
    """Tests of SpecificTypeQuantity subclasses, which restrict a Quantity
    to units equivalent to ``_equivalent_unit``."""

    def setup_method(self):
        class Length(u.SpecificTypeQuantity):
            _equivalent_unit = u.m

        # Variant with a default unit for bare (unitless) input.
        class Length2(Length):
            _default_unit = u.m

        # Variant that pins ``_unit`` directly (no default conversion).
        class Length3(Length):
            _unit = u.m

        self.Length = Length
        self.Length2 = Length2
        self.Length3 = Length3

    def test_creation(self):
        l = self.Length(np.arange(10.0) * u.km)
        assert type(l) is self.Length
        # Incompatible or missing units are rejected.
        with pytest.raises(u.UnitTypeError):
            self.Length(np.arange(10.0) * u.hour)
        with pytest.raises(u.UnitTypeError):
            self.Length(np.arange(10.0))
        # ... unless the subclass supplies a default unit.
        l2 = self.Length2(np.arange(5.0))
        assert type(l2) is self.Length2
        assert l2._default_unit is self.Length2._default_unit
        with pytest.raises(u.UnitTypeError):
            self.Length3(np.arange(10.0))

    def test_view(self):
        l = (np.arange(5.0) * u.km).view(self.Length)
        assert type(l) is self.Length
        with pytest.raises(u.UnitTypeError):
            (np.arange(5.0) * u.s).view(self.Length)
        # Viewing a bare array leaves the unit unset ...
        v = np.arange(5.0).view(self.Length)
        assert type(v) is self.Length
        assert v._unit is None
        # ... unless the subclass hard-wires one.
        l3 = np.ones((2, 2)).view(self.Length3)
        assert type(l3) is self.Length3
        assert l3.unit is self.Length3._unit

    def test_operation_precedence_and_fallback(self):
        l = self.Length(np.arange(5.0) * u.cm)
        # Addition keeps the specific type regardless of operand order ...
        sum1 = l + 1.0 * u.m
        assert type(sum1) is self.Length
        sum2 = 1.0 * u.km + l
        assert type(sum2) is self.Length
        sum3 = l + l
        assert type(sum3) is self.Length
        # ... but multiplication changes the physical type, so the result
        # falls back to a plain Quantity.
        res1 = l * (1.0 * u.m)
        assert type(res1) is u.Quantity
        res2 = l * l
        assert type(res2) is u.Quantity
def test_unit_class_override():
    class MyQuantity(u.Quantity):
        pass

    my_unit = u.Unit("my_deg", u.deg)
    my_unit._quantity_class = MyQuantity
    # Without subok the custom quantity class attached to the unit is
    # ignored; with subok=True it is honoured.
    q1 = u.Quantity(1.0, my_unit)
    assert type(q1) is u.Quantity
    q2 = u.Quantity(1.0, my_unit, subok=True)
    assert type(q2) is MyQuantity
class QuantityMimic:
    """Duck-typed stand-in for Quantity: exposes ``value`` and ``unit``
    attributes and conversion to a plain array via ``__array__``."""

    def __init__(self, value, unit):
        self.value = value
        self.unit = unit

    def __array__(self):
        # Materialize the stored value as a plain ndarray (copying).
        return np.array(self.value)
class QuantityMimic2(QuantityMimic):
    """Mimic that additionally provides ``to``/``to_value`` conversion
    methods, delegating to a real Quantity."""

    def to(self, unit):
        return u.Quantity(self.value, self.unit).to(unit)

    def to_value(self, unit):
        return u.Quantity(self.value, self.unit).to_value(unit)
class TestQuantityMimics:
    """Test Quantity Mimics that are not ndarray subclasses."""

    @pytest.mark.parametrize("Mimic", (QuantityMimic, QuantityMimic2))
    def test_mimic_input(self, Mimic):
        value = np.arange(10.0)
        mimic = Mimic(value, u.m)
        # The mimic's unit attribute is picked up on initialisation ...
        q = u.Quantity(mimic)
        assert q.unit == u.m
        assert np.all(q.value == value)
        # ... and conversion to an explicit unit works too.
        q2 = u.Quantity(mimic, u.cm)
        assert q2.unit == u.cm
        assert np.all(q2.value == 100 * value)

    @pytest.mark.parametrize("Mimic", (QuantityMimic, QuantityMimic2))
    def test_mimic_setting(self, Mimic):
        # Assigning a mimic into a Quantity slice converts its values.
        mimic = Mimic([1.0, 2.0], u.m)
        q = u.Quantity(np.arange(10.0), u.cm)
        q[8:] = mimic
        assert np.all(q[:8].value == np.arange(8.0))
        assert np.all(q[8:].value == [100.0, 200.0])

    def test_mimic_function_unit(self):
        # A mimic carrying a function (dex) unit behaves like a Dex source.
        mimic = QuantityMimic([1.0, 2.0], u.dex(u.cm / u.s**2))
        d = u.Dex(mimic)
        assert isinstance(d, u.Dex)
        assert d.unit == u.dex(u.cm / u.s**2)
        assert np.all(d.value == [1.0, 2.0])
        q = u.Quantity(mimic, subok=True)
        assert isinstance(q, u.Dex)
        assert q.unit == u.dex(u.cm / u.s**2)
        assert np.all(q.value == [1.0, 2.0])
        # Plain Quantity cannot hold a function unit.
        with pytest.raises(u.UnitTypeError):
            u.Quantity(mimic)
def test_masked_quantity_str_repr():
    """Ensure we don't break masked Quantity representation."""
    # Really, masked quantities do not work well, but at least let the
    # basics work.
    masked_quantity = np.ma.array([1, 2, 3, 4] * u.kg, mask=[True, False, True, False])
    # Only checking that these do not raise; output format is unspecified.
    str(masked_quantity)
    repr(masked_quantity)
class TestQuantitySubclassAboveAndBelow:
    """Check attribute propagation for classes that mix Quantity with
    another ndarray subclass, in either MRO order."""

    @classmethod
    def setup_class(self):
        # NOTE(review): as a classmethod, ``self`` here is really the class;
        # attributes set below become class attributes (conventional name
        # would be ``cls``).
        class MyArray(np.ndarray):
            def __array_finalize__(self, obj):
                super_array_finalize = super().__array_finalize__
                if super_array_finalize is not None:
                    super_array_finalize(obj)
                # Propagate the custom attribute through views/copies.
                if hasattr(obj, "my_attr"):
                    self.my_attr = obj.my_attr

        self.MyArray = MyArray
        self.MyQuantity1 = type("MyQuantity1", (u.Quantity, MyArray), dict(my_attr="1"))
        self.MyQuantity2 = type("MyQuantity2", (MyArray, u.Quantity), dict(my_attr="2"))

    def test_setup(self):
        mq1 = self.MyQuantity1(10, u.m)
        assert isinstance(mq1, self.MyQuantity1)
        assert mq1.my_attr == "1"
        assert mq1.unit is u.m
        mq2 = self.MyQuantity2(10, u.m)
        assert isinstance(mq2, self.MyQuantity2)
        assert mq2.my_attr == "2"
        assert mq2.unit is u.m

    def test_attr_propagation(self):
        # Converting between the two subclasses keeps the *instance*
        # attribute of the source, not the class default of the target.
        mq1 = self.MyQuantity1(10, u.m)
        mq12 = self.MyQuantity2(mq1)
        assert isinstance(mq12, self.MyQuantity2)
        assert not isinstance(mq12, self.MyQuantity1)
        assert mq12.my_attr == "1"
        assert mq12.unit is u.m
        mq2 = self.MyQuantity2(10, u.m)
        mq21 = self.MyQuantity1(mq2)
        assert isinstance(mq21, self.MyQuantity1)
        assert not isinstance(mq21, self.MyQuantity2)
        assert mq21.my_attr == "2"
        assert mq21.unit is u.m
|
5f7cda66df517024c2345254d3b31fa468a0aaba96c2dff008df798c4417408c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test Structured units and quantities specifically with the ERFA ufuncs.
"""
import erfa
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.introspection import minversion
ERFA_LE_2_0_0 = not minversion(erfa, "2.0.0.1")
class TestPVUfuncs:
    """Tests of erfa ufuncs that operate on pv (position/velocity)
    structured arrays, checking that structured units propagate through
    correctly when the inputs are Quantities."""

    def setup_class(self):
        # NOTE(review): pytest invokes setup_class with the class object,
        # so ``self`` here is really the class; these become class attributes.
        self.pv_unit = u.Unit("AU,AU/day")
        # Two simple orbits in the xy plane: unit position vectors with
        # perpendicular velocities of 0.0125 AU/day.
        self.pv_value = np.array(
            [
                ([1.0, 0.0, 0.0], [0.0, 0.0125, 0.0]),
                ([0.0, 1.0, 0.0], [-0.0125, 0.0, 0.0]),
            ],
            dtype=erfa_ufunc.dt_pv,
        )
        self.pv = self.pv_value << self.pv_unit
    def test_cpv(self):
        # cpv returns an equal copy backed by new memory.
        pv_copy = erfa_ufunc.cpv(self.pv)
        assert_array_equal(pv_copy, self.pv)
        assert not np.may_share_memory(pv_copy, self.pv)
    def test_p2pv(self):
        # p2pv promotes a position to a pv with zero velocity.
        p2pv = erfa_ufunc.p2pv(self.pv["p"])
        assert_array_equal(p2pv["p"], self.pv["p"])
        assert_array_equal(
            p2pv["v"], np.zeros(self.pv.shape + (3,), float) << u.m / u.s
        )
    @pytest.mark.xfail(
        erfa.__version__ <= "2.0.0",
        reason="erfa bug; https://github.com/liberfa/pyerfa/issues/70)",
    )
    def test_p2pv_inplace(self):
        # TODO: fix np.zeros_like.
        out = np.zeros_like(self.pv_value) << self.pv_unit
        p2pv = erfa_ufunc.p2pv(self.pv["p"], out=out)
        assert out is p2pv
        assert_array_equal(p2pv["p"], self.pv["p"])
        assert_array_equal(
            p2pv["v"], np.zeros(self.pv.shape + (3,), float) << u.m / u.s
        )
    def test_pv2p(self):
        # pv2p extracts the position part; also check the out= path.
        p = erfa_ufunc.pv2p(self.pv)
        assert_array_equal(p, self.pv["p"])
        out = np.zeros_like(p)
        p2 = erfa_ufunc.pv2p(self.pv, out=out)
        assert out is p2
        assert_array_equal(p2, self.pv["p"])
    def test_pv2s(self):
        # Conversion to spherical coordinates plus their derivatives.
        theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
        assert theta.unit == u.radian
        assert_quantity_allclose(theta, [0, 90] * u.deg)  # longitude
        assert phi.unit == u.radian
        assert_array_equal(phi.value, np.zeros(self.pv.shape))  # latitude
        assert r.unit == u.AU
        assert_array_equal(r.value, np.ones(self.pv.shape))
        assert td.unit == u.radian / u.day
        assert_array_equal(td.value, np.array([0.0125] * 2))
        assert pd.unit == u.radian / u.day
        assert_array_equal(pd.value, np.zeros(self.pv.shape))
        assert rd.unit == u.AU / u.day
        assert_array_equal(rd.value, np.zeros(self.pv.shape))
    def test_pv2s_non_standard_units(self):
        # pv2s only cares about the structure, not the physical type.
        pv = self.pv_value << u.Unit("Pa,Pa/m")
        theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
        assert theta.unit == u.radian
        assert_quantity_allclose(theta, [0, 90] * u.deg)  # longitude
        assert phi.unit == u.radian
        assert_array_equal(phi.value, np.zeros(pv.shape))  # latitude
        assert r.unit == u.Pa
        assert_array_equal(r.value, np.ones(pv.shape))
        assert td.unit == u.radian / u.m
        assert_array_equal(td.value, np.array([0.0125] * 2))
        assert pd.unit == u.radian / u.m
        assert_array_equal(pd.value, np.zeros(pv.shape))
        assert rd.unit == u.Pa / u.m
        assert_array_equal(rd.value, np.zeros(pv.shape))
    @pytest.mark.xfail(
        reason=(
            "erfa ufuncs cannot take different names; it is not yet clear whether "
            "this is changeable; see https://github.com/liberfa/pyerfa/issues/77"
        )
    )
    def test_pv2s_non_standard_names_and_units(self):
        pv_value = np.array(self.pv_value, dtype=[("pos", "f8"), ("vel", "f8")])
        pv = pv_value << u.Unit("Pa,Pa/m")
        theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
        assert theta.unit == u.radian
        assert_quantity_allclose(theta, [0, 90] * u.deg)  # longitude
        assert phi.unit == u.radian
        assert_array_equal(phi.value, np.zeros(pv.shape))  # latitude
        assert r.unit == u.Pa
        assert_array_equal(r.value, np.ones(pv.shape))
        assert td.unit == u.radian / u.m
        assert_array_equal(td.value, np.array([0.0125] * 2))
        assert pd.unit == u.radian / u.m
        assert_array_equal(pd.value, np.zeros(pv.shape))
        assert rd.unit == u.Pa / u.m
        assert_array_equal(rd.value, np.zeros(pv.shape))
    def test_s2pv(self):
        theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
        # On purpose change some of the units away from expected by s2pv.
        pv = erfa_ufunc.s2pv(
            theta.to(u.deg), phi, r.to(u.m), td.to(u.deg / u.day), pd, rd.to(u.m / u.s)
        )
        assert pv.unit == u.StructuredUnit("m, m/s", names=("p", "v"))
        assert_quantity_allclose(pv["p"], self.pv["p"], atol=1 * u.m, rtol=0)
        assert_quantity_allclose(pv["v"], self.pv["v"], atol=1 * u.mm / u.s, rtol=0)
    def test_pvstar(self):
        # Conversion of pv to catalog star parameters.
        ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
        assert_array_equal(stat, np.zeros(self.pv.shape, dtype="i4"))
        assert ra.unit == u.radian
        assert_quantity_allclose(ra, [0, 90] * u.deg)
        assert dec.unit == u.radian
        assert_array_equal(dec.value, np.zeros(self.pv.shape))  # latitude
        assert pmr.unit == u.radian / u.year
        assert_quantity_allclose(pmr, [0.0125, 0.0125] * u.radian / u.day)
        assert pmd.unit == u.radian / u.year
        assert_array_equal(pmd.value, np.zeros(self.pv.shape))
        assert px.unit == u.arcsec
        assert_quantity_allclose(px, 1 * u.radian)
        assert rv.unit == u.km / u.s
        assert_array_equal(rv.value, np.zeros(self.pv.shape))
    def test_starpv(self):
        # Round-trip pvstar -> starpv, feeding in non-default units.
        ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
        pv, stat = erfa_ufunc.starpv(
            ra.to(u.deg), dec.to(u.deg), pmr, pmd, px, rv.to(u.m / u.s)
        )
        assert_array_equal(stat, np.zeros(self.pv.shape, dtype="i4"))
        assert pv.unit == self.pv.unit
        # Roundtrip is not as good as hoped on 32bit, not clear why.
        # But proper motions are ridiculously high...
        assert_quantity_allclose(pv["p"], self.pv["p"], atol=1 * u.m, rtol=0)
        assert_quantity_allclose(pv["v"], self.pv["v"], atol=1 * u.m / u.s, rtol=0)
    def test_pvtob(self):
        pv = erfa_ufunc.pvtob(
            [90, 0] * u.deg,
            0.0 * u.deg,
            100 * u.km,
            0 * u.deg,
            0 * u.deg,
            0 * u.deg,
            90 * u.deg,
        )
        assert pv.unit == u.StructuredUnit("m, m/s", names=("p", "v"))
        assert pv.unit["v"] == u.m / u.s
        assert_quantity_allclose(
            pv["p"], [[-6478, 0, 0], [0, 6478, 0]] * u.km, atol=2 * u.km
        )
        assert_quantity_allclose(
            pv["v"], [[0, -0.5, 0], [-0.5, 0, 0]] * u.km / u.s, atol=0.1 * u.km / u.s
        )
    def test_pvdpv(self):
        # Dot product of pv with pv: units multiply field-wise.
        pvdpv = erfa_ufunc.pvdpv(self.pv, self.pv)
        assert pvdpv["pdp"].unit == self.pv.unit["p"] ** 2
        assert pvdpv["pdv"].unit == self.pv.unit["p"] * self.pv.unit["v"]
        assert_array_equal(
            pvdpv["pdp"], np.einsum("...i,...i->...", self.pv["p"], self.pv["p"])
        )
        assert_array_equal(
            pvdpv["pdv"], 2 * np.einsum("...i,...i->...", self.pv["p"], self.pv["v"])
        )
        z_axis = u.Quantity(np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv), "1,1/s")
        pvdpv2 = erfa_ufunc.pvdpv(self.pv, z_axis)
        assert pvdpv2["pdp"].unit == self.pv.unit["p"]
        assert pvdpv2["pdv"].unit == self.pv.unit["v"]
        assert_array_equal(pvdpv2["pdp"].value, np.zeros(self.pv.shape))
        assert_array_equal(pvdpv2["pdv"].value, np.zeros(self.pv.shape))
    def test_pvxpv(self):
        # Cross product of pv with itself is zero; with the z axis it
        # rotates the in-plane vectors.
        pvxpv = erfa_ufunc.pvxpv(self.pv, self.pv)
        assert pvxpv["p"].unit == self.pv.unit["p"] ** 2
        assert pvxpv["v"].unit == self.pv.unit["p"] * self.pv.unit["v"]
        assert_array_equal(pvxpv["p"].value, np.zeros(self.pv["p"].shape))
        assert_array_equal(pvxpv["v"].value, np.zeros(self.pv["v"].shape))
        z_axis = u.Quantity(np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv), "1,1/s")
        pvxpv2 = erfa_ufunc.pvxpv(self.pv, z_axis)
        assert pvxpv2["p"].unit == self.pv.unit["p"]
        assert pvxpv2["v"].unit == self.pv.unit["v"]
        assert_array_equal(pvxpv2["p"], [[0.0, -1, 0.0], [1.0, 0.0, 0.0]] * u.AU)
        assert_array_equal(
            pvxpv2["v"], [[0.0125, 0.0, 0.0], [0.0, 0.0125, 0.0]] * u.AU / u.day
        )
    def test_pvm(self):
        # Moduli of position and velocity parts separately.
        pm, vm = erfa_ufunc.pvm(self.pv)
        assert pm.unit == self.pv.unit["p"]
        assert vm.unit == self.pv.unit["v"]
        assert_array_equal(pm, np.linalg.norm(self.pv["p"], axis=-1))
        assert_array_equal(vm, np.linalg.norm(self.pv["v"], axis=-1))
    def test_pvmpv(self):
        pvmpv = erfa_ufunc.pvmpv(self.pv, self.pv)
        assert pvmpv.unit == self.pv.unit
        assert_array_equal(pvmpv["p"], 0 * self.pv["p"])
        assert_array_equal(pvmpv["v"], 0 * self.pv["v"])
    def test_pvppv(self):
        pvppv = erfa_ufunc.pvppv(self.pv, self.pv)
        assert pvppv.unit == self.pv.unit
        assert_array_equal(pvppv["p"], 2 * self.pv["p"])
        assert_array_equal(pvppv["v"], 2 * self.pv["v"])
    def test_pvu(self):
        # Update pv by propagating the position over one day.
        pvu = erfa_ufunc.pvu(86400 * u.s, self.pv)
        assert pvu.unit == self.pv.unit
        assert_array_equal(pvu["p"], self.pv["p"] + 1 * u.day * self.pv["v"])
        assert_array_equal(pvu["v"], self.pv["v"])
    def test_pvup(self):
        pvup = erfa_ufunc.pvup(86400 * u.s, self.pv)
        assert pvup.unit == self.pv.unit["p"]
        assert_array_equal(pvup, self.pv["p"] + 1 * u.day * self.pv["v"])
    def test_sxpv(self):
        # Not a realistic example!!
        sxpv = erfa_ufunc.sxpv(10.0, self.pv)
        assert sxpv.unit == self.pv.unit
        assert_array_equal(sxpv["p"], self.pv["p"] * 10)
        assert_array_equal(sxpv["v"], self.pv["v"] * 10)
        sxpv2 = erfa_ufunc.sxpv(30.0 * u.s, self.pv)
        assert sxpv2.unit == u.StructuredUnit("AU s,AU s/d", names=("p", "v"))
        assert_array_equal(sxpv2["p"], self.pv["p"] * 30 * u.s)
        assert_array_equal(sxpv2["v"], self.pv["v"] * 30 * u.s)
    def test_s2xpv(self):
        # Not a realistic example!!
        s2xpv = erfa_ufunc.s2xpv(10.0, 1 * u.s, self.pv)
        assert s2xpv.unit == u.StructuredUnit("AU,AU s/d", names=("p", "v"))
        assert_array_equal(s2xpv["p"], self.pv["p"] * 10)
        assert_array_equal(s2xpv["v"], self.pv["v"] * u.s)
    @pytest.mark.parametrize(
        "r",
        [
            np.eye(3),
            np.array(
                [
                    [0.0, -1.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0],
                ]
            ),
            np.eye(3) / u.s,
        ],
    )
    def test_rxpv(self, r):
        # Rotation applied to both position and velocity parts.
        result = erfa_ufunc.rxpv(r, self.pv)
        assert_array_equal(result["p"], np.einsum("...ij,...j->...i", r, self.pv["p"]))
        assert_array_equal(result["v"], np.einsum("...ij,...j->...i", r, self.pv["v"]))
    @pytest.mark.parametrize(
        "r",
        [
            np.eye(3),
            np.array(
                [
                    [0.0, -1.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0],
                ]
            ),
            np.eye(3) / u.s,
        ],
    )
    def test_trxpv(self, r):
        # Transpose-rotation applied to both parts.
        result = erfa_ufunc.trxpv(r, self.pv)
        assert_array_equal(
            result["p"], np.einsum("...ij,...j->...i", r.T, self.pv["p"])
        )
        assert_array_equal(
            result["v"], np.einsum("...ij,...j->...i", r.T, self.pv["v"])
        )
@pytest.mark.xfail(
    erfa.__version__ < "1.7.3.1",
    reason="dt_eraLDBODY incorrectly defined",
    scope="class",
)
class TestEraStructUfuncs:
    """Tests of erfa ufuncs that take other structured dtypes (ldbody,
    astrom), checking unit propagation and agreement with the reference
    values from erfa's own t_erfa_c.c."""

    def setup_class(self):
        # NOTE(review): pytest invokes setup_class with the class object,
        # so ``self`` here is really the class.
        # Sun + planets light-deflection body parameters (mass, deflection
        # limit, barycentric pv), as in erfa's t_ldn test.
        ldbody = np.array(
            [
                (0.00028574, 3e-10, ([-7.81014427, -5.60956681, -1.98079819],
                                     [0.0030723249, -0.00406995477, -0.00181335842])),
                (0.00095435, 3e-9, ([0.738098796, 4.63658692, 1.9693136],
                                    [-0.00755816922, 0.00126913722, 0.000727999001])),
                (1.0, 6e-6, ([-0.000712174377, -0.00230478303, -0.00105865966],
                             [6.29235213e-6, -3.30888387e-7, -2.96486623e-7]))
            ],
            dtype=erfa_ufunc.dt_eraLDBODY
        )  # fmt: skip
        ldbody_unit = u.StructuredUnit("Msun,radian,(AU,AU/day)", ldbody.dtype)
        self.ldbody = ldbody << ldbody_unit
        self.ob = [-0.974170437, -0.2115201, -0.0917583114] << u.AU
        self.sc = np.array([-0.763276255, -0.608633767, -0.216735543])
        # From t_atciq in t_erfa_c.c
        astrom, eo = erfa_ufunc.apci13(2456165.5, 0.401182685)
        self.astrom_unit = u.StructuredUnit(
            "yr,AU,1,AU,1,1,1,rad,rad,rad,rad,1,1,1,rad,rad,rad", astrom.dtype
        )
        self.astrom = astrom << self.astrom_unit
        # Catalog coordinates and motions used by the atciq* tests.
        self.rc = 2.71 * u.rad
        self.dc = 0.174 * u.rad
        self.pr = 1e-5 * u.rad / u.year
        self.pd = 5e-6 * u.rad / u.year
        self.px = 0.1 * u.arcsec
        self.rv = 55.0 * u.km / u.s

    def test_ldn_basic(self):
        sn = erfa_ufunc.ldn(self.ldbody, self.ob, self.sc)
        assert_quantity_allclose(
            sn,
            [-0.7632762579693333866, -0.6086337636093002660, -0.2167355420646328159]
            * u.one,
            atol=1e-12,
            rtol=0,
        )

    def test_ldn_in_other_unit(self):
        # The result must not depend on the units the inputs carry.
        ldbody = self.ldbody.to("kg,rad,(m,m/s)")
        ob = self.ob.to("m")
        sn = erfa_ufunc.ldn(ldbody, ob, self.sc)
        assert_quantity_allclose(
            sn,
            [-0.7632762579693333866, -0.6086337636093002660, -0.2167355420646328159]
            * u.one,
            atol=1e-12,
            rtol=0,
        )

    def test_ldn_in_SI(self):
        sn = erfa_ufunc.ldn(self.ldbody.si, self.ob.si, self.sc)
        assert_quantity_allclose(
            sn,
            [-0.7632762579693333866, -0.6086337636093002660, -0.2167355420646328159]
            * u.one,
            atol=1e-12,
            rtol=0,
        )

    def test_aper(self):
        # aper updates only the "eral" field; units of the other fields
        # (including converted ones) must be preserved.
        along = self.astrom["along"]
        astrom2 = erfa_ufunc.aper(10 * u.deg, self.astrom)
        assert astrom2["eral"].unit == u.radian
        assert_quantity_allclose(astrom2["eral"], along + 10 * u.deg)
        astrom3 = self.astrom.to("s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,rad,rad,rad")
        astrom4 = erfa_ufunc.aper(10 * u.deg, astrom3)
        assert astrom3["eral"].unit == u.rad
        assert astrom4["eral"].unit == u.deg
        assert astrom4.unit == "s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,rad,rad"
        assert_quantity_allclose(astrom4["eral"], along + 10 * u.deg)

    def test_atciq_basic(self):
        ri, di = erfa_ufunc.atciq(
            self.rc, self.dc, self.pr, self.pd, self.px, self.rv, self.astrom
        )
        assert_quantity_allclose(ri, 2.710121572968696744 * u.rad)
        assert_quantity_allclose(di, 0.1729371367219539137 * u.rad)

    def test_atciq_in_other_unit(self):
        astrom = self.astrom.to("s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,deg,deg")
        ri, di = erfa_ufunc.atciq(
            self.rc.to(u.deg),
            self.dc.to(u.deg),
            self.pr.to(u.mas / u.yr),
            self.pd.to(u.mas / u.yr),
            self.px,
            self.rv.to(u.m / u.s),
            astrom,
        )
        assert_quantity_allclose(ri, 2.710121572968696744 * u.rad, atol=1e-12 * u.rad)
        assert_quantity_allclose(di, 0.1729371367219539137 * u.rad, atol=1e-12 * u.rad)

    def test_atciqn(self):
        ri, di = erfa_ufunc.atciqn(
            self.rc.to(u.deg),
            self.dc.to(u.deg),
            self.pr.to(u.mas / u.yr),
            self.pd.to(u.mas / u.yr),
            self.px,
            self.rv.to(u.m / u.s),
            self.astrom.si,
            self.ldbody.si,
        )
        assert_quantity_allclose(ri, 2.710122008104983335 * u.rad, atol=1e-12 * u.rad)
        assert_quantity_allclose(di, 0.1729371916492767821 * u.rad, atol=1e-12 * u.rad)

    def test_atciqz(self):
        ri, di = erfa_ufunc.atciqz(self.rc.to(u.deg), self.dc.to(u.deg), self.astrom.si)
        assert_quantity_allclose(ri, 2.709994899247256984 * u.rad, atol=1e-12 * u.rad)
        assert_quantity_allclose(di, 0.1728740720984931891 * u.rad, atol=1e-12 * u.rad)

    def test_aticq(self):
        ri = 2.710121572969038991 * u.rad
        di = 0.1729371367218230438 * u.rad
        rc, dc = erfa_ufunc.aticq(ri.to(u.deg), di.to(u.deg), self.astrom.si)
        assert_quantity_allclose(rc, 2.710126504531716819 * u.rad, atol=1e-12 * u.rad)
        assert_quantity_allclose(dc, 0.1740632537627034482 * u.rad, atol=1e-12 * u.rad)

    def test_aticqn(self):
        ri = 2.709994899247599271 * u.rad
        di = 0.1728740720983623469 * u.rad
        rc, dc = erfa_ufunc.aticqn(
            ri.to(u.deg), di.to(u.deg), self.astrom.si, self.ldbody.si
        )
        assert_quantity_allclose(rc, 2.709999575033027333 * u.rad, atol=1e-12 * u.rad)
        assert_quantity_allclose(dc, 0.1739999656316469990 * u.rad, atol=1e-12 * u.rad)

    def test_atioq_atoiq(self):
        # Uses an observed-place astrom from apio13 (values from t_erfa_c.c).
        astrom, _ = erfa_ufunc.apio13(
            2456384.5,
            0.969254051,
            0.1550675,
            -0.527800806,
            -1.2345856,
            2738.0,
            2.47230737e-7,
            1.82640464e-6,
            731.0,
            12.8,
            0.59,
            0.55,
        )
        astrom = astrom << self.astrom_unit
        ri = 2.710121572969038991 * u.rad
        di = 0.1729371367218230438 * u.rad
        aob, zob, hob, dob, rob = erfa_ufunc.atioq(
            ri.to(u.deg), di.to(u.deg), astrom.si
        )
        assert_quantity_allclose(
            aob, 0.9233952224895122499e-1 * u.rad, atol=1e-12 * u.rad
        )
        assert_quantity_allclose(zob, 1.407758704513549991 * u.rad, atol=1e-12 * u.rad)
        assert_quantity_allclose(
            hob, -0.9247619879881698140e-1 * u.rad, atol=1e-12 * u.rad
        )
        assert_quantity_allclose(dob, 0.1717653435756234676 * u.rad, atol=1e-12 * u.rad)
        assert_quantity_allclose(rob, 2.710085107988480746 * u.rad, atol=1e-12 * u.rad)
        # Sadly does not just use the values from above.
        ob1 = 2.710085107986886201 * u.rad
        ob2 = 0.1717653435758265198 * u.rad
        ri2, di2 = erfa_ufunc.atoiq("R", ob1.to(u.deg), ob2.to(u.deg), astrom.si)
        assert_quantity_allclose(ri2, 2.710121574447540810 * u.rad, atol=1e-12 * u.rad)
        assert_quantity_allclose(
            di2, 0.17293718391166087785 * u.rad, atol=1e-12 * u.rad
        )

    @pytest.mark.xfail(erfa.__version__ < "2.0.0", reason="comparisons changed")
    def test_apio(self):
        sp = -3.01974337e-11 * u.rad
        theta = 3.14540971 * u.rad
        elong = -0.527800806 * u.rad
        phi = -1.2345856 * u.rad
        hm = 2738.0 * u.m
        xp = 2.47230737e-7 * u.rad
        yp = 1.82640464e-6 * u.rad
        refa = 0.000201418779 * u.rad
        refb = -2.36140831e-7 * u.rad
        astrom = erfa_ufunc.apio(
            sp.to(u.deg), theta, elong, phi, hm.to(u.km), xp, yp, refa, refb
        )
        assert astrom.unit == self.astrom_unit
        # Reference field values from t_apio in t_erfa_c.c.
        for name, value in [
            ("along", -0.5278008060295995734),
            ("xpl", 0.1133427418130752958e-5),
            ("ypl", 0.1453347595780646207e-5),
            ("sphi", -0.9440115679003211329),
            ("cphi", 0.3299123514971474711),
            ("diurab", 0.5135843661699913529e-6),
            ("eral", 2.617608903970400427),
            ("refa", 0.2014187790000000000e-3),
            ("refb", -0.2361408310000000000e-6),
        ]:
            assert_quantity_allclose(
                astrom[name],
                value * self.astrom_unit[name],
                rtol=1e-12,
                atol=0 * self.astrom_unit[name],
            )
|
f9e2eec6b690bbd9125eea2c3509f8c3b170e9ee239bcd8a84a38605cca4cf21 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import itertools
import numpy as np
import numpy.lib.recfunctions as rfn
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED,
DISPATCHED_FUNCTIONS,
FUNCTION_HELPERS,
IGNORED_FUNCTIONS,
SUBCLASS_SAFE_FUNCTIONS,
TBD_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24
# Marker for tests that require numpy's __array_function__ dispatch to be
# active; they are expected to fail otherwise.
needs_array_function = pytest.mark.xfail(
    not ARRAY_FUNCTION_ENABLED, reason="Needs __array_function__ support"
)
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
def get_wrapped_functions(*modules):
    """Return a dict of the ``__array_function__``-wrapped callables in *modules*.

    Public (non-underscore) callables that carry a ``__wrapped__`` attribute
    are collected by name; ``np.printoptions`` is explicitly excluded since
    it is a context manager, not a dispatchable function.
    """
    wrapped = {}
    for module in modules:
        for attr_name, attr in vars(module).items():
            if attr is np.printoptions or attr_name.startswith("_"):
                continue
            if callable(attr) and hasattr(attr, "__wrapped__"):
                wrapped[attr_name] = attr
    return wrapped
# Name -> function mapping of every wrapped numpy function we might cover,
# plus the corresponding set of function objects for coverage bookkeeping.
all_wrapped_functions = get_wrapped_functions(
    np, np.fft, np.linalg, np.lib.recfunctions
)
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
    """Meta class that tracks which functions are covered by tests.

    Assumes that a test is called 'test_<function_name>'.
    """

    covered = set()

    def __new__(mcls, name, bases, members):
        for attr_name, attr in members.items():
            if not (inspect.isfunction(attr) and attr_name.startswith("test")):
                continue
            # Map "test_foo" back to the numpy function "foo", if wrapped.
            function_name = attr_name.replace("test_", "")
            if function_name in all_wrapped_functions:
                mcls.covered.add(all_wrapped_functions[function_name])
        return super().__new__(mcls, name, bases, members)
class BasicTestSetup(metaclass=CoverageMeta):
    """Test setup for functions that should not change the unit.
    Also provides a default Quantity with shape (3, 3) and units of m.
    """

    def setup_method(self):
        # Fresh 3x3 Quantity of metres for every test method.
        self.q = np.arange(9.0).reshape(3, 3) / 4.0 * u.m
class InvariantUnitTestSetup(BasicTestSetup):
    """Setup for functions whose output keeps the input's unit."""

    def check(self, func, *args, **kwargs):
        # Applying func to the Quantity must equal applying it to the bare
        # values and re-attaching the unit.
        o = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs) * self.q.unit
        assert o.shape == expected.shape
        assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
    """Setup for functions whose output carries no unit (e.g. arg functions)."""

    def check(self, func, *args, **kwargs):
        out = func(self.q, *args, **kwargs)
        # BUG FIX: was ``func(self.q.value, *args, *kwargs)`` — a single
        # star unpacks only the dict's *keys* as extra positional
        # arguments; the keyword arguments must be forwarded with **.
        expected = func(self.q.value, *args, **kwargs)
        assert type(out) is type(expected)
        if isinstance(expected, tuple):
            assert all(np.all(o == x) for o, x in zip(out, expected))
        else:
            assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
    """np.shape/size/ndim work on Quantities like on plain arrays."""

    def test_shape(self):
        assert np.shape(self.q) == (3, 3)

    def test_size(self):
        assert np.size(self.q) == 9

    def test_ndim(self):
        assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
    """Shape-changing numpy functions must preserve the unit."""

    # Note: do not parametrize the below, since test names are used
    # to check coverage.
    def test_reshape(self):
        self.check(np.reshape, (9, 1))

    def test_ravel(self):
        self.check(np.ravel)

    def test_moveaxis(self):
        self.check(np.moveaxis, 0, 1)

    def test_rollaxis(self):
        self.check(np.rollaxis, 0, 2)

    def test_swapaxes(self):
        self.check(np.swapaxes, 0, 1)

    def test_transpose(self):
        self.check(np.transpose)

    def test_atleast_1d(self):
        # atleast_* accept multiple arrays; both outputs keep their units.
        q = 1.0 * u.m
        o, so = np.atleast_1d(q, self.q)
        assert o.shape == (1,)
        assert o == q
        expected = np.atleast_1d(self.q.value) * u.m
        assert np.all(so == expected)

    def test_atleast_2d(self):
        q = 1.0 * u.m
        o, so = np.atleast_2d(q, self.q)
        assert o.shape == (1, 1)
        assert o == q
        expected = np.atleast_2d(self.q.value) * u.m
        assert np.all(so == expected)

    def test_atleast_3d(self):
        q = 1.0 * u.m
        o, so = np.atleast_3d(q, self.q)
        assert o.shape == (1, 1, 1)
        assert o == q
        expected = np.atleast_3d(self.q.value) * u.m
        assert np.all(so == expected)

    def test_expand_dims(self):
        self.check(np.expand_dims, 1)

    def test_squeeze(self):
        o = np.squeeze(self.q[:, np.newaxis, :])
        assert o.shape == (3, 3)
        assert np.all(o == self.q)

    def test_flip(self):
        self.check(np.flip)

    def test_fliplr(self):
        self.check(np.fliplr)

    def test_flipud(self):
        self.check(np.flipud)

    def test_rot90(self):
        self.check(np.rot90)

    def test_broadcast_to(self):
        # Decided *not* to change default for subok for Quantity, since
        # that would be contrary to the docstring and might break code.
        self.check(np.broadcast_to, (3, 3, 3), subok=True)
        out = np.broadcast_to(self.q, (3, 3, 3))
        assert type(out) is np.ndarray  # NOT Quantity

    def test_broadcast_arrays(self):
        # Decided *not* to change default for subok for Quantity, since
        # that would be contrary to the docstring and might break code.
        q2 = np.ones((3, 3, 3)) / u.s
        o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
        assert isinstance(o1, u.Quantity)
        assert isinstance(o2, u.Quantity)
        assert o1.shape == o2.shape == (3, 3, 3)
        assert np.all(o1 == self.q)
        assert np.all(o2 == q2)
        a1, a2 = np.broadcast_arrays(self.q, q2)
        assert type(a1) is np.ndarray
        assert type(a2) is np.ndarray
class TestArgFunctions(NoUnitTestSetup):
    """arg-type numpy functions return plain (unitless) index results."""

    def test_argmin(self):
        self.check(np.argmin)

    def test_argmax(self):
        self.check(np.argmax)

    def test_argsort(self):
        self.check(np.argsort)

    def test_lexsort(self):
        self.check(np.lexsort)

    def test_searchsorted(self):
        q = self.q.ravel()
        # The cm values must be converted to m before the search.
        q2 = np.array([150.0, 350.0]) * u.cm
        out = np.searchsorted(q, q2)
        expected = np.searchsorted(q.value, q2.to_value(q.unit))
        assert np.all(out == expected)

    def test_nonzero(self):
        self.check(np.nonzero)

    def test_argwhere(self):
        self.check(np.argwhere)

    @needs_array_function
    def test_argpartition(self):
        self.check(np.argpartition, 2)

    def test_flatnonzero(self):
        self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
    """Functions that act along a given axis, with units handled correctly."""
    def test_take_along_axis(self):
        indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
        out = np.take_along_axis(self.q, indices, axis=0)
        expected = np.take_along_axis(self.q.value, indices, axis=0) * self.q.unit
        assert np.all(out == expected)
    def test_put_along_axis(self):
        q = self.q.copy()
        indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
        # Values in a convertible unit (-100 cm == -1 m) are accepted.
        np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
        expected = q.value.copy()
        np.put_along_axis(expected, indices, axis=0, values=-1)
        expected = expected * q.unit
        assert np.all(q == expected)
    @pytest.mark.parametrize("axis", (0, 1))
    def test_apply_along_axis(self, axis):
        out = np.apply_along_axis(np.square, axis, self.q)
        expected = np.apply_along_axis(np.square, axis, self.q.value) * self.q.unit**2
        assert_array_equal(out, expected)
    @needs_array_function
    @pytest.mark.parametrize("axes", ((1,), (0,), (0, 1)))
    def test_apply_over_axes(self, axes):
        def function(x, axis):
            return np.sum(np.square(x), axis)
        out = np.apply_over_axes(function, self.q, axes)
        expected = np.apply_over_axes(function, self.q.value, axes)
        # Squaring is applied once per reduced axis, hence unit ** (2 * len(axes)).
        expected = expected * self.q.unit ** (2 * len(axes))
        assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
    """*_indices_from helpers return plain index tuples; no units involved."""
    def test_diag_indices_from(self):
        self.check(np.diag_indices_from)
    def test_triu_indices_from(self):
        self.check(np.triu_indices_from)
    def test_tril_indices_from(self):
        self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
    """np.real/np.imag on a complex Quantity keep the unit."""
    def setup_method(self):
        # Complex-valued Quantity so the imaginary part is non-trivial.
        self.q = (np.arange(9.0).reshape(3, 3) + 1j) * u.m
    def test_real(self):
        self.check(np.real)
    def test_imag(self):
        self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
    """Copy and *_like creation helpers should produce Quantity output."""
    @needs_array_function
    def test_copy(self):
        self.check(np.copy)
        # Also as kwarg
        copy = np.copy(a=self.q)
        assert_array_equal(copy, self.q)
    @needs_array_function
    def test_asfarray(self):
        self.check(np.asfarray)
        # Also as kwarg, to exercise keyword dispatch.
        farray = np.asfarray(a=self.q)
        assert_array_equal(farray, self.q)
    def test_empty_like(self):
        o = np.empty_like(self.q)
        assert o.shape == (3, 3)
        assert isinstance(o, u.Quantity)
        assert o.unit == self.q.unit
        # 'prototype' is empty_like's keyword name for its first argument.
        o2 = np.empty_like(prototype=self.q)
        assert o2.shape == (3, 3)
        assert isinstance(o2, u.Quantity)
        assert o2.unit == self.q.unit
        o3 = np.empty_like(self.q, subok=False)
        assert type(o3) is np.ndarray
    def test_zeros_like(self):
        self.check(np.zeros_like)
        o2 = np.zeros_like(a=self.q)
        assert_array_equal(o2, self.q * 0.0)
    def test_ones_like(self):
        self.check(np.ones_like)
    @needs_array_function
    def test_full_like(self):
        # Fill value in a convertible unit (km) is converted to the array's (m).
        o = np.full_like(self.q, 0.5 * u.km)
        expected = np.empty_like(self.q.value) * u.m
        expected[...] = 0.5 * u.km
        assert np.all(o == expected)
        with pytest.raises(u.UnitsError):
            np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
    """Functions that select parts of an array keep the unit intact."""
    def test_diag(self):
        self.check(np.diag)
    @needs_array_function
    def test_diag_1d_input(self):
        # Also check 1-D case; drops unit w/o __array_function__.
        q = self.q.ravel()
        o = np.diag(q)
        expected = np.diag(q.value) << q.unit
        assert o.unit == self.q.unit
        assert o.shape == expected.shape
        assert_array_equal(o, expected)
    def test_diagonal(self):
        self.check(np.diagonal)
    def test_diagflat(self):
        self.check(np.diagflat)
    def test_compress(self):
        o = np.compress([True, False, True], self.q, axis=0)
        expected = np.compress([True, False, True], self.q.value, axis=0) * self.q.unit
        assert np.all(o == expected)
    def test_extract(self):
        o = np.extract([True, False, True], self.q)
        expected = np.extract([True, False, True], self.q.value) * self.q.unit
        assert np.all(o == expected)
    def test_delete(self):
        self.check(np.delete, slice(1, 2), 0)
        self.check(np.delete, [0, 2], 1)
    def test_trim_zeros(self):
        q = self.q.ravel()
        out = np.trim_zeros(q)
        expected = np.trim_zeros(q.value) * u.m
        assert np.all(out == expected)
    def test_roll(self):
        self.check(np.roll, 1)
        self.check(np.roll, 1, axis=0)
    def test_take(self):
        self.check(np.take, [0, 1], axis=1)
        self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
    """In-place setters: values are converted to the target's unit first."""
    def test_put(self):
        q = np.arange(3.0) * u.m
        np.put(q, [0, 2], [50, 150] * u.cm)
        assert q.unit == u.m
        expected = [50, 100, 150] * u.cm
        assert np.all(q == expected)
    @needs_array_function
    def test_putmask(self):
        q = np.arange(3.0) * u.m
        mask = [True, False, True]
        values = [50, 0, 150] * u.cm
        np.putmask(q, mask, values)
        assert q.unit == u.m
        expected = [50, 100, 150] * u.cm
        assert np.all(q == expected)
        # Unit mismatches in either direction must raise.
        with pytest.raises(u.UnitsError):
            np.putmask(q, mask, values.value)
        with pytest.raises(u.UnitsError):
            np.putmask(q.value, mask, values)
        # Dimensionless values (percent) may be put into a plain ndarray.
        a = np.arange(3.0)
        values = [50, 0, 150] * u.percent
        np.putmask(a, mask, values)
        expected = np.array([0.5, 1.0, 1.5])
        assert np.all(a == expected)
    @needs_array_function
    def test_place(self):
        q = np.arange(3.0) * u.m
        np.place(q, [True, False, True], [50, 150] * u.cm)
        assert q.unit == u.m
        expected = [50, 100, 150] * u.cm
        assert np.all(q == expected)
        # Dimensionless values into a plain ndarray stay a plain ndarray.
        a = np.arange(3.0)
        np.place(a, [True, False, True], [50, 150] * u.percent)
        assert type(a) is np.ndarray
        expected = np.array([0.5, 1.0, 1.5])
        assert np.all(a == expected)
    @needs_array_function
    def test_copyto(self):
        q = np.arange(3.0) * u.m
        np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
        assert q.unit == u.m
        expected = [50, 100, 150] * u.cm
        assert np.all(q == expected)
        a = np.arange(3.0)
        np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
        assert type(a) is np.ndarray
        expected = np.array([0.5, 1.0, 1.5])
        assert np.all(a == expected)
    def test_fill_diagonal(self):
        q = np.arange(9.0).reshape(3, 3) * u.m
        expected = q.value.copy()
        np.fill_diagonal(expected, 0.25)
        expected = expected * u.m
        # Fill value in cm is converted to the array's unit (25 cm -> 0.25 m).
        np.fill_diagonal(q, 25.0 * u.cm)
        assert q.unit == u.m
        assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
    """Repetition/tiling functions keep the unit of the input."""
    def test_tile(self):
        self.check(np.tile, 2)
    def test_repeat(self):
        self.check(np.repeat, 2)
    @needs_array_function
    def test_resize(self):
        self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
    """Joining functions: all inputs are converted to a reference unit."""
    def setup_method(self):
        self.q1 = np.arange(6.0).reshape(2, 3) * u.m
        self.q2 = self.q1.to(u.cm)
    def check(self, func, *args, **kwargs):
        # Compare func on Quantity inputs against func on raw values, with
        # every input converted to q_ref's unit (the first array by default).
        q_list = kwargs.pop("q_list", [self.q1, self.q2])
        q_ref = kwargs.pop("q_ref", q_list[0])
        o = func(q_list, *args, **kwargs)
        v_list = [q_ref._to_own_unit(q) for q in q_list]
        expected = func(v_list, *args, **kwargs) * q_ref.unit
        assert o.shape == expected.shape
        assert np.all(o == expected)
    @needs_array_function
    def test_concatenate(self):
        self.check(np.concatenate)
        self.check(np.concatenate, axis=1)
        # regression test for gh-13322.
        self.check(np.concatenate, dtype="f4")
        self.check(
            np.concatenate,
            q_list=[np.zeros(self.q1.shape), self.q1, self.q2],
            q_ref=self.q1,
        )
        # The out= argument is honoured and acquires the result unit.
        out = np.empty((4, 3)) * u.dimensionless_unscaled
        result = np.concatenate([self.q1, self.q2], out=out)
        assert out is result
        assert out.unit == self.q1.unit
        expected = (
            np.concatenate([self.q1.value, self.q2.to_value(self.q1.unit)])
            * self.q1.unit
        )
        assert np.all(result == expected)
        with pytest.raises(TypeError):
            np.concatenate([self.q1, object()])
    @needs_array_function
    def test_stack(self):
        self.check(np.stack)
    @needs_array_function
    def test_column_stack(self):
        self.check(np.column_stack)
    @needs_array_function
    def test_hstack(self):
        self.check(np.hstack)
    @needs_array_function
    def test_vstack(self):
        self.check(np.vstack)
    @needs_array_function
    def test_dstack(self):
        self.check(np.dstack)
    @needs_array_function
    def test_block(self):
        self.check(np.block)
        # Mixed plain numbers and Quantities: all converted to m.
        result = np.block([[0.0, 1.0 * u.m], [1.0 * u.cm, 2.0 * u.km]])
        assert np.all(result == np.block([[0, 1.0], [0.01, 2000.0]]) << u.m)
    @needs_array_function
    def test_append(self):
        out = np.append(self.q1, self.q2, axis=0)
        assert out.unit == self.q1.unit
        expected = (
            np.append(self.q1.value, self.q2.to_value(self.q1.unit), axis=0)
            * self.q1.unit
        )
        assert np.all(out == expected)
        # Appending a dimensionless Quantity to a plain array upgrades it.
        a = np.arange(3.0)
        result = np.append(a, 50.0 * u.percent)
        assert isinstance(result, u.Quantity)
        assert result.unit == u.dimensionless_unscaled
        expected = np.append(a, 0.5) * u.dimensionless_unscaled
        assert np.all(result == expected)
    @needs_array_function
    def test_insert(self):
        # Unit of inserted values is not ignored.
        q = np.arange(12.0).reshape(6, 2) * u.m
        out = np.insert(q, (3, 5), [50.0, 25.0] * u.cm)
        assert isinstance(out, u.Quantity)
        assert out.unit == q.unit
        expected = np.insert(q.value, (3, 5), [0.5, 0.25]) << q.unit
        assert np.all(out == expected)
        # 0 can have any unit.
        out2 = np.insert(q, (3, 5), 0)
        expected2 = np.insert(q.value, (3, 5), 0) << q.unit
        assert np.all(out2 == expected2)
        a = np.arange(3.0)
        result = np.insert(a, (2,), 50.0 * u.percent)
        assert isinstance(result, u.Quantity)
        assert result.unit == u.dimensionless_unscaled
        expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
        assert np.all(result == expected)
        # Indices must not carry units; values must be convertible.
        with pytest.raises(TypeError):
            np.insert(q, 3 * u.cm, 50.0 * u.cm)
        with pytest.raises(u.UnitsError):
            np.insert(q, (3, 5), 0.0 * u.s)
    @needs_array_function
    def test_pad(self):
        q = np.arange(1.0, 6.0) * u.m
        out = np.pad(q, (2, 3), "constant", constant_values=(0.0, 150.0 * u.cm))
        assert out.unit == q.unit
        expected = (
            np.pad(q.value, (2, 3), "constant", constant_values=(0.0, 1.5)) * q.unit
        )
        assert np.all(out == expected)
        out2 = np.pad(q, (2, 3), "constant", constant_values=150.0 * u.cm)
        assert out2.unit == q.unit
        expected2 = np.pad(q.value, (2, 3), "constant", constant_values=1.5) * q.unit
        assert np.all(out2 == expected2)
        out3 = np.pad(q, (2, 3), "linear_ramp", end_values=(25.0 * u.cm, 0.0))
        assert out3.unit == q.unit
        expected3 = (
            np.pad(q.value, (2, 3), "linear_ramp", end_values=(0.25, 0.0)) * q.unit
        )
        assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
    """Splitting functions return a list of pieces, each keeping the unit."""
    def setup_method(self):
        self.q = np.arange(54.0).reshape(3, 3, 6) * u.m
    def check(self, func, *args, **kwargs):
        # Every piece of the split must carry the original unit.
        out = func(self.q, *args, **kwargs)
        expected = func(self.q.value, *args, **kwargs)
        expected = [x * self.q.unit for x in expected]
        assert len(out) == len(expected)
        assert all(o.shape == x.shape for o, x in zip(out, expected))
        assert all(np.all(o == x) for o, x in zip(out, expected))
    def test_split(self):
        self.check(np.split, [1])
    def test_array_split(self):
        self.check(np.array_split, 2)
    def test_hsplit(self):
        self.check(np.hsplit, [1, 4])
    def test_vsplit(self):
        self.check(np.vsplit, [1])
    def test_dsplit(self):
        self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
    """Reductions: unit-preserving ones work; truth/product ones must raise."""
    def test_amax(self):
        self.check(np.amax)
    def test_amin(self):
        self.check(np.amin)
    def test_sum(self):
        self.check(np.sum)
    def test_cumsum(self):
        self.check(np.cumsum)
    def test_any(self):
        # Truthiness of a dimensional quantity is undefined.
        with pytest.raises(TypeError):
            np.any(self.q)
    def test_all(self):
        with pytest.raises(TypeError):
            np.all(self.q)
    def test_sometrue(self):
        with pytest.raises(TypeError):
            np.sometrue(self.q)
    def test_alltrue(self):
        with pytest.raises(TypeError):
            np.alltrue(self.q)
    def test_prod(self):
        # A product would raise the unit to a data-dependent power; disallowed.
        with pytest.raises(u.UnitsError):
            np.prod(self.q)
    def test_product(self):
        with pytest.raises(u.UnitsError):
            np.product(self.q)
    def test_cumprod(self):
        with pytest.raises(u.UnitsError):
            np.cumprod(self.q)
    def test_cumproduct(self):
        with pytest.raises(u.UnitsError):
            np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
    """Element-wise helpers that are not ufuncs but behave like them."""
    def test_ptp(self):
        self.check(np.ptp)
        self.check(np.ptp, axis=0)
    def test_round_(self):
        self.check(np.round_)
    def test_around(self):
        self.check(np.around)
    def test_fix(self):
        self.check(np.fix)
    def test_angle(self):
        q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
        out = np.angle(q)
        # The input unit is irrelevant; an angle comes back in radian.
        expected = np.angle(q.value) * u.radian
        assert np.all(out == expected)
    def test_i0(self):
        q = np.array([0.0, 10.0, 20.0]) * u.percent
        out = np.i0(q)
        expected = np.i0(q.to_value(u.one)) * u.one
        assert isinstance(out, u.Quantity)
        assert np.all(out == expected)
        # i0 only makes sense for dimensionless input.
        with pytest.raises(u.UnitsError):
            np.i0(self.q)
    def test_clip(self):
        # Bounds in a convertible unit are converted before clipping.
        qmin = 200 * u.cm
        qmax = [270, 280, 290] * u.cm
        out = np.clip(self.q, qmin, qmax)
        unit = self.q.unit
        expected = (
            np.clip(self.q.value, qmin.to_value(unit), qmax.to_value(unit)) * unit
        )
        assert np.all(out == expected)
    @needs_array_function
    def test_sinc(self):
        # sinc requires an angle (converted to radian), not plain dimensionless.
        q = [0.0, 3690.0, -270.0, 690.0] * u.deg
        out = np.sinc(q)
        expected = np.sinc(q.to_value(u.radian)) * u.one
        assert isinstance(out, u.Quantity)
        assert np.all(out == expected)
        with pytest.raises(u.UnitsError):
            np.sinc(1.0 * u.one)
    @needs_array_function
    def test_where(self):
        out = np.where([True, False, True], self.q, 1.0 * u.km)
        expected = np.where([True, False, True], self.q.value, 1000.0) * self.q.unit
        assert np.all(out == expected)
    @needs_array_function
    def test_choose(self):
        # from np.choose docstring
        a = np.array([0, 1]).reshape((2, 1, 1))
        q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
        q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
        out = np.choose(a, (q1, q2))
        # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
        expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
        assert np.all(out == expected)
    @needs_array_function
    def test_select(self):
        q = self.q
        out = np.select(
            [q < 0.55 * u.m, q > 1.0 * u.m], [q, q.to(u.cm)], default=-1.0 * u.km
        )
        expected = (
            np.select([q.value < 0.55, q.value > 1], [q.value, q.value], default=-1000)
            * u.m
        )
        assert np.all(out == expected)
    @needs_array_function
    def test_real_if_close(self):
        q = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j]) * u.m
        out = np.real_if_close(q)
        expected = np.real_if_close(q.value) * u.m
        assert np.all(out == expected)
    @needs_array_function
    def test_tril(self):
        self.check(np.tril)
    @needs_array_function
    def test_triu(self):
        self.check(np.triu)
    @needs_array_function
    def test_unwrap(self):
        q = [0.0, 3690.0, -270.0, 690.0] * u.deg
        out = np.unwrap(q)
        # Unwrapping happens in radian; result converted back to the input unit.
        expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
        assert out.unit == expected.unit
        assert np.allclose(out, expected, atol=1 * u.urad, rtol=0)
        with pytest.raises(u.UnitsError):
            np.unwrap([1.0, 2.0] * u.m)
        with pytest.raises(u.UnitsError):
            np.unwrap(q, discont=1.0 * u.m)
    def test_nan_to_num(self):
        q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
        out = np.nan_to_num(q)
        expected = np.nan_to_num(q.value) * q.unit
        assert np.all(out == expected)
    @needs_array_function
    def test_nan_to_num_complex(self):
        # Replacement values in km are converted to the array's m.
        q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
        out = np.nan_to_num(q, nan=1.0 * u.km, posinf=2.0 * u.km, neginf=-2 * u.km)
        expected = [-2000.0, 2000.0, 1000.0, 3.0, 4.0] * u.m
        assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
    """Ufunc-like predicate functions: results are plain boolean arrays.

    Each check verifies that applying the predicate to a Quantity gives
    exactly the same boolean ndarray as applying it to the raw values,
    i.e., that the unit is stripped rather than propagated.
    """
    def setup_method(self):
        # Include infinities and NaN so isposinf/isneginf have hits.
        self.q = np.array([-np.inf, +np.inf, np.nan, 3.0, 4.0]) * u.m
    def check(self, func):
        out = func(self.q)
        expected = func(self.q.value)
        assert type(out) is np.ndarray
        assert out.dtype.kind == "b"
        assert np.all(out == expected)
    def test_isposinf(self):
        self.check(np.isposinf)
    def test_isneginf(self):
        self.check(np.isneginf)
    def test_isreal(self):
        self.check(np.isreal)
        assert not np.isreal([1.0 + 1j] * u.m)
    def test_iscomplex(self):
        self.check(np.iscomplex)
        assert np.iscomplex([1.0 + 1j] * u.m)
    def test_isclose(self):
        q1 = np.arange(3.0) * u.m
        q2 = np.array([0.0, 102.0, 199.0]) * u.cm
        atol = 1.5 * u.cm
        rtol = 1.0 * u.percent
        # A Quantity atol is converted to q1's unit before comparison.
        out = np.isclose(q1, q2, atol=atol)
        expected = np.isclose(
            q1.value, q2.to_value(q1.unit), atol=atol.to_value(q1.unit)
        )
        assert type(out) is np.ndarray
        assert out.dtype.kind == "b"
        assert np.all(out == expected)
        # A dimensionless rtol (percent) is likewise accepted.
        out = np.isclose(q1, q2, atol=0, rtol=rtol)
        expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0, rtol=0.01)
        assert type(out) is np.ndarray
        assert out.dtype.kind == "b"
        assert np.all(out == expected)
    @needs_array_function
    def test_isclose_atol_default_unit(self):
        # Renamed from test_allclose_atol_default_unit: this method exercises
        # np.isclose, and the old name duplicated an identically named test in
        # TestReductionLikeFunctions.
        q_cm = self.q.to(u.cm)
        out = np.isclose(self.q, q_cm)
        expected = np.isclose(self.q.value, q_cm.to_value(u.m))
        assert np.all(out == expected)
        # A unitless atol is interpreted in the unit of the first argument.
        q1 = np.arange(3.0) * u.m
        q2 = np.array([0.0, 101.0, 198.0]) * u.cm
        out = np.isclose(q1, q2, atol=0.011, rtol=0)
        expected = np.isclose(q1.value, q2.to_value(q1.unit), atol=0.011, rtol=0)
        assert np.all(out == expected)
        out2 = np.isclose(q2, q1, atol=0.011, rtol=0)
        expected2 = np.isclose(q2.value, q1.to_value(q2.unit), atol=0.011, rtol=0)
        assert np.all(out2 == expected2)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
    """Reductions and comparisons beyond plain ufunc.reduce."""
    def test_average(self):
        q1 = np.arange(9.0).reshape(3, 3) * u.m
        q2 = np.eye(3) / u.s
        o = np.average(q1, weights=q2)
        # The weights' unit cancels in the average; result keeps q1's unit.
        expected = np.average(q1.value, weights=q2.value) * u.m
        assert np.all(o == expected)
    def test_mean(self):
        self.check(np.mean)
    def test_std(self):
        self.check(np.std)
    def test_var(self):
        # Variance squares the unit.
        o = np.var(self.q)
        expected = np.var(self.q.value) * self.q.unit**2
        assert np.all(o == expected)
    def test_median(self):
        self.check(np.median)
    @needs_array_function
    def test_quantile(self):
        self.check(np.quantile, 0.5)
        # The quantile level may itself be a dimensionless Quantity.
        o = np.quantile(self.q, 50 * u.percent)
        expected = np.quantile(self.q.value, 0.5) * u.m
        assert np.all(o == expected)
        # For ndarray input, we return a Quantity.
        o2 = np.quantile(self.q.value, 50 * u.percent)
        assert o2.unit == u.dimensionless_unscaled
        assert np.all(o2 == expected.value)
        o3 = 0 * o2
        result = np.quantile(self.q, 50 * u.percent, out=o3)
        assert result is o3
        assert np.all(o3 == expected)
        # out may also be passed positionally (4th positional argument).
        o4 = 0 * o2
        result = np.quantile(self.q, 50 * u.percent, None, o4)
        assert result is o4
        assert np.all(o4 == expected)
    @needs_array_function
    def test_percentile(self):
        self.check(np.percentile, 0.5)
        o = np.percentile(self.q, 0.5 * u.one)
        expected = np.percentile(self.q.value, 50) * u.m
        assert np.all(o == expected)
    def test_trace(self):
        self.check(np.trace)
    @needs_array_function
    def test_count_nonzero(self):
        q1 = np.arange(9.0).reshape(3, 3) * u.m
        o = np.count_nonzero(q1)
        assert type(o) is not u.Quantity
        assert o == 8
        o = np.count_nonzero(q1, axis=1)
        # Counts are plain integers; the unit must be dropped.
        assert type(o) is np.ndarray
        assert np.all(o == np.array([2, 3, 3]))
    def test_allclose(self):
        q1 = np.arange(3.0) * u.m
        q2 = np.array([0.0, 101.0, 199.0]) * u.cm
        atol = 2 * u.cm
        rtol = 1.0 * u.percent
        assert np.allclose(q1, q2, atol=atol)
        assert np.allclose(q1, q2, atol=0.0, rtol=rtol)
    @needs_array_function
    def test_allclose_atol_default_unit(self):
        # A unitless atol is interpreted in the unit of the first argument,
        # so the comparison is not symmetric between m and cm inputs.
        q1 = np.arange(3.0) * u.m
        q2 = np.array([0.0, 101.0, 199.0]) * u.cm
        assert np.allclose(q1, q2, atol=0.011, rtol=0)
        assert not np.allclose(q2, q1, atol=0.011, rtol=0)
    def test_allclose_failures(self):
        q1 = np.arange(3.0) * u.m
        q2 = np.array([0.0, 101.0, 199.0]) * u.cm
        with pytest.raises(u.UnitsError):
            np.allclose(q1, q2, atol=2 * u.s, rtol=0)
        with pytest.raises(u.UnitsError):
            np.allclose(q1, q2, atol=0, rtol=1.0 * u.s)
    @needs_array_function
    def test_array_equal(self):
        q1 = np.arange(3.0) * u.m
        q2 = q1.to(u.cm)
        assert np.array_equal(q1, q2)
        q3 = q1.value * u.cm
        assert not np.array_equal(q1, q3)
    @pytest.mark.parametrize("equal_nan", [False, True])
    def test_array_equal_nan(self, equal_nan):
        q1 = np.linspace(0, 1, num=11) * u.m
        q1[0] = np.nan
        q2 = q1.to(u.cm)
        result = np.array_equal(q1, q2, equal_nan=equal_nan)
        assert result == equal_nan
    @needs_array_function
    def test_array_equiv(self):
        q1 = np.array([[0.0, 1.0, 2.0]] * 3) * u.m
        q2 = q1[0].to(u.cm)
        assert np.array_equiv(q1, q2)
        q3 = q1[0].value * u.cm
        assert not np.array_equiv(q1, q3)
class TestNanFunctions(InvariantUnitTestSetup):
    """NaN-ignoring reductions on a Quantity containing one NaN."""
    def setup_method(self):
        super().setup_method()
        # Plant a NaN so the nan* variants differ from the plain reductions.
        self.q[1, 1] = np.nan
    def test_nanmax(self):
        self.check(np.nanmax)
    def test_nanmin(self):
        self.check(np.nanmin)
    def test_nanargmin(self):
        out = np.nanargmin(self.q)
        expected = np.nanargmin(self.q.value)
        assert out == expected
    def test_nanargmax(self):
        out = np.nanargmax(self.q)
        expected = np.nanargmax(self.q.value)
        assert out == expected
    def test_nanmean(self):
        self.check(np.nanmean)
    def test_nanmedian(self):
        self.check(np.nanmedian)
    def test_nansum(self):
        self.check(np.nansum)
    def test_nancumsum(self):
        self.check(np.nancumsum)
    def test_nanstd(self):
        self.check(np.nanstd)
    def test_nanvar(self):
        # Variance squares the unit.
        out = np.nanvar(self.q)
        expected = np.nanvar(self.q.value) * self.q.unit**2
        assert np.all(out == expected)
    def test_nanprod(self):
        # Products would raise the unit to a data-dependent power; disallowed.
        with pytest.raises(u.UnitsError):
            np.nanprod(self.q)
    def test_nancumprod(self):
        with pytest.raises(u.UnitsError):
            np.nancumprod(self.q)
    @needs_array_function
    def test_nanquantile(self):
        self.check(np.nanquantile, 0.5)
        o = np.nanquantile(self.q, 50 * u.percent)
        expected = np.nanquantile(self.q.value, 0.5) * u.m
        assert np.all(o == expected)
    @needs_array_function
    def test_nanpercentile(self):
        self.check(np.nanpercentile, 0.5)
        o = np.nanpercentile(self.q, 0.5 * u.one)
        expected = np.nanpercentile(self.q.value, 50) * u.m
        assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
    """
    Test functions that are similar to gufuncs: products multiply the units.
    """
    @needs_array_function
    def test_cross(self):
        q1 = np.arange(6.0).reshape(2, 3) * u.m
        q2 = np.array([4.0, 5.0, 6.0]) / u.s
        o = np.cross(q1, q2)
        expected = np.cross(q1.value, q2.value) * u.m / u.s
        assert np.all(o == expected)
    @needs_array_function
    def test_outer(self):
        q1 = np.array([1, 2, 3]) * u.m
        q2 = np.array([1, 2]) / u.s
        o = np.outer(q1, q2)
        assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
        # out= is honoured; a non-array out must raise.
        o2 = 0 * o
        result = np.outer(q1, q2, out=o2)
        assert result is o2
        assert np.all(o2 == o)
        with pytest.raises(TypeError):
            np.outer(q1, q2, out=object())
    @needs_array_function
    def test_inner(self):
        q1 = np.array([1, 2, 3]) * u.m
        q2 = np.array([4, 5, 6]) / u.s
        o = np.inner(q1, q2)
        assert o == 32 * u.m / u.s
    @needs_array_function
    def test_dot(self):
        q1 = np.array([1.0, 2.0, 3.0]) * u.m
        q2 = np.array([4.0, 5.0, 6.0]) / u.s
        o = np.dot(q1, q2)
        assert o == 32.0 * u.m / u.s
    @needs_array_function
    def test_vdot(self):
        q1 = np.array([1j, 2j, 3j]) * u.m
        q2 = np.array([4j, 5j, 6j]) / u.s
        o = np.vdot(q1, q2)
        assert o == (32.0 + 0j) * u.m / u.s
    @needs_array_function
    def test_tensordot(self):
        # From the docstring example
        a = np.arange(60.0).reshape(3, 4, 5) * u.m
        b = np.arange(24.0).reshape(4, 3, 2) / u.s
        c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
        expected = np.tensordot(a.value, b.value, axes=([1, 0], [0, 1])) * u.m / u.s
        assert np.all(c == expected)
    @needs_array_function
    def test_kron(self):
        q1 = np.eye(2) * u.m
        q2 = np.ones(2) / u.s
        o = np.kron(q1, q2)
        expected = np.kron(q1.value, q2.value) * u.m / u.s
        assert np.all(o == expected)
    @needs_array_function
    def test_einsum(self):
        q1 = np.arange(9.0).reshape(3, 3) * u.m
        o = np.einsum("...i", q1)
        assert np.all(o == q1)
        o = np.einsum("ii", q1)
        expected = np.einsum("ii", q1.value) * u.m
        assert np.all(o == expected)
        q2 = np.eye(3) / u.s
        o2 = np.einsum("ij,jk", q1, q2)
        assert np.all(o2 == q1 / u.s)
        # out= is honoured for einsum as well.
        o3 = 0 * o2
        result = np.einsum("ij,jk", q1, q2, out=o3)
        assert result is o3
        assert np.all(o3 == o2)
    def test_einsum_path(self):
        # einsum_path only plans the contraction; units are irrelevant.
        q1 = np.arange(9.0).reshape(3, 3) * u.m
        o = np.einsum_path("...i", q1)
        assert o[0] == ["einsum_path", (0,)]
        o = np.einsum_path("ii", q1)
        assert o[0] == ["einsum_path", (0,)]
        q2 = np.eye(3) / u.s
        o = np.einsum_path("ij,jk", q1, q2)
        assert o[0] == ["einsum_path", (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
    """Integration and differencing: units combine as expected."""
    def test_trapz(self):
        y = np.arange(9.0) * u.m / u.s
        out = np.trapz(y)
        expected = np.trapz(y.value) * y.unit
        assert np.all(out == expected)
        # With a step size dx, the result unit is y.unit * dx.unit.
        dx = 10.0 * u.s
        out = np.trapz(y, dx=dx)
        expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
        assert np.all(out == expected)
        # Likewise with explicit sample positions x.
        x = np.arange(9.0) * u.s
        out = np.trapz(y, x)
        expected = np.trapz(y.value, x.value) * y.unit * x.unit
        assert np.all(out == expected)
    def test_diff(self):
        # Simple diff works out of the box.
        x = np.arange(10.0) * u.m
        out = np.diff(x)
        expected = np.diff(x.value) * u.m
        assert np.all(out == expected)
    @needs_array_function
    def test_diff_prepend_append(self):
        # prepend/append values in convertible units are converted first.
        x = np.arange(10.0) * u.m
        out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km)
        expected = np.diff(x.value, prepend=-0.125, append=1000.0) * x.unit
        assert np.all(out == expected)
        x = np.arange(10.0) * u.m
        out = np.diff(x, prepend=-12.5 * u.cm, append=1 * u.km, n=2)
        expected = np.diff(x.value, prepend=-0.125, append=1000.0, n=2) * x.unit
        assert np.all(out == expected)
        with pytest.raises(TypeError):
            np.diff(x, prepend=object())
    def test_gradient(self):
        # Simple gradient works out of the box.
        x = np.arange(10.0) * u.m
        out = np.gradient(x)
        expected = np.gradient(x.value) * u.m
        assert np.all(out == expected)
    @needs_array_function
    def test_gradient_spacing(self):
        # Spacing with a unit divides into the result (m / s here).
        x = np.arange(10.0) * u.m
        spacing = 10.0 * u.s
        out = np.gradient(x, spacing)
        expected = np.gradient(x.value, spacing.value) * (x.unit / spacing.unit)
        assert np.all(out == expected)
        # 2-D case: one spacing per axis, each with its own unit.
        f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
        dx = 2.0 * u.s
        y = [1.0, 1.5, 3.5] * u.GHz
        dfdx, dfdy = np.gradient(f, dx, y)
        exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
        exp_dfdx = exp_dfdx * f.unit / dx.unit
        exp_dfdy = exp_dfdy * f.unit / y.unit
        assert np.all(dfdx == exp_dfdx)
        assert np.all(dfdy == exp_dfdy)
        dfdx2 = np.gradient(f, dx, axis=0)
        assert np.all(dfdx2 == exp_dfdx)
        dfdy2 = np.gradient(f, y, axis=(1,))
        assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
    """linspace/logspace/geomspace with Quantity endpoints."""
    def test_linspace(self):
        # Note: the result takes the unit of the end point (10 km -> km),
        # not of the start point.
        out = np.linspace(1000.0 * u.m, 10.0 * u.km, 5)
        expected = np.linspace(1, 10, 5) * u.km
        assert np.all(out == expected)
        # Array-valued start also works; start is converted to stop's unit.
        q1 = np.arange(6.0).reshape(2, 3) * u.m
        q2 = 10000.0 * u.cm
        out = np.linspace(q1, q2, 5)
        expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
        assert np.all(out == expected)
    @needs_array_function
    def test_logspace(self):
        # Endpoints must carry logarithmic units (dex, magnitudes).
        unit = u.m / u.s**2
        out = np.logspace(10.0 * u.dex(unit), 20 * u.dex(unit), 10)
        expected = np.logspace(10.0, 20.0, 10) * unit
        assert np.all(out == expected)
        # ST magnitudes: base 10**(-0.4) per magnitude step.
        out = np.logspace(10.0 * u.STmag, 20 * u.STmag, 10)
        expected = np.logspace(10.0, 20.0, 10, base=10.0 ** (-0.4)) * u.ST
        assert u.allclose(out, expected)
    @needs_array_function
    def test_geomspace(self):
        out = np.geomspace(1000.0 * u.m, 10.0 * u.km, 5)
        expected = np.geomspace(1, 10, 5) * u.km
        assert np.all(out == expected)
        q1 = np.arange(1.0, 7.0).reshape(2, 3) * u.m
        q2 = 10000.0 * u.cm
        out = np.geomspace(q1, q2, 5)
        expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
        assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
    """np.interp and np.piecewise with Quantity arguments."""
    @needs_array_function
    def test_interp(self):
        x = np.array([1250.0, 2750.0]) * u.m
        xp = np.arange(5.0) * u.km
        yp = np.arange(5.0) * u.day
        out = np.interp(x, xp, yp)
        expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
        assert np.all(out == expected)
        # Plain-valued yp gives a plain ndarray back.
        out = np.interp(x, xp, yp.value)
        assert type(out) is np.ndarray
        assert np.all(out == expected.value)
    @needs_array_function
    def test_piecewise(self):
        x = np.linspace(-2.5, 2.5, 6) * u.m
        # Output values in different but convertible units (s, day) -> s.
        out = np.piecewise(x, [x < 0, x >= 0], [-1 * u.s, 1 * u.day])
        expected = (
            np.piecewise(x.value, [x.value < 0, x.value >= 0], [-1, 24 * 3600]) * u.s
        )
        assert out.unit == expected.unit
        assert np.all(out == expected)
        # Callables in funclist may return Quantities too.
        out2 = np.piecewise(
            x, [x < 1 * u.m, x >= 0], [-1 * u.s, 1 * u.day, lambda x: 1 * u.hour]
        )
        expected2 = (
            np.piecewise(x.value, [x.value < 1, x.value >= 0], [-1, 24 * 3600, 3600])
            * u.s
        )
        assert out2.unit == expected2.unit
        assert np.all(out2 == expected2)
        out3 = np.piecewise(
            x, [x < 1 * u.m, x >= 0], [0, 1 * u.percent, lambda x: 1 * u.one]
        )
        expected3 = (
            np.piecewise(x.value, [x.value < 1, x.value >= 0], [0, 0.01, 1]) * u.one
        )
        assert out3.unit == expected3.unit
        assert np.all(out3 == expected3)
        with pytest.raises(TypeError): # no Quantity in condlist.
            np.piecewise(x, [x], [0.0])
        with pytest.raises(TypeError): # no Quantity in condlist.
            np.piecewise(x.value, [x], [0.0])
class TestBincountDigitize(metaclass=CoverageMeta):
    """np.bincount and np.digitize with Quantity weights/bins."""
    @needs_array_function
    def test_bincount(self):
        i = np.array([1, 1, 2, 3, 2, 4])
        weights = np.arange(len(i)) * u.Jy
        out = np.bincount(i, weights)
        expected = np.bincount(i, weights.value) * weights.unit
        assert_array_equal(out, expected)
        # The binned array itself must not carry a unit.
        with pytest.raises(TypeError):
            np.bincount(weights)
    @needs_array_function
    def test_digitize(self):
        # x is converted to the bins' unit before digitizing.
        x = np.array([1500.0, 2500.0, 4500.0]) * u.m
        bins = np.arange(10.0) * u.km
        out = np.digitize(x, bins)
        expected = np.digitize(x.to_value(bins.unit), bins.value)
        assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup_method(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(
self,
function,
*args,
value_args=None,
value_kwargs=None,
expected_units=None,
**kwargs
):
"""Check quanties are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the argument can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
# Check bin edges. Here, histogramdd returns an interable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(
out[bin_slice], expected[bin_slice], expected_units[bin_slice]
):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
@needs_array_function
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(
np.histogram, x, value_args=(x.value,), expected_units=(None, x.unit)
)
# With bins.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
value_args=(x.value, [1.25, 2.0]),
expected_units=(None, x.unit),
)
# With density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
density=True,
value_args=(x.value, [1.25, 2.0]),
expected_units=(1 / x.unit, x.unit),
)
# With weights.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit),
)
# With weights and density.
self.check(
np.histogram,
x,
[125, 200] * u.cm,
weights=weights,
density=True,
value_args=(x.value, [1.25, 2.0]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit / x.unit, x.unit),
)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@needs_array_function
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.0]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
    @needs_array_function
    def test_histogram2d(self):
        """np.histogram2d: counts are plain (or density/weight units), and the
        two sets of bin edges carry the units of x and y respectively."""
        x, y = self.x, self.y
        weights = self.weights
        # Basic tests with X, Y.
        self.check(
            np.histogram2d,
            x,
            y,
            value_args=(x.value, y.value),
            expected_units=(None, x.unit, y.unit),
        )
        # Check units with density: counts get 1/(x.unit * y.unit).
        self.check(
            np.histogram2d,
            x,
            y,
            density=True,
            value_args=(x.value, y.value),
            expected_units=(1 / (x.unit * y.unit), x.unit, y.unit),
        )
        # Check units with weights: counts take the weight unit.
        self.check(
            np.histogram2d,
            x,
            y,
            weights=weights,
            value_args=(x.value, y.value),
            value_kwargs=dict(weights=weights.value),
            expected_units=(weights.unit, x.unit, y.unit),
        )
        # Check quantity bin sizes (here in m, converted for the value call).
        inb_y = [0, 0.025, 1.0] * u.m
        self.check(
            np.histogram2d,
            x,
            y,
            [5, inb_y],
            value_args=(x.value, y.value, [5, np.array([0, 2.5, 100.0])]),
            expected_units=(None, x.unit, y.unit),
        )
        # Check we dispatch on bin sizes (and check kwarg as well):
        # plain-array data with Quantity bins must still go through the helper.
        inb2_y = [0, 250, 10000.0] * u.percent
        self.check(
            np.histogram2d,
            x.value,
            y.value,
            bins=[5, inb2_y],
            value_args=(x.value, y.value),
            value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
            expected_units=(None, u.one, u.one),
        )
        # Single-item bins should be integer, not Quantity.
        with pytest.raises(TypeError):
            np.histogram2d(x, y, 125 * u.s)
        with pytest.raises(TypeError):
            np.histogram2d(x.value, y.value, 125 * u.s)
        # Bin units need to match units of x, y.
        with pytest.raises(u.UnitsError):
            np.histogram2d(x, y, [125, 200] * u.s)
        with pytest.raises(u.UnitsError):
            np.histogram2d(x, y, ([125, 200], [125, 200]))
        with pytest.raises(u.UnitsError):
            np.histogram2d(x.value, y.value, [125, 200] * u.s)
    @needs_array_function
    def test_histogramdd(self):
        """np.histogramdd: like histogram2d, but the sample is an N-D tuple
        (or array) and the edges come back as a tuple per dimension."""
        # First replicates of the histogram2d tests, but using the
        # histogramdd override. Normally takes the sample as a tuple
        # with a given number of dimensions, and returns the histogram
        # as well as a tuple of bin edges.
        sample = self.x, self.y
        sample_units = self.x.unit, self.y.unit
        sample_values = (self.x.value, self.y.value)
        weights = self.weights
        # Basic tests with X, Y
        self.check(
            np.histogramdd,
            sample,
            value_args=(sample_values,),
            expected_units=(None, sample_units),
        )
        # Check units with density: 1/(product of sample units).
        self.check(
            np.histogramdd,
            sample,
            density=True,
            value_args=(sample_values,),
            expected_units=(1 / (self.x.unit * self.y.unit), sample_units),
        )
        # Check units with weights.
        self.check(
            np.histogramdd,
            sample,
            weights=weights,
            value_args=(sample_values,),
            value_kwargs=dict(weights=weights.value),
            expected_units=(weights.unit, sample_units),
        )
        # Check quantity bin sizes.
        inb_y = [0, 0.025, 1.0] * u.m
        self.check(
            np.histogramdd,
            sample,
            [5, inb_y],
            value_args=(sample_values, [5, np.array([0, 2.5, 100.0])]),
            expected_units=(None, sample_units),
        )
        # Check we dispatch on bin sizes (and check kwarg as well):
        # plain-valued sample with Quantity bins must still be handled.
        inb2_y = [0, 250, 10000.0] * u.percent
        self.check(
            np.histogramdd,
            sample_values,
            bins=[5, inb2_y],
            value_args=(sample_values,),
            value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.0])]),
            expected_units=(None, (u.one, u.one)),
        )
        # For quantities, it is probably not that likely one would pass
        # in the sample as an array, but check that it works anyway.
        # This also gives a 3-D check.
        xyz = np.random.normal(size=(10, 3)) * u.m
        self.check(
            np.histogramdd,
            xyz,
            value_args=(xyz.value,),
            expected_units=(None, (xyz.unit,) * 3),
        )
        # Passing it in as a tuple should work just as well; note the
        # *last* axis contains the sample dimension.
        self.check(
            np.histogramdd,
            (xyz[:, 0], xyz[:, 1], xyz[:, 2]),
            value_args=(xyz.value,),
            expected_units=(None, (xyz.unit,) * 3),
        )
        # Single-item bins should be integer, not Quantity.
        with pytest.raises(TypeError):
            np.histogramdd(sample, 125 * u.s)
        # Sequence of single items should be integer.
        with pytest.raises(TypeError):
            np.histogramdd(sample, [125, 200] * u.s)
        with pytest.raises(TypeError):
            np.histogramdd(sample_values, [125, 200] * u.s)
        # Units of bins should match.
        with pytest.raises(u.UnitsError):
            np.histogramdd(sample, ([125, 200], [125, 200]))
        with pytest.raises(u.UnitsError):
            np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@needs_array_function
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m**2
assert np.all(out == expected)
@needs_array_function
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@needs_array_function
def test_corrcoef(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
    """Sorting functions simply preserve the unit of their input."""

    def test_sort(self):
        self.check(np.sort)

    def test_sort_axis(self):
        self.check(np.sort, axis=0)

    # np.msort was deprecated and later removed; only test on older numpy.
    @pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
    def test_msort(self):
        self.check(np.msort)

    @needs_array_function
    def test_sort_complex(self):
        self.check(np.sort_complex)

    def test_partition(self):
        self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
    """String representations of quantities via numpy's helpers."""

    # For these, making behaviour work means deviating only slightly from
    # the docstring, and by default they fail miserably. So, might as well.
    def setup_method(self):
        self.q = np.arange(3.0) * u.Jy

    @needs_array_function
    def test_array2string(self):
        # The default formatters cannot handle units, so if we do not pass
        # a relevant formatter, we are better off just treating it as an
        # array (which happens for all subtypes).
        out0 = np.array2string(self.q)
        expected0 = str(self.q.value)
        assert out0 == expected0
        # Arguments are interpreted as usual.
        out1 = np.array2string(self.q, separator=", ")
        expected1 = "[0., 1., 2.]"
        assert out1 == expected1
        # If we do pass in a formatter, though, it should be used.
        out2 = np.array2string(self.q, separator=", ", formatter={"all": str})
        expected2 = "[0.0 Jy, 1.0 Jy, 2.0 Jy]"
        assert out2 == expected2
        # Also as positional argument (no, nobody will do this!)
        # Positionally these fill max_line_width, precision, suppress_small,
        # separator, prefix, style, formatter; np._NoValue is the documented
        # default for ``style`` -- NOTE(review): private API, confirm against
        # the installed numpy's signature.
        out3 = np.array2string(
            self.q, None, None, None, ", ", "", np._NoValue, {"float": str}
        )
        assert out3 == expected2
        # But not if the formatter is not relevant for us.
        out4 = np.array2string(self.q, separator=", ", formatter={"int": str})
        assert out4 == expected1

    @needs_array_function
    def test_array_repr(self):
        out = np.array_repr(self.q)
        assert out == "Quantity([0., 1., 2.], unit='Jy')"
        # Non-default dtype must be included in the repr.
        q2 = self.q.astype("f4")
        out2 = np.array_repr(q2)
        assert out2 == "Quantity([0., 1., 2.], unit='Jy', dtype=float32)"

    @needs_array_function
    def test_array_str(self):
        out = np.array_str(self.q)
        expected = str(self.q)
        assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
    """Bit-packing and index helpers must reject quantities.

    They generally fail for floats anyway, so the float quantity is
    trivially safe; the integer-dtype quantity must be rejected too.
    """

    def setup_method(self):
        self.q = np.arange(3) * u.m
        self.uint_q = u.Quantity(np.arange(3), "m", dtype="u1")

    @needs_array_function
    def test_packbits(self):
        for bad in (self.q, self.uint_q):
            with pytest.raises(TypeError):
                np.packbits(bad)

    @needs_array_function
    def test_unpackbits(self):
        for bad in (self.q, self.uint_q):
            with pytest.raises(TypeError):
                np.unpackbits(bad)

    @needs_array_function
    def test_unravel_index(self):
        for bad in (self.q, self.uint_q):
            with pytest.raises(TypeError):
                np.unravel_index(bad, 3)

    @needs_array_function
    def test_ravel_multi_index(self):
        for bad in (self.q, self.uint_q):
            with pytest.raises(TypeError):
                np.ravel_multi_index((bad,), 3)

    @needs_array_function
    def test_ix_(self):
        for bad in (self.q, self.uint_q):
            with pytest.raises(TypeError):
                np.ix_(bad)
class TestDtypeFunctions(NoUnitTestSetup):
    """dtype-inspection functions ignore units and give plain results."""

    def test_common_type(self):
        self.check(np.common_type)

    def test_result_type(self):
        self.check(np.result_type)

    def test_can_cast(self):
        self.check(np.can_cast, self.q.dtype)
        self.check(np.can_cast, "f4")

    def test_min_scalar_type(self):
        # Scalar path: compare against the result for the plain value.
        out = np.min_scalar_type(self.q[0])
        expected = np.min_scalar_type(self.q.value[0])
        assert out == expected

    def test_iscomplexobj(self):
        self.check(np.iscomplexobj)

    def test_isrealobj(self):
        self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
    """np.meshgrid: each output grid keeps the unit of its own input."""

    def test_meshgrid(self):
        lengths = np.arange(3.0) * u.m
        times = np.arange(5.0) * u.s
        grid_l, grid_t = np.meshgrid(lengths, times)
        plain_l, plain_t = np.meshgrid(lengths.value, times.value)
        assert np.all(grid_l == plain_l * lengths.unit)
        assert np.all(grid_t == plain_t * times.unit)
class TestMemoryFunctions(NoUnitTestSetup):
    """Memory-overlap checks work on the underlying buffer; units irrelevant."""

    def test_shares_memory(self):
        self.check(np.shares_memory, self.q.value)

    def test_may_share_memory(self):
        self.check(np.may_share_memory, self.q.value)
# NOTE(review): class name has a typo ("Fcuntions"); left unchanged since
# renaming would alter the test id, but worth fixing file-wide.
class TestSetOpsFcuntions(metaclass=CoverageMeta):
    """Set operations convert all inputs to one unit and reattach it."""

    def setup_method(self):
        self.q = np.array([[0.0, 1.0, -1.0], [3.0, 5.0, 3.0], [0.0, 1.0, -1]]) * u.m
        self.q2 = np.array([0.0, 100.0, 150.0, 200.0]) * u.cm

    def check(self, function, qs, *args, **kwargs):
        """Compare function(*qs) with function on plain values in self.q.unit.

        ``unit`` (popped from kwargs; default self.q.unit) is the unit the
        first output should carry; pass ``unit=None`` for boolean results.
        """
        unit = kwargs.pop("unit", self.q.unit)
        out = function(*qs, *args, **kwargs)
        # All quantities are converted to a common unit before comparison.
        qv = tuple(q.to_value(self.q.unit) for q in qs)
        expected = function(*qv, *args, **kwargs)
        if isinstance(expected, tuple):
            # Only the first element (the values) carries a unit; the rest
            # are indices/counts.
            if unit:
                expected = (expected[0] * unit,) + expected[1:]
            for o, e in zip(out, expected):
                assert_array_equal(o, e)
        else:
            if unit:
                expected = expected * unit
            assert_array_equal(out, expected)

    def check1(self, function, *args, **kwargs):
        # Single-input convenience wrapper around check().
        self.check(function, (self.q,), *args, **kwargs)

    def check2(self, function, *args, **kwargs):
        # Two-input convenience wrapper (mixes m and cm on purpose).
        self.check(function, (self.q, self.q2), *args, **kwargs)

    @pytest.mark.parametrize(
        "kwargs",
        (
            dict(return_index=True, return_inverse=True),
            dict(return_counts=True),
            dict(return_index=True, return_inverse=True, return_counts=True),
        ),
    )
    def test_unique(self, kwargs):
        self.check1(np.unique, **kwargs)

    @needs_array_function
    @pytest.mark.parametrize(
        "kwargs",
        (
            dict(axis=0),
            dict(axis=1),
            dict(return_counts=True, return_inverse=False, axis=1),
        ),
    )
    def test_unique_more_complex(self, kwargs):
        self.check1(np.unique, **kwargs)

    @needs_array_function
    @pytest.mark.parametrize("kwargs", (dict(), dict(return_indices=True)))
    def test_intersect1d(self, kwargs):
        self.check2(np.intersect1d, **kwargs)

    @needs_array_function
    def test_setxor1d(self):
        self.check2(np.setxor1d)

    @needs_array_function
    def test_union1d(self):
        self.check2(np.union1d)
        # NaN mixed with a quantity: result takes the quantity's unit.
        result = np.union1d(np.array([0.0, np.nan]), np.arange(3) << u.m)
        assert result.unit is u.m
        assert_array_equal(result.value, np.array([0.0, 1.0, 2.0, np.nan]))

    @needs_array_function
    def test_setdiff1d(self):
        self.check2(np.setdiff1d)

    @needs_array_function
    def test_in1d(self):
        self.check2(np.in1d, unit=None)
        # Check zero is treated as having any unit.
        assert np.in1d(np.zeros(1), self.q2)
        with pytest.raises(u.UnitsError):
            np.in1d(np.ones(1), self.q2)

    @needs_array_function
    def test_isin(self):
        self.check2(np.isin, unit=None)

    def test_ediff1d(self):
        # ediff1d works always as it calls the Quantity method.
        self.check1(np.ediff1d)
        # to_begin/to_end quantities are converted to x's unit.
        x = np.arange(10.0) * u.m
        out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
        expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.0) * x.unit
        assert_array_equal(out, expected)
class TestDatetimeFunctions(BasicTestSetup):
    """Datetime helpers require datetime64 input, so quantities must raise."""

    def test_busday_count(self):
        with pytest.raises(TypeError):
            np.busday_count(self.q, self.q)

    def test_busday_offset(self):
        with pytest.raises(TypeError):
            np.busday_offset(self.q, self.q)

    def test_datetime_as_string(self):
        with pytest.raises(TypeError):
            np.datetime_as_string(self.q)

    def test_is_busday(self):
        with pytest.raises(TypeError):
            np.is_busday(self.q)
# These functions always worked; ensure they do not regress.
# Note that they are *not* wrapped so no need to check coverage.
@pytest.mark.parametrize("function", [np.fft.fftfreq, np.fft.rfftfreq])
def test_fft_frequencies(function):
    """Sample spacing in seconds yields frequencies in 1/second."""
    result = function(128, d=0.1 * u.s)
    assert_array_equal(result, function(128, d=0.1) / u.s)
@needs_array_function
class TestFFT(InvariantUnitTestSetup):
    """FFT routines are linear, so they simply preserve the input unit."""

    # These are all trivial, just preserve the unit.
    def setup_method(self):
        # Use real input; gets turned into complex as needed.
        self.q = np.arange(128.0).reshape(8, -1) * u.s

    def test_fft(self):
        self.check(np.fft.fft)

    def test_ifft(self):
        self.check(np.fft.ifft)

    def test_rfft(self):
        self.check(np.fft.rfft)

    def test_irfft(self):
        self.check(np.fft.irfft)

    def test_fft2(self):
        self.check(np.fft.fft2)

    def test_ifft2(self):
        self.check(np.fft.ifft2)

    def test_rfft2(self):
        self.check(np.fft.rfft2)

    def test_irfft2(self):
        self.check(np.fft.irfft2)

    def test_fftn(self):
        self.check(np.fft.fftn)

    def test_ifftn(self):
        self.check(np.fft.ifftn)

    def test_rfftn(self):
        self.check(np.fft.rfftn)

    def test_irfftn(self):
        self.check(np.fft.irfftn)

    def test_hfft(self):
        self.check(np.fft.hfft)

    def test_ihfft(self):
        self.check(np.fft.ihfft)

    def test_fftshift(self):
        self.check(np.fft.fftshift)

    def test_ifftshift(self):
        self.check(np.fft.ifftshift)
class TestLinAlg(metaclass=CoverageMeta):
    """Check unit propagation through `numpy.linalg` routines.

    The expected units follow the mathematics: e.g. the inverse of a
    matrix in m has unit 1/m, eigenvalues keep the matrix unit, the
    Cholesky factor gets the square root of the unit, etc.
    """

    def setup_method(self):
        # A well-conditioned, invertible 3x3 matrix in meters.
        self.q = (
            np.array(
                [[ 1.0, -1.0, 2.0],
                 [ 0.0, 3.0, -1.0],
                 [-1.0, -1.0, 1.0]]
            ) << u.m
        )  # fmt: skip

    def test_cond(self):
        # Condition number is a dimensionless ratio.
        c = np.linalg.cond(self.q)
        expected = np.linalg.cond(self.q.value)
        assert c == expected

    def test_matrix_rank(self):
        r = np.linalg.matrix_rank(self.q)
        x = np.linalg.matrix_rank(self.q.value)
        assert r == x

    @needs_array_function
    def test_matrix_rank_with_tol(self):
        # Use a matrix that is not so good, so tol=1 and tol=0.01 differ.
        q = np.arange(9.0).reshape(3, 3) / 4 * u.m
        tol = 1.0 * u.cm
        r2 = np.linalg.matrix_rank(q, tol)
        x2 = np.linalg.matrix_rank(q.value, tol.to_value(q.unit))
        assert r2 == x2

    def test_matrix_power(self):
        q1 = np.linalg.matrix_power(self.q, 1)
        assert_array_equal(q1, self.q)
        q2 = np.linalg.matrix_power(self.q, 2)
        assert_array_equal(q2, self.q @ self.q)
        q2 = np.linalg.matrix_power(self.q, 4)
        assert_array_equal(q2, self.q @ self.q @ self.q @ self.q)

    @needs_array_function
    def test_matrix_inv_power(self):
        # Negative powers use the inverse, so units invert.
        qinv = np.linalg.inv(self.q.value) / self.q.unit
        qm1 = np.linalg.matrix_power(self.q, -1)
        assert_array_equal(qm1, qinv)
        qm3 = np.linalg.matrix_power(self.q, -3)
        assert_array_equal(qm3, qinv @ qinv @ qinv)

    @needs_array_function
    def test_multi_dot(self):
        q2 = np.linalg.multi_dot([self.q, self.q])
        q2x = self.q @ self.q
        assert_array_equal(q2, q2x)
        q3 = np.linalg.multi_dot([self.q, self.q, self.q])
        q3x = self.q @ self.q @ self.q
        assert_array_equal(q3, q3x)

    @needs_array_function
    def test_svd(self):
        m = np.arange(10.0) * np.arange(5.0)[:, np.newaxis] * u.m
        svd_u, svd_s, svd_vt = np.linalg.svd(m, full_matrices=False)
        svd_ux, svd_sx, svd_vtx = np.linalg.svd(m.value, full_matrices=False)
        # Only the singular values carry the unit; U and Vt are dimensionless.
        svd_sx <<= m.unit
        assert_array_equal(svd_u, svd_ux)
        assert_array_equal(svd_vt, svd_vtx)
        assert_array_equal(svd_s, svd_sx)
        assert u.allclose(svd_u @ np.diag(svd_s) @ svd_vt, m)
        s2 = np.linalg.svd(m, compute_uv=False)
        svd_s2x = np.linalg.svd(m.value, compute_uv=False) << m.unit
        assert_array_equal(s2, svd_s2x)

    @needs_array_function
    def test_inv(self):
        inv = np.linalg.inv(self.q)
        expected = np.linalg.inv(self.q.value) / self.q.unit
        assert_array_equal(inv, expected)

    @needs_array_function
    def test_pinv(self):
        pinv = np.linalg.pinv(self.q)
        expected = np.linalg.pinv(self.q.value) / self.q.unit
        assert_array_equal(pinv, expected)
        # rcond is a relative cut-off, converted to the matrix unit.
        rcond = 0.01 * u.cm
        pinv2 = np.linalg.pinv(self.q, rcond)
        expected2 = (
            np.linalg.pinv(self.q.value, rcond.to_value(self.q.unit)) / self.q.unit
        )
        assert_array_equal(pinv2, expected2)

    @needs_array_function
    def test_tensorinv(self):
        inv = np.linalg.tensorinv(self.q, ind=1)
        expected = np.linalg.tensorinv(self.q.value, ind=1) / self.q.unit
        assert_array_equal(inv, expected)

    @needs_array_function
    def test_det(self):
        # The determinant of an n x n matrix has unit**n.
        det = np.linalg.det(self.q)
        expected = np.linalg.det(self.q.value)
        expected <<= self.q.unit ** self.q.shape[-1]
        assert_array_equal(det, expected)
        with pytest.raises(np.linalg.LinAlgError):
            np.linalg.det(self.q[0])  # Not 2-D
        with pytest.raises(np.linalg.LinAlgError):
            np.linalg.det(self.q[:-1])  # Not square.

    @needs_array_function
    def test_slogdet(self):
        # TODO: Could be supported if we had a natural logarithm unit.
        # The previous ``assert hasattr(logdet, "unit")`` sat *after* the
        # raising call inside the pytest.raises block, so it never executed;
        # the TypeError itself is the whole contract here.
        with pytest.raises(TypeError):
            np.linalg.slogdet(self.q)

    @needs_array_function
    def test_solve(self):
        b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
        x = np.linalg.solve(self.q, b)
        xx = np.linalg.solve(self.q.value, b.value)
        # Solution of A x = b has unit(b)/unit(A).
        xx <<= b.unit / self.q.unit
        assert_array_equal(x, xx)
        assert u.allclose(self.q @ x, b)

    @needs_array_function
    def test_tensorsolve(self):
        b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
        x = np.linalg.tensorsolve(self.q, b)
        xx = np.linalg.tensorsolve(self.q.value, b.value)
        xx <<= b.unit / self.q.unit
        assert_array_equal(x, xx)
        assert u.allclose(self.q @ x, b)

    @needs_array_function
    def test_lstsq(self):
        b = np.array([1.0, 2.0, 4.0]) * u.m / u.s
        x, residuals, rank, s = np.linalg.lstsq(self.q, b, rcond=None)
        xx, residualsx, rankx, sx = np.linalg.lstsq(self.q.value, b.value, rcond=None)
        # Solution, residuals and singular values each get their own unit.
        xx <<= b.unit / self.q.unit
        residualsx <<= b.unit**2
        sx <<= self.q.unit
        assert_array_equal(x, xx)
        assert_array_equal(residuals, residualsx)
        assert_array_equal(s, sx)
        assert rank == rankx
        assert u.allclose(self.q @ x, b)
        # Also do one where we can check the answer...
        m = np.eye(3)
        b = np.arange(3) * u.m
        x, residuals, rank, s = np.linalg.lstsq(m, b, rcond=1.0 * u.percent)
        assert_array_equal(x, b)
        assert np.all(residuals == 0 * u.m**2)
        assert rank == 3
        assert_array_equal(s, np.array([1.0, 1.0, 1.0]) << u.one)
        # rcond must be dimensionless (or convertible to it).
        with pytest.raises(u.UnitsError):
            np.linalg.lstsq(m, b, rcond=1.0 * u.s)

    @needs_array_function
    def test_norm(self):
        n = np.linalg.norm(self.q)
        expected = np.linalg.norm(self.q.value) << self.q.unit
        assert_array_equal(n, expected)
        # Special case: 1-D, ord=0 counts non-zeros, so is dimensionless.
        n1 = np.linalg.norm(self.q[0], ord=0)
        expected1 = np.linalg.norm(self.q[0].value, ord=0) << u.one
        assert_array_equal(n1, expected1)

    @needs_array_function
    def test_cholesky(self):
        # Numbers from np.linalg.cholesky docstring.
        q = np.array([[1, -2j], [2j, 5]]) * u.m
        cd = np.linalg.cholesky(q)
        # L @ L^H reproduces the matrix, so L carries the square root unit.
        cdx = np.linalg.cholesky(q.value) << q.unit**0.5
        assert_array_equal(cd, cdx)
        assert u.allclose(cd @ cd.T.conj(), q)

    @needs_array_function
    def test_qr(self):
        # This is not exhaustive...
        a = np.array([[1, -2j], [2j, 5]]) * u.m
        q, r = np.linalg.qr(a)
        qx, rx = np.linalg.qr(a.value)
        # Q is orthonormal (dimensionless); R carries the unit.
        qx <<= u.one
        rx <<= a.unit
        assert_array_equal(q, qx)
        assert_array_equal(r, rx)
        assert u.allclose(q @ r, a)

    @needs_array_function
    def test_eig(self):
        w, v = np.linalg.eig(self.q)
        wx, vx = np.linalg.eig(self.q.value)
        # Eigenvalues carry the unit, eigenvectors are dimensionless.
        wx <<= self.q.unit
        vx <<= u.one
        assert_array_equal(w, wx)
        assert_array_equal(v, vx)
        # Comprehensible example
        q = np.diag((1, 2, 3) * u.m)
        w, v = np.linalg.eig(q)
        assert_array_equal(w, np.arange(1, 4) * u.m)
        assert_array_equal(v, np.eye(3))

    @needs_array_function
    def test_eigvals(self):
        w = np.linalg.eigvals(self.q)
        wx = np.linalg.eigvals(self.q.value) << self.q.unit
        assert_array_equal(w, wx)
        # Comprehensible example
        q = np.diag((1, 2, 3) * u.m)
        w = np.linalg.eigvals(q)
        assert_array_equal(w, np.arange(1, 4) * u.m)

    @needs_array_function
    def test_eigh(self):
        w, v = np.linalg.eigh(self.q)
        wx, vx = np.linalg.eigh(self.q.value)
        wx <<= self.q.unit
        vx <<= u.one
        assert_array_equal(w, wx)
        assert_array_equal(v, vx)

    @needs_array_function
    def test_eigvalsh(self):
        w = np.linalg.eigvalsh(self.q)
        wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
        assert_array_equal(w, wx)
class TestRecFunctions(metaclass=CoverageMeta):
    """Tests of `numpy.lib.recfunctions` on structured quantities."""

    @classmethod
    def setup_class(cls):
        # Parameter renamed from ``self`` to ``cls``: this is a classmethod
        # and the attributes below are class attributes.
        cls.pv_dtype = np.dtype([("p", "f8"), ("v", "f8")])
        cls.pv_t_dtype = np.dtype(
            [("pv", np.dtype([("pp", "f8"), ("vv", "f8")])), ("t", "f8")]
        )

        cls.pv = np.array([(1.0, 0.25), (2.0, 0.5), (3.0, 0.75)], cls.pv_dtype)
        cls.pv_t = np.array(
            [((4.0, 2.5), 0.0), ((5.0, 5.0), 1.0), ((6.0, 7.5), 2.0)], cls.pv_t_dtype
        )

        cls.pv_unit = u.StructuredUnit((u.km, u.km / u.s), ("p", "v"))
        cls.pv_t_unit = u.StructuredUnit((cls.pv_unit, u.s), ("pv", "t"))

        cls.q_pv = cls.pv << cls.pv_unit
        cls.q_pv_t = cls.pv_t << cls.pv_t_unit

    def test_structured_to_unstructured(self):
        # can't unstructure something with incompatible units
        with pytest.raises(u.UnitConversionError, match="'m'"):
            rfn.structured_to_unstructured(u.Quantity((0, 0.6), u.Unit("(eV, m)")))

        # it works if all the units are equal
        struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, eV)"))
        unstruct = rfn.structured_to_unstructured(struct)
        assert_array_equal(unstruct, [0, 0, 0.6] * u.eV)

        # also if the units are convertible
        struct = u.Quantity((0, 0, 0.6), u.Unit("(eV, eV, keV)"))
        unstruct = rfn.structured_to_unstructured(struct)
        assert_array_equal(unstruct, [0, 0, 600] * u.eV)

        struct = u.Quantity((0, 0, 1.7827e-33), u.Unit("(eV, eV, g)"))
        with u.add_enabled_equivalencies(u.mass_energy()):
            unstruct = rfn.structured_to_unstructured(struct)
        u.allclose(unstruct, [0, 0, 1.0000214] * u.eV)

        # and if the dtype is nested
        struct = [(5, (400.0, 3e6))] * u.Unit("m, (cm, um)")
        unstruct = rfn.structured_to_unstructured(struct)
        assert_array_equal(unstruct, [[5, 4, 3]] * u.m)

        # For the other tests of ``structured_to_unstructured``, see
        # ``test_structured.TestStructuredQuantityFunctions.test_structured_to_unstructured``

    def test_unstructured_to_structured(self):
        unstruct = [1, 2, 3] * u.m
        dtype = np.dtype([("f1", float), ("f2", float), ("f3", float)])

        # It works.
        struct = rfn.unstructured_to_structured(unstruct, dtype=dtype)
        assert struct.unit == u.Unit("(m, m, m)")
        assert_array_equal(rfn.structured_to_unstructured(struct), unstruct)

        # Can't structure something that's already structured.
        with pytest.raises(ValueError, match="arr must have at least one dimension"):
            rfn.unstructured_to_structured(struct, dtype=dtype)

        # For the other tests of ``structured_to_unstructured``, see
        # ``test_structured.TestStructuredQuantityFunctions.test_unstructured_to_structured``

    def test_merge_arrays_repeat_dtypes(self):
        # Cannot merge things with repeat dtypes.
        q1 = u.Quantity([(1,)], dtype=[("f1", float)])
        q2 = u.Quantity([(1,)], dtype=[("f1", float)])

        with pytest.raises(ValueError, match="field 'f1' occurs more than once"):
            rfn.merge_arrays((q1, q2))

    @pytest.mark.parametrize("flatten", [True, False])
    def test_merge_arrays(self, flatten):
        """Test `numpy.lib.recfunctions.merge_arrays`."""
        # Merge single normal array.
        arr = rfn.merge_arrays(self.q_pv["p"], flatten=flatten)
        assert_array_equal(arr["f0"], self.q_pv["p"])
        assert arr.unit == (u.km,)

        # Merge single structured array.
        arr = rfn.merge_arrays(self.q_pv, flatten=flatten)
        assert_array_equal(arr, self.q_pv)
        assert arr.unit == (u.km, u.km / u.s)

        # Merge 1-element tuple.
        arr = rfn.merge_arrays((self.q_pv,), flatten=flatten)
        assert np.array_equal(arr, self.q_pv)
        assert arr.unit == (u.km, u.km / u.s)

    @pytest.mark.xfail
    @pytest.mark.parametrize("flatten", [True, False])
    def test_merge_arrays_nonquantities(self, flatten):
        # Fails because cannot create quantity from structured array.
        # Fixed: previously referenced bare ``q_pv`` (a NameError), which
        # made the xfail trigger on the wrong exception.
        arr = rfn.merge_arrays((self.q_pv["p"], self.q_pv.value), flatten=flatten)

    def test_merge_array_nested_structure(self):
        # Merge 2-element tuples without flattening.
        arr = rfn.merge_arrays((self.q_pv, self.q_pv_t))
        assert_array_equal(arr["f0"], self.q_pv)
        assert_array_equal(arr["f1"], self.q_pv_t)
        assert arr.unit == ((u.km, u.km / u.s), ((u.km, u.km / u.s), u.s))

    def test_merge_arrays_flatten_nested_structure(self):
        # Merge 2-element tuple, flattening it.
        arr = rfn.merge_arrays((self.q_pv, self.q_pv_t), flatten=True)
        assert_array_equal(arr["p"], self.q_pv["p"])
        assert_array_equal(arr["v"], self.q_pv["v"])
        assert_array_equal(arr["pp"], self.q_pv_t["pv"]["pp"])
        assert_array_equal(arr["vv"], self.q_pv_t["pv"]["vv"])
        assert_array_equal(arr["t"], self.q_pv_t["t"])
        assert arr.unit == (u.km, u.km / u.s, u.km, u.km / u.s, u.s)

    def test_merge_arrays_asrecarray(self):
        with pytest.raises(ValueError, match="asrecarray=True is not supported."):
            rfn.merge_arrays(self.q_pv, asrecarray=True)

    def test_merge_arrays_usemask(self):
        with pytest.raises(ValueError, match="usemask=True is not supported."):
            rfn.merge_arrays(self.q_pv, usemask=True)

    @pytest.mark.parametrize("flatten", [True, False])
    def test_merge_arrays_str(self, flatten):
        with pytest.raises(
            TypeError, match="the Quantity implementation cannot handle"
        ):
            rfn.merge_arrays((self.q_pv, np.array(["a", "b", "c"])), flatten=flatten)
# Collect the numpy functions that are deliberately *not* tested above, so
# that test_testing_completeness can verify nothing was merely forgotten.
untested_functions = set()
if NUMPY_LT_1_23:
    deprecated_functions = {
        # Deprecated, removed in numpy 1.23
        np.asscalar,
        np.alen,
    }
else:
    deprecated_functions = set()
untested_functions |= deprecated_functions
# I/O routines have side effects and are skipped.
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
    np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
    np.polymul, np.polysub, np.polyval, np.roots, np.vander,
}  # fmt: skip
untested_functions |= poly_functions
rec_functions = {
    rfn.rec_append_fields, rfn.rec_drop_fields, rfn.rec_join,
    rfn.drop_fields, rfn.rename_fields, rfn.append_fields, rfn.join_by,
    rfn.repack_fields, rfn.apply_along_fields, rfn.assign_fields_by_name,
    rfn.stack_arrays, rfn.find_duplicates,
    rfn.recursive_fill_fields, rfn.require_fields,
}  # fmt: skip
untested_functions |= rec_functions
@needs_array_function
def test_testing_completeness():
    """Every wrapped numpy function is either covered by a test above or
    explicitly listed in ``untested_functions`` (never both)."""
    assert not CoverageMeta.covered.intersection(untested_functions)
    assert all_wrapped == (CoverageMeta.covered | untested_functions)
class TestFunctionHelpersCompleteness:
    """Sanity checks on the helper registries themselves."""

    # Each function may appear in at most one of the four registries.
    @pytest.mark.parametrize(
        "one, two",
        itertools.combinations(
            (
                SUBCLASS_SAFE_FUNCTIONS,
                UNSUPPORTED_FUNCTIONS,
                set(FUNCTION_HELPERS.keys()),
                set(DISPATCHED_FUNCTIONS.keys()),
            ),
            2,
        ),
    )
    def test_no_duplicates(self, one, two):
        assert not one.intersection(two)

    @needs_array_function
    def test_all_included(self):
        # Every wrapped numpy function must be classified somewhere.
        included_in_helpers = (
            SUBCLASS_SAFE_FUNCTIONS
            | UNSUPPORTED_FUNCTIONS
            | set(FUNCTION_HELPERS.keys())
            | set(DISPATCHED_FUNCTIONS.keys())
        )
        assert all_wrapped == included_in_helpers

    # untested_function is created using all_wrapped_functions
    @needs_array_function
    def test_ignored_are_untested(self):
        assert IGNORED_FUNCTIONS | TBD_FUNCTIONS == untested_functions
|
a0ff3b5a6f59e2aa3ceed28277204060986151ac7b5842630f67593c5bf2baa1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Unit tests for the handling of physical types in `astropy.units`.
"""
import pickle
import pytest
from astropy import units as u
from astropy.constants import hbar
from astropy.units import physical
from astropy.utils.exceptions import AstropyDeprecationWarning
# Each pair maps a unit to the name of its expected physical type.
# NOTE(review): names appear both with spaces ("temperature gradient") and
# underscores ("temperature_gradient"); PhysicalType comparison accepts the
# attribute-accessible underscore form as an alias -- see the equality tests
# further down in this module.
unit_physical_type_pairs = [
    (u.m, "length"),
    (u.cm**3, "volume"),
    (u.km / u.h, "speed"),
    (u.barn * u.Mpc, "volume"),
    (u.m * u.s**8, "unknown"),
    (u.m / u.m, "dimensionless"),
    (hbar.unit, "angular momentum"),
    (u.erg / (u.cm**2 * u.s * u.AA), "spectral flux density wav"),
    (u.photon / (u.cm**2 * u.s * u.AA), "photon flux density wav"),
    (u.photon / (u.cm**2 * u.s * u.Hz), "photon flux density"),
    (u.byte, "data quantity"),
    (u.bit, "data quantity"),
    (u.imperial.mi / u.week, "speed"),
    (u.erg / u.s, "power"),
    (u.C / u.s, "electrical current"),
    (u.C / u.s / u.cm**2, "electrical current density"),
    (u.T * u.m**2, "magnetic flux"),
    (u.N * u.m, "energy"),
    (u.rad / u.ms, "angular speed"),
    (u.Unit(1), "dimensionless"),
    (u.m**2, "area"),
    (u.s, "time"),
    (u.rad, "angle"),
    (u.sr, "solid angle"),
    (u.m / u.s**2, "acceleration"),
    (u.Hz, "frequency"),
    (u.g, "mass"),
    (u.mol, "amount of substance"),
    (u.K, "temperature"),
    (u.deg_C, "temperature"),
    (u.imperial.deg_F, "temperature"),
    (u.imperial.deg_R, "temperature"),
    (u.imperial.deg_R / u.m, "temperature_gradient"),
    (u.N, "force"),
    (u.J, "energy"),
    (u.Pa, "pressure"),
    (u.W, "power"),
    (u.kg / u.m**3, "mass density"),
    (u.m**3 / u.kg, "specific volume"),
    (u.mol / u.m**3, "molar concentration"),
    (u.kg * u.m / u.s, "momentum/impulse"),
    (u.kg * u.m**2 / u.s, "angular momentum"),
    (u.rad / u.s, "angular speed"),
    (u.rad / u.s**2, "angular acceleration"),
    (u.g / (u.m * u.s), "dynamic viscosity"),
    (u.m**2 / u.s, "kinematic viscosity"),
    (u.m**-1, "wavenumber"),
    (u.A, "electrical current"),
    (u.C, "electrical charge"),
    (u.V, "electrical potential"),
    (u.Ohm, "electrical resistance"),
    (u.S, "electrical conductance"),
    (u.F, "electrical capacitance"),
    (u.C * u.m, "electrical dipole moment"),
    (u.A / u.m**2, "electrical current density"),
    (u.V / u.m, "electrical field strength"),
    (u.C / u.m**2, "electrical flux density"),
    (u.C / u.m**3, "electrical charge density"),
    (u.F / u.m, "permittivity"),
    (u.Wb, "magnetic flux"),
    (u.T, "magnetic flux density"),
    (u.A / u.m, "magnetic field strength"),
    (u.H / u.m, "electromagnetic field strength"),
    (u.H, "inductance"),
    (u.cd, "luminous intensity"),
    (u.lm, "luminous flux"),
    (u.lx, "luminous emittance/illuminance"),
    (u.W / u.sr, "radiant intensity"),
    (u.cd / u.m**2, "luminance"),
    (u.astrophys.Jy, "spectral flux density"),
    (u.astrophys.R, "photon flux"),
    (u.misc.bit, "data quantity"),
    (u.misc.bit / u.s, "bandwidth"),
    (u.cgs.Franklin, "electrical charge (ESU)"),
    (u.cgs.statampere, "electrical current (ESU)"),
    (u.cgs.Biot, "electrical current (EMU)"),
    (u.cgs.abcoulomb, "electrical charge (EMU)"),
    (u.imperial.btu / (u.s * u.m * u.imperial.deg_F), "thermal conductivity"),
    (u.imperial.cal / u.deg_C, "heat capacity"),
    (u.imperial.cal / u.deg_C / u.g, "specific heat capacity"),
    (u.J * u.m**-2 * u.s**-1, "energy flux"),
    (u.W / u.m**2, "energy flux"),
    (u.m**3 / u.mol, "molar volume"),
    (u.m / u.S, "electrical resistivity"),
    (u.S / u.m, "electrical conductivity"),
    (u.A * u.m**2, "magnetic moment"),
    (u.J / u.T, "magnetic moment"),
    (u.yr**-1 * u.Mpc**-3, "volumetric rate"),
    (u.m / u.s**3, "jerk"),
    (u.m / u.s**4, "snap"),
    (u.m / u.s**5, "crackle"),
    (u.m / u.s**6, "pop"),
    (u.deg_C / u.m, "temperature gradient"),
    (u.imperial.deg_F / u.m, "temperature gradient"),
    (u.imperial.deg_R / u.imperial.ft, "temperature gradient"),
    (u.imperial.Calorie / u.g, "specific energy"),
    (u.mol / u.L / u.s, "reaction rate"),
    (u.imperial.lbf * u.imperial.ft * u.s**2, "moment of inertia"),
    (u.mol / u.s, "catalytic activity"),
    (u.imperial.kcal / u.deg_C / u.mol, "molar heat capacity"),
    (u.mol / u.kg, "molality"),
    (u.imperial.inch * u.hr, "absement"),
    (u.imperial.ft**3 / u.s, "volumetric flow rate"),
    (u.Hz / u.s, "frequency drift"),
    (u.Pa**-1, "compressibility"),
    (u.dimensionless_unscaled, "dimensionless"),
]
@pytest.mark.parametrize("unit, physical_type", unit_physical_type_pairs)
def test_physical_type_names(unit, physical_type):
    """
    Test that the `physical_type` attribute of `u.Unit` objects provides
    the expected physical type for various units.

    Many of these tests are used to test backwards compatibility.
    """
    actual = unit.physical_type
    assert actual == physical_type, (
        f"{unit!r}.physical_type was expected to return "
        f"{physical_type!r}, but instead returned {actual!r}."
    )
# Commonly used PhysicalType instances, extracted once for reuse in the
# operation tests below.
length = u.m.physical_type
time = u.s.physical_type
speed = (u.m / u.s).physical_type
area = (u.m**2).physical_type
wavenumber = (u.m**-1).physical_type
dimensionless = u.dimensionless_unscaled.physical_type
pressure = u.Pa.physical_type
momentum = (u.kg * u.m / u.s).physical_type
@pytest.mark.parametrize(
    "physical_type_representation, physical_type_name",
    [
        (1.0, "dimensionless"),
        (u.m, "length"),
        ("work", "work"),
        (5 * u.m, "length"),
        (length, length),
        (u.Pa, "energy_density"),  # attribute-accessible name
        ("energy_density", "energy_density"),  # attribute-accessible name
    ],
)
def test_getting_physical_type(physical_type_representation, physical_type_name):
    """Test different ways of getting a physical type."""
    result = physical.get_physical_type(physical_type_representation)
    assert isinstance(result, physical.PhysicalType)
    assert result == physical_type_name
@pytest.mark.parametrize(
    "argument, exception",
    [
        ("unknown", ValueError),
        ("not a name of a physical type", ValueError),
        ({"this set cannot be made into a Quantity"}, TypeError),
    ],
)
def test_getting_physical_type_exceptions(argument, exception):
    """
    Test that `get_physical_type` raises appropriate exceptions when
    provided with invalid arguments.
    """
    with pytest.raises(exception):
        physical.get_physical_type(argument)
def test_physical_type_cannot_become_quantity():
    """
    Ensure a `PhysicalType` instance cannot be turned into a `Quantity`
    object. Several subsequent tests rely on this invariant, so a
    failure here may explain failures elsewhere.
    """
    with pytest.raises(TypeError):
        u.Quantity(u.m.physical_type, u.m)
# left term, right term, operator, expected value
# `NotImplemented` entries mark combinations that the dunder method must
# decline to handle (leaving resolution to the other operand or raising).
operation_parameters = [
    (length, length, "__eq__", True),
    (length, area, "__eq__", False),
    (length, "length", "__eq__", True),
    ("length", length, "__eq__", NotImplemented),
    (dimensionless, dimensionless, "__eq__", True),
    (momentum, "momentum/impulse", "__eq__", True),  # test delimiters in names
    (pressure, "energy_density", "__eq__", True),  # test underscores in names
    ((u.m**8).physical_type, "unknown", "__eq__", True),
    ((u.m**8).physical_type, (u.m**9).physical_type, "__eq__", False),
    (length, length, "__ne__", False),
    (speed, time, "__ne__", True),
    (pressure, dimensionless, "__ne__", True),
    (length, u.m, "__eq__", NotImplemented),
    (length, length, "__mul__", area),
    (speed, time, "__mul__", length),
    (speed, time, "__rmul__", length),
    (length, time, "__truediv__", speed),
    (area, length, "__truediv__", length),
    (length, area, "__rtruediv__", length),
    (dimensionless, dimensionless, "__mul__", dimensionless),
    (dimensionless, dimensionless, "__truediv__", dimensionless),
    (length, 2, "__pow__", area),
    (area, 0.5, "__pow__", length),
    (dimensionless, 4, "__pow__", dimensionless),
    (u.m, length, "__mul__", NotImplemented),
    (3.2, length, "__mul__", NotImplemented),
    (u.m, time, "__truediv__", NotImplemented),
    (3.2, length, "__truediv__", NotImplemented),
    # Units and plain numbers as the right-hand operand are accepted.
    (length, u.m, "__mul__", area),
    (length, u.m, "__rmul__", area),
    (speed, u.s, "__mul__", length),
    (length, 1, "__mul__", length),
    (length, 1, "__rmul__", length),
    (length, u.s, "__truediv__", speed),
    (area, 1, "__truediv__", area),
    (time, u.m, "__rtruediv__", speed),
    (length, 1.0, "__rtruediv__", wavenumber),
    (length, 2, "__pow__", area),
    # Numbers other than 1 or 1.0 cannot scale a physical type.
    (length, 32, "__mul__", NotImplemented),
    (length, 0, "__rmul__", NotImplemented),
    (length, 3.2, "__truediv__", NotImplemented),
    (length, -1, "__rtruediv__", NotImplemented),
    (length, "length", "__mul__", area),
    (length, "length", "__rmul__", area),
    (area, "length", "__truediv__", length),
    (length, "area", "__rtruediv__", length),
]
@pytest.mark.parametrize("left, right, operator, expected", operation_parameters)
def test_physical_type_operations(left, right, operator, expected):
    """
    Check the binary `PhysicalType` dunder methods against the expected
    results recorded in ``operation_parameters``.
    """
    bound_dunder = getattr(left, operator)
    assert bound_dunder(right) == expected
# (unit, full set of physical type names for that unit) pairs.
unit_with_physical_type_set = [
    (u.m, {"length"}),
    (u.kg * u.m / u.s, {"impulse", "momentum"}),
    (u.Pa, {"energy density", "pressure", "stress"}),
]
@pytest.mark.parametrize("unit, expected_set", unit_with_physical_type_set)
def test_physical_type_as_set(unit, expected_set):
    """Casting a `physical.PhysicalType` to `set` yields all of its names."""
    assert set(unit.physical_type) == expected_set
def test_physical_type_iteration():
    """Test iterating through different physical type names.

    Iterating a `PhysicalType` yields each of its names; the order is
    pinned by the expected list below.
    """
    # list() replaces the redundant identity comprehension
    # (`[name for name in pressure]`) — same behavior, idiomatic form.
    physical_type_names = list(pressure)
    assert physical_type_names == ["energy density", "pressure", "stress"]
def test_physical_type_in():
    """
    Verify membership tests (`in`) against `PhysicalType` objects that
    have a single name and ones that have several names.
    """
    for name, physical_type in (("length", length), ("pressure", pressure)):
        assert name in physical_type
# Pairs of units that share dimensionality and hence physical type.
equivalent_unit_pairs = [
    (u.m, u.m),
    (u.m, u.cm),
    (u.N, u.kg * u.m * u.s**-2),
    (u.barn * u.Mpc, u.cm**3),
    (u.K, u.deg_C),
    (u.K, u.imperial.deg_R),
    (u.K, u.imperial.deg_F),
    (u.deg_C, u.imperial.deg_F),
    (u.m**18, u.pc**18),  # an uncataloged ("unknown") physical type
]
@pytest.mark.parametrize("unit1, unit2", equivalent_unit_pairs)
def test_physical_type_instance_equality(unit1, unit2):
    """
    Units of the same dimensionality must have equal (and not unequal)
    `physical.PhysicalType` instances.
    """
    ptype1, ptype2 = unit1.physical_type, unit2.physical_type
    assert (ptype1 == ptype2) is True
    assert (ptype1 != ptype2) is False
@pytest.mark.parametrize("unit1, unit2", equivalent_unit_pairs)
def test_get_physical_type_equivalent_pairs(unit1, unit2):
    """
    Equivalent units should map to the very same cataloged `PhysicalType`
    instance; unknown types are not cataloged, so for them only equality
    (not identity) is required.
    """
    first = physical.get_physical_type(unit1)
    second = physical.get_physical_type(unit2)
    assert first == second
    if first != "unknown":
        assert first is second
# Pairs of units with different dimensionality.
nonequivalent_unit_pairs = [
    (u.m, u.s),
    (u.m**18, u.m**19),
    (u.N, u.J),
    (u.barn, u.imperial.deg_F),
]
@pytest.mark.parametrize("unit1, unit2", nonequivalent_unit_pairs)
def test_physical_type_instance_inequality(unit1, unit2):
    """
    `PhysicalType` instances built from units of different
    dimensionality must compare unequal.
    """
    ptype_a = physical.PhysicalType(unit1, "ptype1")
    ptype_b = physical.PhysicalType(unit2, "ptype2")
    assert (ptype_a != ptype_b) is True
    assert (ptype_a == ptype_b) is False
# (PhysicalType instance, expected `str` output) pairs; multiple names
# are joined with slashes.
physical_type_with_expected_str = [
    (length, "length"),
    (speed, "speed/velocity"),
    (pressure, "energy density/pressure/stress"),
    (u.deg_C.physical_type, "temperature"),
    ((u.J / u.K / u.kg).physical_type, "specific entropy/specific heat capacity"),
]

# (PhysicalType instance, expected `repr` output) pairs; multiple names
# are rendered as a set literal.
physical_type_with_expected_repr = [
    (length, "PhysicalType('length')"),
    (speed, "PhysicalType({'speed', 'velocity'})"),
    (pressure, "PhysicalType({'energy density', 'pressure', 'stress'})"),
    (u.deg_C.physical_type, "PhysicalType('temperature')"),
    (
        (u.J / u.K / u.kg).physical_type,
        "PhysicalType({'specific entropy', 'specific heat capacity'})",
    ),
]
@pytest.mark.parametrize("physical_type, expected_str", physical_type_with_expected_str)
def test_physical_type_str(physical_type, expected_str):
    """Check `str` output for `PhysicalType` instances."""
    assert str(physical_type) == expected_str
@pytest.mark.parametrize(
    "physical_type, expected_repr", physical_type_with_expected_repr
)
def test_physical_type_repr(physical_type, expected_repr):
    """Test using `repr` on a `PhysicalType` instance.

    Renamed from ``physical_type_repr``: without the ``test_`` prefix,
    pytest never collected this function, so the parametrized repr cases
    silently went unexecuted.
    """
    assert repr(physical_type) == expected_repr
def test_physical_type_hash():
    """A `PhysicalType` must be hashable so it can serve as a dict key."""
    mapping = {length: 42}
    assert mapping[length] == 42
@pytest.mark.parametrize("multiplicand", [list(), 42, 0, -1])
def test_physical_type_multiplication(multiplicand):
    """
    Multiplying a physical type by an unsupported operand must raise
    `TypeError` (both operands return `NotImplemented`, so the
    interpreter raises).
    """
    with pytest.raises(TypeError):
        _ = length * multiplicand
def test_unrecognized_unit_physical_type():
    """
    Verify that an unrecognized unit still exposes a `PhysicalType`,
    and that the type compares equal to ``"unknown"``.
    """
    parrot_unit = u.Unit("parrot", parse_strict="silent")
    parrot_physical_type = parrot_unit.physical_type
    assert isinstance(parrot_physical_type, physical.PhysicalType)
    assert parrot_physical_type == "unknown"
# Name collections containing at least one non-string entry.
invalid_inputs = [(42,), ("valid input", 42)]


@pytest.mark.parametrize("invalid_input", invalid_inputs)
def test_invalid_physical_types(invalid_input):
    """
    `PhysicalType` construction must fail when any supplied name is not
    a string, and the unit's physical type must remain unknown
    afterwards.
    """
    obscure_unit = u.s**87
    with pytest.raises(ValueError):
        physical.PhysicalType(obscure_unit, invalid_input)
    assert obscure_unit.physical_type == "unknown"
class TestDefPhysType:
    """Tests for `physical.def_physical_type`, including error handling
    and cleanup of the module-level registries it mutates."""

    # Obscure units whose physical types are safe to (re)define in tests.
    weird_unit = u.m**99
    strange_unit = u.s**42

    def test_attempt_to_define_unknown_physical_type(self):
        """Test that a unit cannot be defined as unknown."""
        with pytest.raises(ValueError):
            physical.def_physical_type(self.weird_unit, "unknown")
        assert "unknown" not in physical._unit_physical_mapping

    def test_multiple_same_physical_type_names(self):
        """
        Test that `def_physical_type` raises an exception when it tries to
        set the physical type of a new unit as the name of an existing
        physical type.
        """
        with pytest.raises(ValueError):
            physical.def_physical_type(self.weird_unit, {"time", "something"})
        assert self.weird_unit.physical_type == "unknown"

    def test_expanding_names_for_physical_type(self):
        """
        Test that calling `def_physical_type` on an existing physical
        type adds a new physical type name.
        """
        weird_name = "weird name"
        strange_name = "strange name"
        try:
            physical.def_physical_type(self.weird_unit, weird_name)
            assert (
                self.weird_unit.physical_type == weird_name
            ), f"unable to set physical type for {self.weird_unit}"
        except Exception:
            raise
        finally:  # cleanup added name from the module-level registries
            physical._attrname_physical_mapping.pop(weird_name.replace(" ", "_"), None)
            physical._name_physical_mapping.pop(weird_name, None)

        # add both strange_name and weird_name
        try:
            physical.def_physical_type(self.weird_unit, strange_name)
            assert set((self.weird_unit).physical_type) == {
                weird_name,
                strange_name,
            }, "did not correctly append a new physical type name."
        except Exception:
            raise
        finally:  # cleanup added names
            physical._attrname_physical_mapping.pop(
                strange_name.replace(" ", "_"), None
            )
            physical._name_physical_mapping.pop(strange_name, None)
            physical._attrname_physical_mapping.pop(weird_name.replace(" ", "_"), None)
            physical._name_physical_mapping.pop(weird_name, None)

    def test_redundant_physical_type(self):
        """
        Test that a physical type name already in use cannot be assigned
        for another unit (excluding `"unknown"`).
        """
        with pytest.raises(ValueError):
            physical.def_physical_type(self.weird_unit, "length")

    @staticmethod
    def _undef_physical_type(unit):
        """Reset the physical type of unit to "unknown"."""
        # Remove every registered name for the unit, then drop the
        # dimensionality -> PhysicalType entry itself.
        for name in list(unit.physical_type):
            del physical._unit_physical_mapping[name]
        del physical._physical_unit_mapping[unit._get_physical_type_id()]
        assert unit.physical_type == "unknown"

    def teardown_method(self):
        """
        Remove the definitions of the physical types that were added
        using `def_physical_unit` for testing purposes.
        """
        for unit in [self.weird_unit, self.strange_unit]:
            physical_type = physical.get_physical_type(unit)
            if physical_type != "unknown":
                self._undef_physical_type(unit)
            assert unit.physical_type == "unknown", (
                f"the physical type for {unit}, which was added for"
                "testing, was not deleted."
            )
@pytest.mark.parametrize(
    "method, expected",
    [("title", "Length"), ("isalpha", True), ("isnumeric", False), ("upper", "LENGTH")],
)
def test_that_str_methods_work_with_physical_types(method, expected):
    """
    Calling `str` methods on a `PhysicalType` must still work, but must
    emit a deprecation warning while doing so.
    """
    with pytest.warns(AstropyDeprecationWarning, match="PhysicalType instances"):
        outcome = getattr(length, method)()
    assert outcome == expected
def test_missing_physical_type_attribute():
    """
    Accessing an attribute that is neither a `str` method nor a
    `PhysicalType` attribute must raise `AttributeError`.
    This test should be removed when the deprecated option of calling
    string methods on PhysicalType instances is removed from
    `PhysicalType.__getattr__`.
    """
    with pytest.raises(AttributeError):
        length.not_the_name_of_a_str_or_physical_type_attribute
@pytest.mark.parametrize("ptype_name", ["length", "speed", "entropy"])
def test_pickling(ptype_name):
    # Regression test for #11685: physical types must survive a
    # round trip through pickle.
    original = u.get_physical_type(ptype_name)
    restored = pickle.loads(pickle.dumps(original))
    assert restored == original
def test_physical_types_module_access():
    """
    The `physical` module must list every physical type in ``dir()`` and
    expose each one as a (lazily loaded) module attribute.
    """
    # all physical type names in dir
    assert set(dir(physical)).issuperset(physical._attrname_physical_mapping.keys())
    assert set(dir(physical)).issuperset(physical.__all__)

    # all physical types can be accessed by name; iterate items() rather
    # than looking each key back up in the mapping (PERF102).
    for pname, ptype in physical._attrname_physical_mapping.items():
        assert hasattr(physical, pname)  # make sure works in lazy load
        assert getattr(physical, pname) is ptype

    # a failed access
    with pytest.raises(AttributeError, match="has no attribute"):
        physical.not_a_valid_physical_type_name
|
e832cb69804235f3a3709ad3264e2c29ec1fb52f6490f6e130dacabe08df2ea0 | # The purpose of these tests are to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import concurrent.futures
import warnings
from collections import namedtuple
import numpy as np
import pytest
from erfa import ufunc as erfa_ufunc
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.units import quantity_helper as qh
from astropy.units.quantity_helper.converters import UfuncHelpers
from astropy.units.quantity_helper.helpers import helper_sqrt
from astropy.utils.compat.optional_deps import HAS_SCIPY
# Lightweight records describing a single ufunc test:
#   testcase: call ``f(*q_in)`` and compare against ``q_out`` (units and values).
#   testexc:  call ``f(*q_in)`` and expect exception ``exc`` whose message contains ``msg``.
#   testwarn: call ``f(*q_in)`` with warning filter ``wfilter`` active.
testcase = namedtuple("testcase", ["f", "q_in", "q_out"])
testexc = namedtuple("testexc", ["f", "q_in", "exc", "msg"])
testwarn = namedtuple("testwarn", ["f", "q_in", "wfilter"])
@pytest.mark.skip
def test_testcase(tc):
    """Run one ``testcase`` record: compare units exactly, values approximately."""
    outputs = tc.f(*tc.q_in)
    # Careful: normalizing to a tuple here would be wrong for a function
    # returning a single tuple (as opposed to a tuple of return values).
    if not isinstance(outputs, tuple):
        outputs = (outputs,)
    for actual, expected in zip(outputs, tc.q_out):
        assert actual.unit == expected.unit
        assert_allclose(actual.value, expected.value, atol=1.0e-15)
@pytest.mark.skip
def test_testexc(te):
    """Run one ``testexc`` record: expect the exception, optionally check its message."""
    with pytest.raises(te.exc) as exc_info:
        te.f(*te.q_in)
    if te.msg is not None:
        assert te.msg in exc_info.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
    # Run ``tw.f`` with the given warning filter action in effect
    # (e.g. "error" turns any warning into an exception).
    with warnings.catch_warnings():
        warnings.filterwarnings(tw.wfilter)
        tw.f(*tw.q_in)
class TestUfuncHelpers:
    """Tests of the ufunc helper registries themselves (coverage,
    registration, and thread safety of lazy registration)."""

    # Note that this test should work even if scipy is present, since
    # the scipy.special ufuncs are only loaded on demand.
    # The test passes independently of whether erfa is already loaded
    # (which will be the case for a full test, since coordinates uses it).
    def test_coverage(self):
        """Test that we cover all ufunc's"""
        all_np_ufuncs = {
            ufunc
            for ufunc in np.core.umath.__dict__.values()
            if isinstance(ufunc, np.ufunc)
        }
        all_q_ufuncs = qh.UNSUPPORTED_UFUNCS | set(qh.UFUNC_HELPERS.keys())
        # Check that every numpy ufunc is covered.
        assert all_np_ufuncs - all_q_ufuncs == set()
        # Check that all ufuncs we cover come from numpy or erfa.
        # (Since coverage for erfa is incomplete, we do not check
        # this the other way).
        all_erfa_ufuncs = {
            ufunc
            for ufunc in erfa_ufunc.__dict__.values()
            if isinstance(ufunc, np.ufunc)
        }
        assert all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set()

    def test_scipy_registered(self):
        # Should be registered as existing even if scipy is not available.
        assert "scipy.special" in qh.UFUNC_HELPERS.modules

    def test_removal_addition(self):
        # Setting a helper to None moves the ufunc to UNSUPPORTED_UFUNCS...
        assert np.add in qh.UFUNC_HELPERS
        assert np.add not in qh.UNSUPPORTED_UFUNCS
        qh.UFUNC_HELPERS[np.add] = None
        assert np.add not in qh.UFUNC_HELPERS
        assert np.add in qh.UNSUPPORTED_UFUNCS
        # ...and assigning a helper (np.subtract's, presumably identical
        # to np.add's original — verify against the helpers module)
        # restores support.
        qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract]
        assert np.add in qh.UFUNC_HELPERS
        assert np.add not in qh.UNSUPPORTED_UFUNCS

    @pytest.mark.slow
    def test_thread_safety(self, fast_thread_switching):
        def dummy_ufunc(*args, **kwargs):
            return np.sqrt(*args, **kwargs)

        def register():
            return {dummy_ufunc: helper_sqrt}

        workers = 8
        with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
            for p in range(10000):
                # Fresh registry each round; many threads race to trigger
                # the lazy module registration, and every lookup must
                # resolve to the same helper.
                helpers = UfuncHelpers()
                helpers.register_module(
                    "astropy.units.tests.test_quantity_ufuncs",
                    ["dummy_ufunc"],
                    register,
                )
                futures = [
                    executor.submit(lambda: helpers[dummy_ufunc])
                    for i in range(workers)
                ]
                values = [future.result() for future in futures]
                assert values == [helper_sqrt] * workers
class TestQuantityTrigonometricFuncs:
    """
    Test trigonometric functions
    """

    @pytest.mark.parametrize(
        "tc",
        (
            testcase(
                f=np.sin,
                q_in=(30.0 * u.degree,),
                q_out=(0.5 * u.dimensionless_unscaled,),
            ),
            testcase(
                f=np.sin,
                q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
                q_out=(np.array([0.0, 1.0 / np.sqrt(2.0), 1.0]) * u.one,),
            ),
            testcase(
                f=np.arcsin,
                q_in=(np.sin(30.0 * u.degree),),
                q_out=(np.radians(30.0) * u.radian,),
            ),
            testcase(
                f=np.arcsin,
                q_in=(np.sin(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
                q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
            ),
            testcase(
                f=np.cos,
                q_in=(np.pi / 3.0 * u.radian,),
                q_out=(0.5 * u.dimensionless_unscaled,),
            ),
            testcase(
                f=np.cos,
                q_in=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
                q_out=(np.array([1.0, 1.0 / np.sqrt(2.0), 0.0]) * u.one,),
            ),
            testcase(
                f=np.arccos,
                q_in=(np.cos(np.pi / 3.0 * u.radian),),
                q_out=(np.pi / 3.0 * u.radian,),
            ),
            testcase(
                f=np.arccos,
                q_in=(np.cos(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian),),
                q_out=(np.array([0.0, np.pi / 4.0, np.pi / 2.0]) * u.radian,),
            ),
            testcase(
                f=np.tan,
                q_in=(np.pi / 3.0 * u.radian,),
                q_out=(np.sqrt(3.0) * u.dimensionless_unscaled,),
            ),
            testcase(
                f=np.tan,
                q_in=(np.array([0.0, 45.0, 135.0, 180.0]) * u.degree,),
                q_out=(np.array([0.0, 1.0, -1.0, 0.0]) * u.dimensionless_unscaled,),
            ),
            testcase(
                f=np.arctan,
                q_in=(np.tan(np.pi / 3.0 * u.radian),),
                q_out=(np.pi / 3.0 * u.radian,),
            ),
            testcase(
                f=np.arctan,
                q_in=(np.tan(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
                q_out=(np.radians(np.array([10.0, 30.0, 70.0, 80.0]) * u.degree),),
            ),
            testcase(
                f=np.arctan2,
                q_in=(np.array([10.0, 30.0, 70.0, 80.0]) * u.m, 2.0 * u.km),
                q_out=(
                    np.arctan2(np.array([10.0, 30.0, 70.0, 80.0]), 2000.0) * u.radian,
                ),
            ),
            testcase(
                f=np.arctan2,
                q_in=((np.array([10.0, 80.0]) * u.m / (2.0 * u.km)).to(u.one), 1.0),
                q_out=(np.arctan2(np.array([10.0, 80.0]) / 2000.0, 1.0) * u.radian,),
            ),
            testcase(f=np.deg2rad, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
            testcase(f=np.radians, q_in=(180.0 * u.degree,), q_out=(np.pi * u.radian,)),
            testcase(f=np.deg2rad, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
            testcase(f=np.radians, q_in=(3.0 * u.radian,), q_out=(3.0 * u.radian,)),
            testcase(f=np.rad2deg, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
            testcase(f=np.degrees, q_in=(60.0 * u.degree,), q_out=(60.0 * u.degree,)),
            testcase(f=np.rad2deg, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
            testcase(f=np.degrees, q_in=(np.pi * u.radian,), q_out=(180.0 * u.degree,)),
        ),
    )
    def test_testcases(self, tc):
        """Each case must produce the expected unit and value."""
        return test_testcase(tc)

    @pytest.mark.parametrize(
        "te",
        (
            testexc(f=np.deg2rad, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.radians, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            # BUG FIX: the next two entries were missing the trailing comma,
            # so q_in was a bare Quantity rather than a 1-tuple; the expected
            # TypeError then came from unpacking a 0-d Quantity in
            # ``te.f(*te.q_in)`` instead of from the ufunc rejecting meters,
            # making the tests pass vacuously.
            testexc(f=np.rad2deg, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(f=np.degrees, q_in=(3.0 * u.m,), exc=TypeError, msg=None),
            testexc(
                f=np.sin,
                q_in=(3.0 * u.m,),
                exc=TypeError,
                msg="Can only apply 'sin' function to quantities with angle units",
            ),
            testexc(
                f=np.arcsin,
                q_in=(3.0 * u.m,),
                exc=TypeError,
                msg="Can only apply 'arcsin' function to dimensionless quantities",
            ),
            testexc(
                f=np.cos,
                q_in=(3.0 * u.s,),
                exc=TypeError,
                msg="Can only apply 'cos' function to quantities with angle units",
            ),
            testexc(
                f=np.arccos,
                q_in=(3.0 * u.s,),
                exc=TypeError,
                msg="Can only apply 'arccos' function to dimensionless quantities",
            ),
            testexc(
                f=np.tan,
                q_in=(np.array([1, 2, 3]) * u.N,),
                exc=TypeError,
                msg="Can only apply 'tan' function to quantities with angle units",
            ),
            testexc(
                f=np.arctan,
                q_in=(np.array([1, 2, 3]) * u.N,),
                exc=TypeError,
                msg="Can only apply 'arctan' function to dimensionless quantities",
            ),
            testexc(
                f=np.arctan2,
                q_in=(np.array([1, 2, 3]) * u.N, 1.0 * u.s),
                exc=u.UnitsError,
                msg="compatible dimensions",
            ),
            testexc(
                f=np.arctan2,
                q_in=(np.array([1, 2, 3]) * u.N, 1.0),
                exc=u.UnitsError,
                msg="dimensionless quantities when other arg",
            ),
        ),
    )
    def test_testexcs(self, te):
        """Each case must raise the expected exception with the expected message."""
        return test_testexc(te)

    @pytest.mark.parametrize(
        "tw",
        (testwarn(f=np.arcsin, q_in=(27.0 * u.pc / (15 * u.kpc),), wfilter="error"),),
    )
    def test_testwarns(self, tw):
        """Each case must run cleanly with the given warning filter active."""
        return test_testwarn(tw)
class TestQuantityMathFuncs:
    """
    Test other mathematical functions
    """

    def test_multiply_scalar(self):
        assert np.multiply(4.0 * u.m, 2.0 / u.s) == 8.0 * u.m / u.s
        assert np.multiply(4.0 * u.m, 2.0) == 8.0 * u.m
        assert np.multiply(4.0, 2.0 / u.s) == 8.0 / u.s

    def test_multiply_array(self):
        assert np.all(
            np.multiply(np.arange(3.0) * u.m, 2.0 / u.s)
            == np.arange(0, 6.0, 2.0) * u.m / u.s
        )

    @pytest.mark.skipif(
        not isinstance(getattr(np, "matmul", None), np.ufunc),
        reason="np.matmul is not yet a gufunc",
    )
    def test_matmul(self):
        q = np.arange(3.0) * u.m
        r = np.matmul(q, q)
        assert r == 5.0 * u.m**2
        # less trivial case.
        q1 = np.eye(3) * u.m
        q2 = np.array(
            [[[1., 0., 0.],
              [0., 1., 0.],
              [0., 0., 1.]],
             [[0., 1., 0.],
              [0., 0., 1.],
              [1., 0., 0.]],
             [[0., 0., 1.],
              [1., 0., 0.],
              [0., 1., 0.]]]
        ) / u.s  # fmt: skip
        r2 = np.matmul(q1, q2)
        assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit)

    @pytest.mark.parametrize("function", (np.divide, np.true_divide))
    def test_divide_scalar(self, function):
        assert function(4.0 * u.m, 2.0 * u.s) == function(4.0, 2.0) * u.m / u.s
        assert function(4.0 * u.m, 2.0) == function(4.0, 2.0) * u.m
        assert function(4.0, 2.0 * u.s) == function(4.0, 2.0) / u.s

    @pytest.mark.parametrize("function", (np.divide, np.true_divide))
    def test_divide_array(self, function):
        assert np.all(
            function(np.arange(3.0) * u.m, 2.0 * u.s)
            == function(np.arange(3.0), 2.0) * u.m / u.s
        )

    def test_floor_divide_remainder_and_divmod(self):
        # Floor division converts the divisor's unit; the quotient is
        # dimensionless and the remainder keeps the dividend's unit.
        inch = u.Unit(0.0254 * u.m)
        dividend = np.array([1.0, 2.0, 3.0]) * u.m
        divisor = np.array([3.0, 4.0, 5.0]) * inch
        quotient = dividend // divisor
        remainder = dividend % divisor
        assert_allclose(quotient.value, [13.0, 19.0, 23.0])
        assert quotient.unit == u.dimensionless_unscaled
        assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
        assert remainder.unit == dividend.unit
        quotient2 = np.floor_divide(dividend, divisor)
        remainder2 = np.remainder(dividend, divisor)
        assert np.all(quotient2 == quotient)
        assert np.all(remainder2 == remainder)
        quotient3, remainder3 = divmod(dividend, divisor)
        assert np.all(quotient3 == quotient)
        assert np.all(remainder3 == remainder)
        # A bare unit (not a Quantity) is not a valid divisor.
        with pytest.raises(TypeError):
            divmod(dividend, u.km)
        with pytest.raises(TypeError):
            dividend // u.km
        with pytest.raises(TypeError):
            dividend % u.km
        quotient4, remainder4 = np.divmod(dividend, divisor)
        assert np.all(quotient4 == quotient)
        assert np.all(remainder4 == remainder)
        with pytest.raises(TypeError):
            np.divmod(dividend, u.km)

    def test_sqrt_scalar(self):
        assert np.sqrt(4.0 * u.m) == 2.0 * u.m**0.5

    def test_sqrt_array(self):
        assert np.all(
            np.sqrt(np.array([1.0, 4.0, 9.0]) * u.m)
            == np.array([1.0, 2.0, 3.0]) * u.m**0.5
        )

    def test_square_scalar(self):
        assert np.square(4.0 * u.m) == 16.0 * u.m**2

    def test_square_array(self):
        assert np.all(
            np.square(np.array([1.0, 2.0, 3.0]) * u.m)
            == np.array([1.0, 4.0, 9.0]) * u.m**2
        )

    def test_reciprocal_scalar(self):
        assert np.reciprocal(4.0 * u.m) == 0.25 / u.m

    def test_reciprocal_array(self):
        assert np.all(
            np.reciprocal(np.array([1.0, 2.0, 4.0]) * u.m)
            == np.array([1.0, 0.5, 0.25]) / u.m
        )

    def test_heaviside_scalar(self):
        # The second argument (value at zero) may itself be a
        # dimensionless Quantity.
        assert np.heaviside(0.0 * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
        assert (
            np.heaviside(0.0 * u.s, 25 * u.percent) == 0.25 * u.dimensionless_unscaled
        )
        assert np.heaviside(2.0 * u.J, 0.25) == 1.0 * u.dimensionless_unscaled

    def test_heaviside_array(self):
        values = np.array([-1.0, 0.0, 0.0, +1.0])
        halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
        assert np.all(
            np.heaviside(values * u.m, halfway * u.dimensionless_unscaled)
            == [0, 0.25, 0.75, +1.0] * u.dimensionless_unscaled
        )

    @pytest.mark.parametrize("function", (np.cbrt,))
    def test_cbrt_scalar(self, function):
        assert function(8.0 * u.m**3) == 2.0 * u.m

    @pytest.mark.parametrize("function", (np.cbrt,))
    def test_cbrt_array(self, function):
        # Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4.  See 4388.
        values = np.array([1.0, 8.0, 64.0])
        assert np.all(function(values * u.m**3) == function(values) * u.m)

    def test_power_scalar(self):
        assert np.power(4.0 * u.m, 2.0) == 16.0 * u.m**2
        assert np.power(4.0, 200.0 * u.cm / u.m) == u.Quantity(
            16.0, u.dimensionless_unscaled
        )
        # regression check on #1696
        assert np.power(4.0 * u.m, 0.0) == 1.0 * u.dimensionless_unscaled

    def test_power_array(self):
        assert np.all(
            np.power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
            == np.array([1.0, 8.0, 27.0]) * u.m**3
        )
        # regression check on #1696
        assert np.all(
            np.power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
        )

    def test_float_power_array(self):
        assert np.all(
            np.float_power(np.array([1.0, 2.0, 3.0]) * u.m, 3.0)
            == np.array([1.0, 8.0, 27.0]) * u.m**3
        )
        # regression check on #1696
        assert np.all(
            np.float_power(np.arange(4.0) * u.m, 0.0) == 1.0 * u.dimensionless_unscaled
        )

    def test_power_array_array(self):
        # An array of exponents is ambiguous for a dimensional base.
        with pytest.raises(ValueError):
            np.power(4.0 * u.m, [2.0, 4.0])

    def test_power_array_array2(self):
        with pytest.raises(ValueError):
            np.power([2.0, 4.0] * u.m, [2.0, 4.0])

    def test_power_array_array3(self):
        # Identical unit fractions are converted automatically to dimensionless
        # and should be allowed as base for np.power: #4764
        q = [2.0, 4.0] * u.m / u.m
        powers = [2.0, 4.0]
        res = np.power(q, powers)
        assert np.all(res.value == q.value**powers)
        assert res.unit == u.dimensionless_unscaled
        # The same holds for unit fractions that are scaled dimensionless.
        q2 = [2.0, 4.0] * u.m / u.cm
        # Test also against different types of exponent
        for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
            res2 = np.power(q2, cls(powers))
            assert np.all(res2.value == q2.to_value(1) ** powers)
            assert res2.unit == u.dimensionless_unscaled
        # Though for single powers, we keep the composite unit.
        res3 = q2**2
        assert np.all(res3.value == q2.value**2)
        assert res3.unit == q2.unit**2
        assert np.all(res3 == q2 ** [2, 2])

    def test_power_invalid(self):
        with pytest.raises(TypeError, match="raise something to a dimensionless"):
            np.power(3.0, 4.0 * u.m)

    def test_copysign_scalar(self):
        assert np.copysign(3 * u.m, 1.0) == 3.0 * u.m
        assert np.copysign(3 * u.m, 1.0 * u.s) == 3.0 * u.m
        assert np.copysign(3 * u.m, -1.0) == -3.0 * u.m
        assert np.copysign(3 * u.m, -1.0 * u.s) == -3.0 * u.m

    def test_copysign_array(self):
        assert np.all(
            np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0)
            == -np.array([1.0, 2.0, 3.0]) * u.s
        )
        assert np.all(
            np.copysign(np.array([1.0, 2.0, 3.0]) * u.s, -1.0 * u.m)
            == -np.array([1.0, 2.0, 3.0]) * u.s
        )
        assert np.all(
            np.copysign(
                np.array([1.0, 2.0, 3.0]) * u.s, np.array([-2.0, 2.0, -4.0]) * u.m
            )
            == np.array([-1.0, 2.0, -3.0]) * u.s
        )
        # A plain-array first argument stays a plain array.
        q = np.copysign(np.array([1.0, 2.0, 3.0]), -3 * u.m)
        assert np.all(q == np.array([-1.0, -2.0, -3.0]))
        assert not isinstance(q, u.Quantity)

    def test_ldexp_scalar(self):
        assert np.ldexp(4.0 * u.m, 2) == 16.0 * u.m

    def test_ldexp_array(self):
        assert np.all(
            np.ldexp(np.array([1.0, 2.0, 3.0]) * u.m, [3, 2, 1])
            == np.array([8.0, 8.0, 6.0]) * u.m
        )

    def test_ldexp_invalid(self):
        # The exponent must be integer-valued and dimensionless.
        with pytest.raises(TypeError):
            np.ldexp(3.0 * u.m, 4.0)
        with pytest.raises(TypeError):
            np.ldexp(3.0, u.Quantity(4, u.m, dtype=int))

    @pytest.mark.parametrize(
        "function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
    )
    def test_exp_scalar(self, function):
        q = function(3.0 * u.m / (6.0 * u.m))
        assert q.unit == u.dimensionless_unscaled
        assert q.value == function(0.5)

    @pytest.mark.parametrize(
        "function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
    )
    def test_exp_array(self, function):
        q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
        assert q.unit == u.dimensionless_unscaled
        assert np.all(q.value == function(np.array([1.0 / 3.0, 1.0 / 2.0, 1.0])))
        # should also work on quantities that can be made dimensionless
        q2 = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))
        assert q2.unit == u.dimensionless_unscaled
        assert_allclose(q2.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])))

    @pytest.mark.parametrize(
        "function", (np.exp, np.expm1, np.exp2, np.log, np.log2, np.log10, np.log1p)
    )
    def test_exp_invalid_units(self, function):
        # Can't use exp() with non-dimensionless quantities
        with pytest.raises(
            TypeError,
            match=(
                f"Can only apply '{function.__name__}' function "
                "to dimensionless quantities"
            ),
        ):
            function(3.0 * u.m / u.s)

    def test_modf_scalar(self):
        q = np.modf(9.0 * u.m / (600.0 * u.cm))
        assert q == (0.5 * u.dimensionless_unscaled, 1.0 * u.dimensionless_unscaled)

    def test_modf_array(self):
        v = np.arange(10.0) * u.m / (500.0 * u.cm)
        q = np.modf(v)
        n = np.modf(v.to_value(u.dimensionless_unscaled))
        assert q[0].unit == u.dimensionless_unscaled
        assert q[1].unit == u.dimensionless_unscaled
        assert all(q[0].value == n[0])
        assert all(q[1].value == n[1])

    def test_frexp_scalar(self):
        # frexp returns plain arrays (mantissa, exponent), not Quantities.
        q = np.frexp(3.0 * u.m / (6.0 * u.m))
        assert q == (np.array(0.5), np.array(0.0))

    def test_frexp_array(self):
        q = np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m))
        assert all(
            (_q0, _q1) == np.frexp(_d)
            for _q0, _q1, _d in zip(q[0], q[1], [1.0 / 3.0, 1.0 / 2.0, 1.0])
        )

    def test_frexp_invalid_units(self):
        # Can't use prod() with non-dimensionless quantities
        with pytest.raises(
            TypeError,
            match=(
                "Can only apply 'frexp' function to unscaled dimensionless quantities"
            ),
        ):
            np.frexp(3.0 * u.m / u.s)
        # also does not work on quantities that can be made dimensionless
        with pytest.raises(
            TypeError,
            match=(
                "Can only apply 'frexp' function to unscaled dimensionless quantities"
            ),
        ):
            np.frexp(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm))

    @pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
    def test_dimensionless_twoarg_array(self, function):
        q = function(np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm), 1.0)
        assert q.unit == u.dimensionless_unscaled
        assert_allclose(
            q.value, function(np.array([100.0 / 3.0, 100.0 / 2.0, 100.0]), 1.0)
        )

    @pytest.mark.parametrize("function", (np.logaddexp, np.logaddexp2))
    def test_dimensionless_twoarg_invalid_units(self, function):
        with pytest.raises(
            TypeError,
            match=(
                f"Can only apply '{function.__name__}' function to dimensionless"
                " quantities"
            ),
        ):
            function(1.0 * u.km / u.s, 3.0 * u.m / u.s)
class TestInvariantUfuncs:
    """Ufuncs whose output carries the same unit as their (first)
    Quantity input, with any second argument converted to that unit."""

    @pytest.mark.parametrize(
        "ufunc",
        [
            np.absolute,
            np.fabs,
            np.conj,
            np.conjugate,
            np.negative,
            np.spacing,
            np.rint,
            np.floor,
            np.ceil,
            np.positive,
        ],
    )
    def test_invariant_scalar(self, ufunc):
        q_i = 4.7 * u.m
        q_o = ufunc(q_i)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i.unit
        assert q_o.value == ufunc(q_i.value)

    @pytest.mark.parametrize(
        "ufunc", [np.absolute, np.conjugate, np.negative, np.rint, np.floor, np.ceil]
    )
    def test_invariant_array(self, ufunc):
        q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
        q_o = ufunc(q_i)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i.unit
        assert np.all(q_o.value == ufunc(q_i.value))

    @pytest.mark.parametrize(
        "ufunc",
        [
            np.add,
            np.subtract,
            np.hypot,
            np.maximum,
            np.minimum,
            np.nextafter,
            np.remainder,
            np.mod,
            np.fmod,
        ],
    )
    def test_invariant_twoarg_scalar(self, ufunc):
        # The second operand is converted to the first operand's unit.
        q_i1 = 4.7 * u.m
        q_i2 = 9.4 * u.km
        q_o = ufunc(q_i1, q_i2)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i1.unit
        assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))

    @pytest.mark.parametrize(
        "ufunc",
        [
            np.add,
            np.subtract,
            np.hypot,
            np.maximum,
            np.minimum,
            np.nextafter,
            np.remainder,
            np.mod,
            np.fmod,
        ],
    )
    def test_invariant_twoarg_array(self, ufunc):
        q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
        q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.us
        q_o = ufunc(q_i1, q_i2)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i1.unit
        assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))

    @pytest.mark.parametrize(
        ("ufunc", "arbitrary"),
        [
            (np.add, 0.0),
            (np.subtract, 0.0),
            (np.hypot, 0.0),
            (np.maximum, 0.0),
            (np.minimum, 0.0),
            (np.nextafter, 0.0),
            (np.remainder, np.inf),
            (np.mod, np.inf),
            (np.fmod, np.inf),
        ],
    )
    def test_invariant_twoarg_one_arbitrary(self, ufunc, arbitrary):
        # `arbitrary` is a plain number that is valid for any unit
        # (0. for additive ufuncs, inf for modular ones).
        q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
        q_o = ufunc(q_i1, arbitrary)
        assert isinstance(q_o, u.Quantity)
        assert q_o.unit == q_i1.unit
        assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary))

    @pytest.mark.parametrize(
        "ufunc",
        [
            np.add,
            np.subtract,
            np.hypot,
            np.maximum,
            np.minimum,
            np.nextafter,
            np.remainder,
            np.mod,
            np.fmod,
        ],
    )
    def test_invariant_twoarg_invalid_units(self, ufunc):
        q_i1 = 4.7 * u.m
        q_i2 = 9.4 * u.s
        with pytest.raises(u.UnitsError, match="compatible dimensions"):
            ufunc(q_i1, q_i2)
class TestComparisonUfuncs:
    """Comparison and test ufuncs: results are plain boolean (or plain
    numeric, for np.sign) arrays, never Quantities."""

    @pytest.mark.parametrize(
        "ufunc",
        [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
    )
    def test_comparison_valid_units(self, ufunc):
        q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
        q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
        q_o = ufunc(q_i1, q_i2)
        assert not isinstance(q_o, u.Quantity)
        assert q_o.dtype == bool
        assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
        q_o2 = ufunc(q_i1 / q_i2, 2.0)
        assert not isinstance(q_o2, u.Quantity)
        assert q_o2.dtype == bool
        assert np.all(
            q_o2 == ufunc((q_i1 / q_i2).to_value(u.dimensionless_unscaled), 2.0)
        )
        # comparison with 0., inf, nan is OK even for dimensional quantities
        # (though ignore numpy runtime warnings for comparisons with nan).
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            for arbitrary_unit_value in (0.0, np.inf, np.nan):
                ufunc(q_i1, arbitrary_unit_value)
                ufunc(q_i1, arbitrary_unit_value * np.ones(len(q_i1)))
            # and just for completeness
            ufunc(q_i1, np.array([0.0, np.inf, np.nan]))

    @pytest.mark.parametrize(
        "ufunc",
        [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
    )
    def test_comparison_invalid_units(self, ufunc):
        q_i1 = 4.7 * u.m
        q_i2 = 9.4 * u.s
        with pytest.raises(u.UnitsError, match="compatible dimensions"):
            ufunc(q_i1, q_i2)

    @pytest.mark.parametrize("ufunc", (np.isfinite, np.isinf, np.isnan, np.signbit))
    def test_onearg_test_ufuncs(self, ufunc):
        q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
        out = ufunc(q)
        assert not isinstance(out, u.Quantity)
        assert out.dtype == bool
        assert np.all(out == ufunc(q.value))

    # Ignore RuntimeWarning raised on Windows and s390.
    @pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
    def test_sign(self):
        q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
        out = np.sign(q)
        assert not isinstance(out, u.Quantity)
        assert out.dtype == q.dtype
        # nan != nan, so allow matching nan positions instead of equality.
        assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
class TestInplaceUfuncs:
    """Ufuncs called with ``out=`` set to a Quantity operate in place,
    preserving object identity and adjusting the output's unit as needed."""

    @pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
    def test_one_argument_ufunc_inplace(self, value):
        """In-place one-argument ufunc keeps identity and updates the unit."""
        # without scaling
        s = value * u.rad
        check = s
        np.sin(s, out=s)
        assert check is s
        assert check.unit == u.dimensionless_unscaled
        # with scaling
        s2 = (value * u.rad).to(u.deg)
        check2 = s2
        np.sin(s2, out=s2)
        assert check2 is s2
        assert check2.unit == u.dimensionless_unscaled
        assert_allclose(s.value, s2.value)

    @pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
    def test_one_argument_ufunc_inplace_2(self, value):
        """Check inplace works with non-quantity input and quantity output."""
        s = value * u.m
        check = s
        np.absolute(value, out=s)
        assert check is s
        assert np.all(check.value == np.absolute(value))
        assert check.unit is u.dimensionless_unscaled
        np.sqrt(value, out=s)
        assert check is s
        assert np.all(check.value == np.sqrt(value))
        assert check.unit is u.dimensionless_unscaled
        np.exp(value, out=s)
        assert check is s
        assert np.all(check.value == np.exp(value))
        assert check.unit is u.dimensionless_unscaled
        # Inverse trig assigns an angular unit to the output.
        np.arcsin(value / 10.0, out=s)
        assert check is s
        assert np.all(check.value == np.arcsin(value / 10.0))
        assert check.unit is u.radian

    @pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
    def test_one_argument_two_output_ufunc_inplace(self, value):
        """np.modf with quantity outputs works in place in either output slot."""
        v = 100.0 * value * u.cm / u.m
        v_copy = v.copy()
        tmp = v.copy()
        check = v
        np.modf(v, tmp, v)
        assert check is v
        assert check.unit == u.dimensionless_unscaled
        v2 = v_copy.to(u.dimensionless_unscaled)
        check2 = v2
        np.modf(v2, tmp, v2)
        assert check2 is v2
        assert check2.unit == u.dimensionless_unscaled
        # can also replace in last position if no scaling is needed
        v3 = v_copy.to(u.dimensionless_unscaled)
        check3 = v3
        np.modf(v3, v3, tmp)
        assert check3 is v3
        assert check3.unit == u.dimensionless_unscaled
        # can also replace input with first output when scaling
        v4 = v_copy.copy()
        check4 = v4
        np.modf(v4, v4, tmp)
        assert check4 is v4
        assert check4.unit == u.dimensionless_unscaled

    @pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
    def test_two_argument_ufunc_inplace_1(self, value):
        """Augmented assignment on quantities stays in place and tracks units."""
        s = value * u.cycle
        check = s
        s /= 2.0
        assert check is s
        assert np.all(check.value == value / 2.0)
        s /= u.s
        assert check is s
        assert check.unit == u.cycle / u.s
        s *= 2.0 * u.s
        assert check is s
        assert np.all(check == value * u.cycle)

    @pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
    def test_two_argument_ufunc_inplace_2(self, value):
        """In-place two-argument ufuncs update the unit of the output slot."""
        s = value * u.cycle
        check = s
        np.arctan2(s, s, out=s)
        assert check is s
        assert check.unit == u.radian
        # A failed in-place op must leave the output unchanged.
        with pytest.raises(u.UnitsError):
            s += 1.0 * u.m
        assert check is s
        assert check.unit == u.radian
        np.arctan2(1.0 * u.deg, s, out=s)
        assert check is s
        assert check.unit == u.radian
        np.add(1.0 * u.deg, s, out=s)
        assert check is s
        assert check.unit == u.deg
        np.multiply(2.0 / u.s, s, out=s)
        assert check is s
        assert check.unit == u.deg / u.s

    def test_two_argument_ufunc_inplace_3(self):
        """Plain-array inputs can still fill a quantity output in place."""
        s = np.array([1.0, 2.0, 3.0]) * u.dimensionless_unscaled
        np.add(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
        assert np.all(s.value == np.array([3.0, 6.0, 9.0]))
        assert s.unit is u.dimensionless_unscaled
        np.arctan2(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
        assert_allclose(s.value, np.arctan2(1.0, 2.0))
        assert s.unit is u.radian

    @pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
    def test_two_argument_two_output_ufunc_inplace(self, value):
        """np.divmod can reuse an input slot as either of its two outputs."""
        v = value * u.m
        divisor = 70.0 * u.cm
        v1 = v.copy()
        tmp = v.copy()
        check = np.divmod(v1, divisor, out=(tmp, v1))
        assert check[0] is tmp and check[1] is v1
        assert tmp.unit == u.dimensionless_unscaled
        assert v1.unit == v.unit
        v2 = v.copy()
        check2 = np.divmod(v2, divisor, out=(v2, tmp))
        assert check2[0] is v2 and check2[1] is tmp
        assert v2.unit == u.dimensionless_unscaled
        assert tmp.unit == v.unit
        v3a = v.copy()
        v3b = v.copy()
        check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
        assert check3[0] is v3a and check3[1] is v3b
        assert v3a.unit == u.dimensionless_unscaled
        assert v3b.unit == v.unit

    def test_ufunc_inplace_non_contiguous_data(self):
        # ensure inplace works also for non-contiguous data (closes #1834)
        s = np.arange(10.0) * u.m
        s_copy = s.copy()
        s2 = s[::2]
        s2 += 1.0 * u.cm
        # Only the strided view's elements changed.
        assert np.all(s[::2] > s_copy[::2])
        assert np.all(s[1::2] == s_copy[1::2])

    def test_ufunc_inplace_non_standard_dtype(self):
        """Check that inplace operations check properly for casting.
        First two tests check that float32 is kept, closes #3976.
        """
        a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
        a1 *= np.float32(10)
        assert a1.unit is u.m
        assert a1.dtype == np.float32
        a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
        a2 += 20.0 * u.km
        assert a2.unit is u.m
        assert a2.dtype == np.float32
        # For integer, in-place only works if no conversion is done.
        a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
        a3 += u.Quantity(10, u.m, dtype=np.int64)
        assert a3.unit is u.m
        assert a3.dtype == np.int32
        # mm -> m conversion would require casting to float; must fail.
        a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
        with pytest.raises(TypeError):
            a4 += u.Quantity(10, u.mm, dtype=np.int64)

    @pytest.mark.parametrize("ufunc", (np.equal, np.greater))
    def test_comparison_ufuncs_inplace(self, ufunc):
        """Comparisons can fill a plain bool output array in place."""
        q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
        q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
        check = np.empty(q_i1.shape, bool)
        ufunc(q_i1.value, q_i2.to_value(q_i1.unit), out=check)
        result = np.empty(q_i1.shape, bool)
        q_o = ufunc(q_i1, q_i2, out=result)
        assert q_o is result
        assert type(q_o) is np.ndarray
        assert q_o.dtype == bool
        assert np.all(q_o == check)

    @pytest.mark.parametrize("ufunc", (np.isfinite, np.signbit))
    def test_onearg_test_ufuncs_inplace(self, ufunc):
        """One-argument test ufuncs can fill a plain bool output array."""
        q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
        check = np.empty(q.shape, bool)
        ufunc(q.value, out=check)
        result = np.empty(q.shape, bool)
        out = ufunc(q, out=result)
        assert out is result
        assert type(out) is np.ndarray
        assert out.dtype == bool
        assert np.all(out == ufunc(q.value))

    # Ignore RuntimeWarning raised on Windows and s390.
    @pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
    def test_sign_inplace(self):
        """np.sign can fill a plain ndarray output of matching dtype."""
        q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
        check = np.empty(q.shape, q.dtype)
        np.sign(q.value, out=check)
        result = np.empty(q.shape, q.dtype)
        out = np.sign(q, out=result)
        assert out is result
        assert type(out) is np.ndarray
        assert out.dtype == q.dtype
        # nan == nan is False, so treat matching nan positions as equal.
        assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))

    def test_ndarray_inplace_op_with_quantity(self):
        """Regression test for gh-13911."""
        a = np.arange(3.0)
        q = u.Quantity([12.5, 25.0], u.percent)
        a[:2] += q  # This used to fail
        assert_array_equal(a, np.array([0.125, 1.25, 2.0]))
@pytest.mark.skipif(
    not hasattr(np.core.umath, "clip"), reason="no clip ufunc available"
)
class TestClip:
    """Test the clip ufunc.
    In numpy, this is hidden behind a function that does not do backwards
    compatibility checks. We explicitly test the ufunc here.
    """

    def setup_method(self):
        # Bind the raw ufunc (not np.clip, which wraps it).
        self.clip = np.core.umath.clip

    def test_clip_simple(self):
        """Clipping converts the bounds to the clipped quantity's unit."""
        q = np.arange(-1.0, 10.0) * u.m
        q_min = 125 * u.cm
        q_max = 0.0055 * u.km
        result = self.clip(q, q_min, q_max)
        assert result.unit == q.unit
        expected = (
            self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
        )
        assert np.all(result == expected)

    def test_clip_unitless_parts(self):
        """Plain 0 and ±inf are accepted as bounds for any unit."""
        q = np.arange(-1.0, 10.0) * u.m
        qlim = 0.0055 * u.km
        # one-sided
        result1 = self.clip(q, -np.inf, qlim)
        expected1 = self.clip(q.value, -np.inf, qlim.to_value(q.unit)) * q.unit
        assert np.all(result1 == expected1)
        result2 = self.clip(q, qlim, np.inf)
        expected2 = self.clip(q.value, qlim.to_value(q.unit), np.inf) * q.unit
        assert np.all(result2 == expected2)
        # Zero
        result3 = self.clip(q, np.zeros(q.shape), qlim)
        expected3 = self.clip(q.value, 0, qlim.to_value(q.unit)) * q.unit
        assert np.all(result3 == expected3)
        # Two unitless parts, array-shaped.
        result4 = self.clip(q, np.zeros(q.shape), np.full(q.shape, np.inf))
        expected4 = self.clip(q.value, 0, np.inf) * q.unit
        assert np.all(result4 == expected4)

    def test_clip_dimensionless(self):
        """Dimensionless bounds are scaled (200% -> 2.0) before clipping."""
        q = np.arange(-1.0, 10.0) * u.dimensionless_unscaled
        result = self.clip(q, 200 * u.percent, 5.0)
        expected = self.clip(q, 2.0, 5.0)
        assert result.unit == u.dimensionless_unscaled
        assert np.all(result == expected)

    def test_clip_ndarray(self):
        """A plain array with quantity bounds yields a dimensionless Quantity."""
        a = np.arange(-1.0, 10.0)
        result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled)
        assert isinstance(result, u.Quantity)
        expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
        assert np.all(result == expected)

    def test_clip_quantity_inplace(self):
        """out= set to the input quantity clips in place."""
        q = np.arange(-1.0, 10.0) * u.m
        q_min = 125 * u.cm
        q_max = 0.0055 * u.km
        expected = (
            self.clip(q.value, q_min.to_value(q.unit), q_max.to_value(q.unit)) * q.unit
        )
        result = self.clip(q, q_min, q_max, out=q)
        assert result is q
        assert np.all(result == expected)

    def test_clip_ndarray_dimensionless_output(self):
        """A quantity output gets its unit overwritten to dimensionless."""
        a = np.arange(-1.0, 10.0)
        q = np.zeros_like(a) * u.m
        expected = self.clip(a, 2.0, 5.0) * u.dimensionless_unscaled
        result = self.clip(a, 200 * u.percent, 5.0 * u.dimensionless_unscaled, out=q)
        assert result is q
        assert result.unit == u.dimensionless_unscaled
        assert np.all(result == expected)

    def test_clip_errors(self):
        """Incompatible or missing units among the bounds raise UnitsError."""
        q = np.arange(-1.0, 10.0) * u.m
        with pytest.raises(u.UnitsError):
            self.clip(q, 0, 1 * u.s)
        with pytest.raises(u.UnitsError):
            self.clip(q.value, 0, 1 * u.s)
        with pytest.raises(u.UnitsError):
            self.clip(q, -1, 0.0)
        with pytest.raises(u.UnitsError):
            self.clip(q, 0.0, 1.0)
class TestUfuncAt:
    """Test the 'at' method for ufuncs (calculates in-place at given indices).
    For Quantities, since calculations are in-place, it makes sense only
    if the result is still a quantity, and if the unit does not have to change.
    """

    def test_one_argument_ufunc_at(self):
        """ufunc.at works in place only when the unit is unchanged."""
        q = np.arange(10.0) * u.m
        i = np.array([1, 2])
        qv = q.value.copy()
        np.negative.at(q, i)
        np.negative.at(qv, i)
        assert np.all(q.value == qv)
        assert q.unit is u.m
        # cannot change from quantity to bool array
        with pytest.raises(TypeError):
            np.isfinite.at(q, i)
        # for selective in-place, cannot change the unit
        with pytest.raises(u.UnitsError):
            np.square.at(q, i)
        # except if the unit does not change (i.e., dimensionless)
        d = np.arange(10.0) * u.dimensionless_unscaled
        dv = d.value.copy()
        np.square.at(d, i)
        np.square.at(dv, i)
        assert np.all(d.value == dv)
        assert d.unit is u.dimensionless_unscaled
        d = np.arange(10.0) * u.dimensionless_unscaled
        dv = d.value.copy()
        np.log.at(d, i)
        np.log.at(dv, i)
        assert np.all(d.value == dv)
        assert d.unit is u.dimensionless_unscaled
        # also for sine it doesn't work, even if given an angle
        a = np.arange(10.0) * u.radian
        with pytest.raises(u.UnitsError):
            np.sin.at(a, i)
        # except, for consistency, if we have made radian equivalent to
        # dimensionless (though hopefully it will never be needed)
        av = a.value.copy()
        with u.add_enabled_equivalencies(u.dimensionless_angles()):
            np.sin.at(a, i)
            np.sin.at(av, i)
        assert_allclose(a.value, av)
        # but we won't do double conversion
        ad = np.arange(10.0) * u.degree
        with pytest.raises(u.UnitsError):
            np.sin.at(ad, i)

    def test_two_argument_ufunc_at(self):
        """Two-argument ufunc.at converts the operand, keeping the unit fixed."""
        s = np.arange(10.0) * u.m
        i = np.array([1, 2])
        check = s.value.copy()
        np.add.at(s, i, 1.0 * u.km)
        np.add.at(check, i, 1000.0)
        assert np.all(s.value == check)
        assert s.unit is u.m
        with pytest.raises(u.UnitsError):
            np.add.at(s, i, 1.0 * u.s)
        # also raise UnitsError if unit would have to be changed
        with pytest.raises(u.UnitsError):
            np.multiply.at(s, i, 1 * u.s)
        # but be fine if it does not
        s = np.arange(10.0) * u.m
        check = s.value.copy()
        np.multiply.at(s, i, 2.0 * u.dimensionless_unscaled)
        np.multiply.at(check, i, 2)
        assert np.all(s.value == check)
        s = np.arange(10.0) * u.m
        np.multiply.at(s, i, 2.0)
        assert np.all(s.value == check)
        # of course cannot change class of data either
        with pytest.raises(TypeError):
            np.greater.at(s, i, 1.0 * u.km)
class TestUfuncReduceReduceatAccumulate:
    """Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs.
    For Quantities, it makes sense only if the unit does not have to change.
    """

    def test_one_argument_ufunc_reduce_accumulate(self):
        """One-argument ufuncs do not support reduce/accumulate/reduceat."""
        # one argument cannot be used
        s = np.arange(10.0) * u.radian
        i = np.array([0, 5, 1, 6])
        with pytest.raises(ValueError):
            np.sin.reduce(s)
        with pytest.raises(ValueError):
            np.sin.accumulate(s)
        with pytest.raises(ValueError):
            np.sin.reduceat(s, i)

    def test_two_argument_ufunc_reduce_accumulate(self):
        """reduce/accumulate/reduceat work as long as the unit stays fixed."""
        s = np.arange(10.0) * u.m
        i = np.array([0, 5, 1, 6])
        check = s.value.copy()
        s_add_reduce = np.add.reduce(s)
        check_add_reduce = np.add.reduce(check)
        assert s_add_reduce.value == check_add_reduce
        assert s_add_reduce.unit is u.m
        s_add_accumulate = np.add.accumulate(s)
        check_add_accumulate = np.add.accumulate(check)
        assert np.all(s_add_accumulate.value == check_add_accumulate)
        assert s_add_accumulate.unit is u.m
        s_add_reduceat = np.add.reduceat(s, i)
        check_add_reduceat = np.add.reduceat(check, i)
        assert np.all(s_add_reduceat.value == check_add_reduceat)
        assert s_add_reduceat.unit is u.m
        # reduce(at) or accumulate on comparisons makes no sense,
        # as intermediate result is not even a Quantity
        with pytest.raises(TypeError):
            np.greater.reduce(s)
        with pytest.raises(TypeError):
            np.greater.accumulate(s)
        with pytest.raises(TypeError):
            np.greater.reduceat(s, i)
        # raise UnitsError if unit would have to be changed
        with pytest.raises(u.UnitsError):
            np.multiply.reduce(s)
        with pytest.raises(u.UnitsError):
            np.multiply.accumulate(s)
        with pytest.raises(u.UnitsError):
            np.multiply.reduceat(s, i)
        # but be fine if it does not
        s = np.arange(10.0) * u.dimensionless_unscaled
        check = s.value.copy()
        s_multiply_reduce = np.multiply.reduce(s)
        check_multiply_reduce = np.multiply.reduce(check)
        assert s_multiply_reduce.value == check_multiply_reduce
        assert s_multiply_reduce.unit is u.dimensionless_unscaled
        s_multiply_accumulate = np.multiply.accumulate(s)
        check_multiply_accumulate = np.multiply.accumulate(check)
        assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
        assert s_multiply_accumulate.unit is u.dimensionless_unscaled
        s_multiply_reduceat = np.multiply.reduceat(s, i)
        check_multiply_reduceat = np.multiply.reduceat(check, i)
        assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
        assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
    """Test 'outer' methods for ufuncs.
    Just a few spot checks, since it uses the same code as the regular
    ufunc call.
    """

    def test_one_argument_ufunc_outer(self):
        """One-argument ufuncs have no outer method."""
        # one argument cannot be used
        s = np.arange(10.0) * u.radian
        with pytest.raises(ValueError):
            np.sin.outer(s)

    def test_two_argument_ufunc_outer(self):
        """outer combines units exactly like the corresponding binary ufunc."""
        s1 = np.arange(10.0) * u.m
        s2 = np.arange(2.0) * u.s
        check1 = s1.value
        check2 = s2.value
        s12_multiply_outer = np.multiply.outer(s1, s2)
        check12_multiply_outer = np.multiply.outer(check1, check2)
        assert np.all(s12_multiply_outer.value == check12_multiply_outer)
        assert s12_multiply_outer.unit == s1.unit * s2.unit
        # raise UnitsError if appropriate
        with pytest.raises(u.UnitsError):
            np.add.outer(s1, s2)
        # but be fine if it does not
        s3 = np.arange(2.0) * s1.unit
        check3 = s3.value
        s13_add_outer = np.add.outer(s1, s3)
        check13_add_outer = np.add.outer(check1, check3)
        assert np.all(s13_add_outer.value == check13_add_outer)
        assert s13_add_outer.unit is s1.unit
        # Comparison outer returns a plain boolean ndarray.
        s13_greater_outer = np.greater.outer(s1, s3)
        check13_greater_outer = np.greater.outer(check1, check3)
        assert type(s13_greater_outer) is np.ndarray
        assert np.all(s13_greater_outer == check13_greater_outer)
if HAS_SCIPY:
    from scipy import special as sps

    # Ufuncs mapping a dimensionless argument to a dimensionless result.
    erf_like_ufuncs = (
        sps.erf, sps.erfc, sps.erfcx, sps.erfi,
        sps.gamma, sps.gammaln, sps.loggamma, sps.gammasgn, sps.psi,
        sps.rgamma, sps.digamma, sps.wofz, sps.dawsn,
        sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10,
    )  # fmt: skip

    # In some scipy versions erfinv/erfcinv are plain functions, not ufuncs.
    if isinstance(sps.erfinv, np.ufunc):
        erf_like_ufuncs += (sps.erfinv, sps.erfcinv)

    def test_scipy_registration():
        """Check that scipy gets loaded upon first use."""
        assert sps.erf not in qh.UFUNC_HELPERS
        sps.erf(1.0 * u.percent)
        assert sps.erf in qh.UFUNC_HELPERS
        if isinstance(sps.erfinv, np.ufunc):
            assert sps.erfinv in qh.UFUNC_HELPERS
        else:
            assert sps.erfinv not in qh.UFUNC_HELPERS

    class TestScipySpecialUfuncs:
        """Spot-check unit handling of scipy.special ufuncs."""

        @pytest.mark.parametrize("function", erf_like_ufuncs)
        def test_erf_scalar(self, function):
            # Reuse the generic exp-style checks; they only use `function`,
            # so passing None for `self` is fine.
            TestQuantityMathFuncs.test_exp_scalar(None, function)

        @pytest.mark.parametrize("function", erf_like_ufuncs)
        def test_erf_array(self, function):
            TestQuantityMathFuncs.test_exp_array(None, function)

        @pytest.mark.parametrize("function", erf_like_ufuncs)
        def test_erf_invalid_units(self, function):
            TestQuantityMathFuncs.test_exp_invalid_units(None, function)

        @pytest.mark.parametrize("function", (sps.cbrt,))
        def test_cbrt_scalar(self, function):
            TestQuantityMathFuncs.test_cbrt_scalar(None, function)

        @pytest.mark.parametrize("function", (sps.cbrt,))
        def test_cbrt_array(self, function):
            TestQuantityMathFuncs.test_cbrt_array(None, function)

        @pytest.mark.parametrize("function", (sps.radian,))
        def test_radian(self, function):
            """sps.radian(deg, arcmin, arcsec) accepts any angular units."""
            q1 = function(180.0 * u.degree, 0.0 * u.arcmin, 0.0 * u.arcsec)
            assert_allclose(q1.value, np.pi)
            assert q1.unit == u.radian
            q2 = function(0.0 * u.degree, 30.0 * u.arcmin, 0.0 * u.arcsec)
            assert_allclose(q2.value, (30.0 * u.arcmin).to(u.radian).value)
            assert q2.unit == u.radian
            q3 = function(0.0 * u.degree, 0.0 * u.arcmin, 30.0 * u.arcsec)
            assert_allclose(q3.value, (30.0 * u.arcsec).to(u.radian).value)
            # the following doesn't make much sense in terms of the name of the
            # routine, but we check it gives the correct result.
            q4 = function(3.0 * u.radian, 0.0 * u.arcmin, 0.0 * u.arcsec)
            assert_allclose(q4.value, 3.0)
            assert q4.unit == u.radian
            with pytest.raises(TypeError):
                function(3.0 * u.m, 2.0 * u.s, 1.0 * u.kg)

        # Bessel-family ufuncs: both order and argument must be dimensionless.
        jv_like_ufuncs = (
            sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
            sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
            sps.hankel2e,
        )  # fmt: skip

        @pytest.mark.parametrize("function", jv_like_ufuncs)
        def test_jv_scalar(self, function):
            q = function(2.0 * u.m / (2.0 * u.m), 3.0 * u.m / (6.0 * u.m))
            assert q.unit == u.dimensionless_unscaled
            assert q.value == function(1.0, 0.5)

        @pytest.mark.parametrize("function", jv_like_ufuncs)
        def test_jv_array(self, function):
            q = function(
                np.ones(3) * u.m / (1.0 * u.m),
                np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.m),
            )
            assert q.unit == u.dimensionless_unscaled
            assert np.all(
                q.value == function(np.ones(3), np.array([1.0 / 3.0, 1.0 / 2.0, 1.0]))
            )
            # should also work on quantities that can be made dimensionless
            q2 = function(
                np.ones(3) * u.m / (1.0 * u.m),
                np.array([2.0, 3.0, 6.0]) * u.m / (6.0 * u.cm),
            )
            assert q2.unit == u.dimensionless_unscaled
            assert_allclose(
                q2.value,
                function(np.ones(3), np.array([100.0 / 3.0, 100.0 / 2.0, 100.0])),
            )

        @pytest.mark.parametrize("function", jv_like_ufuncs)
        def test_jv_invalid_units(self, function):
            # Can't use jv() with non-dimensionless quantities
            with pytest.raises(
                TypeError,
                match=(
                    f"Can only apply '{function.__name__}' function to dimensionless"
                    " quantities"
                ),
            ):
                function(1.0 * u.kg, 3.0 * u.m / u.s)
|
1f6a4dae2b1c145f7b65890cb3e493386cdd8662fc40d8fea09317a21a2e7e6c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import sys
import typing
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy import units as u
from astropy.units._typing import HAS_ANNOTATED
# list of pairs (target unit/physical type, input unit)
# Targets may be a unit, a physical-type string, or a list of either
# (a list means any of its entries is acceptable).
x_inputs = [
    (u.arcsec, u.deg),
    ("angle", u.deg),
    (u.kpc / u.Myr, u.km / u.s),
    ("speed", u.km / u.s),
    ([u.arcsec, u.km], u.deg),
    ([u.arcsec, u.km], u.km),  # multiple allowed
    (["angle", "length"], u.deg),
    (["angle", "length"], u.km),
]
# Same structure for the 'y' argument, exercising conversion both ways.
y_inputs = [
    (u.m, u.km),
    (u.km, u.m),
    (u.arcsec, u.deg),
    ("angle", u.deg),
    (u.kpc / u.Myr, u.km / u.s),
    ("speed", u.km / u.s),
]
@pytest.fixture(scope="module", params=list(range(len(x_inputs))))
def x_input(request):
    """Provide each (target, unit) pair from ``x_inputs``, keyed by index."""
    idx = request.param
    return x_inputs[idx]
@pytest.fixture(scope="module", params=list(range(len(y_inputs))))
def y_input(request):
    """Provide each (target, unit) pair from ``y_inputs``, keyed by index."""
    idx = request.param
    return y_inputs[idx]
# ---- Tests that use the fixtures defined above ----
def test_args(x_input, y_input):
    """Valid positional quantities pass through the decorator unchanged."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x, y):
        return x, y

    x_out, y_out = myfunc_args(1 * x_unit, 1 * y_unit)
    for value, unit in ((x_out, x_unit), (y_out, y_unit)):
        assert isinstance(value, u.Quantity)
        assert value.unit == unit
def test_args_nonquantity(x_input):
    """Arguments without a declared target are passed through untouched."""
    x_target, x_unit = x_input

    @u.quantity_input(x=x_target)
    def myfunc_args(x, y):
        return x, y

    checked, unchecked = myfunc_args(1 * x_unit, 100)
    assert isinstance(checked, u.Quantity)
    assert checked.unit == x_unit
    assert isinstance(unchecked, int)
def test_wrong_unit(x_input, y_input):
    """An argument in an inconvertible unit raises UnitsError with a clear message."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x, y):
        return x, y

    with pytest.raises(
        u.UnitsError,
        match=(
            "Argument 'y' to function 'myfunc_args' must be in units "
            f"convertible to '{str(y_target)}'."
        ),
    ):
        x, y = myfunc_args(1 * x_unit, 100 * u.Joule)  # has to be an unspecified unit
def test_wrong_unit_annotated(x_input, y_input):
    """Annotation-based checking rejects inconvertible units with UnitsError."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input
    def myfunc_args(x: x_target, y: y_target):
        return x, y

    # u.Joule is convertible to none of the parametrized y targets.
    with pytest.raises(u.UnitsError, match="Argument 'y' to function 'myfunc_args'"):
        myfunc_args(1 * x_unit, 100 * u.Joule)
def test_not_quantity(x_input, y_input):
    """A unitless value for a checked argument raises a helpful TypeError."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x, y):
        return x, y

    with pytest.raises(
        TypeError,
        match=(
            "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. "
            "You should pass in an astropy Quantity instead."
        ),
    ):
        x, y = myfunc_args(1 * x_unit, 100)
def test_not_quantity_annotated(x_input, y_input):
    """Annotation-based checking gives the same TypeError for unitless values."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input
    def myfunc_args(x: x_target, y: y_target):
        return x, y

    with pytest.raises(
        TypeError,
        match=(
            "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. "
            "You should pass in an astropy Quantity instead."
        ),
    ):
        x, y = myfunc_args(1 * x_unit, 100)
def test_kwargs(x_input, y_input):
    """Checked keyword arguments are validated; unchecked ones pass through."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x, my_arg, y=1 * y_unit):
        return x, my_arg, y

    x_out, passthrough, y_out = myfunc_args(1 * x_unit, 100, y=100 * y_unit)
    assert isinstance(x_out, u.Quantity)
    assert isinstance(passthrough, int)
    assert isinstance(y_out, u.Quantity)
    assert y_out.unit == y_unit
def test_unused_kwargs(x_input, y_input):
    """Keyword arguments without a declared target are not validated."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x, my_arg1, y=y_unit, my_arg2=1000):
        return x, my_arg1, y, my_arg2

    x, my_arg1, y, my_arg2 = myfunc_args(1 * x_unit, 100, y=100 * y_unit, my_arg2=10)
    assert isinstance(x, u.Quantity)
    assert isinstance(my_arg1, int)
    assert isinstance(y, u.Quantity)
    assert isinstance(my_arg2, int)
    assert y.unit == y_unit
    assert my_arg2 == 10
def test_kwarg_wrong_unit(x_input, y_input):
    """A keyword argument in an inconvertible unit raises UnitsError."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x, y=10 * y_unit):
        return x, y

    with pytest.raises(
        u.UnitsError,
        match=(
            "Argument 'y' to function 'myfunc_args' must be in units "
            f"convertible to '{str(y_target)}'."
        ),
    ):
        x, y = myfunc_args(1 * x_unit, y=100 * u.Joule)
def test_kwarg_not_quantity(x_input, y_input):
    """A unitless keyword value for a checked argument raises TypeError."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x, y=10 * y_unit):
        return x, y

    with pytest.raises(
        TypeError,
        match=(
            "Argument 'y' to function 'myfunc_args' has no 'unit' attribute. "
            "You should pass in an astropy Quantity instead."
        ),
    ):
        x, y = myfunc_args(1 * x_unit, y=100)
def test_kwarg_default(x_input, y_input):
    """A quantity default for a checked keyword passes validation untouched."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x, y=10 * y_unit):
        return x, y

    x_out, y_out = myfunc_args(1 * x_unit)
    for value, unit in ((x_out, x_unit), (y_out, y_unit)):
        assert isinstance(value, u.Quantity)
        assert value.unit == unit
def test_kwargs_input(x_input, y_input):
    """Arguments supplied via ** unpacking are validated like ordinary keywords."""
    x_target, x_unit = x_input
    y_target, y_unit = y_input

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x=1 * x_unit, y=1 * y_unit):
        return x, y

    supplied = {"x": 10 * x_unit, "y": 10 * y_unit}
    x_out, y_out = myfunc_args(**supplied)
    for value, unit in ((x_out, x_unit), (y_out, y_unit)):
        assert isinstance(value, u.Quantity)
        assert value.unit == unit
def test_kwargs_extra(x_input):
    """A **kwargs catch-all in the signature does not disturb validation."""
    x_target, x_unit = x_input

    @u.quantity_input(x=x_target)
    def myfunc_args(x, **kwargs):
        return x

    result = myfunc_args(1 * x_unit)
    assert isinstance(result, u.Quantity)
    assert result.unit == x_unit
# ---- Tests that don't used the fixtures ----
@pytest.mark.parametrize("x_unit,y_unit", [(u.arcsec, u.eV), ("angle", "energy")])
def test_arg_equivalencies(x_unit, y_unit):
    """The ``equivalencies`` argument is honored when validating arguments."""
    @u.quantity_input(x=x_unit, y=y_unit, equivalencies=u.mass_energy())
    def myfunc_args(x, y):
        return x, y + (10 * u.J)  # Add an energy to check equiv is working

    # Mass passes the 'energy' check thanks to the mass-energy equivalency.
    x, y = myfunc_args(1 * u.arcsec, 100 * u.gram)
    assert isinstance(x, u.Quantity)
    assert isinstance(y, u.Quantity)
    assert x.unit == u.arcsec
    assert y.unit == u.gram
@pytest.mark.parametrize("x_unit,energy_unit", [(u.arcsec, u.eV), ("angle", "energy")])
def test_kwarg_equivalencies(x_unit, energy_unit):
    """``equivalencies`` also applies to keyword arguments with defaults."""
    @u.quantity_input(x=x_unit, energy=energy_unit, equivalencies=u.mass_energy())
    def myfunc_args(x, energy=10 * u.eV):
        return x, energy + (10 * u.J)  # Add an energy to check equiv is working

    x, energy = myfunc_args(1 * u.arcsec, 100 * u.gram)
    assert isinstance(x, u.Quantity)
    assert isinstance(energy, u.Quantity)
    assert x.unit == u.arcsec
    assert energy.unit == u.gram
def test_no_equivalent():
    """An object with a 'unit' attribute lacking is_equivalent raises TypeError."""
    class test_unit:
        pass

    class test_quantity:
        # Duck-typed 'unit' that is not a real astropy unit.
        unit = test_unit()

    @u.quantity_input(x=u.arcsec)
    def myfunc_args(x):
        return x

    with pytest.raises(
        TypeError,
        match=(
            "Argument 'x' to function 'myfunc_args' has a 'unit' attribute without an"
            " 'is_equivalent' method. You should pass in an astropy Quantity instead."
        ),
    ):
        x, y = myfunc_args(test_quantity())
def test_kwarg_invalid_physical_type():
    """An unknown physical-type string raises ValueError at call time."""
    @u.quantity_input(x="angle", y="africanswallow")
    def myfunc_args(x, y=10 * u.deg):
        return x, y

    with pytest.raises(
        ValueError, match="Invalid unit or physical type 'africanswallow'."
    ):
        x, y = myfunc_args(1 * u.arcsec, y=100 * u.deg)
def test_default_value_check():
    """A non-quantity default triggers TypeError when it is actually used."""
    x_target = u.deg
    x_unit = u.arcsec

    with pytest.raises(TypeError):

        @u.quantity_input(x=x_target)
        def myfunc_args(x=1.0):
            return x

        # Decoration itself succeeds; the unitless default only fails
        # validation when the function is called without an argument.
        x = myfunc_args()

    x = myfunc_args(1 * x_unit)
    assert isinstance(x, u.Quantity)
    assert x.unit == x_unit
def test_str_unit_typo():
    """A misspelled string annotation raises ValueError when the function is called."""
    @u.quantity_input
    def myfunc_args(x: "kilograam"):
        return x

    # The bad unit string is only parsed at call time, so the error surfaces
    # here rather than at decoration time.  (Dropped the unused `result`
    # binding: the call raises, so nothing is ever assigned.)
    with pytest.raises(ValueError):
        myfunc_args(u.kg)
@pytest.mark.skipif(not HAS_ANNOTATED, reason="need `Annotated`")
class TestTypeAnnotations:
    """Checks for ``Annotated``-style unit annotations."""

    @pytest.mark.parametrize(
        "annot",
        [u.m, u.Quantity[u.m], u.Quantity[u.m, "more"]] if HAS_ANNOTATED else [None],
    )  # Note: parametrization is done even if test class is skipped
    def test_single_annotation_unit(self, annot):
        """Try a variety of valid annotations."""
        @u.quantity_input
        def myfunc_args(x: annot, y: str):
            return x, y

        i_q, i_str = 2 * u.m, "cool string"
        o_q, o_str = myfunc_args(i_q, i_str)
        assert o_q == i_q
        assert o_str == i_str
def test_args_None():
    """``None`` in the allowed-targets list lets ``None`` values pass through."""
    x_target = u.deg
    x_unit = u.arcsec
    y_target = u.km
    y_unit = u.kpc

    @u.quantity_input(x=[x_target, None], y=[None, y_target])
    def myfunc_args(x, y):
        return x, y

    x, y = myfunc_args(1 * x_unit, None)
    assert isinstance(x, u.Quantity)
    assert x.unit == x_unit
    assert y is None
    x, y = myfunc_args(None, 1 * y_unit)
    assert isinstance(y, u.Quantity)
    assert y.unit == y_unit
    assert x is None
def test_args_None_kwarg():
    """A ``None`` default permits ``None`` values, but a positional target
    without ``None`` in its allowed list still rejects them."""
    x_target = u.deg
    x_unit = u.arcsec
    y_target = u.km

    @u.quantity_input(x=x_target, y=y_target)
    def myfunc_args(x, y=None):
        return x, y

    x, y = myfunc_args(1 * x_unit)
    assert isinstance(x, u.Quantity)
    assert x.unit == x_unit
    assert y is None
    x, y = myfunc_args(1 * x_unit, None)
    assert isinstance(x, u.Quantity)
    assert x.unit == x_unit
    assert y is None
    # x has no None default, so None is not acceptable there.
    with pytest.raises(TypeError):
        x, y = myfunc_args(None, None)
@pytest.mark.parametrize("val", [1.0, 1, np.arange(10), np.arange(10.0)])
def test_allow_dimensionless_numeric(val):
    """Bare numbers and numeric arrays are accepted when
    dimensionless_unscaled is among the allowed units."""
    targets = [u.km / u.s, u.dimensionless_unscaled]

    @u.quantity_input(velocity=targets)
    def myfunc(velocity):
        return velocity

    returned = myfunc(val)
    assert np.all(returned == val)
@pytest.mark.parametrize("val", [1.0, 1, np.arange(10), np.arange(10.0)])
def test_allow_dimensionless_numeric_strict(val):
    """
    When dimensionless_unscaled is an allowed unit, but we are being strict,
    don't allow numbers and numeric numpy arrays through
    """
    @u.quantity_input(
        velocity=[u.km / u.s, u.dimensionless_unscaled], strict_dimensionless=True
    )
    def myfunc(velocity):
        return velocity

    # Plain numerics must be rejected under strict_dimensionless.
    with pytest.raises(TypeError):
        assert myfunc(val)
@pytest.mark.parametrize("val", [1 * u.deg, [1, 2, 3] * u.m])
def test_dimensionless_with_nondimensionless_input(val):
    """Input carrying a physical dimension is rejected when only
    dimensionless_unscaled is allowed."""

    @u.quantity_input(x=u.dimensionless_unscaled)
    def myfunc(x):
        return x

    with pytest.raises(u.UnitsError):
        myfunc(val)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires py3.9+")
def test_annotated_not_quantity():
    """Test when annotation looks like a Quantity[X], but isn't."""
    @u.quantity_input()
    def myfunc(x: typing.Annotated[object, u.m]):
        return x

    # nothing happens when wrong unit is passed: the annotation's base type
    # is not Quantity, so the decorator performs no checking at all.
    assert myfunc(1) == 1
    assert myfunc(1 * u.m) == 1 * u.m
    assert myfunc(1 * u.s) == 1 * u.s
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires py3.9+")
def test_annotated_not_unit():
    """Test when annotation looks like a Quantity[X], but the unit's wrong."""
    @u.quantity_input()
    def myfunc(x: typing.Annotated[u.Quantity, object()]):
        return x

    # nothing happens when wrong unit is passed: the metadata is not a unit,
    # so the decorator skips validation entirely.
    assert myfunc(1) == 1
    assert myfunc(1 * u.m) == 1 * u.m
    assert myfunc(1 * u.s) == 1 * u.s
|
e663bdcdd668e7d4e4723153cb587653bfb4fe89286acacdce7cf32111953530 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numbers
import numpy as np
from astropy.units import (
CompositeUnit,
Unit,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
photometric,
)
from .core import FunctionQuantity, FunctionUnitBase
from .units import dB, dex, mag
# Public names exported by this module: the function unit classes, their
# quantity counterparts, and the pre-defined magnitude systems defined at
# the bottom of the file.
__all__ = [
    "LogUnit",
    "MagUnit",
    "DexUnit",
    "DecibelUnit",
    "LogQuantity",
    "Magnitude",
    "Decibel",
    "Dex",
    "STmag",
    "ABmag",
    "M_bol",
    "m_bol",
]
class LogUnit(FunctionUnitBase):
    """Logarithmic unit containing a physical one.

    Usually, logarithmic units are instantiated via specific subclasses
    such as `~astropy.units.MagUnit`, `~astropy.units.DecibelUnit`, and
    `~astropy.units.DexUnit`.

    Parameters
    ----------
    physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the logarithmic function unit.
        If not given, dimensionless.

    function_unit : `~astropy.units.Unit` or `string`
        By default, the same as the logarithmic unit set by the subclass.
    """

    # the four essential overrides of FunctionUnitBase
    @property
    def _default_function_unit(self):
        # Base-10 logarithm: the natural function unit is ``dex``.
        return dex

    @property
    def _quantity_class(self):
        return LogQuantity

    def from_physical(self, x):
        """Transformation from value in physical to value in logarithmic units.

        Used in equivalency.
        """
        # Take log10, then rescale from dex to the actual function unit
        # (e.g., mag or dB).
        return dex.to(self._function_unit, np.log10(x))

    def to_physical(self, x):
        """Transformation from value in logarithmic to value in physical units.

        Used in equivalency.
        """
        # Inverse of from_physical: rescale to dex, then undo the log10.
        return 10 ** self._function_unit.to(dex, x)

    # ^^^^ the four essential overrides of FunctionUnitBase

    # add addition and subtraction, which imply multiplication/division of
    # the underlying physical units
    def _add_and_adjust_physical_unit(self, other, sign_self, sign_other):
        """Add/subtract LogUnit to/from another unit, and adjust physical unit.

        self and other are multiplied by sign_self and sign_other, resp.

        We wish to do: ±lu_1 + ±lu_2 -> lu_f           (lu=logarithmic unit)
                  and  pu_1^(±1) * pu_2^(±1) -> pu_f   (pu=physical unit)

        Raises
        ------
        UnitsError
            If function units are not equivalent.
        """
        # First, insist on compatible logarithmic type. Here, plain u.mag,
        # u.dex, and u.dB are OK, i.e., other does not have to be LogUnit
        # (this will indirectly test whether other is a unit at all).
        try:
            getattr(other, "function_unit", other)._to(self._function_unit)
        except AttributeError:
            # if other is not a unit (i.e., does not have _to).
            return NotImplemented
        except UnitsError:
            raise UnitsError(
                "Can only add/subtract logarithmic units of compatible type."
            )

        other_physical_unit = getattr(other, "physical_unit", dimensionless_unscaled)
        # Combine the physical units with the requested signs as exponents.
        physical_unit = CompositeUnit(
            1, [self._physical_unit, other_physical_unit], [sign_self, sign_other]
        )

        return self._copy(physical_unit)

    def __neg__(self):
        # Negating a logarithm inverts the physical unit: log(1/x) = -log(x).
        return self._copy(self.physical_unit ** (-1))

    def __add__(self, other):
        # Only know how to add to a logarithmic unit with compatible type,
        # be it a plain one (u.mag, etc.,) or another LogUnit.
        return self._add_and_adjust_physical_unit(other, +1, +1)

    def __radd__(self, other):
        return self._add_and_adjust_physical_unit(other, +1, +1)

    def __sub__(self, other):
        return self._add_and_adjust_physical_unit(other, +1, -1)

    def __rsub__(self, other):
        # here, in normal usage other cannot be LogUnit; only equivalent one
        # would be u.mag, u.dB, u.dex.  But might as well use common routine.
        return self._add_and_adjust_physical_unit(other, -1, +1)
class MagUnit(LogUnit):
    """Logarithmic physical units expressed in magnitudes.

    Parameters
    ----------
    physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the magnitude function unit.
        If not given, dimensionless.

    function_unit : `~astropy.units.Unit` or `string`
        By default, this is ``mag``, but this allows one to use an equivalent
        unit such as ``2 mag``.
    """

    @property
    def _quantity_class(self):
        # Quantities with this unit are represented by Magnitude.
        return Magnitude

    @property
    def _default_function_unit(self):
        # Magnitudes count in ``mag`` unless an equivalent unit is given.
        return mag
class DexUnit(LogUnit):
    """Logarithmic physical units expressed in dex.

    Parameters
    ----------
    physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the dex function unit.
        If not given, dimensionless.

    function_unit : `~astropy.units.Unit` or `string`
        By default, this is ``dex``, but this allows one to use an equivalent
        unit such as ``0.5 dex``.
    """

    @property
    def _default_function_unit(self):
        return dex

    @property
    def _quantity_class(self):
        return Dex

    def to_string(self, format="generic"):
        """Output the unit in the given format as a string.

        CDS format wraps the physical unit in brackets (e.g. "[mW]");
        all other formats delegate to `LogUnit.to_string`.
        """
        if format == "cds":
            if self.physical_unit == dimensionless_unscaled:
                return "[-]"  # by default, would get "[---]".
            else:
                return f"[{self.physical_unit.to_string(format=format)}]"
        else:
            # Bug fix: propagate the requested format to the base class.
            # Previously the argument was dropped (``super().to_string()``),
            # so e.g. ``format="latex"`` silently fell back to generic output.
            return super().to_string(format)
class DecibelUnit(LogUnit):
    """Logarithmic physical units expressed in dB.

    Parameters
    ----------
    physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the decibel function unit.
        If not given, dimensionless.

    function_unit : `~astropy.units.Unit` or `string`
        By default, this is ``dB``, but this allows one to use an equivalent
        unit such as ``2 dB``.
    """

    @property
    def _quantity_class(self):
        # Quantities with this unit are represented by Decibel.
        return Decibel

    @property
    def _default_function_unit(self):
        # Decibels count in ``dB`` unless an equivalent unit is given.
        return dB
class LogQuantity(FunctionQuantity):
    """A representation of a (scaled) logarithm of a number with a unit.

    Parameters
    ----------
    value : number, `~astropy.units.Quantity`, `~astropy.units.LogQuantity`, or sequence of quantity-like.
        The numerical value of the logarithmic quantity. If a number or
        a `~astropy.units.Quantity` with a logarithmic unit, it will be
        converted to ``unit`` and the physical unit will be inferred from
        ``unit``. If a `~astropy.units.Quantity` with just a physical unit,
        it will converted to the logarithmic unit, after, if necessary,
        converting it to the physical unit inferred from ``unit``.

    unit : str, `~astropy.units.UnitBase`, or `~astropy.units.FunctionUnitBase`, optional
        For an `~astropy.units.FunctionUnitBase` instance, the
        physical unit will be taken from it; for other input, it will be
        inferred from ``value``. By default, ``unit`` is set by the subclass.

    dtype : `~numpy.dtype`, optional
        The ``dtype`` of the resulting Numpy array or scalar that will
        hold the value. If not provided, it is determined automatically
        from the input value.

    copy : bool, optional
        If `True` (default), then the value is copied. Otherwise, a copy will
        only be made if ``__array__`` returns a copy, if value is a nested
        sequence, or if a copy is needed to satisfy an explicitly given
        ``dtype``. (The `False` option is intended mostly for internal use,
        to speed up initialization where a copy is known to have been made.
        Use with care.)

    Examples
    --------
    Typically, use is made of an `~astropy.units.FunctionQuantity`
    subclass, as in::

        >>> import astropy.units as u
        >>> u.Magnitude(-2.5)
        <Magnitude -2.5 mag>
        >>> u.Magnitude(10.*u.count/u.second)
        <Magnitude -2.5 mag(ct / s)>
        >>> u.Decibel(1.*u.W, u.DecibelUnit(u.mW))  # doctest: +FLOAT_CMP
        <Decibel 30. dB(mW)>
    """

    # only override of FunctionQuantity
    _unit_class = LogUnit

    # additions that work just for logarithmic units
    def __add__(self, other):
        # Add function units, thus multiplying physical units. If no unit is
        # given, assume dimensionless_unscaled; this will give the appropriate
        # exception in LogUnit.__add__.
        new_unit = self.unit + getattr(other, "unit", dimensionless_unscaled)
        # Add actual logarithmic values, rescaling, e.g., dB -> dex.
        result = self._function_view + getattr(other, "_function_view", other)
        return self._new_view(result, new_unit)

    def __radd__(self, other):
        return self.__add__(other)

    def __iadd__(self, other):
        new_unit = self.unit + getattr(other, "unit", dimensionless_unscaled)
        # Do calculation in-place using _function_view of array.
        function_view = self._function_view
        function_view += getattr(other, "_function_view", other)
        self._set_unit(new_unit)
        return self

    def __sub__(self, other):
        # Subtract function units, thus dividing physical units.
        new_unit = self.unit - getattr(other, "unit", dimensionless_unscaled)
        # Subtract actual logarithmic values, rescaling, e.g., dB -> dex.
        result = self._function_view - getattr(other, "_function_view", other)
        return self._new_view(result, new_unit)

    def __rsub__(self, other):
        new_unit = self.unit.__rsub__(getattr(other, "unit", dimensionless_unscaled))
        result = self._function_view.__rsub__(getattr(other, "_function_view", other))
        # Ensure the result is in right function unit scale
        # (with rsub, this does not have to be one's own).
        result = result.to(new_unit.function_unit)
        return self._new_view(result, new_unit)

    def __isub__(self, other):
        new_unit = self.unit - getattr(other, "unit", dimensionless_unscaled)
        # Do calculation in-place using _function_view of array.
        function_view = self._function_view
        function_view -= getattr(other, "_function_view", other)
        self._set_unit(new_unit)
        return self

    def __mul__(self, other):
        # Multiply by a float or a dimensionless quantity
        if isinstance(other, numbers.Number):
            # Multiplying a log means putting the factor into the exponent
            # of the unit
            new_physical_unit = self.unit.physical_unit**other
            result = self.view(np.ndarray) * other
            return self._new_view(result, self.unit._copy(new_physical_unit))
        else:
            return super().__mul__(other)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __imul__(self, other):
        if isinstance(other, numbers.Number):
            new_physical_unit = self.unit.physical_unit**other
            function_view = self._function_view
            function_view *= other
            self._set_unit(self.unit._copy(new_physical_unit))
            return self
        else:
            return super().__imul__(other)

    def __truediv__(self, other):
        # Divide by a float or a dimensionless quantity
        if isinstance(other, numbers.Number):
            # Dividing a log means putting the nominator into the exponent
            # of the unit
            new_physical_unit = self.unit.physical_unit ** (1 / other)
            result = self.view(np.ndarray) / other
            return self._new_view(result, self.unit._copy(new_physical_unit))
        else:
            return super().__truediv__(other)

    def __itruediv__(self, other):
        if isinstance(other, numbers.Number):
            new_physical_unit = self.unit.physical_unit ** (1 / other)
            function_view = self._function_view
            function_view /= other
            self._set_unit(self.unit._copy(new_physical_unit))
            return self
        else:
            return super().__itruediv__(other)

    def __pow__(self, other):
        # We check if this power is OK by applying it first to the unit.
        try:
            other = float(other)
        except TypeError:
            return NotImplemented
        new_unit = self.unit**other
        new_value = self.view(np.ndarray) ** other
        return self._new_view(new_value, new_unit)

    def __ilshift__(self, other):
        # In-place unit conversion (``<<=``) to another function unit.
        try:
            other = Unit(other)
        except UnitTypeError:
            return NotImplemented

        if not isinstance(other, self._unit_class):
            return NotImplemented

        try:
            factor = self.unit.physical_unit._to(other.physical_unit)
        except UnitConversionError:
            # Maybe via equivalencies?  Now we do make a temporary copy.
            try:
                value = self._to_value(other)
            except UnitConversionError:
                return NotImplemented

            self.view(np.ndarray)[...] = value
        else:
            # Direct scale factor: shift the logarithmic value in place.
            self.view(np.ndarray)[...] += self.unit.from_physical(factor)

        self._set_unit(other)
        return self

    # Methods that do not work for function units generally but are OK for
    # logarithmic units as they imply differences and independence of
    # physical unit.
    def var(self, axis=None, dtype=None, out=None, ddof=0):
        # Variance of a log quantity carries the function unit squared.
        unit = self.unit.function_unit**2
        return self._wrap_function(np.var, axis, dtype, out=out, ddof=ddof, unit=unit)

    def std(self, axis=None, dtype=None, out=None, ddof=0):
        # A spread in log space is independent of the physical unit.
        unit = self.unit._copy(dimensionless_unscaled)
        return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof, unit=unit)

    def ptp(self, axis=None, out=None):
        unit = self.unit._copy(dimensionless_unscaled)
        return self._wrap_function(np.ptp, axis, out=out, unit=unit)

    def diff(self, n=1, axis=-1):
        unit = self.unit._copy(dimensionless_unscaled)
        return self._wrap_function(np.diff, n, axis, unit=unit)

    def ediff1d(self, to_end=None, to_begin=None):
        unit = self.unit._copy(dimensionless_unscaled)
        return self._wrap_function(np.ediff1d, to_end, to_begin, unit=unit)

    # Extend the set of functions allowed by FunctionQuantity with the
    # difference-based ones defined above.
    _supported_functions = FunctionQuantity._supported_functions | {
        getattr(np, function) for function in ("var", "std", "ptp", "diff", "ediff1d")
    }
class Dex(LogQuantity):
    """Logarithmic quantity expressed in dex (see `~astropy.units.DexUnit`)."""

    _unit_class = DexUnit
class Decibel(LogQuantity):
    """Logarithmic quantity expressed in dB (see `~astropy.units.DecibelUnit`)."""

    _unit_class = DecibelUnit
class Magnitude(LogQuantity):
    """Logarithmic quantity expressed in magnitudes (see `~astropy.units.MagUnit`)."""

    _unit_class = MagUnit
# Link the plain logarithmic units back to the function unit classes
# defined here.
# NOTE(review): presumably used when parsing strings like "mag(ct/s)" into
# function units — confirm against the units module.
dex._function_unit_class = DexUnit
dB._function_unit_class = DecibelUnit
mag._function_unit_class = MagUnit


# Pre-defined magnitude systems, anchored on the photometric zero-point units.
STmag = MagUnit(photometric.STflux)
STmag.__doc__ = "ST magnitude: STmag=-21.1 corresponds to 1 erg/s/cm2/A"

ABmag = MagUnit(photometric.ABflux)
ABmag.__doc__ = "AB magnitude: ABmag=-48.6 corresponds to 1 erg/s/cm2/Hz"

M_bol = MagUnit(photometric.Bol)
M_bol.__doc__ = (
    f"Absolute bolometric magnitude: M_bol=0 corresponds to L_bol0={photometric.Bol.si}"
)

m_bol = MagUnit(photometric.bol)
m_bol.__doc__ = (
    f"Apparent bolometric magnitude: m_bol=0 corresponds to f_bol0={photometric.bol.si}"
)
|
b07ede8dc4c6b44f3f6f037515c331a354c7cdc85bdf1f5a4c1fee8972db8619 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Function Units and Quantities."""
from abc import ABCMeta, abstractmethod
import numpy as np
from astropy.units import (
Quantity,
Unit,
UnitBase,
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
)
# Public API of this module.
__all__ = ["FunctionUnitBase", "FunctionQuantity"]
# Ufuncs that FunctionQuantity allows (see FunctionQuantity.__array_ufunc__):
# value tests/inspections and rounding operations.
SUPPORTED_UFUNCS = {
    getattr(np.core.umath, ufunc)
    for ufunc in (
        "isfinite",
        "isinf",
        "isnan",
        "sign",
        "signbit",
        "rint",
        "floor",
        "ceil",
        "trunc",
        "_ones_like",
        "ones_like",
        "positive",
    )
    # Guard with hasattr, since the available set varies with numpy version.
    if hasattr(np.core.umath, ufunc)
}

# TODO: the following could work if helper changed relative to Quantity:
# - spacing should return dimensionless, not same unit
# - negative should negate unit too,
# - add, subtract, comparisons can work if units added/subtracted

# Non-ufunc numpy functions that FunctionQuantity._wrap_function accepts.
SUPPORTED_FUNCTIONS = {
    getattr(np, function)
    for function in ("clip", "trace", "mean", "min", "max", "round")
}
# subclassing UnitBase or CompositeUnit was found to be problematic, requiring
# a large number of overrides. Hence, define new class.
class FunctionUnitBase(metaclass=ABCMeta):
    """Abstract base class for function units.

    Function units are functions containing a physical unit, such as dB(mW).
    Most of the arithmetic operations on function units are defined in this
    base class.

    While instantiation is defined, this class should not be used directly.
    Rather, subclasses should be used that override the abstract properties
    `_default_function_unit` and `_quantity_class`, and the abstract methods
    `from_physical`, and `to_physical`.

    Parameters
    ----------
    physical_unit : `~astropy.units.Unit` or `string`
        Unit that is encapsulated within the function unit.
        If not given, dimensionless.

    function_unit : `~astropy.units.Unit` or `string`
        By default, the same as the function unit set by the subclass.
    """

    # ↓↓↓ the following four need to be set by subclasses
    # Make this a property so we can ensure subclasses define it.
    @property
    @abstractmethod
    def _default_function_unit(self):
        """Default function unit corresponding to the function.

        This property should be overridden by subclasses, with, e.g.,
        `~astropy.unit.MagUnit` returning `~astropy.unit.mag`.
        """

    # This has to be a property because the function quantity will not be
    # known at unit definition time, as it gets defined after.
    @property
    @abstractmethod
    def _quantity_class(self):
        """Function quantity class corresponding to this function unit.

        This property should be overridden by subclasses, with, e.g.,
        `~astropy.unit.MagUnit` returning `~astropy.unit.Magnitude`.
        """

    @abstractmethod
    def from_physical(self, x):
        """Transformation from value in physical to value in function units.

        This method should be overridden by subclasses.  It is used to
        provide automatic transformations using an equivalency.
        """

    @abstractmethod
    def to_physical(self, x):
        """Transformation from value in function to value in physical units.

        This method should be overridden by subclasses.  It is used to
        provide automatic transformations using an equivalency.
        """

    # ↑↑↑ the above four need to be set by subclasses

    # have priority over arrays, regular units, and regular quantities
    __array_priority__ = 30000

    def __init__(self, physical_unit=None, function_unit=None):
        if physical_unit is None:
            physical_unit = dimensionless_unscaled
        else:
            physical_unit = Unit(physical_unit)

        # A physical unit equivalent to the function unit (e.g., mag itself)
        # cannot be wrapped inside the function unit.
        if not isinstance(physical_unit, UnitBase) or physical_unit.is_equivalent(
            self._default_function_unit
        ):
            raise UnitConversionError(f"{physical_unit} is not a physical unit.")

        if function_unit is None:
            function_unit = self._default_function_unit
        else:
            # any function unit should be equivalent to subclass default
            function_unit = Unit(getattr(function_unit, "function_unit", function_unit))
            if not function_unit.is_equivalent(self._default_function_unit):
                raise UnitConversionError(
                    f"Cannot initialize '{self.__class__.__name__}' instance with "
                    f"function unit '{function_unit}', as it is not equivalent to "
                    f"default function unit '{self._default_function_unit}'."
                )

        self._physical_unit = physical_unit
        self._function_unit = function_unit

    def _copy(self, physical_unit=None):
        """Copy oneself, possibly with a different physical unit."""
        if physical_unit is None:
            physical_unit = self.physical_unit
        return self.__class__(physical_unit, self.function_unit)

    @property
    def physical_unit(self):
        """The physical unit wrapped inside the function unit."""
        return self._physical_unit

    @property
    def function_unit(self):
        """The plain unit the function values are expressed in."""
        return self._function_unit

    @property
    def equivalencies(self):
        """List of equivalencies between function and physical units.

        Uses the `from_physical` and `to_physical` methods.
        """
        return [(self, self.physical_unit, self.to_physical, self.from_physical)]

    # ↓↓↓ properties/methods required to behave like a unit
    def decompose(self, bases=set()):
        """Copy the current unit with the physical unit decomposed.

        For details, see `~astropy.units.UnitBase.decompose`.
        """
        return self._copy(self.physical_unit.decompose(bases))

    @property
    def si(self):
        """Copy the current function unit with the physical unit in SI."""
        return self._copy(self.physical_unit.si)

    @property
    def cgs(self):
        """Copy the current function unit with the physical unit in CGS."""
        return self._copy(self.physical_unit.cgs)

    def _get_physical_type_id(self):
        """Get physical type corresponding to physical unit."""
        return self.physical_unit._get_physical_type_id()

    @property
    def physical_type(self):
        """Return the physical type of the physical unit (e.g., 'length')."""
        return self.physical_unit.physical_type

    def is_equivalent(self, other, equivalencies=[]):
        """
        Returns `True` if this unit is equivalent to ``other``.

        Parameters
        ----------
        other : `~astropy.units.Unit`, string, or tuple
            The unit to convert to. If a tuple of units is specified, this
            method returns true if the unit matches any of those in the tuple.

        equivalencies : list of tuple
            A list of equivalence pairs to try if the units are not
            directly convertible.  See :ref:`astropy:unit_equivalencies`.
            This list is in addition to the built-in equivalencies between the
            function unit and the physical one, as well as possible global
            defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`.
            Use `None` to turn off any global equivalencies.

        Returns
        -------
        bool
        """
        if isinstance(other, tuple):
            return any(self.is_equivalent(u, equivalencies) for u in other)

        # For a plain unit equivalent to our function unit (e.g., mag),
        # compare against dimensionless; otherwise compare physical units.
        other_physical_unit = getattr(
            other,
            "physical_unit",
            (
                dimensionless_unscaled
                if self.function_unit.is_equivalent(other)
                else other
            ),
        )

        return self.physical_unit.is_equivalent(other_physical_unit, equivalencies)

    def to(self, other, value=1.0, equivalencies=[]):
        """
        Return the converted values in the specified unit.

        Parameters
        ----------
        other : `~astropy.units.Unit`, `~astropy.units.FunctionUnitBase`, or str
            The unit to convert to.

        value : int, float, or scalar array-like, optional
            Value(s) in the current unit to be converted to the specified unit.
            If not provided, defaults to 1.0.

        equivalencies : list of tuple
            A list of equivalence pairs to try if the units are not
            directly convertible.  See :ref:`astropy:unit_equivalencies`.
            This list is meant to treat only equivalencies between different
            physical units; the built-in equivalency between the function
            unit and the physical one is automatically taken into account.

        Returns
        -------
        values : scalar or array
            Converted value(s). Input value sequences are returned as
            numpy arrays.

        Raises
        ------
        `~astropy.units.UnitsError`
            If units are inconsistent.
        """
        # conversion to one's own physical unit should be fastest
        if other is self.physical_unit:
            return self.to_physical(value)

        other_function_unit = getattr(other, "function_unit", other)
        if self.function_unit.is_equivalent(other_function_unit):
            # when other is an equivalent function unit:
            # first convert physical units to other's physical units
            other_physical_unit = getattr(
                other, "physical_unit", dimensionless_unscaled
            )
            if self.physical_unit != other_physical_unit:
                value_other_physical = self.physical_unit.to(
                    other_physical_unit, self.to_physical(value), equivalencies
                )
                # make function unit again, in own system
                value = self.from_physical(value_other_physical)

            # convert possible difference in function unit (e.g., dex->dB)
            return self.function_unit.to(other_function_unit, value)

        else:
            try:
                # when other is not a function unit
                return self.physical_unit.to(
                    other, self.to_physical(value), equivalencies
                )
            except UnitConversionError as e:
                if self.function_unit == Unit("mag"):
                    # One can get to raw magnitudes via math that strips the
                    # dimensions off. Include extra information in the
                    # exception to remind users of this.
                    msg = "Did you perhaps subtract magnitudes so the unit got lost?"
                    e.args += (msg,)
                    raise e
                else:
                    raise

    def is_unity(self):
        # A function unit is never the trivial unit 1.
        return False

    def __eq__(self, other):
        # Equal if both the physical and the function unit match; a plain
        # unit counts as having a dimensionless physical unit.
        return self.physical_unit == getattr(
            other, "physical_unit", dimensionless_unscaled
        ) and self.function_unit == getattr(other, "function_unit", other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __rlshift__(self, other):
        """Unit conversion operator ``<<``"""
        try:
            return self._quantity_class(other, self, copy=False, subok=True)
        except Exception:
            return NotImplemented

    def __mul__(self, other):
        if isinstance(other, (str, UnitBase, FunctionUnitBase)):
            if self.physical_unit == dimensionless_unscaled:
                # If dimensionless, drop back to normal unit and retry.
                return self.function_unit * other
            else:
                raise UnitsError(
                    "Cannot multiply a function unit with a physical dimension "
                    "with any unit."
                )
        else:
            # Anything not like a unit, try initialising as a function quantity.
            try:
                return self._quantity_class(other, unit=self)
            except Exception:
                return NotImplemented

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        if isinstance(other, (str, UnitBase, FunctionUnitBase)):
            if self.physical_unit == dimensionless_unscaled:
                # If dimensionless, drop back to normal unit and retry.
                return self.function_unit / other
            else:
                raise UnitsError(
                    "Cannot divide a function unit with a physical dimension "
                    "by any unit."
                )
        else:
            # Anything not like a unit, try initialising as a function quantity.
            try:
                return self._quantity_class(1.0 / other, unit=self)
            except Exception:
                return NotImplemented

    def __rtruediv__(self, other):
        if isinstance(other, (str, UnitBase, FunctionUnitBase)):
            if self.physical_unit == dimensionless_unscaled:
                # If dimensionless, drop back to normal unit and retry.
                return other / self.function_unit
            else:
                raise UnitsError(
                    "Cannot divide a function unit with a physical dimension "
                    "into any unit"
                )
        else:
            # Don't know what to do with anything not like a unit.
            return NotImplemented

    def __pow__(self, power):
        if power == 0:
            return dimensionless_unscaled
        elif power == 1:
            return self._copy()

        if self.physical_unit == dimensionless_unscaled:
            # Dimensionless: drop to the plain function unit and raise that.
            return self.function_unit**power

        raise UnitsError(
            "Cannot raise a function unit with a physical dimension "
            "to any power but 0 or 1."
        )

    def __pos__(self):
        return self._copy()

    def to_string(self, format="generic"):
        """
        Output the unit in the given format as a string.

        The physical unit is appended, within parentheses, to the function
        unit, as in "dB(mW)", with both units set using the given format

        Parameters
        ----------
        format : `astropy.units.format.Base` instance or str
            The name of a format or a formatter object.  If not
            provided, defaults to the generic format.
        """
        if format not in ("generic", "unscaled", "latex", "latex_inline"):
            raise ValueError(
                f"Function units cannot be written in {format} "
                "format. Only 'generic', 'unscaled', 'latex' and "
                "'latex_inline' are supported."
            )

        self_str = self.function_unit.to_string(format)
        pu_str = self.physical_unit.to_string(format)
        if pu_str == "":
            pu_str = "1"

        if format.startswith("latex"):
            # need to strip leading and trailing "$"
            self_str += rf"$\mathrm{{\left( {pu_str[1:-1]} \right)}}$"
        else:
            self_str += f"({pu_str})"

        return self_str

    def __str__(self):
        """Return string representation for unit."""
        self_str = str(self.function_unit)
        pu_str = str(self.physical_unit)
        if pu_str:
            self_str += f"({pu_str})"

        return self_str

    def __repr__(self):
        # By default, try to give a representation using `Unit(<string>)`,
        # with string such that parsing it would give the correct FunctionUnit.
        if callable(self.function_unit):
            return f'Unit("{self.to_string()}")'

        else:
            return '{}("{}"{})'.format(
                self.__class__.__name__,
                self.physical_unit,
                ""
                if self.function_unit is self._default_function_unit
                else f', unit="{self.function_unit}"',
            )

    def _repr_latex_(self):
        """
        Generate latex representation of unit name.  This is used by
        the IPython notebook to print a unit with a nice layout.

        Returns
        -------
        Latex string
        """
        return self.to_string("latex")

    def __hash__(self):
        return hash((self.function_unit, self.physical_unit))
class FunctionQuantity(Quantity):
"""A representation of a (scaled) function of a number with a unit.
Function quantities are quantities whose units are functions containing a
physical unit, such as dB(mW). Most of the arithmetic operations on
function quantities are defined in this base class.
While instantiation is also defined here, this class should not be
instantiated directly. Rather, subclasses should be made which have
``_unit_class`` pointing back to the corresponding function unit class.
Parameters
----------
value : number, quantity-like, or sequence thereof
The numerical value of the function quantity. If a number or
a `~astropy.units.Quantity` with a function unit, it will be converted
to ``unit`` and the physical unit will be inferred from ``unit``.
If a `~astropy.units.Quantity` with just a physical unit, it will
converted to the function unit, after, if necessary, converting it to
the physical unit inferred from ``unit``.
unit : str, `~astropy.units.UnitBase`, or `~astropy.units.FunctionUnitBase`, optional
For an `~astropy.units.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. Ignored
if the input does not need to be converted and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be of the
class used. Otherwise, subclasses will be passed through.
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`~astropy.units.Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not a `~astropy.units.FunctionUnitBase`
or `~astropy.units.Unit` object, or a parseable string unit.
"""
_unit_class = None
"""Default `~astropy.units.FunctionUnitBase` subclass.
This should be overridden by subclasses.
"""
# Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit.
__array_priority__ = 40000
# Define functions that work on FunctionQuantity.
_supported_ufuncs = SUPPORTED_UFUNCS
_supported_functions = SUPPORTED_FUNCTIONS
    def __new__(
        cls,
        value,
        unit=None,
        dtype=np.inexact,
        copy=True,
        order=None,
        subok=False,
        ndmin=0,
    ):
        # See the class docstring for the meaning of the parameters.
        if unit is not None:
            # Convert possible string input to a (function) unit.
            unit = Unit(unit)

        if not isinstance(unit, FunctionUnitBase):
            # By default, use value's physical unit.
            value_unit = getattr(value, "unit", None)
            if value_unit is None:
                # if iterable, see if first item has a unit
                # (mixed lists fail in super call below).
                try:
                    value_unit = getattr(value[0], "unit")
                except Exception:
                    pass
            physical_unit = getattr(value_unit, "physical_unit", value_unit)
            # Wrap the (possibly plain) unit in the proper function unit class.
            unit = cls._unit_class(physical_unit, function_unit=unit)

        # initialise!
        return super().__new__(
            cls,
            value,
            unit,
            dtype=dtype,
            copy=copy,
            order=order,
            subok=subok,
            ndmin=ndmin,
        )
# ↓↓↓ properties not found in Quantity
    @property
    def physical(self):
        """The physical quantity corresponding to the function one."""
        return self.to(self.unit.physical_unit)
    @property
    def _function_view(self):
        """View as Quantity with function unit, dropping the physical unit.

        Use `~astropy.units.quantity.Quantity.value` for just the value.
        """
        return self._new_view(unit=self.unit.function_unit)
# ↓↓↓ methods overridden to change the behavior
    @property
    def si(self):
        """Return a copy with the physical unit in SI units."""
        return self.__class__(self.physical.si)

    @property
    def cgs(self):
        """Return a copy with the physical unit in CGS units."""
        return self.__class__(self.physical.cgs)

    # NOTE(review): mutable default is harmless here — ``bases`` is only
    # passed through, never mutated.
    def decompose(self, bases=[]):
        """Generate a new instance with the physical unit decomposed.

        For details, see `~astropy.units.Quantity.decompose`.
        """
        return self.__class__(self.physical.decompose(bases))
# ↓↓↓ methods overridden to add additional behavior
def __quantity_subclass__(self, unit):
if isinstance(unit, FunctionUnitBase):
return self.__class__, True
else:
return super().__quantity_subclass__(unit)[0], False
    def _set_unit(self, unit):
        """Set the unit, coercing plain units into the function unit class."""
        if not isinstance(unit, self._unit_class):
            # Have to take care of, e.g., (10*u.mag).view(u.Magnitude)
            try:
                # "or 'nonsense'" ensures `None` breaks, just in case.
                unit = self._unit_class(function_unit=unit or "nonsense")
            except Exception:
                raise UnitTypeError(
                    f"{type(self).__name__} instances require"
                    f" {self._unit_class.__name__} function units, so cannot set it to"
                    f" '{unit}'."
                )

        self._unit = unit
    def __array_ufunc__(self, function, method, *inputs, **kwargs):
        """Dispatch ufuncs, rejecting any not in ``_supported_ufuncs``."""
        # TODO: it would be more logical to have this in Quantity already,
        # instead of in UFUNC_HELPERS, where it cannot be overridden.
        # And really it should just return NotImplemented, since possibly
        # another argument might know what to do.
        if function not in self._supported_ufuncs:
            raise UnitTypeError(
                f"Cannot use ufunc '{function.__name__}' with function quantities"
            )

        return super().__array_ufunc__(function, method, *inputs, **kwargs)
def _maybe_new_view(self, result):
"""View as function quantity if the unit is unchanged.
Used for the case that self.unit.physical_unit is dimensionless,
where multiplication and division is done using the Quantity
equivalent, to transform them back to a FunctionQuantity if possible.
"""
if isinstance(result, Quantity) and result.unit == self.unit:
return self._new_view(result)
else:
return result
# ↓↓↓ methods overridden to change behavior
def __mul__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view * other)
raise UnitTypeError(
"Cannot multiply function quantities which are not dimensionless "
"with anything."
)
def __truediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view / other)
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless by anything."
)
def __rtruediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._maybe_new_view(self._function_view.__rtruediv__(other))
raise UnitTypeError(
"Cannot divide function quantities which are not dimensionless "
"into anything."
)
    def _comparison(self, other, comparison_func):
        """Do a comparison between self and other, raising UnitsError when
        other cannot be converted to self because it has different physical
        unit, and returning NotImplemented when there are other errors."""
        try:
            # will raise a UnitsError if physical units not equivalent
            other_in_own_unit = self._to_own_unit(other, check_precision=False)
        except UnitsError as exc:
            # If we are dimensionless, a plain number/Quantity may still be
            # comparable via the plain-Quantity view; otherwise re-raise.
            if self.unit.physical_unit != dimensionless_unscaled:
                raise exc
            try:
                other_in_own_unit = self._function_view._to_own_unit(
                    other, check_precision=False
                )
            except Exception:
                # Fallback failed too: report the original UnitsError, which
                # is the more informative of the two.
                raise exc
        except Exception:
            # Anything other than a unit mismatch (e.g. an unconvertible type)
            # defers to the other operand.
            return NotImplemented
        return comparison_func(other_in_own_unit)
def __eq__(self, other):
try:
return self._comparison(other, self.value.__eq__)
except UnitsError:
return False
def __ne__(self, other):
try:
return self._comparison(other, self.value.__ne__)
except UnitsError:
return True
def __gt__(self, other):
return self._comparison(other, self.value.__gt__)
def __ge__(self, other):
return self._comparison(other, self.value.__ge__)
def __lt__(self, other):
return self._comparison(other, self.value.__lt__)
def __le__(self, other):
return self._comparison(other, self.value.__le__)
def __lshift__(self, other):
"""Unit conversion operator `<<`"""
try:
other = Unit(other, parse_strict="silent")
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
# Ensure Quantity methods are used only if they make sense.
def _wrap_function(self, function, *args, **kwargs):
if function in self._supported_functions:
return super()._wrap_function(function, *args, **kwargs)
# For dimensionless, we can convert to regular quantities.
if all(
arg.unit.physical_unit == dimensionless_unscaled
for arg in (self,) + args
if (hasattr(arg, "unit") and hasattr(arg.unit, "physical_unit"))
):
args = tuple(getattr(arg, "_function_view", arg) for arg in args)
return self._function_view._wrap_function(function, *args, **kwargs)
raise TypeError(
f"Cannot use method that uses function '{function.__name__}' with "
"function quantities that are not dimensionless."
)
# Override functions that are supported but do not use _wrap_function
# in Quantity.
    def max(self, axis=None, out=None, keepdims=False):
        # Route through _wrap_function so function-unit handling is applied.
        return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
    def min(self, axis=None, out=None, keepdims=False):
        # Route through _wrap_function so function-unit handling is applied.
        return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
    def sum(self, axis=None, dtype=None, out=None, keepdims=False):
        # Route through _wrap_function so function-unit handling is applied.
        return self._wrap_function(np.sum, axis, dtype, out=out, keepdims=keepdims)
    def cumsum(self, axis=None, dtype=None, out=None):
        # Route through _wrap_function so function-unit handling is applied.
        return self._wrap_function(np.cumsum, axis, dtype, out=out)
    def clip(self, a_min, a_max, out=None):
        # Convert the bounds to our own unit first, then clip the raw values.
        return self._wrap_function(
            np.clip, self._to_own_unit(a_min), self._to_own_unit(a_max), out=out
        )
|
151d074751e60477eef7fab601cde66cdb2d420b8c78b71b331bbe7a1a90deee | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines units that can also be used as functions of other units.
If called, their arguments are used to initialize the corresponding function
unit (e.g., ``u.mag(u.ct/u.s)``). Note that the prefixed versions cannot be
called, as it would be unclear what, e.g., ``u.mmag(u.ct/u.s)`` would mean.
"""
from astropy.units.core import _add_prefixes
from .mixin import IrreducibleFunctionUnit, RegularFunctionUnit
_ns = globals()
###########################################################################
# Logarithmic units
# These calls are what core.def_unit would do, but we need to use the callable
# unit versions. The actual function unit classes get added in logarithmic.
dex = IrreducibleFunctionUnit(
["dex"], namespace=_ns, doc="Dex: Base 10 logarithmic unit"
)
dB = RegularFunctionUnit(
["dB", "decibel"],
0.1 * dex,
namespace=_ns,
doc="Decibel: ten per base 10 logarithmic unit",
)
mag = RegularFunctionUnit(
["mag"],
-0.4 * dex,
namespace=_ns,
doc="Astronomical magnitude: -2.5 per base 10 logarithmic unit",
)
_add_prefixes(mag, namespace=_ns, prefixes=True)
###########################################################################
# CLEANUP
del RegularFunctionUnit
del IrreducibleFunctionUnit
###########################################################################
# DOCSTRING
if __doc__ is not None:
# This generates a docstring for this module that describes all of the
# standard units defined here.
from astropy.units.utils import generate_unit_summary as _generate_unit_summary
__doc__ += _generate_unit_summary(globals())
|
016cf3223b37ccda1f2f115381024a7a86963885006c8abf4ec69ff47b0458bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.units.core import IrreducibleUnit, Unit
class FunctionMixin:
    """Make ``UnitBase`` subclasses callable.

    Calling an instance forwards to ``_function_unit_class`` (which concrete
    subclasses must define), constructing the corresponding function unit
    with the instance as the function unit and the argument as the physical
    unit.

    See units.py and logarithmic.py for usage.
    """

    def __call__(self, unit=None):
        return self._function_unit_class(function_unit=self, physical_unit=unit)
class IrreducibleFunctionUnit(FunctionMixin, IrreducibleUnit):
    """Irreducible unit (e.g., ``dex``) that can be called to make a function unit."""

    pass
class RegularFunctionUnit(FunctionMixin, Unit):
    """Regular unit (e.g., ``mag``, ``dB``) that can be called to make a function unit."""

    pass
|
d472e4fb1dd08ebe239c378baedca5c156e44f57d9c93d3e6fcd936b0b62e1f3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
from collections.abc import MappingView
from types import MappingProxyType
import numpy as np
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.angles import Angle
from astropy.coordinates.attributes import (
CoordinateAttribute,
DifferentialAttribute,
QuantityAttribute,
)
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
base_doc,
frame_transform_graph,
)
from astropy.coordinates.errors import ConvertError
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import AffineTransform
from astropy.utils.decorators import classproperty, deprecated, format_doc
from astropy.utils.state import ScienceState
from .icrs import ICRS
__all__ = ["Galactocentric"]
# Measured by minimizing the difference between a plane of coordinates along
# l=0, b=[-90,90] and the Galactocentric x-z plane.
# This is the extra roll needed to align the final z axis with the Galactic
# y-z plane (see `Galactocentric.get_roll0`).
# This is not used directly, but accessed via `get_roll0`. We define it here to
# prevent having to create new Angle objects every time `get_roll0` is called.
_ROLL0 = Angle(58.5986320306 * u.degree)
class _StateProxy(MappingView):
"""
`~collections.abc.MappingView` with a read-only ``getitem`` through
`~types.MappingProxyType`.
"""
def __init__(self, mapping):
super().__init__(mapping)
self._mappingproxy = MappingProxyType(self._mapping) # read-only
def __getitem__(self, key):
"""Read-only ``getitem``."""
return self._mappingproxy[key]
def __deepcopy__(self, memo):
return copy.deepcopy(self._mapping, memo=memo)
class galactocentric_frame_defaults(ScienceState):
    """Global setting of default values for the frame attributes in the `~astropy.coordinates.Galactocentric` frame.

    These constants may be updated in future versions of ``astropy``. Note
    that when using `~astropy.coordinates.Galactocentric`, changing values
    here will not affect any attributes that are set explicitly by passing
    values in to the `~astropy.coordinates.Galactocentric`
    initializer. Modifying these defaults will only affect the frame attribute
    values when using the frame as, e.g., ``Galactocentric`` or
    ``Galactocentric()`` with no explicit arguments.

    This class controls the parameter settings by specifying a string name,
    with the following pre-specified options:

    - 'pre-v4.0': The original (pre-astropy-v4.0) frame attribute values.
    - 'v4.0': The attribute values as updated in Astropy version 4.0.
    - 'latest': An alias of the most recent parameter set (currently: 'v4.0');
      this is used when no value has been set explicitly.

    Alternatively, user-defined parameter settings may be registered, with
    :meth:`~astropy.coordinates.galactocentric_frame_defaults.register`,
    and used identically as pre-specified parameter sets. At minimum,
    registrations must have unique names and a dictionary of parameters
    with keys "galcen_coord", "galcen_distance", "galcen_v_sun", "z_sun",
    "roll". See examples below.

    This class also tracks the references for all parameter values in the
    attribute ``references``, as well as any further information held in the
    registry. The pre-specified options can be extended to include similar
    state information as user-defined parameter settings -- for example, to add
    parameter uncertainties.

    The preferred method for getting a parameter set and metadata, by name, is
    :meth:`~astropy.coordinates.galactocentric_frame_defaults.get_from_registry`
    since it ensures the immutability of the registry.

    See :ref:`astropy:astropy-coordinates-galactocentric-defaults` for more
    information.

    Examples
    --------
    The default `~astropy.coordinates.Galactocentric` frame parameters can be
    modified globally::

        >>> from astropy.coordinates import galactocentric_frame_defaults
        >>> _ = galactocentric_frame_defaults.set('v4.0') # doctest: +SKIP
        >>> Galactocentric() # doctest: +SKIP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg)>
        >>> _ = galactocentric_frame_defaults.set('pre-v4.0') # doctest: +SKIP
        >>> Galactocentric() # doctest: +SKIP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>

    The default parameters can also be updated by using this class as a context
    manager::

        >>> with galactocentric_frame_defaults.set('pre-v4.0'):
        ...     print(Galactocentric()) # doctest: +FLOAT_CMP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.3 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>

    Again, changing the default parameter values will not affect frame
    attributes that are explicitly specified::

        >>> import astropy.units as u
        >>> with galactocentric_frame_defaults.set('pre-v4.0'):
        ...     print(Galactocentric(galcen_distance=8.0*u.kpc)) # doctest: +FLOAT_CMP
        <Galactocentric Frame (galcen_coord=<ICRS Coordinate: (ra, dec) in deg
            (266.4051, -28.936175)>, galcen_distance=8.0 kpc, galcen_v_sun=(11.1, 232.24, 7.25) km / s, z_sun=27.0 pc, roll=0.0 deg)>

    Additional parameter sets may be registered, for instance to use the
    Dehnen & Binney (1998) measurements of the solar motion. We can also
    add metadata, such as the 1-sigma errors. In this example we will modify
    the required key "parameters", change the recommended key "references" to
    match "parameters", and add the extra key "error" (any key can be added)::

        >>> state = galactocentric_frame_defaults.get_from_registry("v4.0")
        >>> state["parameters"]["galcen_v_sun"] = (10.00, 225.25, 7.17) * (u.km / u.s)
        >>> state["references"]["galcen_v_sun"] = "https://ui.adsabs.harvard.edu/full/1998MNRAS.298..387D"
        >>> state["error"] = {"galcen_v_sun": (0.36, 0.62, 0.38) * (u.km / u.s)}
        >>> galactocentric_frame_defaults.register(name="DB1998", **state)

    Just as in the previous examples, the new parameter set can be retrieved with::

        >>> state = galactocentric_frame_defaults.get_from_registry("DB1998")
        >>> print(state["error"]["galcen_v_sun"]) # doctest: +FLOAT_CMP
        [0.36 0.62 0.38] km / s
    """

    # Name of the parameter set that 'latest' (and an unset value) resolves to.
    _latest_value = "v4.0"
    _value = None
    _references = None
    _state = dict()  # all other data

    # Note: _StateProxy() produces read-only view of enclosed mapping.
    # Maps parameter-set name -> {"parameters": ..., "references": ..., ...}.
    _registry = {
        "v4.0": {
            "parameters": _StateProxy(
                {
                    "galcen_coord": ICRS(
                        ra=266.4051 * u.degree, dec=-28.936175 * u.degree
                    ),
                    "galcen_distance": 8.122 * u.kpc,
                    "galcen_v_sun": r.CartesianDifferential(
                        [12.9, 245.6, 7.78] * (u.km / u.s)
                    ),
                    "z_sun": 20.8 * u.pc,
                    "roll": 0 * u.deg,
                }
            ),
            "references": _StateProxy(
                {
                    "galcen_coord": (
                        "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R"
                    ),
                    "galcen_distance": (
                        "https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G"
                    ),
                    "galcen_v_sun": [
                        "https://ui.adsabs.harvard.edu/abs/2018RNAAS...2..210D",
                        "https://ui.adsabs.harvard.edu/abs/2018A%26A...615L..15G",
                        "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R",
                    ],
                    "z_sun": "https://ui.adsabs.harvard.edu/abs/2019MNRAS.482.1417B",
                    "roll": None,
                }
            ),
        },
        "pre-v4.0": {
            "parameters": _StateProxy(
                {
                    "galcen_coord": ICRS(
                        ra=266.4051 * u.degree, dec=-28.936175 * u.degree
                    ),
                    "galcen_distance": 8.3 * u.kpc,
                    "galcen_v_sun": r.CartesianDifferential(
                        [11.1, 220 + 12.24, 7.25] * (u.km / u.s)
                    ),
                    "z_sun": 27.0 * u.pc,
                    "roll": 0 * u.deg,
                }
            ),
            "references": _StateProxy(
                {
                    "galcen_coord": (
                        "https://ui.adsabs.harvard.edu/abs/2004ApJ...616..872R"
                    ),
                    "galcen_distance": (
                        "https://ui.adsabs.harvard.edu/#abs/2009ApJ...692.1075G"
                    ),
                    "galcen_v_sun": [
                        "https://ui.adsabs.harvard.edu/#abs/2010MNRAS.403.1829S",
                        "https://ui.adsabs.harvard.edu/#abs/2015ApJS..216...29B",
                    ],
                    "z_sun": "https://ui.adsabs.harvard.edu/#abs/2001ApJ...553..184C",
                    "roll": None,
                }
            ),
        },
    }

    @classproperty  # read-only
    def parameters(cls):
        # Frame-attribute dict of the currently-selected parameter set.
        return cls._value

    @classproperty  # read-only
    def references(cls):
        # References corresponding to ``parameters``.
        return cls._references

    @classmethod
    def get_from_registry(cls, name: str):
        """
        Return Galactocentric solar parameters and metadata given string names
        for the parameter sets. This method ensures the returned state is a
        mutable copy, so any changes made do not affect the registry state.

        Parameters
        ----------
        name : str
            Name of a registered parameter set, or 'latest'.

        Returns
        -------
        state : dict
            Copy of the registry for the string name.
            Should contain, at minimum:

            - "parameters": dict
                Galactocentric solar parameters
            - "references" : Dict[str, Union[str, Sequence[str]]]
                References for "parameters".
                Fields are str or sequence of str.

        Raises
        ------
        KeyError
            If invalid string input to registry
            to retrieve solar parameters for Galactocentric frame.
        """
        # Resolve the meaning of 'latest': latest parameter set is from v4.0
        # - update this as newer parameter choices are added
        if name == "latest":
            name = cls._latest_value
        # Get the state from the registry.
        # Copy to ensure registry is immutable to modifications of "_value".
        # Raises KeyError if `name` is invalid string input to registry
        # to retrieve solar parameters for Galactocentric frame.
        state = copy.deepcopy(cls._registry[name])  # ensure mutable
        return state

    @deprecated("v4.2", alternative="`get_from_registry`")
    @classmethod
    def get_solar_params_from_string(cls, arg):
        """
        Return Galactocentric solar parameters given string names
        for the parameter sets.

        Returns
        -------
        parameters : dict
            Copy of Galactocentric solar parameters from registry

        Raises
        ------
        KeyError
            If invalid string input to registry
            to retrieve solar parameters for Galactocentric frame.
        """
        return cls.get_from_registry(arg)["parameters"]

    @classmethod
    def validate(cls, value):
        # Called by ScienceState machinery; turns the user-supplied value
        # (name, dict, or frame instance) into a parameter dict.
        if value is None:
            # No explicit value: fall back to the most recent parameter set.
            value = cls._latest_value
        if isinstance(value, str):
            state = cls.get_from_registry(value)
            cls._references = state["references"]
            cls._state = state
            parameters = state["parameters"]
        elif isinstance(value, dict):
            parameters = value
        elif isinstance(value, Galactocentric):
            # turn the frame instance into a dict of frame attributes
            parameters = dict()
            for k in value.frame_attributes:
                parameters[k] = getattr(value, k)
            cls._references = value.frame_attribute_references.copy()
            cls._state = dict(parameters=parameters, references=cls._references)
        else:
            raise ValueError(
                "Invalid input to retrieve solar parameters for Galactocentric frame:"
                " input must be a string, dict, or Galactocentric instance"
            )
        return parameters

    @classmethod
    def register(cls, name: str, parameters: dict, references=None, **meta: dict):
        """Register a set of parameters.

        Parameters
        ----------
        name : str
            The registration name for the parameter and metadata set.
        parameters : dict
            The solar parameters for Galactocentric frame.
        references : dict or None, optional
            References for contents of `parameters`.
            None becomes empty dict.
        **meta : dict, optional
            Any other properties to register.
        """
        # check on contents of `parameters`
        must_have = {"galcen_coord", "galcen_distance", "galcen_v_sun", "z_sun", "roll"}
        missing = must_have.difference(parameters)
        if missing:
            raise ValueError(f"Missing parameters: {missing}")
        references = references or {}  # None -> {}
        state = dict(parameters=parameters, references=references)
        state.update(meta)  # meta never has keys "parameters" or "references"
        cls._registry[name] = state
doc_components = """
x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`x` position component.
y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`y` position component.
z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`z` position component.
v_x : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_x` velocity component.
v_y : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_y` velocity component.
v_z : `~astropy.units.Quantity`, optional
Cartesian, Galactocentric :math:`v_z` velocity component.
"""
doc_footer = """
Other parameters
----------------
galcen_coord : `~astropy.coordinates.ICRS`, optional, keyword-only
The ICRS coordinates of the Galactic center.
galcen_distance : `~astropy.units.Quantity`, optional, keyword-only
The distance from the sun to the Galactic center.
galcen_v_sun : `~astropy.coordinates.CartesianDifferential`, `~astropy.units.Quantity` ['speed'], optional, keyword-only
The velocity of the sun *in the Galactocentric frame* as Cartesian
velocity components.
z_sun : `~astropy.units.Quantity` ['length'], optional, keyword-only
The distance from the sun to the Galactic midplane.
roll : `~astropy.coordinates.Angle`, optional, keyword-only
The angle to rotate about the final x-axis, relative to the
orientation for Galactic. For example, if this roll angle is 0,
the final x-z plane will align with the Galactic coordinates x-z
plane. Unless you really know what this means, you probably should
not change this!
Examples
--------
To transform to the Galactocentric frame with the default
frame attributes, pass the uninstantiated class name to the
``transform_to()`` method of a `~astropy.coordinates.SkyCoord` object::
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> c = coord.SkyCoord(ra=[158.3122, 24.5] * u.degree,
... dec=[-17.3, 81.52] * u.degree,
... distance=[11.5, 24.12] * u.kpc,
... frame='icrs')
>>> c.transform_to(coord.Galactocentric) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.122 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.43489286, -9.40062188, 6.51345359),
(-21.11044918, 18.76334013, 7.83175149)]>
To specify a custom set of parameters, you have to include extra keyword
arguments when initializing the Galactocentric frame object::
>>> c.transform_to(coord.Galactocentric(galcen_distance=8.1*u.kpc)) # doctest: +FLOAT_CMP
<SkyCoord (Galactocentric: galcen_coord=<ICRS Coordinate: (ra, dec) in deg
(266.4051, -28.936175)>, galcen_distance=8.1 kpc, galcen_v_sun=(12.9, 245.6, 7.78) km / s, z_sun=20.8 pc, roll=0.0 deg): (x, y, z) in kpc
[( -9.41284763, -9.40062188, 6.51346272),
(-21.08839478, 18.76334013, 7.83184184)]>
Similarly, transforming from the Galactocentric frame to another coordinate frame::
>>> c = coord.SkyCoord(x=[-8.3, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[0.027, 24.12] * u.kpc,
... frame=coord.Galactocentric)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 88.22423301, 29.88672864, 0.17813456),
(289.72864549, 49.9865043 , 85.93949064)]>
Or, with custom specification of the Galactic center::
>>> c = coord.SkyCoord(x=[-8.0, 4.5] * u.kpc,
... y=[0., 81.52] * u.kpc,
... z=[21.0, 24120.0] * u.pc,
... frame=coord.Galactocentric,
... z_sun=21 * u.pc, galcen_distance=8. * u.kpc)
>>> c.transform_to(coord.ICRS) # doctest: +FLOAT_CMP
<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)
[( 86.2585249 , 28.85773187, 2.75625475e-05),
(289.77285255, 50.06290457, 8.59216010e+01)]>
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class Galactocentric(BaseCoordinateFrame):
    r"""
    A coordinate or frame in the Galactocentric system.

    This frame allows specifying the Sun-Galactic center distance, the height of
    the Sun above the Galactic midplane, and the solar motion relative to the
    Galactic center. However, as there is no modern standard definition of a
    Galactocentric reference frame, it is important to pay attention to the
    default values used in this class if precision is important in your code.

    The default values of the parameters of this frame are taken from the
    parameter set currently selected in
    `~astropy.coordinates.galactocentric_frame_defaults`, which resolves to
    ``'latest'`` (an alias of the most recent set, currently ``'v4.0'``) when
    nothing has been set explicitly. The original (pre-astropy-v4.0) values can
    be restored with ``galactocentric_frame_defaults.set('pre-v4.0')``, and
    other parameter set names may be added in future versions. To find out the
    scientific papers that the current default parameters are derived from, use
    ``galcen.frame_attribute_references`` (where ``galcen`` is an instance of
    this frame), which will update even if the default parameter set is changed.

    The position of the Sun is assumed to be on the x axis of the final,
    right-handed system. That is, the x axis points from the position of
    the Sun projected to the Galactic midplane to the Galactic center --
    roughly towards :math:`(l,b) = (0^\circ,0^\circ)`. For the default
    transformation (:math:`{\rm roll}=0^\circ`), the y axis points roughly
    towards Galactic longitude :math:`l=90^\circ`, and the z axis points
    roughly towards the North Galactic Pole (:math:`b=90^\circ`).

    For a more detailed look at the math behind this transformation, see
    the document :ref:`astropy:coordinates-galactocentric`.

    The frame attributes are listed under **Other Parameters**.
    """

    default_representation = r.CartesianRepresentation
    default_differential = r.CartesianDifferential

    # frame attributes; defaults are filled in by __init__ from the
    # galactocentric_frame_defaults ScienceState.
    galcen_coord = CoordinateAttribute(frame=ICRS)
    galcen_distance = QuantityAttribute(unit=u.kpc)
    galcen_v_sun = DifferentialAttribute(allowed_classes=[r.CartesianDifferential])
    z_sun = QuantityAttribute(unit=u.pc)
    roll = QuantityAttribute(unit=u.deg)

    def __init__(self, *args, **kwargs):
        # Set default frame attribute values based on the ScienceState instance
        # for the solar parameters defined above
        default_params = galactocentric_frame_defaults.get()
        self.frame_attribute_references = (
            galactocentric_frame_defaults.references.copy()
        )
        for k in default_params:
            if k in kwargs:
                # If a frame attribute is set by the user, remove its reference
                self.frame_attribute_references.pop(k, None)
            # Keep the frame attribute if it is set by the user, otherwise use
            # the default value
            kwargs[k] = kwargs.get(k, default_params[k])
        super().__init__(*args, **kwargs)

    @classmethod
    def get_roll0(cls):
        """The additional roll angle (about the final x axis) necessary to align the
        final z axis to match the Galactic yz-plane. Setting the ``roll``
        frame attribute to -this method's return value removes this rotation,
        allowing the use of the `~astropy.coordinates.Galactocentric` frame
        in more general contexts.
        """
        # note that the actual value is defined at the module level. We make it
        # a property here because this module isn't actually part of the public
        # API, so it's better for it to be accessible from Galactocentric
        return _ROLL0
# ICRS to/from Galactocentric ----------------------->
def get_matrix_vectors(galactocentric_frame, inverse=False):
    """
    Compute the affine transformation (rotation matrix and offset) between
    ICRS and Galactocentric for the given frame attributes.

    Parameters
    ----------
    galactocentric_frame : `Galactocentric`
        Frame instance supplying ``galcen_coord``, ``galcen_distance``,
        ``galcen_v_sun``, ``z_sun``, and ``roll``.
    inverse : bool, optional
        Use the ``inverse`` argument to get the inverse transformation, matrix and
        offsets to go from Galactocentric to ICRS.

    Returns
    -------
    matrix : array-like
        The rotation matrix.
    offset : `~astropy.coordinates.CartesianRepresentation`
        Translation, with the solar-velocity differential attached.
    """
    # shorthand
    gcf = galactocentric_frame
    # rotation matrix to align x(ICRS) with the vector to the Galactic center
    mat1 = rotation_matrix(-gcf.galcen_coord.dec, "y")
    mat2 = rotation_matrix(gcf.galcen_coord.ra, "z")
    # extra roll away from the Galactic x-z plane
    mat0 = rotation_matrix(gcf.get_roll0() - gcf.roll, "x")
    # construct transformation matrix and use it
    R = mat0 @ mat1 @ mat2
    # Now need to translate by Sun-Galactic center distance around x' and
    # rotate about y' to account for tilt due to Sun's height above the plane
    translation = r.CartesianRepresentation(gcf.galcen_distance * [1.0, 0.0, 0.0])
    z_d = gcf.z_sun / gcf.galcen_distance
    H = rotation_matrix(-np.arcsin(z_d), "y")
    # compute total matrices
    A = H @ R
    # Now we re-align the translation vector to account for the Sun's height
    # above the midplane
    offset = -translation.transform(H)
    if inverse:
        # the inverse of a rotation matrix is a transpose, which is much faster
        # and more stable to compute
        A = matrix_transpose(A)
        offset = (-offset).transform(A)
        offset_v = r.CartesianDifferential.from_cartesian(
            (-gcf.galcen_v_sun).to_cartesian().transform(A)
        )
        offset = offset.with_differentials(offset_v)
    else:
        offset = offset.with_differentials(gcf.galcen_v_sun)
    return A, offset
def _check_coord_repr_diff_types(c):
    """Raise ConvertError unless *c* carries full 3D position (and velocity) data."""
    if isinstance(c.data, r.UnitSphericalRepresentation):
        raise ConvertError(
            "Transforming to/from a Galactocentric frame requires a 3D coordinate, e.g."
            " (angle, angle, distance) or (x, y, z)."
        )
    # A velocity without a radial component (unit-spherical or radial-only
    # differential) is likewise insufficient.
    bad_diff_types = (
        r.UnitSphericalDifferential,
        r.UnitSphericalCosLatDifferential,
        r.RadialDifferential,
    )
    diffs = c.data.differentials
    if "s" in diffs and isinstance(diffs["s"], bad_diff_types):
        raise ConvertError(
            "Transforming to/from a Galactocentric frame requires a 3D velocity, e.g.,"
            " proper motion components and radial velocity."
        )
@frame_transform_graph.transform(AffineTransform, ICRS, Galactocentric)
def icrs_to_galactocentric(icrs_coord, galactocentric_frame):
    # ICRS -> Galactocentric affine transform; requires full 3D data.
    _check_coord_repr_diff_types(icrs_coord)
    return get_matrix_vectors(galactocentric_frame)
@frame_transform_graph.transform(AffineTransform, Galactocentric, ICRS)
def galactocentric_to_icrs(galactocentric_coord, icrs_frame):
    # Galactocentric -> ICRS affine transform; uses the inverse matrix/offset.
    _check_coord_repr_diff_types(galactocentric_coord)
    return get_matrix_vectors(galactocentric_coord, inverse=True)
# Create loopback transformation: Galactocentric -> ICRS -> Galactocentric,
# merged into a single transform on the graph.
frame_transform_graph._add_merged_transform(Galactocentric, ICRS, Galactocentric)
|
4d99dfe58bf6afdbaafba5c5b6e504d0bb98af81ccfff520aa0f6bc316b34a73 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.transformations import StaticMatrixTransform
from .galactic import Galactic
from .supergalactic import Supergalactic
@frame_transform_graph.transform(StaticMatrixTransform, Galactic, Supergalactic)
def gal_to_supergal():
    # Rotation composed from the Galactic coordinates of the north
    # supergalactic pole (``Supergalactic._nsgp_gal``): rotate by the pole's
    # longitude about z, then its colatitude about y, then 90 deg about z.
    return (
        rotation_matrix(90, "z")
        @ rotation_matrix(90 - Supergalactic._nsgp_gal.b.degree, "y")
        @ rotation_matrix(Supergalactic._nsgp_gal.l.degree, "z")
    )
@frame_transform_graph.transform(StaticMatrixTransform, Supergalactic, Galactic)
def supergal_to_gal():
    """Supergalactic -> Galactic: the transpose (inverse) of the forward rotation."""
    forward = gal_to_supergal()
    return matrix_transpose(forward)
|
ea0ce8276c1942b67e3b5593af3b820b18bc9584265936ae183d2b28f4d3a939 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates import earth_orientation as earth
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.baseframe import base_doc, frame_transform_graph
from astropy.coordinates.transformations import DynamicMatrixTransform
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame, doc_components
from .utils import EQUINOX_J2000
__all__ = ["FK5"]
doc_footer = """
Other parameters
----------------
equinox : `~astropy.time.Time`
The equinox of this frame.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class FK5(BaseRADecFrame):
    """
    A coordinate or frame in the FK5 system.

    Note that this is a barycentric version of FK5 - that is, the origin for
    this frame is the Solar System Barycenter, *not* the Earth geocenter.

    The frame attributes are listed under **Other Parameters**.
    """

    # Equinox of the frame; defaults to J2000.
    equinox = TimeAttribute(default=EQUINOX_J2000)

    @staticmethod
    def _precession_matrix(oldequinox, newequinox):
        """
        Compute and return the precession matrix for FK5 based on Capitaine et
        al. 2003/IAU2006. Used inside some of the transformation functions.

        Parameters
        ----------
        oldequinox : `~astropy.time.Time`
            The equinox to precess from.
        newequinox : `~astropy.time.Time`
            The equinox to precess to.

        Returns
        -------
        newcoord : array
            The precession matrix to transform to the new equinox
        """
        return earth.precession_matrix_Capitaine(oldequinox, newequinox)
# This is the "self-transform". Defined at module level because the decorator
# needs a reference to the FK5 class
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK5)
def fk5_to_fk5(fk5coord1, fk5frame2):
    """Precession matrix from the coordinate's equinox to the target frame's."""
    old_equinox = fk5coord1.equinox
    new_equinox = fk5frame2.equinox
    return fk5coord1._precession_matrix(old_equinox, new_equinox)
|
64ae3ef51cd2bcf3b9c7b6a2d04d5c079f291bd29b07be387604577116aa5e5d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to/from ITRS, TEME, GCRS, and CIRS.
These are distinct from the ICRS and AltAz functions because they are just
rotations without aberration corrections or offsets.
"""
import erfa
import numpy as np
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from .cirs import CIRS
from .equatorial import TEME, TETE
from .gcrs import GCRS, PrecessedGeocentric
from .icrs import ICRS
from .itrs import ITRS
from .utils import get_jd12, get_polar_motion
# # first define helper functions
def teme_to_itrs_mat(time):
    """Rotation matrix from TEME to ITRS at *time*."""
    # Sidereal time, rotates from ITRS to mean equinox
    # Use 1982 model for consistency with Vallado et al (2006)
    # http://www.celestrak.com/publications/aiaa/2006-6753/AIAA-2006-6753.pdf
    sidereal_time = erfa.gmst82(*get_jd12(time, "ut1"))
    # Polar motion matrix; the TIO locator s' is set to zero because it is
    # not used in Vallado 2006.
    xp, yp = get_polar_motion(time)
    polar_motion = erfa.pom00(xp, yp, 0)
    # c2tcio expects a GCRS->CIRS matrix as its first argument.  We pass the
    # identity because we are already in TEME; the only difference from CIRS
    # is that the rotation uses sidereal time instead of the Earth Rotation
    # Angle.
    return erfa.c2tcio(np.eye(3), sidereal_time, polar_motion)
def gcrs_to_cirs_mat(time):
    """Celestial-to-intermediate (GCRS -> CIRS) matrix for *time*."""
    jd1, jd2 = get_jd12(time, "tt")
    return erfa.c2i06a(jd1, jd2)
def cirs_to_itrs_mat(time):
    """Rotation matrix from CIRS to ITRS at *time*."""
    # Polar motion p-matrix, including the TIO locator s'.
    xp, yp = get_polar_motion(time)
    sp = erfa.sp00(*get_jd12(time, "tt"))
    polar_motion = erfa.pom00(xp, yp, sp)
    # Earth Rotation Angle; era00 accepts UT1, so convert if need be.
    era = erfa.era00(*get_jd12(time, "ut1"))
    # c2tcio expects a GCRS->CIRS matrix as its first argument; pass the
    # identity because we are already in CIRS.
    return erfa.c2tcio(np.eye(3), era, polar_motion)
def tete_to_itrs_mat(time, rbpn=None):
    """Rotation matrix from TETE to ITRS at *time*, including polar motion.

    If the nutation-precession matrix ``rbpn`` is already known it should be
    passed in, as computing it is by far the most expensive step.
    """
    xp, yp = get_polar_motion(time)
    sp = erfa.sp00(*get_jd12(time, "tt"))
    polar_motion = erfa.pom00(xp, yp, sp)
    # Greenwich apparent sidereal time for the input obstime; the 2006A model
    # is used for consistency with the RBPN matrix used in GCRS <-> TETE.
    ujd1, ujd2 = get_jd12(time, "ut1")
    jd1, jd2 = get_jd12(time, "tt")
    if rbpn is not None:
        gast = erfa.gst06(ujd1, ujd2, jd1, jd2, rbpn)
    else:
        # erfa.gst06a calls pnm06a to calculate rbpn and then gst06. Use it in
        # favour of getting rbpn with erfa.pnm06a to avoid a possibly large
        # array.
        gast = erfa.gst06a(ujd1, ujd2, jd1, jd2)
    # c2tcio expects a GCRS->CIRS matrix as its first argument; pass the
    # identity because TETE is already a CIRS-equivalent frame.
    return erfa.c2tcio(np.eye(3), gast, polar_motion)
def gcrs_precession_mat(equinox):
    """Precession matrix from GCRS to the mean equinox ``equinox`` (IAU 2006)."""
    # Fukushima-Williams angles at the equinox, folded into a rotation matrix.
    fw_angles = erfa.pfw06(*get_jd12(equinox, "tt"))
    return erfa.fw2m(*fw_angles)
def get_location_gcrs(location, obstime, ref_to_itrs, gcrs_to_ref):
    """Create a GCRS frame at the location and obstime.

    The reference frame z axis must point to the Celestial Intermediate Pole
    (as is the case for CIRS and TETE).

    This function is here to avoid location.get_gcrs(obstime), which would
    recalculate matrices that are already available below (and return a GCRS
    coordinate, rather than a frame with obsgeoloc and obsgeovel). Instead,
    it uses the private method that allows passing in the matrices.

    Parameters
    ----------
    location : location object
        Observer location; only its ``_get_gcrs_posvel`` method is used.
    obstime : `~astropy.time.Time`
        Time of observation.
    ref_to_itrs, gcrs_to_ref : ndarray
        Rotation matrices from the reference frame to ITRS and from GCRS
        to the reference frame, respectively.

    Returns
    -------
    `GCRS` frame instance with ``obsgeoloc`` and ``obsgeovel`` set.
    """
    obsgeoloc, obsgeovel = location._get_gcrs_posvel(obstime, ref_to_itrs, gcrs_to_ref)
    return GCRS(obstime=obstime, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
# now the actual transforms
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, TETE)
def gcrs_to_tete(gcrs_coo, tete_frame):
    """GCRS -> TETE: shift to the target observer, then rotate by the NPB matrix."""
    # Classical NPB matrix, IAU 2006/2000A
    # (same as in builtin_frames.utils.get_cip).
    rbpn = erfa.pnm06a(*get_jd12(tete_frame.obstime, "tt"))
    # Get GCRS coordinates for the target observer location and time.
    loc_gcrs = get_location_gcrs(
        tete_frame.location,
        tete_frame.obstime,
        tete_to_itrs_mat(tete_frame.obstime, rbpn=rbpn),
        rbpn,
    )
    gcrs_coo2 = gcrs_coo.transform_to(loc_gcrs)
    # Now we are relative to the correct observer, do the transform to TETE.
    # These rotations are defined at the geocenter, but can be applied to
    # topocentric positions as well, assuming rigid Earth. See p57 of
    # https://www.usno.navy.mil/USNO/astronomical-applications/publications/Circular_179.pdf
    crepr = gcrs_coo2.cartesian.transform(rbpn)
    return tete_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, TETE, GCRS)
def tete_to_gcrs(tete_coo, gcrs_frame):
    """TETE -> GCRS: undo the NPB rotation, then shift to the target observer."""
    # Compute the pn matrix, and then multiply by its transpose.
    rbpn = erfa.pnm06a(*get_jd12(tete_coo.obstime, "tt"))
    newrepr = tete_coo.cartesian.transform(matrix_transpose(rbpn))
    # We now have a GCRS vector for the input location and obstime.
    # Turn it into a GCRS frame instance.
    loc_gcrs = get_location_gcrs(
        tete_coo.location,
        tete_coo.obstime,
        tete_to_itrs_mat(tete_coo.obstime, rbpn=rbpn),
        rbpn,
    )
    gcrs = loc_gcrs.realize_frame(newrepr)
    # Finally, do any needed offsets (no-op if same obstime and location)
    return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, TETE, ITRS)
def tete_to_itrs(tete_coo, itrs_frame):
    """TETE -> ITRS: realign obstime/location, then apply the rotation matrix."""
    # Move to TETE at the target obstime and location (no-op if already there).
    aligned = tete_coo.transform_to(
        TETE(obstime=itrs_frame.obstime, location=itrs_frame.location)
    )
    # Rotate into the terrestrial frame.
    rotated = aligned.cartesian.transform(tete_to_itrs_mat(itrs_frame.obstime))
    return itrs_frame.realize_frame(rotated)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, TETE)
def itrs_to_tete(itrs_coo, tete_frame):
    """ITRS -> TETE: rotate by the transposed matrix, then shift frame."""
    inverse_mat = matrix_transpose(tete_to_itrs_mat(itrs_coo.obstime))
    celestial_repr = itrs_coo.cartesian.transform(inverse_mat)
    intermediate = TETE(
        celestial_repr, obstime=itrs_coo.obstime, location=itrs_coo.location
    )
    # Apply any remaining obstime/location offsets (no-op if identical).
    return intermediate.transform_to(tete_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, CIRS)
def gcrs_to_cirs(gcrs_coo, cirs_frame):
    """GCRS -> CIRS: shift to the target observer, then rotate into CIRS."""
    # first get the pmatrix
    pmat = gcrs_to_cirs_mat(cirs_frame.obstime)
    # Get GCRS coordinates for the target observer location and time.
    loc_gcrs = get_location_gcrs(
        cirs_frame.location,
        cirs_frame.obstime,
        cirs_to_itrs_mat(cirs_frame.obstime),
        pmat,
    )
    gcrs_coo2 = gcrs_coo.transform_to(loc_gcrs)
    # Now we are relative to the correct observer, do the transform to CIRS.
    crepr = gcrs_coo2.cartesian.transform(pmat)
    return cirs_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, GCRS)
def cirs_to_gcrs(cirs_coo, gcrs_frame):
    """CIRS -> GCRS: undo the CIRS rotation, then shift to the target observer."""
    # Compute the pmatrix, and then multiply by its transpose,
    pmat = gcrs_to_cirs_mat(cirs_coo.obstime)
    newrepr = cirs_coo.cartesian.transform(matrix_transpose(pmat))
    # We now have a GCRS vector for the input location and obstime.
    # Turn it into a GCRS frame instance.
    loc_gcrs = get_location_gcrs(
        cirs_coo.location, cirs_coo.obstime, cirs_to_itrs_mat(cirs_coo.obstime), pmat
    )
    gcrs = loc_gcrs.realize_frame(newrepr)
    # Finally, do any needed offsets (no-op if same obstime and location)
    return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ITRS)
def cirs_to_itrs(cirs_coo, itrs_frame):
    """CIRS -> ITRS: realign obstime/location, then apply the rotation matrix."""
    # Move to CIRS at the target obstime and location (no-op if already there).
    aligned = cirs_coo.transform_to(
        CIRS(obstime=itrs_frame.obstime, location=itrs_frame.location)
    )
    # Rotate into the terrestrial frame.
    rotated = aligned.cartesian.transform(cirs_to_itrs_mat(itrs_frame.obstime))
    return itrs_frame.realize_frame(rotated)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, CIRS)
def itrs_to_cirs(itrs_coo, cirs_frame):
    """ITRS -> CIRS: rotate by the transposed matrix, then shift frame."""
    inverse_mat = matrix_transpose(cirs_to_itrs_mat(itrs_coo.obstime))
    celestial_repr = itrs_coo.cartesian.transform(inverse_mat)
    intermediate = CIRS(
        celestial_repr, obstime=itrs_coo.obstime, location=itrs_coo.location
    )
    # Apply any remaining obstime/location offsets (no-op if identical).
    return intermediate.transform_to(cirs_frame)
# TODO: implement GCRS<->CIRS if there's call for it. The thing that's awkward
# is that they both have obstimes, so an extra set of transformations are necessary.
# so unless there's a specific need for that, better to just have it go through the above
# two steps anyway
@frame_transform_graph.transform(
    FunctionTransformWithFiniteDifference, GCRS, PrecessedGeocentric
)
def gcrs_to_precessedgeo(from_coo, to_frame):
    """GCRS -> PrecessedGeocentric: match GCRS attributes, then precess."""
    # first get us to GCRS with the right attributes (might be a no-op)
    gcrs_coo = from_coo.transform_to(
        GCRS(
            obstime=to_frame.obstime,
            obsgeoloc=to_frame.obsgeoloc,
            obsgeovel=to_frame.obsgeovel,
        )
    )
    # now precess to the requested equinox
    pmat = gcrs_precession_mat(to_frame.equinox)
    crepr = gcrs_coo.cartesian.transform(pmat)
    return to_frame.realize_frame(crepr)
@frame_transform_graph.transform(
    FunctionTransformWithFiniteDifference, PrecessedGeocentric, GCRS
)
def precessedgeo_to_gcrs(from_coo, to_frame):
    """PrecessedGeocentric -> GCRS: un-precess, then shift to the target frame."""
    # first un-precess
    pmat = gcrs_precession_mat(from_coo.equinox)
    crepr = from_coo.cartesian.transform(matrix_transpose(pmat))
    gcrs_coo = GCRS(
        crepr,
        obstime=from_coo.obstime,
        obsgeoloc=from_coo.obsgeoloc,
        obsgeovel=from_coo.obsgeovel,
    )
    # then move to the GCRS that's actually desired
    return gcrs_coo.transform_to(to_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, TEME, ITRS)
def teme_to_itrs(teme_coo, itrs_frame):
    """TEME -> ITRS: rotate at the source obstime, then move to the target."""
    # Rotate into ITRS at the *source* obstime.
    rotated = teme_coo.cartesian.transform(teme_to_itrs_mat(teme_coo.obstime))
    itrs_at_source = ITRS(rotated, obstime=teme_coo.obstime)
    # Shift to the requested obstime (no-op if identical).
    return itrs_at_source.transform_to(itrs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, TEME)
def itrs_to_teme(itrs_coo, teme_frame):
    """ITRS -> TEME: move to the target obstime, then rotate back."""
    # Shift to the requested obstime first (no-op if identical).
    itrs_at_target = itrs_coo.transform_to(ITRS(obstime=teme_frame.obstime))
    # Undo the TEME -> ITRS rotation via the transpose.
    inverse_mat = matrix_transpose(teme_to_itrs_mat(teme_frame.obstime))
    return teme_frame.realize_frame(itrs_at_target.cartesian.transform(inverse_mat))
# Create loopback transformations
# These register self-transforms (e.g. ITRS at one obstime to ITRS at
# another) as a single merged graph edge that routes through the given
# intermediate frame.
frame_transform_graph._add_merged_transform(ITRS, CIRS, ITRS)
frame_transform_graph._add_merged_transform(
    PrecessedGeocentric, GCRS, PrecessedGeocentric
)
frame_transform_graph._add_merged_transform(TEME, ITRS, TEME)
# NOTE: TETE loops back via ICRS (not ITRS), unlike the other entries.
frame_transform_graph._add_merged_transform(TETE, ICRS, TETE)
|
5df716a5c6a52c6b0d5145c73f6377ca842a612d03a9e6db52765d6eb742f5ad | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.baseframe import base_doc
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame, doc_components
from .utils import DEFAULT_OBSTIME
__all__ = ["HCRS"]
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position of the Sun.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class HCRS(BaseRADecFrame):
    """
    A coordinate or frame in a Heliocentric system, with axes aligned to ICRS.

    The ICRS has an origin at the Barycenter and axes which are fixed with
    respect to space.

    This coordinate system is distinct from ICRS mainly in that it is relative
    to the Sun's center-of-mass rather than the solar system Barycenter.
    In principle, therefore, this frame should include the effects of
    aberration (unlike ICRS), but this is not done, since they are very small,
    of the order of 8 milli-arcseconds.

    For more background on the ICRS and related coordinate transformations, see
    the references provided in the :ref:`astropy:astropy-coordinates-seealso`
    section of the documentation.

    The frame attributes are listed under **Other Parameters**.
    """

    # Time of observation, used to determine the Sun's position.
    obstime = TimeAttribute(default=DEFAULT_OBSTIME)
# Transformations are defined in icrs_circ_transforms.py
|
ce3408416a1a9592f043023bcaa7cff83bfac4de8a256e42e5a8b3b4c4e45ab7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates import earth_orientation as earth
from astropy.coordinates.attributes import TimeAttribute
from astropy.coordinates.baseframe import base_doc, frame_transform_graph
from astropy.coordinates.representation import (
CartesianRepresentation,
UnitSphericalRepresentation,
)
from astropy.coordinates.transformations import (
DynamicMatrixTransform,
FunctionTransformWithFiniteDifference,
)
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame, doc_components
from .utils import EQUINOX_B1950
__all__ = ["FK4", "FK4NoETerms"]
doc_footer_fk4 = """
Other parameters
----------------
equinox : `~astropy.time.Time`
The equinox of this frame.
obstime : `~astropy.time.Time`
The time this frame was observed. If ``None``, will be the same as
``equinox``.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer_fk4)
class FK4(BaseRADecFrame):
    """
    A coordinate or frame in the FK4 system.

    Note that this is a barycentric version of FK4 - that is, the origin for
    this frame is the Solar System Barycenter, *not* the Earth geocenter.

    The frame attributes are listed under **Other Parameters**.
    """

    # Equinox defining the frame orientation; defaults to B1950.0.
    equinox = TimeAttribute(default=EQUINOX_B1950)
    # Observation epoch; falls back to ``equinox`` when not given.
    obstime = TimeAttribute(default=None, secondary_attribute="equinox")
# the "self" transform
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, FK4, FK4)
def fk4_to_fk4(fk4coord1, fk4frame2):
    """FK4 self-transform (equinox change).

    Precession with the E-terms of aberration present is non-trivial, so
    detour through FK4NoETerms: strip the E-terms, precess, then re-add them.
    """
    without_eterms = fk4coord1.transform_to(FK4NoETerms(equinox=fk4coord1.equinox))
    precessed = without_eterms.transform_to(FK4NoETerms(equinox=fk4frame2.equinox))
    return precessed.transform_to(fk4frame2)
@format_doc(base_doc, components=doc_components, footer=doc_footer_fk4)
class FK4NoETerms(BaseRADecFrame):
    """
    A coordinate or frame in the FK4 system, but with the E-terms of aberration
    removed.

    The frame attributes are listed under **Other Parameters**.
    """

    # Equinox defining the frame orientation; defaults to B1950.0.
    equinox = TimeAttribute(default=EQUINOX_B1950)
    # Observation epoch; falls back to ``equinox`` when not given.
    obstime = TimeAttribute(default=None, secondary_attribute="equinox")

    @staticmethod
    def _precession_matrix(oldequinox, newequinox):
        """
        Compute and return the precession matrix for FK4 using Newcomb's method.
        Used inside some of the transformation functions.

        Parameters
        ----------
        oldequinox : `~astropy.time.Time`
            The equinox to precess from.
        newequinox : `~astropy.time.Time`
            The equinox to precess to.

        Returns
        -------
        newcoord : array
            The precession matrix to transform to the new equinox
        """
        # Newcomb's precession formulation works in Besselian years.
        return earth._precession_matrix_besselian(oldequinox.byear, newequinox.byear)
# the "self" transform
@frame_transform_graph.transform(DynamicMatrixTransform, FK4NoETerms, FK4NoETerms)
def fk4noe_to_fk4noe(fk4necoord1, fk4neframe2):
    # Self-transform: with the E-terms removed, changing equinox is a plain
    # rotation, so a matrix-only DynamicMatrixTransform suffices.
    return fk4necoord1._precession_matrix(fk4necoord1.equinox, fk4neframe2.equinox)
# FK4-NO-E to/from FK4 ----------------------------->
# Unlike other frames, this module include *two* frame classes for FK4
# coordinates - one including the E-terms of aberration (FK4), and
# one not including them (FK4NoETerms). The following functions
# implement the transformation between these two.
def fk4_e_terms(equinox):
    """
    Return the e-terms of aberration vector

    Parameters
    ----------
    equinox : Time object
        The equinox for which to compute the e-terms
    """
    # Constant of aberration at J2000; from Explanatory Supplement to the
    # Astronomical Almanac (Seidelmann, 2005).
    k = np.radians(0.0056932)  # v_earth/c ~ 1e-4 rad ~ 0.0057 deg
    # Earth orbital elements at this equinox:
    # eccentricity, mean longitude of perigee, and obliquity of the ecliptic.
    e = earth.eccentricity(equinox.jd)
    g = np.radians(earth.mean_lon_of_perigee(equinox.jd))
    o = np.radians(earth.obliquity(equinox.jd, algorithm=1980))
    return (
        e * k * np.sin(g),
        -e * k * np.cos(g) * np.cos(o),
        -e * k * np.cos(g) * np.sin(o),
    )
@frame_transform_graph.transform(
    FunctionTransformWithFiniteDifference, FK4, FK4NoETerms
)
def fk4_to_fk4_no_e(fk4coord, fk4noeframe):
    """Remove the E-terms of aberration from FK4 coordinates."""
    # Extract cartesian vector
    rep = fk4coord.cartesian
    # Find distance (for re-normalization)
    d_orig = rep.norm()
    rep /= d_orig
    # Apply E-terms of aberration. Note that this depends on the equinox (not
    # the observing time/epoch) of the coordinates. See issue #1496 for a
    # discussion of this.
    eterms_a = CartesianRepresentation(
        u.Quantity(fk4_e_terms(fk4coord.equinox), u.dimensionless_unscaled, copy=False),
        copy=False,
    )
    rep = rep - eterms_a + eterms_a.dot(rep) * rep
    # Find new distance (for re-normalization)
    d_new = rep.norm()
    # Renormalize
    rep *= d_orig / d_new
    # now re-cast into an appropriate Representation, and precess if need be
    if isinstance(fk4coord.data, UnitSphericalRepresentation):
        rep = rep.represent_as(UnitSphericalRepresentation)
    # if no obstime was given in the new frame, use the old one for consistency
    newobstime = (
        fk4coord._obstime if fk4noeframe._obstime is None else fk4noeframe._obstime
    )
    fk4noe = FK4NoETerms(rep, equinox=fk4coord.equinox, obstime=newobstime)
    if fk4coord.equinox != fk4noeframe.equinox:
        # precession
        fk4noe = fk4noe.transform_to(fk4noeframe)
    return fk4noe
@frame_transform_graph.transform(
    FunctionTransformWithFiniteDifference, FK4NoETerms, FK4
)
def fk4_no_e_to_fk4(fk4noecoord, fk4frame):
    """Add the E-terms of aberration back, inverting ``fk4_to_fk4_no_e``."""
    # first precess, if necessary
    if fk4noecoord.equinox != fk4frame.equinox:
        fk4noe_w_fk4equinox = FK4NoETerms(
            equinox=fk4frame.equinox, obstime=fk4noecoord.obstime
        )
        fk4noecoord = fk4noecoord.transform_to(fk4noe_w_fk4equinox)
    # Extract cartesian vector
    rep = fk4noecoord.cartesian
    # Find distance (for re-normalization)
    d_orig = rep.norm()
    rep /= d_orig
    # Apply E-terms of aberration. Note that this depends on the equinox (not
    # the observing time/epoch) of the coordinates. See issue #1496 for a
    # discussion of this.
    eterms_a = CartesianRepresentation(
        u.Quantity(
            fk4_e_terms(fk4noecoord.equinox), u.dimensionless_unscaled, copy=False
        ),
        copy=False,
    )
    rep0 = rep.copy()
    # The E-term removal has no closed-form inverse; iterate the fixed-point
    # relation rep = (eterms_a + rep0) / (1 + eterms_a . rep). Ten rounds —
    # presumably ample given the milliarcsecond-scale E-terms; confirm
    # against the forward transform if this is ever revisited.
    for _ in range(10):
        rep = (eterms_a + rep0) / (1.0 + eterms_a.dot(rep))
    # Find new distance (for re-normalization)
    d_new = rep.norm()
    # Renormalize
    rep *= d_orig / d_new
    # now re-cast into an appropriate Representation, and precess if need be
    if isinstance(fk4noecoord.data, UnitSphericalRepresentation):
        rep = rep.represent_as(UnitSphericalRepresentation)
    return fk4frame.realize_frame(rep)
|
9ca3d87a7ff2711a08bfe105d852e6d53a990f0352b511a671f41350c206071f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
RepresentationMapping,
base_doc,
)
from astropy.utils.decorators import format_doc
__all__ = ["BaseRADecFrame"]
doc_components = """
ra : `~astropy.coordinates.Angle`, optional, keyword-only
The RA for this object (``dec`` must also be given and ``representation``
must be None).
dec : `~astropy.coordinates.Angle`, optional, keyword-only
The Declination for this object (``ra`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
(``representation`` must be None).
pm_ra_cosdec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Right Ascension (including the ``cos(dec)`` factor)
for this object (``pm_dec`` must also be given).
pm_dec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Declination for this object (``pm_ra_cosdec`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc, components=doc_components, footer="")
class BaseRADecFrame(BaseCoordinateFrame):
    """
    A base class that defines default representation info for frames that
    represent longitude and latitude as Right Ascension and Declination
    following typical "equatorial" conventions.
    """

    # Expose the generic spherical components under equatorial names
    # (lon -> ra, lat -> dec) for all subclasses.
    frame_specific_representation_info = {
        r.SphericalRepresentation: [
            RepresentationMapping("lon", "ra"),
            RepresentationMapping("lat", "dec"),
        ]
    }
    default_representation = r.SphericalRepresentation
    default_differential = r.SphericalCosLatDifferential
|
edb92ccb402cc769157fd427ced24f0fa997910f7612dc64b064b43be4ef05d7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains the coordinate frames implemented by astropy.
Users shouldn't use this module directly, but rather import from the
`astropy.coordinates` module. While it is likely to exist for the long-term,
the existence of this package and details of its organization should be
considered an implementation detail, and is not guaranteed to hold for future
versions of astropy.
Notes
-----
The builtin frame classes are all imported automatically into this package's
namespace, so there's no need to access the sub-modules directly.
To implement a new frame in Astropy, a developer should add the frame as a new
module in this package. Any "self" transformations (i.e., those that transform
from one frame to another frame of the same class) should be included in that
module. Transformation functions connecting the new frame to other frames
should be in a separate module, which should be imported in this package's
``__init__.py`` to ensure the transformations are hooked up when this package is
imported. Placing the transformation functions in separate modules avoids
circular dependencies, because they need references to the frame classes.
"""
from astropy.coordinates.baseframe import frame_transform_graph
from .altaz import AltAz
from .baseradec import BaseRADecFrame
from .cirs import CIRS
from .ecliptic import (
BarycentricMeanEcliptic,
BarycentricTrueEcliptic,
BaseEclipticFrame,
CustomBarycentricEcliptic,
GeocentricMeanEcliptic,
GeocentricTrueEcliptic,
HeliocentricEclipticIAU76,
HeliocentricMeanEcliptic,
HeliocentricTrueEcliptic,
)
from .equatorial import TEME, TETE
from .fk4 import FK4, FK4NoETerms
from .fk5 import FK5
from .galactic import Galactic
from .galactocentric import Galactocentric, galactocentric_frame_defaults
from .gcrs import GCRS, PrecessedGeocentric
from .hadec import HADec
from .hcrs import HCRS
from .icrs import ICRS
from .itrs import ITRS
from .skyoffset import SkyOffsetFrame
from .supergalactic import Supergalactic
# isort: split
# need to import transformations so that they get registered in the graph
from . import (
cirs_observed_transforms,
fk4_fk5_transforms,
galactic_transforms,
icrs_cirs_transforms,
icrs_fk5_transforms,
icrs_observed_transforms,
intermediate_rotation_transforms,
itrs_observed_transforms,
supergalactic_transforms,
)
# isort: split
from . import ecliptic_transforms
# isort: split
# Import this after importing other frames, since this requires various
# transformations to set up the LSR frames
from .lsr import LSR, LSRD, LSRK, GalacticLSR
# we define an __all__ because otherwise the transformation modules
# get included. Note that the order here determines the order in the
# documentation of the built-in frames (see make_transform_graphs_docs).
__all__ = [
"ICRS",
"FK5",
"FK4",
"FK4NoETerms",
"Galactic",
"Galactocentric",
"Supergalactic",
"AltAz",
"HADec",
"GCRS",
"CIRS",
"ITRS",
"HCRS",
"TEME",
"TETE",
"PrecessedGeocentric",
"GeocentricMeanEcliptic",
"BarycentricMeanEcliptic",
"HeliocentricMeanEcliptic",
"GeocentricTrueEcliptic",
"BarycentricTrueEcliptic",
"HeliocentricTrueEcliptic",
"HeliocentricEclipticIAU76",
"CustomBarycentricEcliptic",
"LSR",
"LSRK",
"LSRD",
"GalacticLSR",
"SkyOffsetFrame",
"BaseEclipticFrame",
"BaseRADecFrame",
"galactocentric_frame_defaults",
"make_transform_graph_docs",
]
def _get_doc_header(cls):
"""Get the first line of a docstring.
Skips possible empty first lines, and then combine following text until
the first period or a fully empty line.
"""
out = []
for line in cls.__doc__.splitlines():
if line:
parts = line.split(".")
out.append(parts[0].strip())
if len(parts) > 1:
break
elif out:
break
return " ".join(out) + "."
def make_transform_graph_docs(transform_graph):
    """
    Generates a string that can be used in other docstrings to include a
    transformation graph, showing the available transforms and
    coordinate systems.

    Parameters
    ----------
    transform_graph : `~astropy.coordinates.TransformGraph`

    Returns
    -------
    docstring : str
        A string that can be added to the end of a docstring to show the
        transform graph.
    """
    from textwrap import dedent

    # Map frame-class name -> class for every frame registered in the graph.
    coosys = {
        (cls := transform_graph.lookup_name(item)).__name__: cls
        for item in transform_graph.get_names()
    }
    # currently, all of the priorities are set to 1, so we don't need to show
    # them in the transform graph.
    graphstr = transform_graph.to_dot_graph(
        addnodes=list(coosys.values()), priorities=False
    )
    docstr = """
    The diagram below shows all of the built in coordinate systems,
    their aliases (useful for converting other coordinates to them using
    attribute-style access) and the pre-defined transformations between
    them. The user is free to override any of these transformations by
    defining new transformations between these systems, but the
    pre-defined transformations should be sufficient for typical usage.
    The color of an edge in the graph (i.e. the transformations between two
    frames) is set by the type of transformation; the legend box defines the
    mapping from transform class name to color.
    .. Wrap the graph in a div with a custom class to allow themeing.
    .. container:: frametransformgraph
        .. graphviz::
    """
    docstr = dedent(docstr) + " " + graphstr.replace("\n", "\n ")
    # colors are in dictionary at the bottom of transformations.py
    from astropy.coordinates.transformations import trans_to_color

    # Build an HTML legend entry (colored arrow) per transform class.
    html_list_items = []
    for cls, color in trans_to_color.items():
        block = f"""
            <li style='list-style: none;'>
                <p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
                    <b>{cls.__name__}:</b>
                    <span style="font-size: 24px; color: {color};"><b>➝</b></span>
                </p>
            </li>
        """
        html_list_items.append(block)
    nl = "\n"
    graph_legend = f"""
    .. raw:: html
        <ul>
            {nl.join(html_list_items)}
        </ul>
    """
    docstr = docstr + dedent(graph_legend)
    # Add table with built-in frame classes.
    template = """
      * - `~astropy.coordinates.{}`
        - {}
    """
    # Only names that are both in __all__ and registered frames get a row;
    # __all__ order determines the table order.
    table = """
    Built-in Frame Classes
    ^^^^^^^^^^^^^^^^^^^^^^
    .. list-table::
       :widths: 20 80
    """ + "".join(
        template.format(name, _get_doc_header(coosys[name]))
        for name in __all__
        if name in coosys
    )
    return docstr + dedent(table)
# Build the transform-graph documentation once, at import time.
_transform_graph_docs = make_transform_graph_docs(frame_transform_graph)

# Here, we override the module docstring so that sphinx renders the transform
# graph without the developer documentation in the main docstring above.
__doc__ = _transform_graph_docs
|
a6980e91b1ad60301ec1047427564d53c3d12606a290098952c55c13ef155f83 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import (
BaseCoordinateFrame,
RepresentationMapping,
base_doc,
)
from astropy.utils.decorators import format_doc
from .galactic import Galactic
__all__ = ["Supergalactic"]
# Component descriptions injected into the Supergalactic class docstring
# via ``format_doc`` below.
# NOTE: ``distance`` is a length, not a speed — the physical-type tag was
# wrong ('speed'); it now matches the equivalent entry in baseradec.
doc_components = """
    sgl : `~astropy.coordinates.Angle`, optional, keyword-only
        The supergalactic longitude for this object (``sgb`` must also be given and
        ``representation`` must be None).
    sgb : `~astropy.coordinates.Angle`, optional, keyword-only
        The supergalactic latitude for this object (``sgl`` must also be given and
        ``representation`` must be None).
    distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
        The Distance for this object along the line-of-sight.
    pm_sgl_cossgb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
        The proper motion in Right Ascension for this object (``pm_sgb`` must
        also be given).
    pm_sgb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
        The proper motion in Declination for this object (``pm_sgl_cossgb`` must
        also be given).
    radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
        The radial velocity of this object.
    """
@format_doc(base_doc, components=doc_components, footer="")
class Supergalactic(BaseCoordinateFrame):
    """
    Supergalactic Coordinates
    (see Lahav et al. 2000, <https://ui.adsabs.harvard.edu/abs/2000MNRAS.312..166L>,
    and references therein).
    """

    # Rename the generic spherical and cartesian components to the
    # conventional supergalactic names (sgl/sgb, sgx/sgy/sgz, v_x/v_y/v_z).
    frame_specific_representation_info = {
        r.SphericalRepresentation: [
            RepresentationMapping("lon", "sgl"),
            RepresentationMapping("lat", "sgb"),
        ],
        r.CartesianRepresentation: [
            RepresentationMapping("x", "sgx"),
            RepresentationMapping("y", "sgy"),
            RepresentationMapping("z", "sgz"),
        ],
        r.CartesianDifferential: [
            RepresentationMapping("d_x", "v_x", u.km / u.s),
            RepresentationMapping("d_y", "v_y", u.km / u.s),
            RepresentationMapping("d_z", "v_z", u.km / u.s),
        ],
    }
    default_representation = r.SphericalRepresentation
    default_differential = r.SphericalCosLatDifferential

    # North supergalactic pole in Galactic coordinates.
    # Needed for transformations to/from Galactic coordinates.
    _nsgp_gal = Galactic(l=47.37 * u.degree, b=+6.32 * u.degree)
|
b79a6a729fd90867a6dd0cc7b6264196826245e4262c57472f698436eb10db02 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.baseframe import base_doc
from astropy.utils.decorators import format_doc
from .baseradec import BaseRADecFrame, doc_components
__all__ = ["ICRS"]
@format_doc(base_doc, components=doc_components, footer="")
class ICRS(BaseRADecFrame):
    """
    A coordinate or frame in the ICRS system.

    If you're looking for "J2000" coordinates, and aren't sure if you want to
    use this or `~astropy.coordinates.FK5`, you probably want to use ICRS. It's
    more well-defined as a catalog coordinate and is an inertial system, and is
    very close (within tens of milliarcseconds) to J2000 equatorial.

    For more background on the ICRS and related coordinate transformations, see
    the references provided in the :ref:`astropy:astropy-coordinates-seealso`
    section of the documentation.
    """

    # No frame attributes or representation overrides are defined here;
    # everything is inherited from BaseRADecFrame.
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.