response | instruction |
---|---|
Checks radial velocity between Earth and M31 | def test_spectral_coord_m31():
"""
Checks radial velocity between Earth and M31
"""
obstime = time.Time("2018-12-13 9:00")
obs = GREENWICH.get_gcrs(obstime)
# Coordinates were obtained from the following then hard-coded to avoid download
# m31 = SkyCoord.from_name('M31')
m31 = SkyCoord(
ra=10.6847 * u.deg,
dec=41.269 * u.deg,
distance=710 * u.kpc,
radial_velocity=-300 * u.km / u.s,
)
spc = SpectralCoord([100, 200, 300] * u.nm, observer=obs, target=m31)
# The velocity should be less than ~300 + 30 + a bit extra in km/s, which
# is the maximum possible relative velocity. We check the exact values
# here (determined from SpectralCoord, so this serves as a test to check
# that this value doesn't change - the value is not a ground truth)
assert_quantity_allclose(spc.radial_velocity, -279.755128 * u.km / u.s)
assert_allclose(spc.redshift, -0.0009327276702120191) |
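As a quick sanity check on the hard-coded numbers above, the first-order Doppler relation z ≈ v/c already reproduces the quoted redshift to within a fraction of a percent (SpectralCoord itself uses the full relativistic relation). A minimal sketch using the values copied from the test:

```python
# Minimal sanity sketch (illustrative only, not part of the test suite):
# the first-order Doppler relation z ~ v/c applied to the quoted velocity.
import astropy.units as u
from astropy.constants import c

v = -279.755128 * u.km / u.s
print((v / c).to_value(u.dimensionless_unscaled))  # ~ -9.33e-4, close to spc.redshift above
```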
This tests storing a spectral coordinate with a specific redshift, and then
doing basic rest-to-observed-and-back transformations | def test_shift_to_rest_galaxy():
"""
This tests storing a spectral coordinate with a specific redshift, and then
doing basic rest-to-observed-and-back transformations
"""
z = 5
rest_line_wls = [5007, 6563] * u.AA
observed_spc = SpectralCoord(rest_line_wls * (z + 1), redshift=z)
rest_spc = observed_spc.to_rest()
# alternatively:
# rest_spc = observed_spc.with_observer(observed_spec.target)
# although then it would have to be clearly documented, or the `to_rest`
# implemented in Spectrum1D?
assert_quantity_allclose(rest_spc, rest_line_wls)
# No frames are explicitly defined, so to the user, the observer and
# target are not set.
with pytest.raises(AttributeError):
assert_frame_allclose(rest_spc.observer, rest_spc.target) |
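The bookkeeping this test relies on is simply lambda_observed = lambda_rest * (1 + z); a minimal sketch with the same numbers, independent of SpectralCoord:

```python
# Sketch of the wavelength/redshift relation used above (plain arithmetic).
import astropy.units as u

z = 5
rest = [5007, 6563] * u.AA
observed = rest * (1 + z)
print(observed)            # [30042. 39378.] Angstrom
print(observed / (1 + z))  # recovers the rest-frame wavelengths
```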
This test mocks up the use case of observing a spectrum of an asteroid
at different times and from different observer locations. | def test_asteroid_velocity_frame_shifts():
"""
This test mocks up the use case of observing a spectrum of an asteroid
at different times and from different observer locations.
"""
time1 = time.Time("2018-12-13 9:00")
dt = 12 * u.hour
time2 = time1 + dt
# make the silly but simplifying assumption that the asteroid is moving along
# the x-axis of GCRS, and makes a 10 earth-radius closest approach
v_ast = [5, 0, 0] * u.km / u.s
x1 = -v_ast[0] * dt / 2
x2 = v_ast[0] * dt / 2
z = 10 * u.Rearth
cdiff = CartesianDifferential(v_ast)
asteroid_loc1 = GCRS(
CartesianRepresentation(x1.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time1,
)
asteroid_loc2 = GCRS(
CartesianRepresentation(x2.to(u.km), 0 * u.km, z.to(u.km), differentials=cdiff),
obstime=time2,
)
# assume satellites that are essentially fixed in geostationary orbit on
# opposite sides of the earth
observer1 = GCRS(
CartesianRepresentation([0 * u.km, 35000 * u.km, 0 * u.km]), obstime=time1
)
observer2 = GCRS(
CartesianRepresentation([0 * u.km, -35000 * u.km, 0 * u.km]), obstime=time2
)
wls = np.linspace(4000, 7000, 100) * u.AA
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord1 = SpectralCoord(wls, observer=observer1, target=asteroid_loc1)
assert spec_coord1.radial_velocity < 0 * u.km / u.s
assert spec_coord1.radial_velocity > -5 * u.km / u.s
with pytest.warns(AstropyUserWarning, match="No velocity defined on frame"):
spec_coord2 = SpectralCoord(wls, observer=observer2, target=asteroid_loc2)
assert spec_coord2.radial_velocity > 0 * u.km / u.s
assert spec_coord2.radial_velocity < 5 * u.km / u.s
# now check the behavior of with_observer_stationary_relative_to: we shift each coord
# into the velocity frame of its *own* target. That would then be a
# spectralcoord that would allow direct physical comparison of the two
# different spec_coords. There's no way to test that, without
# actual data, though.
# spec_coord2 is redshifted, so we test that it behaves the way "shifting
# to rest frame" should - the as-observed spectral coordinate should become
# the rest frame, so something that starts out red should become bluer
target_sc2 = spec_coord2.with_observer_stationary_relative_to(spec_coord2.target)
assert np.all(target_sc2 < spec_coord2)
# rv/redshift should be 0 since the observer and target velocities should
# be the same
assert_quantity_allclose(
target_sc2.radial_velocity, 0 * u.km / u.s, atol=1e-7 * u.km / u.s
)
# check that the same holds for spec_coord1, but be more specific: it
# should follow the standard redshift formula (which in this case yields
# a blueshift, although the formula is the same as 1+z)
target_sc1 = spec_coord1.with_observer_stationary_relative_to(spec_coord1.target)
assert_quantity_allclose(target_sc1, spec_coord1 / (1 + spec_coord1.redshift)) |
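The sign expectations asserted above follow from simple geometry: the radial velocity is the projection of the asteroid velocity onto the observer-to-target direction, which is negative before closest approach and positive after. A rough, hedged sketch of that projection for the first epoch, treating the observer velocity as zero (consistent with the "No velocity defined on frame" warning); the numbers are approximate and not taken from the test output:

```python
# Rough geometric sketch: radial velocity = v_ast . r_hat,
# with r_hat the observer->target unit vector.
import numpy as np
import astropy.units as u

v_ast = np.array([5.0, 0.0, 0.0])           # km/s, along +x
r1 = np.array([-5.0 * 12 * 3600 / 2,        # x1 = -v*dt/2, in km
               0.0 - 35000.0,               # target y minus observer y, in km
               10 * 6378.1])                # ~10 Earth radii, in km
rv1 = v_ast @ (r1 / np.linalg.norm(r1))
print(rv1 * u.km / u.s)  # roughly -4 km/s: approaching, as spec_coord1 asserts
```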
Tests the class-based/OO syntax for creating transforms | def test_transform_classes():
"""
Tests the class-based/OO syntax for creating transforms
"""
def tfun(c, f):
return f.__class__(ra=c.ra, dec=c.dec)
_ = FunctionTransform(tfun, TCoo1, TCoo2, register_graph=frame_transform_graph)
c1 = TCoo1(ra=1 * u.radian, dec=0.5 * u.radian)
c2 = c1.transform_to(TCoo2())
assert_allclose(c2.ra.radian, 1)
assert_allclose(c2.dec.radian, 0.5)
def matfunc(coo, fr):
return [[1, 0, 0], [0, coo.ra.degree, 0], [0, 0, 1]]
trans2 = DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
trans2.register(frame_transform_graph)
c3 = TCoo1(ra=1 * u.deg, dec=2 * u.deg)
c4 = c3.transform_to(TCoo2())
assert_allclose(c4.ra.degree, 1)
assert_allclose(c4.dec.degree, 2)  # matrix is the identity here since coo.ra.degree == 1
# be sure to unregister the second one - no need for trans1 because it
# already got unregistered when trans2 was created.
trans2.unregister(frame_transform_graph) |
Tests the decorator syntax for creating transforms | def test_transform_decos():
"""
Tests the decorator syntax for creating transforms
"""
c1 = TCoo1(ra=1 * u.deg, dec=2 * u.deg)
@frame_transform_graph.transform(FunctionTransform, TCoo1, TCoo2)
def trans(coo1, f):
return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)
c2 = c1.transform_to(TCoo2())
assert_allclose(c2.ra.degree, 1)
assert_allclose(c2.dec.degree, 4)
c3 = TCoo1(r.CartesianRepresentation(x=1 * u.pc, y=1 * u.pc, z=2 * u.pc))
@frame_transform_graph.transform(StaticMatrixTransform, TCoo1, TCoo2)
def matrix():
return [[2, 0, 0], [0, 1, 0], [0, 0, 1]]
c4 = c3.transform_to(TCoo2())
assert_allclose(c4.cartesian.x, 2 * u.pc)
assert_allclose(c4.cartesian.y, 1 * u.pc)
assert_allclose(c4.cartesian.z, 2 * u.pc) |
Tests the spherical <-> cartesian transform functions | def test_sphere_cart():
"""
Tests the spherical <-> cartesian transform functions
"""
from astropy.coordinates import cartesian_to_spherical, spherical_to_cartesian
from astropy.utils import NumpyRNGContext
x, y, z = spherical_to_cartesian(1, 0, 0)
assert_allclose(x, 1)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(0, 1, 1)
assert_allclose(x, 0)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4.0 / 5.0))
assert_allclose(x, 3)
assert_allclose(y, 4)
assert_allclose(z, 0)
r, lat, lon = cartesian_to_spherical(0, 1, 0)
assert_allclose(r, 1)
assert_allclose(lat, 0 * u.deg)
assert_allclose(lon, np.pi / 2 * u.rad)
# test round-tripping
with NumpyRNGContext(13579):
x, y, z = np.random.randn(3, 5)
x2, y2, z2 = spherical_to_cartesian(*cartesian_to_spherical(x, y, z))
assert_allclose(x, x2)
assert_allclose(y, y2)
assert_allclose(z, z2) |
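For reference, the convention exercised here is (r, lat, lon) with x = r cos(lat) cos(lon), y = r cos(lat) sin(lon), z = r sin(lat); a hand computation reproducing the 3-4-0 case above:

```python
# Hand computation of the (r, lat, lon) convention checked by the test.
import numpy as np

r, lat, lon = 5.0, 0.0, np.arcsin(4.0 / 5.0)
x = r * np.cos(lat) * np.cos(lon)
y = r * np.cos(lat) * np.sin(lon)
z = r * np.sin(lat)
print(x, y, z)  # ~ (3.0, 4.0, 0.0), matching the assertions above
```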
This checks that the transformation path prioritization works by
making sure the ICRS -> Gal transformation always goes through FK5
and not FK4. | def test_transform_path_pri():
"""
This checks that the transformation path prioritization works by
making sure the ICRS -> Gal transformation always goes through FK5
and not FK4.
"""
frame_transform_graph.invalidate_cache()
tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic)
assert tpath == [ICRS, FK5, Galactic]
assert td == 2
# but direct from FK4 to Galactic should still be possible
tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic)
assert tpath == [FK4, FK4NoETerms, Galactic]
assert td == 2 |
Checks to make sure observation time is
accounted for at least in FK4 <-> ICRS transformations | def test_obstime():
"""
Checks to make sure observation time is
accounted for at least in FK4 <-> ICRS transformations
"""
b1950 = Time("B1950")
j1975 = Time("J1975")
fk4_50 = FK4(ra=1 * u.deg, dec=2 * u.deg, obstime=b1950)
fk4_75 = FK4(ra=1 * u.deg, dec=2 * u.deg, obstime=j1975)
icrs_50 = fk4_50.transform_to(ICRS())
icrs_75 = fk4_75.transform_to(ICRS())
# now check that the resulting coordinates are *different* - they should be,
# because the obstime is different
assert icrs_50.ra.degree != icrs_75.ra.degree
assert icrs_50.dec.degree != icrs_75.dec.degree |
It was previously possible to define a frame with an attribute with the
same name as a component. We don't want to allow this! | def test_frame_override_component_with_attribute():
"""
It was previously possible to define a frame with an attribute with the
same name as a component. We don't want to allow this!
"""
class BorkedFrame(BaseCoordinateFrame):
ra = Attribute(default=150)
dec = Attribute(default=150)
def trans_func(coo1, f):
pass
trans = FunctionTransform(trans_func, BorkedFrame, ICRS)
with pytest.raises(ValueError) as exc:
trans.register(frame_transform_graph)
assert (
"BorkedFrame" in exc.value.args[0]
and "'ra'" in exc.value.args[0]
and "'dec'" in exc.value.args[0]
) |
Check that combined staticmatrixtransform matrices provide the same
transformation as using an intermediate transformation.
This is somewhat of a regression test for #7706 | def test_static_matrix_combine_paths():
"""
Check that combined staticmatrixtransform matrices provide the same
transformation as using an intermediate transformation.
This is somewhat of a regression test for #7706
"""
class AFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t1 = StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "z"), ICRS, AFrame)
t1.register(frame_transform_graph)
t2 = StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "z").T, AFrame, ICRS)
t2.register(frame_transform_graph)
class BFrame(BaseCoordinateFrame):
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
t3 = StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "x"), ICRS, BFrame)
t3.register(frame_transform_graph)
t4 = StaticMatrixTransform(rotation_matrix(30.0 * u.deg, "x").T, BFrame, ICRS)
t4.register(frame_transform_graph)
c = Galactic(123 * u.deg, 45 * u.deg)
c_direct = c.transform_to(BFrame())
c_through_A = c.transform_to(AFrame()).transform_to(BFrame())
c_through_ICRS = c.transform_to(ICRS()).transform_to(BFrame())
assert quantity_allclose(c_direct.lon, c_through_A.lon)
assert quantity_allclose(c_direct.lat, c_through_A.lat)
assert quantity_allclose(c_direct.lon, c_through_ICRS.lon)
assert quantity_allclose(c_direct.lat, c_through_ICRS.lat)
for t_ in [t1, t2, t3, t4]:
t_.unregister(frame_transform_graph) |
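Background for why the combined paths agree: chaining two static matrix transforms is the same as applying the product of their matrices once. A frame-free sketch of that identity:

```python
# Pure-matrix sketch (no frames): composing two rotations equals applying
# the matrix product, which is what the combined transform path relies on.
import numpy as np
import astropy.units as u
from astropy.coordinates.matrix_utilities import rotation_matrix

m_z = rotation_matrix(30.0 * u.deg, "z")
m_x = rotation_matrix(30.0 * u.deg, "x")
vec = np.array([1.0, 0.0, 0.0])
print(np.allclose(m_x @ (m_z @ vec), (m_x @ m_z) @ vec))  # True
```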
Compare the heliocentric correction to the IRAF rvcorrect.
`generate_IRAF_input` function is provided to show how the comparison data
was produced
def generate_IRAF_input(writefn=None):
dt = test_input_time.utc.datetime
coos = input_radecs # `input_radecs` is implemented as a pytest fixture
lines = []
for ra, dec in zip(coos.ra, coos.dec):
rastr = Angle(ra).to_string(u.hour, sep=":")
decstr = Angle(dec).to_string(u.deg, sep=":")
lines.append(
f"{dt.year} {dt.month} {dt.day} {dt.hour}:{dt.minute} {rastr} {decstr}"
)
if writefn:
with open(writefn, "w") as f:
for l in lines:
f.write(l)
else:
for l in lines:
print(l)
print("Run IRAF as:
astutil
rvcorrect f=<filename> observatory=Paranal")
| def test_helio_iraf(input_radecs):
"""
Compare the heliocentric correction to the IRAF rvcorrect.
`generate_IRAF_input` function is provided to show how the comparison data
was produced
def generate_IRAF_input(writefn=None):
dt = test_input_time.utc.datetime
coos = input_radecs # `input_radecs` is implemented as a pytest fixture
lines = []
for ra, dec in zip(coos.ra, coos.dec):
rastr = Angle(ra).to_string(u.hour, sep=":")
decstr = Angle(dec).to_string(u.deg, sep=":")
lines.append(
f"{dt.year} {dt.month} {dt.day} {dt.hour}:{dt.minute} {rastr} {decstr}"
)
if writefn:
with open(writefn, "w") as f:
for l in lines:
f.write(l)
else:
for l in lines:
print(l)
print("Run IRAF as:\nastutil\nrvcorrect f=<filename> observatory=Paranal")
"""
rvcorr_result = """
# RVCORRECT: Observatory parameters for European Southern Observatory: Paranal
# latitude = -24:37.5
# longitude = 70:24.2
# altitude = 2635
## HJD VOBS VHELIO VLSR VDIURNAL VLUNAR VANNUAL VSOLAR
2457244.50120 0.00 -10.36 -20.35 -0.034 -0.001 -10.325 -9.993
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.50278 0.00 -2.29 -11.75 0.115 0.004 -2.413 -9.459
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.50317 0.00 -17.19 -17.44 0.078 0.001 -17.269 -0.253
2457244.50348 0.00 2.35 -6.21 0.192 0.006 2.156 -8.560
2457244.49959 0.00 2.13 -15.06 -0.078 -0.000 2.211 -17.194
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.50186 0.00 -24.47 -22.16 -0.038 -0.004 -24.433 2.313
2457244.50470 0.00 -11.11 -8.57 0.221 0.005 -11.332 2.534
2457244.50402 0.00 6.90 -0.38 0.259 0.008 6.629 -7.277
2457244.50051 0.00 11.53 -5.78 0.038 0.004 11.489 -17.311
2457244.49768 0.00 -1.84 -19.37 -0.221 -0.004 -1.612 -17.533
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.50109 0.00 -27.69 -22.90 -0.096 -0.006 -27.584 4.785
2457244.50457 0.00 -17.00 -9.30 0.196 0.003 -17.201 7.704
2457244.50532 0.00 2.62 2.97 0.340 0.009 2.276 0.349
2457244.50277 0.00 16.42 4.67 0.228 0.009 16.178 -11.741
2457244.49884 0.00 13.98 -5.48 -0.056 0.002 14.039 -19.463
2457244.49649 0.00 -2.84 -19.84 -0.297 -0.007 -2.533 -17.000
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.50025 0.00 -29.30 -22.47 -0.149 -0.008 -29.146 6.831
2457244.50398 0.00 -21.55 -9.88 0.146 0.001 -21.700 11.670
2457244.50577 0.00 -3.26 4.00 0.356 0.009 -3.623 7.263
2457244.50456 0.00 14.87 11.06 0.357 0.011 14.497 -3.808
2457244.50106 0.00 22.20 7.14 0.149 0.008 22.045 -15.058
2457244.49732 0.00 14.45 -5.44 -0.146 -0.001 14.600 -19.897
2457244.49554 0.00 -3.84 -19.33 -0.356 -0.008 -3.478 -15.491
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49942 0.00 -29.36 -20.83 -0.193 -0.009 -29.157 8.527
2457244.50312 0.00 -24.26 -9.75 0.088 -0.001 -24.348 14.511
2457244.50552 0.00 -8.66 4.06 0.327 0.007 -8.996 12.721
2457244.50549 0.00 10.14 14.13 0.413 0.012 9.715 3.994
2457244.50305 0.00 23.35 15.76 0.306 0.011 23.031 -7.586
2457244.49933 0.00 24.78 8.18 0.056 0.006 24.721 -16.601
2457244.49609 0.00 13.77 -5.06 -0.221 -0.003 13.994 -18.832
2457244.49483 0.00 -4.53 -17.77 -0.394 -0.010 -4.131 -13.237
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49907 0.00 -28.17 -17.30 -0.197 -0.009 -27.966 10.874
2457244.50285 0.00 -22.96 -5.96 0.090 -0.001 -23.048 16.995
2457244.50531 0.00 -7.00 8.16 0.335 0.007 -7.345 15.164
2457244.50528 0.00 12.23 18.47 0.423 0.012 11.795 6.238
2457244.50278 0.00 25.74 20.13 0.313 0.012 25.416 -5.607
2457244.49898 0.00 27.21 12.38 0.057 0.006 27.144 -14.829
2457244.49566 0.00 15.94 -1.17 -0.226 -0.003 16.172 -17.111
2457244.49437 0.00 -2.78 -14.17 -0.403 -0.010 -2.368 -11.387
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49875 0.00 -25.73 -12.99 -0.193 -0.009 -25.525 12.734
2457244.50246 0.00 -20.63 -1.91 0.088 -0.001 -20.716 18.719
2457244.50485 0.00 -5.03 11.90 0.327 0.007 -5.365 16.928
2457244.50482 0.00 13.77 21.97 0.413 0.012 13.347 8.202
2457244.50238 0.00 26.98 23.60 0.306 0.011 26.663 -3.378
2457244.49867 0.00 28.41 16.02 0.056 0.005 28.353 -12.393
2457244.49542 0.00 17.40 2.78 -0.221 -0.003 17.625 -14.625
2457244.49416 0.00 -0.90 -9.93 -0.394 -0.010 -0.499 -9.029
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49894 0.00 -22.20 -7.14 -0.149 -0.008 -22.045 15.058
2457244.50268 0.00 -14.45 5.44 0.146 0.001 -14.600 19.897
2457244.50446 0.00 3.84 19.33 0.356 0.008 3.478 15.491
2457244.50325 0.00 21.97 26.39 0.357 0.011 21.598 4.419
2457244.49975 0.00 29.30 22.47 0.149 0.008 29.146 -6.831
2457244.49602 0.00 21.55 9.88 -0.146 -0.001 21.700 -11.670
2457244.49423 0.00 3.26 -4.00 -0.356 -0.009 3.623 -7.263
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49921 0.00 -17.43 -0.77 -0.096 -0.006 -17.333 16.664
2457244.50269 0.00 -6.75 12.83 0.196 0.003 -6.949 19.583
2457244.50344 0.00 12.88 25.10 0.340 0.009 12.527 12.227
2457244.50089 0.00 26.67 26.80 0.228 0.009 26.430 0.137
2457244.49696 0.00 24.24 16.65 -0.056 0.002 24.290 -7.584
2457244.49461 0.00 7.42 2.29 -0.297 -0.007 7.719 -5.122
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49949 0.00 -11.53 5.78 -0.038 -0.004 -11.489 17.311
2457244.50232 0.00 1.84 19.37 0.221 0.004 1.612 17.533
2457244.50165 0.00 19.84 27.56 0.259 0.008 19.573 7.721
2457244.49814 0.00 24.47 22.16 0.038 0.004 24.433 -2.313
2457244.49530 0.00 11.11 8.57 -0.221 -0.005 11.332 -2.534
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.50041 0.00 -2.13 15.06 0.078 0.000 -2.211 17.194
2457244.50071 0.00 17.41 26.30 0.192 0.006 17.214 8.888
2457244.49683 0.00 17.19 17.44 -0.078 -0.001 17.269 0.253
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49975 0.00 14.20 23.86 0.115 0.004 14.085 9.656
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49805 0.00 6.84 16.77 -0.034 -0.001 6.874 9.935
"""
vhs_iraf = []
for line in rvcorr_result.strip().split("\n")[5:]:
vhs_iraf.append(float(line.split()[2]))
vhs_iraf = vhs_iraf * u.km / u.s
targets = SkyCoord(input_radecs, obstime=test_input_time, location=test_input_loc)
vhs_astropy = targets.radial_velocity_correction("heliocentric")
assert_quantity_allclose(vhs_astropy, vhs_iraf, atol=150 * u.m / u.s) |
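For context, a correction like VHELIO above is normally combined with a measured radial velocity following the recipe given in the astropy documentation for radial_velocity_correction, rv + vcorr + rv * vcorr / c. A hedged sketch with purely illustrative numbers:

```python
# Sketch only: applying a heliocentric/barycentric correction to a measured
# radial velocity.  The observed value is made up; the correction is the
# first VHELIO entry from the IRAF table above.
import astropy.units as u
from astropy.constants import c

rv_obs = 20.0 * u.km / u.s    # hypothetical measured radial velocity
v_corr = -10.36 * u.km / u.s  # first VHELIO value in the table above
rv_corrected = rv_obs + v_corr + rv_obs * v_corr / c
print(rv_corrected.to(u.km / u.s))
```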
Make sure that when we include the proper motion and radial velocity of
a SkyCoord, our velocity corrections remain close to TEMPO2.
We check that tau Ceti is within 5mm/s | def test_regression_10094():
"""
Make sure that when we include the proper motion and radial velocity of
a SkyCoord, our velocity corrections remain close to TEMPO2.
We check that tau Ceti is within 5mm/s
"""
# Wright & Eastman (2014) Table2
# Corrections for tau Ceti
wright_table = Table.read(
get_pkg_data_filename("coordinates/wright_eastmann_2014_tau_ceti.fits")
)
reduced_jds = wright_table["JD-2400000"]
tempo2 = wright_table["TEMPO2"]
barycorr = wright_table["BARYCORR"]
# tau Ceti Hipparcos data
tauCet = SkyCoord(
"01 44 05.1275 -15 56 22.4006",
unit=(u.hour, u.deg),
pm_ra_cosdec=-1721.05 * u.mas / u.yr,
pm_dec=854.16 * u.mas / u.yr,
distance=Distance(parallax=273.96 * u.mas),
radial_velocity=-16.597 * u.km / u.s,
obstime=Time(48348.5625, format="mjd"),
)
# CTIO location as used in Wright & Eastmann
xyz = u.Quantity([1814985.3, -5213916.8, -3187738.1], u.m)
obs = EarthLocation(*xyz)
times = Time(2400000, reduced_jds, format="jd")
tempo2 = tempo2 * speed_of_light
barycorr = barycorr * speed_of_light
astropy = tauCet.radial_velocity_correction(location=obs, obstime=times)
assert_quantity_allclose(astropy, tempo2, atol=5 * u.mm / u.s)
assert_quantity_allclose(astropy, barycorr, atol=5 * u.mm / u.s) |
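A small side note on the Distance(parallax=...) input used above: it is just the reciprocal-parallax distance, so tau Ceti's 273.96 mas corresponds to roughly 3.65 pc. Illustrative sketch:

```python
# Illustrative check of the parallax-to-distance conversion used above.
import astropy.units as u
from astropy.coordinates import Distance

print(Distance(parallax=273.96 * u.mas).to(u.pc))  # ~3.65 pc
```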
Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK4
conversion, with arbitrary equinoxes and epoch of observation. | def ref_fk4_no_e_fk4(fnout="fk4_no_e_fk4.csv"):
"""
Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK4
conversion, with arbitrary equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
# Sample uniformly on the unit sphere. These will be either the FK4
# coordinates for the transformation to FK5, or the FK5 coordinates for the
# transformation to FK4.
ra = np.random.uniform(0.0, 360.0, N)
dec = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
ra_fk4ne, dec_fk4ne = [], []
ra_fk4, dec_fk4 = [], []
for i in range(N):
# Set up frames for AST
frame_fk4ne = Ast.SkyFrame(f"System=FK4-NO-E,Epoch={obstime[i]},Equinox=B1950")
frame_fk4 = Ast.SkyFrame(f"System=FK4,Epoch={obstime[i]},Equinox=B1950")
# FK4 to FK4 (no E-terms)
frameset = frame_fk4.convert(frame_fk4ne)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk4ne.append(coords[0, 0])
dec_fk4ne.append(coords[1, 0])
# FK4 (no E-terms) to FK4
frameset = frame_fk4ne.convert(frame_fk4)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk4.append(coords[0, 0])
dec_fk4.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="ra_in", data=ra))
t.add_column(Column(name="dec_in", data=dec))
t.add_column(Column(name="ra_fk4ne", data=ra_fk4ne))
t.add_column(Column(name="dec_fk4ne", data=dec_fk4ne))
t.add_column(Column(name="ra_fk4", data=ra_fk4))
t.add_column(Column(name="dec_fk4", data=dec_fk4))
f = open(os.path.join("data", fnout), "wb")
f.write(
f"# This file was generated with the {os.path.basename(__file__)} script, and"
" the reference values were computed using AST\n"
)
t.write(f, format="ascii", delimiter=",") |
Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK5
conversion, with arbitrary equinoxes and epoch of observation. | def ref_fk4_no_e_fk5(fnout="fk4_no_e_fk5.csv"):
"""
Accuracy tests for the FK4 (with no E-terms of aberration) to/from FK5
conversion, with arbitrary equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
# Sample uniformly on the unit sphere. These will be either the FK4
# coordinates for the transformation to FK5, or the FK5 coordinates for the
# transformation to FK4.
ra = np.random.uniform(0.0, 360.0, N)
dec = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
equinox_fk4 = [f"B{x:7.2f}" for x in np.random.uniform(1925.0, 1975.0, N)]
equinox_fk5 = [f"J{x:7.2f}" for x in np.random.uniform(1975.0, 2025.0, N)]
ra_fk4, dec_fk4 = [], []
ra_fk5, dec_fk5 = [], []
for i in range(N):
# Set up frames for AST
frame_fk4 = Ast.SkyFrame(
f"System=FK4-NO-E,Epoch={obstime[i]},Equinox={equinox_fk4[i]}"
)
frame_fk5 = Ast.SkyFrame(
f"System=FK5,Epoch={obstime[i]},Equinox={equinox_fk5[i]}"
)
# FK4 to FK5
frameset = frame_fk4.convert(frame_fk5)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk5.append(coords[0, 0])
dec_fk5.append(coords[1, 0])
# FK5 to FK4
frameset = frame_fk5.convert(frame_fk4)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk4.append(coords[0, 0])
dec_fk4.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="equinox_fk4", data=equinox_fk4))
t.add_column(Column(name="equinox_fk5", data=equinox_fk5))
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="ra_in", data=ra))
t.add_column(Column(name="dec_in", data=dec))
t.add_column(Column(name="ra_fk5", data=ra_fk5))
t.add_column(Column(name="dec_fk5", data=dec_fk5))
t.add_column(Column(name="ra_fk4", data=ra_fk4))
t.add_column(Column(name="dec_fk4", data=dec_fk4))
f = open(os.path.join("data", fnout), "wb")
f.write(
f"# This file was generated with the {os.path.basename(__file__)} script, and"
" the reference values were computed using AST\n"
)
t.write(f, format="ascii", delimiter=",") |
Accuracy tests for the Galactic to/from FK4 conversion, with arbitrary
equinoxes and epoch of observation. | def ref_galactic_fk4(fnout="galactic_fk4.csv"):
"""
Accuracy tests for the Galactic to/from FK4 conversion, with arbitrary
equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
# Sample uniformly on the unit sphere. These will be either the Galactic
# coordinates for the transformation to FK4, or the FK4 coordinates for the
# transformation to Galactic.
lon = np.random.uniform(0.0, 360.0, N)
lat = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
equinox_fk4 = [f"J{x:7.2f}" for x in np.random.uniform(1975.0, 2025.0, N)]
lon_gal, lat_gal = [], []
ra_fk4, dec_fk4 = [], []
for i in range(N):
# Set up frames for AST
frame_gal = Ast.SkyFrame(f"System=Galactic,Epoch={obstime[i]}")
frame_fk4 = Ast.SkyFrame(
f"System=FK4,Epoch={obstime[i]},Equinox={equinox_fk4[i]}"
)
# Galactic to FK4
frameset = frame_gal.convert(frame_fk4)
coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
ra_fk4.append(coords[0, 0])
dec_fk4.append(coords[1, 0])
# FK4 to Galactic
frameset = frame_fk4.convert(frame_gal)
coords = np.degrees(frameset.tran([[np.radians(lon[i])], [np.radians(lat[i])]]))
lon_gal.append(coords[0, 0])
lat_gal.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="equinox_fk4", data=equinox_fk4))
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="lon_in", data=lon))
t.add_column(Column(name="lat_in", data=lat))
t.add_column(Column(name="ra_fk4", data=ra_fk4))
t.add_column(Column(name="dec_fk4", data=dec_fk4))
t.add_column(Column(name="lon_gal", data=lon_gal))
t.add_column(Column(name="lat_gal", data=lat_gal))
f = open(os.path.join("data", fnout), "wb")
f.write(
f"# This file was generated with the {os.path.basename(__file__)} script, and"
" the reference values were computed using AST\n"
)
t.write(f, format="ascii", delimiter=",") |
Accuracy tests for the ICRS to/from FK5 conversion, with arbitrary
equinoxes and epoch of observation. | def ref_icrs_fk5(fnout="icrs_fk5.csv"):
"""
Accuracy tests for the ICRS to/from FK5 conversion, with arbitrary
equinoxes and epoch of observation.
"""
import starlink.Ast as Ast
np.random.seed(12345)
N = 200
# Sample uniformly on the unit sphere. These will be either the ICRS
# coordinates for the transformation to FK5, or the FK5 coordinates for the
# transformation to ICRS.
ra = np.random.uniform(0.0, 360.0, N)
dec = np.degrees(np.arcsin(np.random.uniform(-1.0, 1.0, N)))
# Generate random observation epoch and equinoxes
obstime = [f"B{x:7.2f}" for x in np.random.uniform(1950.0, 2000.0, N)]
equinox_fk5 = [f"J{x:7.2f}" for x in np.random.uniform(1975.0, 2025.0, N)]
ra_icrs, dec_icrs = [], []
ra_fk5, dec_fk5 = [], []
for i in range(N):
# Set up frames for AST
frame_icrs = Ast.SkyFrame(f"System=ICRS,Epoch={obstime[i]}")
frame_fk5 = Ast.SkyFrame(
f"System=FK5,Epoch={obstime[i]},Equinox={equinox_fk5[i]}"
)
# ICRS to FK5
frameset = frame_icrs.convert(frame_fk5)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_fk5.append(coords[0, 0])
dec_fk5.append(coords[1, 0])
# FK5 to ICRS
frameset = frame_fk5.convert(frame_icrs)
coords = np.degrees(frameset.tran([[np.radians(ra[i])], [np.radians(dec[i])]]))
ra_icrs.append(coords[0, 0])
dec_icrs.append(coords[1, 0])
# Write out table to a CSV file
t = Table()
t.add_column(Column(name="equinox_fk5", data=equinox_fk5))
t.add_column(Column(name="obstime", data=obstime))
t.add_column(Column(name="ra_in", data=ra))
t.add_column(Column(name="dec_in", data=dec))
t.add_column(Column(name="ra_fk5", data=ra_fk5))
t.add_column(Column(name="dec_fk5", data=dec_fk5))
t.add_column(Column(name="ra_icrs", data=ra_icrs))
t.add_column(Column(name="dec_icrs", data=dec_icrs))
f = open(os.path.join("data", fnout), "wb")
f.write(
f"# This file was generated with the {os.path.basename(__file__)} script, and"
" the reference values were computed using AST\n"
)
t.write(f, format="ascii", delimiter=",") |
Check that Astropy gives consistent results with an IDL hor2eq example.
See : http://idlastro.gsfc.nasa.gov/ftp/pro/astro/hor2eq.pro
Test is against these run outputs, run at 2000-01-01T12:00:00::
# NORMAL ATMOSPHERE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=781.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 53 40 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 30.1 (hh:mm:ss)
Ra, Dec: 07 36 55.6 +15 25 02 (Apparent Coords)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000.0000)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000)
IDL> print, ra, dec
114.23004 15.418818
# NO PRESSURE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=0.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 54 41 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 26.4 (hh:mm:ss)
Ra, Dec: 07 36 59.3 +15 25 31 (Apparent Coords)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000.0000)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000)
IDL> print, ra, dec
114.24554 15.427022 | def test_against_hor2eq():
"""Check that Astropy gives consistent results with an IDL hor2eq example.
See : http://idlastro.gsfc.nasa.gov/ftp/pro/astro/hor2eq.pro
Test is against these run outputs, run at 2000-01-01T12:00:00::
# NORMAL ATMOSPHERE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=781.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 53 40 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 30.1 (hh:mm:ss)
Ra, Dec: 07 36 55.6 +15 25 02 (Apparent Coords)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000.0000)
Ra, Dec: 07 36 55.2 +15 25 08 (J2000)
IDL> print, ra, dec
114.23004 15.418818
# NO PRESSURE CASE
IDL> hor2eq, ten(37,54,41), ten(264,55,06), 2451545.0d, ra, dec, /verb, obs='kpno', pres=0.0, temp=273.0
Latitude = +31 57 48.0 Longitude = *** 36 00.0
Julian Date = 2451545.000000
Az, El = 17 39 40.4 +37 54 41 (Observer Coords)
Az, El = 17 39 40.4 +37 54 41 (Apparent Coords)
LMST = +11 15 26.5
LAST = +11 15 25.7
Hour Angle = +03 38 26.4 (hh:mm:ss)
Ra, Dec: 07 36 59.3 +15 25 31 (Apparent Coords)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000.0000)
Ra, Dec: 07 36 58.9 +15 25 37 (J2000)
IDL> print, ra, dec
114.24554 15.427022
"""
# Observatory position for `kpno` from here:
# http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro
location = EarthLocation(
lon=Angle("-111d36.0m"), lat=Angle("31d57.8m"), height=2120.0 * u.m
)
obstime = Time(2451545.0, format="jd", scale="ut1")
altaz_frame = AltAz(
obstime=obstime,
location=location,
temperature=0 * u.deg_C,
pressure=0.781 * u.bar,
)
altaz_frame_noatm = AltAz(
obstime=obstime,
location=location,
temperature=0 * u.deg_C,
pressure=0.0 * u.bar,
)
altaz = SkyCoord("264d55m06s 37d54m41s", frame=altaz_frame)
altaz_noatm = SkyCoord("264d55m06s 37d54m41s", frame=altaz_frame_noatm)
radec_frame = "icrs"
radec_actual = altaz.transform_to(radec_frame)
radec_actual_noatm = altaz_noatm.transform_to(radec_frame)
radec_expected = SkyCoord("07h36m55.2s +15d25m08s", frame=radec_frame)
distance = radec_actual.separation(radec_expected).to("arcsec")
# this comes from running the example hor2eq but with the pressure set to 0
radec_expected_noatm = SkyCoord("07h36m58.9s +15d25m37s", frame=radec_frame)
distance_noatm = radec_actual_noatm.separation(radec_expected_noatm).to("arcsec")
# The baseline difference is ~2.3 arcsec with one atm of pressure. The
# difference is mainly due to the somewhat different atmospheric model that
# hor2eq assumes. This is confirmed by the second test which has the
# atmosphere "off" - the residual difference is small enough to be embedded
# in the assumptions about "J2000" or rounding errors.
assert distance < 5 * u.arcsec
assert distance_noatm < 0.4 * u.arcsec |
Test run of pyephem, just in case the numbers below need to be reproduced. | def run_pyephem():
"""Test run of pyephem, just in case the numbers below need to be reproduced."""
import ephem
observer = ephem.Observer()
observer.lon = -1 * np.radians(109 + 24 / 60.0 + 53.1 / 60**2)
observer.lat = np.radians(33 + 41 / 60.0 + 46.0 / 60.0**2)
observer.elevation = 300
observer.date = 2455822.868055556 - ephem.julian_date(0)
ra, dec = observer.radec_of(np.radians(6.8927), np.radians(60.7665))
print(f"EPHEM: {observer.date}: {np.degrees(ra)}, {np.degrees(dec)}") |
Check that Astropy gives consistent results with one PyEphem example.
PyEphem: https://rhodesmill.org/pyephem/
See example input and output here:
https://gist.github.com/zonca/1672906
https://github.com/phn/pytpm/issues/2#issuecomment-3698679 | def test_against_pyephem():
"""Check that Astropy gives consistent results with one PyEphem example.
PyEphem: https://rhodesmill.org/pyephem/
See example input and output here:
https://gist.github.com/zonca/1672906
https://github.com/phn/pytpm/issues/2#issuecomment-3698679
"""
obstime = Time("2011-09-18 08:50:00")
location = EarthLocation(
lon=Angle("-109d24m53.1s"), lat=Angle("33d41m46.0s"), height=300.0 * u.m
)
# We are using the default pressure and temperature in PyEphem
# relative_humidity = ?
# obswl = ?
altaz_frame = AltAz(
obstime=obstime,
location=location,
temperature=15 * u.deg_C,
pressure=1.010 * u.bar,
)
altaz = SkyCoord("6.8927d +60.7665d", frame=altaz_frame)
radec_actual = altaz.transform_to("icrs")
radec_expected = SkyCoord("27.107480889479397d +62.512687777362046d", frame="icrs")
distance_ephem = radec_actual.separation(radec_expected).to("arcsec")
# 2021-04-06: 2.42 arcsec
assert distance_ephem < 3 * u.arcsec
# Add assert on current Astropy result so that we notice if something changes
radec_expected = SkyCoord("27.10602683d +62.51275391d", frame="icrs")
distance_astropy = radec_actual.separation(radec_expected).to("arcsec")
# 2021-04-06: 5e-6 arcsec (erfa 1.7.2 vs erfa 1.7.1).
assert distance_astropy < 0.1 * u.arcsec |
Check that Astropy gives consistent results with the JPL Horizons example.
The input parameters and reference results are taken from this page:
(from the first row of the Results table at the bottom of that page)
http://ssd.jpl.nasa.gov/?horizons_tutorial | def test_against_jpl_horizons():
"""Check that Astropy gives consistent results with the JPL Horizons example.
The input parameters and reference results are taken from this page:
(from the first row of the Results table at the bottom of that page)
http://ssd.jpl.nasa.gov/?horizons_tutorial
"""
obstime = Time("1998-07-28 03:00")
location = EarthLocation(
lon=Angle("248.405300d"), lat=Angle("31.9585d"), height=2.06 * u.km
)
# No atmosphere
altaz_frame = AltAz(obstime=obstime, location=location)
altaz = SkyCoord("143.2970d 2.6223d", frame=altaz_frame)
radec_actual = altaz.transform_to("icrs")
radec_expected = SkyCoord("19h24m55.01s -40d56m28.9s", frame="icrs")
distance = radec_actual.separation(radec_expected).to("arcsec")
# 2021-04-06: astropy 4.2.1, erfa 1.7.1: 0.23919259 arcsec
# 2021-04-06: astropy 4.3dev, erfa 1.7.2: 0.2391959 arcsec
assert distance < 1 * u.arcsec |
http://phn.github.io/pytpm/conversions.html#fk5-equinox-and-epoch-j2000-0-to-topocentric-observed | def test_fk5_equinox_and_epoch_j2000_0_to_topocentric_observed():
"""
http://phn.github.io/pytpm/conversions.html#fk5-equinox-and-epoch-j2000-0-to-topocentric-observed
"""
# Observatory position for `kpno` from here:
# http://idlastro.gsfc.nasa.gov/ftp/pro/astro/observatory.pro
location = EarthLocation(
lon=Angle("-111.598333d"), lat=Angle("31.956389d"), height=2093.093 * u.m
) # TODO: height correct?
obstime = Time("2010-01-01 12:00:00")
# relative_humidity = ?
# obswl = ?
altaz_frame = AltAz(
obstime=obstime,
location=location,
temperature=0 * u.deg_C,
pressure=0.781 * u.bar,
)
radec = SkyCoord("12h22m54.899s 15d49m20.57s", frame="fk5")
altaz_actual = radec.transform_to(altaz_frame)
altaz_expected = SkyCoord("264d55m06s 37d54m41s", frame="altaz")
# altaz_expected = SkyCoord('343.586827647d 15.7683070508d', frame='altaz')
# altaz_expected = SkyCoord('133.498195532d 22.0162383595d', frame='altaz')
distance = altaz_actual.separation(altaz_expected)
# print(altaz_actual)
# print(altaz_expected)
# print(distance)
"""TODO: Current output is completely incorrect ... xfailing this test for now.
<SkyCoord (AltAz: obstime=2010-01-01 12:00:00.000, location=(-1994497.7199061865, -5037954.447348028, 3357437.2294832403) m, pressure=781.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron):00:00.000, location=(-1994497.7199061865, -5037954.447348028, 3357437.2294832403) m, pressure=781.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=133.4869896371561 deg, alt=67.97857990957701 deg>
<SkyCoord (AltAz: obstime=None, location=None, pressure=0.0 hPa, temperature=0.0 deg_C, relative_humidity=0, obswl=1.0 micron): az=264.91833333333335 deg, alt=37.91138888888889 deg>
68d02m45.732s
"""
assert distance < 1 * u.arcsec |
Check that Astropy's Ecliptic systems give answers consistent with pyTPM
Currently this is only testing against the example given in the pytpm docs | def test_against_pytpm_doc_example():
"""
Check that Astropy's Ecliptic systems give answers consistent with pyTPM
Currently this is only testing against the example given in the pytpm docs
"""
fk5_in = SkyCoord("12h22m54.899s", "15d49m20.57s", frame=FK5(equinox="J2000"))
pytpm_out = BarycentricMeanEcliptic(
lon=178.78256462 * u.deg, lat=16.7597002513 * u.deg, equinox="J2000"
)
astropy_out = fk5_in.transform_to(pytpm_out)
assert pytpm_out.separation(astropy_out) < (1 * u.arcsec) |
Check that the ecliptic transformations for heliocentric and barycentric
at least more or less make sense | def test_ecliptic_heliobary():
"""
Check that the ecliptic transformations for heliocentric and barycentric
at least more or less make sense
"""
icrs = ICRS(1 * u.deg, 2 * u.deg, distance=1.5 * R_sun)
bary = icrs.transform_to(BarycentricMeanEcliptic())
helio = icrs.transform_to(HeliocentricMeanEcliptic())
# make sure there's a sizable distance shift - in 3d hundreds of km, but
# this is 1D so we allow it to be somewhat smaller
assert np.abs(bary.distance - helio.distance) > 1 * u.km
# now make something that's got the location of helio but in bary's frame.
# this is a convenience to allow `separation` to work as expected
helio_in_bary_frame = bary.realize_frame(helio.cartesian)
assert bary.separation(helio_in_bary_frame) > 1 * u.arcmin |
Check that the various ecliptic transformations at least roundtrip | def test_ecliptic_roundtrips(trueframe, meanframe):
"""
Check that the various ecliptic transformations at least roundtrip
"""
icrs = ICRS(1 * u.deg, 2 * u.deg, distance=1.5 * R_sun)
truecoo = icrs.transform_to(trueframe())
meancoo = truecoo.transform_to(meanframe())
truecoo2 = meancoo.transform_to(trueframe())
assert not quantity_allclose(truecoo.cartesian.xyz, meancoo.cartesian.xyz)
assert quantity_allclose(truecoo.cartesian.xyz, truecoo2.cartesian.xyz) |
Check that the ecliptic true/mean transformations preserve latitude | def test_ecliptic_true_mean_preserve_latitude(trueframe, meanframe):
"""
Check that the ecliptic true/mean transformations preserve latitude
"""
truecoo = trueframe(90 * u.deg, 0 * u.deg, distance=1 * u.AU)
meancoo = truecoo.transform_to(meanframe())
assert not quantity_allclose(truecoo.lon, meancoo.lon)
assert quantity_allclose(truecoo.lat, meancoo.lat, atol=1e-10 * u.arcsec) |
Check that the geocentric version at least gets well away from GCRS. For a
true "accuracy" test we need a comparison dataset that is similar to the
geocentric/GCRS comparison we want to do here. Contributions welcome! | def test_ecl_geo():
"""
Check that the geocentric version at least gets well away from GCRS. For a
true "accuracy" test we need a comparison dataset that is similar to the
geocentric/GCRS comparison we want to do here. Contributions welcome!
"""
gcrs = GCRS(10 * u.deg, 20 * u.deg, distance=1.5 * R_earth)
gecl = gcrs.transform_to(GeocentricMeanEcliptic())
assert quantity_allclose(gecl.distance, gcrs.distance) |
Test that transforms to/from ecliptic coordinates work on array coordinates
(not testing for accuracy.) | def test_arraytransforms():
"""
Test that transforms to/from ecliptic coordinates work on array coordinates
(not testing for accuracy.)
"""
ra = np.ones((4,), dtype=float) * u.deg
dec = 2 * np.ones((4,), dtype=float) * u.deg
distance = np.ones((4,), dtype=float) * u.au
test_icrs = ICRS(ra=ra, dec=dec, distance=distance)
test_gcrs = GCRS(test_icrs.data)
bary_arr = test_icrs.transform_to(BarycentricMeanEcliptic())
assert bary_arr.shape == ra.shape
helio_arr = test_icrs.transform_to(HeliocentricMeanEcliptic())
assert helio_arr.shape == ra.shape
geo_arr = test_gcrs.transform_to(GeocentricMeanEcliptic())
assert geo_arr.shape == ra.shape
# now check that we also can go back the other way without shape problems
bary_icrs = bary_arr.transform_to(ICRS())
assert bary_icrs.shape == test_icrs.shape
helio_icrs = helio_arr.transform_to(ICRS())
assert helio_icrs.shape == test_icrs.shape
geo_gcrs = geo_arr.transform_to(GCRS())
assert geo_gcrs.shape == test_gcrs.shape |
Combine two sets of affine parameters.
The parameters for an affine transformation are a 3 x 3 Cartesian
transformation matrix and a displacement vector, which can include an
attached velocity. Either type of parameter can be ``None``. | def _combine_affine_params(params, next_params):
"""
Combine two sets of affine parameters.
The parameters for an affine transformation are a 3 x 3 Cartesian
transformation matrix and a displacement vector, which can include an
attached velocity. Either type of parameter can be ``None``.
"""
M, vec = params
next_M, next_vec = next_params
# Multiply the transformation matrices if they both exist
if M is not None and next_M is not None:
new_M = next_M @ M
else:
new_M = M if M is not None else next_M
if vec is not None:
# Transform the first displacement vector by the second transformation matrix
if next_M is not None:
vec = vec.transform(next_M)
# Calculate the new displacement vector
if next_vec is not None:
if "s" in vec.differentials and "s" in next_vec.differentials:
# Adding vectors with velocities takes more steps
# TODO: Add support in representation.py
new_vec_velocity = vec.differentials["s"] + next_vec.differentials["s"]
new_vec = vec.without_differentials() + next_vec.without_differentials()
new_vec = new_vec.with_differentials({"s": new_vec_velocity})
else:
new_vec = vec + next_vec
else:
new_vec = vec
else:
new_vec = next_vec
return new_M, new_vec |
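The identity this helper implements (ignoring velocities) is that applying (M1, v1) followed by (M2, v2) equals applying (M2 @ M1, v1.transform(M2) + v2) in one go. A small sketch verifying that with CartesianRepresentation:

```python
# Sketch of the affine-composition identity behind _combine_affine_params
# (positions only; velocity differentials need the extra handling above).
import numpy as np
import astropy.units as u
from astropy.coordinates import CartesianRepresentation
from astropy.coordinates.matrix_utilities import rotation_matrix

p = CartesianRepresentation([1.0, 2.0, 3.0] * u.km)
M1, v1 = rotation_matrix(20 * u.deg, "z"), CartesianRepresentation([1, 0, 0] * u.km)
M2, v2 = rotation_matrix(40 * u.deg, "x"), CartesianRepresentation([0, 5, 0] * u.km)

step_by_step = (p.transform(M1) + v1).transform(M2) + v2
combined = p.transform(M2 @ M1) + (v1.transform(M2) + v2)
print(np.allclose(step_by_step.xyz.value, combined.xyz.value))  # True
```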
A `dict` of all the attributes of all frame classes in this
`~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them. | def frame_attrs_from_set(frame_set):
"""
A `dict` of all the attributes of all frame classes in this
`~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = {}
for frame_cls in frame_set:
result.update(frame_cls.frame_attributes)
return result |
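A small usage sketch for the helper above, using two built-in frame classes (the exact attribute set depends on the astropy version):

```python
# Usage sketch for frame_attrs_from_set, defined above.
from astropy.coordinates import FK5, AltAz

attrs = frame_attrs_from_set({FK5, AltAz})
print(sorted(attrs))  # typically includes 'equinox', 'obstime', 'location', 'pressure', ...
```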
A `set` of all component names ever defined within any frame class in
this `~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them. | def frame_comps_from_set(frame_set):
"""
A `set` of all component names ever defined within any frame class in
this `~astropy.coordinates.TransformGraph`.
Broken out of the class so this can be called on a temporary frame set to
validate new additions to the transform graph before actually adding them.
"""
result = set()
for frame_cls in frame_set:
rep_info = frame_cls._frame_specific_representation_info
for mappings in rep_info.values():
for rep_map in mappings:
result.update([rep_map.framename])
return result |
Decorator to precompute the class' signature.
This provides around a 20x speedup for future calls of ``inspect.signature(cls)``.
`Cosmology` has a lot of I/O methods that use the signature, so this is a
significant speedup for those methods.
Note that CPython does not promise that this precomputation is a stable feature.
If it is removed, the worst that will happen is that the signature will be
computed on the fly, the speedup will be lost, and this decorator can be
deprecated. | def _with_signature(cls: type[Cosmology]) -> type[Cosmology]:
"""Decorator to precompute the class' signature.
This provides around a 20x speedup for future calls of ``inspect.signature(cls)``.
`Cosmology` has a lot of I/O methods that use the signature, so this is a
significant speedup for those methods.
Note that CPython does not promise that this precomputation is a stable feature.
If it is removed, the worst that will happen is that the signature will be
computed on the fly, the speedup will be lost, and this decorator can be
deprecated.
"""
cls.__signature__ = None # clear the signature cache
cls.__signature__ = inspect.signature(cls) # add the new signature to the class
return cls |
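The speedup works because inspect.signature returns a precomputed ``__signature__`` attribute directly when one is present. A generic sketch of the mechanism (the Demo class is hypothetical):

```python
# Sketch of the __signature__ caching that _with_signature relies on.
import inspect


class Demo:
    def __init__(self, a, b=1):
        pass


Demo.__signature__ = inspect.signature(Demo)  # precompute once
print(inspect.signature(Demo))                # (a, b=1), served from the cached attribute
```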
Decorator for the dataclass transform.
Returns
-------
cls : type
The `cls` transformed into a frozen `~dataclasses.dataclass`.
The ``__eq__`` method is custom (``eq=False``).
The signature is precomputed and added to the class. | def dataclass_decorator(cls):
"""Decorator for the dataclass transform.
Returns
-------
cls : type
The `cls` transformed into a frozen `~dataclasses.dataclass`.
The ``__eq__`` method is custom (``eq=False``).
The signature is precomputed and added to the class.
"""
return _with_signature(dataclass(frozen=True, repr=True, eq=False, init=True)(cls)) |
Get parameters of cosmology representations with lazy import from ``PEP 562``. | def __getattr__(name):
"""Get parameters of cosmology representations with lazy import from ``PEP 562``."""
from astropy.cosmology import realizations
cosmo = getattr(realizations, name)
m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True)
proxy = MappingProxyType(m)
# Cache in this module so `__getattr__` is only called once per `name`.
setattr(sys.modules[__name__], name, proxy)
return proxy |
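This is the PEP 562 pattern: a module-level ``__getattr__`` is only called when normal attribute lookup fails, so caching the result on the module turns every later access into a plain lookup. A generic sketch in a hypothetical module:

```python
# lazy_mod.py -- hypothetical module illustrating the PEP 562 lazy pattern.
import sys


def _expensive_build(name):
    return f"built {name}"


def __getattr__(name):
    value = _expensive_build(name)               # do the slow work once
    setattr(sys.modules[__name__], name, value)  # cache on the module
    return value
```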
Directory, including lazily-imported objects. | def __dir__():
"""Directory, including lazily-imported objects."""
return __all__ |
Make specific realizations from data files with lazy import from ``PEP 562``.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations` | def __getattr__(name: str) -> Cosmology:
"""Make specific realizations from data files with lazy import from ``PEP 562``.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations`
"""
if name not in available:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}.")
cosmo = Cosmology.read(
str(_COSMOLOGY_DATA_DIR / name) + ".ecsv", format="ascii.ecsv"
)
object.__setattr__(
cosmo,
"__doc__",
f"{name} instance of {cosmo.__class__.__qualname__} "
f"cosmology\n(from {cosmo.meta['reference']})",
)
# Cache in this module so `__getattr__` is only called once per `name`.
setattr(sys.modules[__name__], name, cosmo)
return cosmo |
Directory, including lazily-imported objects. | def __dir__() -> list[str]:
"""Directory, including lazily-imported objects."""
return __all__ |
Allow redshift to be 1-to-1 equivalent to dimensionless.
It is special compared to other equivalency pairs in that it allows
this independent of the power to which the redshift is raised, and
independent of whether it is part of a more complicated unit. It is
similar to u.dimensionless_angles() in this respect. | def dimensionless_redshift():
"""Allow redshift to be 1-to-1 equivalent to dimensionless.
It is special compared to other equivalency pairs in that it allows
this independent of the power to which the redshift is raised, and
independent of whether it is part of a more complicated unit. It is
similar to u.dimensionless_angles() in this respect.
"""
return u.Equivalency([(redshift, None)], "dimensionless_redshift") |
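A short usage sketch for the equivalency above (the numeric value is arbitrary):

```python
# With the equivalency enabled, a redshift Quantity converts 1-to-1 to
# plain dimensionless (and back).
import astropy.units as u
import astropy.cosmology.units as cu

z = 3.5 * cu.redshift
print(z.to(u.dimensionless_unscaled, cu.dimensionless_redshift()))  # 3.5
```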
Convert quantities between redshift and distance.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
kind : {'comoving', 'lookback', 'luminosity'}, optional
The distance type for the Equivalency.
Note this does NOT include the angular diameter distance as this
distance measure is not monotonic.
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`, which is used to
convert distance to redshift.
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and distance.
Raises
------
`~astropy.cosmology.CosmologyError`
If the distance corresponds to a redshift that is larger than ``zmax``.
Exception
See :func:`~astropy.cosmology.z_at_value` for possible exceptions, e.g. if the
distance maps to a redshift that is larger than ``zmax``, the maximum redshift.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> d = z.to(u.Mpc, cu.redshift_distance(WMAP9, kind="comoving"))
>>> d # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
The reverse operation is also possible, though not always as simple. To convert a
very large distance to a redshift it might be necessary to specify a large enough
``zmax`` value. See :func:`~astropy.cosmology.z_at_value` for details.
>>> d.to(cu.redshift, cu.redshift_distance(WMAP9, kind="comoving", zmax=1200)) # doctest: +FLOAT_CMP
<Quantity 1100.000 redshift> | def redshift_distance(cosmology=None, kind="comoving", **atzkw):
"""Convert quantities between redshift and distance.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
kind : {'comoving', 'lookback', 'luminosity'}, optional
The distance type for the Equivalency.
Note this does NOT include the angular diameter distance as this
distance measure is not monotonic.
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`, which is used to
convert distance to redshift.
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and distance.
Raises
------
`~astropy.cosmology.CosmologyError`
If the distance corresponds to a redshift that is larger than ``zmax``.
Exception
See :func:`~astropy.cosmology.z_at_value` for possible exceptions, e.g. if the
distance maps to a redshift that is larger than ``zmax``, the maximum redshift.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> d = z.to(u.Mpc, cu.redshift_distance(WMAP9, kind="comoving"))
>>> d # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
The reverse operation is also possible, though not always as simple. To convert a
very large distance to a redshift it might be necessary to specify a large enough
``zmax`` value. See :func:`~astropy.cosmology.z_at_value` for details.
>>> d.to(cu.redshift, cu.redshift_distance(WMAP9, kind="comoving", zmax=1200)) # doctest: +FLOAT_CMP
<Quantity 1100.000 redshift>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
allowed_kinds = ("comoving", "lookback", "luminosity")
if kind not in allowed_kinds:
raise ValueError(f"`kind` is not one of {allowed_kinds}")
method = getattr(cosmology, kind + "_distance")
def z_to_distance(z):
"""Redshift to distance."""
return method(z)
def distance_to_z(d):
"""Distance to redshift."""
return z_at_value(method, d << u.Mpc, **atzkw)
return u.Equivalency(
[(redshift, u.Mpc, z_to_distance, distance_to_z)],
"redshift_distance",
{"cosmology": cosmology, "distance": kind},
) |
Convert quantities between redshift and Hubble parameter and little-h.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and Hubble parameter and little-h unit.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> equivalency = cu.redshift_hubble(WMAP9) # construct equivalency
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh> | def redshift_hubble(cosmology=None, **atzkw):
"""Convert quantities between redshift and Hubble parameter and little-h.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and Hubble parameter and little-h unit.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> equivalency = cu.redshift_hubble(WMAP9) # construct equivalency
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_hubble(z):
"""Redshift to Hubble parameter."""
return cosmology.H(z)
def hubble_to_z(H):
"""Hubble parameter to redshift."""
return z_at_value(cosmology.H, H << (u.km / u.s / u.Mpc), **atzkw)
def z_to_littleh(z):
"""Redshift to :math:`h`-unit Quantity."""
return z_to_hubble(z).to_value(u.km / u.s / u.Mpc) / 100 * littleh
def littleh_to_z(h):
""":math:`h`-unit Quantity to redshift."""
return hubble_to_z(h * 100)
return u.Equivalency(
[
(redshift, u.km / u.s / u.Mpc, z_to_hubble, hubble_to_z),
(redshift, littleh, z_to_littleh, littleh_to_z),
],
"redshift_hubble",
{"cosmology": cosmology},
) |
Convert quantities between redshift and CMB temperature.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> z.to(u.K, cu.redshift_temperature(WMAP9))
<Quantity 3000.225 K> | def redshift_temperature(cosmology=None, **atzkw):
"""Convert quantities between redshift and CMB temperature.
Care should be taken not to misinterpret a relativistic, gravitational, etc.
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> z.to(u.K, cu.redshift_temperature(WMAP9))
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_Tcmb(z):
return cosmology.Tcmb(z)
def Tcmb_to_z(T):
return z_at_value(cosmology.Tcmb, T << u.K, **atzkw)
return u.Equivalency(
[(redshift, u.K, z_to_Tcmb, Tcmb_to_z)],
"redshift_temperature",
{"cosmology": cosmology},
) |
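The temperature equivalency is likewise invertible; the reverse direction calls ``z_at_value`` on ``cosmology.Tcmb``. A small sketch, again assuming WMAP9:
import astropy.units as u
import astropy.cosmology.units as cu
from astropy.cosmology import WMAP9
(3000.225 * u.K).to(cu.redshift, cu.redshift_temperature(WMAP9))  # expect a redshift near 1100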
Convert quantities between measures of cosmological distance.
Note: by default all equivalencies are on and must be explicitly turned off.
Care should be taken not to misinterpret a relativistic, gravitational, etc.
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If `None`, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
distance : {'comoving', 'lookback', 'luminosity'} or None (optional, keyword-only)
The type of distance equivalency to create or `None`.
Default is 'comoving'.
hubble : bool (optional, keyword-only)
Whether to create a Hubble parameter <-> redshift equivalency, using
``Cosmology.H``. Default is `True`.
Tcmb : bool (optional, keyword-only)
Whether to create a CMB temperature <-> redshift equivalency, using
``Cosmology.Tcmb``. Default is `True`.
atzkw : dict or None (optional, keyword-only)
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
With equivalencies between redshift and distance / Hubble / temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> equivalency = cu.with_redshift(WMAP9)
>>> z = 1100 * cu.redshift
Redshift to (comoving) distance:
>>> z.to(u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
Redshift to the Hubble parameter:
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
Redshift to CMB temperature:
>>> z.to(u.K, equivalency)
<Quantity 3000.225 K> | def with_redshift(
cosmology=None, *, distance="comoving", hubble=True, Tcmb=True, atzkw=None
):
"""Convert quantities between measures of cosmological distance.
Note: by default all equivalencies are on and must be explicitly turned off.
Care should be taken not to misinterpret a relativistic, gravitational, etc.
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If `None`, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
distance : {'comoving', 'lookback', 'luminosity'} or None (optional, keyword-only)
The type of distance equivalency to create or `None`.
Default is 'comoving'.
hubble : bool (optional, keyword-only)
Whether to create a Hubble parameter <-> redshift equivalency, using
``Cosmology.H``. Default is `True`.
Tcmb : bool (optional, keyword-only)
Whether to create a CMB temperature <-> redshift equivalency, using
``Cosmology.Tcmb``. Default is `True`.
atzkw : dict or None (optional, keyword-only)
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
With equivalencies between redshift and distance / Hubble / temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> equivalency = cu.with_redshift(WMAP9)
>>> z = 1100 * cu.redshift
Redshift to (comoving) distance:
>>> z.to(u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
Redshift to the Hubble parameter:
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
Redshift to CMB temperature:
>>> z.to(u.K, equivalency)
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
atzkw = atzkw if atzkw is not None else {}
equivs = [] # will append as built
# Hubble <-> Redshift
if hubble:
equivs.extend(redshift_hubble(cosmology, **atzkw))
# CMB Temperature <-> Redshift
if Tcmb:
equivs.extend(redshift_temperature(cosmology, **atzkw))
# Distance <-> Redshift, but need to choose which distance
if distance is not None:
equivs.extend(redshift_distance(cosmology, kind=distance, **atzkw))
# -----------
return u.Equivalency(
equivs,
"with_redshift",
{"cosmology": cosmology, "distance": distance, "hubble": hubble, "Tcmb": Tcmb},
) |
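Each piece of the combined equivalency can be switched off independently. A hedged sketch, again assuming WMAP9, that keeps only the CMB-temperature pair:
import astropy.units as u
import astropy.cosmology.units as cu
from astropy.cosmology import WMAP9
eq = cu.with_redshift(WMAP9, distance=None, hubble=False, Tcmb=True)
(1100 * cu.redshift).to(u.K, eq)  # still works: the Tcmb pair is enabled
# (1100 * cu.redshift).to(u.Mpc, eq)  # would fail: the distance pair is switched off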
Convert between quantities with little-h and the equivalent physical units.
Parameters
----------
H0 : None or `~astropy.units.Quantity` ['frequency']
The value of the Hubble constant to assume. If a
`~astropy.units.Quantity`, will assume the quantity *is* ``H0``. If
`None` (default), use the ``H0`` attribute from
:mod:`~astropy.cosmology.default_cosmology`.
References
----------
For an illuminating discussion on why you may or may not want to use
little-h at all, see https://arxiv.org/pdf/1308.4150.pdf | def with_H0(H0=None):
"""Convert between quantities with little-h and the equivalent physical units.
Parameters
----------
H0 : None or `~astropy.units.Quantity` ['frequency']
The value of the Hubble constant to assume. If a
`~astropy.units.Quantity`, will assume the quantity *is* ``H0``. If
`None` (default), use the ``H0`` attribute from
:mod:`~astropy.cosmology.default_cosmology`.
References
----------
For an illuminating discussion on why you may or may not want to use
little-h at all, see https://arxiv.org/pdf/1308.4150.pdf
"""
if H0 is None:
from .realizations import default_cosmology
H0 = default_cosmology.get().H0
h100_val_unit = u.Unit(100 / (H0.to_value((u.km / u.s) / u.Mpc)) * littleh)
return u.Equivalency([(h100_val_unit, None)], "with_H0", kwargs={"H0": H0}) |
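A short usage sketch (the numbers depend on the assumed H0): a length quoted in "Mpc/h" becomes a physical length once a Hubble constant is substituted.
import astropy.units as u
import astropy.cosmology.units as cu
d = 100 * u.Mpc / cu.littleh
d.to(u.Mpc, cu.with_H0(70 * u.km / u.s / u.Mpc))  # h = 0.7, so roughly 142.86 Mpc
d.to(u.Mpc, cu.with_H0())  # falls back to the H0 of the default cosmology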
Lazily provide deprecated access to private functions that moved to ``astropy.cosmology._utils`` (PEP 562).
Raises
------
AttributeError
If ``name`` is not one of the relocated private functions. | def __getattr__(name: str) -> Callable[..., Any]:
"""Lazily provide deprecated access to private functions that moved to ``astropy.cosmology._utils`` (PEP 562).
Raises
------
AttributeError
If ``name`` is not one of the relocated private functions.
"""
if name not in ("vectorize_redshift_method", "aszarr"):
raise AttributeError(f"module {__name__!r} has no attribute {name!r}.")
from . import _utils
func = deprecated(
since="v6.0",
message=(
"this private function has been moved to the private module"
" `astropy.cosmology._utils`"
),
)(getattr(_utils, name))
sys.modules[__name__].__dict__[name] = func # cache for next time
return func |
Vectorize a method of redshift(s).
Parameters
----------
func : callable or None
method to wrap. If `None` returns a :func:`functools.partial`
with ``nin`` loaded.
nin : int
Number of positional redshift arguments.
Returns
-------
wrapper : callable
:func:`functools.wraps` of ``func`` where the first ``nin``
arguments are converted from |Quantity| to :class:`numpy.ndarray`. | def vectorize_redshift_method(func=None, nin=1):
"""Vectorize a method of redshift(s).
Parameters
----------
func : callable or None
method to wrap. If `None` returns a :func:`functools.partial`
with ``nin`` loaded.
nin : int
Number of positional redshift arguments.
Returns
-------
wrapper : callable
:func:`functools.wraps` of ``func`` where the first ``nin``
arguments are converted from |Quantity| to :class:`numpy.ndarray`.
"""
# allow for pie-syntax & setting nin
if func is None:
return functools.partial(vectorize_redshift_method, nin=nin)
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
"""Wrapper converting arguments to numpy-compatible inputs.
:func:`functools.wraps` of ``func`` where the first ``nin`` arguments are
converted from |Quantity| to `numpy.ndarray` or scalar.
"""
# process inputs
# TODO! quantity-aware vectorization can simplify this.
zs = [
z if not isinstance(z, Quantity) else z.to_value(cu.redshift)
for z in args[:nin]
]
# scalar inputs
if all(isinstance(z, (Number, np.generic)) for z in zs):
return func(self, *zs, *args[nin:], **kwargs)
# non-scalar. use vectorized func
return wrapper.__vectorized__(self, *zs, *args[nin:], **kwargs)
wrapper.__vectorized__ = np.vectorize(func) # attach vectorized function
# TODO! use frompyfunc when can solve return type errors
return wrapper |
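A hypothetical toy method, purely to illustrate the dispatch above: scalars go straight to the wrapped function, arrays go through the attached ``np.vectorize`` copy, and redshift Quantities are first stripped to plain values.
import numpy as np
import astropy.cosmology.units as cu
class Toy:
    @vectorize_redshift_method
    def one_plus_z(self, z):
        # written for a scalar redshift only
        return 1.0 + z
t = Toy()
t.one_plus_z(0.5)  # scalar path: func is called directly
t.one_plus_z(np.array([0.5, 1.0]))  # array path: wrapper.__vectorized__ is used
t.one_plus_z(0.5 * cu.redshift)  # Quantity is converted to its bare value first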
Redshift as a `~numbers.Number` or |ndarray| / |Quantity| / |Column|.
Allows for any ndarray ducktype by checking for attribute "shape". | def aszarr(z):
"""Redshift as a `~numbers.Number` or |ndarray| / |Quantity| / |Column|.
Allows for any ndarray ducktype by checking for attribute "shape".
"""
if isinstance(z, (Number, np.generic)): # scalars
return z
elif hasattr(z, "shape"): # ducktypes NumPy array
if getattr(z, "__module__", "").startswith("pandas"):
# See https://github.com/astropy/astropy/issues/15576. Pandas does not play
# well with others and will ignore unit-ful calculations so we need to
# convert to its underlying value.
z = z.values
if hasattr(z, "unit"): # Quantity Column
return (z << cu.redshift).value # for speed only use enabled equivs
return z
# not one of the preferred types: Number / array ducktype
return Quantity(z, cu.redshift).value |
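A few illustrative calls, sketching the branches above:
import numpy as np
import astropy.cosmology.units as cu
aszarr(1.5)  # Number: returned unchanged
aszarr(np.linspace(0, 1, 3))  # ndarray: returned unchanged
aszarr([0.5, 1.0] * cu.redshift)  # Quantity: bare ndarray of redshift values
aszarr([0.5, 1.0])  # list: converted through Quantity to an ndarray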
Return all variables in the whole class hierarchy. | def all_cls_vars(obj: object | type, /) -> dict[str, Any]:
"""Return all variables in the whole class hierarchy."""
cls = obj if isinstance(obj, type) else obj.__class__
return functools.reduce(operator.__or__, map(vars, cls.mro()[::-1])) |
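A toy illustration with hypothetical classes: attributes are merged from ``object`` down the MRO, so subclasses win, mirroring normal attribute lookup.
class Base:
    x = 1
    tag = "base"
class Child(Base):
    y = 2
    tag = "child"
merged = all_cls_vars(Child())  # an instance or the class itself both work
merged["x"], merged["y"], merged["tag"]  # -> (1, 2, 'child')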
Get all fields of a dataclass, including those not-yet finalized.
Parameters
----------
obj : object | type
A dataclass.
Returns
-------
dict[str, Field | Parameter]
All fields of the dataclass, including those not yet finalized in the class, if
it's still under construction, e.g. in ``__init_subclass__``. | def all_parameters(obj: object, /) -> dict[str, Field | Parameter]:
"""Get all fields of a dataclass, including those not-yet finalized.
Parameters
----------
obj : object | type
A dataclass.
Returns
-------
dict[str, Field | Parameter]
All fields of the dataclass, including those not yet finalized in the class, if
it's still under construction, e.g. in ``__init_subclass__``.
"""
from astropy.cosmology.parameter import Parameter
return {
k: (v if isinstance(v, Parameter) else v.default)
for k, v in all_cls_vars(obj).items()
if (
isinstance(v, Parameter)
or (isinstance(v, Field) and isinstance(v.default, Parameter))
)
} |
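For example (hedged, since the exact parameter set depends on the class and astropy version), the cosmological parameters of ``FlatLambdaCDM`` can be listed like this:
from astropy.cosmology import FlatLambdaCDM
params = all_parameters(FlatLambdaCDM)
sorted(params)  # expected to include names such as 'H0', 'Om0', 'Tcmb0', ...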
Get realizations using lazy import from ``PEP 562``.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations` | def __getattr__(name: str) -> Cosmology:
"""Get realizations using lazy import from ``PEP 562``.
Raises
------
AttributeError
If "name" is not in :mod:`astropy.cosmology.realizations`
"""
if name not in available:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}.")
return getattr(realizations, name) |
Directory, including lazily-imported objects. | def __dir__() -> list[str]:
"""Directory, including lazily-imported objects."""
return __all__ |
Filter ``m``, returning key-value pairs not including keys in ``filter_out``.
Parameters
----------
m : mapping[K, V]
A mapping from which to remove keys in ``filter_out``.
filter_out : sequence[K]
Sequence of keys to filter out from ``m``.
Returns
-------
iterable[K, V]
Iterable of ``(key, value)`` pairs with the ``filter_out`` keys removed. | def filter_keys_from_items(
m: Mapping[K, V], /, filter_out: Sequence[K]
) -> Iterable[K, V]:
"""Filter ``m``, returning key-value pairs not including keys in ``filter_out``.
Parameters
----------
m : mapping[K, V]
A mapping from which to remove keys in ``filter_out``.
filter_out : sequence[K]
Sequence of keys to filter out from ``m``.
Returns
-------
iterable[K, V]
Iterable of ``(key, value)`` pairs with the ``filter_out`` keys removed.
"""
return ((k, v) for k, v in m.items() if k not in filter_out) |
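The helper returns a lazy generator, so materialize it (for example into a ``dict``) to inspect the result:
pairs = filter_keys_from_items({"H0": 70, "Om0": 0.3, "meta": {}}, ["meta"])
dict(pairs)  # -> {'H0': 70, 'Om0': 0.3}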
Test stand-in functions when optional dependencies not installed. | def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.integrate'"):
quad() |
Test getattr raises error for DNE. | def test_getattr_error_attr_not_found():
"""Test getattr raises error for DNE."""
with pytest.raises(ImportError):
from astropy.cosmology.flrw import this_is_not_a_variable |
Test stand-in functions when optional dependencies not installed. | def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
ellipkinc()
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
hyp2f1() |
Test a flat cosmology at z=1 against several other on-line calculators.
Test values were taken from the following web cosmology calculators on
2012-02-11:
Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
(https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)
Kempner: http://www.kempner.net/cosmic.php
iCosmos: http://www.icosmos.co.uk/index.html | def test_flat_z1():
"""Test a flat cosmology at z=1 against several other on-line calculators.
Test values were taken from the following web cosmology calculators on
2012-02-11:
Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
(https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)
Kempner: http://www.kempner.net/cosmic.php
iCosmos: http://www.icosmos.co.uk/index.html
"""
cosmo = FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
# The order of values below is Wright, Kempner, iCosmos'
assert u.allclose(
cosmo.comoving_distance(1), [3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4
)
assert u.allclose(
cosmo.angular_diameter_distance(1),
[1682.3, 1682.4, 1682.3994] * u.Mpc,
rtol=1e-4,
)
assert u.allclose(
cosmo.luminosity_distance(1), [6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4
)
assert u.allclose(
cosmo.lookback_time(1), [7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3
)
assert u.allclose(
cosmo.lookback_distance(1), [2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3
) |
Regression test for #10980
Test that specialized comoving distance methods handle iterable arguments. | def test_comoving_distance_iterable_argument(cosmo, z):
"""
Regression test for #10980
Test that specialized comoving distance methods handle iterable arguments.
"""
assert u.allclose(
cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0.0, z)
) |
Regression test for #10980
Test that specialized comoving distance methods broadcast array arguments. | def test_comoving_distance_broadcast(cosmo):
"""
Regression test for #10980
Test that specialized comoving distance methods broadcast array arguments.
"""
z1 = np.zeros((2, 5))
z2 = np.ones((3, 1, 5))
z3 = np.ones((7, 5))
output_shape = np.broadcast(z1, z2).shape
# Check compatible array arguments return an array with the correct shape
assert cosmo._comoving_distance_z1z2(z1, z2).shape == output_shape
# Check incompatible array arguments raise an error
with pytest.raises(ValueError, match="z1 and z2 have different shapes"):
cosmo._comoving_distance_z1z2(z1, z3) |
Regression test for #8388. | def test_elliptic_comoving_distance_z1z2():
"""Regression test for #8388."""
cosmo = LambdaCDM(70.0, 2.3, 0.05, Tcmb0=0)
z = 0.2
assert u.allclose(
cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0.0, z)
)
assert u.allclose(
cosmo._elliptic_comoving_distance_z1z2(0.0, z),
cosmo._integral_comoving_distance_z1z2(0.0, z),
) |
Tests the effects of changing the temperature of the CMB | def test_ogamma():
"""Tests the effects of changing the temperature of the CMB"""
# Tested against Ned Wright's advanced cosmology calculator,
# Sep 7 2012. The accuracy of our comparison is limited by
# how many digits it outputs, which limits our test to about
# 0.2% accuracy. The NWACC does not allow one
# to change the number of neutrino species, fixing that at 3.
# Also, inspection of the NWACC code shows it uses inaccurate
# constants at the 0.2% level (specifically, a_B),
# so we shouldn't expect to match it that well. The integral is
# also done rather crudely. Therefore, we should not expect
# the NWACC to be accurate to better than about 0.5%, which is
# unfortunate, but reflects a problem with it rather than this code.
# More accurate tests below using Mathematica
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.9, 858.2, 26.855, 13.642] * u.Mpc,
rtol=5e-4,
)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.8, 857.9, 26.767, 13.582] * u.Mpc,
rtol=5e-4,
)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.4, 856.6, 26.489, 13.405] * u.Mpc,
rtol=5e-4,
)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.91, 858.205, 26.8586, 13.6469] * u.Mpc,
rtol=1e-5,
)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.76, 857.817, 26.7688, 13.5841] * u.Mpc,
rtol=1e-5,
)
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
assert u.allclose(
cosmo.angular_diameter_distance(z),
[1651.21, 856.411, 26.4845, 13.4028] * u.Mpc,
rtol=1e-5,
)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0**3 * 2.725**4 / 1.87837e-26
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7**2
Om0 = 1.0 - Or0
hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert u.allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
assert u.allclose(cosmo.comoving_distance(z.astype(int)), targvals, rtol=1e-5)
# Try Tcmb0 = 4
Or0 *= (4.0 / 2.725) ** 4
Om0 = 1.0 - Or0
cosmo = FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert u.allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5) |
Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed). | def test_flat_open_closed_icosmo(file_name):
"""Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed).
"""
with u.add_enabled_units(cu):
tbl = QTable.read(pathlib.Path(__file__).parent / "data" / file_name)
cosmo = LambdaCDM(
H0=100 * tbl.meta["h"], Om0=tbl.meta["Om"], Ode0=tbl.meta["Ol"], Tcmb0=0.0
)
assert u.allclose(cosmo.comoving_transverse_distance(tbl["redshift"]), tbl["dm"])
assert u.allclose(cosmo.angular_diameter_distance(tbl["redshift"]), tbl["da"])
assert u.allclose(cosmo.luminosity_distance(tbl["redshift"]), tbl["dl"]) |
Test if the right units are being returned | def test_units():
"""Test if the right units are being returned"""
cosmo = FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
assert cosmo.comoving_distance(1.0).unit == u.Mpc
assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.luminosity_distance(1.0).unit == u.Mpc
assert cosmo.lookback_time(1.0).unit == u.Gyr
assert cosmo.lookback_distance(1.0).unit == u.Mpc
assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
assert cosmo.Tcmb(1.0).unit == u.K
assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
assert cosmo.Tnu(1.0).unit == u.K
assert cosmo.Tnu([0.0, 1.0]).unit == u.K
assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.critical_density(1.0).unit == u.g / u.cm**3
assert cosmo.comoving_volume(1.0).unit == u.Mpc**3
assert cosmo.age(1.0).unit == u.Gyr
assert cosmo.distmod(1.0).unit == u.mag |
Test of absorption and lookback integrand | def test_xtfuncs():
"""Test of absorption and lookback integrand"""
cosmo = LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
z = np.array([2.0, 3.2])
assert u.allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378, rtol=1e-4)
assert u.allclose(
cosmo.lookback_time_integrand(z), [0.10333179, 0.04644541], rtol=1e-4
)
assert u.allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402, rtol=1e-4)
assert u.allclose(
cosmo.abs_distance_integrand(z), [2.7899584, 3.44104758], rtol=1e-4
) |
Check that age in de Sitter and Einstein-de Sitter Universes work.
Some analytic solutions fail at these critical points. | def test_age_in_special_cosmologies():
"""Check that age in de Sitter and Einstein-de Sitter Universes work.
Some analytic solutions fail at these critical points.
"""
c_dS = FlatLambdaCDM(100, 0, Tcmb0=0)
assert u.allclose(c_dS.age(z=0), np.inf * u.Gyr)
assert u.allclose(c_dS.age(z=1), np.inf * u.Gyr)
assert u.allclose(c_dS.lookback_time(z=0), 0 * u.Gyr)
assert u.allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr)
c_EdS = FlatLambdaCDM(100, 1, Tcmb0=0)
assert u.allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr)
assert u.allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr)
assert u.allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr)
assert u.allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr) |
Check that de Sitter and Einstein-de Sitter Universes both work.
Some analytic solutions fail at these critical points. | def test_distance_in_special_cosmologies():
"""Check that de Sitter and Einstein-de Sitter Universes both work.
Some analytic solutions fail at these critical points.
"""
c_dS = FlatLambdaCDM(100, 0, Tcmb0=0)
assert u.allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert u.allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = FlatLambdaCDM(100, 1, Tcmb0=0)
assert u.allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert u.allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
c_dS = LambdaCDM(100, 0, 1, Tcmb0=0)
assert u.allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert u.allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = LambdaCDM(100, 1, 0, Tcmb0=0)
assert u.allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert u.allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc) |
Test array shape broadcasting for functions with single
redshift inputs | def test_distance_broadcast():
"""Test array shape broadcasting for functions with single
redshift inputs"""
cosmo = FlatLambdaCDM(H0=70, Om0=0.27, m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
z = np.linspace(0.1, 1, 6)
z_reshape2d = z.reshape(2, 3)
z_reshape3d = z.reshape(3, 2, 1)
# Things with units
methods = [
"comoving_distance",
"luminosity_distance",
"comoving_transverse_distance",
"angular_diameter_distance",
"distmod",
"lookback_time",
"age",
"comoving_volume",
"differential_comoving_volume",
"kpc_comoving_per_arcmin",
]
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
assert u.allclose(value_flat, value_2d.flatten())
assert u.allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = [
"absorption_distance",
"Om",
"Ode",
"Ok",
"H",
"w",
"de_density_scale",
"Onu",
"Ogamma",
"nu_relative_density",
]
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert u.allclose(value_flat, value_2d.flatten())
assert u.allclose(value_flat, value_3d.flatten()) |
Test that efunc and inv_efunc give inverse values | def test_efunc_vs_invefunc_flrw():
"""Test that efunc and inv_efunc give inverse values"""
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# FLRW is abstract, so requires W1 defined earlier
# This requires scipy, unlike the built-ins, because it
# calls de_density_scale, which has an integral in it
cosmo = W1()
assert u.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert u.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# Add neutrinos
cosmo = W1nu()
assert u.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert u.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) |
Tests a few varying dark energy EOS models against a Mathematica computation. | def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a Mathematica computation."""
z = np.array([0.2, 0.4, 0.9, 1.2])
# w0wa models
cosmo = w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
assert u.allclose(
cosmo.luminosity_distance(z),
[1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc,
rtol=1e-4,
)
assert u.allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
assert u.allclose(
cosmo.de_density_scale([0.0, 0.5, 1.5]),
[1.0, 0.9246310669529021, 0.9184087000251957],
)
cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
assert u.allclose(
cosmo.luminosity_distance(z),
[971.667, 2141.67, 5685.96, 8107.41] * u.Mpc,
rtol=1e-4,
)
cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5, Tcmb0=0.0)
assert u.allclose(
cosmo.luminosity_distance(z),
[974.087, 2157.08, 5783.92, 8274.08] * u.Mpc,
rtol=1e-4,
) |
Test equality and equivalence. | def test_equality():
"""Test equality and equivalence."""
# mismatched signatures, both directions.
newcosmo = w0waCDM(**Planck18.parameters, Ode0=0.6)
assert newcosmo != Planck18
assert Planck18 != newcosmo |
Tests a few varying dark energy EOS models against a Mathematica computation. | def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a Mathematica computation."""
z = np.array([0.2, 0.4, 0.9, 1.2])
# wpwa models
cosmo = wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5, Tcmb0=0.0)
assert u.allclose(
cosmo.luminosity_distance(z),
[1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc,
rtol=1e-4,
)
cosmo = wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9, Tcmb0=0.0)
assert u.allclose(
cosmo.luminosity_distance(z),
[1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc,
rtol=1e-4,
) |
Parse Cosmology-like input into Cosmologies, given a format hint.
Parameters
----------
cosmo : |Cosmology|-like, positional-only
|Cosmology| to parse.
format : bool or None or str, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting.
Returns
-------
|Cosmology| or generator thereof
Raises
------
TypeError
If ``cosmo`` is not a |Cosmology| and ``format`` equals `False`.
TypeError
If ``cosmo`` is a |Cosmology| and ``format`` is not `None` or equal to
`True`. | def _parse_format(cosmo: Any, format: _FormatType, /) -> Cosmology:
"""Parse Cosmology-like input into Cosmologies, given a format hint.
Parameters
----------
cosmo : |Cosmology|-like, positional-only
|Cosmology| to parse.
format : bool or None or str, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting.
Returns
-------
|Cosmology| or generator thereof
Raises
------
TypeError
If ``cosmo`` is not a |Cosmology| and ``format`` equals `False`.
TypeError
If ``cosmo`` is a |Cosmology| and ``format`` is not `None` or equal to
`True`.
"""
# Deal with private wrapper
if isinstance(cosmo, _CosmologyWrapper):
cosmo = cosmo.wrapped
# Shortcut if already a cosmology
if isinstance(cosmo, Cosmology):
if format not in _COSMO_AOK:
allowed = "/".join(map(str, _COSMO_AOK))
raise ValueError(
f"for parsing a Cosmology, 'format' must be {allowed}, not {format}"
)
return cosmo
# Convert, if allowed.
elif format == False_: # catches False and False_
raise TypeError(
f"if 'format' is False, arguments must be a Cosmology, not {cosmo}"
)
else:
format = None if format == True_ else format # str->str, None/True/True_->None
out = Cosmology.from_format(cosmo, format=format) # this can error!
return out |
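A hedged sketch of the three branches, assuming ``Planck18`` and its mapping representation:
from astropy.cosmology import Planck18
_parse_format(Planck18, False)  # already a Cosmology: passed straight through
m = Planck18.to_format("mapping")
_parse_format(m, "mapping")  # converted via Cosmology.from_format
# _parse_format(m, False)  # would raise TypeError: conversion is disallowed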
Parse Cosmology-like to |Cosmology|, using provided formats.
``format`` is broadcast to match the shape of the cosmology arguments. Note
that the cosmology arguments are not broadcast against ``format``, so it
cannot determine the output shape.
Parameters
----------
*cosmos : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by the corresponding ``format``.
format : bool or None or str or array-like thereof, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting. Note ``format`` is broadcast as an object array to match the
shape of ``cosmos`` so ``format`` cannot determine the output shape.
Raises
------
TypeError
If any in ``cosmos`` is not a |Cosmology| and the corresponding
``format`` equals `False`. | def _parse_formats(*cosmos: object, format: _FormatsType) -> ndarray:
"""Parse Cosmology-like to |Cosmology|, using provided formats.
``format`` is broadcast to match the shape of the cosmology arguments. Note
that the cosmology arguments are not broadcast against ``format``, so it
cannot determine the output shape.
Parameters
----------
*cosmos : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by the corresponding ``format``.
format : bool or None or str or array-like thereof, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting. Note ``format`` is broadcast as an object array to match the
shape of ``cosmos`` so ``format`` cannot determine the output shape.
Raises
------
TypeError
If any in ``cosmos`` is not a |Cosmology| and the corresponding
``format`` equals `False`.
"""
formats = np.broadcast_to(np.array(format, dtype=object), len(cosmos))
# parse each cosmo & format
# Have to deal with things that do not broadcast well.
# astropy.table.Row cannot be used in an array, even if dtype=object
# and will raise a segfault when used in a ufunc.
wcosmos = [
c if not isinstance(c, _CANT_BROADCAST) else _CosmologyWrapper(c)
for c in cosmos
]
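# Note: `_parse_format` is applied element-wise over the (cosmology, format) pairs;
# in the full astropy source it is wrapped with np.vectorize (not shown in this extract).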
return _parse_format(wcosmos, formats) |
Decorator to make wrapper function that parses |Cosmology|-like inputs.
Parameters
----------
pyfunc : Python function object
An arbitrary Python function.
Returns
-------
callable[..., Any]
Wrapped `pyfunc`, as described above.
Notes
-----
All decorated functions should add the following to 'Parameters'.
format : bool or None or str or array-like thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape. | def _comparison_decorator(pyfunc: Callable[..., Any]) -> Callable[..., Any]:
"""Decorator to make wrapper function that parses |Cosmology|-like inputs.
Parameters
----------
pyfunc : Python function object
An arbitrary Python function.
Returns
-------
callable[..., Any]
Wrapped `pyfunc`, as described above.
Notes
-----
All decorated functions should add the following to 'Parameters'.
format : bool or None or str or array-like thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
"""
sig = inspect.signature(pyfunc)
nin = sum(p.kind == 0 for p in sig.parameters.values())  # number of positional-only parameters (kind 0)
# Make wrapper function that parses cosmology-like inputs
@functools.wraps(pyfunc)
def wrapper(*cosmos: Any, format: _FormatsType = False, **kwargs: Any) -> bool:
if len(cosmos) > nin:
raise TypeError(
f"{wrapper.__wrapped__.__name__} takes {nin} positional"
f" arguments but {len(cosmos)} were given"
)
# Parse cosmologies to format. Only do specified number.
cosmos = _parse_formats(*cosmos, format=format)
# Evaluate pyfunc, erroring if didn't match specified number.
result = wrapper.__wrapped__(*cosmos, **kwargs)
# Return the result, casting to the correct type where possible.
return result
return wrapper |
Return element-wise equality check on the cosmologies.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
Examples
--------
Assuming the following imports
>>> import astropy.units as u
>>> from astropy.cosmology import FlatLambdaCDM
Two identical cosmologies are equal.
>>> cosmo1 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmology_equal(cosmo1, cosmo2)
True
And cosmologies with different parameters are not.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.4)
>>> cosmology_equal(cosmo1, cosmo3)
False
Two cosmologies may be equivalent even if not of the same class. In these
examples the |LambdaCDM| has :attr:`~astropy.cosmology.LambdaCDM.Ode0` set
to the same value calculated in |FlatLambdaCDM|.
>>> from astropy.cosmology import LambdaCDM
>>> cosmo3 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmology_equal(cosmo1, cosmo3)
False
>>> cosmology_equal(cosmo1, cosmo3, allow_equivalent=True)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo4 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmology_equal(cosmo3, cosmo4, allow_equivalent=True)
False
Also, using the keyword argument, the notion of equality is extended to any
Python object that can be converted to a |Cosmology|.
>>> mapping = cosmo2.to_format("mapping")
>>> cosmology_equal(cosmo1, mapping, format=True)
True
Either (or both) arguments can be |Cosmology|-like.
>>> cosmology_equal(mapping, cosmo2, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be checked
with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of these
kinds can still be checked for equality, but the correct format string must
be used.
>>> yml = cosmo2.to_format("yaml")
>>> cosmology_equal(cosmo1, yml, format=(None, "yaml"))
True
This also works with an array of ``format`` matching the number of
cosmologies.
>>> cosmology_equal(mapping, yml, format=[True, "yaml"])
True | def cosmology_equal(
cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool = False
) -> bool:
r"""Return element-wise equality check on the cosmologies.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
Examples
--------
Assuming the following imports
>>> import astropy.units as u
>>> from astropy.cosmology import FlatLambdaCDM
Two identical cosmologies are equal.
>>> cosmo1 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmology_equal(cosmo1, cosmo2)
True
And cosmologies with different parameters are not.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.4)
>>> cosmology_equal(cosmo1, cosmo3)
False
Two cosmologies may be equivalent even if not of the same class. In these
examples the |LambdaCDM| has :attr:`~astropy.cosmology.LambdaCDM.Ode0` set
to the same value calculated in |FlatLambdaCDM|.
>>> from astropy.cosmology import LambdaCDM
>>> cosmo3 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmology_equal(cosmo1, cosmo3)
False
>>> cosmology_equal(cosmo1, cosmo3, allow_equivalent=True)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo4 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmology_equal(cosmo3, cosmo4, allow_equivalent=True)
False
Also, using the keyword argument, the notion of equality is extended to any
Python object that can be converted to a |Cosmology|.
>>> mapping = cosmo2.to_format("mapping")
>>> cosmology_equal(cosmo1, mapping, format=True)
True
Either (or both) arguments can be |Cosmology|-like.
>>> cosmology_equal(mapping, cosmo2, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be checked
with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of these
kinds can still be checked for equality, but the correct format string must
be used.
>>> yml = cosmo2.to_format("yaml")
>>> cosmology_equal(cosmo1, yml, format=(None, "yaml"))
True
This also works with an array of ``format`` matching the number of
cosmologies.
>>> cosmology_equal(mapping, yml, format=[True, "yaml"])
True
"""
# Check parameter equality
if not allow_equivalent:
eq = cosmo1 == cosmo2
else:
# Check parameter equivalence
# The options are: 1) same class & parameters; 2) same class, different
# parameters; 3) different classes, equivalent parameters; 4) different
# classes, different parameters. (1) & (3) => True, (2) & (4) => False.
eq = cosmo1.__equiv__(cosmo2)
if eq is NotImplemented:
eq = cosmo2.__equiv__(cosmo1) # that failed, try from 'other'
eq = False if eq is NotImplemented else eq
# TODO! include equality check of metadata
return eq |
Return element-wise cosmology non-equality check.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
out : ndarray, None, optional
A location into which the result is stored. If provided, it must have a
shape that the inputs broadcast to. If not provided or None, a
freshly-allocated array is returned.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. ``format`` is
broadcast to match the shape of the cosmology arguments. Note that the
cosmology arguments are not broadcast against ``format``, so it cannot
determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
See Also
--------
astropy.cosmology.cosmology_equal
Element-wise equality check, with argument conversion to Cosmology. | def _cosmology_not_equal(
cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool = False
) -> bool:
r"""Return element-wise cosmology non-equality check.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
out : ndarray, None, optional
A location into which the result is stored. If provided, it must have a
shape that the inputs broadcast to. If not provided or None, a
freshly-allocated array is returned.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. ``format`` is
broadcast to match the shape of the cosmology arguments. Note that the
cosmology arguments are not broadcast against ``format``, so it cannot
determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
See Also
--------
astropy.cosmology.cosmology_equal
Element-wise equality check, with argument conversion to Cosmology.
"""
neq = not cosmology_equal(cosmo1, cosmo2, allow_equivalent=allow_equivalent)
# TODO! it might eventually be worth the speed boost to implement some of
# the internals of cosmology_equal here, but for now it's a hassle.
return neq |
Find the redshift ``z`` at which ``func(z) = fval``.
See :func:`astropy.cosmology.funcs.z_at_value`. | def _z_at_scalar_value(
func,
fval,
zmin=1e-8,
zmax=1000,
ztol=1e-8,
maxfun=500,
method="Brent",
bracket=None,
verbose=False,
):
"""Find the redshift ``z`` at which ``func(z) = fval``.
See :func:`astropy.cosmology.funcs.z_at_value`.
"""
from scipy.optimize import minimize_scalar
opt = {"maxiter": maxfun, "xtol": ztol}
# Assume custom methods support the same options as default; otherwise user
# will see warnings.
if callable(method): # can skip callables
pass
elif str(method).lower() == "bounded":
opt["xatol"] = opt.pop("xtol")
if bracket is not None:
warnings.warn(f"Option 'bracket' is ignored by method {method}.")
bracket = None
# fval falling inside the interval of bracketing function values does not
# guarantee a unique solution, but for standard cosmological
# quantities it normally does (they are monotonic or have a single extremum).
# In these cases keep solver from returning solutions outside of bracket.
fval_zmin, fval_zmax = func(zmin), func(zmax)
nobracket = False
if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval):
if bracket is None:
nobracket = True
else:
fval_brac = func(np.asanyarray(bracket))
if np.sign(fval - fval_brac[0]) != np.sign(fval_brac[-1] - fval):
nobracket = True
else:
zmin, zmax = bracket[0], bracket[-1]
fval_zmin, fval_zmax = fval_brac[[0, -1]]
if nobracket:
warnings.warn(
f"fval is not bracketed by func(zmin)={fval_zmin} and "
f"func(zmax)={fval_zmax}. This means either there is no "
"solution, or that there is more than one solution "
"between zmin and zmax satisfying fval = func(z).",
AstropyUserWarning,
)
if isinstance(fval_zmin, Quantity):
val = fval.to_value(fval_zmin.unit)
else:
val = fval
# Construct bounds (Brent and Golden fail if bounds are not None)
if callable(method) or str(method).lower() not in {"brent", "golden"}:
bounds = (zmin, zmax)
else:
bounds = None
# Objective function to minimize.
# 'Brent' and 'Golden' ignore `bounds` but this keeps the domain within the bounds.
def f(z):
if z > zmax:
return 1.0e300 * (1.0 + z - zmax)
elif z < zmin:
return 1.0e300 * (1.0 + zmin - z)
elif isinstance(fval_zmin, Quantity):
return abs(func(z).value - val)
else:
return abs(func(z) - val)
# Perform the minimization
res = minimize_scalar(f, method=method, bounds=bounds, bracket=bracket, options=opt)
# Scipy docs state that `OptimizeResult` always has 'status' and 'message'
# attributes, but only `_minimize_scalar_bounded()` seems to have really
# implemented them.
if not res.success:
warnings.warn(
f"Solver returned {res.get('status')}:"
f" {res.get('message', 'Unsuccessful')}\nPrecision {res.fun} reached after"
f" {res.nfev} function calls.",
AstropyUserWarning,
)
if verbose:
print(res)
if np.allclose(res.x, zmax):
raise CosmologyError(
f"Best guess z={res.x} is very close to the upper z limit {zmax}."
"\nTry re-running with a different zmax."
)
elif np.allclose(res.x, zmin):
raise CosmologyError(
f"Best guess z={res.x} is very close to the lower z limit {zmin}."
"\nTry re-running with a different zmin."
)
return res.x |
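Although normally reached through ``z_at_value``, the scalar worker can be called directly. A small sketch (requires scipy and assumes ``Planck18``):
import astropy.units as u
from astropy.cosmology import Planck18
_z_at_scalar_value(Planck18.age, 2 * u.Gyr)  # a plain float near 3.198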
Find the redshift ``z`` at which ``func(z) = fval``.
This finds the redshift at which one of the cosmology functions or
methods (for example Planck13.distmod) is equal to a known value.
.. warning::
Make sure you understand the behavior of the function that you are
trying to invert! Depending on the cosmology, there may not be a
unique solution. For example, in the standard Lambda CDM cosmology,
there are two redshifts which give an angular diameter distance of
1500 Mpc, z ~ 0.7 and z ~ 3.8. To force ``z_at_value`` to find the
solution you are interested in, use the ``zmin`` and ``zmax`` keywords
to limit the search range (see the example below).
Parameters
----------
func : function or method
A function that takes a redshift as input.
fval : `~astropy.units.Quantity`
The (scalar or array) value of ``func(z)`` to recover.
zmin : float or array-like['dimensionless'] or quantity-like, optional
The lower search limit for ``z``. Beware of divergences
in some cosmological functions, such as distance moduli,
at z=0 (default 1e-8).
zmax : float or array-like['dimensionless'] or quantity-like, optional
The upper search limit for ``z`` (default 1000).
ztol : float or array-like['dimensionless'], optional
The relative error in ``z`` acceptable for convergence.
maxfun : int or array-like, optional
The maximum number of function evaluations allowed in the
optimization routine (default 500).
method : str or callable, optional
Type of solver to pass to the minimizer. The built-in options provided
by :func:`~scipy.optimize.minimize_scalar` are 'Brent' (default),
'Golden' and 'Bounded' with names case insensitive - see documentation
there for details. It also accepts a custom solver by passing any
user-provided callable object that meets the requirements listed
therein under the Notes on "Custom minimizers" - or in more detail in
:doc:`scipy:tutorial/optimize` - although their use is currently
untested.
.. versionadded:: 4.3
bracket : sequence or object array[sequence], optional
For methods 'Brent' and 'Golden', ``bracket`` defines the bracketing
interval and can either have three items (z1, z2, z3) so that
z1 < z2 < z3 and ``func(z2) < func(z1), func(z3)`` or two items z1
and z3 which are assumed to be a starting interval for a downhill
bracket search. For non-monotonic functions such as angular diameter
distance this may be used to start the search on the desired side of
the maximum, but see Examples below for usage notes.
.. versionadded:: 4.3
verbose : bool, optional keyword-only
Print diagnostic output from solver (default `False`).
.. versionadded:: 4.3
.. versionchanged:: 6.1
Changed to keyword-only.
Returns
-------
z : `~astropy.units.Quantity` ['redshift']
The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) =
fval`` within ``ztol``. Has units of cosmological redshift.
Warns
-----
:class:`~astropy.utils.exceptions.AstropyUserWarning`
If ``fval`` is not bracketed by ``func(zmin)=fval(zmin)`` and
``func(zmax)=fval(zmax)``.
If the solver was not successful.
Raises
------
:class:`astropy.cosmology.CosmologyError`
If the result is very close to either ``zmin`` or ``zmax``.
ValueError
If ``bracket`` is not an array nor a 2 (or 3) element sequence.
TypeError
If ``bracket`` is not an object array. 2 (or 3) element sequences will
be turned into object arrays, so this error should only occur if a
non-object array is used for ``bracket``.
Notes
-----
This works for any arbitrary input cosmology, but is inefficient if you
want to invert a large number of values for the same cosmology. In this
case, it is faster to instead generate an array of values at many
closely-spaced redshifts that cover the relevant redshift range, and then
use interpolation to find the redshift at each value you are interested
in. For example, to efficiently find the redshifts corresponding to 10^6
values of the distance modulus in a Planck13 cosmology, you could do the
following:
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, z_at_value
Generate 10^6 distance moduli between 24 and 44 for which we
want to find the corresponding redshifts:
>>> Dvals = (24 + np.random.rand(1000000) * 20) * u.mag
Make a grid of distance moduli covering the redshift range we
need using 50 equally log-spaced values between zmin and
zmax. We use log spacing to adequately sample the steep part of
the curve at low distance moduli:
>>> zmin = z_at_value(Planck13.distmod, Dvals.min())
>>> zmax = z_at_value(Planck13.distmod, Dvals.max())
>>> zgrid = np.geomspace(zmin, zmax, 50)
>>> Dgrid = Planck13.distmod(zgrid)
Finally interpolate to find the redshift at each distance modulus:
>>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid)
Examples
--------
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, Planck18, z_at_value
The age and lookback time are monotonic with redshift, and so a
unique solution can be found:
>>> z_at_value(Planck13.age, 2 * u.Gyr) # doctest: +FLOAT_CMP
<Quantity 3.19812268 redshift>
The angular diameter distance is not monotonic, however, and there are two
redshifts that give a value of 1500 Mpc. You can use the zmin and
zmax keywords to find the one you are interested in:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmax=1.5) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmin=2.5) # doctest: +FLOAT_CMP
<Quantity 3.7823268 redshift>
Alternatively the ``bracket`` option may be used to initialize the
function solver on a desired region, but one should be aware that this
does not guarantee it will remain close to this starting bracket.
For the example of angular diameter distance, which has a maximum near
a redshift of 1.6 in this cosmology, defining a bracket on either side
of this maximum will often return a solution on the same side:
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... method="Brent", bracket=(1.0, 1.2)) # doctest: +FLOAT_CMP +IGNORE_WARNINGS
<Quantity 0.68044452 redshift>
But this is not guaranteed, especially if the bracket is chosen too wide
and/or too close to the turning point:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(0.1, 1.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
Likewise, even for the same minimizer and same starting conditions different
results can be found depending on architecture or library versions:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 0.68044452 redshift> # doctest: +SKIP
It is therefore generally safer to use the 3-parameter variant to ensure
the solution stays within the bracketing limits:
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc, method="Brent",
... bracket=(0.1, 1.0, 1.5)) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
Also note that the luminosity distance and distance modulus (two
other commonly inverted quantities) are monotonic in flat and open
universes, but not in closed universes.
All the arguments except ``func``, ``method`` and ``verbose`` accept array
inputs. This does NOT use interpolation tables or any method to speed up
evaluations, rather providing a convenient means to broadcast arguments
over an element-wise scalar evaluation.
The most common use case for non-scalar input is to evaluate 'func' for an
array of ``fval``:
>>> z_at_value(Planck13.age, [2, 7] * u.Gyr) # doctest: +FLOAT_CMP
<Quantity [3.19812061, 0.75620443] redshift>
``fval`` can be any shape:
>>> z_at_value(Planck13.age, [[2, 7], [1, 3]]*u.Gyr) # doctest: +FLOAT_CMP
<Quantity [[3.19812061, 0.75620443],
[5.67661227, 2.19131955]] redshift>
Other arguments can be arrays. For non-monotonic functions -- for example,
the angular diameter distance -- this can be useful to find all solutions.
>>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc,
... zmin=[0, 2.5], zmax=[2, 4]) # doctest: +FLOAT_CMP
<Quantity [0.68127747, 3.79149062] redshift>
The ``bracket`` argument can likewise be an array. However, since
``bracket`` must already be a sequence (or None), it MUST be given as an
object `numpy.ndarray`. Importantly, the depth of the array must be such
that each bracket subsequence is an object. Errors or unexpected results
will happen otherwise. A convenient means to ensure the right depth is by
including a length-0 tuple as a bracket and then truncating the object
array to remove the placeholder. This can be seen in the following
example:
>>> bracket=np.array([(1.0, 1.2),(2.0, 2.5), ()], dtype=object)[:-1]
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... bracket=bracket) # doctest: +SKIP
<Quantity [0.68044452, 3.7823268] redshift> | def z_at_value(
func,
fval,
zmin=1e-8,
zmax=1000,
ztol=1e-8,
maxfun=500,
method="Brent",
bracket=None,
*,
verbose=False,
):
"""Find the redshift ``z`` at which ``func(z) = fval``.
This finds the redshift at which one of the cosmology functions or
methods (for example Planck13.distmod) is equal to a known value.
.. warning::
Make sure you understand the behavior of the function that you are
trying to invert! Depending on the cosmology, there may not be a
unique solution. For example, in the standard Lambda CDM cosmology,
there are two redshifts which give an angular diameter distance of
1500 Mpc, z ~ 0.7 and z ~ 3.8. To force ``z_at_value`` to find the
solution you are interested in, use the ``zmin`` and ``zmax`` keywords
to limit the search range (see the example below).
Parameters
----------
func : function or method
A function that takes a redshift as input.
fval : `~astropy.units.Quantity`
The (scalar or array) value of ``func(z)`` to recover.
zmin : float or array-like['dimensionless'] or quantity-like, optional
The lower search limit for ``z``. Beware of divergences
in some cosmological functions, such as distance moduli,
at z=0 (default 1e-8).
zmax : float or array-like['dimensionless'] or quantity-like, optional
The upper search limit for ``z`` (default 1000).
ztol : float or array-like['dimensionless'], optional
The relative error in ``z`` acceptable for convergence.
maxfun : int or array-like, optional
The maximum number of function evaluations allowed in the
optimization routine (default 500).
method : str or callable, optional
Type of solver to pass to the minimizer. The built-in options provided
by :func:`~scipy.optimize.minimize_scalar` are 'Brent' (default),
'Golden' and 'Bounded' with names case insensitive - see documentation
there for details. It also accepts a custom solver by passing any
user-provided callable object that meets the requirements listed
therein under the Notes on "Custom minimizers" - or in more detail in
:doc:`scipy:tutorial/optimize` - although their use is currently
untested.
.. versionadded:: 4.3
bracket : sequence or object array[sequence], optional
For methods 'Brent' and 'Golden', ``bracket`` defines the bracketing
interval and can either have three items (z1, z2, z3) so that
z1 < z2 < z3 and ``func(z2) < func(z1), func(z3)``, or two items z1
and z3 which are assumed to be a starting interval for a downhill
bracket search. For non-monotonic functions such as angular diameter
distance this may be used to start the search on the desired side of
the maximum, but see Examples below for usage notes.
.. versionadded:: 4.3
verbose : bool, optional keyword-only
Print diagnostic output from solver (default `False`).
.. versionadded:: 4.3
.. versionchanged:: 6.1
Changed to keyword-only.
Returns
-------
z : `~astropy.units.Quantity` ['redshift']
The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) =
fval`` within ``ztol``. Has units of cosmological redshift.
Warns
-----
:class:`~astropy.utils.exceptions.AstropyUserWarning`
If ``fval`` is not bracketed by ``func(zmin)`` and ``func(zmax)``.
If the solver was not successful.
Raises
------
:class:`astropy.cosmology.CosmologyError`
If the result is very close to either ``zmin`` or ``zmax``.
ValueError
If ``bracket`` is not an array nor a 2 (or 3) element sequence.
TypeError
If ``bracket`` is not an object array. 2 (or 3) element sequences will
be turned into object arrays, so this error should only occur if a
non-object array is used for ``bracket``.
Notes
-----
This works for any arbitrary input cosmology, but is inefficient if you
want to invert a large number of values for the same cosmology. In this
case, it is faster to instead generate an array of values at many
closely-spaced redshifts that cover the relevant redshift range, and then
use interpolation to find the redshift at each value you are interested
in. For example, to efficiently find the redshifts corresponding to 10^6
values of the distance modulus in a Planck13 cosmology, you could do the
following:
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, z_at_value
Generate 10^6 distance moduli between 24 and 44 for which we
want to find the corresponding redshifts:
>>> Dvals = (24 + np.random.rand(1000000) * 20) * u.mag
Make a grid of distance moduli covering the redshift range we
need using 50 equally log-spaced values between zmin and
zmax. We use log spacing to adequately sample the steep part of
the curve at low distance moduli:
>>> zmin = z_at_value(Planck13.distmod, Dvals.min())
>>> zmax = z_at_value(Planck13.distmod, Dvals.max())
>>> zgrid = np.geomspace(zmin, zmax, 50)
>>> Dgrid = Planck13.distmod(zgrid)
Finally interpolate to find the redshift at each distance modulus:
>>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid)
Examples
--------
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, Planck18, z_at_value
The age and lookback time are monotonic with redshift, and so a
unique solution can be found:
>>> z_at_value(Planck13.age, 2 * u.Gyr) # doctest: +FLOAT_CMP
<Quantity 3.19812268 redshift>
The angular diameter is not monotonic however, and there are two
redshifts that give a value of 1500 Mpc. You can use the zmin and
zmax keywords to find the one you are interested in:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmax=1.5) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmin=2.5) # doctest: +FLOAT_CMP
<Quantity 3.7823268 redshift>
Alternatively the ``bracket`` option may be used to initialize the
function solver on a desired region, but one should be aware that this
does not guarantee it will remain close to this starting bracket.
For the example of angular diameter distance, which has a maximum near
a redshift of 1.6 in this cosmology, defining a bracket on either side
of this maximum will often return a solution on the same side:
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... method="Brent", bracket=(1.0, 1.2)) # doctest: +FLOAT_CMP +IGNORE_WARNINGS
<Quantity 0.68044452 redshift>
But this is not guaranteed, especially if the bracket is chosen too wide
and/or too close to the turning point:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(0.1, 1.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
Likewise, even with the same minimizer and the same starting conditions,
different results can be found depending on the architecture or library versions:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 0.68044452 redshift> # doctest: +SKIP
It is therefore generally safer to use the 3-parameter variant to ensure
the solution stays within the bracketing limits:
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc, method="Brent",
... bracket=(0.1, 1.0, 1.5)) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
Also note that the luminosity distance and distance modulus (two
other commonly inverted quantities) are monotonic in flat and open
universes, but not in closed universes.
All the arguments except ``func``, ``method`` and ``verbose`` accept array
inputs. This does NOT use interpolation tables or any method to speed up
evaluations, rather providing a convenient means to broadcast arguments
over an element-wise scalar evaluation.
The most common use case for non-scalar input is to evaluate 'func' for an
array of ``fval``:
>>> z_at_value(Planck13.age, [2, 7] * u.Gyr) # doctest: +FLOAT_CMP
<Quantity [3.19812061, 0.75620443] redshift>
``fval`` can be any shape:
>>> z_at_value(Planck13.age, [[2, 7], [1, 3]]*u.Gyr) # doctest: +FLOAT_CMP
<Quantity [[3.19812061, 0.75620443],
[5.67661227, 2.19131955]] redshift>
Other arguments can be arrays. For non-monotonic functions -- for example,
the angular diameter distance -- this can be useful to find all solutions.
>>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc,
... zmin=[0, 2.5], zmax=[2, 4]) # doctest: +FLOAT_CMP
<Quantity [0.68127747, 3.79149062] redshift>
The ``bracket`` argument can likewise be an array. However, since
``bracket`` must already be a sequence (or None), it MUST be given as an
object `numpy.ndarray`. Importantly, the depth of the array must be such
that each bracket subsequence is an object. Errors or unexpected results
will happen otherwise. A convenient means to ensure the right depth is by
including a length-0 tuple as a bracket and then truncating the object
array to remove the placeholder. This can be seen in the following
example:
>>> bracket=np.array([(1.0, 1.2),(2.0, 2.5), ()], dtype=object)[:-1]
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... bracket=bracket) # doctest: +SKIP
<Quantity [0.68044452, 3.7823268] redshift>
"""
# `fval` can be a Quantity, which isn't (yet) compatible w/ `numpy.nditer`
# so we strip it of units for broadcasting and restore the units when
# passing the elements to `_z_at_scalar_value`.
fval = np.asanyarray(fval)
unit = getattr(fval, "unit", 1) # can be unitless
zmin = Quantity(zmin, cu.redshift).value # must be unitless
zmax = Quantity(zmax, cu.redshift).value
# bracket must be an object array (assumed to be correct) or a 'scalar'
# bracket: 2 or 3 elt sequence
if not isinstance(bracket, np.ndarray): # 'scalar' bracket
if bracket is not None and len(bracket) not in (2, 3):
raise ValueError(
"`bracket` is not an array nor a 2 (or 3) element sequence."
)
else: # munge bracket into a 1-elt object array
bracket = np.array([bracket, ()], dtype=object)[:1].squeeze()
if bracket.dtype != np.object_:
raise TypeError(f"`bracket` has dtype {bracket.dtype}, not 'O'")
# make multi-dimensional iterator for all but `method`, `verbose`
with np.nditer(
[fval, zmin, zmax, ztol, maxfun, bracket, None],
flags=["refs_ok"],
op_flags=[
*[["readonly"]] * 6, # ← inputs output ↓
["writeonly", "allocate", "no_subtype"],
],
op_dtypes=(*(None,) * 6, fval.dtype),
casting="no",
) as it:
for fv, zmn, zmx, zt, mfe, bkt, zs in it: # ← eltwise unpack & eval ↓
zs[...] = _z_at_scalar_value(
func,
fv * unit,
zmin=zmn,
zmax=zmx,
ztol=zt,
maxfun=mfe,
bracket=bkt.item(),
# not broadcasted
method=method,
verbose=verbose,
)
# since bracket is an object array, the output will be too, so it is
# cast to the same type as the function value.
result = it.operands[-1] # zs
return result << cu.redshift |
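The broadcasting above is plain ``numpy.nditer`` machinery, so it can be illustrated outside astropy. The following standalone sketch uses invented names (``toy_scalar_solver``, toy brackets; units are omitted for brevity) and simply mirrors the pattern: per-element brackets carried in an object array and a scalar solver applied element-wise.
import numpy as np

def toy_scalar_solver(fval, zmin, zmax, bracket=None):
    # Stand-in for ``_z_at_scalar_value``; it just returns the midpoint of the
    # bracket (or of [zmin, zmax]) so the broadcasting can be seen.
    lo, hi = bracket if bracket is not None else (zmin, zmax)
    return 0.5 * (lo + hi)

fvals = np.array([10.0, 20.0])  # values to invert, already unitless
brackets = np.array([(0.5, 1.5), (2.5, 3.5), ()], dtype=object)[:-1]  # per-element brackets

with np.nditer(
    [fvals, 0.0, 4.0, brackets, None],  # zmin=0.0, zmax=4.0 broadcast as scalars
    flags=["refs_ok"],  # required because one operand has object dtype
    op_flags=[*[["readonly"]] * 4, ["writeonly", "allocate"]],
    op_dtypes=(None, None, None, None, fvals.dtype),
) as it:
    for fv, zmn, zmx, bkt, out in it:
        out[...] = toy_scalar_solver(
            float(fv), float(zmn), float(zmx), bracket=bkt.item()
        )
    result = it.operands[-1]

print(result)  # [1. 3.]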
Test that numpy vectorize fails on Quantities.
If this test starts failing then numpy vectorize can be used instead of
the home-brewed vectorization. Please submit a PR making the change. | def test_z_at_value_numpyvectorize():
"""Test that numpy vectorize fails on Quantities.
If this test starts failing then numpy vectorize can be used instead of
the home-brewed vectorization. Please submit a PR making the change.
"""
z_at_value = np.vectorize(
_z_at_scalar_value, excluded=["func", "method", "verbose"]
)
with pytest.raises(u.UnitConversionError, match="dimensionless quantities"):
z_at_value(Planck15.age, 10 * u.Gyr) |
Test 2 solutions for angular diameter distance by not constraining zmin, zmax,
but setting `bracket` on the appropriate side of the turning point z.
Setting zmin / zmax should override `bracket`. | def test_z_at_value_bracketed(method):
"""
Test 2 solutions for angular diameter distance by not constraining zmin, zmax,
but setting `bracket` on the appropriate side of the turning point z.
Setting zmin / zmax should override `bracket`.
"""
cosmo = Planck13
if PYTEST_LT_8_0:
ctx_fval = nullcontext()
else:
ctx_fval = pytest.warns(AstropyUserWarning, match="fval is not bracketed")
if method == "Bounded":
with pytest.warns(AstropyUserWarning, match="fval is not bracketed"):
z = z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method)
if z > 1.6:
z = 3.7914908
bracket = (0.9, 1.5)
else:
z = 0.6812777
bracket = (1.6, 2.0)
with pytest.warns(UserWarning, match="Option 'bracket' is ignored"), ctx_fval:
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=bracket,
),
z,
rtol=1e-6,
)
else:
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.3, 1.0),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(2.0, 4.0),
),
3.7914908,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.1, 1.5),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.1, 1.0, 2.0),
),
0.6812777,
rtol=1e-6,
)
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.9, 1.5),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(1.6, 2.0),
),
3.7914908,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(1.6, 2.0),
zmax=1.6,
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.9, 1.5),
zmin=1.5,
),
3.7914908,
rtol=1e-6,
)
if not PYTEST_LT_8_0 and method == "Bounded":
ctx_bracket = pytest.warns(
UserWarning, match="Option 'bracket' is ignored by method Bounded"
)
else:
ctx_bracket = nullcontext()
with (
pytest.raises(core.CosmologyError),
pytest.warns(AstropyUserWarning, match="fval is not bracketed"),
ctx_bracket,
):
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(3.9, 5.0),
zmin=4.0,
) |
Test warnings on a non-converged solution when `maxfun` is set to too small an iteration limit -
only 'Bounded' returns status value and specific message. | def test_z_at_value_unconverged(method):
"""
Test warnings on a non-converged solution when `maxfun` is set to too small an iteration limit -
only 'Bounded' returns status value and specific message.
"""
cosmo = Planck18
ztol = {"Brent": [1e-4, 1e-4], "Golden": [1e-3, 1e-2], "Bounded": [1e-3, 1e-1]}
if method == "Bounded":
ctx = pytest.warns(
AstropyUserWarning,
match="Solver returned 1: Maximum number of function calls reached",
)
else:
ctx = pytest.warns(AstropyUserWarning, match="Solver returned None")
with ctx:
z0 = z_at_value(
cosmo.angular_diameter_distance, 1 * u.Gpc, zmax=2, maxfun=13, method=method
)
with ctx:
z1 = z_at_value(
cosmo.angular_diameter_distance, 1 * u.Gpc, zmin=2, maxfun=13, method=method
)
assert allclose(z0, 0.32442, rtol=ztol[method][0])
assert allclose(z1, 8.18551, rtol=ztol[method][1]) |
Calculate values from a known redshift, and then check that
z_at_value returns the right answer. | def test_z_at_value_roundtrip(cosmo):
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
# Skip Ok, w, de_density_scale because in the Planck cosmologies
# they are redshift independent and hence uninvertible,
# *_distance_z1z2 methods take multiple arguments, so require
# special handling
# clone is not a redshift-dependent method
# nu_relative_density is not redshift-dependent in the WMAP cosmologies
skip = (
"Ok",
"Otot",
"angular_diameter_distance_z1z2",
"clone",
"is_equivalent",
"de_density_scale",
"w",
)
if str(cosmo.name).startswith("WMAP"):
skip += ("nu_relative_density",)
methods = inspect.getmembers(cosmo, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith("_") or name in skip:
continue
fval = func(z)
# we need a bracket here to pick the right solution for
# angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
got = z_at_value(func, fval, bracket=[0.3, 1.0], ztol=1e-12)
assert allclose(got, z, rtol=2e-11), f"Round-trip testing {name} failed"
# Test distance functions between two redshifts; only for realizations
if isinstance(getattr(cosmo, "name", None), str):
z2 = 2.0
func_z1z2 = [
lambda z1: cosmo._comoving_distance_z1z2(z1, z2),
lambda z1: cosmo._comoving_transverse_distance_z1z2(z1, z2),
lambda z1: cosmo.angular_diameter_distance_z1z2(z1, z2),
]
for func in func_z1z2:
fval = func(z)
assert allclose(z, z_at_value(func, fval, zmax=1.5, ztol=1e-12), rtol=2e-11) |
Decorator to register a new kind of validator function.
Parameters
----------
key : str
fvalidate : callable[[object, object, Any], Any] or None, optional
Value validation function.
Returns
-------
``validator`` or callable[``validator``]
If ``fvalidate`` is None, returns a function that takes and registers a
validator. This allows ``register_validator`` to be used as a
decorator. | def _register_validator(key, fvalidate=None):
"""Decorator to register a new kind of validator function.
Parameters
----------
key : str
fvalidate : callable[[object, object, Any], Any] or None, optional
Value validation function.
Returns
-------
``validator`` or callable[``validator``]
If ``fvalidate`` is None, returns a function that takes and registers a
validator. This allows ``register_validator`` to be used as a
decorator.
"""
if key in _REGISTRY_FVALIDATORS:
raise KeyError(f"validator {key!r} already registered with Parameter.")
# fvalidate directly passed
if fvalidate is not None:
_REGISTRY_FVALIDATORS[key] = fvalidate
return fvalidate
# for use as a decorator
def register(fvalidate):
"""Register validator function.
Parameters
----------
fvalidate : callable[[object, object, Any], Any]
Validation function.
Returns
-------
``validator``
"""
_REGISTRY_FVALIDATORS[key] = fvalidate
return fvalidate
return register |
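Because the register-directly-or-decorate pattern above is easy to get subtly wrong, here is a minimal standalone sketch of the same idea; ``MY_REGISTRY`` and the keys are made up for illustration and are not astropy API.
MY_REGISTRY = {}

def register_validator(key, fvalidate=None):
    if key in MY_REGISTRY:
        raise KeyError(f"validator {key!r} already registered.")
    if fvalidate is not None:  # direct call: register_validator("key", func)
        MY_REGISTRY[key] = fvalidate
        return fvalidate
    def register(fvalidate):  # decorator call: @register_validator("key")
        MY_REGISTRY[key] = fvalidate
        return fvalidate
    return register

# direct registration
register_validator("float", lambda cosmology, param, value: float(value))

# registration via the decorator form
@register_validator("non-negative")
def validate_non_negative(cosmology, param, value):
    value = float(value)
    if value < 0:
        raise ValueError(f"{param} cannot be negative.")
    return value

assert set(MY_REGISTRY) == {"float", "non-negative"}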
Default Parameter value validator.
Adds/converts units if Parameter has a unit. | def _validate_with_unit(cosmology, param, value):
"""Default Parameter value validator.
Adds/converts units if Parameter has a unit.
"""
if param.unit is not None:
with u.add_enabled_equivalencies(param.equivalencies):
value = u.Quantity(value, param.unit)
return value |
Parameter value validator with units, and converted to float. | def _validate_to_float(cosmology, param, value):
"""Parameter value validator with units, and converted to float."""
value = _validate_with_unit(cosmology, param, value)
return float(value) |
Parameter value validator where value is a non-negative float.	def _validate_non_negative(cosmology, param, value):
"""Parameter value validator where value is a non-negative float."""
value = _validate_to_float(cosmology, param, value)
if value < 0.0:
raise ValueError(f"{param.name} cannot be negative.")
return value |
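A hedged usage sketch of the validator chain above. The ``SimpleNamespace`` objects are invented stand-ins for real ``Parameter`` instances, which carry more state; only the attributes the validators actually read (``name``, ``unit``, ``equivalencies``) are provided.
import astropy.units as u
from types import SimpleNamespace

tcmb_param = SimpleNamespace(name="Tcmb0", unit=u.K, equivalencies=[])
om_param = SimpleNamespace(name="Om0", unit=None, equivalencies=[])

# _validate_with_unit attaches the Parameter's unit ...
assert _validate_with_unit(None, tcmb_param, 2.7) == 2.7 * u.K
# ... _validate_to_float additionally coerces to a plain float ...
assert _validate_to_float(None, om_param, 0.3) == 0.3
# ... and _validate_non_negative rejects negative values on top of that.
try:
    _validate_non_negative(None, om_param, -0.1)
except ValueError as err:
    print(err)  # Om0 cannot be negative.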
Test the registry of :class:`astropy.cosmology.Parameter` validator functions.	def test_registry_validators():
"""Test the registry of :class:`astropy.cosmology.Parameter` validator functions."""
# _registry_validators
assert isinstance(_REGISTRY_FVALIDATORS, dict)
assert all(isinstance(k, str) for k in _REGISTRY_FVALIDATORS.keys())
assert all(callable(v) for v in _REGISTRY_FVALIDATORS.values()) |
Get redshift methods from a cosmology.
Parameters
----------
cosmology : |Cosmology| class or instance
include_private : bool
Whether to include private methods, i.e. those whose names start with an underscore.
include_z2 : bool
Whether to include methods that are functions of 2 (or more) redshifts,
rather than the more common single redshift argument.
Returns
-------
set[str]
The names of the redshift methods on `cosmology`, satisfying
`include_private` and `include_z2`. | def get_redshift_methods(cosmology, include_private=True, include_z2=True):
"""Get redshift methods from a cosmology.
Parameters
----------
cosmology : |Cosmology| class or instance
include_private : bool
Whether to include private methods, i.e. those whose names start with an underscore.
include_z2 : bool
Whether to include methods that are functions of 2 (or more) redshifts,
rather than the more common single redshift argument.
Returns
-------
set[str]
The names of the redshift methods on `cosmology`, satisfying
`include_private` and `include_z2`.
"""
# Get all the method names, optionally sieving out private methods
methods = set()
for n in dir(cosmology):
try: # get method, some will error on ABCs
m = getattr(cosmology, n)
except NotImplementedError:
continue
# Add anything callable, optionally excluding private methods.
if callable(m) and (not n.startswith("_") or include_private):
methods.add(n)
# Sieve out incompatible methods.
# The index to check for redshift depends on whether cosmology is a class
# or instance and does/doesn't include 'self'.
iz1 = int(isinstance(cosmology, type))
for n in tuple(methods):
try:
sig = inspect.signature(getattr(cosmology, n))
except ValueError: # Remove non-introspectable methods.
methods.discard(n)
continue
else:
params = list(sig.parameters.keys())
# Remove non redshift methods:
if len(params) <= iz1: # Check there are enough arguments.
methods.discard(n)
elif len(params) >= iz1 + 1 and not params[iz1].startswith(
"z"
): # First non-self arg is z.
methods.discard(n)
# If methods with 2 z args are not allowed, the following arg is checked.
elif (
not include_z2
and (len(params) >= iz1 + 2)
and params[iz1 + 1].startswith("z")
):
methods.discard(n)
return methods |
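For instance, assuming ``Planck18`` exposes the usual public methods (the exact set returned depends on the astropy version), the helper can be exercised like this:
from astropy.cosmology import Planck18

# Public methods of a cosmology *instance* that take a single redshift.
methods = get_redshift_methods(Planck18, include_private=False, include_z2=False)
assert {"age", "H", "luminosity_distance"} <= methods
# Two-redshift methods such as angular_diameter_distance_z1z2 are excluded.
assert "angular_diameter_distance_z1z2" not in methods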
`pytest.fixture` for clearing and restoring ``_COSMOLOGY_CLASSES``. | def clean_registry():
"""`pytest.fixture` for clearing and restoring ``_COSMOLOGY_CLASSES``."""
# TODO! with monkeypatch instead for thread safety.
ORIGINAL_COSMOLOGY_CLASSES = core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = {} # set as empty dict
yield core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = ORIGINAL_COSMOLOGY_CLASSES |
Make a list of valid redshifts for testing. | def make_valid_zs(max_z: float = 1e5) -> tuple[list, NDArray[float], list, list]:
"""Make a list of valid redshifts for testing."""
# scalar
scalar_zs = [
0,
1,
min(1100, max_z), # interesting times
# FIXME! np.inf breaks some funcs. 0 * inf is an error
np.float64(min(3300, max_z)), # different type
2 * cu.redshift,
3 * u.one, # compatible units
]
# array
_zarr = np.linspace(0, min(1e5, max_z), num=20)
array_zs = [
_zarr, # numpy
_zarr.tolist(), # pure python
Column(_zarr), # table-like
_zarr * cu.redshift, # Quantity
]
return scalar_zs, _zarr, array_zs, scalar_zs + array_zs |
Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.__nonflatclass__`
when there's more than one non-flat class in the inheritance. | def test__nonflatclass__multiple_nonflat_inheritance():
"""
Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.__nonflatclass__`
when there's more than one non-flat class in the inheritance.
"""
# Define a non-operable minimal subclass of Cosmology.
@dataclass_decorator
class SubCosmology2(Cosmology):
@property
def is_flat(self):
return False
# Now make an ambiguous flat cosmology from the two SubCosmologies
with pytest.raises(TypeError, match="cannot create a consistent non-flat class"):
class FlatSubCosmology(FlatCosmologyMixin, SubCosmology, SubCosmology2):
@property
def nonflat(self):
pass |
Test the realizations are in ``dir`` of :mod:`astropy.cosmology.parameters`. | def test_realizations_in_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology.parameters`."""
d = dir(parameters)
assert set(d) == set(parameters.__all__)
for n in parameters.available:
assert n in d |
Test getting 'parameters' and that it is derived from the corresponding
realization. | def test_getting_parameters(name):
"""
Test getting 'parameters' and that it is derived from the corresponding
realization.
"""
params = getattr(parameters, name)
assert isinstance(params, MappingProxyType)
assert params["name"] == name
# Check parameters have the right keys and values
cosmo = getattr(realizations, name)
assert params["name"] == cosmo.name
assert params["cosmology"] == cosmo.__class__.__qualname__
# All the cosmology parameters are equal
for k, v in cosmo.parameters.items():
np.testing.assert_array_equal(params[k], v)
# All the metadata is included. Parameter values take precedence, so only
# checking the keys.
assert set(cosmo.meta.keys()).issubset(params.keys())
# Lastly, check the generation process.
m = cosmo.to_format("mapping", cosmology_as_str=True, move_from_meta=True)
assert params == m |
Test the realizations are in ``dir`` of :mod:`astropy.cosmology`. | def test_realizations_in_toplevel_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology`."""
d = dir(cosmology)
assert set(d) == set(cosmology.__all__)
for n in parameters.available:
assert n in d |
Test the realizations are in ``dir`` of :mod:`astropy.cosmology.realizations`. | def test_realizations_in_realizations_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology.realizations`."""
d = dir(realizations)
assert set(d) == set(realizations.__all__)
for n in parameters.available:
assert n in d |
Test in-built realizations can pickle and unpickle.
Also a regression test for #12008. | def test_pickle_builtin_realizations(name, pickle_protocol):
"""
Test in-built realizations can pickle and unpickle.
Also a regression test for #12008.
"""
# get class instance
original = getattr(cosmology, name)
# pickle and unpickle
f = pickle.dumps(original, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta == original.meta
# if the units are not enabled, it isn't equal because redshift units
# are not equal. This is a weird, known issue.
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta != original.meta |
Test :func:`astropy.cosmology.units.with_H0`. | def test_littleh():
"""Test :func:`astropy.cosmology.units.with_H0`."""
H0_70 = 70 * u.km / u.s / u.Mpc
h70dist = 70 * u.Mpc / cu.littleh
assert_quantity_allclose(h70dist.to(u.Mpc, cu.with_H0(H0_70)), 100 * u.Mpc)
# make sure using the default cosmology works
cosmodist = default_cosmology.get().H0.value * u.Mpc / cu.littleh
assert_quantity_allclose(cosmodist.to(u.Mpc, cu.with_H0()), 100 * u.Mpc)
# Now try a luminosity scaling
h1lum = 0.49 * u.Lsun * cu.littleh**-2
assert_quantity_allclose(h1lum.to(u.Lsun, cu.with_H0(H0_70)), 1 * u.Lsun)
# And the trickiest one: magnitudes. Using H0=10 here for the round numbers
H0_10 = 10 * u.km / u.s / u.Mpc
# assume the "true" magnitude M = 12.
# Then M - 5*log_10(h) = M + 5 = 17
withlittlehmag = 17 * (u.mag - u.MagUnit(cu.littleh**2))
assert_quantity_allclose(withlittlehmag.to(u.mag, cu.with_H0(H0_10)), 12 * u.mag) |
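A quick check of the magnitude arithmetic in the comments above (with H0 = 10 km/s/Mpc, h = 0.1, so the littleh term contributes -5*log10(h) = +5 mag):
import numpy as np

h = 0.1  # littleh for H0 = 10 km/s/Mpc
assert np.isclose(-5 * np.log10(h), 5.0)  # 12 mag + 5 mag = 17 mag, as asserted above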
Test :func:`astropy.cosmology.units.dimensionless_redshift`. | def test_dimensionless_redshift():
"""Test :func:`astropy.cosmology.units.dimensionless_redshift`."""
z = 3 * cu.redshift
val = 3 * u.one
# show units not equal
assert z.unit == cu.redshift
assert z.unit != u.one
assert u.get_physical_type(z) == "redshift"
# test equivalency enabled by default
assert z == val
# also test that it works for powers
assert (3 * cu.redshift**3) == val
# and in composite units
assert (3 * u.km / cu.redshift**3) == 3 * u.km
# test it also works as an equivalency
with u.set_enabled_equivalencies([]): # turn off default equivalencies
assert z.to(u.one, equivalencies=cu.dimensionless_redshift()) == val
with pytest.raises(ValueError):
z.to(u.one)
# if this fails, something is really wrong
with u.add_enabled_equivalencies(cu.dimensionless_redshift()):
assert z == val |
Test :func:`astropy.cosmology.units.redshift_temperature`. | def test_redshift_temperature():
"""Test :func:`astropy.cosmology.units.redshift_temperature`."""
cosmo = Planck13.clone(Tcmb0=3 * u.K)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.redshift_temperature(cosmo)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_temperature(cosmo, ztol=1e-10)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z) |
Test :func:`astropy.cosmology.units.redshift_hubble`. | def test_redshift_hubble():
"""Test :func:`astropy.cosmology.units.redshift_hubble`."""
unit = u.km / u.s / u.Mpc
cosmo = Planck13.clone(H0=100 * unit)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_hubble()
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_hubble()
assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
assert default_cosmo.H(z) != H
# 2) Specifying the cosmology
equivalency = cu.redshift_hubble(cosmo)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_hubble(cosmo, ztol=1e-10)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) |
Test :func:`astropy.cosmology.units.redshift_distance`. | def test_redshift_distance(kind):
"""Test :func:`astropy.cosmology.units.redshift_distance`."""
z = 15 * cu.redshift
d = getattr(Planck13, kind + "_distance")(z)
equivalency = cu.redshift_distance(cosmology=Planck13, kind=kind)
# properties of Equivalency
assert equivalency.name[0] == "redshift_distance"
assert equivalency.kwargs[0]["cosmology"] == Planck13
assert equivalency.kwargs[0]["distance"] == kind
# roundtrip
assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
assert_quantity_allclose(d.to(cu.redshift, equivalency), z) |
Test :func:`astropy.cosmology.units.redshift_distance` wrong kind. | def test_redshift_distance_wrong_kind():
"""Test :func:`astropy.cosmology.units.redshift_distance` wrong kind."""
with pytest.raises(ValueError, match="`kind`"):
cu.redshift_distance(kind=None) |
Test :func:`astropy.cosmology._utils.vectorize_redshift_method`. | def test_vectorize_redshift_method():
"""Test :func:`astropy.cosmology._utils.vectorize_redshift_method`."""
class Class:
@vectorize_redshift_method
def method(self, z):
return z
c = Class()
assert hasattr(c.method, "__vectorized__")
assert isinstance(c.method.__vectorized__, np.vectorize)
# calling with Number
assert c.method(1) == 1
assert isinstance(c.method(1), int)
# calling with a numpy scalar
assert c.method(np.float64(1)) == np.float64(1)
assert isinstance(c.method(np.float64(1)), np.float64)
# numpy array
assert all(c.method(np.array([1, 2])) == np.array([1, 2]))
assert isinstance(c.method(np.array([1, 2])), np.ndarray)
# non-scalar
assert all(c.method([1, 2]) == np.array([1, 2]))
assert isinstance(c.method([1, 2]), np.ndarray) |