def test_inherit_constraints():
"""
Various tests for copying of constraint values between compound models and
their members.
Regression test for https://github.com/astropy/astropy/issues/3481
"""
model = Gaussian1D(bounds={"stddev": (0, 0.3)}, fixed={"mean": True}) + Gaussian1D(
fixed={"mean": True}
)
# Lots of assertions in this test as there are multiple interfaces to
# parameter constraints
assert "stddev_0" in model.bounds
assert model.bounds["stddev_0"] == (0, 0.3)
assert model.stddev_0.bounds == (0, 0.3)
assert "mean_0" in model.fixed
assert model.fixed["mean_0"] is True
assert model.mean_0.fixed is True
assert "mean_1" in model.fixed
assert model.fixed["mean_1"] is True
assert model.mean_1.fixed is True
assert model.stddev_0 is model[0].stddev
# Great, all the constraints were inherited properly
# Now what about if we update them through the sub-models?
model.stddev_0.bounds = (0, 0.4)
assert model[0].stddev.bounds == (0, 0.4)
assert model[0].bounds["stddev"] == (0, 0.4)
model.stddev_0.bounds = (0.1, 0.5)
assert model[0].stddev.bounds == (0.1, 0.5)
assert model[0].bounds["stddev"] == (0.1, 0.5)
model[1].mean.fixed = False
assert model.mean_1.fixed is False
assert model[1].mean.fixed is False
# Now turn off syncing of constraints
assert model.bounds["stddev_0"] == (0.1, 0.5)
model.sync_constraints = False
model[0].stddev.bounds = (0, 0.2)
assert model.bounds["stddev_0"] == (0.1, 0.5)
model.sync_constraints = True
assert model.bounds["stddev_0"] == (0, 0.2) |
Test that a compound model with a custom inverse has that inverse applied
when the inverse of another model, of which it is a component, is computed.
Regression test for https://github.com/astropy/astropy/issues/3542 | def test_compound_custom_inverse():
"""
Test that a compound model with a custom inverse has that inverse applied
when the inverse of another model, of which it is a component, is computed.
Regression test for https://github.com/astropy/astropy/issues/3542
"""
poly = Polynomial1D(1, c0=1, c1=2)
scale = Scale(1)
shift = Shift(1)
model1 = poly | scale
model1.inverse = poly
# model1 now has a custom inverse (the polynomial itself, ignoring the
# trivial scale factor)
model2 = shift | model1
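# Note: inverting a composition reverses the order and inverts each piece, so
# model2.inverse should behave like model1.inverse | shift.inverse, i.e. poly | shift.inverse.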
assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))
# Make sure an inverse is not allowed if the models were combined with the
# wrong operator, or if one of the models doesn't have an inverse defined
MESSAGE = (
r"No analytical or user-supplied inverse transform has been implemented for"
r" this model"
)
with pytest.raises(NotImplementedError, match=MESSAGE):
(shift + model1).inverse
with pytest.raises(NotImplementedError, match=MESSAGE):
(model1 & poly).inverse |
Regression test for
https://github.com/astropy/astropy/issues/3867#issuecomment-114547228 | def test_pickle_compound():
"""
Regression test for
https://github.com/astropy/astropy/issues/3867#issuecomment-114547228
"""
# Test pickling a compound model instance
g1 = Gaussian1D(1.0, 0.0, 0.1)
g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3])
m = g1 + g2
m2 = pickle.loads(pickle.dumps(m))
assert m.param_names == m2.param_names
assert m.__class__.__name__ == m2.__class__.__name__
assert np.all(m.parameters == m2.parameters)
assert np.all(m(0) == m2(0))


def test_tabular_in_compound():
"""
Issue #7411 - evaluate should not change the shape of the output.
"""
t = Tabular1D(points=([1, 5, 7],), lookup_table=[12, 15, 19], bounds_error=False)
rot = Rotation2D(2)
p = Polynomial1D(1)
x = np.arange(12).reshape((3, 4))
# Create a compound model that goes through Tabular.evaluate rather than
# Tabular.__call__, followed by a Rotation2D which checks the exact shapes.
model = p & t | rot
x1, y1 = model(x, x)
assert x1.ndim == 2
assert y1.ndim == 2


def test_compound_with_polynomials_1d(poly):
"""
Tests that polynomials are offset when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x = np.linspace(-5, 5, 10)
result_compound = model(x)
result = shift(poly(x))
assert_allclose(result, result_compound)
assert model.param_names == (
"c0_0",
"c1_0",
"c2_0",
"c3_0",
"c4_0",
"c5_0",
"offset_1",
)


def test_replace_submodel():
"""
Replace a model in a Compound model
"""
S1 = Shift(2, name="shift2") | Scale(3, name="scale3") # First shift then scale
S2 = Scale(2, name="scale2") | Shift(3, name="shift3") # First scale then shift
m = S1 & S2
assert m(1, 2) == (9, 7)
m2 = m.replace_submodel("scale3", Scale(4, name="scale4"))
assert m2(1, 2) == (12, 7)
assert m(1, 2) == (9, 7)
# Check the inverse has been updated
assert m2.inverse(12, 7) == (1, 2)
# Produce the same result by replacing a single model with a compound
m3 = m.replace_submodel("shift2", Shift(2) | Scale(2))
assert m(1, 2) == (9, 7)
assert m3(1, 2) == (18, 7)
# Check the inverse has been updated
assert m3.inverse(18, 7) == (1, 2)
# Test with an arithmetic model compounding operator
m = S1 + S2
assert m(1) == 14
m2 = m.replace_submodel("scale2", Scale(4, name="scale4"))
assert m2(1) == 16
# Test with fix_inputs()
R = fix_inputs(Rotation2D(angle=90, name="rotate"), {0: 1})
m4 = S1 | R
assert_allclose(m4(0), (-6, 1))
m5 = m4.replace_submodel("rotate", Rotation2D(180))
assert_allclose(m5(0), (-1, -6))
# Check we get a value error when model name doesn't exist
MESSAGE = r"No submodels found named not_there"
with pytest.raises(ValueError, match=MESSAGE):
m2 = m.replace_submodel("not_there", Scale(2))
# And now a model set
P = Polynomial1D(degree=1, n_models=2, name="poly")
S = Shift([1, 2], n_models=2)
m = P | S
assert_array_equal(m([0, 1]), (1, 2))
MESSAGE = r"New and old models must have equal values for n_models"
with pytest.raises(ValueError, match=MESSAGE):
m2 = m.replace_submodel("poly", Polynomial1D(degree=1, c0=1))
m2 = m.replace_submodel("poly", Polynomial1D(degree=1, c0=[1, 2], n_models=2))
assert_array_equal(m2([0, 1]), (2, 4))
# Ensure previous _user_inverse doesn't stick around
S1 = Shift(1)
S2 = Shift(2)
S3 = Shift(3, name="S3")
S23 = S2 | S3
S23.inverse = Shift(-4.9)
m = S1 & S23
# This should delete the S23._user_inverse
m2 = m.replace_submodel("S3", Shift(4))
assert m2(1, 2) == (2, 8)
assert m2.inverse(2, 8) == (1, 2)


def test_compound_evaluate(expr):
"""
Tests that compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
# Some evaluate functions (including Const1D's) assume that inputs are numpy arrays or quantities
p1 = np.array([1, 2, 3, 4, 1, 2])
p2 = np.array([1, 0, 0.5])
model1 = Polynomial1D(5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
assert_array_equal(
compound.evaluate(x, *p1, *p2),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)


def test_compound_evaluate_power():
"""
Tests that compound evaluate function produces the same
result as the models with the power operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1**model2
assert_array_equal(
compound.evaluate(x, *p1, *p2),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)


def test_compound_evaluate_named_param(expr):
"""
Tests that compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3, 0.5, 0.5])
model1 = Gaussian1D(2, 1, 5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
assert_array_equal(
compound.evaluate(x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)


def test_compound_evaluate_name_param_power():
"""
Tests that compound evaluate function produces the same
result as the models with the power operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1**model2
assert_array_equal(
compound.evaluate(x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)


def test_compound_evaluate_and():
"""
Tests that compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0.1, 0.5])
p2 = np.array([3])
model1 = Gaussian1D()
model2 = Shift()
compound = model1 & model2
assert_array_equal(
compound.evaluate(x, x, *p1, p2),
[model1.evaluate(x, *p1), model2.evaluate(x, p2)],
)


def test_compound_evaluate_or():
"""
Tests that compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([0.5])
p2_amplitude = np.array([3])
p2_mean = np.array([0])
p2_std = np.array([0.1])
model1 = Shift(0.5)
model2 = Gaussian1D(1, 0, 0.5)
compound = model1 | model2
assert_array_equal(
compound.evaluate(x, p1, p2_amplitude, p2_mean, p2_std),
model2.evaluate(model1.evaluate(x, p1), p2_amplitude, p2_mean, p2_std),
)


def test_compound_evaluate_fix_inputs_by_keyword():
"""
Tests that compound evaluate function produces the same
result as the models fix_inputs operator is applied
when using the keyword
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {"x": x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)


def test_compound_evaluate_fix_inputs_by_position():
"""
Tests that compound evaluate function produces the same
result as the models fix_inputs operator is applied
when using the input index
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {0: x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)


def test_fit_multiplied_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.kg / u.s
m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s)
m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
truth = m1 * m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)


def test_fit_multiplied_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg / u.s
m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s)
m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m)
truth = m1 * m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg * u.kg / u.s
m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s)
m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m)
m4 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
m11 = m1 * m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)


def test_fit_divided_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m / u.s
m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m)
m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s)
truth = m1 / m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)


def test_fit_mixed_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m)
m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s)
m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m)
truth = m1 / m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m)
m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s)
m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m)
m4 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg)
m11 = m1 / m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)


def test_default_constraints():
"""Regression test for https://github.com/astropy/astropy/issues/2396
Ensure that default constraints defined on parameters are carried through
to instances of the models those parameters are defined for.
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=0, min=0, fixed=True)
@staticmethod
def evaluate(x, a, b):
return x * a + b
assert MyModel.a.default == 1
assert MyModel.b.default == 0
assert MyModel.b.min == 0
assert MyModel.b.bounds == (0, None)
assert MyModel.b.fixed is True
m = MyModel()
assert m.a.value == 1
assert m.b.value == 0
assert m.b.min == 0
assert m.b.bounds == (0, None)
assert m.b.fixed is True
assert m.bounds == {"a": (None, None), "b": (0, None)}
assert m.fixed == {"a": False, "b": True}
# Make a model instance that overrides the default constraints and values
m = MyModel(
3, 4, bounds={"a": (1, None), "b": (2, None)}, fixed={"a": True, "b": False}
)
assert m.a.value == 3
assert m.b.value == 4
assert m.a.min == 1
assert m.b.min == 2
assert m.a.bounds == (1, None)
assert m.b.bounds == (2, None)
assert m.a.fixed is True
assert m.b.fixed is False
assert m.bounds == {"a": (1, None), "b": (2, None)}
assert m.fixed == {"a": True, "b": False}


def test_fit_with_fixed_and_bound_constraints(fitter):
"""
Regression test for https://github.com/astropy/astropy/issues/2235
Currently doesn't test that the fit is any *good*--just that parameters
stay within their given constraints.
"""
# DogBoxLSQFitter causes failures on s390x, armel, and possibly other platforms (not x86_64 or arm64)
if fitter == fitting.DogBoxLSQFitter and (
platform.machine() not in ("x86_64", "arm64")
):
pytest.xfail(
"DogBoxLSQFitter can to be unstable on non-standard platforms leading to "
"random test failures"
)
fitter = fitter()
m = models.Gaussian1D(
amplitude=3,
mean=4,
stddev=1,
bounds={"mean": (4, 5)},
fixed={"amplitude": True},
)
x = np.linspace(0, 10, 10)
y = np.exp(-(x**2) / 2)
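# The data are a unit-amplitude Gaussian centered at 0, so the bounds force the fitted
# mean to stay within [4, 5] while the fixed amplitude must remain at 3.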
if isinstance(fitter, fitting.TRFLSQFitter):
ctx = np.errstate(invalid="ignore", divide="ignore")
else:
ctx = nullcontext()
with ctx:
fitted_1 = fitter(m, x, y)
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
assert fitted_1.amplitude == 3.0
m.amplitude.fixed = False
# Cannot enter np.errstate twice, so we need to indent everything in between.
_ = fitter(m, x, y)
# It doesn't matter anymore what the amplitude ends up as so long as the
# bounds constraint was still obeyed
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5 |
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting). | def test_fit_with_bound_constraints_estimate_jacobian(fitter):
"""
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting).
"""
fitter = fitter()
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=2)
@staticmethod
def evaluate(x, a, b):
return a * x + b
m_real = MyModel(a=1.5, b=-3)
x = np.arange(100)
y = m_real(x)
m = MyModel()
fitted_1 = fitter(m, x, y)
# This fit should be trivial so even without constraints on the bounds it
# should be right
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
m2 = MyModel()
m2.a.bounds = (-2, 2)
_ = fitter(m2, x, y)
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
# Check that the estimated Jacobian was computed (it doesn't matter what
# the values are so long as they're not all zero).
if fitter == fitting.LevMarLSQFitter:
assert np.any(fitter.fit_info["fjac"] != 0)


def test_2d_model(fitter):
"""Issue #6403"""
from astropy.utils import NumpyRNGContext
fitter = fitter()
# 2D model with LevMarLSQFitter
gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
X = np.linspace(-1, 7, 200)
Y = np.linspace(-1, 7, 200)
x, y = np.meshgrid(X, Y)
z = gauss2d(x, y)
w = np.ones(x.size)
w.shape = x.shape
with NumpyRNGContext(1234567890):
n = np.random.randn(x.size)
n.shape = x.shape
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# 2D model with LevMarLSQFitter, fixed constraint
gauss2d.x_stddev.fixed = True
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False
p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
z = p2(x, y)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False, fixed constraint
p2.c1_0.fixed = True
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)


def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
n_outputs = 1
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
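# (model_set_axis=0 treats the leading axis of the parameter array as indexing
# separate models, so a length-3 value for ``a`` yields a set of 3 models.)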
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=np.int64(0))
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])


def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ["self", "args", "meta", "name", "kwargs"]
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ("a", "b")
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ("a", "b")
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "b", "kwargs"]
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]


def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ("a",)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ["self", "a", "kwargs"]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == [
"self",
"inputs",
"model_set_axis",
"with_bounding_box",
"fill_value",
"equivalencies",
"inputs_map",
"new_inputs",
]


def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)


def test_custom_model_n_outputs():
"""
Test creating a custom_model which has more than one output, which
requires special handling.
Demonstrates issue #11791's ``n_outputs`` error has been solved
"""
@custom_model
def model(x, y, n_outputs=2):
return x + 1, y + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 2
assert m.outputs == ("x0", "x1")
assert (
separability_matrix(m)
== [
[True, True],
[True, True],
]
).all()
@custom_model
def model(x, y, z, n_outputs=3):
return x + 1, y + 1, z + 1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 3
assert m.outputs == ("x0", "x1", "x2")
assert (
separability_matrix(m)
== [
[True, True, True],
[True, True, True],
[True, True, True],
]
).all()


def test_custom_model_settable_parameters():
"""
Test creating a custom_model which specifically sets adjustable model
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, settable parameters
should be allowed to have defaults set.
"""
@custom_model
def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.bounding_box == ((1, 2), (3, 4))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
m = model(bounding_box=((5, 6), (7, 8)))
assert m.n_outputs == 2
assert m.bounding_box == ((5, 6), (7, 8))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
@custom_model
def model(x, y, n_outputs=2, outputs=("z0", "z1")):
return x + 1, y + 1
m = model()
assert m.n_outputs == 2
assert m.outputs == ("z0", "z1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1")
m = model(outputs=("w0", "w1"))
assert m.n_outputs == 2
assert m.outputs == ("w0", "w1")
m.outputs = ("a0", "a1")
assert m.outputs == ("a0", "a1") |
Test creating a custom_model which attempts to override non-overridable
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, non-settable parameters
should raise an error (unexpected behavior may occur). | def test_custom_model_regected_parameters():
"""
Test creating a custom_model which attempts to override non-overridable
parameters.
Demonstrates part of issue #11791's notes about what passed parameters
should/shouldn't be allowed. In this case, non-settable parameters
should raise an error (unexpected behavior may occur).
"""
with pytest.raises(
ValueError, match=r"Parameter 'n_inputs' cannot be a model property: *"
):
@custom_model
def model1(x, y, n_outputs=2, n_inputs=3):
return x + 1, y + 1
with pytest.raises(
ValueError, match=r"Parameter 'uses_quantity' cannot be a model property: *"
):
@custom_model
def model2(x, y, n_outputs=2, uses_quantity=True):
return x + 1, y + 1


def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
inv = models.Polynomial1D(1, c0=(2.0 / 3.0), c1=(1.0 / 3.0))
MESSAGE = (
r"No analytical or user-supplied inverse transform has been implemented for"
r" this model"
)
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError, match=MESSAGE):
p.inverse


def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
n_inputs = 0
outputs = ("y",)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)


def test_render_model_out_dtype():
"""Test different out.dtype for model.render."""
MESSAGE = (
r"Cannot cast ufunc 'add' output from .* to .* with casting rule 'same_kind"
)
for model in [models.Gaussian2D(), models.Gaussian2D() + models.Planar2D()]:
for dtype in [np.float64, np.float32, np.complex64]:
im = np.zeros((40, 40), dtype=dtype)
imout = model.render(out=im)
assert imout is im
assert imout.sum() != 0
with pytest.raises(TypeError, match=MESSAGE):
im = np.zeros((40, 40), dtype=np.int32)
imout = model.render(out=im)


def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)


def test_fix_inputs_integer():
"""
Tests that numpy integers can be passed as dictionary keys to fix_inputs
Issue #11358
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {1: 22})
assert mf(1) == (1, 22)
mf_int32 = models.fix_inputs(m, {np.int32(1): 33})
assert mf_int32(1) == (1, 33)
mf_int64 = models.fix_inputs(m, {np.int64(1): 44})
assert mf_int64(1) == (1, 44)


def test_fix_inputs_empty_dict():
"""
Tests that empty dictionary can be passed to fix_inputs
Issue #11355
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {})
assert mf(1, 2) == (1, 2)


def test_prepare_outputs_mixed_broadcast():
"""
Tests that _prepare_outputs_single_model does not fail when a smaller
array is passed as first input, but output is broadcast to larger
array.
Issue #10170
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model([1, 2], 3)
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.9692332344763441, 1.0])
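# (The second value is exactly 1.0 because the point (x=2, y=3) coincides with the
# Gaussian's mean (x_mean=2, y_mean=3), where the profile equals its amplitude.)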
output = model(4, [5, 6])
assert output.shape == (2,)
np.testing.assert_allclose(output, [0.8146473164114145, 0.7371233743916278])


def test_prepare_outputs_single_entry_vector():
"""
jwst and gwcs both require that single entry vectors produce single
entry output vectors, not scalars. This tests for that behavior.
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model(np.array([1]), np.array([2]))
assert output.shape == (1,)
np.testing.assert_allclose(output, [0.9500411305585278])


def test_prepare_outputs_sparse_grid():
"""
Test to show that #11060 has been solved.
"""
shape = (3, 3)
data = np.arange(np.prod(shape)).reshape(shape) * u.m / u.s
points_unit = u.pix
points = [np.arange(size) * points_unit for size in shape]
kwargs = {
"bounds_error": False,
"fill_value": np.nan,
"method": "nearest",
}
transform = models.Tabular2D(points, data, **kwargs)
truth = (
np.array(
[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
]
)
* u.m
/ u.s
)
points = np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=True)
x = points[0] * u.pix
y = points[1] * u.pix
value = transform(x, y)
assert (value == truth).all()
points = (
np.meshgrid(np.arange(3), np.arange(3), indexing="ij", sparse=False) * u.pix
)
value = transform(*points)
assert (value == truth).all()


def test_print_special_operator_CompoundModel(capsys):
"""
Test that issue #11310 has been fixed
"""
model = convolve_models(models.Sersic2D(), models.Gaussian2D())
with astropy.conf.set_temp("max_width", 80):
# fmt: off
assert str(model) == (
"Model: CompoundModel\n"
"Inputs: ('x', 'y')\n"
"Outputs: ('z',)\n"
"Model set size: 1\n"
"Expression: convolve_fft (([0]), ([1]))\n"
"Components: \n"
" [0]: <Sersic2D(amplitude=1., r_eff=1., n=4., "
"x_0=0., y_0=0., ellip=0., theta=0.)>\n"
"\n"
" [1]: <Gaussian2D(amplitude=1., x_mean=0., y_mean=0., "
"x_stddev=1., y_stddev=1., theta=0.)>\n"
"Parameters:\n"
" amplitude_0 r_eff_0 n_0 x_0_0 y_0_0 ... y_mean_1 x_stddev_1 y_stddev_1 theta_1\n"
" ----------- ------- --- ----- ----- ... -------- ---------- ---------- -------\n"
" 1.0 1.0 4.0 0.0 0.0 ... 0.0 1.0 1.0 0.0"
)


def test_bind_compound_bounding_box_using_with_bounding_box_select():
"""
This demonstrates how to bind multiple bounding_boxes which are
selectable using the `with_bounding_box`, note there must be a
fall-back to implicit.
"""
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox = (0, 1)
MESSAGE = r"'tuple' object has no attribute 'items"
with pytest.raises(AttributeError, match=MESSAGE):
bind_compound_bounding_box(model, bbox, "x")
bbox = {0: (-1, 0), 1: (0, 1)}
bind_compound_bounding_box(model, bbox, [("x", False)])
# No bounding box
assert model(-0.5) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0) == truth(0)
assert model(1) == truth(1)
# `with_bounding_box` selects as `-0.5` will not be a key
assert model(-0.5, with_bounding_box=0) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=1))
# `with_bounding_box` selects as `0.5` will not be a key
assert model(0.5, with_bounding_box=1) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(0,)))
# Fall back onto implicit selector
assert model(0, with_bounding_box=True) == truth(0)
assert model(1, with_bounding_box=True) == truth(1)
# Attempt to fall-back on implicit selector, but no bounding_box
MESSAGE = r"No bounding box is defined for selector: .*"
with pytest.raises(RuntimeError, match=MESSAGE):
model(0.5, with_bounding_box=True)
# Override implicit selector
assert np.isnan(model(1, with_bounding_box=0))


def test_compound_model_copy_user_attribute():
"""Regression test for issue #12370"""
model = models.Gaussian2D(100, 25, 25, 5, 5) | models.Identity(1)
model.xname = "x_mean" # user-defined attribute
assert hasattr(model, "xname")
assert model.xname == "x_mean"
model_copy = model.copy()
model_copy.xname
assert hasattr(model_copy, "xname")
assert model_copy.xname == "x_mean"


def test_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Gaussian2D()
bbox = ModelBoundingBox.validate(model, ((-1, 1), (-np.inf, np.inf)), order="F")
model.bounding_box = bbox
x = np.array([-0.5, 0.5])
y = 0
# Everything works when it's all inside the bounding box
assert (model(x, y) == (model(x, y, with_bounding_box=True))).all()


def test_compound_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ("x", "y", "slit_id")
bbox = ModelBoundingBox.validate(
model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order="F"
)
model.bounding_box = bbox
x = np.array([1000, 1001])
y = np.array([2000, 2001])
slit_id = 0
# Everything works when it's all inside the bounding box
value0 = model(x, y, slit_id)
value1 = model(x, y, slit_id, with_bounding_box=True)
assert_equal(value0, value1)


def test_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,))
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])


def test_compound_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,)) | models.Shift(1)
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])


def test_bounding_box_pass_with_ignored():
"""Test the possibility of setting ignored variables in bounding box"""
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, (-1, 1), ignored=["y"])
model.bounding_box = bbox
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
model = models.Polynomial2D(2)
bind_bounding_box(model, (-1, 1), ignored=["y"])
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox


def test_model_integer_indexing(int_type):
"""Regression for PR 12561; verify that compound model components
can be accessed by integer index"""
gauss = models.Gaussian2D()
airy = models.AiryDisk2D()
compound = gauss + airy
assert compound[int_type(0)] == gauss
assert compound[int_type(1)] == airy


def test_model_string_indexing():
"""Regression for PR 12561; verify that compound model components
can be accessed by indexing with model name"""
gauss = models.Gaussian2D()
gauss.name = "Model1"
airy = models.AiryDisk2D()
airy.name = "Model2"
compound = gauss + airy
assert compound["Model1"] == gauss
assert compound["Model2"] == airy |
Test model set fitting with outlier removal (issue #6819) | def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(),
sigma_clip,
sigma=2.5,
niter=3,
cenfunc=np.ma.mean,
stdfunc=np.ma.std,
)
x = np.arange(10)
y = np.array([2.5 * x - 4, 2 * x * x + x + 10])
y[1, 5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4.0, 10.0], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.0], atol=1e-14)
assert_allclose(poly_set.c2, [0.0, 2.0], atol=1e-14)


def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(
LinearLSQFitter(),
sigma_clip,
sigma=2.5,
niter=3,
cenfunc=np.ma.mean,
stdfunc=np.ma.std,
)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x + y, 1 - 0.1 * x + 0.2 * y]), 0, 3)
z[3, 3:5, 0] = 100.0 # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0.0, 1.0]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1.0, -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1.0, 0.2]]], atol=1e-14)


def test_fitters_with_weights(fitter):
"""Issue #5737"""
fitter = fitter()
if isinstance(fitter, _NLLSQFitter):
pytest.xfail(
"This test is poorly designed and causes issues for "
"scipy.optimize.least_squares based fitters"
)
Xin, Yin = np.mgrid[0:21, 0:21]
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))


def test_linear_fitter_with_weights():
"""Regression test for #7035"""
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))


def test_linear_fitter_with_weights_flat():
"""Same as the above #7035 test but with flattened inputs"""
Xin, Yin = np.mgrid[0:21, 0:21]
Xin, Yin = Xin.flatten(), Yin.flatten()
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10) / 1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig ** (-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))


def test_fitters_interface(fitter):
"""
Test that ``**kwargs`` work with all optimizers.
This is a basic smoke test.
"""
fitter = fitter()
model = models.Gaussian1D(10, 4, 0.3)
x = np.arange(21)
y = model(x)
if isinstance(fitter, SimplexLSQFitter):
kwargs = {"maxiter": 79, "verblevel": 1, "acc": 1e-6}
else:
kwargs = {"maxiter": 77, "verblevel": 1, "epsilon": 1e-2, "acc": 1e-6}
if isinstance(fitter, (LevMarLSQFitter, _NLLSQFitter)):
kwargs.pop("verblevel")
_ = fitter(model, x, y, **kwargs)


def test_fitting_with_outlier_removal_niter():
"""
Test that FittingWithOutlierRemoval stops prior to reaching niter if the
set of masked points has converged and correctly reports the actual number
of iterations performed.
"""
# 2 rows with some noise around a constant level and 1 deviant point:
x = np.arange(25)
with NumpyRNGContext(_RANDOM_SEED):
y = np.random.normal(loc=10.0, scale=1.0, size=(2, 25))
y[0, 14] = 100.0
# Fit 2 models with up to 5 iterations (should only take 2):
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(),
outlier_func=sigma_clip,
niter=5,
sigma_lower=3.0,
sigma_upper=3.0,
maxiters=1,
)
model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)
# Confirm that only the deviant point was rejected, in 2 iterations:
assert_equal(np.where(mask), [[0], [14]])
assert fitter.fit_info["niter"] == 2
# Refit just the first row without any rejection iterations, to ensure
# there are no regressions for that special case:
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(),
outlier_func=sigma_clip,
niter=0,
sigma_lower=3.0,
sigma_upper=3.0,
maxiters=1,
)
model, mask = fitter(models.Chebyshev1D(2), x, y[0])
# Confirm that there were no iterations or rejected points:
assert mask.sum() == 0
assert fitter.fit_info["niter"] == 0 |
Regression test error introduced to solve issues #3575 and #12809 | def test_non_finite_error(fitter, weights):
"""Regression test error introduced to solve issues #3575 and #12809"""
x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])
m_init = models.Gaussian1D()
fit = fitter()
# The fit should fail because of the non-finite values and raise an error
with pytest.raises(
NonFiniteValueError, match=r"Objective function has encountered.*"
):
fit(m_init, x, y, weights=weights)


def test_non_finite_filter_1D(fitter, weights):
"""Regression test filter introduced to remove non-finte values from data"""
x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, np.inf])
m_init = models.Gaussian1D()
fit = fitter()
if weights is not None:
weights[[1, 4]] = np.nan
with pytest.warns(
AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter",
):
fit(m_init, x, y, filter_non_finite=True, weights=weights)


def test_non_finite_filter_2D(fitter, weights):
"""Regression test filter introduced to remove non-finte values from data"""
x, y = np.mgrid[0:10, 0:10]
m_true = models.Gaussian2D(amplitude=1, x_mean=5, y_mean=5, x_stddev=2, y_stddev=2)
with NumpyRNGContext(_RANDOM_SEED):
z = m_true(x, y) + np.random.rand(*x.shape)
z[0, 0] = np.nan
z[3, 3] = np.inf
z[7, 5] = -np.inf
if weights is not None:
weights[1, 1] = np.nan
weights[4, 3] = np.inf
m_init = models.Gaussian2D()
fit = fitter()
with pytest.warns(
AstropyUserWarning,
match=r"Non-Finite input data has been removed by the fitter",
):
fit(m_init, x, y, z, filter_non_finite=True, weights=weights)


def test_non_linear_fit_zero_degree_polynomial_with_weights(fitter):
"""
Regression test for issue #13617
Issue:
Weighted non-linear fits of 0-degree polynomials cause an error
to be raised by scipy.
Fix:
There should be no error raised in this circumstance
"""
model = models.Polynomial1D(0, c0=0)
fitter = fitter()
x = np.arange(10, dtype=float)
y = np.ones((10,))
weights = np.ones((10,))
fit = fitter(model, x, y)
assert_almost_equal(fit.c0, 1.0)
fit = fitter(model, x, y, weights=weights)
assert_almost_equal(fit.c0, 1.0)


def test_sigma_constant():
"""
Test that the GAUSSIAN_SIGMA_TO_FWHM constant matches the
gaussian_sigma_to_fwhm constant in astropy.stats. We define
it manually in astropy.modeling to avoid importing from
astropy.stats.
"""
from astropy.modeling.functional_models import GAUSSIAN_SIGMA_TO_FWHM
from astropy.stats.funcs import gaussian_sigma_to_fwhm
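# Both constants are the standard conversion factor 2 * sqrt(2 * ln 2) ~= 2.3548.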
assert gaussian_sigma_to_fwhm == GAUSSIAN_SIGMA_TO_FWHM


def test_Trapezoid1D():
"""Regression test for https://github.com/astropy/astropy/issues/1721"""
model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3)
xx = np.linspace(0, 4, 8)
yy = model(xx)
yy_ref = [0.0, 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.0]
assert_allclose(yy, yy_ref, rtol=0, atol=1e-6)


def test_Gaussian2D():
"""
Test rotated elliptical Gaussian2D model.
https://github.com/astropy/astropy/pull/2038
"""
model = models.Gaussian2D(
4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3, theta=np.pi / 6.0
)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [
[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709],
[3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838],
[3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187],
[3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544],
[3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094],
]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
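# The FWHM values checked below are simply the stddevs scaled by 2 * sqrt(2 * ln 2):
# 5.1 -> ~12.0096 and 3.3 -> ~7.7709.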
assert_allclose(
[model.x_fwhm, model.y_fwhm], [12.009582229657841, 7.7709061486021325]
)


def test_Gaussian2DCovariance():
"""
Test rotated elliptical Gaussian2D model when cov_matrix is input.
https://github.com/astropy/astropy/pull/2199
"""
cov_matrix = [[49.0, -16.0], [-16.0, 9.0]]
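# Diagonal entries are the x and y variances (49 and 9); the off-diagonal -16 is the
# covariance that rotates the ellipse.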
model = models.Gaussian2D(17.0, 2.0, 2.5, cov_matrix=cov_matrix)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [
[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269],
[8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227],
[13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638],
[16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889],
[14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201],
]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
# Test bad cov_matrix shape
cov_matrix = [[49.0, 3.14, -16.0], [3.14, -16.0, 9.0], [-16, 27, 3.14]]
MESSAGE = r"Covariance matrix must be 2x2"
with pytest.raises(ValueError, match=MESSAGE):
models.Gaussian2D(17.0, 2.0, 2.5, cov_matrix=cov_matrix)


def test_RedshiftScaleFactor():
"""Like ``test_ScaleModel()``."""
# Scale by a scalar
m = models.RedshiftScaleFactor(0.4)
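# RedshiftScaleFactor scales the input by (1 + z), so z = 0.4 maps 1 -> 1.4 and 2 -> 2.8.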
assert m(0) == 0
assert_array_equal(m([1, 2]), [1.4, 2.8])
assert_allclose(m.inverse(m([1, 2])), [1, 2])
# Scale by a list
m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3)
assert_array_equal(m(0), 0)
assert_array_equal(m([1, 2], model_set_axis=False), [[0.5, 1], [1, 2], [1.5, 3]])
assert_allclose(
m.inverse(m([1, 2], model_set_axis=False)), [[1, 2], [1, 2], [1, 2]]
)


def test_RedshiftScaleFactor_model_levmar_fit():
"""Test fitting RedshiftScaleFactor model with LevMarLSQFitter."""
init_model = models.RedshiftScaleFactor()
x = np.arange(10)
y = 2.7174 * x
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [1.7174])


def test_Ellipse2D():
"""Test Ellipse2D model."""
amplitude = 7.5
x0, y0 = 15, 15
theta = Angle(45, "deg")
em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian)
y, x = np.mgrid[0:30, 0:30]
e = em(x, y)
assert np.all(e[e > 0] == amplitude)
assert e[y0, x0] == amplitude
rotation = models.Rotation2D(angle=theta.degree)
point1 = [2, 0] # Rotation2D center is (0, 0)
point2 = rotation(*point1)
point1 = np.array(point1) + [x0, y0]
point2 = np.array(point2) + [x0, y0]
e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.0)
e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian)
assert e1(*point1) == e2(*point2)


def test_Ellipse2D_circular():
"""Test that circular Ellipse2D agrees with Disk2D [3736]."""
amplitude = 7.5
radius = 10
size = (radius * 2) + 1
y, x = np.mgrid[0:size, 0:size]
ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius, theta=0)(x, y)
disk = models.Disk2D(amplitude, radius, radius, radius)(x, y)
assert np.all(ellipse == disk)


def test_Shift_model_levmar_fit(fitter):
"""Test fitting Shift model with LevMarLSQFitter (issue #6103)."""
fitter = fitter()
init_model = models.Shift()
x = np.arange(10)
y = x + 0.1
with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"):
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [0.1], atol=1e-15)


def test_Shift_model_set_linear_fit():
"""Test linear fitting of Shift model (issue #6103)."""
init_model = models.Shift(offset=[0, 0], n_models=2)
x = np.arange(10)
yy = np.array([x + 0.1, x - 0.2])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15)


def test_Scale_model_set_linear_fit(Model):
"""Test linear fitting of Scale model (#6103)."""
init_model = Model(factor=[0, 0], n_models=2)
x = np.arange(-3, 7)
yy = np.array([1.15 * x, 0.96 * x])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15)


def test_Voigt1D_norm(algorithm):
"""Test integral of normalized Voigt profile."""
from scipy.integrate import quad
if algorithm == "humlicek2":
ctx = pytest.warns(
AstropyDeprecationWarning, match=r"humlicek2 has been deprecated since .*"
)
atol = 1e-8
else:
ctx = nullcontext()
atol = 1e-14
def voigt(algorithm):
return models.Voigt1D(
amplitude_L=1.0 / np.pi, x_0=0.0, fwhm_L=2.0, fwhm_G=1.5, method=algorithm
)
with ctx:
voi = models.Voigt1D(
amplitude_L=1.0 / np.pi, x_0=0.0, fwhm_L=2.0, fwhm_G=1.5, method=algorithm
)
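# With amplitude_L = 1/pi and fwhm_L = 2 the Lorentzian component has unit area, and
# convolution with the normalized Gaussian preserves that area, so the integral should be 1.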
assert_allclose(quad(voi, -np.inf, np.inf)[0], 1.0, atol=atol)


def test_Voigt1D_hum2(doppler):
"""
Verify accuracy of Voigt profile in Humlicek approximation to Faddeeva.cc (SciPy).
"""
x = np.linspace(-20, 20, 400001)
voi_w = models.Voigt1D(
amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler, method="wofz"
)
vf_w = voi_w(x)
dvda_w = voi_w.fit_deriv(
x, x_0=0, amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler
)
voi_h = models.Voigt1D(
amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler, method="humlicek2"
)
vf_h = voi_h(x)
dvda_h = voi_h.fit_deriv(
x, x_0=0, amplitude_L=2.0 / np.pi, fwhm_L=1.0, fwhm_G=doppler
)
assert_allclose(vf_h, vf_w, rtol=1e-7 * (2 + 1 / np.sqrt(doppler)))
assert_allclose(dvda_h, dvda_w, rtol=1e-9, atol=1e-7 * (1 + 30 / doppler))


def test_Voigt1D_method():
"""Test Voigt1D default method"""
voi = models.Voigt1D(method="humlicek2")
assert voi.method == "_hum2zpf16c"
voi = models.Voigt1D()
if HAS_SCIPY:
assert voi.method == "wofz"
voi = models.Voigt1D(method="wofz")
assert voi.method == "wofz"
voi = models.Voigt1D(method="scipy")
assert voi.method == "wofz"
else:
assert voi.method == "_hum2zpf16c"


def test_call_keyword_args_1(model):
"""
Test calling a model with positional, keyword, and a mixture of both arguments.
"""
positional = model(1, 2)
assert_allclose(positional, model(x=1, y=2))
assert_allclose(positional, model(1, y=2))
model.inputs = ("r", "t")
assert_allclose(positional, model(r=1, t=2))
assert_allclose(positional, model(1, t=2))
assert_allclose(positional, model(1, 2))
MESSAGE = r"Too many input arguments - expected 2, got .*"
with pytest.raises(ValueError, match=MESSAGE):
model(1, 2, 3)
with pytest.raises(ValueError, match=MESSAGE):
model(1, 2, t=12, r=3)
MESSAGE = r"Missing input arguments - expected 2, got 1"
with pytest.raises(ValueError, match=MESSAGE):
model(1)


def test_call_keyword_args_2(model):
"""
Test calling a model with positional, keyword, and a mixture of both arguments.
"""
positional = model(1)
assert_allclose(positional, model(x=1))
model.inputs = ("r",)
assert_allclose(positional, model(r=1))
MESSAGE = r"Too many input arguments - expected .*, got .*"
with pytest.raises(ValueError, match=MESSAGE):
model(1, 2, 3)
with pytest.raises(ValueError, match=MESSAGE):
model(1, 2, t=12, r=3)
MESSAGE = r"Missing input arguments - expected 1, got 0"
with pytest.raises(ValueError, match=MESSAGE):
model()


def test_call_keyword_args_3(model):
"""
Test calling a model with positional, keyword, and a mixture of both arguments.
"""
positional = model(1, 2)
model.inputs = ("r", "t")
assert_allclose(positional, model(r=1, t=2))
assert_allclose(positional, model(1, t=2))
MESSAGE = r"Too many input arguments - expected .*, got .*"
with pytest.raises(ValueError, match=MESSAGE):
model(1, 2, 3)
with pytest.raises(ValueError, match=MESSAGE):
model(1, 2, t=12, r=3)
MESSAGE = r"Missing input arguments - expected 2, got 0"
with pytest.raises(ValueError, match=MESSAGE):
model() |
Test calling a model with positional arguments, keyword arguments, and a mixture of both. | def test_call_keyword_mappings(model):
"""
Test calling a model with positional arguments, keyword arguments, and a mixture of both.
"""
positional = model(1, 2)
assert_allclose(positional, model(x0=1, x1=2))
assert_allclose(positional, model(1, x1=2))
# We take a copy before modifying the model since otherwise this changes
# the instance used in the parametrize call and affects future test runs.
model = model.copy()
model.inputs = ("r", "t")
assert_allclose(positional, model(r=1, t=2))
assert_allclose(positional, model(1, t=2))
assert_allclose(positional, model(1, 2))
MESSAGE = r"Too many input arguments - expected .*, got .*"
with pytest.raises(ValueError, match=MESSAGE):
model(1, 2, 3)
with pytest.raises(ValueError, match=MESSAGE):
model(1, 2, t=12, r=3)
MESSAGE = r"Missing input arguments - expected 2, got 1"
with pytest.raises(ValueError, match=MESSAGE):
model(1) |
Test bounding box evaluation for a 3D model | def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
return (
(self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a),
)
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox.bounding_box()
dz, dy, dx = (np.diff(bbox) / 2).ravel()
z1, y1, x1 = np.mgrid[
slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1),
]
z2, y2, x2 = np.mgrid[
slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1),
]
arr = model(x2, y2, z2, with_bounding_box=True)
sub_arr = model(x1, y1, z1, with_bounding_box=True)
# check for flux agreement
assert abs(np.nansum(arr) - np.nansum(sub_arr)) < np.nansum(arr) * 1e-7 |
Create instance of model class. | def create_model(
model_class, test_parameters, use_constraints=True, parameter_key="parameters"
):
"""Create instance of model class."""
constraints = {}
if issubclass(model_class, PolynomialBase):
return model_class(**test_parameters[parameter_key])
elif issubclass(model_class, FittableModel):
if "requires_scipy" in test_parameters and not HAS_SCIPY:
pytest.skip("SciPy not found")
if use_constraints:
if "constraints" in test_parameters:
constraints = test_parameters["constraints"]
return model_class(*test_parameters[parameter_key], **constraints) |
Currently just tests that the model peaks at its center (x_0).
Regression test for https://github.com/astropy/astropy/issues/3942 | def test_voigt_model():
"""
Currently just tests that the model peaks at its center (x_0).
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
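# x[500] == 5.0 == x_0, so the profile should peak exactly at that sample.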
assert y[500] == y.max() |
Test Tabular1D model. | def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1.0, 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0.0, 0.7, 1.4, 2.1, 3.9]
ans1 = [1.0, 7.3, 6.8, 6.3, 1.8]
assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0.0, 0.7, 1.4, 2.1, 3.9, 4.1]
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False, fill_value=None)
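# With bounds_error=False and fill_value=None the table is linearly extrapolated:
# the last segment (45 -> -3) gives -3 + 0.1 * (-48) = -7.8 at x = 4.1.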
assert_allclose(model(xextrap), [1.0, 7.3, 6.8, 6.3, 1.8, -7.8])
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points * u.nm, lookup_table=values * u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable(
[1, 2, 3],
[10, 20, 30] * u.nJy,
bounds_error=False,
fill_value=1e-33 * (u.W / (u.m * u.m * u.Hz)),
)
assert_quantity_allclose(model(np.arange(5)), [100, 10, 20, 30, 100] * u.nJy) |
Test the option to evaluate a model respecting
its bounding_box. | def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
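# Points outside the bounding box evaluate to NaN by default, so only the finite
# entries are compared below.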
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
assert p(1) == p(1, with_bounding_box=True)
t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(
t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
[[np.nan, 11], [np.nan, 14], [np.nan, 4]],
)
trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4]) |
Test that the Tabular1D inverse is defined | def test_tabular1d_inverse():
"""Test that the Tabular1D inverse is defined"""
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
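# The inverse effectively swaps points and lookup_table, mapping table values back to
# the input coordinates.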
result = t.inverse((3.4, 6.7))
assert_allclose(result, np.array((1.0, 2.0)))
# Check that it works for descending values in lookup_table
t2 = models.Tabular1D(points, values[::-1])
assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])
result2 = t2.inverse((7, 6.7))
assert_allclose(result2, np.array((1.0, 2.0)))
# Check that it errors on double-valued lookup_table
points = np.arange(5)
values = np.array([1.5, 3.4, 3.4, 32, 25])
t = models.Tabular1D(points, values)
with pytest.raises(NotImplementedError, match=r""):
t.inverse((3.4, 7.0))
# Check that Tabular2D.inverse raises an error
table = np.arange(5 * 5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t3 = models.Tabular2D(points=points, lookup_table=table)
with pytest.raises(NotImplementedError, match=r""):
t3.inverse((3, 3))
# Check that it uses the same kwargs as the original model
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
MESSAGE = r"One of the requested xi is out of bounds in dimension 0"
with pytest.raises(ValueError, match=MESSAGE):
t.inverse(100)
t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)
result = t.inverse(100)
assert_allclose(t(result), 100) |
The module name must be set manually because
these classes are created dynamically. | def test_tabular_module_name():
"""
The module name must be set manually because
these classes are created dynamically.
"""
for model in [models.Tabular1D, models.Tabular2D]:
assert model.__module__ == "astropy.modeling.tabular" |
Test that keyword arguments can be passed to Model classes. | def test_metaclass_kwargs():
"""Test that keyword arguments can be passed to Model classes."""
class ClassModel(FittableModel, flag="flag"):
def evaluate(self):
pass |
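# Creating the class without error is the whole test; there is nothing further to assert.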
Test that keyword arguments can be passed to Model subclasses. | def test_submetaclass_kwargs():
"""Test that keyword arguments can be passed to Model subclasses."""
class ClassModel(FittableModel, metaclass=_ExtendedModelMeta, flag="flag"):
def evaluate(self):
pass
assert ClassModel.flag == "flag" |
Test that a model initialized with model_set_axis=1
can be evaluated with model_set_axis=False. | def test_model1d_axis_1(model_class):
"""
Test that a model initialized with model_set_axis=1
can be evaluated with model_set_axis=False.
"""
n_models = 2
model_axis = 1
c0 = [[2, 3]]
c1 = [[1, 2]]
t1 = model_class(1, c0=2, c1=1)
t2 = model_class(1, c0=3, c1=2)
p1 = model_class(1, c0=c0, c1=c1, n_models=n_models, model_set_axis=model_axis)
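# x, xx and xxx are the module-level test arrays of increasing dimensionality used
# throughout these model-set tests.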
MESSAGE = r"For model_set_axis=1, all inputs must be at least 2-dimensional"
with pytest.raises(ValueError, match=MESSAGE):
p1(x)
y = p1(x, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0], t1(x))
assert_allclose(y[:, 1], t2(x))
y = p1(xx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :], t1(xx))
assert_allclose(y[:, 1, :], t2(xx))
y = p1(xxx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :, :], t1(xxx))
assert_allclose(y[:, 1, :, :], t2(xxx)) |
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False. | def test_model1d_axis_2(model_class):
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
p1 = model_class(
1, c0=[[[1, 2, 3]]], c1=[[[10, 20, 30]]], n_models=3, model_set_axis=2
)
t1 = model_class(1, c0=1, c1=10)
t2 = model_class(1, c0=2, c1=20)
t3 = model_class(1, c0=3, c1=30)
MESSAGE = r"For model_set_axis=2, all inputs must be at least 3-dimensional"
with pytest.raises(ValueError, match=MESSAGE):
p1(x)
with pytest.raises(ValueError, match=MESSAGE):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape == (1, 4, 3)
assert_allclose(y[:, :, 0].flatten(), t1(x))
assert_allclose(y[:, :, 1].flatten(), t2(x))
assert_allclose(y[:, :, 2].flatten(), t3(x)) |
Test that a model initialized with model_set_axis=0
can be evaluated with model_set_axis=False. | def test_model1d_axis_0(model_class):
"""
Test that a model initialized with model_set_axis=0
can be evaluated with model_set_axis=False.
"""
p1 = model_class(1, n_models=2, model_set_axis=0)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = model_class(1, c0=2, c1=1)
t2 = model_class(1, c0=3, c1=2)
MESSAGE = r"Input argument 'x' does not have the correct dimensions in .*"
with pytest.raises(ValueError, match=MESSAGE):
p1(x)
y = p1(xx)
assert len(y) == 2
assert_allclose(y[0], t1(xx[0]))
assert_allclose(y[1], t2(xx[1]))
y = p1(x, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(x))
assert_allclose(y[1], t2(x))
y = p1(xx, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(xx))
assert_allclose(y[1], t2(xx))
y = p1(xxx, model_set_axis=False)
assert_allclose(y[0], t1(xxx))
assert_allclose(y[1], t2(xxx))
assert len(y) == 2 |
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False. | def test_model2d_axis_2(model_class):
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
p2 = model_class(
1,
1,
c0_0=[[[0, 1, 2]]],
c0_1=[[[3, 4, 5]]],
c1_0=[[[5, 6, 7]]],
c1_1=[[[1, 1, 1]]],
n_models=3,
model_set_axis=2,
)
t1 = model_class(1, 1, c0_0=0, c0_1=3, c1_0=5, c1_1=1)
t2 = model_class(1, 1, c0_0=1, c0_1=4, c1_0=6, c1_1=1)
t3 = model_class(1, 1, c0_0=2, c0_1=5, c1_0=7, c1_1=1)
assert p2.c0_0.shape == (1, 1, 3)
y = p2(x, x, model_set_axis=False)
assert y.shape == (1, 4, 3)
# These are columns along the 2nd axis.
assert_allclose(y[:, :, 0].flatten(), t1(x, x))
assert_allclose(y[:, :, 1].flatten(), t2(x, x))
assert_allclose(y[:, :, 2].flatten(), t3(x, x)) |
Tests evaluation of Linear1D and Planar2D with different model_set_axis. | def test_eval():
"""Tests evaluation of Linear1D and Planar2D with different model_set_axis."""
model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2)
p = Polynomial1D(1, c0=[3, 4], c1=[1, 2], n_models=2)
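# Linear1D(slope, intercept) is mathematically identical to Polynomial1D(1, c0=intercept,
# c1=slope), so the two model sets must agree.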
assert_allclose(model(xx), p(xx))
assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False))
MESSAGE = r"Input argument 'x' does not have the correct dimensions in .*"
with pytest.raises(ValueError, match=MESSAGE):
model(x)
model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1)
p = Polynomial1D(1, c0=[[3, 4]], c1=[[1, 2]], n_models=2, model_set_axis=1)
assert_allclose(model(xx.T), p(xx.T))
assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False))
with pytest.raises(ValueError, match=MESSAGE):
model(xx)
model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx, xx)
assert y.shape == (2, 4)
MESSAGE = r"Missing input arguments - expected 2, got 1"
with pytest.raises(ValueError, match=MESSAGE):
model(x) |
Regression test for issue #7159. | def test_linearlsqfitter(model_class):
"""
Regression test for issue #7159.
"""
p = model_class(1, n_models=2, model_set_axis=1)
# Generate data for fitting 2 models and re-stack them along the last axis:
y = np.array([2 * x + 1, x + 4])
y = np.rollaxis(y, 0, -1).T
f = LinearLSQFitter()
# This seems to fit the model_set correctly:
fit = f(p, x, y)
model_y = fit(x, model_set_axis=False)
m1 = model_class(1, c0=fit.c0[0][0], c1=fit.c1[0][0], domain=fit.domain)
m2 = model_class(1, c0=fit.c0[0][1], c1=fit.c1[0][1], domain=fit.domain)
assert_allclose(model_y[:, 0], m1(x))
assert_allclose(model_y[:, 1], m2(x))
p = model_class(1, n_models=2, model_set_axis=0)
fit = f(p, x, y.T) |
Test fitting model sets of Linear1D and Planar2D. | def test_fitting_shapes():
"""Test fitting model sets of Linear1D and Planar2D."""
fitter = LinearLSQFitter()
model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx)
fitter(model, x, y)
model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1)
fitter(model, x, y.T)
model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx, xx)
fitter(model, x, x, y) |
Tests fitting multiple models simultaneously. | def test_linear_fit_model_set_common_weight():
"""Tests fitting multiple models simultaneously."""
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
weights = np.ones(10)
weights[[0, -1]] = 0
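# Zero weights at the end points simply drop those samples from the least-squares
# solution; the fit should still recover the noiseless model.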
fitted_model = fitter(init_model, x, y, weights=weights)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1)
# Check that using null weights raises an error
# ValueError: On entry to DLASCL parameter number 4 had an illegal value
with pytest.raises(ValueError, match=r"Found NaNs in the coefficient matrix"):
with pytest.warns(
RuntimeWarning, match=r"invalid value encountered in.*divide"
):
fitted_model = fitter(init_model, x, y, weights=np.zeros(10)) |
Tests fitting multiple models simultaneously. | def test_linear_fit_model_set_weights():
"""Tests fitting multiple models simultaneously."""
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
weights = np.ones_like(y)
# Put a null weight for the min and max values
weights[[0, 1], weights.argmin(axis=1)] = 0
weights[[0, 1], weights.argmax(axis=1)] = 0
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, weights=weights)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1)
# Check that using null weights raises an error
weights[0] = 0
with pytest.raises(ValueError, match=r"Found NaNs in the coefficient matrix"):
with pytest.warns(
RuntimeWarning, match=r"invalid value encountered in.*divide"
):
fitted_model = fitter(init_model, x, y, weights=weights)
# Now we mask the values where weight is 0
with pytest.warns(RuntimeWarning, match=r"invalid value encountered in.*divide"):
fitted_model = fitter(
init_model, x, np.ma.array(y, mask=np.isclose(weights, 0)), weights=weights
)
# Parameters for the first model are all NaNs
assert np.all(np.isnan(fitted_model.param_sets[:, 0]))
assert np.all(np.isnan(fitted_model(x, model_set_axis=False)[0]))
# Second model is fitted correctly
assert_allclose(fitted_model(x, model_set_axis=False)[1], y_expected[1], rtol=1e-1) |
Test if getting / setting of Parameter properties works. | def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
p = Parameter("alpha", default=1)
assert p.name == "alpha"
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = "beta"
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
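# `tied` accepts a callable; assigning False removes the tie again.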
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
p.max = 41
assert p.max == 41 |
Test if the parameter arithmetic operators work. | def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
par = Parameter("alpha", default=42)
num = 42.0
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par**val == num**val
assert val**par == val**num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num) |
Tests that in a model with 3 parameters that do not all mutually broadcast,
this is determined correctly regardless of what order the parameters are
in. | def test_non_broadcasting_parameters():
"""
Tests that in a model with 3 parameters that do not all mutually broadcast,
this is determined correctly regardless of what order the parameters are
in.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
MESSAGE = (
r"Parameter '.*' of shape .* cannot be broadcast with parameter '.*' of"
r" shape .*"
)
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError, match=MESSAGE):
TestModel(*args) |
Test Planck function with overflow. | def test_blackbody_overflow():
"""Test Planck function with overflow."""
photlam = u.photon / (u.cm**2 * u.s * u.AA)
wave = [0.0, 1000.0, 100000.0, 1e55] # Angstrom
temp = 10000.0 # Kelvin
bb = BlackBody(temperature=temp * u.K, scale=1.0)
with pytest.warns(
AstropyUserWarning,
match=r"Input contains invalid wavelength/frequency value\(s\)",
):
with np.errstate(all="ignore"):
bb_lam = bb(wave) * u.sr
flux = bb_lam.to(photlam, u.spectral_density(wave * u.AA)) / u.sr
# First element is NaN, last element is very small, others normal
assert np.isnan(flux[0])
with np.errstate(all="ignore"):
assert np.log10(flux[-1].value) < -134
np.testing.assert_allclose(
flux.value[1:-1], [0.00046368, 0.04636773], rtol=1e-3
) # 0.1% accuracy in PHOTLAM/sr
with np.errstate(all="ignore"):
flux = bb(1.0 * u.AA)
assert flux.value == 0 |
Test exceptions and warnings. | def test_blackbody_exceptions_and_warnings():
"""Test exceptions and warnings."""
# Negative temperature
with pytest.raises(
ValueError, match="Temperature should be positive: \\[-100.\\] K"
):
bb = BlackBody(-100 * u.K)
bb(1.0 * u.micron)
bb = BlackBody(5000 * u.K)
# Zero wavelength given for conversion to Hz
with (
pytest.warns(AstropyUserWarning, match="invalid") as w,
np.errstate(divide="ignore", invalid="ignore"),
):
bb(0 * u.AA)
assert len(w) == 1
# Negative wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match="invalid") as w:
bb(-1.0 * u.AA)
assert len(w) == 1
# Test that a non surface brightness convertible scale unit raises an error
with pytest.raises(
ValueError, match="scale units not dimensionless or in surface brightness: Jy"
):
bb = BlackBody(5000 * u.K, scale=1.0 * u.Jy) |